pping: Rename aggregated rtts to aggregated stats

The (per-subnet) aggregated stats already include packet and byte
counts, so they are not strictly limited to RTTs. Future commits will
further extend the non-RTT related statistics that are aggregated.
Therefore, rename structs, functions and parameters from
"aggregated_rtts" to "aggregated_stats".

To clarify which members of the aggregated_rtt_stats struct (now
renamed to aggregated_stats) are related to the RTT, prefix their
names with "rtt_", e.g. "min" -> "rtt_min".
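
For reference, the struct definition after this change (taken from the
header diff below):

    struct aggregated_stats {
            __u64 last_updated;
            __u64 rx_packet_count;
            __u64 tx_packet_count;
            __u64 rx_byte_count;
            __u64 tx_byte_count;
            __u64 rtt_min;                   /* was "min" */
            __u64 rtt_max;                   /* was "max" */
            __u32 rtt_bins[RTT_AGG_NR_BINS]; /* was "bins" */
    };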

Signed-off-by: Simon Sundberg <simon.sundberg@kau.se>
Author: Simon Sundberg
Date:   2023-10-17 16:23:51 +02:00
Parent: bc9df640cb
Commit: 0200196244

3 changed files with 108 additions and 105 deletions


@@ -1119,26 +1119,26 @@ static void handle_missed_events(void *ctx, int cpu, __u64 lost_cnt)
 	fprintf(stderr, "Lost %llu events on CPU %d\n", lost_cnt, cpu);
 }
 
-static bool aggregated_rtt_stats_empty(struct aggregated_rtt_stats *stats)
+static bool aggregated_stats_empty(struct aggregated_stats *stats)
 {
 	return stats->tx_packet_count == 0 && stats->rx_packet_count == 0;
 }
 
-static bool aggregated_rtt_stats_nortts(struct aggregated_rtt_stats *stats)
+static bool aggregated_stats_nortts(struct aggregated_stats *stats)
 {
-	return stats->max == 0;
+	return stats->rtt_max == 0;
 }
 
-static __u64 aggregated_rtt_stats_maxbins(struct aggregated_rtt_stats *stats,
-					  __u64 bin_width, __u64 n_bins)
+static __u64 aggregated_stats_maxbins(struct aggregated_stats *stats,
+				      __u64 bin_width, __u64 n_bins)
 {
-	return stats->max / bin_width < n_bins ? stats->max / bin_width + 1 :
-						 n_bins;
+	return stats->rtt_max / bin_width < n_bins ?
+		       stats->rtt_max / bin_width + 1 :
+		       n_bins;
 }
 
-static void
-merge_percpu_aggreated_rtts(struct aggregated_rtt_stats *percpu_stats,
-			    struct aggregated_rtt_stats *merged_stats,
-			    int n_cpus, int n_bins)
+static void merge_percpu_aggreated_stats(struct aggregated_stats *percpu_stats,
+					 struct aggregated_stats *merged_stats,
+					 int n_cpus, int n_bins)
 {
 	int i, bin;
@@ -1157,21 +1157,22 @@ merge_percpu_aggreated_rtts(struct aggregated_rtt_stats *percpu_stats,
 		merged_stats->rx_byte_count += percpu_stats[i].rx_byte_count;
 		merged_stats->tx_byte_count += percpu_stats[i].tx_byte_count;
 
-		if (aggregated_rtt_stats_nortts(&percpu_stats[i]))
+		if (aggregated_stats_nortts(&percpu_stats[i]))
 			continue;
 
-		if (percpu_stats[i].max > merged_stats->max)
-			merged_stats->max = percpu_stats[i].max;
-		if (merged_stats->min == 0 ||
-		    percpu_stats[i].min < merged_stats->min)
-			merged_stats->min = percpu_stats[i].min;
+		if (percpu_stats[i].rtt_max > merged_stats->rtt_max)
+			merged_stats->rtt_max = percpu_stats[i].rtt_max;
+		if (merged_stats->rtt_min == 0 ||
+		    percpu_stats[i].rtt_min < merged_stats->rtt_min)
+			merged_stats->rtt_min = percpu_stats[i].rtt_min;
 
 		for (bin = 0; bin < n_bins; bin++)
-			merged_stats->bins[bin] += percpu_stats[i].bins[bin];
+			merged_stats->rtt_bins[bin] +=
+				percpu_stats[i].rtt_bins[bin];
 	}
 }
 
-static void clear_aggregated_rtts(struct aggregated_rtt_stats *stats)
+static void clear_aggregated_stats(struct aggregated_stats *stats)
 {
 	__u64 last_updated = stats->last_updated;
 	memset(stats, 0, sizeof(*stats));
@@ -1216,80 +1217,81 @@ static void print_aggmetadata(struct output_context *out_ctx,
 		print_aggmetadata_json(out_ctx->jctx, agg_conf);
 }
 
-static void print_aggrtts_standard(FILE *stream, __u64 t, const char *prefixstr,
-				   struct aggregated_rtt_stats *rtt_stats,
-				   struct aggregation_config *agg_conf)
+static void print_aggstats_standard(FILE *stream, __u64 t,
+				    const char *prefixstr,
+				    struct aggregated_stats *stats,
+				    struct aggregation_config *agg_conf)
 {
 	__u64 bw = agg_conf->bin_width;
-	__u64 nb = aggregated_rtt_stats_maxbins(rtt_stats, bw, agg_conf->n_bins);
+	__u64 nb = aggregated_stats_maxbins(stats, bw, agg_conf->n_bins);
 
 	print_ns_datetime(stream, t);
 	fprintf(stream,
 		": %s -> rxpkts=%llu, rxbytes=%llu, txpkts=%llu, txbytes=%llu",
-		prefixstr, rtt_stats->rx_packet_count, rtt_stats->rx_byte_count,
-		rtt_stats->tx_packet_count, rtt_stats->tx_byte_count);
+		prefixstr, stats->rx_packet_count, stats->rx_byte_count,
+		stats->tx_packet_count, stats->tx_byte_count);
 
-	if (aggregated_rtt_stats_nortts(rtt_stats))
+	if (aggregated_stats_nortts(stats))
 		goto exit;
 
 	fprintf(stream,
 		", rtt-count=%llu, min=%.6g ms, mean=%g ms, median=%g ms, p95=%g ms, max=%.6g ms",
-		lhist_count(rtt_stats->bins, nb),
-		(double)rtt_stats->min / NS_PER_MS,
-		lhist_mean(rtt_stats->bins, nb, bw, 0) / NS_PER_MS,
-		lhist_percentile(rtt_stats->bins, 50, nb, bw, 0) / NS_PER_MS,
-		lhist_percentile(rtt_stats->bins, 95, nb, bw, 0) / NS_PER_MS,
-		(double)rtt_stats->max / NS_PER_MS);
+		lhist_count(stats->rtt_bins, nb),
+		(double)stats->rtt_min / NS_PER_MS,
+		lhist_mean(stats->rtt_bins, nb, bw, 0) / NS_PER_MS,
+		lhist_percentile(stats->rtt_bins, 50, nb, bw, 0) / NS_PER_MS,
+		lhist_percentile(stats->rtt_bins, 95, nb, bw, 0) / NS_PER_MS,
+		(double)stats->rtt_max / NS_PER_MS);
 
 exit:
 	fprintf(stream, "\n");
 }
 
-static void print_aggrtts_json(json_writer_t *ctx, __u64 t,
-			       const char *prefixstr,
-			       struct aggregated_rtt_stats *rtt_stats,
-			       struct aggregation_config *agg_conf)
+static void print_aggstats_json(json_writer_t *ctx, __u64 t,
+				const char *prefixstr,
+				struct aggregated_stats *stats,
+				struct aggregation_config *agg_conf)
 {
 	__u64 bw = agg_conf->bin_width;
-	__u64 nb = aggregated_rtt_stats_maxbins(rtt_stats, bw, agg_conf->n_bins);
+	__u64 nb = aggregated_stats_maxbins(stats, bw, agg_conf->n_bins);
 	int i;
 
 	jsonw_start_object(ctx);
 	jsonw_u64_field(ctx, "timestamp", convert_monotonic_to_realtime(t));
 	jsonw_string_field(ctx, "ip_prefix", prefixstr);
-	jsonw_u64_field(ctx, "rx_packets", rtt_stats->rx_packet_count);
-	jsonw_u64_field(ctx, "tx_packets", rtt_stats->tx_packet_count);
-	jsonw_u64_field(ctx, "rx_bytes", rtt_stats->rx_byte_count);
-	jsonw_u64_field(ctx, "tx_bytes", rtt_stats->tx_byte_count);
+	jsonw_u64_field(ctx, "rx_packets", stats->rx_packet_count);
+	jsonw_u64_field(ctx, "tx_packets", stats->tx_packet_count);
+	jsonw_u64_field(ctx, "rx_bytes", stats->rx_byte_count);
+	jsonw_u64_field(ctx, "tx_bytes", stats->tx_byte_count);
 
-	if (aggregated_rtt_stats_nortts(rtt_stats))
+	if (aggregated_stats_nortts(stats))
 		goto exit;
 
-	jsonw_u64_field(ctx, "count_rtt", lhist_count(rtt_stats->bins, nb));
-	jsonw_u64_field(ctx, "min_rtt", rtt_stats->min);
+	jsonw_u64_field(ctx, "count_rtt", lhist_count(stats->rtt_bins, nb));
+	jsonw_u64_field(ctx, "min_rtt", stats->rtt_min);
 	jsonw_float_field(ctx, "mean_rtt",
-			  lhist_mean(rtt_stats->bins, nb, bw, 0));
+			  lhist_mean(stats->rtt_bins, nb, bw, 0));
 	jsonw_float_field(ctx, "median_rtt",
-			  lhist_percentile(rtt_stats->bins, 50, nb, bw, 0));
+			  lhist_percentile(stats->rtt_bins, 50, nb, bw, 0));
 	jsonw_float_field(ctx, "p95_rtt",
-			  lhist_percentile(rtt_stats->bins, 95, nb, bw, 0));
-	jsonw_u64_field(ctx, "max_rtt", rtt_stats->max);
+			  lhist_percentile(stats->rtt_bins, 95, nb, bw, 0));
+	jsonw_u64_field(ctx, "max_rtt", stats->rtt_max);
 	jsonw_name(ctx, "histogram");
 	jsonw_start_array(ctx);
 	for (i = 0; i < nb; i++)
-		jsonw_uint(ctx, rtt_stats->bins[i]);
+		jsonw_uint(ctx, stats->rtt_bins[i]);
 	jsonw_end_array(ctx);
 
 exit:
 	jsonw_end_object(ctx);
 }
 
-static void print_aggregated_rtts(struct output_context *out_ctx, __u64 t,
-				  struct ipprefix_key *prefix, int af,
-				  __u8 prefix_len,
-				  struct aggregated_rtt_stats *rtt_stats,
-				  struct aggregation_config *agg_conf)
+static void print_aggregated_stats(struct output_context *out_ctx, __u64 t,
+				   struct ipprefix_key *prefix, int af,
+				   __u8 prefix_len,
+				   struct aggregated_stats *stats,
+				   struct aggregation_config *agg_conf)
 {
 	char prefixstr[INET6_PREFIXSTRLEN] = { 0 };
@@ -1300,10 +1302,10 @@ static void print_aggregated_rtts(struct output_context *out_ctx, __u64 t,
 	format_ipprefix(prefixstr, sizeof(prefixstr), af, prefix, prefix_len);
 
 	if (out_ctx->format == PPING_OUTPUT_STANDARD)
-		print_aggrtts_standard(out_ctx->stream, t, prefixstr, rtt_stats,
-				       agg_conf);
+		print_aggstats_standard(out_ctx->stream, t, prefixstr, stats,
+					agg_conf);
 	else if (out_ctx->jctx)
-		print_aggrtts_json(out_ctx->jctx, t, prefixstr, rtt_stats,
-				   agg_conf);
+		print_aggstats_json(out_ctx->jctx, t, prefixstr, stats,
+				    agg_conf);
 }
@@ -1342,13 +1344,13 @@ static int switch_agg_map(int map_active_fd)
 	return prev_map;
 }
 
-static void report_aggregated_rtt_mapentry(
+static void report_aggregated_stats_mapentry(
 	struct output_context *out_ctx, struct ipprefix_key *prefix,
-	struct aggregated_rtt_stats *percpu_stats, int n_cpus, int af,
+	struct aggregated_stats *percpu_stats, int n_cpus, int af,
 	__u8 prefix_len, __u64 t_monotonic, struct aggregation_config *agg_conf,
 	bool *del_entry)
 {
-	struct aggregated_rtt_stats merged_stats;
+	struct aggregated_stats merged_stats;
 	struct ipprefix_key backup_key = { 0 };
 	int i;
@@ -1359,7 +1361,7 @@ static void report_aggregated_rtt_mapentry(
 		prefix_len = 0;
 	}
 
-	merge_percpu_aggreated_rtts(percpu_stats, &merged_stats, n_cpus,
-				    agg_conf->n_bins);
+	merge_percpu_aggreated_stats(percpu_stats, &merged_stats, n_cpus,
+				     agg_conf->n_bins);
 
 	if (prefix_len > 0 && // Pointless deleting /0 entry, and ensures backup keys are never deleted
@@ -1372,23 +1374,24 @@ static void report_aggregated_rtt_mapentry(
 		*del_entry = false;
 
 	// Only print and clear prefixes which have RTT samples
-	if (!aggregated_rtt_stats_empty(&merged_stats)) {
-		print_aggregated_rtts(out_ctx, t_monotonic, prefix, af,
-				      prefix_len, &merged_stats, agg_conf);
+	if (!aggregated_stats_empty(&merged_stats)) {
+		print_aggregated_stats(out_ctx, t_monotonic, prefix, af,
+				       prefix_len, &merged_stats, agg_conf);
 
 		// Clear out the reported stats
 		if (!*del_entry)
 			for (i = 0; i < n_cpus; i++) {
-				clear_aggregated_rtts(&percpu_stats[i]);
+				clear_aggregated_stats(&percpu_stats[i]);
 			}
 	}
 }
 
-static int report_aggregated_rtt_map(struct output_context *out_ctx, int map_fd,
-				     int af, __u8 prefix_len, __u64 t_monotonic,
-				     struct aggregation_config *agg_conf)
+static int report_aggregated_stats_map(struct output_context *out_ctx,
+				       int map_fd, int af, __u8 prefix_len,
+				       __u64 t_monotonic,
+				       struct aggregation_config *agg_conf)
 {
-	struct aggregated_rtt_stats *values = NULL;
+	struct aggregated_stats *values = NULL;
 	void *keys = NULL, *del_keys = NULL;
 	int n_cpus = libbpf_num_possible_cpus();
 	size_t keysize = af == AF_INET ? sizeof(__u32) : sizeof(__u64);
@@ -1419,7 +1422,7 @@ static int report_aggregated_rtt_map(struct output_context *out_ctx, int map_fd,
 	}
 
 	for (i = 0; i < count; i++) {
-		report_aggregated_rtt_mapentry(
+		report_aggregated_stats_mapentry(
 			out_ctx, keys + i * keysize,
 			values + i * n_cpus, n_cpus, af, prefix_len,
 			t_monotonic, agg_conf, &del_key);
@@ -1451,7 +1454,7 @@ exit:
 	return err;
 }
 
-static int report_aggregated_rtts(struct output_context *out_ctx,
-				  struct aggregation_maps *maps,
-				  struct aggregation_config *agg_conf)
+static int report_aggregated_stats(struct output_context *out_ctx,
+				   struct aggregation_maps *maps,
+				   struct aggregation_config *agg_conf)
 {
@@ -1462,15 +1465,15 @@ static int report_aggregated_rtts(struct output_context *out_ctx,
 	if (map_idx < 0)
 		return map_idx;
 
-	err = report_aggregated_rtt_map(out_ctx, maps->map_v4_fd[map_idx],
-					AF_INET, agg_conf->ipv4_prefix_len, t,
-					agg_conf);
+	err = report_aggregated_stats_map(out_ctx, maps->map_v4_fd[map_idx],
+					  AF_INET, agg_conf->ipv4_prefix_len, t,
+					  agg_conf);
 	if (err)
 		return err;
 
-	err = report_aggregated_rtt_map(out_ctx, maps->map_v6_fd[map_idx],
-					AF_INET6, agg_conf->ipv6_prefix_len, t,
-					agg_conf);
+	err = report_aggregated_stats_map(out_ctx, maps->map_v6_fd[map_idx],
+					  AF_INET6, agg_conf->ipv6_prefix_len,
+					  t, agg_conf);
 
 	return err;
 }
@@ -1856,7 +1859,7 @@ int fetch_aggregation_map_fds(struct bpf_object *obj,
 static int init_agg_backup_entries(struct aggregation_maps *maps, bool ipv4,
 				   bool ipv6)
 {
-	struct aggregated_rtt_stats *empty_stats;
+	struct aggregated_stats *empty_stats;
 	struct ipprefix_key key;
 	int instance, err = 0;
@@ -1972,7 +1975,7 @@ static int handle_aggregation_timer(int timer_fd,
 			timer_exps - 1);
 	}
 
-	err = report_aggregated_rtts(out_ctx, maps, agg_conf);
+	err = report_aggregated_stats(out_ctx, maps, agg_conf);
 	if (err) {
 		fprintf(stderr, "Failed reporting aggregated RTTs: %s\n",
 			get_libbpf_strerror(err));


@@ -245,15 +245,15 @@ union pping_event {
 	struct map_clean_event map_clean_event;
 };
 
-struct aggregated_rtt_stats {
+struct aggregated_stats {
 	__u64 last_updated;
 	__u64 rx_packet_count;
 	__u64 tx_packet_count;
 	__u64 rx_byte_count;
 	__u64 tx_byte_count;
-	__u64 min;
-	__u64 max;
-	__u32 bins[RTT_AGG_NR_BINS];
+	__u64 rtt_min;
+	__u64 rtt_max;
+	__u32 rtt_bins[RTT_AGG_NR_BINS];
 };
 
 #endif


@@ -127,10 +127,10 @@ char _license[] SEC("license") = "GPL";
 static volatile const struct bpf_config config = {};
 static volatile __u64 last_warn_time[2] = { 0 };
 
-// Keep an empty aggregated_rtt_stats as a global variable to use as a template
+// Keep an empty aggregated_stats as a global variable to use as a template
 // when creating new entries. That way, it won't have to be allocated on stack
 // (where it won't fit anyways) and initialized each time during run time.
-static struct aggregated_rtt_stats empty_stats = { 0 };
+static struct aggregated_stats empty_stats = { 0 };
 
 // Map definitions
@@ -157,28 +157,28 @@ struct {
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__type(key, __u32);
-	__type(value, struct aggregated_rtt_stats);
+	__type(value, struct aggregated_stats);
 	__uint(max_entries, MAP_AGGREGATION_SIZE);
 } map_v4_agg1 SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__type(key, __u32);
-	__type(value, struct aggregated_rtt_stats);
+	__type(value, struct aggregated_stats);
 	__uint(max_entries, MAP_AGGREGATION_SIZE);
 } map_v4_agg2 SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__type(key, __u64);
-	__type(value, struct aggregated_rtt_stats);
+	__type(value, struct aggregated_stats);
 	__uint(max_entries, MAP_AGGREGATION_SIZE);
 } map_v6_agg1 SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__type(key, __u64);
-	__type(value, struct aggregated_rtt_stats);
+	__type(value, struct aggregated_stats);
 	__uint(max_entries, MAP_AGGREGATION_SIZE);
 } map_v6_agg2 SEC(".maps");
@@ -986,10 +986,10 @@ static void create_ipprefix_key_v6(__u64 *prefix_key, struct in6_addr *ip)
 	// *prefix_key = *(__u64 *)ip & config.ipv6_prefix_mask; // gives verifier rejection "misaligned stack access off"
 }
 
-static struct aggregated_rtt_stats *
+static struct aggregated_stats *
 lookup_or_create_aggregation_stats(struct in6_addr *ip, __u8 ipv)
 {
-	struct aggregated_rtt_stats *agg;
+	struct aggregated_stats *agg;
 	struct ipprefix_key key;
 	__u32 *map_choice;
 	__u32 zero = 0;
@@ -1027,21 +1027,21 @@ lookup_or_create_aggregation_stats(struct in6_addr *ip, __u8 ipv)
 	return bpf_map_lookup_elem(agg_map, &key);
 }
 
-static void aggregate_rtt(__u64 rtt, struct aggregated_rtt_stats *agg_stats)
+static void aggregate_rtt(__u64 rtt, struct aggregated_stats *agg_stats)
 {
 	if (!config.agg_rtts || !agg_stats)
 		return;
 
 	int bin_idx;
 
-	if (!agg_stats->min || rtt < agg_stats->min)
-		agg_stats->min = rtt;
-	if (rtt > agg_stats->max)
-		agg_stats->max = rtt;
+	if (!agg_stats->rtt_min || rtt < agg_stats->rtt_min)
+		agg_stats->rtt_min = rtt;
+	if (rtt > agg_stats->rtt_max)
+		agg_stats->rtt_max = rtt;
 
 	bin_idx = rtt / RTT_AGG_BIN_WIDTH;
 	bin_idx = bin_idx >= RTT_AGG_NR_BINS ? RTT_AGG_NR_BINS - 1 : bin_idx;
-	agg_stats->bins[bin_idx]++;
+	agg_stats->rtt_bins[bin_idx]++;
 }
 
 /*
@@ -1090,7 +1090,7 @@ static void pping_timestamp_packet(struct flow_state *f_state, void *ctx,
  */
 static void pping_match_packet(struct flow_state *f_state, void *ctx,
 			       struct packet_info *p_info,
-			       struct aggregated_rtt_stats *agg_stats)
+			       struct aggregated_stats *agg_stats)
 {
 	__u64 rtt;
 	__u64 *p_ts;
@@ -1121,8 +1121,8 @@ static void pping_match_packet(struct flow_state *f_state, void *ctx,
 	aggregate_rtt(rtt, agg_stats);
 }
 
-static void update_aggregate_stats(struct aggregated_rtt_stats **src_stats,
-				   struct aggregated_rtt_stats **dst_stats,
+static void update_aggregate_stats(struct aggregated_stats **src_stats,
+				   struct aggregated_stats **dst_stats,
 				   struct packet_info *p_info)
 {
 	if (!config.agg_rtts)
@@ -1157,7 +1157,7 @@ static void pping_parsed_packet(void *ctx, struct packet_info *p_info)
 {
 	struct dual_flow_state *df_state;
 	struct flow_state *fw_flow, *rev_flow;
-	struct aggregated_rtt_stats *src_stats = NULL, *dst_stats = NULL;
+	struct aggregated_stats *src_stats = NULL, *dst_stats = NULL;
 
 	df_state = lookup_or_create_dualflow_state(ctx, p_info);
 	if (!df_state)