Add a per-namespace counter so the total number of offloaded flows can
be read from userspace (/proc/net/nf_flowtable_counter).

Change from v4:
* Use per-CPU counters instead of an atomic variable

Change from v3:
* seq_file_net has to be seq_file_single_net

Change from v2:
* Add remove_proc_entry() call to nf_flow_table_fini_proc
* Syntax fixes

Change from v1:
* Clean up the proc entries in case of an error

Signed-off-by: Abdelrahman Morsy <abdelrahman.morsy@xxxxxxxxxxxx>
Signed-off-by: Sven Auhagen <sven.auhagen@xxxxxxxxxxxx>

diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h
index 1c5fc657e267..1496a6af6ac4 100644
--- a/include/net/netns/flow_table.h
+++ b/include/net/netns/flow_table.h
@@ -6,6 +6,8 @@ struct nf_flow_table_stat {
 	unsigned int count_wq_add;
 	unsigned int count_wq_del;
 	unsigned int count_wq_stats;
+	unsigned int count_flowoffload_add;
+	unsigned int count_flowoffload_del;
 };
 
 struct netns_ft {
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 81c26a96c30b..d6bc8f0ff51d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -282,6 +282,7 @@ unsigned long flow_offload_get_timeout(struct flow_offload *flow)
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
+	struct net *net;
 	int err;
 
 	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
 
@@ -304,6 +305,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 
 	nf_ct_offload_timeout(flow->ct);
 
+	net = read_pnet(&flow_table->net);
+	NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_flowoffload_add);
+
 	if (nf_flowtable_hw_offload(flow_table)) {
 		__set_bit(NF_FLOW_HW, &flow->flags);
 		nf_flow_offload_add(flow_table, flow);
@@ -339,6 +343,8 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 static void flow_offload_del(struct nf_flowtable *flow_table,
 			     struct flow_offload *flow)
 {
+	struct net *net = read_pnet(&flow_table->net);
+
 	rhashtable_remove_fast(&flow_table->rhashtable,
 			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
 			       nf_flow_offload_rhash_params);
@@ -346,6 +352,8 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
 			       nf_flow_offload_rhash_params);
 	flow_offload_free(flow);
+
+	NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_flowoffload_del);
 }
 
 void flow_offload_teardown(struct flow_offload *flow)
diff --git a/net/netfilter/nf_flow_table_procfs.c b/net/netfilter/nf_flow_table_procfs.c
index 159b033a43e6..c4d15bd1a0f0 100644
--- a/net/netfilter/nf_flow_table_procfs.c
+++ b/net/netfilter/nf_flow_table_procfs.c
@@ -64,17 +64,47 @@ static const struct seq_operations nf_flow_table_cpu_seq_ops = {
 	.show	= nf_flow_table_cpu_seq_show,
 };
 
+static int nf_flow_table_counter_show(struct seq_file *seq, void *v)
+{
+	struct net *net = seq_file_single_net(seq);
+	struct nf_flow_table_stat *st;
+	unsigned int counter_add = 0;
+	unsigned int counter_del = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		if (!cpu_possible(cpu))
+			continue;
+
+		st = per_cpu_ptr(net->ft.stat, cpu);
+		counter_add += st->count_flowoffload_add;
+		counter_del += st->count_flowoffload_del;
+	}
+
+	seq_printf(seq, "%u\n", counter_add - counter_del);
+	return 0;
+}
+
 int nf_flow_table_init_proc(struct net *net)
 {
-	struct proc_dir_entry *pde;
+	if (!proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
+			     &nf_flow_table_cpu_seq_ops, sizeof(struct seq_net_private)))
+		goto err;
 
-	pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
-			      &nf_flow_table_cpu_seq_ops,
-			      sizeof(struct seq_net_private));
-	return pde ? 0 : -ENOMEM;
+	if (!proc_create_net_single("nf_flowtable_counter", 0444,
+				    net->proc_net, nf_flow_table_counter_show, NULL))
+		goto err_net;
+
+	return 0;
+
+err_net:
+	remove_proc_entry("nf_flowtable", net->proc_net_stat);
+err:
+	return -ENOMEM;
 }
 
 void nf_flow_table_fini_proc(struct net *net)
 {
 	remove_proc_entry("nf_flowtable", net->proc_net_stat);
+	remove_proc_entry("nf_flowtable_counter", net->proc_net);
 }
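
Note: NF_FLOW_TABLE_STAT_INC_ATOMIC itself is not part of this diff; it
belongs to the existing flow table stat helpers. As a rough sketch of
the per-CPU scheme this patch relies on (an assumed definition modelled
on the other NF_FLOW_TABLE_STAT_* helpers, not copied from the tree):

	/* Sketch, assuming net->ft.stat is a __percpu pointer (as the
	 * per_cpu_ptr() use in nf_flow_table_counter_show() suggests).
	 * Each possible CPU owns its own nf_flow_table_stat slot, so the
	 * write side needs no shared atomic; this_cpu_inc() is the
	 * preemption-safe variant of __this_cpu_inc(). The read side sums
	 * every per-CPU slot and prints add - del, i.e. the number of
	 * flows currently offloaded.
	 */
	#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
		this_cpu_inc((net)->ft.stat->count)

With the patch applied, the aggregate can be read per namespace with,
e.g., cat /proc/net/nf_flowtable_counter.

-- 
2.33.1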