[PATCH net-next 4/8] netfilter: introduce total count of hw offload 'add' workqueue tasks

To improve hardware offload debuggability, and to allow a following patch
to cap the total number of in-flight offload 'add' entries on the
workqueue, extend struct netns_nftables with a 'count_wq_add' counter and
expose it to userspace as the 'nf_flowtable_count_wq_add' sysctl entry.
Increment the counter when allocating a new workqueue task and decrement
it when the allocation fails or after flow_offload_work_add() has
finished.
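
For reference, a minimal userspace sketch for polling the new counter.
It assumes the entry appears next to the other nf_flowtable_* sysctls
under /proc/sys/net/netfilter; the path and error handling are
illustrative only:

#include <stdio.h>

/* Illustrative reader for the counter added by this patch. */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/netfilter/nf_flowtable_count_wq_add", "r");
	int count;

	if (!f || fscanf(f, "%d", &count) != 1) {
		fprintf(stderr, "failed to read nf_flowtable_count_wq_add\n");
		return 1;
	}
	printf("in-flight offload 'add' tasks: %d\n", count);
	fclose(f);
	return 0;
}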

Signed-off-by: Vlad Buslov <vladbu@xxxxxxxxxx>
Signed-off-by: Oz Shlomo <ozsh@xxxxxxxxxx>
Reviewed-by: Paul Blakey <paulb@xxxxxxxxxx>
---
 include/net/netns/nftables.h            |  1 +
 net/netfilter/nf_conntrack_standalone.c |  8 ++++++++
 net/netfilter/nf_flow_table_offload.c   | 10 +++++++++-
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 5677f21fdd4c..a971d986a75b 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -8,6 +8,7 @@ struct netns_nftables {
 	u8			gencursor;
 	atomic_t		count_hw;
 	int			max_hw;
+	atomic_t		count_wq_add;
 };
 
 #endif
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index af0dea471119..fe2327823f7a 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -621,6 +621,7 @@ enum nf_ct_sysctl_index {
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
 	NF_SYSCTL_CT_COUNT_HW,
 	NF_SYSCTL_CT_MAX_HW,
+	NF_SYSCTL_CT_COUNT_WQ_ADD,
 #endif
 
 	__NF_SYSCTL_CT_LAST_SYSCTL,
@@ -991,6 +992,12 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	[NF_SYSCTL_CT_COUNT_WQ_ADD] = {
+		.procname	= "nf_flowtable_count_wq_add",
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
 #endif
 	{}
 };
@@ -1131,6 +1138,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
 	table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
 	table[NF_SYSCTL_CT_COUNT_HW].data = &net->nft.count_hw;
 	table[NF_SYSCTL_CT_MAX_HW].data = &net->nft.max_hw;
+	table[NF_SYSCTL_CT_COUNT_WQ_ADD].data = &net->nft.count_wq_add;
 #endif
 
 	nf_conntrack_standalone_init_tcp_sysctl(net, table);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index b561e0a44a45..ffbcf0cfefeb 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -953,11 +953,15 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 static void flow_offload_work_handler(struct work_struct *work)
 {
 	struct flow_offload_work *offload;
+	struct net *net;
 
 	offload = container_of(work, struct flow_offload_work, work);
+	net = read_pnet(&offload->flowtable->net);
+
 	switch (offload->cmd) {
 	case FLOW_CLS_REPLACE:
 		flow_offload_work_add(offload);
+		atomic_dec(&net->nft.count_wq_add);
 		break;
 	case FLOW_CLS_DESTROY:
 		flow_offload_work_del(offload);
@@ -1011,11 +1015,15 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
 void nf_flow_offload_add(struct nf_flowtable *flowtable,
 			 struct flow_offload *flow)
 {
+	struct net *net = read_pnet(&flowtable->net);
 	struct flow_offload_work *offload;
 
+	atomic_inc(&net->nft.count_wq_add);
 	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
-	if (!offload)
+	if (!offload) {
+		atomic_dec(&net->nft.count_wq_add);
 		return;
+	}
 
 	flow_offload_queue_work(offload);
 }
-- 
2.31.1
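
As context for the cap mentioned in the changelog, here is a purely
hypothetical sketch (not the actual follow-up patch from this series) of
how a counter like nft.count_wq_add could gate new 'add' work against a
limit; 'max_wq_add' is an invented field used only for illustration:

/* Hypothetical illustration only: 'max_wq_add' does not exist in this
 * series; the real cap is introduced by a later patch.
 */
static bool nf_flow_offload_add_allowed(struct net *net)
{
	int max = READ_ONCE(net->nft.max_wq_add);

	/* treat 0 or negative as "no limit" in this sketch */
	return max <= 0 || atomic_read(&net->nft.count_wq_add) < max;
}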



