[PATCH][nf-next] netfilter: replace modulo operation with bitwise AND

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



CONNTRACK_LOCKS is 1024, which is a power of 2, so modulo operations
can be replaced with a bitwise AND with (CONNTRACK_LOCKS - 1),

and a bitwise AND operation is quicker than a modulo operation.

Signed-off-by: Zhang Yu <zhangyu31@xxxxxxxxx>
Signed-off-by: Li RongQing <lirongqing@xxxxxxxxx>
---
 include/net/netfilter/nf_conntrack_core.h |  4 +++-
 net/netfilter/nf_conntrack_core.c         | 10 +++++-----
 net/netfilter/nf_conntrack_netlink.c      |  2 +-
 net/netfilter/nf_nat_core.c               |  6 +++---
 4 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index ae41e92251dd..1b75a141c63d 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -67,7 +67,9 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
 		 const struct nf_conntrack_l4proto *proto);
 
-#define CONNTRACK_LOCKS 1024
+#define CONNTRACK_LOCKS_BIT 10
+#define CONNTRACK_LOCKS  (1 << CONNTRACK_LOCKS_BIT)
+#define CONNTRACK_LOCKS_MASK (CONNTRACK_LOCKS - 1)
 
 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
 void nf_conntrack_lock(spinlock_t *lock);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e139c256e269..a0feb530ed00 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -116,8 +116,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
 {
-	h1 %= CONNTRACK_LOCKS;
-	h2 %= CONNTRACK_LOCKS;
+	h1 &= CONNTRACK_LOCKS_MASK;
+	h2 &= CONNTRACK_LOCKS_MASK;
 	spin_unlock(&nf_conntrack_locks[h1]);
 	if (h1 != h2)
 		spin_unlock(&nf_conntrack_locks[h2]);
@@ -127,8 +127,8 @@ static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
 				     unsigned int h2, unsigned int sequence)
 {
-	h1 %= CONNTRACK_LOCKS;
-	h2 %= CONNTRACK_LOCKS;
+	h1 &= CONNTRACK_LOCKS_MASK;
+	h2 &= CONNTRACK_LOCKS_MASK;
 	if (h1 <= h2) {
 		nf_conntrack_lock(&nf_conntrack_locks[h1]);
 		if (h1 != h2)
@@ -1971,7 +1971,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
 	spinlock_t *lockp;
 
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
-		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
+		lockp = &nf_conntrack_locks[*bucket & CONNTRACK_LOCKS_MASK];
 		local_bh_disable();
 		nf_conntrack_lock(lockp);
 		if (*bucket < nf_conntrack_htable_size) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 349b42a65c8a..8f22743c270f 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -918,7 +918,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 			nf_ct_put(nf_ct_evict[i]);
 		}
 
-		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
+		lockp = &nf_conntrack_locks[cb->args[0] & CONNTRACK_LOCKS_MASK];
 		nf_conntrack_lock(lockp);
 		if (cb->args[0] >= nf_conntrack_htable_size) {
 			spin_unlock(lockp);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 35e61038ae96..2b1d377bf3ab 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -588,7 +588,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 
 		srchash = hash_by_src(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
+		lock = &nf_nat_locks[srchash & CONNTRACK_LOCKS_MASK];
 		spin_lock_bh(lock);
 		hlist_add_head_rcu(&ct->nat_bysource,
 				   &nf_nat_bysource[srchash]);
@@ -773,9 +773,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
 	unsigned int h;
 
 	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
+	spin_lock_bh(&nf_nat_locks[h & CONNTRACK_LOCKS_MASK]);
 	hlist_del_rcu(&ct->nat_bysource);
-	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
+	spin_unlock_bh(&nf_nat_locks[h & CONNTRACK_LOCKS_MASK]);
 }
 
 static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
-- 
2.16.2




[Index of Archives]     [Netfitler Users]     [Berkeley Packet Filter]     [LARTC]     [Bugtraq]     [Yosemite Forum]

  Powered by Linux