Re: [PATCH net] netfilter: Use consistent ct id hash calculation

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





On 8/7/19 4:45 PM, Florian Westphal wrote:


So Pablo's suggestion above should work just fine.
Dirk, can you spin a v2 with that change?


Yes, will do tomorrow.

Also, just an idea, I also played around with just adding
u32 id to struct nf_conn and just calculating the hash inside
__nf_conntrack_alloc when initialized or even lazily in nf_ct_get_id.
This seems to work fine and you don't have to worry about anything changing
and only calculate the hash once.

I'm presuming this method was avoided for some reason, like keeping the struct
size to a minimum.

---
 include/net/netfilter/nf_conntrack.h |    3 +++
 net/netfilter/nf_conntrack_core.c    |   30 +++++++++++++++---------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 93bbae8..9772ddc 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -74,6 +74,9 @@ struct nf_conn {
 	/* jiffies32 when this ct is considered dead */
 	u32 timeout;
+	/* ct id */
+	u32 id;
+
 	possible_net_t ct_net;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ab73c5f..614fd86 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,21 +312,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
-	static __read_mostly siphash_key_t ct_id_seed;
-	unsigned long a, b, c, d;
-
-	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
-
-	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
-				   &ct_id_seed);
-#ifdef CONFIG_64BIT
-	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
-#else
-	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
-#endif
+	return ct->id;
 }
 EXPORT_SYMBOL_GPL(nf_ct_get_id);
@@ -1178,6 +1164,7 @@ __nf_conntrack_alloc(struct net *net,
 		     gfp_t gfp, u32 hash)
 {
 	struct nf_conn *ct;
+	static __read_mostly siphash_key_t ct_id_seed;
 
 	/* We don't want any race condition at early drop stage */
 	atomic_inc(&net->ct.count);
@@ -1215,6 +1202,19 @@ __nf_conntrack_alloc(struct net *net,
 	nf_ct_zone_add(ct, zone);
 
+	unsigned long a, b, c;
+	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+	a = (unsigned long)ct;
+	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+	c = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+				   &ct_id_seed);
+#ifdef CONFIG_64BIT
+	ct->id = siphash_3u64((u64)a, (u64)b, (u64)c, &ct_id_seed);
+#else
+	ct->id = siphash_3u32((u32)a, (u32)b, (u32)c, &ct_id_seed);
+#endif
+
 	/* Because we use RCU lookups, we set ct_general.use to zero before
 	 * this is inserted in any list.
 	 */



[Index of Archives]     [Netfitler Users]     [Berkeley Packet Filter]     [LARTC]     [Bugtraq]     [Yosemite Forum]

  Powered by Linux