In some use cases, the conntrack zone is used to differentiate conntrack
state. Preserve that uniqueness by adding the zone to the cache hash in
addition to the 5-tuple data, so that external-cache entries remain unique
per zone when synced.

Follow-up fix to:
https://git.netfilter.org/conntrack-tools/commit/?id=a08af5d26297eb85218a3c3a9e0991001a88cf10

Signed-off-by: Adam Casella <adam.casella1984@xxxxxxxxx>
---
 src/cache-ct.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/cache-ct.c b/src/cache-ct.c
index abcfde4..7e788d2 100644
--- a/src/cache-ct.c
+++ b/src/cache-ct.c
@@ -41,7 +41,8 @@ cache_hash4_ct(const struct nf_conntrack *ct, const struct hashtable *table)
 			  nfct_get_attr_u8(ct, ATTR_L4PROTO),
 		[3]	= nfct_get_attr_u16(ct, ATTR_PORT_SRC) << 16 |
 			  nfct_get_attr_u16(ct, ATTR_PORT_DST),
-	};
+		[4]	= nfct_get_attr_u16(ct, ATTR_ZONE),
+	};
 
 	/*
 	 * Instead of returning hash % table->hashsize (implying a divide)
@@ -50,13 +51,13 @@ cache_hash4_ct(const struct nf_conntrack *ct, const struct hashtable *table)
 	 * but using a multiply, less expensive than a divide. See:
 	 * http://www.mail-archive.com/netdev@xxxxxxxxxxxxxxx/msg56623.html
 	 */
-	return ((uint64_t)jhash2(a, 4, 0) * table->hashsize) >> 32;
+	return ((uint64_t)jhash2(a, 5, 0) * table->hashsize) >> 32;
 }
 
 static uint32_t
 cache_hash6_ct(const struct nf_conntrack *ct, const struct hashtable *table)
 {
-	uint32_t a[10];
+	uint32_t a[11];
 
 	memcpy(&a[0], nfct_get_attr(ct, ATTR_IPV6_SRC), sizeof(uint32_t)*4);
 	memcpy(&a[4], nfct_get_attr(ct, ATTR_IPV6_DST), sizeof(uint32_t)*4);
@@ -64,8 +65,9 @@ cache_hash6_ct(const struct nf_conntrack *ct, const struct hashtable *table)
 		 nfct_get_attr_u8(ct, ATTR_ORIG_L4PROTO);
 	a[9] = nfct_get_attr_u16(ct, ATTR_ORIG_PORT_SRC) << 16 |
 	       nfct_get_attr_u16(ct, ATTR_ORIG_PORT_DST);
+	a[10] = nfct_get_attr_u16(ct, ATTR_ZONE);
 
-	return ((uint64_t)jhash2(a, 10, 0) * table->hashsize) >> 32;
+	return ((uint64_t)jhash2(a, 11, 0) * table->hashsize) >> 32;
 }
 
 static uint32_t
-- 
2.30.1 (Apple Git-130)
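
For readers unfamiliar with the hashing scheme, the standalone sketch below
illustrates the two points the patch relies on: folding the zone into the
hashed key alongside the 5-tuple, and mapping the 32-bit hash onto hashsize
buckets with a multiply+shift instead of a modulo. It is illustrative only,
not the conntrack-tools code: struct fake_tuple, hash32() and bucket() are
made-up names, and hash32() is a trivial FNV-1a stand-in for the project's
jhash2().

/*
 * Minimal sketch, assuming an IPv4 5-tuple plus zone as the cache key.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_tuple {		/* illustrative only */
	uint32_t src, dst;	/* IPv4 addresses */
	uint8_t  l3proto, l4proto;
	uint16_t sport, dport;
	uint16_t zone;		/* conntrack zone */
};

static uint32_t hash32(const uint32_t *k, size_t n)
{
	uint32_t h = 2166136261u;	/* FNV-1a stand-in for jhash2() */

	for (size_t i = 0; i < n; i++) {
		h ^= k[i];
		h *= 16777619u;
	}
	return h;
}

static uint32_t bucket(const struct fake_tuple *t, uint32_t hashsize)
{
	uint32_t a[5] = {
		[0] = t->src,
		[1] = t->dst,
		[2] = (uint32_t)t->l3proto << 16 | t->l4proto,
		[3] = (uint32_t)t->sport << 16 | t->dport,
		[4] = t->zone,	/* zone keeps identical 5-tuples distinct */
	};

	/* (hash * hashsize) >> 32 spreads hash over [0, hashsize) without a divide */
	return (uint32_t)(((uint64_t)hash32(a, 5) * hashsize) >> 32);
}

int main(void)
{
	struct fake_tuple t = { 0x0a000001, 0x0a000002, 2, 6, 12345, 80, 0 };
	uint32_t size = 8192;

	printf("zone 0 bucket: %u\n", bucket(&t, size));
	t.zone = 7;		/* same 5-tuple, different zone */
	printf("zone 7 bucket: %u\n", bucket(&t, size));
	return 0;
}

With the zone in the key, two flows that share a 5-tuple but live in
different zones hash as distinct entries, which is what the patch needs for
the external cache to stay unique per zone.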