On Tue, Dec 12, 2023 at 06:24:38PM +0200, Julian Anastasov wrote:
> Make the hash table for services resizable in the bit range of 4-20.
> Table is attached only while services are present. Resizing is done
> by delayed work based on load (the number of hashed services).
> Table grows when load increases 2+ times (above 12.5% with factor=3)
> and shrinks 8+ times when load decreases 16+ times (below 0.78%).
>
> Switch to jhash hashing to reduce the collisions for multiple
> services.
>
> Add a hash_key field into the service that includes table ID and
> bucket ID. This helps the lookup and delete operations.
>
> Signed-off-by: Julian Anastasov <ja@xxxxxx>

...

> @@ -391,18 +440,29 @@ static inline struct ip_vs_service *
>  __ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol,
> 		      const union nf_inet_addr *vaddr, __be16 vport)
>  {
> -	unsigned int hash;
> +	DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +	struct hlist_bl_head *head;
>  	struct ip_vs_service *svc;
> -
> -	/* Check for "full" addressed entries */
> -	hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport);
> -
> -	hlist_for_each_entry_rcu(svc, &ipvs->svc_table[hash], s_list) {
> -		if (svc->af == af && ip_vs_addr_equal(af, &svc->addr, vaddr) &&
> -		    svc->port == vport && svc->protocol == protocol &&
> -		    !svc->fwmark) {
> -			/* HIT */
> -			return svc;
> +	struct ip_vs_rht *t, *p;
> +	struct hlist_bl_node *e;
> +	u32 hash, hash_key;
> +
> +	ip_vs_rht_for_each_table_rcu(ipvs->svc_table, t, p) {
> +		/* Check for "full" addressed entries */
> +		hash = ip_vs_svc_hashval(t, af, protocol, vaddr, vport);
> +
> +		hash_key = ip_vs_rht_build_hash_key(t, hash);
> +		ip_vs_rht_walk_bucket_rcu(t, hash_key, head) {
> +			hlist_bl_for_each_entry_rcu(svc, e, head, s_list) {

Hi Julian,

Smatch reports that `head` is used uninitialised here, and on reading the
code that does seem to be the case to me too: `head` is declared above but
I don't see where `ip_vs_rht_walk_bucket_rcu()` assigns it before this
`hlist_bl_for_each_entry_rcu()` dereferences it.
> +				if (READ_ONCE(svc->hash_key) == hash_key &&
> +				    svc->af == af &&
> +				    ip_vs_addr_equal(af, &svc->addr, vaddr) &&
> +				    svc->port == vport &&
> +				    svc->protocol == protocol && !svc->fwmark) {
> +					/* HIT */
> +					return svc;
> +				}
> +			}
> 		}
>  	}
>
> @@ -416,16 +476,26 @@ __ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol,
>  static inline struct ip_vs_service *
>  __ip_vs_svc_fwm_find(struct netns_ipvs *ipvs, int af, __u32 fwmark)
>  {
> -	unsigned int hash;
> +	DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +	struct hlist_bl_head *head;
>  	struct ip_vs_service *svc;
> -
> -	/* Check for fwmark addressed entries */
> -	hash = ip_vs_svc_fwm_hashkey(ipvs, fwmark);
> -
> -	hlist_for_each_entry_rcu(svc, &ipvs->svc_table[hash], s_list) {
> -		if (svc->fwmark == fwmark && svc->af == af) {
> -			/* HIT */
> -			return svc;
> +	struct ip_vs_rht *t, *p;
> +	struct hlist_bl_node *e;
> +	u32 hash, hash_key;
> +
> +	ip_vs_rht_for_each_table_rcu(ipvs->svc_table, t, p) {
> +		/* Check for fwmark addressed entries */
> +		hash = ip_vs_svc_fwm_hashval(t, af, fwmark);
> +
> +		hash_key = ip_vs_rht_build_hash_key(t, hash);
> +		ip_vs_rht_walk_bucket_rcu(t, hash_key, head) {
> +			hlist_bl_for_each_entry_rcu(svc, e, head, s_list) {

Ditto: Smatch flags `head` as used uninitialised at this point as well,
for the same reason as in __ip_vs_service_find() above.

> +				if (READ_ONCE(svc->hash_key) == hash_key &&
> +				    svc->fwmark == fwmark && svc->af == af) {
> +					/* HIT */
> +					return svc;
> +				}
> +			}
> 		}
>  	}

...