On 1/5/23 1:26 AM, tong@xxxxxxxxxxxxx wrote:
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 5aa2b5525f79..974f104f47a0 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
>  {
>  	unsigned long flags;
>  
> -	hash = hash & HASHTAB_MAP_LOCK_MASK;
> +	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets -1);
>  
>  	preempt_disable();
>  	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
> @@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
>  				      struct bucket *b, u32 hash,
>  				      unsigned long flags)
>  {
> -	hash = hash & HASHTAB_MAP_LOCK_MASK;
> +	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets -1);

Please run checkpatch.pl. Patchwork also reports the same issue:
https://patchwork.kernel.org/project/netdevbpf/patch/20230105092637.35069-1-tong@xxxxxxxxxxxxx/

CHECK: spaces preferred around that '-' (ctx:WxV)
#46: FILE: kernel/bpf/hashtab.c:155:
+	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets -1);
 	                                                                ^

CHECK: spaces preferred around that '-' (ctx:WxV)
#55: FILE: kernel/bpf/hashtab.c:174:
+	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets -1);
 	                                                                ^
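
i.e. with the spaces around '-' that checkpatch asks for:

	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
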
Btw, instead of repeating this min_t-and-subtract in both places, wouldn't
ensuring during map_alloc that n_buckets is at least HASHTAB_MAP_LOCK_COUNT
be just as good? An htab with 2 or 4 max_entries should be pretty uncommon.
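
Something like this in htab_map_alloc() (untested sketch; the max_t() clamp
is the only new line, the roundup_pow_of_two() assignment is what the
function already does):

	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
	/* Keep n_buckets >= HASHTAB_MAP_LOCK_COUNT (both are powers of
	 * two), so hashes that select the same bucket always select the
	 * same map_locked counter.  htab_lock_bucket() and
	 * htab_unlock_bucket() could then keep masking with
	 * HASHTAB_MAP_LOCK_MASK alone, without the min_t() clamp.
	 */
	htab->n_buckets = max_t(u32, htab->n_buckets, HASHTAB_MAP_LOCK_COUNT);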