On Sat, Jul 22, 2023 at 3:48 PM Arnd Bergmann <arnd@xxxxxxxxxx> wrote:
>
> From: Arnd Bergmann <arnd@xxxxxxxx>
>
> Splitting these out into separate helper functions means that we
> actually pass an uninitialized variable into another function call
> if dec_active() happens to not be inlined, and CONFIG_PREEMPT_RT
> is disabled:

Do you mean that the compiler can optimize the unused 'flags' away
when dec_active() is inlined, but can't when dec_active() is not
inlined? (See the standalone sketch at the end of this mail.)
If so, why can't we improve the compiler instead?
If we have to change the kernel, what about the change below, which
avoids the output parameter entirely by returning the flags?

index 51d6389..17191ee 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -165,15 +165,17 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
-static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
+static unsigned long inc_active(struct bpf_mem_cache *c)
 {
+	unsigned long flags = 0;
+
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		/* In RT irq_work runs in per-cpu kthread, so disable
 		 * interrupts to avoid preemption and interrupts and
 		 * reduce the chance of bpf prog executing on this cpu
 		 * when active counter is busy.
 		 */
-		local_irq_save(*flags);
+		local_irq_save(flags);
 	/* alloc_bulk runs from irq_work which will not preempt a bpf
 	 * program that does unit_alloc/unit_free since IRQs are
 	 * disabled there. There is no race to increment 'active'
@@ -181,6 +183,7 @@ static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 	 * bpf prog preempted this loop.
 	 */
 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	return flags;
 }

>
> kernel/bpf/memalloc.c: In function 'add_obj_to_free_list':
> kernel/bpf/memalloc.c:200:9: error: 'flags' is used uninitialized [-Werror=uninitialized]
>   200 |         dec_active(c, flags);
>
> Mark the two functions as __always_inline so gcc can see through
> this regardless of optimizations and other options that may
> prevent it otherwise.
>
> Fixes: 18e027b1c7c6d ("bpf: Factor out inc/dec of active flag into helpers.")
> Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx>
> ---
>  kernel/bpf/memalloc.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
> index 51d6389e5152e..23906325298da 100644
> --- a/kernel/bpf/memalloc.c
> +++ b/kernel/bpf/memalloc.c
> @@ -165,7 +165,7 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
>  #endif
>  }
>
> -static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
> +static __always_inline void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
>  {
>  	if (IS_ENABLED(CONFIG_PREEMPT_RT))
>  		/* In RT irq_work runs in per-cpu kthread, so disable
> @@ -183,7 +183,7 @@ static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
>  	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
>  }
>
> -static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
> +static __always_inline void dec_active(struct bpf_mem_cache *c, unsigned long flags)
>  {
>  	local_dec(&c->active);
>  	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> --
> 2.39.2
>

--
Regards
Yafang
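
P.S. A minimal standalone sketch of the pattern I am asking about,
with hypothetical names: writer()/reader() are my stand-ins for
inc_active()/dec_active(), not code from the patch. Assuming GCC at
-O2 with -Wall, I would expect the warning to appear exactly because
writer() is inlined while reader() stays out of line:

#define COND 0	/* stand-in for IS_ENABLED(CONFIG_PREEMPT_RT) */

/* Trivial, so GCC inlines it; with COND == 0 the store is compiled
 * away, and after inlining GCC can see that *flags is never written.
 */
static inline void writer(unsigned long *flags)
{
	if (COND)
		*flags = 1;
}

/* Kept out of line, like dec_active() in the reported build. */
static __attribute__((noinline)) void reader(unsigned long flags)
{
	if (COND)
		(void)flags;
}

void caller(void)
{
	unsigned long flags;	/* never initialized when COND == 0 */

	writer(&flags);		/* inlined: GCC sees no store happens */
	reader(flags);		/* out of line: the uninitialized value
				 * must be passed by value, so GCC warns
				 * "'flags' is used uninitialized"
				 */
}

If both helpers are inlined, the dead store and the dead read are
both eliminated, so nothing is left for -Wuninitialized to fire on;
that inlining is what __always_inline in your patch guarantees.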