tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   840126e36e8ff272cb63158646433fa1324533d9
commit: 4ab67149f3c6e97c5c506a726f0ebdec38241679 [4808/5667] bpf: Add percpu allocation support to bpf_mem_alloc.
config: csky-randconfig-s031-20220906 (https://download.01.org/0day-ci/archive/20220907/202209071020.4F92QeGY-lkp@xxxxxxxxx/config)
compiler: csky-linux-gcc (GCC) 12.1.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.4-39-gce1a6720-dirty
        # https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=4ab67149f3c6e97c5c506a726f0ebdec38241679
        git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
        git fetch --no-tags linux-next master
        git checkout 4ab67149f3c6e97c5c506a726f0ebdec38241679
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=csky SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@xxxxxxxxx>

sparse warnings: (new ones prefixed by >>)
>> kernel/bpf/memalloc.c:139:48: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected void *pptr @@     got void [noderef] __percpu * @@
   kernel/bpf/memalloc.c:139:48: sparse:     expected void *pptr
   kernel/bpf/memalloc.c:139:48: sparse:     got void [noderef] __percpu *
>> kernel/bpf/memalloc.c:142:37: sparse: sparse: incorrect type in argument 1 (different address spaces) @@     expected void [noderef] __percpu *__pdata @@     got void *pptr @@
   kernel/bpf/memalloc.c:142:37: sparse:     expected void [noderef] __percpu *__pdata
   kernel/bpf/memalloc.c:142:37: sparse:     got void *pptr
>> kernel/bpf/memalloc.c:211:43: sparse: sparse: incorrect type in argument 1 (different address spaces) @@     expected void [noderef] __percpu *__pdata @@     got void * @@
   kernel/bpf/memalloc.c:211:43: sparse:     expected void [noderef] __percpu *__pdata
   kernel/bpf/memalloc.c:211:43: sparse:     got void *

vim +139 kernel/bpf/memalloc.c

   127
   128  static void *__alloc(struct bpf_mem_cache *c, int node)
   129  {
   130          /* Allocate, but don't deplete atomic reserves that typical
   131           * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
   132           * will allocate from the current numa node which is what we
   133           * want here.
   134           */
   135          gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;
   136
   137          if (c->percpu) {
   138                  void **obj = kmem_cache_alloc_node(c->kmem_cache, flags, node);
 > 139                  void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
   140
   141                  if (!obj || !pptr) {
 > 142                          free_percpu(pptr);
   143                          kfree(obj);
   144                          return NULL;
   145                  }
   146                  obj[1] = pptr;
   147                  return obj;
   148          }
   149
   150          if (c->kmem_cache)
   151                  return kmem_cache_alloc_node(c->kmem_cache, flags, node);
   152
   153          return kmalloc_node(c->unit_size, flags, node);
   154  }
   155
   156  static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
   157  {
   158  #ifdef CONFIG_MEMCG_KMEM
   159          if (c->objcg)
   160                  return get_mem_cgroup_from_objcg(c->objcg);
   161  #endif
   162
   163  #ifdef CONFIG_MEMCG
   164          return root_mem_cgroup;
   165  #else
   166          return NULL;
   167  #endif
   168  }
   169
   170  /* Mostly runs from irq_work except __init phase. */
   171  static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
   172  {
   173          struct mem_cgroup *memcg = NULL, *old_memcg;
   174          unsigned long flags;
   175          void *obj;
   176          int i;
   177
   178          memcg = get_memcg(c);
   179          old_memcg = set_active_memcg(memcg);
   180          for (i = 0; i < cnt; i++) {
   181                  obj = __alloc(c, node);
   182                  if (!obj)
   183                          break;
   184                  if (IS_ENABLED(CONFIG_PREEMPT_RT))
   185                          /* In RT irq_work runs in per-cpu kthread, so disable
   186                           * interrupts to avoid preemption and interrupts and
   187                           * reduce the chance of bpf prog executing on this cpu
   188                           * when active counter is busy.
   189                           */
   190                          local_irq_save(flags);
   191                  /* alloc_bulk runs from irq_work which will not preempt a bpf
   192                   * program that does unit_alloc/unit_free since IRQs are
   193                   * disabled there. There is no race to increment 'active'
   194                   * counter. It protects free_llist from corruption in case NMI
   195                   * bpf prog preempted this loop.
   196                   */
   197                  WARN_ON_ONCE(local_inc_return(&c->active) != 1);
   198                  __llist_add(obj, &c->free_llist);
   199                  c->free_cnt++;
   200                  local_dec(&c->active);
   201                  if (IS_ENABLED(CONFIG_PREEMPT_RT))
   202                          local_irq_restore(flags);
   203          }
   204          set_active_memcg(old_memcg);
   205          mem_cgroup_put(memcg);
   206  }
   207
   208  static void free_one(struct bpf_mem_cache *c, void *obj)
   209  {
   210          if (c->percpu) {
 > 211                  free_percpu(((void **)obj)[1]);
   212                  kmem_cache_free(c->kmem_cache, obj);
   213                  return;
   214          }
   215
   216          if (c->kmem_cache)
   217                  kmem_cache_free(c->kmem_cache, obj);
   218          else
   219                  kfree(obj);
   220  }
   221

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp
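
All three warnings share one root cause: __alloc_percpu_gfp() returns a pointer in the __percpu address space (the "void [noderef] __percpu *" in the messages above), but __alloc() stores it in a plain void *, and free_one() later hands that plain pointer back to free_percpu(). As an illustration only, based on the fields visible in the listing (c->percpu, c->unit_size, c->kmem_cache) and not necessarily the fix that was applied upstream, a minimal sketch of one way to keep the annotation intact is to declare pptr as void __percpu * and confine the address-space conversion to __force casts where the pointer enters and leaves the plain obj[1] slot:

/* Sketch only, not the actual patch: keep the __percpu annotation on
 * the local pointer so sparse can track the address space, and use
 * __force casts where the pointer is stashed in / fetched from the
 * plain void * slot at obj[1].
 */
static void *__alloc(struct bpf_mem_cache *c, int node)
{
        gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;

        if (c->percpu) {
                void **obj = kmem_cache_alloc_node(c->kmem_cache, flags, node);
                void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

                if (!obj || !pptr) {
                        free_percpu(pptr);      /* types now match; NULL is fine */
                        kfree(obj);
                        return NULL;
                }
                /* drop the address space in exactly one annotated spot */
                obj[1] = (void __force *)pptr;
                return obj;
        }

        if (c->kmem_cache)
                return kmem_cache_alloc_node(c->kmem_cache, flags, node);

        return kmalloc_node(c->unit_size, flags, node);
}

static void free_one(struct bpf_mem_cache *c, void *obj)
{
        if (c->percpu) {
                /* restore the annotation that was dropped at alloc time */
                free_percpu((void __percpu __force *)((void **)obj)[1]);
                kmem_cache_free(c->kmem_cache, obj);
                return;
        }

        if (c->kmem_cache)
                kmem_cache_free(c->kmem_cache, obj);
        else
                kfree(obj);
}

With pptr annotated, the warnings at lines 139 and 142 disappear because the initializer and the free_percpu() argument types match, and the __force casts at the obj[1] boundary quiet the remaining address-space complaint at line 211 without hiding genuine misuse elsewhere.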