On Mon, Nov 21, 2022 at 7:52 AM Daniel Borkmann <daniel@xxxxxxxxxxxxx> wrote: [...] > > + */ > > +void *execmem_alloc(unsigned long size, unsigned long align) > > +{ > > + struct vmap_area *va, *tmp; > > + unsigned long addr; > > + enum fit_type type; > > + int ret; > > + > > + va = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, NUMA_NO_NODE); > > + if (unlikely(!va)) > > + return NULL; > > + > > +again: > > + preload_this_cpu_lock(&free_text_area_lock, GFP_KERNEL, NUMA_NO_NODE); > > + tmp = find_vmap_lowest_match(&free_text_area_root, size, align, 1, false); > > + > > + if (!tmp) { > > + unsigned long alloc_size; > > + void *ptr; > > + > > + spin_unlock(&free_text_area_lock); > > + > > + /* > > + * Not enough contiguous space in free_text_area_root, try > > + * to allocate more memory. The memory is first added to > > + * vmap_area_root, and then moved to free_text_area_root. > > + */ > > + alloc_size = roundup(size, PMD_SIZE * num_online_nodes()); > > + ptr = __vmalloc_node_range(alloc_size, PMD_SIZE, EXEC_MEM_START, > > + EXEC_MEM_END, GFP_KERNEL, PAGE_KERNEL, > > + VM_ALLOW_HUGE_VMAP | VM_NO_GUARD, > > + NUMA_NO_NODE, __builtin_return_address(0)); > > + if (unlikely(!ptr)) > > + goto err_out; > > + > > + move_vmap_to_free_text_tree(ptr); > > + goto again; > > + } > > + > > + addr = roundup(tmp->va_start, align); > > + type = classify_va_fit_type(tmp, addr, size); > > + if (WARN_ON_ONCE(type == NOTHING_FIT)) > > + goto err_out_unlock; > > Isn't this already covered in adjust_va_to_fit_type()? That's right! Now we can get rid of err_out_unlock. Thanks! Also fixed other nits. Song [...]