From: Alexei Starovoitov <ast@xxxxxxxxxx> The next commit will introduce bpf_arena which is a sparsely populated shared memory region between a bpf program and a user space process. It will function similarly to vmalloc()/vm_map_ram(): - get_vm_area() - alloc_pages() - vmap_pages_range() Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx> --- include/linux/vmalloc.h | 2 ++ mm/vmalloc.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index c720be70c8dd..bafb87c69e3d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -233,6 +233,8 @@ static inline bool is_vm_area_hugepages(const void *addr) #ifdef CONFIG_MMU void vunmap_range(unsigned long addr, unsigned long end); +int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift); static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d12a17fc0c17..eae93d575d1b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -625,8 +625,8 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end, * RETURNS: * 0 on success, -errno on failure. */ -static int vmap_pages_range(unsigned long addr, unsigned long end, - pgprot_t prot, struct page **pages, unsigned int page_shift) +int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) { int err; -- 2.34.1