Allow memory to be allocated on a specified node.  Use it in the perf
ring-buffer code.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
I was looking at the perf ringbuffer code for other reasons, and I
noticed that everything else gets allocated on the specified node ...
except there's no way to do this for vmalloc.  I assume it'd be
helpful here too.

 include/linux/vmalloc.h     | 17 ++++++++++++++++-
 kernel/events/ring_buffer.c |  2 +-
 mm/vmalloc.c                |  9 +++++----
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c720be70c8dd..030bfe1a60ab 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/llist.h>
+#include <linux/numa.h>
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
 #include <linux/overflow.h>
@@ -139,7 +140,7 @@ static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 
 extern void *vmalloc(unsigned long size) __alloc_size(1);
 extern void *vzalloc(unsigned long size) __alloc_size(1);
-extern void *vmalloc_user(unsigned long size) __alloc_size(1);
+extern void *vmalloc_user_node(unsigned long size, int node) __alloc_size(1);
 extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
 extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
 extern void *vmalloc_32(unsigned long size) __alloc_size(1);
@@ -158,6 +159,20 @@ extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
 extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
 extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
 
+/**
+ * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
+ * @size: allocation size
+ *
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline void *vmalloc_user(size_t size)
+{
+	return vmalloc_user_node(size, NUMA_NO_NODE);
+}
+
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cc90d5299005..c73add132618 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -918,7 +918,7 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 
 	INIT_WORK(&rb->work, rb_free_work);
 
-	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
+	all_buf = vmalloc_user_node((nr_pages + 1) * PAGE_SIZE, node);
 	if (!all_buf)
 		goto fail_all_buf;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 228a4a5312f2..3616bfe4348f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3461,22 +3461,23 @@ void *vzalloc(unsigned long size)
 EXPORT_SYMBOL(vzalloc);
 
 /**
- * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
+ * vmalloc_user_node - allocate zeroed virtually contiguous memory for userspace
  * @size: allocation size
+ * @node: NUMA node
  *
  * The resulting memory area is zeroed so it can be mapped to userspace
  * without leaking data.
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_node(unsigned long size, int node)
 {
 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
-				    VM_USERMAP, NUMA_NO_NODE,
+				    VM_USERMAP, node,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_node);
 
 /**
  * vmalloc_node - allocate memory on a specific node
-- 
2.40.1
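
For reviewers, a minimal usage sketch (not part of the patch) of how a
caller with a NUMA preference might use the new helper.  The
alloc_mmap_buffer() function and its cpu argument are hypothetical and
only illustrate the intended API; vmalloc_user_node() and NUMA_NO_NODE
come from this patch, and cpu_to_node() is the existing topology helper,
used the same way rb_alloc() derives its node.

#include <linux/numa.h>
#include <linux/topology.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: allocate a zeroed, user-mappable buffer near @cpu. */
static void *alloc_mmap_buffer(unsigned long size, int cpu)
{
	/* No CPU preference means no node preference. */
	int node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);

	/* Zeroed, virtually contiguous, VM_USERMAP memory from @node. */
	return vmalloc_user_node(size, node);
}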