Re: [PATCH bpf-next 1/2] mm, bpf: Introduce __GFP_TRYLOCK for opportunistic page allocation

On 11/16/24 02:48, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@xxxxxxxxxx>
> 
> Tracing BPF programs execute from tracepoints and kprobes, where the
> running context is unknown, but they still need to request additional
> memory. Prior workarounds used pre-allocated memory and BPF-specific
> freelists to satisfy such allocation requests. Instead, introduce a
> __GFP_TRYLOCK flag that makes the page allocator accessible from any context.
> It relies on the per-cpu free list of pages, from which rmqueue_pcplist()
> should be able to pop a page. If that fails (due to IRQ re-entrancy or the
> list being empty), try_alloc_page() attempts to spin_trylock zone->lock
> and refill the per-cpu freelist as usual.
> A BPF program may execute with IRQs disabled, and zone->lock is a sleeping
> lock on RT, so trylock is the only option.
> In theory we could introduce a per-cpu reentrance counter and increment it
> every time spin_lock_irqsave(&zone->lock, flags) is used,
> but we cannot rely on it. Even if this CPU is not in the page_alloc path,
> spin_lock_irqsave() is not safe, since the BPF prog might be called
> from a tracepoint where preemption is disabled. So trylock only.
> 
> There is no attempt to make free_page() accessible from any
> context (yet). The BPF infrastructure will asynchronously free pages from
> such contexts.
> memcg is also not charged in the try_alloc_page() path; charging has to be
> done asynchronously to avoid sleeping on
> local_lock_irqsave(&memcg_stock.stock_lock, flags).
> 
> This is a first step towards supporting BPF requirements in SLUB
> and getting rid of bpf_mem_alloc.
> That goal was discussed at LSFMM: https://lwn.net/Articles/974138/

Thanks for looking into this. I agree that something like __GFP_TRYLOCK
would be necessary to distinguish those allocation contexts. But I'm
wondering whether the page allocator is the best place to start (or even
necessary in the end) if the goal is to replace the kmalloc-sized
allocations in bpf rather than page-sized ones. Couldn't SLUB keep
preallocated slab pages around so it doesn't have to call into the page
allocator in such situations?
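
To make the comparison concrete, here is a minimal user-space C sketch
(deliberately not kernel code, all names illustrative only) of the pattern
both approaches boil down to: consume from a local cache without taking any
lock, and only refill opportunistically via trylock, returning NULL instead
of ever blocking:

/*
 * Conceptual sketch of "opportunistic" allocation: take from a local
 * cache when possible, otherwise trylock the shared pool and refill;
 * never block. The shared pool stands in for zone->lock + buddy lists,
 * the per-thread cache for the pcp list (or a per-cpu sheaf).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_BATCH	8
#define POOL_SIZE	1024

static pthread_spinlock_t pool_lock;
static void *pool[POOL_SIZE];
static int pool_top;

static __thread void *cache[CACHE_BATCH];
static __thread int cache_top;

static void *try_alloc_obj(void)
{
	/* Fast path: local cache, no lock taken at all. */
	if (cache_top > 0)
		return cache[--cache_top];

	/* Slow path: refill only if the shared lock is immediately free. */
	if (pthread_spin_trylock(&pool_lock) != 0)
		return NULL;		/* caller must cope with failure */

	while (cache_top < CACHE_BATCH && pool_top > 0)
		cache[cache_top++] = pool[--pool_top];
	pthread_spin_unlock(&pool_lock);

	return cache_top ? cache[--cache_top] : NULL;
}

int main(void)
{
	pthread_spin_init(&pool_lock, PTHREAD_PROCESS_PRIVATE);
	for (pool_top = 0; pool_top < POOL_SIZE; pool_top++)
		pool[pool_top] = malloc(64);

	printf("got %p\n", try_alloc_obj());
	return 0;
}

The open question is really just at which layer that local cache lives:
the page allocator's pcp lists (this patch) or SLUB's per-cpu structures.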

I posted the first SLUB sheaves RFC this week [1]. The immediate motivation
was different, but I did mention there that it could perhaps become a basis
for the bpf_mem_alloc replacement too. I'd imagine something like a set of
kmem_buckets with sheaves enabled and either a flag like __GFP_TRYLOCK or a
different variant of kmalloc() that only tries the sheaves (a rough sketch
of the caller side follows the link below). I didn't Cc you/bpf as it seemed
too vague yet, but I guess I should have.

[1] https://lore.kernel.org/all/20241112-slub-percpu-caches-v1-0-ddc0bdc27e05@xxxxxxx/#t
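
Roughly, the caller-facing shape could be something like the sketch below;
bpf_buckets and kmalloc_try_sheaves() are made-up names, just to illustrate
the idea, not anything that exists in the sheaves RFC:

/*
 * Hypothetical sketch only: kmalloc_try_sheaves() does not exist; it
 * stands for "allocate from the per-CPU sheaf of a kmem_buckets set or
 * fail fast", without ever taking zone->lock or a sleeping lock.
 */
static struct kmem_buckets *bpf_buckets;	/* created once at init */

static void *bpf_try_kmalloc(size_t size, int nid)
{
	if (preemptible())
		return kmalloc_node(size, GFP_NOWAIT | __GFP_NOWARN, nid);

	/* Unknown context: only consume a per-CPU sheaf, never refill. */
	return kmalloc_try_sheaves(bpf_buckets, size, nid);	/* hypothetical */
}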

> Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
> ---
>  include/linux/gfp.h            | 17 +++++++++++++++++
>  include/linux/gfp_types.h      |  3 +++
>  include/trace/events/mmflags.h |  1 +
>  mm/internal.h                  |  1 +
>  mm/page_alloc.c                | 19 ++++++++++++++++---
>  tools/perf/builtin-kmem.c      |  1 +
>  6 files changed, 39 insertions(+), 3 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index a951de920e20..319d8906ef3f 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -347,6 +347,23 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
>  }
>  #define alloc_page_vma(...)			alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
>  
> +static inline struct page *try_alloc_page_noprof(int nid)
> +{
> +	/* If spin_locks are not held and interrupts are enabled, use normal path. */
> +	if (preemptible())
> +		return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> +	/*
> +	 * Best effort allocation from percpu free list.
> +	 * If it's empty attempt to spin_trylock zone->lock.
> +	 * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
> +	 * that may need to grab a lock.
> +	 * Do not specify __GFP_ACCOUNT to avoid local_lock.
> +	 * Do not warn either.
> +	 */
> +	return alloc_pages_node_noprof(nid, __GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
> +}
> +#define try_alloc_page(nid)			alloc_hooks(try_alloc_page_noprof(nid))
> +
>  extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
>  #define __get_free_pages(...)			alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
>  
> diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
> index 65db9349f905..72b385a7888d 100644
> --- a/include/linux/gfp_types.h
> +++ b/include/linux/gfp_types.h
> @@ -48,6 +48,7 @@ enum {
>  	___GFP_THISNODE_BIT,
>  	___GFP_ACCOUNT_BIT,
>  	___GFP_ZEROTAGS_BIT,
> +	___GFP_TRYLOCK_BIT,
>  #ifdef CONFIG_KASAN_HW_TAGS
>  	___GFP_SKIP_ZERO_BIT,
>  	___GFP_SKIP_KASAN_BIT,
> @@ -86,6 +87,7 @@ enum {
>  #define ___GFP_THISNODE		BIT(___GFP_THISNODE_BIT)
>  #define ___GFP_ACCOUNT		BIT(___GFP_ACCOUNT_BIT)
>  #define ___GFP_ZEROTAGS		BIT(___GFP_ZEROTAGS_BIT)
> +#define ___GFP_TRYLOCK		BIT(___GFP_TRYLOCK_BIT)
>  #ifdef CONFIG_KASAN_HW_TAGS
>  #define ___GFP_SKIP_ZERO	BIT(___GFP_SKIP_ZERO_BIT)
>  #define ___GFP_SKIP_KASAN	BIT(___GFP_SKIP_KASAN_BIT)
> @@ -293,6 +295,7 @@ enum {
>  #define __GFP_COMP	((__force gfp_t)___GFP_COMP)
>  #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
>  #define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
> +#define __GFP_TRYLOCK	((__force gfp_t)___GFP_TRYLOCK)
>  #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
>  #define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
>  
> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
> index bb8a59c6caa2..592c93ee5f35 100644
> --- a/include/trace/events/mmflags.h
> +++ b/include/trace/events/mmflags.h
> @@ -50,6 +50,7 @@
>  	gfpflag_string(__GFP_RECLAIM),		\
>  	gfpflag_string(__GFP_DIRECT_RECLAIM),	\
>  	gfpflag_string(__GFP_KSWAPD_RECLAIM),	\
> +	gfpflag_string(__GFP_TRYLOCK),		\
>  	gfpflag_string(__GFP_ZEROTAGS)
>  
>  #ifdef CONFIG_KASAN_HW_TAGS
> diff --git a/mm/internal.h b/mm/internal.h
> index 64c2eb0b160e..c1b08e95a63b 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1173,6 +1173,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>  #endif
>  #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
>  #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
> +#define ALLOC_TRYLOCK		0x1000000 /* Only use spin_trylock in allocation path */
>  
>  /* Flags that allow allocations below the min watermark. */
>  #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 216fbbfbedcf..71fed4f5bd0c 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2304,7 +2304,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
>  	unsigned long flags;
>  	int i;
>  
> -	spin_lock_irqsave(&zone->lock, flags);
> +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags))
> +			return 0;
> +	} else {
> +		spin_lock_irqsave(&zone->lock, flags);
> +	}
>  	for (i = 0; i < count; ++i) {
>  		struct page *page = __rmqueue(zone, order, migratetype,
>  								alloc_flags);
> @@ -2904,7 +2909,12 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
>  
>  	do {
>  		page = NULL;
> -		spin_lock_irqsave(&zone->lock, flags);
> +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +			if (!spin_trylock_irqsave(&zone->lock, flags))
> +				return 0;
> +		} else {
> +			spin_lock_irqsave(&zone->lock, flags);
> +		}
>  		if (alloc_flags & ALLOC_HIGHATOMIC)
>  			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
>  		if (!page) {
> @@ -4001,6 +4011,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
>  	 */
>  	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
>  	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
> +	BUILD_BUG_ON(__GFP_TRYLOCK != (__force gfp_t) ALLOC_TRYLOCK);
>  
>  	/*
>  	 * The caller may dip into page reserves a bit more if the caller
> @@ -4009,7 +4020,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
>  	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
>  	 */
>  	alloc_flags |= (__force int)
> -		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
> +		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM | __GFP_TRYLOCK));
>  
>  	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
>  		/*
> @@ -4509,6 +4520,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>  
>  	might_alloc(gfp_mask);
>  
> +	*alloc_flags |= (__force int) (gfp_mask & __GFP_TRYLOCK);
> +
>  	if (should_fail_alloc_page(gfp_mask, order))
>  		return false;
>  
> diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
> index a756147e2eec..d245ff60d2a6 100644
> --- a/tools/perf/builtin-kmem.c
> +++ b/tools/perf/builtin-kmem.c
> @@ -682,6 +682,7 @@ static const struct {
>  	{ "__GFP_RECLAIM",		"R" },
>  	{ "__GFP_DIRECT_RECLAIM",	"DR" },
>  	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
> +	{ "__GFP_TRYLOCK",		"TL" },
>  };
>  
>  static size_t max_gfp_len;
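
BTW, just to spell out my understanding of the expected caller pattern for
try_alloc_page() (the patch doesn't state it explicitly, and the fallback
helper below is made up): the caller has to treat NULL as the normal case
and punt to a context where a regular allocation is allowed, e.g.:

	struct page *page = try_alloc_page(numa_node_id());

	if (!page) {
		/*
		 * Any context may fail: defer to worker context where
		 * GFP_KERNEL / GFP_NOWAIT allocations are fine.
		 */
		queue_refill_work();	/* hypothetical helper */
		return -ENOMEM;
	}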




