Re: [PATCH mm v2 32/33] kasan: dynamically allocate stack ring entries

On Tue, Jul 19, 2022 at 02:10AM +0200, andrey.konovalov@xxxxxxxxx wrote:
> From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
> 
> Instead of using a large static array, allocate the stack ring dynamically
> via memblock_alloc().
> 
> The size of the stack ring is controlled by a new kasan.stack_ring_size
> command-line parameter. When kasan.stack_ring_size is not provided, the
> default value of 32 << 10 is used.
> 
> When the stack trace collection is disabled via kasan.stacktrace=off,
> the stack ring is not allocated.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
> 
> ---
> 
> Changes v1->v2:
> - This is a new patch.
> ---
>  mm/kasan/kasan.h       |  5 +++--
>  mm/kasan/report_tags.c |  4 ++--
>  mm/kasan/tags.c        | 22 +++++++++++++++++++++-
>  3 files changed, 26 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 447baf1a7a2e..4afe4db751da 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -252,12 +252,13 @@ struct kasan_stack_ring_entry {
>  	bool is_free;
>  };
>  
> -#define KASAN_STACK_RING_SIZE (32 << 10)
> +#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)
>  

This could be moved to tags.c, as it has no users anywhere else.
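
I.e. something like this (just a sketch), with the define dropped from kasan.h, since the default is only consulted in kasan_init_tags():

/* mm/kasan/tags.c */

/* Default number of entries in the stack ring; only used below. */
#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)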

>  struct kasan_stack_ring {
>  	rwlock_t lock;
> +	size_t size;
>  	atomic64_t pos;
> -	struct kasan_stack_ring_entry entries[KASAN_STACK_RING_SIZE];
> +	struct kasan_stack_ring_entry *entries;
>  };
>  
>  #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
> diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
> index a996489e6dac..7e267e69ce19 100644
> --- a/mm/kasan/report_tags.c
> +++ b/mm/kasan/report_tags.c
> @@ -56,11 +56,11 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
>  	 * entries relevant to the buggy object can be overwritten.
>  	 */
>  
> -	for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> +	for (u64 i = pos - 1; i != pos - 1 - stack_ring.size; i--) {
>  		if (alloc_found && free_found)
>  			break;
>  
> -		entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> +		entry = &stack_ring.entries[i % stack_ring.size];
>  
>  		/* Paired with smp_store_release() in save_stack_info(). */
>  		ptr = (void *)smp_load_acquire(&entry->ptr);
> diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
> index 0eb6cf6717db..fd8c5f919156 100644
> --- a/mm/kasan/tags.c
> +++ b/mm/kasan/tags.c
> @@ -10,6 +10,7 @@
>  #include <linux/init.h>
>  #include <linux/kasan.h>
>  #include <linux/kernel.h>
> +#include <linux/memblock.h>
>  #include <linux/memory.h>
>  #include <linux/mm.h>
>  #include <linux/static_key.h>
> @@ -52,6 +53,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
>  }
>  early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
>  
> +/* kasan.stack_ring_size=32768 */

What is that comment meant to say? Should it be "kasan.stack_ring_size=<entries>", to make clear the value is a number of entries rather than bytes?

Also, is the new parameter already covered in the documentation (Documentation/dev-tools/kasan.rst)?
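
If the intent is "number of entries", maybe spell it out in the comment and add a short entry to the documentation, roughly along these lines (wording is just a suggestion):

/* kasan.stack_ring_size=<number of entries> */

and, for kasan.rst:

- ``kasan.stack_ring_size=<number of entries>`` specifies the number of entries
  in the stack ring used to store alloc and free stack traces
  (default: ``32768``).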

> +static int __init early_kasan_flag_stack_ring_size(char *arg)
> +{
> +	if (!arg)
> +		return -EINVAL;
> +
> +	return kstrtoul(arg, 0, &stack_ring.size);
> +}
> +early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
> +
>  void __init kasan_init_tags(void)
>  {
>  	switch (kasan_arg_stacktrace) {
> @@ -65,6 +76,15 @@ void __init kasan_init_tags(void)
>  		static_branch_enable(&kasan_flag_stacktrace);
>  		break;
>  	}
> +
> +	if (kasan_stack_collection_enabled()) {
> +		if (!stack_ring.size)
> +			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
> +		stack_ring.entries = memblock_alloc(
> +					sizeof(stack_ring.entries[0]) *
> +						stack_ring.size,
> +					SMP_CACHE_BYTES);

memblock_alloc() can fail. Although failure is unlikely, it should be handled,
probably by just disabling stack collection.

(minor: the excessive line breaks make the allocation call above hard to read.)
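
Maybe something like the below (untested sketch; it assumes that disabling the
kasan_flag_stacktrace static branch is enough to keep save_stack_info() away
from the unallocated ring, and the error message wording is just a suggestion):

	if (kasan_stack_collection_enabled()) {
		if (!stack_ring.size)
			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
		/* Allocate the ring early; fall back to no stack collection on failure. */
		stack_ring.entries = memblock_alloc(sizeof(stack_ring.entries[0]) * stack_ring.size,
						    SMP_CACHE_BYTES);
		if (!stack_ring.entries) {
			pr_err("kasan: failed to allocate the stack ring, stack trace collection disabled\n");
			static_branch_disable(&kasan_flag_stacktrace);
		}
	}

This also keeps the memblock_alloc() call to two lines, which reads better.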

> +	}
>  }
>  
>  static void save_stack_info(struct kmem_cache *cache, void *object,
> @@ -86,7 +106,7 @@ static void save_stack_info(struct kmem_cache *cache, void *object,
>  
>  next:
>  	pos = atomic64_fetch_add(1, &stack_ring.pos);
> -	entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];
> +	entry = &stack_ring.entries[pos % stack_ring.size];
>  
>  	/* Detect stack ring entry slots that are being written to. */
>  	old_ptr = READ_ONCE(entry->ptr);
> -- 
> 2.25.1
