Re: [PATCH v1 13/16] khwasan: add hooks implementation

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 05/08/2018 08:20 PM, Andrey Konovalov wrote:

>  
>  static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>  			      unsigned long ip, bool quarantine)
>  {
>  	s8 shadow_byte;
> +	u8 tag;
>  	unsigned long rounded_up_size;
>  
> +	tag = get_tag(object);
> +	object = reset_tag(object);
> +
>  	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
>  	    object)) {
> -		kasan_report_invalid_free(object, ip);
> +		kasan_report_invalid_free(set_tag(object, tag), ip);

Using a variable to store the untagged object pointer, instead of tagging/untagging back and forth, would make the
code easier to follow.

>  		return true;
>  	}
>  
> @@ -326,20 +346,29 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>  		return false;
>  
>  	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
> +#ifdef CONFIG_KASAN_GENERIC
>  	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
>  		kasan_report_invalid_free(object, ip);
>  		return true;
>  	}
> +#else
> +	if (tag != (u8)shadow_byte) {
> +		kasan_report_invalid_free(set_tag(object, tag), ip);
> +		return true;
> +	}
> +#endif



static bool inline shadow_ivalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	else
		return tag != (u8)shadow_byte;
}


static bool __kasan_slab_free(struct kmem_cache *cache, void *object,

...
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}


>  
>  	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
>  	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
>  
> -	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
> +	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
> +			unlikely(!(cache->flags & SLAB_KASAN)))
>  		return false;
>  
>  	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
>  	quarantine_put(get_free_info(cache, object), cache);
> -	return true;
> +
> +	return IS_ENABLED(CONFIG_KASAN_GENERIC);
>  }
>  
>  bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
> @@ -352,6 +381,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  {
>  	unsigned long redzone_start;
>  	unsigned long redzone_end;
> +	u8 tag;
>  
>  	if (gfpflags_allow_blocking(flags))
>  		quarantine_reduce();
> @@ -364,14 +394,19 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  	redzone_end = round_up((unsigned long)object + cache->object_size,
>  				KASAN_SHADOW_SCALE_SIZE);
>  
> +#ifdef CONFIG_KASAN_GENERIC
>  	kasan_unpoison_shadow(object, size);
> +#else
> +	tag = random_tag();
> +	kasan_poison_shadow(object, redzone_start - (unsigned long)object, tag);
> +#endif

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7cd4a4e8c3be..f11d6059fc06 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -404,12 +404,9 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-#ifdef CONFIG_KASAN_GENERIC
-	kasan_unpoison_shadow(object, size);
-#else
 	tag = random_tag();
-	kasan_poison_shadow(object, redzone_start - (unsigned long)object, tag);
-#endif
+	kasan_unpoison_shadow(set_tag(object, tag), size);
+
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 


>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  		KASAN_KMALLOC_REDZONE);
>  
>  	if (cache->flags & SLAB_KASAN)
>  		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
>  
> -	return (void *)object;
> +	return set_tag(object, tag);
>  }
>  EXPORT_SYMBOL(kasan_kmalloc);
>  



> @@ -380,6 +415,7 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  	struct page *page;
>  	unsigned long redzone_start;
>  	unsigned long redzone_end;
> +	u8 tag;
>  
>  	if (gfpflags_allow_blocking(flags))
>  		quarantine_reduce();
> @@ -392,11 +428,16 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  				KASAN_SHADOW_SCALE_SIZE);
>  	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
>  
> +#ifdef CONFIG_KASAN_GENERIC
>  	kasan_unpoison_shadow(ptr, size);
> +#else
> +	tag = random_tag();
> +	kasan_poison_shadow(ptr, redzone_start - (unsigned long)ptr, tag);
> +#endif
>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  		KASAN_PAGE_REDZONE);
>  
> -	return (void *)ptr;
> +	return set_tag(ptr, tag);
>  }

kasan_kmalloc_large() should be left untouched. It works correctly as is in both cases.
ptr comes from the page allocator already tagged at this point.





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux