On 05/08/2018 08:21 PM, Andrey Konovalov wrote:

> +#ifndef CONFIG_KASAN_HW
>  #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
> +#else
> +#define page_to_virt(page)	({					\
> +	unsigned long __addr =						\
> +		((__page_to_voff(page)) | PAGE_OFFSET);			\
> +	if (!PageSlab((struct page *)page))				\
> +		__addr = KASAN_SET_TAG(__addr, page_kasan_tag(page));	\

You could avoid the 'if (!PageSlab())' check here by adding
page_kasan_tag_reset() into kasan_poison_slab() (see the sketch further
down).

> +	((void *)__addr);						\
> +})
> +#endif
> +
>  #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
> 
>  #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
> diff --git a/mm/cma.c b/mm/cma.c
> index aa40e6c7b042..f657db289bba 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -526,6 +526,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
>  	}
> 
>  	trace_cma_alloc(pfn, page, count, align);
> +	page_kasan_tag_reset(page);

Why? This needs a comment explaining the reason (see the note at the end
of this mail).

> 
>  	if (ret && !(gfp_mask & __GFP_NOWARN)) {
>  		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 0654bf97257b..7cd4a4e8c3be 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -207,8 +207,18 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
> 
>  void kasan_alloc_pages(struct page *page, unsigned int order)
>  {
> +#ifdef CONFIG_KASAN_GENERIC
>  	if (likely(!PageHighMem(page)))
>  		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
> +#else
> +	if (!PageSlab(page)) {
> +		u8 tag = random_tag();
> +
> +		kasan_poison_shadow(page_address(page), PAGE_SIZE << order,
> +				tag);
> +		page_kasan_tag_set(page, tag);
> +	}
> +#endif
>  }

With page_kasan_tag_reset() in kasan_poison_slab(), this whole function
can be simplified to:

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index b8e0a8215021..f9f2181164a2 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -207,18 +207,11 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-#ifdef CONFIG_KASAN_GENERIC
-	if (likely(!PageHighMem(page)))
-		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
-#else
-	if (!PageSlab(page)) {
-		u8 tag = random_tag();
+	if (unlikely(PageHighMem(page)))
+		return;
 
-		kasan_poison_shadow(page_address(page), PAGE_SIZE << order,
-				tag);
-		page_kasan_tag_set(page, tag);
-	}
-#endif
+	page_kasan_tag_set(page, random_tag());
+	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
-- 
2.16.1

> 
>  void kasan_free_pages(struct page *page, unsigned int order)
> @@ -433,6 +443,7 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  #else
>  	tag = random_tag();
>  	kasan_poison_shadow(ptr, redzone_start - (unsigned long)ptr, tag);
> +	page_kasan_tag_set(page, tag);

As already said before, no changes are needed in kasan_kmalloc_large():
kasan_alloc_pages() has already done the page_kasan_tag_set().
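To make the kasan_poison_slab() suggestion above concrete, I'm thinking
of something roughly like this (an untested sketch; the per-subpage loop
is an assumption on my side, since page_kasan_tag() reads the tag from
each individual struct page):

void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	/*
	 * Reset the tag stored in page->flags for every subpage, so
	 * that page_to_virt() no longer needs the PageSlab() special
	 * case: slab pages then always map to an untagged address.
	 */
	for (i = 0; i < (1 << compound_order(page)); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}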
>  #endif
>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  			KASAN_PAGE_REDZONE);
> @@ -462,7 +473,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
>  	page = virt_to_head_page(ptr);
> 
>  	if (unlikely(!PageSlab(page))) {
> -		if (reset_tag(ptr) != page_address(page)) {
> +		if (ptr != page_address(page)) {
>  			kasan_report_invalid_free(ptr, ip);
>  			return;
>  		}
> @@ -475,7 +486,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
> 
>  void kasan_kfree_large(void *ptr, unsigned long ip)
>  {
> -	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
> +	if (ptr != page_address(virt_to_head_page(ptr)))
>  		kasan_report_invalid_free(ptr, ip);
>  	/* The object will be poisoned by page_alloc. */
>  }
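And the promised note on the cma_alloc() question: my guess is that the
reset is needed because a CMA allocation can span multiple page blocks,
so its pages may end up carrying different tags. If that's the reason,
the comment should say so, and I'd expect a per-page reset rather than
resetting only the first page. Roughly (a guess at the rationale, not
tested):

	/*
	 * CMA can allocate multiple page blocks, which can result in
	 * pages of one allocation being marked with different tags.
	 * Reset the tags so that page_to_virt() is consistent across
	 * the whole allocation.
	 */
	for (i = 0; i < count; i++)
		page_kasan_tag_reset(page + i);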