free_page() does not need to be protected by the page table lock, and it takes
a little time, so move it outside the spinlock to shorten the locked section.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/kasan/shadow.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index fd15e38ff80e..d7d6724da2e0 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -423,12 +423,13 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);
 
 	spin_lock(&init_mm.page_table_lock);
-
-	if (likely(!pte_none(ptep_get(ptep)))) {
+	if (likely(!pte_none(ptep_get(ptep))))
 		pte_clear(&init_mm, addr, ptep);
-		free_page(page);
-	}
+	else
+		page = 0;
 	spin_unlock(&init_mm.page_table_lock);
+	if (page)
+		free_page(page);
 
 	return 0;
 }
-- 
2.41.0
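
For reference, a sketch of how kasan_depopulate_vmalloc_pte() reads with this
patch applied. The second parameter line (void *unused) and the unsigned long
page declaration sit outside the hunk and are assumed from context, not part
of the diff above.

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);
	if (likely(!pte_none(ptep_get(ptep))))
		pte_clear(&init_mm, addr, ptep);
	else
		page = 0;	/* PTE already cleared elsewhere; nothing to free */
	spin_unlock(&init_mm.page_table_lock);

	/* free_page() does not touch the page tables, so call it unlocked */
	if (page)
		free_page(page);

	return 0;
}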