It is more logical to update the stat before the page is freed, to
avoid use-after-free scenarios.

Fixes: 15995a352474 ("mm: report per-page metadata information")
Signed-off-by: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
---
 mm/hugetlb_vmemmap.c | 4 ++--
 mm/page_ext.c        | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 829112b0a914..fa83a7b38199 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 static inline void free_vmemmap_page(struct page *page)
 {
 	if (PageReserved(page)) {
-		free_bootmem_page(page);
 		mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
+		free_bootmem_page(page);
 	} else {
-		__free_page(page);
 		mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
+		__free_page(page);
 	}
 }
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index c191e490c401..962d45eee1f8 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -330,18 +330,18 @@ static void free_page_ext(void *addr)
 	if (is_vmalloc_addr(addr)) {
 		page = vmalloc_to_page(addr);
 		pgdat = page_pgdat(page);
+		mod_node_page_state(pgdat, NR_MEMMAP,
+				    -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
 		vfree(addr);
 	} else {
 		page = virt_to_page(addr);
 		pgdat = page_pgdat(page);
+		mod_node_page_state(pgdat, NR_MEMMAP,
+				    -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
 		BUG_ON(PageReserved(page));
 		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
-
-	mod_node_page_state(pgdat, NR_MEMMAP,
-			    -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
-
 }
 
 static void __free_page_ext(unsigned long pfn)
-- 
2.46.0.76.ge559c4bf1a-goog
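
A note on the reasoning, for readers outside mm/: in the old ordering,
free_vmemmap_page() called page_pgdat(page) only after the page had
already been handed back via free_bootmem_page()/__free_page(), so the
stat update dereferenced a freed struct page. The sketch below is a
stand-alone user-space analogy of the same ordering rule (hypothetical
names such as fake_page and nr_memmap; it is not kernel code and not
part of this patch):

/*
 * Illustrative user-space analogy only -- hypothetical names, not
 * kernel APIs. It mirrors the ordering rule in the patch: metadata
 * derived from an object must be read before the object is freed.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4

struct fake_page {
	int node;			/* node whose counter this page is charged to */
};

static long nr_memmap[MAX_NODES];	/* per-node stat, analogous to NR_MEMMAP */

/* Old ordering (buggy): pg->node is read after free() -- a use-after-free. */
void free_and_account_buggy(struct fake_page *pg)
{
	free(pg);
	nr_memmap[pg->node]--;		/* UAF: pg has already been freed */
}

/* New ordering, as in the patch: update the stat first, then free. */
void free_and_account_fixed(struct fake_page *pg)
{
	nr_memmap[pg->node]--;		/* pg is still valid here */
	free(pg);
}

int main(void)
{
	struct fake_page *pg = malloc(sizeof(*pg));

	if (!pg)
		return 1;
	pg->node = 0;
	nr_memmap[pg->node]++;

	free_and_account_fixed(pg);
	printf("node 0 counter after free: %ld\n", nr_memmap[0]);
	return 0;
}

The fixed ordering is what the patch applies in both files: read
everything needed from the object, fold it into the per-node counter,
and only then return the memory to the allocator.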