The patch titled
     Subject: ia64: perfmon: don't mark buffer pages as PG_reserved
has been added to the -mm tree.  Its filename is
     ia64-perfmon-dont-mark-buffer-pages-as-pg_reserved.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/ia64-perfmon-dont-mark-buffer-pages-as-pg_reserved.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/ia64-perfmon-dont-mark-buffer-pages-as-pg_reserved.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: David Hildenbrand <david@xxxxxxxxxx>
Subject: ia64: perfmon: don't mark buffer pages as PG_reserved

In the old days, remap_pfn_range() required pages to be marked as
PG_reserved, so they would e.g. never get swapped out.  This was required
for special mappings.  Nowadays, this is fully handled via the VMA
(VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP inside remap_pfn_range()
to be precise).

PG_reserved is no longer required but only a relict from the past.  So
only architecture specific MM handling might require it (e.g. to detect
them as MMIO pages).  As there are no architecture specific checks for
PageReserved() apart from MCA handling in ia64 code, this can go.  Use
simple vzalloc()/vfree() instead.

Note that before calling vzalloc(), size has already been aligned to
PAGE_SIZE, no need to align again.

Link: http://lkml.kernel.org/r/20190114125903.24845-9-david@xxxxxxxxxx
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/ia64/kernel/perfmon.c |   59 ++---------------------------------
 1 file changed, 4 insertions(+), 55 deletions(-)

--- a/arch/ia64/kernel/perfmon.c~ia64-perfmon-dont-mark-buffer-pages-as-pg_reserved
+++ a/arch/ia64/kernel/perfmon.c
@@ -583,17 +583,6 @@ pfm_put_task(struct task_struct *task)
 	if (task != current) put_task_struct(task);
 }
 
-static inline void
-pfm_reserve_page(unsigned long a)
-{
-	SetPageReserved(vmalloc_to_page((void *)a));
-}
-static inline void
-pfm_unreserve_page(unsigned long a)
-{
-	ClearPageReserved(vmalloc_to_page((void*)a));
-}
-
 static inline unsigned long
 pfm_protect_ctx_ctxsw(pfm_context_t *x)
 {
@@ -816,44 +805,6 @@ pfm_reset_msgq(pfm_context_t *ctx)
 	DPRINT(("ctx=%p msgq reset\n", ctx));
 }
 
-static void *
-pfm_rvmalloc(unsigned long size)
-{
-	void *mem;
-	unsigned long addr;
-
-	size = PAGE_ALIGN(size);
-	mem = vzalloc(size);
-	if (mem) {
-		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		addr = (unsigned long)mem;
-		while (size > 0) {
-			pfm_reserve_page(addr);
-			addr+=PAGE_SIZE;
-			size-=PAGE_SIZE;
-		}
-	}
-	return mem;
-}
-
-static void
-pfm_rvfree(void *mem, unsigned long size)
-{
-	unsigned long addr;
-
-	if (mem) {
-		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
-		addr = (unsigned long) mem;
-		while ((long) size > 0) {
-			pfm_unreserve_page(addr);
-			addr+=PAGE_SIZE;
-			size-=PAGE_SIZE;
-		}
-		vfree(mem);
-	}
-	return;
-}
-
 static pfm_context_t *
 pfm_context_alloc(int ctx_flags)
 {
@@ -1498,7 +1449,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	/*
 	 * free the buffer
 	 */
-	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
+	vfree(ctx->ctx_smpl_hdr);
 
 	ctx->ctx_smpl_hdr = NULL;
 	ctx->ctx_smpl_size = 0UL;
@@ -2137,7 +2088,7 @@ doit:
 	 * All memory free operations (especially for vmalloc'ed memory)
 	 * MUST be done with interrupts ENABLED.
 	 */
-	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
+	vfree(smpl_buf_addr);
 
 	/*
 	 * return the memory used by the context
@@ -2266,10 +2217,8 @@ pfm_smpl_buffer_alloc(struct task_struct
 	/*
 	 * We do the easy to undo allocations first.
-	 *
-	 * pfm_rvmalloc(), clears the buffer, so there is no leak
 	 */
-	smpl_buf = pfm_rvmalloc(size);
+	smpl_buf = vzalloc(size);
 	if (smpl_buf == NULL) {
 		DPRINT(("Can't allocate sampling buffer\n"));
 		return -ENOMEM;
 	}
@@ -2346,7 +2295,7 @@ pfm_smpl_buffer_alloc(struct task_struct
 error:
 	vm_area_free(vma);
 error_kmem:
-	pfm_rvfree(smpl_buf, size);
+	vfree(smpl_buf);
 
 	return -ENOMEM;
 }
_

Patches currently in -mm which might be from david@xxxxxxxxxx are

mm-balloon-update-comment-about-isolation-migration-compaction.patch
mm-convert-pg_balloon-to-pg_offline.patch
kexec-export-pg_offline-to-vmcoreinfo.patch
xen-balloon-mark-inflated-pages-pg_offline.patch
hv_balloon-mark-inflated-pages-pg_offline.patch
vmw_balloon-mark-inflated-pages-pg_offline.patch
vmw_balloon-mark-inflated-pages-pg_offline-v2.patch
pm-hibernate-use-pfn_to_online_page.patch
pm-hibernate-exclude-all-pageoffline-pages.patch
pm-hibernate-exclude-all-pageoffline-pages-v2.patch
agp-efficeon-no-need-to-set-pg_reserved-on-gatt-tables.patch
s390-vdso-dont-clear-pg_reserved.patch
powerpc-vdso-dont-clear-pg_reserved.patch
riscv-vdso-dont-clear-pg_reserved.patch
m68k-mm-use-__clearpagereserved.patch
arm64-kexec-no-need-to-clearpagereserved.patch
arm64-kdump-no-need-to-mark-crashkernel-pages-manually-pg_reserved.patch
ia64-perfmon-dont-mark-buffer-pages-as-pg_reserved.patch
mm-better-document-pg_reserved.patch
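
[Editor's sketch, not part of the patch] For readers who want to see the pattern the
changelog describes outside the diff context: the mapping side (perfmon's
pfm_remap_buffer(), untouched by this patch) already calls remap_pfn_range()
per page, and remap_pfn_range() itself marks the VMA with
VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP, which is why no per-page
SetPageReserved()/ClearPageReserved() is needed anymore.  The following is a
minimal, hypothetical driver-style sketch of that pattern; the example_*
names are made up and the protection bits are generic, unlike perfmon's
read-only mapping.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Allocation and teardown reduce to plain vzalloc()/vfree(). */
static void *example_buf_alloc(unsigned long size)
{
	return vzalloc(PAGE_ALIGN(size));	/* zeroed, page-backed */
}

static void example_buf_free(void *buf)
{
	vfree(buf);				/* no ClearPageReserved() loop */
}

/*
 * Hypothetical mmap helper: map a vzalloc()ed buffer into userspace one
 * page at a time, the way perfmon's pfm_remap_buffer() does.  The first
 * remap_pfn_range() call marks the whole VMA as a special mapping
 * (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP), so the pages are
 * never touched by reclaim or core-dumping.
 */
static int example_map_buffer(struct vm_area_struct *vma, void *buf,
			      unsigned long size)
{
	unsigned long uaddr = vma->vm_start;

	/* size is assumed to be PAGE_ALIGNed by the caller, as in perfmon */
	while (size > 0) {
		if (remap_pfn_range(vma, uaddr, vmalloc_to_pfn(buf),
				    PAGE_SIZE, vma->vm_page_prot))
			return -ENOMEM;
		uaddr += PAGE_SIZE;
		buf   += PAGE_SIZE;
		size  -= PAGE_SIZE;
	}
	return 0;
}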