The patch titled Subject: mm/zsmalloc: use memcpy_from/to_page whereever possible has been added to the -mm mm-unstable branch. Its filename is mm-zsmalloc-use-memcpy_from-to_page-whereever-possible.patch This patch will shortly appear at https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zsmalloc-use-memcpy_from-to_page-whereever-possible.patch This patch will later appear in the mm-unstable branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm Before you just go and hit "reply", please: a) Consider who else should be cc'ed b) Prefer to cc a suitable mailing list as well c) Ideally: find the original patch on the mailing list and do a reply-to-all to that, adding suitable additional cc's *** Remember to use Documentation/process/submit-checklist.rst when testing your code *** The -mm tree is included into linux-next via the mm-everything branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm and is updated there every 2-3 working days ------------------------------------------------------ From: Pintu Kumar <quic_pintu@xxxxxxxxxxx> Subject: mm/zsmalloc: use memcpy_from/to_page whereever possible Date: Thu, 10 Oct 2024 23:21:43 +0530 As part of "zsmalloc: replace kmap_atomic with kmap_local_page" [1] we replaced kmap/kunmap_atomic() with kmap_local_page()/kunmap_local(). But later it was found that some of the code could be replaced with already available APIs in highmem.h, such as memcpy_from_page()/memcpy_to_page(). Also, update the comments with correct API naming. 
[1] https://lkml.kernel.org/r/20241001175358.12970-1-quic_pintu@xxxxxxxxxxx Link: https://lkml.kernel.org/r/20241010175143.27262-1-quic_pintu@xxxxxxxxxxx Signed-off-by: Pintu Kumar <quic_pintu@xxxxxxxxxxx> Suggested-by: Matthew Wilcox <willy@xxxxxxxxxxxxx> Suggested-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx> Cc: Joe Perches <joe@xxxxxxxxxxx> Cc: Minchan Kim <minchan@xxxxxxxxxx> Cc: Pintu Agarwal <pintu.ping@xxxxxxxxx> Cc: Shuah Khan <skhan@xxxxxxxxxxxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- mm/zsmalloc.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) --- a/mm/zsmalloc.c~mm-zsmalloc-use-memcpy_from-to_page-whereever-possible +++ a/mm/zsmalloc.c @@ -263,7 +263,7 @@ struct zspage { struct mapping_area { local_lock_t lock; char *vm_buf; /* copy buffer for objects that span pages */ - char *vm_addr; /* address of kmap_atomic()'ed pages */ + char *vm_addr; /* address of kmap_local_page()'ed pages */ enum zs_mapmode vm_mm; /* mapping mode */ }; @@ -1046,11 +1046,10 @@ static inline void __zs_cpu_down(struct static void *__zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) { - int sizes[2]; - void *addr; + size_t sizes[2]; char *buf = area->vm_buf; - /* disable page faults to match kmap_atomic() return conditions */ + /* disable page faults to match kmap_local_page() return conditions */ pagefault_disable(); /* no read fastpath */ @@ -1061,12 +1060,8 @@ static void *__zs_map_object(struct mapp sizes[1] = size - sizes[0]; /* copy object to per-cpu buffer */ - addr = kmap_local_page(pages[0]); - memcpy(buf, addr + off, sizes[0]); - kunmap_local(addr); - addr = kmap_local_page(pages[1]); - memcpy(buf + sizes[0], addr, sizes[1]); - kunmap_local(addr); + memcpy_from_page(buf, pages[0], off, sizes[0]); + memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]); out: return area->vm_buf; } @@ -1074,8 +1069,7 @@ out: static void __zs_unmap_object(struct mapping_area 
*area, struct page *pages[2], int off, int size) { - int sizes[2]; - void *addr; + size_t sizes[2]; char *buf; /* no write fastpath */ @@ -1091,15 +1085,11 @@ static void __zs_unmap_object(struct map sizes[1] = size - sizes[0]; /* copy per-cpu buffer to object */ - addr = kmap_local_page(pages[0]); - memcpy(addr + off, buf, sizes[0]); - kunmap_local(addr); - addr = kmap_local_page(pages[1]); - memcpy(addr, buf + sizes[0], sizes[1]); + memcpy_to_page(pages[0], off, buf, sizes[0]); + memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]); out: - /* enable page faults to match kunmap_atomic() return conditions */ + /* enable page faults to match kunmap_local() return conditions */ pagefault_enable(); } @@ -1511,10 +1501,10 @@ static void zs_object_copy(struct size_c d_size -= size; /* - * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic() - * calls must occurs in reverse order of calls to kmap_atomic(). - * So, to call kunmap_atomic(s_addr) we should first call - * kunmap_atomic(d_addr). For more details see + * Calling kunmap_local(d_addr) is necessary. kunmap_local() + * calls must occur in reverse order of calls to kmap_local_page(). + * So, to call kunmap_local(s_addr) we should first call + * kunmap_local(d_addr). For more details see * Documentation/mm/highmem.rst. */ if (s_off >= PAGE_SIZE) { _ Patches currently in -mm which might be from quic_pintu@xxxxxxxxxxx are mm-cma-fix-useless-return-in-void-function.patch zsmalloc-replace-kmap_atomic-with-kmap_local_page.patch mm-zsmalloc-use-memcpy_from-to_page-whereever-possible.patch