The patch titled
     Subject: mm/zsmalloc: move record_obj() into obj_malloc()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-zsmalloc-move-record_obj-into-obj_malloc.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zsmalloc-move-record_obj-into-obj_malloc.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Chengming Zhou <chengming.zhou@xxxxxxxxx>
Subject: mm/zsmalloc: move record_obj() into obj_malloc()
Date: Thu, 27 Jun 2024 15:59:59 +0800

We always call record_obj() to make the handle point to the object right
after obj_malloc(), so simplify the code by moving record_obj() into
obj_malloc().  There should be no functional change.

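For readers who don't have the zsmalloc internals paged in, here is a
minimal userspace C sketch of the calling-convention change.  The names
record_obj() and obj_malloc() mirror the kernel's, but the bodies are
simplified stand-ins (a "handle" here is just a heap slot that records
where the object lives), not the zsmalloc implementation:

#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-in: a "handle" is an out-of-line slot that records
 * where the allocated object lives; record_obj() fills that slot in.
 */
static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

static unsigned long obj_malloc(unsigned long handle)
{
	/* stand-in for carving an object out of a zspage */
	unsigned long obj = (unsigned long)malloc(64);

	/* after the patch, the pairing lives here, in one place */
	record_obj(handle, obj);
	return obj;
}

int main(void)
{
	unsigned long handle = (unsigned long)malloc(sizeof(unsigned long));

	/*
	 * Before the patch every caller had to write the pair
	 *	obj = obj_malloc(...);
	 *	record_obj(handle, obj);
	 * itself; now one call suffices, and the return value can be
	 * ignored by callers that only need the handle filled in.
	 */
	obj_malloc(handle);

	printf("handle -> object %#lx\n", *(unsigned long *)handle);

	free((void *)*(unsigned long *)handle);
	free((void *)handle);
	return 0;
}

After this patch the zs_malloc() and migrate_zspage() call sites have
exactly this shape: each obj = obj_malloc(...) / record_obj(handle, obj)
pair in the diff below collapses into a single obj_malloc() call.
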
Link: https://lkml.kernel.org/r/20240627075959.611783-2-chengming.zhou@xxxxxxxxx
Signed-off-by: Chengming Zhou <chengming.zhou@xxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zsmalloc.c |   15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

--- a/mm/zsmalloc.c~mm-zsmalloc-move-record_obj-into-obj_malloc
+++ a/mm/zsmalloc.c
@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct z
 	void *vaddr;
 
 	class = pool->size_class[zspage->class];
-	handle |= OBJ_ALLOCATED_TAG;
 	obj = get_freeobj(zspage);
 
 	offset = obj * class->size;
@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct z
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!ZsHugePage(zspage)))
 		/* record handle in the header of allocated chunk */
-		link->handle = handle;
+		link->handle = handle | OBJ_ALLOCATED_TAG;
 	else
 		/* record handle to page->index */
-		zspage->first_page->index = handle;
+		zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
 
 	obj = location_to_obj(m_page, obj);
+	record_obj(handle, obj);
 
 	return obj;
 }
@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct z
  */
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 {
-	unsigned long handle, obj;
+	unsigned long handle;
 	struct size_class *class;
 	int newfg;
 	struct zspage *zspage;
@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *
 	spin_lock(&class->lock);
 	zspage = find_get_zspage(class);
 	if (likely(zspage)) {
-		obj = obj_malloc(pool, zspage, handle);
+		obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
-		record_obj(handle, obj);
 		class_stat_inc(class, ZS_OBJS_INUSE, 1);
 
 		goto out;
@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *
 	}
 
 	spin_lock(&class->lock);
-	obj = obj_malloc(pool, zspage, handle);
+	obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	class_stat_inc(class, ZS_OBJS_INUSE, 1);
@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_poo
 		free_obj = obj_malloc(pool, dst_zspage, handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
-		record_obj(handle, free_obj);
 		obj_free(class->size, used_obj);
 
 		/* Stop if there is no more space */
_

Patches currently in -mm which might be from chengming.zhou@xxxxxxxxx are

mm-zswap-use-only-one-pool-in-zswap.patch
mm-zswap-use-only-one-pool-in-zswap-v2.patch
mm-zswap-use-only-one-pool-in-zswap-v3.patch
mm-ksm-refactor-out-try_to_merge_with_zero_page.patch
mm-ksm-dont-waste-time-searching-stable-tree-for-fast-changing-page.patch
mm-ksm-optimize-the-chain-chain_prune-interfaces.patch
mm-zsmalloc-fix-class-per-fullness-zspage-counts.patch
mm-zsmalloc-move-record_obj-into-obj_malloc.patch