The patch titled
     Subject: mm/zsmalloc: get rid of PAGE_MASK
has been added to the -mm mm-unstable branch.  Its filename is
     mm-zsmalloc-get-rid-of-page_mask.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zsmalloc-get-rid-of-page_mask.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Alexey Romanov <avromanov@xxxxxxxxxxxxxx>
Subject: mm/zsmalloc: get rid of PAGE_MASK
Date: Tue, 16 May 2023 12:50:29 +0300

Use the offset_in_page() macro instead of open-coding 'val & ~PAGE_MASK'.

Link: https://lkml.kernel.org/r/20230516095029.49036-2-avromanov@xxxxxxxxxxxxxx
Signed-off-by: Alexey Romanov <avromanov@xxxxxxxxxxxxxx>
Reviewed-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zsmalloc.c |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

--- a/mm/zsmalloc.c~mm-zsmalloc-get-rid-of-page_mask
+++ a/mm/zsmalloc.c
@@ -1341,7 +1341,7 @@ void *zs_map_object(struct zs_pool *pool
 	spin_unlock(&pool->lock);
 
 	class = zspage_class(pool, zspage);
-	off = (class->size * obj_idx) & ~PAGE_MASK;
+	off = offset_in_page(class->size * obj_idx);
 
 	local_lock(&zs_map_area.lock);
 	area = this_cpu_ptr(&zs_map_area);
@@ -1381,7 +1381,7 @@ void zs_unmap_object(struct zs_poo
 	obj_to_location(obj, &page, &obj_idx);
 	zspage = get_zspage(page);
 	class = zspage_class(pool, zspage);
-	off = (class->size * obj_idx) & ~PAGE_MASK;
+	off = offset_in_page(class->size * obj_idx);
 
 	area = this_cpu_ptr(&zs_map_area);
 	if (off + class->size <= PAGE_SIZE)
@@ -1438,7 +1438,7 @@ static unsigned long obj_malloc(struct z
 
 	offset = obj * class->size;
 	nr_page = offset >> PAGE_SHIFT;
-	m_offset = offset & ~PAGE_MASK;
+	m_offset = offset_in_page(offset);
 	m_page = get_first_page(zspage);
 
 	for (i = 0; i < nr_page; i++)
@@ -1548,7 +1548,7 @@ static void obj_free(int class_size, uns
 	void *vaddr;
 
 	obj_to_location(obj, &f_page, &f_objidx);
-	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
+	f_offset = offset_in_page(class_size * f_objidx);
 	zspage = get_zspage(f_page);
 
 	vaddr = kmap_atomic(f_page);
@@ -1640,8 +1640,8 @@ static void zs_object_copy(struct size_c
 	obj_to_location(src, &s_page, &s_objidx);
 	obj_to_location(dst, &d_page, &d_objidx);
 
-	s_off = (class->size * s_objidx) & ~PAGE_MASK;
-	d_off = (class->size * d_objidx) & ~PAGE_MASK;
+	s_off = offset_in_page(class->size * s_objidx);
+	d_off = offset_in_page(class->size * d_objidx);
 
 	if (s_off + class->size > PAGE_SIZE)
 		s_size = PAGE_SIZE - s_off;
_

Patches currently in -mm which might be from avromanov@xxxxxxxxxxxxxx are

mm-zsmalloc-get-rid-of-page_mask.patch
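
For reference, the conversion is behaviour-preserving: offset_in_page()
is defined in include/linux/mm.h as exactly the mask operation being
replaced,

	#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

so, for example, with 4K pages offset_in_page(0x12345) evaluates to
0x345, the same value '0x12345 & ~PAGE_MASK' produced before.  The macro
just states the intent (byte offset within a page) instead of the bit
manipulation.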