The patch titled
     Subject: mm/zsmalloc: add and use pfn/zpdesc seeking funcs
has been added to the -mm mm-unstable branch.  Its filename is
     mm-zsmalloc-add-and-use-pfn-zpdesc-seeking-funcs.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zsmalloc-add-and-use-pfn-zpdesc-seeking-funcs.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Subject: mm/zsmalloc: add and use pfn/zpdesc seeking funcs
Date: Tue, 17 Dec 2024 00:04:35 +0900

Add pfn_zpdesc(), zpdesc_pfn() and kmap_local_zpdesc().  Convert
obj_to_location() to take zpdesc and also convert its users to use
zpdesc.
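
For reviewers who want the object-value layout in front of them, here is
a minimal user-space sketch of the (pfn, obj_idx) packing that the
converted obj_to_location() decodes.  The OBJ_INDEX_BITS value and the
*_sketch() helpers below are illustrative stand-ins, not the kernel's
definitions; in mm/zsmalloc.c the constants are derived from the page
size, and the pfn lookup goes through the new pfn_zpdesc() helper rather
than returning a bare pfn:

#include <assert.h>
#include <stdio.h>

/* Illustrative values only; not the kernel's OBJ_INDEX_BITS. */
#define OBJ_INDEX_BITS	12
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

/* Pack (pfn, obj_idx) the way zsmalloc encodes an object value. */
static unsigned long location_to_obj_sketch(unsigned long pfn,
					    unsigned int obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

/* Mirror of the decode in the patched obj_to_location(). */
static void obj_to_location_sketch(unsigned long obj, unsigned long *pfn,
				   unsigned int *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;	/* kernel: pfn_zpdesc(obj >> OBJ_INDEX_BITS) */
	*obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn;
	unsigned int obj_idx;

	obj_to_location_sketch(location_to_obj_sketch(0x1234, 7),
			       &pfn, &obj_idx);
	assert(pfn == 0x1234 && obj_idx == 7);
	printf("pfn=%#lx obj_idx=%u\n", pfn, obj_idx);
	return 0;
}

This round trip is what zs_map_object(), zs_unmap_object(), obj_free()
and zs_object_copy() rely on when they recover a zpdesc from an encoded
object value.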

Link: https://lkml.kernel.org/r/20241216150450.1228021-5-42.hyeyoo@xxxxxxxxx
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
Acked-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Tested-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zpdesc.h   |   14 ++++++++
 mm/zsmalloc.c |   75 ++++++++++++++++++++++++------------------------
 2 files changed, 52 insertions(+), 37 deletions(-)

--- a/mm/zpdesc.h~mm-zsmalloc-add-and-use-pfn-zpdesc-seeking-funcs
+++ a/mm/zpdesc.h
@@ -134,4 +134,18 @@ static inline void zpdesc_put(struct zpd
 	folio_put(zpdesc_folio(zpdesc));
 }
 
+static inline void *kmap_local_zpdesc(struct zpdesc *zpdesc)
+{
+	return kmap_local_page(zpdesc_page(zpdesc));
+}
+
+static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc)
+{
+	return page_to_pfn(zpdesc_page(zpdesc));
+}
+
+static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
+{
+	return page_zpdesc(pfn_to_page(pfn));
+}
 #endif
--- a/mm/zsmalloc.c~mm-zsmalloc-add-and-use-pfn-zpdesc-seeking-funcs
+++ a/mm/zsmalloc.c
@@ -757,15 +757,15 @@ static struct zpdesc *get_next_zpdesc(st
 }
 
 /**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
  * @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zpdesc: zpdesc object resides in zspage
  * @obj_idx: object index
  */
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
 			    unsigned int *obj_idx)
 {
-	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+	*zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
 	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
@@ -1181,13 +1181,13 @@ void *zs_map_object(struct zs_pool *pool
 		    enum zs_mapmode mm)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zpdesc *zpdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
 	struct size_class *class;
 	struct mapping_area *area;
-	struct page *pages[2];
+	struct zpdesc *zpdescs[2];
 	void *ret;
 
 	/*
@@ -1200,8 +1200,8 @@ void *zs_map_object(struct zs_pool *pool
 	/* It guarantees it can get zspage from handle safely */
 	read_lock(&pool->migrate_lock);
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zpdesc, &obj_idx);
+	zspage = get_zspage(zpdesc_page(zpdesc));
 
 	/*
 	 * migration cannot move any zpages in this zspage. Here, class->lock
@@ -1220,17 +1220,17 @@ void *zs_map_object(struct zs_pool *pool
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
-		area->vm_addr = kmap_local_page(page);
+		area->vm_addr = kmap_local_zpdesc(zpdesc);
 		ret = area->vm_addr + off;
 		goto out;
 	}
 
 	/* this object spans two pages */
-	pages[0] = page;
-	pages[1] = get_next_page(page);
-	BUG_ON(!pages[1]);
+	zpdescs[0] = zpdesc;
+	zpdescs[1] = get_next_zpdesc(zpdesc);
+	BUG_ON(!zpdescs[1]);
 
-	ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
+	ret = __zs_map_object(area, zpdescs, off, class->size);
 out:
 	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
@@ -1242,7 +1242,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zpdesc *zpdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
@@ -1250,8 +1250,8 @@ void zs_unmap_object(struct zs_pool *poo
 	struct mapping_area *area;
 
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zpdesc, &obj_idx);
+	zspage = get_zspage(zpdesc_page(zpdesc));
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
@@ -1259,13 +1259,13 @@ void zs_unmap_object(struct zs_pool *poo
 	if (off + class->size <= PAGE_SIZE)
 		kunmap_local(area->vm_addr);
 	else {
-		struct page *pages[2];
+		struct zpdesc *zpdescs[2];
 
-		pages[0] = page;
-		pages[1] = get_next_page(page);
-		BUG_ON(!pages[1]);
+		zpdescs[0] = zpdesc;
+		zpdescs[1] = get_next_zpdesc(zpdesc);
+		BUG_ON(!zpdescs[1]);
 
-		__zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
+		__zs_unmap_object(area, zpdescs, off, class->size);
 	}
 	local_unlock(&zs_map_area.lock);
 
@@ -1406,23 +1406,24 @@ static void obj_free(int class_size, uns
 {
 	struct link_free *link;
 	struct zspage *zspage;
-	struct page *f_page;
+	struct zpdesc *f_zpdesc;
 	unsigned long f_offset;
 	unsigned int f_objidx;
 	void *vaddr;
 
-	obj_to_location(obj, &f_page, &f_objidx);
+
+	obj_to_location(obj, &f_zpdesc, &f_objidx);
 	f_offset = offset_in_page(class_size * f_objidx);
-	zspage = get_zspage(f_page);
+	zspage = get_zspage(zpdesc_page(f_zpdesc));
 
-	vaddr = kmap_local_page(f_page);
+	vaddr = kmap_local_zpdesc(f_zpdesc);
 	link = (struct link_free *)(vaddr + f_offset);
 
 	/* Insert this object in containing zspage's freelist */
 	if (likely(!ZsHugePage(zspage)))
 		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 	else
-		f_page->index = 0;
+		f_zpdesc->handle = 0;
 	set_freeobj(zspage, f_objidx);
 
 	kunmap_local(vaddr);
@@ -1467,7 +1468,7 @@ EXPORT_SYMBOL_GPL(zs_free);
 static void zs_object_copy(struct size_class *class, unsigned long dst,
 			   unsigned long src)
 {
-	struct page *s_page, *d_page;
+	struct zpdesc *s_zpdesc, *d_zpdesc;
 	unsigned int s_objidx, d_objidx;
 	unsigned long s_off, d_off;
 	void *s_addr, *d_addr;
@@ -1476,8 +1477,8 @@ static void zs_object_copy(struct size_c
 
 	s_size = d_size = class->size;
 
-	obj_to_location(src, &s_page, &s_objidx);
-	obj_to_location(dst, &d_page, &d_objidx);
+	obj_to_location(src, &s_zpdesc, &s_objidx);
+	obj_to_location(dst, &d_zpdesc, &d_objidx);
 
 	s_off = offset_in_page(class->size * s_objidx);
 	d_off = offset_in_page(class->size * d_objidx);
 
@@ -1488,8 +1489,8 @@ static void zs_object_copy(struct size_c
 	if (d_off + class->size > PAGE_SIZE)
 		d_size = PAGE_SIZE - d_off;
 
-	s_addr = kmap_local_page(s_page);
-	d_addr = kmap_local_page(d_page);
+	s_addr = kmap_local_zpdesc(s_zpdesc);
+	d_addr = kmap_local_zpdesc(d_zpdesc);
 
 	while (1) {
 		size = min(s_size, d_size);
@@ -1514,17 +1515,17 @@ static void zs_object_copy(struct size_c
 		if (s_off >= PAGE_SIZE) {
 			kunmap_local(d_addr);
 			kunmap_local(s_addr);
-			s_page = get_next_page(s_page);
-			s_addr = kmap_local_page(s_page);
-			d_addr = kmap_local_page(d_page);
+			s_zpdesc = get_next_zpdesc(s_zpdesc);
+			s_addr = kmap_local_zpdesc(s_zpdesc);
+			d_addr = kmap_local_zpdesc(d_zpdesc);
 			s_size = class->size - written;
 			s_off = 0;
 		}
 
 		if (d_off >= PAGE_SIZE) {
 			kunmap_local(d_addr);
-			d_page = get_next_page(d_page);
-			d_addr = kmap_local_page(d_page);
+			d_zpdesc = get_next_zpdesc(d_zpdesc);
+			d_addr = kmap_local_zpdesc(d_zpdesc);
 			d_size = class->size - written;
 			d_off = 0;
 		}
@@ -1763,7 +1764,7 @@ static int zs_page_migrate(struct page *
 	struct zs_pool *pool;
 	struct size_class *class;
 	struct zspage *zspage;
-	struct page *dummy;
+	struct zpdesc *dummy;
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
_

Patches currently in -mm which might be from 42.hyeyoo@xxxxxxxxx are

mm-migrate-remove-slab-checks-in-isolate_movable_page.patch
mm-zsmalloc-convert-__zs_map_object-__zs_unmap_object-to-use-zpdesc.patch
mm-zsmalloc-add-and-use-pfn-zpdesc-seeking-funcs.patch
mm-zsmalloc-convert-obj_malloc-to-use-zpdesc.patch
mm-zsmalloc-convert-obj_allocated-and-related-helpers-to-use-zpdesc.patch
mm-zsmalloc-convert-init_zspage-to-use-zpdesc.patch
mm-zsmalloc-convert-obj_to_page-and-zs_free-to-use-zpdesc.patch
mm-zsmalloc-add-two-helpers-for-zs_page_migrate-and-make-it-use-zpdesc.patch
mm-zsmalloc-convert-__free_zspage-to-use-zpdesc.patch
mm-zsmalloc-convert-location_to_obj-to-take-zpdesc.patch
mm-zsmalloc-convert-migrate_zspage-to-use-zpdesc.patch
mm-zsmalloc-convert-get_zspage-to-take-zpdesc.patch