Convert obj_tagged() and related helpers to take zsdesc. Also make
their callers cast (struct page *) to (struct zsdesc *) when calling
them. The users will be converted gradually, as there are many.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c65bdce987e9..e1262c0a5ad4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1047,15 +1047,15 @@ static unsigned long handle_to_obj(unsigned long handle)
 	return *(unsigned long *)handle;
 }
 
-static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
+static bool obj_tagged(struct zsdesc *zsdesc, void *obj, unsigned long *phandle,
 		int tag)
 {
 	unsigned long handle;
-	struct zspage *zspage = get_zspage(page);
+	struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
 
 	if (unlikely(ZsHugePage(zspage))) {
-		VM_BUG_ON_PAGE(!is_first_page(page), page);
-		handle = page->index;
+		VM_BUG_ON_PAGE(!is_first_zsdesc(zsdesc), zsdesc_page(zsdesc));
+		handle = zsdesc->handle;
 	} else
 		handle = *(unsigned long *)obj;
 
@@ -1067,16 +1067,16 @@ static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
 	return true;
 }
 
-static inline bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
+static inline bool obj_allocated(struct zsdesc *zsdesc, void *obj, unsigned long *phandle)
 {
-	return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG);
+	return obj_tagged(zsdesc, obj, phandle, OBJ_ALLOCATED_TAG);
 }
 
 #ifdef CONFIG_ZPOOL
-static bool obj_stores_deferred_handle(struct page *page, void *obj,
+static bool obj_stores_deferred_handle(struct zsdesc *zsdesc, void *obj,
 		unsigned long *phandle)
 {
-	return obj_tagged(page, obj, phandle, OBJ_DEFERRED_HANDLE_TAG);
+	return obj_tagged(zsdesc, obj, phandle, OBJ_DEFERRED_HANDLE_TAG);
 }
 #endif
 
@@ -1112,7 +1112,7 @@ static int trylock_zspage(struct zspage *zspage)
 
 #ifdef CONFIG_ZPOOL
 static unsigned long find_deferred_handle_obj(struct size_class *class,
-		struct page *page, int *obj_idx);
+		struct zsdesc *zsdesc, int *obj_idx);
 
 /*
  * Free all the deferred handles whose objects are freed in zs_free.
@@ -1125,7 +1125,7 @@ static void free_handles(struct zs_pool *pool, struct size_class *class,
 	unsigned long handle;
 
 	while (1) {
-		handle = find_deferred_handle_obj(class, page, &obj_idx);
+		handle = find_deferred_handle_obj(class, page_zsdesc(page), &obj_idx);
 		if (!handle) {
 			page = get_next_page(page);
 			if (!page)
@@ -1906,18 +1906,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
  * return handle.
  */
 static unsigned long find_tagged_obj(struct size_class *class,
-					struct page *page, int *obj_idx, int tag)
+					struct zsdesc *zsdesc, int *obj_idx, int tag)
 {
 	unsigned int offset;
 	int index = *obj_idx;
 	unsigned long handle = 0;
-	void *addr = kmap_atomic(page);
+	void *addr = zsdesc_kmap_atomic(zsdesc);
 
-	offset = get_first_obj_offset(page);
+	offset = get_first_obj_offset(zsdesc_page(zsdesc));
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
-		if (obj_tagged(page, addr + offset, &handle, tag))
+		if (obj_tagged(zsdesc, addr + offset, &handle, tag))
 			break;
 
 		offset += class->size;
@@ -1936,9 +1936,9 @@ static unsigned long find_tagged_obj(struct size_class *class,
  * return handle.
  */
 static unsigned long find_alloced_obj(struct size_class *class,
-					struct page *page, int *obj_idx)
+					struct zsdesc *zsdesc, int *obj_idx)
 {
-	return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
+	return find_tagged_obj(class, zsdesc, obj_idx, OBJ_ALLOCATED_TAG);
 }
 
 #ifdef CONFIG_ZPOOL
@@ -1947,9 +1947,9 @@ static unsigned long find_alloced_obj(struct size_class *class,
  * and return handle.
  */
 static unsigned long find_deferred_handle_obj(struct size_class *class,
-		struct page *page, int *obj_idx)
+		struct zsdesc *zsdesc, int *obj_idx)
 {
-	return find_tagged_obj(class, page, obj_idx, OBJ_DEFERRED_HANDLE_TAG);
+	return find_tagged_obj(class, zsdesc, obj_idx, OBJ_DEFERRED_HANDLE_TAG);
 }
 #endif
 
@@ -1975,7 +1975,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	int ret = 0;
 
 	while (1) {
-		handle = find_alloced_obj(class, s_page, &obj_idx);
+		handle = find_alloced_obj(class, page_zsdesc(s_page), &obj_idx);
 		if (!handle) {
 			s_page = get_next_page(s_page);
 			if (!s_page)
@@ -2243,7 +2243,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
 					addr += class->size) {
-		if (obj_allocated(page, addr, &handle)) {
+		if (obj_allocated(page_zsdesc(page), addr, &handle)) {
 
 			old_obj = handle_to_obj(handle);
 			obj_to_location(old_obj, &dummy, &obj_idx);
@@ -2727,14 +2727,14 @@ static void restore_freelist(struct zs_pool *pool, struct size_class *class,
 		void *obj_addr = vaddr + off;
 
 		/* skip allocated object */
-		if (obj_allocated(page, obj_addr, &handle)) {
+		if (obj_allocated(page_zsdesc(page), obj_addr, &handle)) {
 			obj_idx++;
 			off += class->size;
 			continue;
 		}
 
 		/* free deferred handle from reclaim attempt */
-		if (obj_stores_deferred_handle(page, obj_addr, &handle))
+		if (obj_stores_deferred_handle(page_zsdesc(page), obj_addr, &handle))
 			cache_free_handle(pool, handle);
 
 		if (prev_free)
@@ -2830,7 +2830,7 @@ static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries)
 		obj_idx = 0;
 		page = get_first_page(zspage);
 		while (1) {
-			handle = find_alloced_obj(class, page, &obj_idx);
+			handle = find_alloced_obj(class, page_zsdesc(page), &obj_idx);
 			if (!handle) {
 				page = get_next_page(page);
 				if (!page)
-- 
2.25.1
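
A note for readers following the series out of order: the diff above
relies on the zsdesc type and its helpers (page_zsdesc(), zsdesc_page(),
zsdesc_kmap_atomic(), is_first_zsdesc()) introduced earlier in the
series and not reproduced here. The snippet below is a minimal userspace
sketch of the descriptor-cast pattern this patch applies; struct
fake_page, struct fake_zsdesc, and the helper names in it are simplified
stand-ins for illustration, not the actual zsmalloc definitions.

	/*
	 * Minimal userspace sketch of the descriptor-cast pattern (NOT the
	 * real zsmalloc code). A descriptor type overlays the generic page
	 * type, and trivial cast helpers convert between the two so that
	 * callers can be migrated one at a time.
	 */
	#include <stdio.h>

	/* Stand-in for struct page: a generic, multi-purpose structure. */
	struct fake_page {
		unsigned long flags;
		unsigned long index;	/* reused as a handle by huge zspages */
	};

	/*
	 * Stand-in for struct zsdesc: same layout as the page, but with
	 * fields named for their zsmalloc-specific use ("index" becomes
	 * "handle"). The kernel builds with -fno-strict-aliasing, which is
	 * what makes this kind of type punning safe there.
	 */
	struct fake_zsdesc {
		unsigned long flags;
		unsigned long handle;	/* aliases fake_page.index */
	};

	/* Cast helpers, analogous to page_zsdesc() and zsdesc_page(). */
	static inline struct fake_zsdesc *fake_page_zsdesc(struct fake_page *page)
	{
		return (struct fake_zsdesc *)page;
	}

	static inline struct fake_page *fake_zsdesc_page(struct fake_zsdesc *zsdesc)
	{
		return (struct fake_page *)zsdesc;
	}

	/* A converted helper: it now takes the descriptor, not the raw page. */
	static unsigned long read_handle(struct fake_zsdesc *zsdesc)
	{
		return zsdesc->handle;	/* was: page->index */
	}

	int main(void)
	{
		struct fake_page page = { .flags = 0, .index = 0xabcd };

		/* An unconverted caller casts at the call boundary. */
		printf("handle: %#lx\n", read_handle(fake_page_zsdesc(&page)));
		return 0;
	}

Because the descriptor overlays the same memory as the page, callers can
be converted one at a time: unconverted code keeps passing pages and
casts at the call boundary, exactly as free_handles() and
migrate_zspage() do in the diff above.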