Now that all users except get_next_page() (which will be removed in a
later patch) use zsdesc, convert get_zspage() to take zsdesc instead of
page.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 488dc570d660..5af0fee6e3ed 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -989,9 +989,9 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
 	return newfg;
 }
 
-static struct zspage *get_zspage(struct page *page)
+static struct zspage *get_zspage(struct zsdesc *zsdesc)
 {
-	struct zspage *zspage = (struct zspage *)page_private(page);
+	struct zspage *zspage = zsdesc->zspage;
 
 	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
 	return zspage;
@@ -999,7 +999,7 @@ static struct zspage *get_zspage(struct page *page)
 
 static __maybe_unused struct page *get_next_page(struct page *page)
 {
-	struct zspage *zspage = get_zspage(page);
+	struct zspage *zspage = get_zspage(page_zsdesc(page));
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -1009,7 +1009,7 @@ static __maybe_unused struct page *get_next_page(struct page *page)
 
 static __maybe_unused struct zsdesc *get_next_zsdesc(struct zsdesc *zsdesc)
 {
-	struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
+	struct zspage *zspage = get_zspage(zsdesc);
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -1062,7 +1062,7 @@ static bool obj_tagged(struct zsdesc *zsdesc, void *obj, unsigned long *phandle,
 		int tag)
 {
 	unsigned long handle;
-	struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
+	struct zspage *zspage = get_zspage(zsdesc);
 
 	if (unlikely(ZsHugePage(zspage))) {
 		VM_BUG_ON_PAGE(!is_first_zsdesc(zsdesc), zsdesc_page(zsdesc));
@@ -1518,7 +1518,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zsdesc, &obj_idx);
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 
 #ifdef CONFIG_ZPOOL
 	/*
@@ -1593,7 +1593,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zsdesc, &obj_idx);
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	class = zspage_class(pool, zspage);
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
@@ -1757,7 +1757,7 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle)
 
 	obj_to_location(obj, &f_zsdesc, &f_objidx);
 	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
-	zspage = get_zspage(zsdesc_page(f_zsdesc));
+	zspage = get_zspage(f_zsdesc);
 
 	vaddr = zsdesc_kmap_atomic(f_zsdesc);
 	link = (struct link_free *)(vaddr + f_offset);
@@ -1804,7 +1804,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_zsdesc(obj, &f_zsdesc);
-	zspage = get_zspage(zsdesc_page(f_zsdesc));
+	zspage = get_zspage(f_zsdesc);
 	class = zspage_class(pool, zspage);
 
 	class_stat_dec(class, OBJ_USED, 1);
@@ -1987,13 +1987,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		}
 
 		/* Stop if there is no more space */
-		if (zspage_full(class, get_zspage(zsdesc_page(d_zsdesc)))) {
+		if (zspage_full(class, get_zspage(d_zsdesc))) {
 			ret = -ENOMEM;
 			break;
 		}
 
 		used_obj = handle_to_obj(handle);
-		free_obj = obj_malloc(pool, get_zspage(zsdesc_page(d_zsdesc)), handle);
+		free_obj = obj_malloc(pool, get_zspage(d_zsdesc), handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
 		record_obj(handle, free_obj);
@@ -2189,7 +2189,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	 */
 	VM_BUG_ON_PAGE(zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	migrate_write_lock(zspage);
 	inc_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
@@ -2223,7 +2223,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	pool = zspage->pool;
 
 	/*
@@ -2287,7 +2287,7 @@ static void zs_page_putback(struct page *page)
 
 	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	migrate_write_lock(zspage);
 	dec_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
-- 
2.25.1