Now that all users except get_next_page() (which will be removed in a
later patch) use zsdesc, convert get_zspage() to take zsdesc instead of
page.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index db43a5d05233..6cb216b8564a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -899,9 +899,9 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 	return newfg;
 }
 
-static struct zspage *get_zspage(struct page *page)
+static struct zspage *get_zspage(struct zsdesc *zsdesc)
 {
-	struct zspage *zspage = (struct zspage *)page_private(page);
+	struct zspage *zspage = zsdesc->zspage;
 
 	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
 	return zspage;
@@ -909,7 +909,7 @@ static struct zspage *get_zspage(struct page *page)
 
 static __maybe_unused struct page *get_next_page(struct page *page)
 {
-	struct zspage *zspage = get_zspage(page);
+	struct zspage *zspage = get_zspage(page_zsdesc(page));
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -919,7 +919,7 @@ static __maybe_unused struct page *get_next_page(struct page *page)
 
 static __maybe_unused struct zsdesc *get_next_zsdesc(struct zsdesc *zsdesc)
 {
-	struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
+	struct zspage *zspage = get_zspage(zsdesc);
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -972,7 +972,7 @@ static inline bool obj_allocated(struct zsdesc *zsdesc, void *obj,
 				 unsigned long *phandle)
 {
 	unsigned long handle;
-	struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
+	struct zspage *zspage = get_zspage(zsdesc);
 
 	if (unlikely(ZsHugePage(zspage))) {
 		VM_BUG_ON_PAGE(!is_first_zsdesc(zsdesc), zsdesc_page(zsdesc));
@@ -1377,7 +1377,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zsdesc, &obj_idx);
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 
 	/*
 	 * migration cannot move any zpages in this zspage. Here, pool->lock
@@ -1427,7 +1427,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zsdesc, &obj_idx);
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
@@ -1591,7 +1591,7 @@ static void obj_free(int class_size, unsigned long obj)
 
 	obj_to_location(obj, &f_zsdesc, &f_objidx);
 	f_offset = offset_in_page(class_size * f_objidx);
-	zspage = get_zspage(zsdesc_page(f_zsdesc));
+	zspage = get_zspage(f_zsdesc);
 
 	vaddr = zsdesc_kmap_atomic(f_zsdesc);
 	link = (struct link_free *)(vaddr + f_offset);
@@ -1625,7 +1625,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_zsdesc(obj, &f_zsdesc);
-	zspage = get_zspage(zsdesc_page(f_zsdesc));
+	zspage = get_zspage(f_zsdesc);
 	class = zspage_class(pool, zspage);
 
 	class_stat_dec(class, ZS_OBJS_INUSE, 1);
@@ -1951,7 +1951,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	 */
 	VM_BUG_ON_PAGE(zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	migrate_write_lock(zspage);
 	inc_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
@@ -1985,7 +1985,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	pool = zspage->pool;
 
 	/*
@@ -2049,7 +2049,7 @@ static void zs_page_putback(struct page *page)
 
 	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(zsdesc_page(zsdesc));
+	zspage = get_zspage(zsdesc);
 	migrate_write_lock(zspage);
 	dec_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
-- 
2.41.0
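
For readers following the conversion, here is a minimal userspace sketch of
the calling-convention change above. It only mirrors the new get_zspage()
signature; the struct layouts, the ZSPAGE_MAGIC value and the back-pointer
field below are illustrative assumptions, not the real zsmalloc definitions.

/*
 * Standalone sketch (not kernel code): get_zspage() now takes the zsdesc
 * directly instead of deriving the zspage from a struct page.
 */
#include <assert.h>

#define ZSPAGE_MAGIC	0x58	/* illustrative value, not the kernel's */

struct zspage {
	unsigned int magic;	/* sanity-checked on every lookup */
};

struct zsdesc {
	struct zspage *zspage;	/* assumed back-pointer, stands in for page_private() */
};

/* After the patch: callers pass the zsdesc they already hold. */
static struct zspage *get_zspage(struct zsdesc *zsdesc)
{
	struct zspage *zspage = zsdesc->zspage;

	assert(zspage->magic == ZSPAGE_MAGIC);
	return zspage;
}

int main(void)
{
	struct zspage zspage = { .magic = ZSPAGE_MAGIC };
	struct zsdesc zsdesc = { .zspage = &zspage };

	/* was: get_zspage(zsdesc_page(zsdesc)); the wrapper call is gone */
	return get_zspage(&zsdesc) == &zspage ? 0 : 1;
}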