From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

Now that all users except get_next_page() (which will be removed in a
later patch) use zpdesc, convert get_zspage() to take zpdesc instead
of page.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
---
 mm/zsmalloc.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c8a533b516ea..7ae98d048590 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -759,9 +759,9 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 	return newfg;
 }
 
-static struct zspage *get_zspage(struct page *page)
+static struct zspage *get_zspage(struct zpdesc *zpdesc)
 {
-	struct zspage *zspage = (struct zspage *)page_private(page);
+	struct zspage *zspage = zpdesc->zspage;
 
 	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
 	return zspage;
@@ -769,7 +769,7 @@ static struct zspage *get_zspage(struct page *page)
 
 static struct page *get_next_page(struct page *page)
 {
-	struct zspage *zspage = get_zspage(page);
+	struct zspage *zspage = get_zspage(page_zpdesc(page));
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -779,7 +779,7 @@ static struct page *get_next_page(struct page *page)
 
 static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
 {
-	struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+	struct zspage *zspage = get_zspage(zpdesc);
 
 	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
@@ -829,7 +829,7 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
 				 unsigned long *phandle)
 {
 	unsigned long handle;
-	struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+	struct zspage *zspage = get_zspage(zpdesc);
 
 	if (unlikely(ZsHugePage(zspage))) {
 		VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
@@ -1245,7 +1245,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	read_lock(&pool->migrate_lock);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zpdesc, &obj_idx);
-	zspage = get_zspage(zpdesc_page(zpdesc));
+	zspage = get_zspage(zpdesc);
 
 	/*
 	 * migration cannot move any zpages in this zspage. Here, class->lock
@@ -1295,7 +1295,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zpdesc, &obj_idx);
-	zspage = get_zspage(zpdesc_page(zpdesc));
+	zspage = get_zspage(zpdesc);
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
@@ -1459,7 +1459,7 @@ static void obj_free(int class_size, unsigned long obj)
 
 	obj_to_location(obj, &f_zpdesc, &f_objidx);
 	f_offset = offset_in_page(class_size * f_objidx);
-	zspage = get_zspage(zpdesc_page(f_zpdesc));
+	zspage = get_zspage(f_zpdesc);
 
 	vaddr = zpdesc_kmap_atomic(f_zpdesc);
 	link = (struct link_free *)(vaddr + f_offset);
@@ -1493,7 +1493,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	read_lock(&pool->migrate_lock);
 	obj = handle_to_obj(handle);
 	obj_to_zpdesc(obj, &f_zpdesc);
-	zspage = get_zspage(zpdesc_page(f_zpdesc));
+	zspage = get_zspage(f_zpdesc);
 	class = zspage_class(pool, zspage);
 	spin_lock(&class->lock);
 	read_unlock(&pool->migrate_lock);
@@ -1826,7 +1826,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	__SetPageZsmalloc(zpdesc_page(newzpdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(zpdesc_page(zpdesc));
+	zspage = get_zspage(zpdesc);
 	pool = zspage->pool;
 
 	/*
-- 
2.43.0