From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

To convert page to zpdesc in zs_page_migrate(), add the
zpdesc_is_isolated() and zpdesc_zone() helpers. No functional change.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
---
 mm/zpdesc.h   | 11 +++++++++++
 mm/zsmalloc.c | 30 ++++++++++++++++--------------
 2 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 3a65a7d494b7..4b42d8517fcb 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -114,4 +114,15 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
 {
 	__SetPageMovable(zpdesc_page(zpdesc), mops);
 }
+
+static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
+{
+	return PageIsolated(zpdesc_page(zpdesc));
+}
+
+static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
+{
+	return page_zone(zpdesc_page(zpdesc));
+}
+
 #endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 90d93cdc5df6..caa4a7883ab8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1817,19 +1817,21 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	struct size_class *class;
 	struct zspage *zspage;
 	struct zpdesc *dummy;
+	struct zpdesc *newzpdesc = page_zpdesc(newpage);
+	struct zpdesc *zpdesc = page_zpdesc(page);
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
 	unsigned long old_obj, new_obj;
 	unsigned int obj_idx;
 
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
 
 	/* We're committed, tell the world that this is a Zsmalloc page. */
-	__SetPageZsmalloc(newpage);
+	__SetPageZsmalloc(zpdesc_page(newzpdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(page);
+	zspage = get_zspage(zpdesc_page(zpdesc));
 	pool = zspage->pool;
 
 	/*
@@ -1846,30 +1848,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* the migrate_write_lock protects zpage access via zs_map_object */
 	migrate_write_lock(zspage);
 
-	offset = get_first_obj_offset(page);
-	s_addr = kmap_atomic(page);
+	offset = get_first_obj_offset(zpdesc_page(zpdesc));
+	s_addr = zpdesc_kmap_atomic(zpdesc);
 
 	/*
 	 * Here, any user cannot access all objects in the zspage so let's move.
 	 */
-	d_addr = kmap_atomic(newpage);
+	d_addr = zpdesc_kmap_atomic(newzpdesc);
 	copy_page(d_addr, s_addr);
 	kunmap_atomic(d_addr);
 
 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
 					addr += class->size) {
-		if (obj_allocated(page_zpdesc(page), addr, &handle)) {
+		if (obj_allocated(zpdesc, addr, &handle)) {
 
 			old_obj = handle_to_obj(handle);
 			obj_to_location(old_obj, &dummy, &obj_idx);
-			new_obj = (unsigned long)location_to_obj(newpage,
+			new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc),
 								obj_idx);
 			record_obj(handle, new_obj);
 		}
 	}
 	kunmap_atomic(s_addr);
 
-	replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
+	replace_sub_page(class, zspage, newzpdesc, zpdesc);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release migration_lock.
@@ -1878,14 +1880,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	spin_unlock(&class->lock);
 	migrate_write_unlock(zspage);
 
-	get_page(newpage);
-	if (page_zone(newpage) != page_zone(page)) {
-		dec_zone_page_state(page, NR_ZSPAGES);
-		inc_zone_page_state(newpage, NR_ZSPAGES);
+	zpdesc_get(newzpdesc);
+	if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+		zpdesc_dec_zone_page_state(zpdesc);
+		zpdesc_inc_zone_page_state(newzpdesc);
 	}
 
 	reset_page(page);
-	put_page(page);
+	zpdesc_put(zpdesc);
 
 	return MIGRATEPAGE_SUCCESS;
 }
--
2.46.0
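
A note for readers unfamiliar with the zpdesc layer: the two new helpers
are thin wrappers that recover the struct page behind a zpdesc and forward
to the existing page-level predicates, the same pattern the rest of
mm/zpdesc.h follows. Below is a minimal, self-contained userspace sketch
of that wrapper pattern; the types and fields here (my_page, my_zpdesc,
zone_id) are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct my_page {			/* stand-in for struct page */
	bool isolated;
	int zone_id;
};

struct my_zpdesc {			/* stand-in for struct zpdesc */
	struct my_page page;
};

/* stand-in for zpdesc_page(): recover the underlying page */
static inline struct my_page *zpdesc_page(struct my_zpdesc *zpdesc)
{
	return &zpdesc->page;
}

/* mirrors zpdesc_is_isolated(): forward to the page-level check */
static inline bool zpdesc_is_isolated(struct my_zpdesc *zpdesc)
{
	return zpdesc_page(zpdesc)->isolated;
}

/* mirrors zpdesc_zone(): forward to the page-level zone lookup */
static inline int zpdesc_zone(struct my_zpdesc *zpdesc)
{
	return zpdesc_page(zpdesc)->zone_id;
}

int main(void)
{
	struct my_zpdesc src = { .page = { .isolated = true,  .zone_id = 0 } };
	struct my_zpdesc dst = { .page = { .isolated = false, .zone_id = 1 } };

	/* the migration path only fixes up per-zone stats when zones differ */
	if (zpdesc_zone(&dst) != zpdesc_zone(&src))
		printf("zones differ: move NR_ZSPAGES accounting\n");

	printf("src isolated: %d\n", zpdesc_is_isolated(&src));
	return 0;
}

Keeping the page access behind a single zpdesc_page() accessor means
callers stay typed on zpdesc, so a later switch to a dedicated descriptor
type should only have to touch the accessors rather than every call site.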