Convert the functions for movable operations of zsmalloc (zs_page_isolate(),
zs_page_migrate() and zs_page_putback()) to use zsdesc.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5a3948cbe06f..ced7f144b884 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2181,14 +2181,15 @@ static void replace_sub_zsdesc(struct size_class *class, struct zspage *zspage,
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
 	struct zspage *zspage;
+	struct zsdesc *zsdesc = page_zsdesc(page);
 
 	/*
 	 * Page is locked so zspage couldn't be destroyed. For detail, look at
 	 * lock_zspage in free_zspage.
 	 */
-	VM_BUG_ON_PAGE(PageIsolated(page), page);
+	VM_BUG_ON_PAGE(zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(page);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 	migrate_write_lock(zspage);
 	inc_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
@@ -2203,6 +2204,8 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	struct size_class *class;
 	struct zspage *zspage;
 	struct zsdesc *dummy;
+	struct zsdesc *new_zsdesc = page_zsdesc(newpage);
+	struct zsdesc *zsdesc = page_zsdesc(page);
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
@@ -2217,10 +2220,10 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	if (mode == MIGRATE_SYNC_NO_COPY)
 		return -EINVAL;
 
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(page);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 	pool = zspage->pool;
 
 	/*
@@ -2233,30 +2236,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* the migrate_write_lock protects zpage access via zs_map_object */
 	migrate_write_lock(zspage);
 
-	offset = get_first_obj_offset(page);
-	s_addr = kmap_atomic(page);
+	offset = get_first_obj_offset(zsdesc_page(zsdesc));
+	s_addr = zsdesc_kmap_atomic(zsdesc);
 
 	/*
 	 * Here, any user cannot access all objects in the zspage so let's move.
 	 */
-	d_addr = kmap_atomic(newpage);
+	d_addr = zsdesc_kmap_atomic(new_zsdesc);
 	memcpy(d_addr, s_addr, PAGE_SIZE);
 	kunmap_atomic(d_addr);
 
 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
 					addr += class->size) {
-		if (obj_allocated(page_zsdesc(page), addr, &handle)) {
+		if (obj_allocated(zsdesc, addr, &handle)) {
 
 			old_obj = handle_to_obj(handle);
 			obj_to_location(old_obj, &dummy, &obj_idx);
-			new_obj = (unsigned long)location_to_obj(newpage,
+			new_obj = (unsigned long)location_to_obj(zsdesc_page(new_zsdesc),
 								obj_idx);
 			record_obj(handle, new_obj);
 		}
 	}
 	kunmap_atomic(s_addr);
 
-	replace_sub_zsdesc(class, zspage, page_zsdesc(newpage), page_zsdesc(page));
+	replace_sub_zsdesc(class, zspage, new_zsdesc, zsdesc);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release the pool's lock.
@@ -2265,14 +2268,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	dec_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
 
-	get_page(newpage);
-	if (page_zone(newpage) != page_zone(page)) {
-		dec_zone_page_state(page, NR_ZSPAGES);
-		inc_zone_page_state(newpage, NR_ZSPAGES);
+	zsdesc_get(new_zsdesc);
+	if (zsdesc_zone(new_zsdesc) != zsdesc_zone(zsdesc)) {
+		zsdesc_dec_zone_page_state(zsdesc);
+		zsdesc_inc_zone_page_state(new_zsdesc);
 	}
 
-	reset_zsdesc(page_zsdesc(page));
-	put_page(page);
+	reset_zsdesc(zsdesc);
+	zsdesc_put(zsdesc);
 
 	return MIGRATEPAGE_SUCCESS;
 }
@@ -2280,10 +2283,11 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 static void zs_page_putback(struct page *page)
 {
 	struct zspage *zspage;
+	struct zsdesc *zsdesc = page_zsdesc(page);
 
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
 
-	zspage = get_zspage(page);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 	migrate_write_lock(zspage);
 	dec_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
-- 
2.25.1
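
Note for readers of this patch in isolation: the zsdesc accessors used above
(page_zsdesc(), zsdesc_page(), zsdesc_is_isolated(), zsdesc_kmap_atomic(),
zsdesc_get(), zsdesc_put(), zsdesc_zone(), zsdesc_inc_zone_page_state() and
zsdesc_dec_zone_page_state()) are introduced by earlier patches in this
series. The sketch below is illustrative only, not the series' actual
definitions: it assumes struct zsdesc simply overlays struct page, so every
helper is a thin wrapper around the corresponding struct page API.

	/* Illustrative sketch, not the definitions from this series. */
	static inline struct zsdesc *page_zsdesc(struct page *page)
	{
		/* assumes struct zsdesc overlays struct page one-to-one */
		return (struct zsdesc *)page;
	}

	static inline struct page *zsdesc_page(struct zsdesc *zsdesc)
	{
		return (struct page *)zsdesc;
	}

	static inline bool zsdesc_is_isolated(struct zsdesc *zsdesc)
	{
		return PageIsolated(zsdesc_page(zsdesc));
	}

	static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
	{
		return kmap_atomic(zsdesc_page(zsdesc));
	}

	static inline void zsdesc_get(struct zsdesc *zsdesc)
	{
		get_page(zsdesc_page(zsdesc));
	}

	static inline void zsdesc_put(struct zsdesc *zsdesc)
	{
		put_page(zsdesc_page(zsdesc));
	}

	static inline struct zone *zsdesc_zone(struct zsdesc *zsdesc)
	{
		return page_zone(zsdesc_page(zsdesc));
	}

	static inline void zsdesc_inc_zone_page_state(struct zsdesc *zsdesc)
	{
		inc_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
	}

	static inline void zsdesc_dec_zone_page_state(struct zsdesc *zsdesc)
	{
		dec_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
	}

With wrappers of roughly this shape, the conversion in this patch is
mechanical: each struct page access in the migration callbacks is routed
through the corresponding zsdesc accessor without changing behaviour.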