From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

Introduce zpdesc_is_locked() and convert __free_zspage() to use zpdesc.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
---
 mm/zpdesc.h   |  4 ++++
 mm/zsmalloc.c | 20 ++++++++++----------
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 4b42d8517fcb..a1834d36ccfc 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -125,4 +125,8 @@ static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
 	return page_zone(zpdesc_page(zpdesc));
 }
 
+static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
+{
+	return PageLocked(zpdesc_page(zpdesc));
+}
 #endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6c5dccbc9a60..7b3344c226a0 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -895,23 +895,23 @@ static int trylock_zspage(struct zspage *zspage)
 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 			  struct zspage *zspage)
 {
-	struct page *page, *next;
+	struct zpdesc *zpdesc, *next;
 
 	assert_spin_locked(&class->lock);
 
 	VM_BUG_ON(get_zspage_inuse(zspage));
 	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
-	next = page = get_first_page(zspage);
+	next = zpdesc = get_first_zpdesc(zspage);
 	do {
-		VM_BUG_ON_PAGE(!PageLocked(page), page);
-		next = get_next_page(page);
-		reset_zpdesc(page_zpdesc(page));
-		unlock_page(page);
-		dec_zone_page_state(page, NR_ZSPAGES);
-		put_page(page);
-		page = next;
-	} while (page != NULL);
+		VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
+		next = get_next_zpdesc(zpdesc);
+		reset_zpdesc(zpdesc);
+		zpdesc_unlock(zpdesc);
+		zpdesc_dec_zone_page_state(zpdesc);
+		zpdesc_put(zpdesc);
+		zpdesc = next;
+	} while (zpdesc != NULL);
 
 	cache_free_zspage(pool, zspage);
 
-- 
2.43.0
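
For reviewers, a minimal usage sketch of the new helper (not part of the patch).
It assumes the existing get_first_zpdesc()/get_next_zpdesc() accessors referenced
in the hunk above; the function name zspage_all_zpdescs_locked() is hypothetical
and only illustrates the assertion pattern used in __free_zspage().

/*
 * Hypothetical helper, not part of the patch: walk a zspage's zpdescs
 * and report whether each one is locked, using zpdesc_is_locked()
 * introduced above. Mirrors the VM_BUG_ON_PAGE() check in the
 * converted __free_zspage() loop.
 */
static bool zspage_all_zpdescs_locked(struct zspage *zspage)
{
	struct zpdesc *zpdesc = get_first_zpdesc(zspage);

	while (zpdesc) {
		if (!zpdesc_is_locked(zpdesc))
			return false;
		zpdesc = get_next_zpdesc(zpdesc);
	}
	return true;
}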