After the common decompress part was moved into __zswap_load(), we can
clean up zswap_load() a little.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
 mm/zswap.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 667b66a3911b..50405811cd7b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1752,7 +1752,6 @@ bool zswap_load(struct folio *folio)
 	struct zswap_tree *tree = swap_zswap_tree(swp);
 	struct zswap_entry *entry;
 	u8 *dst;
-	bool ret;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
@@ -1769,19 +1768,16 @@ bool zswap_load(struct folio *folio)
 		dst = kmap_local_page(page);
 		zswap_fill_page(dst, entry->value);
 		kunmap_local(dst);
-		ret = true;
-		goto stats;
+	} else {
+		__zswap_load(entry, page);
 	}
 
-	__zswap_load(entry, page);
-	ret = true;
-stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
 	spin_lock(&tree->lock);
-	if (ret && zswap_exclusive_loads_enabled) {
+	if (zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		folio_mark_dirty(folio);
 	} else if (entry->length) {
@@ -1791,7 +1787,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	return ret;
+	return true;
 }
 
 void zswap_invalidate(int type, pgoff_t offset)
-- 
b4 0.10.1
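
Note for context: once the tree lookup has succeeded, both branches of
the load (the same-filled fill path and the decompress path) always
succeed, so the 'ret' flag was invariably true and the 'stats:' label
existed only to let the fill path skip the decompress call. An if/else
plus a constant 'return true' expresses the same flow directly.

Below is a minimal, self-contained userspace sketch of that cleanup
pattern. It is illustrative only: fill_same_filled() and decompress()
are hypothetical stand-ins for zswap_fill_page() and __zswap_load(),
and the locking and stats details of the real function are elided.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins so the sketch compiles and runs. */
static void fill_same_filled(int value) { printf("fill %d\n", value); }
static void decompress(void) { printf("decompress\n"); }

/* Before: a 'ret' flag plus 'goto stats' skips the decompress path. */
static bool load_before(int length, int value)
{
	bool ret;

	if (!length) {
		fill_same_filled(value);
		ret = true;
		goto stats;
	}

	decompress();
	ret = true;
stats:
	/* ... stats accounting would run here ... */
	return ret;
}

/* After: both branches succeed, so if/else and 'return true' suffice. */
static bool load_after(int length, int value)
{
	if (!length)
		fill_same_filled(value);
	else
		decompress();

	/* ... stats accounting ... */
	return true;
}

int main(void)
{
	load_before(0, 42);	/* same-filled page: fill path */
	load_after(4096, 0);	/* compressed page: decompress path */
	return 0;
}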