Also, after the common decompress part has moved into __zswap_load(), we can
clean up zswap_writeback_entry() a little.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
 mm/zswap.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 0476e1c553c2..9c709368a0e6 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1449,7 +1449,6 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         struct page *page;
         struct mempolicy *mpol;
         bool page_was_allocated;
-        int ret;
         struct writeback_control wbc = {
                 .sync_mode = WB_SYNC_NONE,
         };
@@ -1458,16 +1457,13 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         mpol = get_task_policy(current);
         page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
                                 NO_INTERLEAVE_INDEX, &page_was_allocated, true);
-        if (!page) {
-                ret = -ENOMEM;
-                goto fail;
-        }
+        if (!page)
+                return -ENOMEM;
 
         /* Found an existing page, we raced with load/swapin */
         if (!page_was_allocated) {
                 put_page(page);
-                ret = -EEXIST;
-                goto fail;
+                return -EEXIST;
         }
 
         /*
@@ -1481,8 +1477,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
                 spin_unlock(&tree->lock);
                 delete_from_swap_cache(page_folio(page));
-                ret = -ENOMEM;
-                goto fail;
+                return -ENOMEM;
         }
         spin_unlock(&tree->lock);
 
@@ -1503,15 +1498,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         __swap_writepage(page, &wbc);
         put_page(page);
 
-        return ret;
-
-fail:
-        /*
-         * If we get here because the page is already in swapcache, a
-         * load may be happening concurrently. It is safe and okay to
-         * not free the entry. It is also okay to return !0.
-         */
-        return ret;
+        return 0;
 }
 
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
-- 
b4 0.10.1
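
A short aside on the shape of this cleanup (not part of the patch): once the
decompression work lives in __zswap_load(), none of the early-exit paths in
zswap_writeback_entry() has any unwinding left to do, so the shared "fail:"
label only did "return ret". Each path can therefore return its error code
directly, and both "ret" and the label go away. A minimal standalone sketch of
that transformation, using made-up setup()/do_thing_*() helpers purely for
illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a step that can fail. */
static bool setup(void)
{
        return true;
}

/* Before: every error path funnels through a "fail:" label that does no cleanup. */
static int do_thing_old(void)
{
        int ret;

        if (!setup()) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = 0;
fail:
        return ret;
}

/* After: nothing to undo on error, so return directly and drop "ret" and "fail:". */
static int do_thing_new(void)
{
        if (!setup())
                return -ENOMEM;

        return 0;
}

int main(void)
{
        printf("old=%d new=%d\n", do_thing_old(), do_thing_new());
        return 0;
}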