Re: [PATCH 3/3] mm: zswap: kill zswap_get_swap_cache_page()

On Thu, Jul 27, 2023 at 9:23 AM Johannes Weiner <hannes@xxxxxxxxxxx> wrote:
>
> The __read_swap_cache_async() interface isn't more difficult to
> understand than what the helper abstracts. Save the indirection and a
> level of indentation for the primary work of the writeback func.
>
> Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>

Arguably any abstraction over the swap code is better than dealing with
the swap code directly, but I am not against this :P

The diffstat looks nice and the code looks correct to me.
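
For readers less familiar with __read_swap_cache_async(), the old
enum folds directly into the (page, page_was_allocated) pair the
function already returns. A minimal sketch of the mapping, distilled
from the hunks below (error paths simplified to plain returns; the
real code sets ret and jumps to the fail label):

	bool page_was_allocated;
	struct page *page;

	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
				       &page_was_allocated);
	if (!page)			/* was ZSWAP_SWAPCACHE_FAIL */
		return -ENOMEM;		/* no memory, or entry invalidated */
	if (!page_was_allocated) {	/* was ZSWAP_SWAPCACHE_EXIST */
		put_page(page);		/* raced with load/swapin */
		return -EEXIST;
	}
	/* was ZSWAP_SWAPCACHE_NEW: new page, locked, ours to fill */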

Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>

> ---
>  mm/zswap.c | 142 ++++++++++++++++++++---------------------------------
>  1 file changed, 53 insertions(+), 89 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index e34ac89e6098..bba4ead672be 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1016,43 +1016,6 @@ static int zswap_enabled_param_set(const char *val,
>  /*********************************
>  * writeback code
>  **********************************/
> -/* return enum for zswap_get_swap_cache_page */
> -enum zswap_get_swap_ret {
> -       ZSWAP_SWAPCACHE_NEW,
> -       ZSWAP_SWAPCACHE_EXIST,
> -       ZSWAP_SWAPCACHE_FAIL,
> -};
> -
> -/*
> - * zswap_get_swap_cache_page
> - *
> - * This is an adaption of read_swap_cache_async()
> - *
> - * This function tries to find a page with the given swap entry
> - * in the swapper_space address space (the swap cache).  If the page
> - * is found, it is returned in retpage.  Otherwise, a page is allocated,
> - * added to the swap cache, and returned in retpage.
> - *
> - * If success, the swap cache page is returned in retpage
> - * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
> - * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
> - *     the new page is added to swapcache and locked
> - * Returns ZSWAP_SWAPCACHE_FAIL on error
> - */
> -static int zswap_get_swap_cache_page(swp_entry_t entry,
> -                               struct page **retpage)
> -{
> -       bool page_was_allocated;
> -
> -       *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
> -                       NULL, 0, &page_was_allocated);
> -       if (page_was_allocated)
> -               return ZSWAP_SWAPCACHE_NEW;
> -       if (!*retpage)
> -               return ZSWAP_SWAPCACHE_FAIL;
> -       return ZSWAP_SWAPCACHE_EXIST;
> -}
> -
>  /*
>   * Attempts to free an entry by adding a page to the swap cache,
>   * decompressing the entry data into the page, and issuing a
> @@ -1073,7 +1036,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>         struct scatterlist input, output;
>         struct crypto_acomp_ctx *acomp_ctx;
>         struct zpool *pool = entry->pool->zpool;
> -
> +       bool page_was_allocated;
>         u8 *src, *tmp = NULL;
>         unsigned int dlen;
>         int ret;
> @@ -1088,65 +1051,66 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>         }
>
>         /* try to allocate swap cache page */
> -       switch (zswap_get_swap_cache_page(swpentry, &page)) {
> -       case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
> +       page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
> +                                      &page_was_allocated);
> +       if (!page) {
>                 ret = -ENOMEM;
>                 goto fail;
> +       }
>
> -       case ZSWAP_SWAPCACHE_EXIST:
> -               /* page is already in the swap cache, ignore for now */
> +       /* Found an existing page, we raced with load/swapin */
> +       if (!page_was_allocated) {
>                 put_page(page);
>                 ret = -EEXIST;
>                 goto fail;
> +       }
>
> -       case ZSWAP_SWAPCACHE_NEW: /* page is locked */
> -               /*
> -                * Having a local reference to the zswap entry doesn't exclude
> -                * swapping from invalidating and recycling the swap slot. Once
> -                * the swapcache is secured against concurrent swapping to and
> -                * from the slot, recheck that the entry is still current before
> -                * writing.
> -                */
> -               spin_lock(&tree->lock);
> -               if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
> -                       spin_unlock(&tree->lock);
> -                       delete_from_swap_cache(page_folio(page));
> -                       ret = -ENOMEM;
> -                       goto fail;
> -               }
> +       /*
> +        * Page is locked, and the swapcache is now secured against
> +        * concurrent swapping to and from the slot. Verify that the
> +        * swap entry hasn't been invalidated and recycled behind our
> +        * backs (our zswap_entry reference doesn't prevent that), to
> +        * avoid overwriting a new swap page with old compressed data.
> +        */
> +       spin_lock(&tree->lock);
> +       if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
>                 spin_unlock(&tree->lock);
> +               delete_from_swap_cache(page_folio(page));
> +               ret = -ENOMEM;
> +               goto fail;
> +       }
> +       spin_unlock(&tree->lock);
>
> -               /* decompress */
> -               acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> -               dlen = PAGE_SIZE;
> +       /* decompress */
> +       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> +       dlen = PAGE_SIZE;
>
> -               src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
> -               if (!zpool_can_sleep_mapped(pool)) {
> -                       memcpy(tmp, src, entry->length);
> -                       src = tmp;
> -                       zpool_unmap_handle(pool, entry->handle);
> -               }
> +       src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
> +       if (!zpool_can_sleep_mapped(pool)) {
> +               memcpy(tmp, src, entry->length);
> +               src = tmp;
> +               zpool_unmap_handle(pool, entry->handle);
> +       }
>
> -               mutex_lock(acomp_ctx->mutex);
> -               sg_init_one(&input, src, entry->length);
> -               sg_init_table(&output, 1);
> -               sg_set_page(&output, page, PAGE_SIZE, 0);
> -               acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
> -               ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> -               dlen = acomp_ctx->req->dlen;
> -               mutex_unlock(acomp_ctx->mutex);
> -
> -               if (!zpool_can_sleep_mapped(pool))
> -                       kfree(tmp);
> -               else
> -                       zpool_unmap_handle(pool, entry->handle);
> +       mutex_lock(acomp_ctx->mutex);
> +       sg_init_one(&input, src, entry->length);
> +       sg_init_table(&output, 1);
> +       sg_set_page(&output, page, PAGE_SIZE, 0);
> +       acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
> +       ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> +       dlen = acomp_ctx->req->dlen;
> +       mutex_unlock(acomp_ctx->mutex);
> +
> +       if (!zpool_can_sleep_mapped(pool))
> +               kfree(tmp);
> +       else
> +               zpool_unmap_handle(pool, entry->handle);
>
> -               BUG_ON(ret);
> -               BUG_ON(dlen != PAGE_SIZE);
> +       BUG_ON(ret);
> +       BUG_ON(dlen != PAGE_SIZE);
>
> -               /* page is up to date */
> -               SetPageUptodate(page);
> -       }
> +       /* page is up to date */
> +       SetPageUptodate(page);
>
>         /* move it to the tail of the inactive list after end_writeback */
>         SetPageReclaim(page);
> @@ -1157,16 +1121,16 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>         zswap_written_back_pages++;
>
>         return ret;
> +
>  fail:
>         if (!zpool_can_sleep_mapped(pool))
>                 kfree(tmp);
>
>         /*
> -       * if we get here due to ZSWAP_SWAPCACHE_EXIST
> -       * a load may be happening concurrently.
> -       * it is safe and okay to not free the entry.
> -       * it is also okay to return !0
> -       */
> +        * If we get here because the page is already in swapcache, a
> +        * load may be happening concurrently. It is safe and okay to
> +        * not free the entry. It is also okay to return !0.
> +        */
>         return ret;
>  }
>
> --
> 2.41.0
>
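
As an aside, for anyone wondering why the recheck under tree->lock is
still needed once the page sits in the swapcache: holding a
zswap_entry reference does not pin the swap slot itself, so the slot
can be freed and recycled underneath us. Simplified from the hunk
above (again with the fail path written as a plain return):

	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot,
			    swp_offset(entry->swpentry)) != entry) {
		/* slot was invalidated and recycled; don't write stale data */
		spin_unlock(&tree->lock);
		delete_from_swap_cache(page_folio(page));
		return -ENOMEM;
	}
	spin_unlock(&tree->lock);
	/* entry is still current; safe to decompress into the page */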
