Re: [PATCH] mm: swap: async free swap slot cache entries

On Thu, Dec 21, 2023 at 10:25 PM Chris Li <chrisl@xxxxxxxxxx> wrote:
>
> We discovered that 1% of swap page faults take 100us+ while 50% of
> swap faults complete in under 20us.
>
> Further investigation shows that for the long-tail cases, a large
> portion of the time is spent in the free_swap_slots() function.
>
> The percpu cache of swap slots is freed in a batch of 64 entries
> inside free_swap_slots(). These cache entries are accumulated
> from previous page faults, which may not be related to the current
> process.
>
> Doing the batch free in the page fault handler causes longer
> tail latencies and penalizes the current process.
>
> Move free_swap_slots() outside of the swapin page fault handler into an
> async work queue to avoid such long tail latencies.
>
> Testing:
>
> Chun-Tse ran some benchmarks on a Chromebook, showing that
> zram_wait_metrics improves by about 15% with 80% and 95% confidence.
>
> I recently ran some experiments on about 1000 Google production
> machines. They show that swapin latency in the long-tail
> 100us - 500us bucket drops dramatically.
>
> platform        (100-500us)             (0-100us)
> A               1.12% -> 0.36%          98.47% -> 99.22%
> B               0.65% -> 0.15%          98.96% -> 99.46%
> C               0.61% -> 0.23%          98.96% -> 99.38%

Nice! Are these values for zram as well, or ordinary (SSD?) swap? I
imagine it will matter less for ordinary swap, right?

>
> Signed-off-by: Chris Li <chrisl@xxxxxxxxxx>
> To: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: linux-kernel@xxxxxxxxxxxxxxx
> Cc: linux-mm@xxxxxxxxx
> Cc: Wei Xu <weixugc@xxxxxxxxxx>
> Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
> Cc: Greg Thelen <gthelen@xxxxxxxxxx>
> Cc: Chun-Tse Shao <ctshao@xxxxxxxxxx>
> Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
> Cc: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
> Cc: Brian Geffon <bgeffon@xxxxxxxxxx>
> Cc: Minchan Kim <minchan@xxxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
> Cc: Huang Ying <ying.huang@xxxxxxxxx>
> Cc: Nhat Pham <nphamcs@xxxxxxxxx>
> Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
> Cc: Kairui Song <kasong@xxxxxxxxxxx>
> Cc: Zhongkun He <hezhongkun.hzk@xxxxxxxxxxxxx>
> Cc: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx>
> Cc: Barry Song <v-songbaohua@xxxxxxxx>
> ---
>  include/linux/swap_slots.h |  1 +
>  mm/swap_slots.c            | 37 +++++++++++++++++++++++++++++--------
>  2 files changed, 30 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
> index 15adfb8c813a..67bc8fa30d63 100644
> --- a/include/linux/swap_slots.h
> +++ b/include/linux/swap_slots.h
> @@ -19,6 +19,7 @@ struct swap_slots_cache {
>         spinlock_t      free_lock;  /* protects slots_ret, n_ret */
>         swp_entry_t     *slots_ret;
>         int             n_ret;
> +       struct work_struct async_free;
>  };
>
>  void disable_swap_slots_cache_lock(void);
> diff --git a/mm/swap_slots.c b/mm/swap_slots.c
> index 0bec1f705f8e..a3b306550732 100644
> --- a/mm/swap_slots.c
> +++ b/mm/swap_slots.c
> @@ -42,8 +42,10 @@ static bool  swap_slot_cache_initialized;
>  static DEFINE_MUTEX(swap_slots_cache_mutex);
>  /* Serialize swap slots cache enable/disable operations */
>  static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
> +static struct workqueue_struct *swap_free_queue;
>
>  static void __drain_swap_slots_cache(unsigned int type);
> +static void swapcache_async_free_entries(struct work_struct *data);
>
>  #define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
>  #define SLOTS_CACHE 0x1
> @@ -149,6 +151,7 @@ static int alloc_swap_slot_cache(unsigned int cpu)
>                 spin_lock_init(&cache->free_lock);
>                 cache->lock_initialized = true;
>         }
> +       INIT_WORK(&cache->async_free, swapcache_async_free_entries);
>         cache->nr = 0;
>         cache->cur = 0;
>         cache->n_ret = 0;
> @@ -269,6 +272,20 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
>         return cache->nr;
>  }
>
> +static void swapcache_async_free_entries(struct work_struct *data)
> +{
> +       struct swap_slots_cache *cache;
> +
> +       cache = container_of(data, struct swap_slots_cache, async_free);
> +       spin_lock_irq(&cache->free_lock);
> +       /* Swap slots cache may be deactivated before acquiring lock */
> +       if (cache->slots_ret) {
> +               swapcache_free_entries(cache->slots_ret, cache->n_ret);
> +               cache->n_ret = 0;
> +       }
> +       spin_unlock_irq(&cache->free_lock);
> +}
> +
>  void free_swap_slot(swp_entry_t entry)
>  {
>         struct swap_slots_cache *cache;
> @@ -282,17 +299,14 @@ void free_swap_slot(swp_entry_t entry)
>                         goto direct_free;
>                 }
>                 if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
> -                       /*
> -                        * Return slots to global pool.
> -                        * The current swap_map value is SWAP_HAS_CACHE.
> -                        * Set it to 0 to indicate it is available for
> -                        * allocation in global pool
> -                        */
> -                       swapcache_free_entries(cache->slots_ret, cache->n_ret);
> -                       cache->n_ret = 0;
> +                       spin_unlock_irq(&cache->free_lock);
> +                       queue_work(swap_free_queue, &cache->async_free);
> +                       goto direct_free;
>                 }
>                 cache->slots_ret[cache->n_ret++] = entry;
>                 spin_unlock_irq(&cache->free_lock);
> +               if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE)
> +                       queue_work(swap_free_queue, &cache->async_free);
>         } else {
>  direct_free:
>                 swapcache_free_entries(&entry, 1);
> @@ -348,3 +362,10 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
>         }
>         return entry;
>  }
> +
> +static int __init async_queue_init(void)
> +{
> +       swap_free_queue = create_workqueue("async swap cache");

nit(?): isn't create_workqueue() deprecated? See:

https://www.kernel.org/doc/html/latest/core-api/workqueue.html#application-programming-interface-api

I think there's a zswap patch proposing to fix that on the zswap side.
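If so, a minimal sketch of what an alloc_workqueue() variant could look
like (the queue name and the WQ_UNBOUND | WQ_MEM_RECLAIM flags are just
my assumptions: the batch free isn't latency sensitive, and since it
sits on the swap path it probably wants the reclaim forward-progress
guarantee):

static int __init async_queue_init(void)
{
	/*
	 * alloc_workqueue() is the non-deprecated interface; the name,
	 * flags and max_active (0 == default) here are assumptions on
	 * my part, not taken from the patch.
	 */
	swap_free_queue = alloc_workqueue("swap_slots_free",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!swap_free_queue)
		return -ENOMEM;
	return 0;
}
subsys_initcall(async_queue_init);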

> +       return 0;
> +}
> +subsys_initcall(async_queue_init);
>
> ---
> base-commit: eacce8189e28717da6f44ee492b7404c636ae0de
> change-id: 20231216-async-free-bef392015432
>
> Best regards,
> --
> Chris Li <chrisl@xxxxxxxxxx>
>




