Re: [PATCH v3 1/2] mm: store zero pages to be swapped out in a bitmap

On 11/06/2024 19:39, Nhat Pham wrote:
On Mon, Jun 10, 2024 at 5:18 AM Usama Arif <usamaarif642@xxxxxxxxx> wrote:
Approximately 10-20% of pages to be swapped out are zero pages [1].
Rather than reading/writing these pages to flash, resulting
in increased I/O and flash wear, a bitmap can be used to mark these
pages as zero at write time, and the pages can be filled with zeros at
read time if the bit corresponding to the page is set.
With this patch, NVMe writes in the Meta server fleet decreased
by almost 10% with a conventional swap setup (zswap disabled).

[1] https://lore.kernel.org/all/20171018104832epcms5p1b2232e2236258de3d03d1344dde9fce0@epcms5p1/
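
Since the mechanism is spread across several hunks below, here is a minimal
userspace sketch of the idea, assuming a toy zeromap bitmap and illustrative
helper names (swap_write_page()/swap_read_page() are not the kernel
functions): zero pages never reach the device, they are only marked in the
bitmap and reconstructed by zero-filling on read.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define NR_SLOTS	1024
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long zeromap[NR_SLOTS / BITS_PER_LONG];

static bool page_is_zero(const unsigned char *page)
{
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		if (page[i])
			return false;
	return true;
}

static void zeromap_set(unsigned int slot, bool val)
{
	unsigned long mask = 1UL << (slot % BITS_PER_LONG);

	if (val)
		zeromap[slot / BITS_PER_LONG] |= mask;
	else
		zeromap[slot / BITS_PER_LONG] &= ~mask;
}

static bool zeromap_test(unsigned int slot)
{
	return zeromap[slot / BITS_PER_LONG] & (1UL << (slot % BITS_PER_LONG));
}

/* "writeback": zero pages never reach the device, only the bitmap. */
static void swap_write_page(unsigned int slot, const unsigned char *page)
{
	if (page_is_zero(page)) {
		zeromap_set(slot, true);	/* no I/O, no flash wear */
		return;
	}
	zeromap_set(slot, false);		/* clear any stale bit */
	/* ... submit real I/O for non-zero pages here ... */
}

/* "swap-in": zero-fill if the bit is set, otherwise caller does real I/O. */
static bool swap_read_page(unsigned int slot, unsigned char *page)
{
	if (!zeromap_test(slot))
		return false;
	memset(page, 0, PAGE_SIZE);
	return true;
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];	/* all zeros */
	unsigned char out[PAGE_SIZE];

	swap_write_page(3, page);
	printf("slot 3 served from zeromap: %d\n", swap_read_page(3, out));
	return 0;
}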

Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
---
  include/linux/swap.h |  1 +
  mm/page_io.c         | 92 +++++++++++++++++++++++++++++++++++++++++++-
  mm/swapfile.c        | 21 +++++++++-
  3 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index a11c75e897ec..e88563978441 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -299,6 +299,7 @@ struct swap_info_struct {
         signed char     type;           /* strange name for an index */
         unsigned int    max;            /* extent of the swap_map */
         unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
+       unsigned long *zeromap;         /* vmalloc'ed bitmap to track zero pages */
         struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
         struct swap_cluster_list free_clusters; /* free clusters list */
         unsigned int lowest_bit;        /* index of first free in swap_map */
diff --git a/mm/page_io.c b/mm/page_io.c
index a360857cf75d..2cac1e11fb85 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -172,6 +172,82 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
         goto out;
  }

+static bool is_folio_page_zero_filled(struct folio *folio, int i)
+{
+       unsigned long *data;
+       unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
+       bool ret = false;
+
+       data = kmap_local_folio(folio, i * PAGE_SIZE);
+       if (data[last_pos])
+               goto out;
+       for (pos = 0; pos < PAGE_SIZE / sizeof(*data); pos++) {
+               if (data[pos])
+                       goto out;
+       }
+       ret = true;
+out:
+       kunmap_local(data);
+       return ret;
+}
+
+static bool is_folio_zero_filled(struct folio *folio)
+{
+       unsigned int i;
+
+       for (i = 0; i < folio_nr_pages(folio); i++) {
+               if (!is_folio_page_zero_filled(folio, i))
+                       return false;
+       }
+       return true;
+}
+
+static void folio_zero_fill(struct folio *folio)
+{
+       unsigned int i;
+
+       for (i = 0; i < folio_nr_pages(folio); i++)
+               clear_highpage(folio_page(folio, i));
+}
+
+static void swap_zeromap_folio_set(struct folio *folio)
+{
+       struct swap_info_struct *sis = swp_swap_info(folio->swap);
+       swp_entry_t entry;
+       unsigned int i;
+
+       for (i = 0; i < folio_nr_pages(folio); i++) {
+               entry = page_swap_entry(folio_page(folio, i));
+               set_bit(swp_offset(entry), sis->zeromap);
+       }
+}
+
+static void swap_zeromap_folio_clear(struct folio *folio)
+{
+       struct swap_info_struct *sis = swp_swap_info(folio->swap);
+       swp_entry_t entry;
+       unsigned int i;
+
+       for (i = 0; i < folio_nr_pages(folio); i++) {
+               entry = page_swap_entry(folio_page(folio, i));
+               clear_bit(swp_offset(entry), sis->zeromap);
+       }
+}
+
+static bool swap_zeromap_folio_test(struct folio *folio)
+{
+       struct swap_info_struct *sis = swp_swap_info(folio->swap);
+       swp_entry_t entry;
+       unsigned int i;
+
+       for (i = 0; i < folio_nr_pages(folio); i++) {
+               entry = page_swap_entry(folio_page(folio, i));
+               if (!test_bit(swp_offset(entry), sis->zeromap))
+                       return false;
+       }
+       return true;
+}
+
  /*
   * We may have stale swap cache pages in memory: notice
   * them here and get rid of the unnecessary final write.
@@ -195,6 +271,15 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                 folio_unlock(folio);
                 return ret;
         }
+
+       if (is_folio_zero_filled(folio)) {
+               swap_zeromap_folio_set(folio);
+               folio_start_writeback(folio);
+               folio_unlock(folio);
+               folio_end_writeback(folio);
+               return 0;
+       }
+       swap_zeromap_folio_clear(folio);
         if (zswap_store(folio)) {
                 folio_start_writeback(folio);
                 folio_unlock(folio);
@@ -515,8 +600,11 @@ void swap_read_folio(struct folio *folio, bool synchronous,
                 psi_memstall_enter(&pflags);
         }
         delayacct_swapin_start();
-
-       if (zswap_load(folio)) {
+       if (swap_zeromap_folio_test(folio)) {
+               folio_zero_fill(folio);
+               folio_mark_uptodate(folio);
+               folio_unlock(folio);
+       } else if (zswap_load(folio)) {
                 folio_mark_uptodate(folio);
                 folio_unlock(folio);
         } else if (data_race(sis->flags & SWP_FS_OPS)) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f1e559e216bd..90451174fe34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -453,6 +453,8 @@ static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
  static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                 unsigned int idx)
  {
+       unsigned int i;
+
         /*
          * If scan_swap_map_slots() can't find a free cluster, it will check
          * si->swap_map directly. To make sure the discarding cluster isn't
@@ -461,6 +463,13 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
          */
         memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                         SWAP_MAP_BAD, SWAPFILE_CLUSTER);
+       /*
+        * zeromap can see updates from concurrent swap_writepage() and swap_read_folio()
+        * call on other slots, hence use atomic clear_bit for zeromap instead of the
+        * non-atomic bitmap_clear.
+        */
+       for (i = 0; i < SWAPFILE_CLUSTER; i++)
+               clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);

         cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

@@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
  static void swap_do_scheduled_discard(struct swap_info_struct *si)
  {
         struct swap_cluster_info *info, *ci;
-       unsigned int idx;
+       unsigned int idx, i;

         info = si->cluster_info;

@@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
                 __free_cluster(si, idx);
                 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                                 0, SWAPFILE_CLUSTER);
+               for (i = 0; i < SWAPFILE_CLUSTER; i++)
+                       clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
                 unlock_cluster(ci);
         }
  }
@@ -1336,6 +1347,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
         count = p->swap_map[offset];
         VM_BUG_ON(count != SWAP_HAS_CACHE);
         p->swap_map[offset] = 0;
+       clear_bit(offset, p->zeromap);
Hmm so clear_bit() is done at the swap_entry_free() point. I wonder if
we can have a problem, where:

1. The swap entry has its zeromap bit set, and is freed to the swap
slot cache (free_swap_slot() in mm/swap_slots.c). For instance, it is
reclaimed from the swap cache, and all the processes referring to it
are terminated, which decrements the swap count to 0 (swap_free() ->
__swap_entry_free() -> free_swap_slots())

2. The swap slot is then re-used in swap space allocation
(add_to_swap()) - its zeromap bit is never cleared.

3. swap_writepage() writes that non-zero page to swap

In swap_writepage, with this patch you have:

    if (is_folio_zero_filled(folio)) {
        swap_zeromap_folio_set(folio);
        folio_unlock(folio);
        return 0;
    }
    swap_zeromap_folio_clear(folio);

i.e. if the folio is not zero-filled, swap_zeromap_folio_clear() is called and the bit is cleared, so I think that takes care of this scenario? swap_read_folio() will then see the bit cleared in step 4.

4. swap_read_folio() checks the bitmap, sees that the zeromap bit for
the entry is set, so populates a zero page for it.

zswap in the past had to invalidate these leftover entries quite
carefully. Chengming then moved the invalidation point to
free_swap_slot(), massively simplifying the logic.

I wonder if we need to do the same here?
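
To make the sequence above concrete, here is a hedged, self-contained
userspace model of the reuse scenario (single slot, illustrative names, not
the kernel code): a stale bit left behind by a freed zero page is harmless
provided the writeback path for the new, non-zero occupant clears the bit
before the slot can be read again.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096

static bool zeromap_bit;	/* one swap slot, one zeromap bit */

static void writepage_model(const unsigned char *page)
{
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (page[i]) {
			/* Non-zero page: clear any stale bit *before* the
			 * slot becomes readable again, then do the real
			 * write.
			 */
			zeromap_bit = false;
			return;
		}
	}
	zeromap_bit = true;	/* zero page: no I/O at all */
}

static bool readpage_is_zero_filled(void)
{
	return zeromap_bit;	/* caller zero-fills instead of doing I/O */
}

int main(void)
{
	static unsigned char zero_page[PAGE_SIZE];		/* all zeros */
	static unsigned char data_page[PAGE_SIZE] = { 0xab };	/* non-zero */

	writepage_model(zero_page);	/* 1. bit set for the zero page    */
	/* 2.-3. slot freed and reused before the bit is cleared */
	writepage_model(data_page);	/* writeback clears the stale bit  */
	assert(!readpage_is_zero_filled());	/* 4. real swap-in, not zeros */
	return 0;
}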



