[merged mm-stable] mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry.patch removed from -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The quilt patch titled
     Subject: mm/swap: inline folio_set_swap_entry() and folio_swap_entry()
has been removed from the -mm tree.  Its filename was
     mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: David Hildenbrand <david@xxxxxxxxxx>
Subject: mm/swap: inline folio_set_swap_entry() and folio_swap_entry()
Date: Mon, 21 Aug 2023 18:08:48 +0200

Let's simply work on the folio directly and remove the helpers.

Link: https://lkml.kernel.org/r/20230821160849.531668-4-david@xxxxxxxxxx
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
Suggested-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Reviewed-by: Chris Li <chrisl@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Seth Jennings <sjenning@xxxxxxxxxx>
Cc: Vitaly Wool <vitaly.wool@xxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/swap.h |   12 +-----------
 mm/memory.c          |    2 +-
 mm/shmem.c           |    6 +++---
 mm/swap_state.c      |    7 +++----
 mm/swapfile.c        |    2 +-
 mm/util.c            |    2 +-
 mm/vmscan.c          |    2 +-
 mm/zswap.c           |    4 ++--
 8 files changed, 13 insertions(+), 24 deletions(-)

--- a/include/linux/swap.h~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/include/linux/swap.h
@@ -333,25 +333,15 @@ struct swap_info_struct {
 					   */
 };
 
-static inline swp_entry_t folio_swap_entry(struct folio *folio)
-{
-	return folio->swap;
-}
-
 static inline swp_entry_t page_swap_entry(struct page *page)
 {
 	struct folio *folio = page_folio(page);
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 
 	entry.val += folio_page_idx(folio, page);
 	return entry;
 }
 
-static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
-{
-	folio->swap = entry;
-}
-
 /* linux/mm/workingset.c */
 bool workingset_test_recent(void *shadow, bool file, bool *workingset);
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
--- a/mm/memory.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/memory.c
@@ -3828,7 +3828,7 @@ vm_fault_t do_swap_page(struct vm_fault
 				folio_add_lru(folio);
 
 				/* To provide entry to swap_readpage() */
-				folio_set_swap_entry(folio, entry);
+				folio->swap = entry;
 				swap_readpage(page, true, NULL);
 				folio->private = NULL;
 			}
--- a/mm/shmem.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/shmem.c
@@ -1642,7 +1642,7 @@ static int shmem_replace_folio(struct fo
 	int error;
 
 	old = *foliop;
-	entry = folio_swap_entry(old);
+	entry = old->swap;
 	swap_index = swp_offset(entry);
 	swap_mapping = swap_address_space(entry);
 
@@ -1663,7 +1663,7 @@ static int shmem_replace_folio(struct fo
 	__folio_set_locked(new);
 	__folio_set_swapbacked(new);
 	folio_mark_uptodate(new);
-	folio_set_swap_entry(new, entry);
+	new->swap = entry;
 	folio_set_swapcache(new);
 
 	/*
@@ -1785,7 +1785,7 @@ static int shmem_swapin_folio(struct ino
 	/* We have to do this with folio locked to prevent races */
 	folio_lock(folio);
 	if (!folio_test_swapcache(folio) ||
-	    folio_swap_entry(folio).val != swap.val ||
+	    folio->swap.val != swap.val ||
 	    !shmem_confirm_swap(mapping, index, swap)) {
 		error = -EEXIST;
 		goto unlock;
--- a/mm/swapfile.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/swapfile.c
@@ -1536,7 +1536,7 @@ unlock_out:
 
 static bool folio_swapped(struct folio *folio)
 {
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 	struct swap_info_struct *si = _swap_info_get(entry);
 
 	if (!si)
--- a/mm/swap_state.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/swap_state.c
@@ -100,7 +100,7 @@ int add_to_swap_cache(struct folio *foli
 
 	folio_ref_add(folio, nr);
 	folio_set_swapcache(folio);
-	folio_set_swap_entry(folio, entry);
+	folio->swap = entry;
 
 	do {
 		xas_lock_irq(&xas);
@@ -156,8 +156,7 @@ void __delete_from_swap_cache(struct fol
 		VM_BUG_ON_PAGE(entry != folio, entry);
 		xas_next(&xas);
 	}
-	entry.val = 0;
-	folio_set_swap_entry(folio, entry);
+	folio->swap.val = 0;
 	folio_clear_swapcache(folio);
 	address_space->nrpages -= nr;
 	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
@@ -233,7 +232,7 @@ fail:
  */
 void delete_from_swap_cache(struct folio *folio)
 {
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
--- a/mm/util.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/util.c
@@ -764,7 +764,7 @@ struct address_space *folio_mapping(stru
 		return NULL;
 
 	if (unlikely(folio_test_swapcache(folio)))
-		return swap_address_space(folio_swap_entry(folio));
+		return swap_address_space(folio->swap);
 
 	mapping = folio->mapping;
 	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
--- a/mm/vmscan.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/vmscan.c
@@ -1423,7 +1423,7 @@ static int __remove_mapping(struct addre
 	}
 
 	if (folio_test_swapcache(folio)) {
-		swp_entry_t swap = folio_swap_entry(folio);
+		swp_entry_t swap = folio->swap;
 
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(folio, target_memcg);
--- a/mm/zswap.c~mm-swap-inline-folio_set_swap_entry-and-folio_swap_entry
+++ a/mm/zswap.c
@@ -1190,7 +1190,7 @@ static void zswap_fill_page(void *ptr, u
 
 bool zswap_store(struct folio *folio)
 {
-	swp_entry_t swp = folio_swap_entry(folio);
+	swp_entry_t swp = folio->swap;
 	int type = swp_type(swp);
 	pgoff_t offset = swp_offset(swp);
 	struct page *page = &folio->page;
@@ -1370,7 +1370,7 @@ shrink:
 
 bool zswap_load(struct folio *folio)
 {
-	swp_entry_t swp = folio_swap_entry(folio);
+	swp_entry_t swp = folio->swap;
 	int type = swp_type(swp);
 	pgoff_t offset = swp_offset(swp);
 	struct page *page = &folio->page;
_

Patches currently in -mm which might be from david@xxxxxxxxxx are





[Index of Archives]     [Kernel Archive]     [IETF Announce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux