From: Kairui Song <kasong@xxxxxxxxxxx>

No feature change; this moves the no-readahead (SWP_SYNCHRONOUS_IO)
swapin code out of do_swap_page() into a standalone helper so it can
be reused later. The error path handling is copied from the "out_page"
label to keep the code change minimal and easier to review.

Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
 mm/memory.c     | 32 ++++----------------------------
 mm/swap.h       |  8 ++++++++
 mm/swap_state.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+), 28 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 7e1f4849463a..81dc9d467f4e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3803,7 +3803,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	swp_entry_t entry;
 	pte_t pte;
 	vm_fault_t ret = 0;
-	void *shadow = NULL;
 
 	if (!pte_unmap_same(vmf))
 		goto out;
@@ -3867,33 +3866,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
-			/* skip swapcache */
-			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
-						vma, vmf->address, false);
-			page = &folio->page;
-			if (folio) {
-				__folio_set_locked(folio);
-				__folio_set_swapbacked(folio);
-
-				if (mem_cgroup_swapin_charge_folio(folio,
-							vma->vm_mm, GFP_KERNEL,
-							entry)) {
-					ret = VM_FAULT_OOM;
-					goto out_page;
-				}
-				mem_cgroup_swapin_uncharge_swap(entry);
-
-				shadow = get_shadow_from_swap_cache(entry);
-				if (shadow)
-					workingset_refault(folio, shadow);
-
-				folio_add_lru(folio);
-
-				/* To provide entry to swap_read_folio() */
-				folio->swap = entry;
-				swap_read_folio(folio, true, NULL);
-				folio->private = NULL;
-			}
+			/* skip swapcache and readahead */
+			folio = swapin_direct(entry, GFP_HIGHUSER_MOVABLE, vmf);
+			if (folio)
+				page = &folio->page;
 		} else {
 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						vmf);
diff --git a/mm/swap.h b/mm/swap.h
index 758c46ca671e..83eab7b67e77 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -56,6 +56,8 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 		struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			      struct vm_fault *vmf);
+struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
+			    struct vm_fault *vmf);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
@@ -86,6 +88,12 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
 	return NULL;
 }
 
+static inline struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
+			struct vm_fault *vmf)
+{
+	return NULL;
+}
+
 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 			struct vm_fault *vmf)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e671266ad772..645f5bcad123 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -861,6 +861,53 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	return folio;
 }
 
+/**
+ * swapin_direct - swap in a folio skipping swap cache and readahead
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct folio for @entry after the swap entry is read
+ * in.
+ */
+struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
+			    struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
+	void *shadow = NULL;
+
+	/* skip swapcache */
+	folio = vma_alloc_folio(gfp_mask, 0,
+				vma, vmf->address, false);
+	if (folio) {
+		__folio_set_locked(folio);
+		__folio_set_swapbacked(folio);
+
+		if (mem_cgroup_swapin_charge_folio(folio,
+					vma->vm_mm, GFP_KERNEL,
+					entry)) {
+			folio_unlock(folio);
+			folio_put(folio);
+			return NULL;
+		}
+		mem_cgroup_swapin_uncharge_swap(entry);
+
+		shadow = get_shadow_from_swap_cache(entry);
+		if (shadow)
+			workingset_refault(folio, shadow);
+
+		folio_add_lru(folio);
+
+		/* To provide entry to swap_read_folio() */
+		folio->swap = entry;
+		swap_read_folio(folio, true, NULL);
+		folio->private = NULL;
+	}
+
+	return folio;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
--
2.43.0