Re: [PATCH 18/59] mm: Convert do_swap_page()'s swapcache variable to a folio

On Mon, 8 Aug 2022, Matthew Wilcox (Oracle) wrote:

> The 'swapcache' variable is used to track whether the page is from the
> swapcache or not.  It can do this equally well by being the folio of
> the page rather than the page itself, and this saves a number of calls
> to compound_head().
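
(A minimal sketch of the saving described above, not taken from the
patch itself: page-based helpers such as lock_page() and unlock_page()
re-derive the head page via page_folio()/compound_head() on every call,
whereas resolving the folio once lets the folio_*() variants skip that
lookup:

	struct folio *folio = page_folio(page);	/* one compound_head() */

	folio_lock(folio);	/* works on the folio directly */
	folio_unlock(folio);	/* no hidden head-page lookup */

versus lock_page(page)/unlock_page(page), each of which hides its own
page_folio() call.)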
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
>  mm/memory.c | 32 ++++++++++++++++----------------
>  1 file changed, 16 insertions(+), 16 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index f172b148e29b..471102f0cbf2 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3718,8 +3718,8 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
>  vm_fault_t do_swap_page(struct vm_fault *vmf)
>  {
>  	struct vm_area_struct *vma = vmf->vma;
> -	struct folio *folio;
> -	struct page *page = NULL, *swapcache;
> +	struct folio *swapcache, *folio = NULL;
> +	struct page *page;
>  	struct swap_info_struct *si = NULL;
>  	rmap_t rmap_flags = RMAP_NONE;
>  	bool exclusive = false;
> @@ -3762,11 +3762,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		goto out;
>  
>  	page = lookup_swap_cache(entry, vma, vmf->address);
> -	swapcache = page;
>  	if (page)
>  		folio = page_folio(page);
> +	swapcache = folio;
>  
> -	if (!page) {
> +	if (!folio) {
>  		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
>  		    __swap_count(entry) == 1) {
>  			/* skip swapcache */
> @@ -3799,12 +3799,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		} else {
>  			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
>  						vmf);
> -			swapcache = page;
>  			if (page)
>  				folio = page_folio(page);
> +			swapcache = folio;
>  		}
>  
> -		if (!page) {
> +		if (!folio) {
>  			/*
>  			 * Back out if somebody else faulted in this pte
>  			 * while we released the pte lock.
> @@ -3856,10 +3856,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		page = ksm_might_need_to_copy(page, vma, vmf->address);
>  		if (unlikely(!page)) {
>  			ret = VM_FAULT_OOM;
> -			page = swapcache;
>  			goto out_page;
>  		}
>  		folio = page_folio(page);
> +		swapcache = folio;

I couldn't get further than one iteration into my swapping loads:
processes hung waiting for the folio lock.

Delete that "swapcache = folio;" line: here is (one place) where
swapcache and folio may diverge, and they will need to be unlocked
and put separately.  All has been working okay since I deleted that line.
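
(With that assignment deleted, the hunk would read as follows, a sketch
of the intended state rather than the final committed code:

		page = ksm_might_need_to_copy(page, vma, vmf->address);
		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;
			goto out_page;
		}
		folio = page_folio(page);
		/* swapcache still refers to the original swapcache folio:
		 * folio != swapcache now marks the fresh KSM copy, and the
		 * two are unlocked and put separately on the way out. */

so the later "folio != swapcache" checks, and the separate
folio_unlock()/folio_put() of swapcache, behave as intended.)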

Hugh

>  
>  		/*
>  		 * If we want to map a page that's in the swapcache writable, we
> @@ -3867,7 +3867,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		 * owner. Try removing the extra reference from the local LRU
>  		 * pagevecs if required.
>  		 */
> -		if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache &&
> +		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
>  		    !folio_test_ksm(folio) && !folio_test_lru(folio))
>  			lru_add_drain();
>  	}
> @@ -3908,7 +3908,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
>  		 */
>  		exclusive = pte_swp_exclusive(vmf->orig_pte);
> -		if (page != swapcache) {
> +		if (folio != swapcache) {
>  			/*
>  			 * We have a fresh page that is not exposed to the
>  			 * swapcache -> certainly exclusive.
> @@ -3976,7 +3976,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	vmf->orig_pte = pte;
>  
>  	/* ksm created a completely new copy */
> -	if (unlikely(page != swapcache && swapcache)) {
> +	if (unlikely(folio != swapcache && swapcache)) {
>  		page_add_new_anon_rmap(page, vma, vmf->address);
>  		folio_add_lru_vma(folio, vma);
>  	} else {
> @@ -3989,7 +3989,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
>  
>  	folio_unlock(folio);
> -	if (page != swapcache && swapcache) {
> +	if (folio != swapcache && swapcache) {
>  		/*
>  		 * Hold the lock to avoid the swap entry to be reused
>  		 * until we take the PT lock for the pte_same() check
> @@ -3998,8 +3998,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		 * so that the swap count won't change under a
>  		 * parallel locked swapcache.
>  		 */
> -		unlock_page(swapcache);
> -		put_page(swapcache);
> +		folio_unlock(swapcache);
> +		folio_put(swapcache);
>  	}
>  
>  	if (vmf->flags & FAULT_FLAG_WRITE) {
> @@ -4023,9 +4023,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	folio_unlock(folio);
>  out_release:
>  	folio_put(folio);
> -	if (page != swapcache && swapcache) {
> -		unlock_page(swapcache);
> -		put_page(swapcache);
> +	if (folio != swapcache && swapcache) {
> +		folio_unlock(swapcache);
> +		folio_put(swapcache);
>  	}
>  	if (si)
>  		put_swap_device(si);
> -- 
> 2.35.1
