Re: [RFC PATCH 11/14] mm: Free folios in a batch in shrink_folio_list()

On Mon, Sep 04, 2023 at 04:43:22AM +0100, Matthew Wilcox wrote:
> On Fri, Aug 25, 2023 at 02:59:15PM +0100, Matthew Wilcox (Oracle) wrote:
> > Use free_unref_page_batch() to free the folios.  This may increase
> > the number of IPIs from calling try_to_unmap_flush() more often,
> > but that's going to be very workload-dependent.
> 
> I'd like to propose this as a replacement for this patch.  Queue the
> mapped folios up so we can flush them all in one go.  Free the unmapped
> ones, and the mapped ones after the flush.

Any reaction to this patch?  I'm putting together a v2 for posting after
the merge window, and I got no feedback on whether the former version
or this one is better.
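
To restate the shape of the change in miniature (a sketch, not the
patch itself): folios are queued into a folio_batch and drained
whenever folio_batch_add() reports no slots left; mapped folios are
held back on a list and only fed into the same batch after the single
try_to_unmap_flush().  This assumes, as the hunks below do, that
free_unref_folios() reinitialises the batch after freeing, and that
folio_batch_add() returns the number of slots still available (as in
<linux/pagevec.h>):

	/* Hypothetical helper, for illustration only; the helpers it
	 * calls come from this patch series, not the current tree. */
	static void queue_folio_for_free(struct folio_batch *fbatch,
					 struct folio *folio)
	{
		/* Returns the number of slots still free; 0 means full. */
		if (folio_batch_add(fbatch, folio) == 0) {
			mem_cgroup_uncharge_folios(fbatch);
			free_unref_folios(fbatch);
		}
	}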

> It does change the ordering of mem_cgroup_uncharge_folios() and
> the page flush.  I think that's OK.  This is only build-tested;
> something has messed up my laptop and I can no longer launch VMs.
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 6f13394b112e..526d5bb84622 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1706,14 +1706,16 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  		struct pglist_data *pgdat, struct scan_control *sc,
>  		struct reclaim_stat *stat, bool ignore_references)
>  {
> +	struct folio_batch free_folios;
>  	LIST_HEAD(ret_folios);
> -	LIST_HEAD(free_folios);
> +	LIST_HEAD(mapped_folios);
>  	LIST_HEAD(demote_folios);
>  	unsigned int nr_reclaimed = 0;
>  	unsigned int pgactivate = 0;
>  	bool do_demote_pass;
>  	struct swap_iocb *plug = NULL;
>  
> +	folio_batch_init(&free_folios);
>  	memset(stat, 0, sizeof(*stat));
>  	cond_resched();
>  	do_demote_pass = can_demote(pgdat->node_id, sc);
> @@ -1723,7 +1725,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  		struct address_space *mapping;
>  		struct folio *folio;
>  		enum folio_references references = FOLIOREF_RECLAIM;
> -		bool dirty, writeback;
> +		bool dirty, writeback, mapped = false;
>  		unsigned int nr_pages;
>  
>  		cond_resched();
> @@ -1957,6 +1959,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  					stat->nr_lazyfree_fail += nr_pages;
>  				goto activate_locked;
>  			}
> +			mapped = true;
>  		}
>  
>  		/*
> @@ -2111,14 +2114,12 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  		 */
>  		nr_reclaimed += nr_pages;
>  
> -		/*
> -		 * Is there need to periodically free_folio_list? It would
> -		 * appear not as the counts should be low
> -		 */
> -		if (unlikely(folio_test_large(folio)))
> -			destroy_large_folio(folio);
> -		else
> -			list_add(&folio->lru, &free_folios);
> +		if (mapped) {
> +			list_add(&folio->lru, &mapped_folios);
> +		} else if (folio_batch_add(&free_folios, folio) == 0) {
> +			mem_cgroup_uncharge_folios(&free_folios);
> +			free_unref_folios(&free_folios);
> +		}
>  		continue;
>  
>  activate_locked_split:
> @@ -2182,9 +2183,22 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  
>  	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
>  
> -	mem_cgroup_uncharge_list(&free_folios);
>  	try_to_unmap_flush();
> -	free_unref_page_list(&free_folios);
> +	while (!list_empty(&mapped_folios)) {
> +		struct folio *folio = list_first_entry(&mapped_folios,
> +					struct folio, lru);
> +
> +		list_del(&folio->lru);
> +		if (folio_batch_add(&free_folios, folio) > 0)
> +			continue;
> +		mem_cgroup_uncharge_folios(&free_folios);
> +		free_unref_folios(&free_folios);
> +	}
> +
> +	if (free_folios.nr) {
> +		mem_cgroup_uncharge_folios(&free_folios);
> +		free_unref_folios(&free_folios);
> +	}
>  
>  	list_splice(&ret_folios, folio_list);
>  	count_vm_events(PGACTIVATE, pgactivate);
> 
