Re: [PATCH 06/10] mm/truncate: Split invalidate_inode_page() into mapping_shrink_folio()

On 2022/2/15 4:00, Matthew Wilcox (Oracle) wrote:
> Some of the callers already have the address_space and can avoid calling
> folio_mapping() and checking if the folio was already truncated.  Also
> add kernel-doc and fix the return type (in case we ever support folios
> larger than 4TB).
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

LGTM. Thanks.

Reviewed-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
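
One small note for readers following the series: since mapping_shrink_folio()
takes the mapping explicitly, a caller in mm/truncate.c that already holds the
address_space can skip the folio_mapping() lookup and only needs to recheck
for truncation under the folio lock. A rough sketch of what such a caller
could look like (the helper name below is made up and purely illustrative,
not part of this patch):

	/*
	 * Illustrative only: a hypothetical caller inside mm/truncate.c
	 * that already knows the mapping.  It rechecks folio->mapping
	 * under the lock because the folio may have been truncated
	 * before we locked it, then lets mapping_shrink_folio() decide
	 * whether the folio is clean and unused enough to drop.
	 */
	static long try_evict_one(struct address_space *mapping,
			struct folio *folio)
	{
		long nr = 0;

		folio_lock(folio);
		if (folio->mapping == mapping)
			nr = mapping_shrink_folio(mapping, folio);
		folio_unlock(folio);

		return nr;
	}

That is essentially invalidate_inode_page() below minus the folio_mapping()
call, which is exactly the saving the commit message describes.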
> ---
>  include/linux/mm.h  |  1 -
>  mm/internal.h       |  1 +
>  mm/memory-failure.c |  4 ++--
>  mm/truncate.c       | 34 +++++++++++++++++++++++-----------
>  4 files changed, 26 insertions(+), 14 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 4637368d9455..53b301dc5c14 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1853,7 +1853,6 @@ extern void truncate_setsize(struct inode *inode, loff_t newsize);
>  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
>  void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
>  int generic_error_remove_page(struct address_space *mapping, struct page *page);
> -int invalidate_inode_page(struct page *page);
>  
>  #ifdef CONFIG_MMU
>  extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
> diff --git a/mm/internal.h b/mm/internal.h
> index b7a2195c12b1..927a17d58b85 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -100,6 +100,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio);
>  int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
>  bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
>  		loff_t end);
> +long invalidate_inode_page(struct page *page);
>  
>  /**
>   * folio_evictable - Test whether a folio is evictable.
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 97a9ed8f87a9..0b72a936b8dd 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -2139,7 +2139,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
>   */
>  static int __soft_offline_page(struct page *page)
>  {
> -	int ret = 0;
> +	long ret = 0;
>  	unsigned long pfn = page_to_pfn(page);
>  	struct page *hpage = compound_head(page);
>  	char const *msg_page[] = {"page", "hugepage"};
> @@ -2196,7 +2196,7 @@ static int __soft_offline_page(struct page *page)
>  			if (!list_empty(&pagelist))
>  				putback_movable_pages(&pagelist);
>  
> -			pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n",
> +			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
>  				pfn, msg_page[huge], ret, &page->flags);
>  			if (ret > 0)
>  				ret = -EBUSY;
> diff --git a/mm/truncate.c b/mm/truncate.c
> index 8aa86e294775..b1bdc61198f6 100644
> --- a/mm/truncate.c
> +++ b/mm/truncate.c
> @@ -273,18 +273,9 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
>  }
>  EXPORT_SYMBOL(generic_error_remove_page);
>  
> -/*
> - * Safely invalidate one page from its pagecache mapping.
> - * It only drops clean, unused pages. The page must be locked.
> - *
> - * Returns 1 if the page is successfully invalidated, otherwise 0.
> - */
> -int invalidate_inode_page(struct page *page)
> +static long mapping_shrink_folio(struct address_space *mapping,
> +		struct folio *folio)
>  {
> -	struct folio *folio = page_folio(page);
> -	struct address_space *mapping = folio_mapping(folio);
> -	if (!mapping)
> -		return 0;
>  	if (folio_test_dirty(folio) || folio_test_writeback(folio))
>  		return 0;
>  	if (folio_ref_count(folio) > folio_nr_pages(folio) + 1)
> @@ -295,6 +286,27 @@ int invalidate_inode_page(struct page *page)
>  	return remove_mapping(mapping, folio);
>  }
>  
> +/**
> + * invalidate_inode_page() - Remove an unused page from the pagecache.
> + * @page: The page to remove.
> + *
> + * Safely invalidate one page from its pagecache mapping.
> + * It only drops clean, unused pages.
> + *
> + * Context: Page must be locked.
> + * Return: The number of pages successfully removed.
> + */
> +long invalidate_inode_page(struct page *page)
> +{
> +	struct folio *folio = page_folio(page);
> +	struct address_space *mapping = folio_mapping(folio);
> +
> +	/* The page may have been truncated before it was locked */
> +	if (!mapping)
> +		return 0;
> +	return mapping_shrink_folio(mapping, folio);
> +}
> +
>  /**
>   * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
>   * @mapping: mapping to truncate
> 