[PATCH 8/8] mm: Unify all add_to_page_cache variants

From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>

We already have various bits of add_to_page_cache() executed conditionally
on !PageHuge(page); fold in the add_to_page_cache_lru() pieces as more
code that is not executed for huge pages.  This lets us remove the old
add_to_page_cache() wrapper and rename __add_to_page_cache_locked() to
add_to_page_cache().  The page locking that the wrappers used to do moves
into the unified function, and error injection now applies directly to
add_to_page_cache().  Include a compatibility define so we don't have to
change all 20+ callers of add_to_page_cache_lru().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
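[Sketch for reviewers; not part of the patch.  With the compatibility
define in place, an existing caller like the hypothetical one below
compiles unchanged but now reaches the unified add_to_page_cache()
directly.  Note the error-handling contract visible in the diff: on
failure the unified function has already unlocked the page and dropped
the reference it took itself, so the caller drops only its own
allocation reference.]

#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_read_one_page(struct address_space *mapping,
				 pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return -ENOMEM;

	/* Expands to add_to_page_cache(page, mapping, index, gfp) */
	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (err) {
		/* The page comes back unlocked; drop only our own ref */
		put_page(page);
		return err;
	}

	/* Success: the page is locked and (if !PageHuge) on the LRU */
	unlock_page(page);
	return 0;
}
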
 include/linux/pagemap.h |  5 ++--
 mm/filemap.c            | 65 ++++++++++++-----------------------------
 2 files changed, 21 insertions(+), 49 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3ce051fb9c73..753e8df6a5b1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -606,14 +606,15 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
 extern void __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
 
+#define add_to_page_cache_lru(page, mapping, index, gfp) \
+	add_to_page_cache(page, mapping, index, gfp)
+
 static inline unsigned long dir_pages(struct inode *inode)
 {
 	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
diff --git a/mm/filemap.c b/mm/filemap.c
index fb87f5fa75e6..83f45f31a00a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -847,19 +847,18 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int __add_to_page_cache_locked(struct page *page,
-				      struct address_space *mapping,
-				      pgoff_t offset, gfp_t gfp_mask,
-				      void **shadowp)
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
 	int error;
-	void *old;
+	void *old, *shadow = NULL;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+	__SetPageLocked(page);
 	mapping_set_update(&xas, mapping);
 
 	if (!huge) {
@@ -884,8 +883,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 		if (xa_is_value(old)) {
 			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
+			shadow = old;
 		}
 		mapping->nrpages++;
 
@@ -899,45 +897,8 @@ static int __add_to_page_cache_locked(struct page *page,
 	if (xas_error(&xas))
 		goto error;
 
-	if (!huge)
+	if (!huge) {
 		mem_cgroup_commit_charge(page, memcg, false, false);
-	trace_mm_filemap_add_to_page_cache(page);
-	return 0;
-error:
-	page->mapping = NULL;
-	/* Leave page->index set: truncation relies upon it */
-	if (!huge)
-		mem_cgroup_cancel_charge(page, memcg, false);
-	put_page(page);
-	return xas_error(&xas);
-}
-ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
-
-int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
-{
-	int err;
-
-	__SetPageLocked(page);
-	err = __add_to_page_cache_locked(page, mapping, offset,
-					  gfp_mask, NULL);
-	if (unlikely(err))
-		__ClearPageLocked(page);
-	return err;
-}
-
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t offset, gfp_t gfp_mask)
-{
-	void *shadow = NULL;
-	int ret;
-
-	__SetPageLocked(page);
-	ret = __add_to_page_cache_locked(page, mapping, offset,
-					 gfp_mask, &shadow);
-	if (unlikely(ret))
-		__ClearPageLocked(page);
-	else {
 		/*
 		 * The page might have been evicted from cache only
 		 * recently, in which case it should be activated like
@@ -951,9 +912,19 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 			workingset_refault(page, shadow);
 		lru_cache_add(page);
 	}
-	return ret;
+	trace_mm_filemap_add_to_page_cache(page);
+	return 0;
+error:
+	page->mapping = NULL;
+	/* Leave page->index set: truncation relies upon it */
+	if (!huge)
+		mem_cgroup_cancel_charge(page, memcg, false);
+	put_page(page);
+	__ClearPageLocked(page);
+	return xas_error(&xas);
 }
-EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
+ALLOW_ERROR_INJECTION(add_to_page_cache, ERRNO);
+EXPORT_SYMBOL_GPL(add_to_page_cache);
 
 #ifdef CONFIG_NUMA
 struct page *__page_cache_alloc(gfp_t gfp)
-- 
2.24.1



