No longer has any users, so remove it.

Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
---
 include/linux/pagemap.h | 20 ++-----------
 mm/filemap.c            | 64 ++++++++++++++++++++---------------------
 2 files changed, 33 insertions(+), 51 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 434c9c34ae..aceaebfaab 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -689,8 +689,8 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return 0;
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+			pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
@@ -710,22 +710,6 @@ void page_cache_readahead_unbounded(struct address_space *, struct file *,
 		pgoff_t index, unsigned long nr_to_read,
 		unsigned long lookahead_count);
 
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	__SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		__ClearPageLocked(page);
-	return error;
-}
-
 /**
  * struct readahead_control - Describes a readahead request.
  *
diff --git a/mm/filemap.c b/mm/filemap.c
index bb71334fdd..b92ca48b90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -827,20 +827,20 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int __add_to_page_cache_locked(struct page *page,
-				      struct address_space *mapping,
-				      pgoff_t offset, gfp_t gfp_mask,
-				      void **shadowp)
+static int __add_to_page_cache(struct page *page,
+			       struct address_space *mapping,
+			       pgoff_t offset, gfp_t gfp_mask,
+			       void **shadowp)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
 	int huge = PageHuge(page);
 	int error;
 	void *old;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 	mapping_set_update(&xas, mapping);
 
+	__SetPageLocked(page);
 	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
@@ -885,29 +885,31 @@ static int __add_to_page_cache_locked(struct page *page,
 	page->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
 	put_page(page);
+	__ClearPageLocked(page);
 	return error;
 }
-ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
+ALLOW_ERROR_INJECTION(__add_to_page_cache, ERRNO);
 
 /**
- * add_to_page_cache_locked - add a locked page to the pagecache
+ * add_to_page_cache - add a newly allocated page to the pagecache
  * @page: page to add
  * @mapping: the page's address_space
  * @offset: page index
 * @gfp_mask: page allocation mode
 *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU. The caller must do that.
+ * This function is used to add a page to the pagecache. It must be newly
+ * allocated. This function does not add the page to the LRU. The caller must
+ * do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+		      pgoff_t offset, gfp_t gfp_mask)
 {
-	return __add_to_page_cache_locked(page, mapping, offset,
-					  gfp_mask, NULL);
+	return __add_to_page_cache(page, mapping, offset, gfp_mask, NULL);
 }
-EXPORT_SYMBOL(add_to_page_cache_locked);
+EXPORT_SYMBOL(add_to_page_cache);
+ALLOW_ERROR_INJECTION(add_to_page_cache, ERRNO);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t offset, gfp_t gfp_mask)
@@ -915,26 +917,22 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	void *shadow = NULL;
 	int ret;
 
-	__SetPageLocked(page);
-	ret = __add_to_page_cache_locked(page, mapping, offset,
-					 gfp_mask, &shadow);
+	ret = __add_to_page_cache(page, mapping, offset, gfp_mask, &shadow);
 	if (unlikely(ret))
-		__ClearPageLocked(page);
-	else {
-		/*
-		 * The page might have been evicted from cache only
-		 * recently, in which case it should be activated like
-		 * any other repeatedly accessed page.
-		 * The exception is pages getting rewritten; evicting other
-		 * data from the working set, only to cache data that will
-		 * get overwritten with something else, is a waste of memory.
-		 */
-		WARN_ON_ONCE(PageActive(page));
-		if (!(gfp_mask & __GFP_WRITE) && shadow)
-			workingset_refault(page, shadow);
-		lru_cache_add(page);
-	}
-	return ret;
+		return ret;
+
+	/*
+	 * The page might have been evicted from cache only recently, in which
+	 * case it should be activated like any other repeatedly accessed page.
+	 * The exception is pages getting rewritten; evicting other data from
+	 * the working set, only to cache data that will get overwritten with
+	 * something else, is a waste of memory.
+	 */
+	WARN_ON_ONCE(PageActive(page));
+	if (!(gfp_mask & __GFP_WRITE) && shadow)
+		workingset_refault(page, shadow);
+	lru_cache_add(page);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
-- 
2.28.0
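
For reviewers unfamiliar with the new calling convention, a minimal caller sketch follows; it is an illustration only, not part of the patch. Locking is now done inside __add_to_page_cache(), so a caller passes in a freshly allocated, unlocked page and gets it back locked on success. The helper name example_grab_page is hypothetical; __page_cache_alloc(), add_to_page_cache(), lru_cache_add() and put_page() are the existing kernel APIs.

/* Illustration only; assumes <linux/pagemap.h>, <linux/swap.h>, <linux/err.h>. */
static struct page *example_grab_page(struct address_space *mapping,
				      pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * The page is new and unlocked; add_to_page_cache() locks it
	 * internally and returns it locked on success.
	 */
	err = add_to_page_cache(page, mapping, index, gfp);
	if (err) {
		put_page(page);
		return ERR_PTR(err);
	}

	/* add_to_page_cache() does not touch the LRU; the caller does that. */
	lru_cache_add(page);

	return page;
}

A caller that wants the default LRU behaviour would simply use add_to_page_cache_lru() instead of the add_to_page_cache() + lru_cache_add() pair above.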