We no longer need to keep track of how many shadow entries are present in
a mapping.  This saves a few writes to the inode and memory barriers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/filemap.c    | 12 ------------
 mm/truncate.c   |  1 -
 mm/workingset.c |  1 -
 3 files changed, 14 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 76383d558b7c..7c3f97bd6dcd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -139,17 +139,6 @@ static void page_cache_delete(struct address_space *mapping,

 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
-
-	if (shadow) {
-		mapping->nrexceptional += nr;
-		/*
-		 * Make sure the nrexceptional update is committed before
-		 * the nrpages update so that final truncate racing
-		 * with reclaim does not see both counters 0 at the
-		 * same time and miss a shadow entry.
-		 */
-		smp_wmb();
-	}
 	mapping->nrpages -= nr;
 }

@@ -860,7 +849,6 @@ static int __add_to_page_cache_locked(struct page *page,
 			goto unlock;

 		if (xa_is_value(old)) {
-			mapping->nrexceptional--;
 			if (shadowp)
 				*shadowp = old;
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c4c8ac140be..a59184793607 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	if (xas_load(&xas) != entry)
 		return;
 	xas_store(&xas, NULL);
-	mapping->nrexceptional--;
 }

 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
diff --git a/mm/workingset.c b/mm/workingset.c
index fdeabea54e77..0649bfb1ca33 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -547,7 +547,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out_invalid;
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
-	mapping->nrexceptional -= node->nr_values;
 	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
 	xas.xa_offset = node->offset;
 	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-- 
2.27.0