From: Kairui Song <kasong@xxxxxxxxxxx>

__filemap_add_folio only has two callers: one never passes in a hugetlb
folio and one always passes in a hugetlb folio. So move the
hugetlb-related cgroup charging out of it to make the code cleaner.

Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
Acked-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/filemap.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index d447b36ffd41..7f734b7ad273 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -853,20 +853,12 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 {
 	XA_STATE(xas, &mapping->i_pages, index);
 	bool huge = folio_test_hugetlb(folio);
-	bool charged = false;
-	long nr = 1;
+	long nr;
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 	mapping_set_update(&xas, mapping);
 
-	if (!huge) {
-		int error = mem_cgroup_charge(folio, NULL, gfp);
-		if (error)
-			return error;
-		charged = true;
-	}
-
 	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 	xas_set_order(&xas, index, folio_order(folio));
 	nr = folio_nr_pages(folio);
@@ -931,8 +923,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 	trace_mm_filemap_add_to_page_cache(folio);
 	return 0;
 error:
-	if (charged)
-		mem_cgroup_uncharge(folio);
 	folio->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
 	folio_put_refs(folio, nr);
@@ -946,11 +936,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 	void *shadow = NULL;
 	int ret;
 
+	ret = mem_cgroup_charge(folio, NULL, gfp);
+	if (ret)
+		return ret;
+
 	__folio_set_locked(folio);
 	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		mem_cgroup_uncharge(folio);
 		__folio_clear_locked(folio);
-	else {
+	} else {
 		/*
 		 * The folio might have been evicted from cache only
 		 * recently, in which case it should be activated like
-- 
2.44.0