---
 Documentation/filesystems/proc.txt |  4 ++--
 include/linux/huge_mm.h            |  7 +++++--
 include/linux/mm_types.h           |  3 ++-
 mm/huge_memory.c                   | 13 ++++++++++++-
 mm/rmap.c                          |  4 ++--
 5 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 99ca040..93fc183 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -968,8 +968,8 @@ ShmemHugePages: Memory used by shared memory (shmem) and tmpfs allocated
with huge pages
ShmemPmdMapped: Shared memory mapped into userspace with huge pages
KReclaimable: Kernel allocations that the kernel will attempt to reclaim
- under memory pressure. Includes SReclaimable (below), and other
- direct allocations with a shrinker.
+ under memory pressure. Includes SReclaimable (below), deferred
+ split THPs, and other direct allocations with a shrinker.
Slab: in-kernel data structures cache
SReclaimable: Part of Slab, that might be reclaimed, such as caches
SUnreclaim: Part of Slab, that cannot be reclaimed on memory pressure
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 61c9ffd..c194630 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -162,7 +162,7 @@ static inline int split_huge_page(struct page *page)
{
return split_huge_page_to_list(page, NULL);
}

-void deferred_split_huge_page(struct page *page);
+void deferred_split_huge_page(struct page *page, unsigned int nr);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct page *page);
@@ -324,7 +324,10 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_huge_page(struct page *page, unsigned int nr)
+{
+}
+

#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 156640c..17e0fc5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -138,7 +138,8 @@ struct page {
};
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
- unsigned long _compound_pad_2;
+ /* Freeable normal pages for deferred split shrinker */
+ unsigned long nr_freeable;
/* For both global and memcg */
struct list_head deferred_list;
};
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c9a596e..e04ac4d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -524,6 +524,7 @@ void prep_transhuge_page(struct page *page)
INIT_LIST_HEAD(page_deferred_list(page));
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
+ page[2].nr_freeable = 0;
}
static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
@@ -2766,6 +2767,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
+ __mod_node_page_state(page_pgdat(page),
+ NR_KERNEL_MISC_RECLAIMABLE,
+ -head[2].nr_freeable);
+ head[2].nr_freeable = 0;
}
if (mapping)
__dec_node_page_state(page, NR_SHMEM_THPS);
@@ -2816,11 +2821,14 @@ void free_transhuge_page(struct page *page)
ds_queue->split_queue_len--;
list_del(page_deferred_list(page));
}
+ __mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+ -page[2].nr_freeable);
+ page[2].nr_freeable = 0;
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
free_compound_page(page);
}

-void deferred_split_huge_page(struct page *page)
+void deferred_split_huge_page(struct page *page, unsigned int nr)
{
struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
@@ -2844,6 +2852,9 @@ void deferred_split_huge_page(struct page *page)
return;
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ page[2].nr_freeable += nr;
+ __mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+ nr);
if (list_empty(page_deferred_list(page))) {
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2a..6008fab 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1286,7 +1286,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
if (nr) {
__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
- deferred_split_huge_page(page);
+ deferred_split_huge_page(page, nr);
}
}
@@ -1320,7 +1320,7 @@ void page_remove_rmap(struct page *page, bool compound)
clear_page_mlock(page);
if (PageTransCompound(page))
- deferred_split_huge_page(compound_head(page));
+ deferred_split_huge_page(compound_head(page), 1);
/*
* It would be tidy to reset the PageAnon mapping here,
--
1.8.3.1
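
For reviewers, a minimal userspace sketch of the accounting invariant this
patch maintains: every increment of NR_KERNEL_MISC_RECLAIMABLE made by
deferred_split_huge_page() is balanced by a matching decrement when the THP
is split or freed. The names below mirror the kernel code, but everything is
simulated plain C with no kernel APIs; the real code updates the per-node
stat with __mod_node_page_state() under ds_queue->split_queue_lock.

/*
 * Userspace model of the page[2].nr_freeable accounting in this patch.
 * Hypothetical simulation only: single node, one THP, no locking.
 */
#include <assert.h>
#include <stdio.h>

static long nr_kernel_misc_reclaimable;	/* models the node vmstat counter */

struct thp {
	unsigned long nr_freeable;	/* models page[2].nr_freeable */
};

/* models deferred_split_huge_page(page, nr) */
static void deferred_split(struct thp *p, unsigned int nr)
{
	p->nr_freeable += nr;
	nr_kernel_misc_reclaimable += nr;
}

/*
 * models the teardown paths: split_huge_page_to_list() and
 * free_transhuge_page() both subtract nr_freeable and zero it
 */
static void split_or_free(struct thp *p)
{
	nr_kernel_misc_reclaimable -= p->nr_freeable;
	p->nr_freeable = 0;
}

int main(void)
{
	struct thp p = { 0 };

	deferred_split(&p, 255);	/* e.g. page_remove_anon_compound_rmap() */
	deferred_split(&p, 1);		/* e.g. page_remove_rmap() on one subpage */
	assert(nr_kernel_misc_reclaimable == 256);

	split_or_free(&p);		/* shrinker splits the THP, or it is freed */
	assert(nr_kernel_misc_reclaimable == 0);
	printf("accounting balanced\n");
	return 0;
}

Whichever path takes the THP off the deferred split queue, the counter
returns to zero, which is what keeps the KReclaimable line in /proc/meminfo
from drifting.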