KSM can save memory by merging identical pages, but it can also consume
additional memory, because it needs to allocate rmap_items to hold each
scanned page's brief rmap information. Some of these pages may get merged,
but some can never be merged even after being checked several times; the
memory consumed for those pages is unprofitable.

Whether KSM saves or consumes memory system-wide can be determined from
pages_sharing, pages_shared, pages_unshared and pages_volatile. A simple
approximation is:

	profit ≈ pages_sharing * sizeof(page) - all_rmap_items * sizeof(rmap_item)

where all_rmap_items is the sum of pages_sharing, pages_shared,
pages_unshared and pages_volatile.

However, this kind of KSM profit cannot be calculated for a single process,
because the number of ksm rmap_items allocated for a process is not exposed.

If user applications could obtain this information, they would know how much
benefit the KSM policy they use (e.g. madvise) really brings, and could then
optimize their code. For example, if an application madvises 1000 pages as
MERGEABLE but only a few of them are actually merged, the policy is not
cost-efficient.

So add a new interface /proc/<pid>/ksm_rmp_items for each process, showing
the total number of ksm rmap_items allocated for that process.

Signed-off-by: xu xin <xu.xin16@xxxxxxxxxx>
Reviewed-by: Xiaokai Ran <ran.xiaokai@xxxxxxxxxx>
Reviewed-by: Yang Yang <yang.yang29@xxxxxxxxxx>
Signed-off-by: CGEL ZTE <cgel.zte@xxxxxxxxx>
---
 fs/proc/base.c           | 15 +++++++++++++++
 include/linux/mm_types.h |  5 +++++
 mm/ksm.c                 |  2 ++
 3 files changed, 22 insertions(+)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 93f7e3d971e4..b6317981492a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3196,6 +3196,19 @@ static int proc_pid_ksm_merging_pages(struct seq_file *m, struct pid_namespace *
 	return 0;
 }
 
+static int proc_pid_ksm_rmp_items(struct seq_file *m, struct pid_namespace *ns,
+				struct pid *pid, struct task_struct *task)
+{
+	struct mm_struct *mm;
+
+	mm = get_task_mm(task);
+	if (mm) {
+		seq_printf(m, "%lu\n", mm->ksm_rmp_items);
+		mmput(mm);
+	}
+
+	return 0;
+}
 #endif /* CONFIG_KSM */
 
 #ifdef CONFIG_STACKLEAK_METRICS
@@ -3331,6 +3344,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #endif
 #ifdef CONFIG_KSM
 	ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+	ONE("ksm_rmp_items", S_IRUSR, proc_pid_ksm_rmp_items),
 #endif
 };
 
@@ -3668,6 +3682,7 @@ static const struct pid_entry tid_base_stuff[] = {
 #endif
 #ifdef CONFIG_KSM
 	ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+	ONE("ksm_rmp_items", S_IRUSR, proc_pid_ksm_rmp_items),
 #endif
 };
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cf97f3884fda..0b9e76275ea7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -671,6 +671,11 @@ struct mm_struct {
 		 * merging.
 		 */
 		unsigned long ksm_merging_pages;
+		/*
+		 * Represent how many pages are checked for ksm merging
+		 * including merged and not merged.
+		 */
+		unsigned long ksm_rmp_items;
 #endif
 	} __randomize_layout;
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 478bcf26bfcd..fc9879d7049f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -421,6 +421,7 @@ static inline struct rmap_item *alloc_rmap_item(void)
 static inline void free_rmap_item(struct rmap_item *rmap_item)
 {
 	ksm_rmap_items--;
+	rmap_item->mm->ksm_rmp_items--;
 	rmap_item->mm = NULL;	/* debug safety */
 	kmem_cache_free(rmap_item_cache, rmap_item);
 }
@@ -2265,6 +2266,7 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
 	if (rmap_item) {
 		/* It has already been zeroed */
 		rmap_item->mm = mm_slot->mm;
+		rmap_item->mm->ksm_rmp_items++;
 		rmap_item->address = addr;
 		rmap_item->rmap_list = *rmap_list;
 		*rmap_list = rmap_item;
-- 
2.25.1
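
Not for merging, illustration only: a minimal userspace sketch of how the new
interface could be read together with the existing /proc/<pid>/ksm_merging_pages
to estimate per-process profit, by analogy with the system-wide formula in the
changelog. RMAP_ITEM_SIZE is an assumed constant here, since
sizeof(struct rmap_item) is not exported to userspace; the file name
ksm_rmp_items is the one added by this patch.

	/*
	 * ksm_profit.c - rough per-process KSM profit estimate (sketch).
	 * Assumes the kernel carries this patch; RMAP_ITEM_SIZE is a guess,
	 * not an exported value.
	 */
	#include <stdio.h>
	#include <unistd.h>

	#define RMAP_ITEM_SIZE	64	/* assumed size of struct rmap_item */

	static long read_proc_value(const char *pid, const char *name)
	{
		char path[64];
		long val = -1;
		FILE *f;

		snprintf(path, sizeof(path), "/proc/%s/%s", pid, name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(int argc, char **argv)
	{
		const char *pid = argc > 1 ? argv[1] : "self";
		long merging = read_proc_value(pid, "ksm_merging_pages");
		long items = read_proc_value(pid, "ksm_rmp_items");
		long page_size = sysconf(_SC_PAGESIZE);

		if (merging < 0 || items < 0) {
			fprintf(stderr, "cannot read KSM stats for pid %s\n", pid);
			return 1;
		}

		/* profit ~= pages saved by merging - metadata spent on rmap_items */
		printf("approx. ksm profit: %ld bytes\n",
		       merging * page_size - items * RMAP_ITEM_SIZE);
		return 0;
	}

Run as "./ksm_profit <pid>" (or with no argument for the calling process); a
result near zero or negative suggests the madvise(MERGEABLE) policy is not
paying for its rmap_item overhead.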