The current version of KSM does not account for the memory consumed by
allocated ksm_mm_slot structures. As a result, the profit figures that
KSM reports to users omit the cost of those allocations. This patch
introduces a counter that tracks the number of allocated ksm_mm_slots,
allowing KSM to report a more accurate profit.

Signed-off-by: Sung-hun Kim <sfoon.kim@xxxxxxxxxxx>
---
Changelog in V2:
 - Add an MMF_VM_MERGEABLE flag check in ksm_process_profit for
   untracked processes
---
 mm/ksm.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 34c4820e0d3d..c8ced991ccda 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -267,6 +267,9 @@ static unsigned long ksm_pages_unshared;
 /* The number of rmap_items in use: to calculate pages_volatile */
 static unsigned long ksm_rmap_items;
 
+/* The number of ksm_mm_slot in use */
+static atomic_long_t ksm_mm_slots = ATOMIC_LONG_INIT(0);
+
 /* The number of stable_node chains */
 static unsigned long ksm_stable_node_chains;
 
@@ -1245,6 +1248,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 			spin_unlock(&ksm_mmlist_lock);
 
 			mm_slot_free(mm_slot_cache, mm_slot);
+			atomic_long_dec(&ksm_mm_slots);
 			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 			mmdrop(mm);
@@ -2717,6 +2721,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 		spin_unlock(&ksm_mmlist_lock);
 
 		mm_slot_free(mm_slot_cache, mm_slot);
+		atomic_long_dec(&ksm_mm_slots);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		mmap_read_unlock(mm);
@@ -3000,6 +3005,7 @@ int __ksm_enter(struct mm_struct *mm)
 		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
 	spin_unlock(&ksm_mmlist_lock);
 
+	atomic_long_inc(&ksm_mm_slots);
 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
 	mmgrab(mm);
 
@@ -3042,6 +3048,7 @@ void __ksm_exit(struct mm_struct *mm)
 
 	if (easy_to_free) {
 		mm_slot_free(mm_slot_cache, mm_slot);
+		atomic_long_dec(&ksm_mm_slots);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		mmdrop(mm);
@@ -3374,7 +3381,8 @@ static void wait_while_offlining(void)
 long ksm_process_profit(struct mm_struct *mm)
 {
 	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
-		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
+		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item) -
+		(test_bit(MMF_VM_MERGEABLE, &mm->flags) ? sizeof(struct ksm_mm_slot) : 0);
 }
 #endif /* CONFIG_PROC_FS */
 
@@ -3672,7 +3680,8 @@ static ssize_t general_profit_show(struct kobject *kobj,
 	long general_profit;
 
 	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
-		ksm_rmap_items * sizeof(struct ksm_rmap_item);
+		ksm_rmap_items * sizeof(struct ksm_rmap_item) -
+		atomic_long_read(&ksm_mm_slots) * sizeof(struct ksm_mm_slot);
 
 	return sysfs_emit(buf, "%ld\n", general_profit);
 }
-- 
2.25.1
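
For illustration only (not part of the patch): a minimal userspace sketch that
reads the value emitted by general_profit_show() above. It assumes KSM is
enabled and the usual sysfs location /sys/kernel/mm/ksm/general_profit; with
this change the reported figure also subtracts
ksm_mm_slots * sizeof(struct ksm_mm_slot) in addition to the rmap_item cost.

/* Illustrative sketch: print the KSM general profit in bytes. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/ksm/general_profit", "r");
	long profit;

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%ld", &profit) != 1) {
		fprintf(stderr, "unexpected general_profit format\n");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("KSM general profit: %ld bytes\n", profit);
	return 0;
}

The per-process figure computed by ksm_process_profit() should likewise be
visible via the proc interface (/proc/<pid>/ksm_stat) when CONFIG_PROC_FS is
enabled.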