From: xu xin <xu.xin16@xxxxxxxxxx>

Abstract a new helper, try_to_get_old_rmap_item(), out of
get_next_rmap_item(). The helper will be reused by subsequent patches
that count ksm_zero_pages.

This improves the readability and reusability of the KSM code.

Signed-off-by: xu xin <xu.xin16@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Cc: Xuexin Jiang <jiang.xuexin@xxxxxxxxxx>
Reviewed-by: Xiaokai Ran <ran.xiaokai@xxxxxxxxxx>
Reviewed-by: Yang Yang <yang.yang29@xxxxxxxxxx>
---
v5->v6: Modified some comments according to David's suggestions.

 mm/ksm.c | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 83e2f74ae7da..905a79d213da 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2214,23 +2214,38 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 	}
 }
 
-static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
-						struct ksm_rmap_item **rmap_list,
-						unsigned long addr)
+static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr,
+						struct ksm_rmap_item **rmap_list)
 {
-	struct ksm_rmap_item *rmap_item;
-
 	while (*rmap_list) {
-		rmap_item = *rmap_list;
+		struct ksm_rmap_item *rmap_item = *rmap_list;
+
 		if ((rmap_item->address & PAGE_MASK) == addr)
 			return rmap_item;
 		if (rmap_item->address > addr)
 			break;
 		*rmap_list = rmap_item->rmap_list;
+		/*
+		 * If we end up here, the VMA is MADV_UNMERGEABLE or its page
+		 * is ineligible or discarded, e.g. MADV_DONTNEED.
+		 */
 		remove_rmap_item_from_tree(rmap_item);
 		free_rmap_item(rmap_item);
 	}
 
+	return NULL;
+}
+
+static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+						struct ksm_rmap_item **rmap_list,
+						unsigned long addr)
+{
+	struct ksm_rmap_item *rmap_item;
+
+	rmap_item = try_to_get_old_rmap_item(addr, rmap_list);
+	if (rmap_item)
+		return rmap_item;
+
 	rmap_item = alloc_rmap_item();
 	if (rmap_item) {
 		/* It has already been zeroed */
-- 
2.15.2
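
[Editor's note, not part of the patch: a minimal sketch of the kind of
caller the abstraction enables. The function name and its use are
assumptions for illustration; the actual ksm_zero_pages users land in
later patches of the series.]

	/*
	 * Illustrative sketch only (hypothetical follow-up caller):
	 * unlike get_next_rmap_item(), a caller can now look up an
	 * existing rmap_item for addr without allocating a new one
	 * when the lookup misses.
	 */
	static bool ksm_addr_has_rmap_item(unsigned long addr,
					   struct ksm_rmap_item **rmap_list)
	{
		/*
		 * try_to_get_old_rmap_item() returns the item whose
		 * PAGE_MASK-ed address matches addr, or NULL; stale
		 * items before addr are unlinked and freed as a side
		 * effect, exactly as in the old loop.
		 */
		return try_to_get_old_rmap_item(addr, rmap_list) != NULL;
	}

This split keeps allocation policy in get_next_rmap_item() while the
lookup-and-prune logic becomes reusable on its own.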