page_address_in_vma() is used in add_to_kill() to find the user virtual
address of a page, but it cannot handle ksm pages because a ksm page's
page->index is not usable for that lookup. Add an addr parameter so the
caller can pass the address directly, rename the function to
__add_to_kill(), and add two wrappers: add_to_kill_anon_file() for
anonymous and file pages, and add_to_kill_fsdax() for fsdax pages.

Signed-off-by: Longlong Xia <xialonglong1@xxxxxxxxxx>
---
 mm/memory-failure.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a1ede7bdce95e..9ca058f659121 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -405,9 +405,9 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * page->mapping are sufficient for mapping the page back to its
  * corresponding user virtual address.
  */
-static void add_to_kill(struct task_struct *tsk, struct page *p,
-			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
-			struct list_head *to_kill)
+static void __add_to_kill(struct task_struct *tsk, struct page *p,
+			  struct vm_area_struct *vma, struct list_head *to_kill,
+			  unsigned long addr, pgoff_t fsdax_pgoff)
 {
 	struct to_kill *tk;
 
@@ -417,7 +417,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 		return;
 	}
 
-	tk->addr = page_address_in_vma(p, vma);
+	tk->addr = addr ? addr : page_address_in_vma(p, vma);
 	if (is_zone_device_page(p)) {
 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
 			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
@@ -448,6 +448,13 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 	list_add_tail(&tk->nd, to_kill);
 }
 
+static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
+				  struct vm_area_struct *vma,
+				  struct list_head *to_kill)
+{
+	__add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
+}
+
 /*
  * Kill the processes that have been collected earlier.
  *
@@ -573,7 +580,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				continue;
 			if (!page_mapped_in_vma(page, vma))
 				continue;
-			add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
+			add_to_kill_anon_file(t, page, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -609,8 +616,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			 * to be informed of all such data corruptions.
 			 */
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
-					    to_kill);
+				add_to_kill_anon_file(t, page, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -618,6 +624,13 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 }
 
 #ifdef CONFIG_FS_DAX
+static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
+			      struct vm_area_struct *vma,
+			      struct list_head *to_kill, pgoff_t pgoff)
+{
+	__add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
+}
+
 /*
  * Collect processes when the error hit a fsdax page.
  */
@@ -637,7 +650,7 @@ static void collect_procs_fsdax(struct page *page,
 			continue;
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, pgoff, vma, to_kill);
+				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
 		}
 	}
 	read_unlock(&tasklist_lock);
-- 
2.25.1
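
[Editor's note] For context, below is a minimal sketch of how a ksm-aware
caller could use the new addr parameter, assuming a follow-up change adds
such a wrapper; the name add_to_kill_ksm and its ksm_addr parameter are
illustrative only and are not part of this patch.

/*
 * Hypothetical follow-up (not in this patch): a ksm rmap walk already
 * knows the address at which the page is mapped in each vma, so it can
 * hand that address straight to __add_to_kill() instead of relying on
 * page_address_in_vma(), which cannot resolve ksm pages.
 */
static void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
			    struct vm_area_struct *vma,
			    struct list_head *to_kill, unsigned long ksm_addr)
{
	__add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
}

Passing 0 for addr keeps the existing page_address_in_vma() lookup, which
is why add_to_kill_anon_file() and add_to_kill_fsdax() in the patch above
pass 0.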