---
 mm/ksm.c            |  4 ++--
 mm/memory-failure.c | 36 ++++++++++++++++++++++--------------
 2 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 6b7b8928fb96..dcbc0c7f68e7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2919,7 +2919,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
struct anon_vma *av = rmap_item->anon_vma;
 
anon_vma_lock_read(av);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct anon_vma_chain *vmac;
unsigned long addr;
@@ -2938,7 +2938,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
}
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
anon_vma_unlock_read(av);
}
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7b01fffe7a79..6a02706043f4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -546,24 +546,32 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
* Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
* on behalf of the thread group. Return task_struct of the (first found)
* dedicated thread if found, and return NULL otherwise.
- *
- * We already hold read_lock(&tasklist_lock) in the caller, so we don't
- * have to call rcu_read_lock/unlock() in this function.
*/
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
struct task_struct *t;
+ bool find = false;
 
+ rcu_read_lock();
for_each_thread(tsk, t) {
if (t->flags & PF_MCE_PROCESS) {
- if (t->flags & PF_MCE_EARLY)
- return t;
+ if (t->flags & PF_MCE_EARLY) {
+ find = true;
+ break;
+ }
} else {
- if (sysctl_memory_failure_early_kill)
- return t;
+ if (sysctl_memory_failure_early_kill) {
+ find = true;
+ break;
+ }
}
}
- return NULL;
+ rcu_read_unlock();
+
+ if (!find)
+ t = NULL;
+
+ return t;
}
 
/*
@@ -609,7 +617,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
return;
 
pgoff = page_to_pgoff(page);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct anon_vma_chain *vmac;
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -626,7 +634,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
add_to_kill_anon_file(t, page, vma, to_kill);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
anon_vma_unlock_read(av);
}
 
@@ -642,7 +650,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
pgoff_t pgoff;
 
i_mmap_lock_read(mapping);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
pgoff = page_to_pgoff(page);
for_each_process(tsk) {
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -662,7 +670,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
add_to_kill_anon_file(t, page, vma, to_kill);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
i_mmap_unlock_read(mapping);
}
 
@@ -685,7 +693,7 @@ static void collect_procs_fsdax(struct page *page,
struct task_struct *tsk;
 
i_mmap_lock_read(mapping);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct task_struct *t = task_early_kill(tsk, true);
@@ -696,7 +704,7 @@ static void collect_procs_fsdax(struct page *page,
add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */
--
2.25.1