Make sure to prod idle CPUs so they call klp_update_patch_state(): an
idle CPU can sit in its idle loop indefinitely and never reach a
switching point on its own.

Convert klp_try_switch_task() to return an error code so that the
caller can tell a still-running task (-EBUSY) apart from other
failures, and wake the CPU with wake_up_if_idle() in the -EBUSY case so
its idle task goes through the idle loop again. Note that "complete" is
only ever cleared in the loop, so a later successful switch cannot mask
an earlier failure.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/livepatch/transition.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -287,21 +287,21 @@ static int klp_check_task(struct task_st
  * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
- * if the stack is unreliable, return false.
+ * if the stack is unreliable, return an error.
  */
-static bool klp_try_switch_task(struct task_struct *task)
+static int klp_try_switch_task(struct task_struct *task)
 {
 	const char *old_name;
 	int ret;
 
 	/* check if this task has already switched over */
 	if (task->patch_state == klp_target_state)
-		return true;
+		return 0;
 
 	/*
 	 * For arches which don't have reliable stack traces, we have to rely
 	 * on other methods (e.g., switching tasks at kernel exit).
 	 */
 	if (!klp_have_reliable_stack())
-		return false;
+		return -EINVAL;
 
 	/*
 	 * Now try to check the stack for any to-be-patched or to-be-unpatched
@@ -324,7 +324,7 @@ static bool klp_try_switch_task(struct t
 		break;
 	}
 
-	return !ret;
+	return ret;
 }
 
 /*
@@ -394,7 +394,7 @@ void klp_try_complete_transition(void)
 	 */
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task)
-		if (!klp_try_switch_task(task))
+		if (klp_try_switch_task(task))
 			complete = false;
 	read_unlock(&tasklist_lock);
 
@@ -405,8 +405,13 @@
 	for_each_possible_cpu(cpu) {
 		task = idle_task(cpu);
 		if (cpu_online(cpu)) {
-			if (!klp_try_switch_task(task))
-				complete = false;
+			int ret = klp_try_switch_task(task);
+
+			if (ret) {
+				complete = false;
+				if (ret == -EBUSY)
+					wake_up_if_idle(cpu);
+			}
 		} else if (task->patch_state != klp_target_state) {
 			/* offline idle tasks can be switched immediately */
 			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
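
For reference, a minimal userspace sketch of the accumulation pattern
the last hunk relies on; try_switch() and the task IDs below are made
up for illustration, this is not kernel code. The point is that
"complete" starts true and is only ever cleared, so one -EBUSY task
cannot be masked by a later task that switches fine:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for klp_try_switch_task(): 0 on success,
 * -EBUSY when the task is still running. */
static int try_switch(int task_id)
{
	return (task_id == 2) ? -EBUSY : 0;
}

int main(void)
{
	bool complete = true;
	int id;

	for (id = 0; id < 4; id++) {
		int ret = try_switch(id);

		if (ret) {
			/* Clear only; "complete = !ret" here would let
			 * task 3's success hide task 2's -EBUSY. */
			complete = false;
			if (ret == -EBUSY)
				printf("task %d busy, would prod its CPU\n", id);
		}
	}

	printf("transition %s\n", complete ? "complete" : "incomplete");
	return 0;
}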