From: Wardenjohn <zhangwarden@xxxxxxxxx>

---
 include/linux/livepatch.h     |  1 +
 kernel/livepatch/patch.c      |  2 +-
 kernel/livepatch/transition.c | 24 ++++++++++++------------
 3 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9b9b38e89563..c1c53cd5b227 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -19,6 +19,7 @@
 
 /* task patch states */
 #define KLP_UNDEFINED	-1
+#define KLP_IDLE	-1
 #define KLP_UNPATCHED	 0
 #define KLP_PATCHED	 1
 
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 4152c71507e2..01d3219289ee 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -95,7 +95,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 		patch_state = current->patch_state;
 
-		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);
+		WARN_ON_ONCE(patch_state == KLP_IDLE);
 
 		if (patch_state == KLP_UNPATCHED) {
 			/*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index e54c3d60a904..73f8f98dba84 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -23,7 +23,7 @@ static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
 
 struct klp_patch *klp_transition_patch;
 
-static int klp_target_state = KLP_UNDEFINED;
+static int klp_target_state = KLP_IDLE;
 
 static unsigned int klp_signals_cnt;
 
@@ -123,21 +123,21 @@ static void klp_complete_transition(void)
 		klp_for_each_func(obj, func)
 			func->transition = false;
 
-	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
+	/* Prevent klp_ftrace_handler() from seeing KLP_IDLE state */
 	if (klp_target_state == KLP_PATCHED)
 		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
 		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
-		task->patch_state = KLP_UNDEFINED;
+		task->patch_state = KLP_IDLE;
 	}
 	read_unlock(&tasklist_lock);
 
 	for_each_possible_cpu(cpu) {
 		task = idle_task(cpu);
 		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
-		task->patch_state = KLP_UNDEFINED;
+		task->patch_state = KLP_IDLE;
 	}
 
 	klp_for_each_object(klp_transition_patch, obj) {
@@ -152,7 +152,7 @@ static void klp_complete_transition(void)
 	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-	klp_target_state = KLP_UNDEFINED;
+	klp_target_state = KLP_IDLE;
 	klp_transition_patch = NULL;
 }
 
@@ -455,7 +455,7 @@ void klp_try_complete_transition(void)
 	struct klp_patch *patch;
 	bool complete = true;
 
-	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
+	WARN_ON_ONCE(klp_target_state == KLP_IDLE);
 
 	/*
 	 * Try to switch the tasks to the target patch state by walking their
@@ -532,7 +532,7 @@ void klp_start_transition(void)
 	struct task_struct *g, *task;
 	unsigned int cpu;
 
-	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
+	WARN_ON_ONCE(klp_target_state == KLP_IDLE);
 
 	pr_notice("'%s': starting %s transition\n",
 		  klp_transition_patch->mod->name,
@@ -578,7 +578,7 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	struct klp_func *func;
 	int initial_state = !state;
 
-	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
+	WARN_ON_ONCE(klp_target_state != KLP_IDLE);
 
 	klp_transition_patch = patch;
 
@@ -597,7 +597,7 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	 */
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
-		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
+		WARN_ON_ONCE(task->patch_state != KLP_IDLE);
 		task->patch_state = initial_state;
 	}
 	read_unlock(&tasklist_lock);
@@ -607,19 +607,19 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	 */
 	for_each_possible_cpu(cpu) {
 		task = idle_task(cpu);
-		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
+		WARN_ON_ONCE(task->patch_state != KLP_IDLE);
 		task->patch_state = initial_state;
 	}
 
 	/*
 	 * Enforce the order of the task->patch_state initializations and the
 	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
-	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
+	 * see a func in transition with a task->patch_state of KLP_IDLE.
 	 *
 	 * Also enforce the order of the klp_target_state write and future
 	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
 	 * __klp_sched_try_switch() don't set a task->patch_state to
-	 * KLP_UNDEFINED.
+	 * KLP_IDLE.
 	 */
 	smp_wmb();
-- 
2.37.3