From: Wardenjohn <zhangwarden@xxxxxxxxx>

A system may contain more than one livepatch module, and we can see
which patches are enabled. If several patches applied to the system
modify the same function, livepatch uses the version of the function
that is on top of the function stack. However, we currently cannot
tell exactly which patch provides the version of the function that is
in use.

Introduce a new sysfs attribute, "using", for klp_func. For example,
if several patches change the function "meminfo_proc_show", the
"enabled" attribute of all of those patches is 1. With the new
attribute, we can easily tell which patch provides the version that
is currently in use:

cat /sys/kernel/livepatch/<patch1>/<object1>/<function1,sympos>/using
-> 0 means that function1 of patch1 is not in use.

cat /sys/kernel/livepatch/<patchN>/<object1>/<function1,sympos>/using
-> 1 means that function1 of patchN is in use.

Signed-off-by: Wardenjohn <zhangwarden@xxxxxxxxx>
---
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..26519c134578 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -37,6 +37,7 @@
  * @nop: temporary patch to use the original code again; dyn. allocated
  * @patched: the func has been added to the klp_ops list
  * @transition: the func is currently being applied or reverted
+ * @using: the func is on top of the func_stack and currently in use
  *
  * The patched and transition variables define the func's patching state. When
  * patching, a func is always in one of the following states:
@@ -75,6 +76,7 @@ struct klp_func {
 	bool nop;
 	bool patched;
 	bool transition;
+	bool using;
 };
 
 struct klp_object;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 52426665eecc..b938667f96e3 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -349,6 +349,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/patched
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
+ * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>/using
  */
 
 static int __klp_disable_patch(struct klp_patch *patch);
@@ -470,6 +471,22 @@ static struct attribute *klp_object_attrs[] = {
 };
 ATTRIBUTE_GROUPS(klp_object);
 
+static ssize_t using_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct klp_func *func;
+
+	func = container_of(kobj, struct klp_func, kobj);
+	return sysfs_emit(buf, "%d\n", func->using);
+}
+
+static struct kobj_attribute using_kobj_attr = __ATTR_RO(using);
+static struct attribute *klp_func_attrs[] = {
+	&using_kobj_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(klp_func);
+
 static void klp_free_object_dynamic(struct klp_object *obj)
 {
 	kfree(obj->name);
@@ -631,6 +648,7 @@ static void klp_kobj_release_func(struct kobject *kobj)
 static const struct kobj_type klp_ktype_func = {
 	.release = klp_kobj_release_func,
 	.sysfs_ops = &kobj_sysfs_ops,
+	.default_groups = klp_func_groups,
 };
 
 static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
@@ -903,6 +921,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 static void klp_init_func_early(struct klp_object *obj,
 				struct klp_func *func)
 {
+	func->using = false;
 	kobject_init(&func->kobj, &klp_ktype_func);
 	list_add_tail(&func->node, &obj->func_list);
 }
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 90408500e5a3..561bfb3f59f7 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -127,6 +127,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 static void klp_unpatch_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
+	struct klp_func *stack_top_func;
 
 	if (WARN_ON(!func->patched))
 		return;
@@ -152,6 +153,9 @@ static void klp_unpatch_func(struct klp_func *func)
 		kfree(ops);
 	} else {
 		list_del_rcu(&func->stack_node);
+		stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+						  stack_node);
+		stack_top_func->using = true;
 	}
 
 	func->patched = false;
@@ -160,6 +164,7 @@ static void klp_unpatch_func(struct klp_func *func)
 static int klp_patch_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
+	struct klp_func *stack_top_func;
 	int ret;
 
 	if (WARN_ON(!func->old_func))
@@ -170,6 +175,7 @@ static int klp_patch_func(struct klp_func *func)
 
 	ops = klp_find_ops(func->old_func);
 	if (!ops) {
+		/* The function is being patched for the first time. */
 		unsigned long ftrace_loc;
 
 		ftrace_loc = ftrace_location((unsigned long)func->old_func);
@@ -211,9 +217,15 @@ static int klp_patch_func(struct klp_func *func)
 			kfree(ops);
 			goto err;
 		}
-
+		func->using = true;
 	} else {
+		/* Mark the function currently in use (top of stack) as unused. */
+		stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+						  stack_node);
+		stack_top_func->using = false;
+		/* The newly patched function is now the one in use. */
 		list_add_rcu(&func->stack_node, &ops->func_stack);
+		func->using = true;
 	}
 
 	func->patched = true;
-- 
2.18.2
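
A usage illustration (not part of the patch itself): a minimal shell
sketch that walks the sysfs hierarchy introduced above and reports
which loaded patch provides the in-use version of a given function.
The directory name "meminfo_proc_show,0" is a hypothetical example of
a "<function,sympos>" entry; the "using" file exists only with this
patch applied.

#!/bin/sh
# Sketch: find which livepatch provides the in-use version of a function.
# FUNC is the "<function,sympos>" directory name; the value below is only
# an example.
FUNC="meminfo_proc_show,0"

for patch in /sys/kernel/livepatch/*/; do
	for obj in "$patch"*/; do
		f="$obj$FUNC/using"
		# A value of 1 means this patch's version of FUNC is in use.
		if [ -f "$f" ] && [ "$(cat "$f")" = "1" ]; then
			echo "$FUNC is in use from: $patch"
		fi
	done
done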