klp_mutex isn't acquired before calling kobject_put() on the klp_patch
kobject, so it is fine to free the klp_patch object synchronously. The one
remaining issue is the enabled_store() method, in which the klp_patch
kobject itself is deleted and released: removing a kobject from inside one
of its own attributes' ->store() callbacks would ordinarily deadlock on the
attribute's active protection. However, sysfs provides APIs for exactly
this corner case, so use sysfs_break_active_protection() and
sysfs_unbreak_active_protection() when releasing the klp_patch kobject
from enabled_store(); the enabled attribute has to be removed before the
klp_patch kobject is deleted.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 include/linux/livepatch.h     |  1 -
 kernel/livepatch/core.c       | 37 +++++++++++++++--------------------
 kernel/livepatch/core.h       |  2 +-
 kernel/livepatch/transition.c |  2 +-
 4 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9712818997c5..4dcebf52fac5 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -169,7 +169,6 @@ struct klp_patch {
 	struct list_head obj_list;
 	bool enabled;
 	bool forced;
-	struct work_struct free_work;
 };
 
 #define klp_for_each_object_static(patch, obj) \
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 27768bb5a38c..36999cddc011 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -337,6 +337,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 	int ret;
 	bool enabled;
 	LIST_HEAD(to_free);
+	struct kernfs_node *kn = NULL;
 
 	ret = kstrtobool(buf, &enabled);
 	if (ret)
@@ -369,10 +370,18 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 out:
 	mutex_unlock(&klp_mutex);
 
-	klp_free_patches_async(&to_free);
-
 	if (ret)
 		return ret;
+
+	if (!list_empty(&to_free)) {
+		kn = sysfs_break_active_protection(kobj, &attr->attr);
+		WARN_ON_ONCE(!kn);
+		sysfs_remove_file(kobj, &attr->attr);
+		klp_free_patches(&to_free);
+		if (kn)
+			sysfs_unbreak_active_protection(kn);
+	}
+
 	return count;
 }
 
@@ -684,32 +693,19 @@ static void klp_free_patch_finish(struct klp_patch *patch)
 	kobject_put(&patch->kobj);
 }
 
-/*
- * The livepatch might be freed from sysfs interface created by the patch.
- * This work allows to wait until the interface is destroyed in a separate
- * context.
- */
-static void klp_free_patch_work_fn(struct work_struct *work)
-{
-	struct klp_patch *patch =
-		container_of(work, struct klp_patch, free_work);
-
-	klp_free_patch_finish(patch);
-}
-
-static void klp_free_patch_async(struct klp_patch *patch)
+static void klp_free_patch(struct klp_patch *patch)
 {
 	klp_free_patch_start(patch);
-	schedule_work(&patch->free_work);
+	klp_free_patch_finish(patch);
 }
 
-void klp_free_patches_async(struct list_head *to_free)
+void klp_free_patches(struct list_head *to_free)
 {
 	struct klp_patch *patch, *tmp_patch;
 
 	list_for_each_entry_safe(patch, tmp_patch, to_free, list) {
 		list_del_init(&patch->list);
-		klp_free_patch_async(patch);
+		klp_free_patch(patch);
 	}
 }
 
@@ -873,7 +869,6 @@ static int klp_init_patch_early(struct klp_patch *patch)
 	kobject_init(&patch->kobj, &klp_ktype_patch);
 	patch->enabled = false;
 	patch->forced = false;
-	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
 
 	klp_for_each_object_static(patch, obj) {
 		if (!obj->funcs)
@@ -1067,7 +1062,7 @@ int klp_enable_patch(struct klp_patch *patch)
 
 	mutex_unlock(&klp_mutex);
 
-	klp_free_patches_async(&to_free);
+	klp_free_patches(&to_free);
 
 	return 0;
 
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index 8ff97745ba40..ea593f370049 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -13,7 +13,7 @@ extern struct list_head klp_patches;
 #define klp_for_each_patch(patch)	\
 	list_for_each_entry(patch, &klp_patches, list)
 
-void klp_free_patches_async(struct list_head *to_free);
+void klp_free_patches(struct list_head *to_free);
 void klp_unpatch_replaced_patches(struct klp_patch *new_patch);
 void klp_discard_nops(struct klp_patch *new_patch);
 
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 0c1857405c17..1a339a076dd4 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -40,7 +40,7 @@ static void klp_transition_work_fn(struct work_struct *work)
 
 	mutex_unlock(&klp_mutex);
 
-	klp_free_patches_async(&to_free);
+	klp_free_patches(&to_free);
 }
 
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
-- 
2.31.1
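
For reviewers who want the sysfs self-removal pattern in isolation: below is
a minimal sketch of the dance enabled_store() performs above, using a
hypothetical foo_store() attribute method. The sysfs_*() helpers,
WARN_ON_ONCE() and kobject_put() are real kernel APIs (the same ones the
patch uses); the surrounding function and its name are illustrative only.

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>

/* Hypothetical attribute whose store() tears down its own kobject. */
static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/*
	 * Take an extra reference on the kernfs node and drop the active
	 * protection, so that removing this attribute from inside its own
	 * ->store() callback does not wait for the callback to return.
	 */
	kn = sysfs_break_active_protection(kobj, &attr->attr);
	WARN_ON_ONCE(!kn);

	/* Remove the attribute first, then release the kobject itself. */
	sysfs_remove_file(kobj, &attr->attr);
	kobject_put(kobj);

	/* Drop the reference taken by sysfs_break_active_protection(). */
	if (kn)
		sysfs_unbreak_active_protection(kn);

	return count;
}

Removing the attribute before dropping the kobject reference mirrors the
ordering in the patch: once the file is gone, no further store() calls can
race with the teardown.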