Introduce a new helper function, delete_module(), so that kernel modules
can be deleted from code outside of the kernel/module directory. No
functional change.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/module.h |  1 +
 kernel/module/main.c   | 82 ++++++++++++++++++++++++++++++++----------
 2 files changed, 65 insertions(+), 18 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index 1153b0d99a80..c24557f1b795 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -75,6 +75,7 @@ extern struct module_attribute module_uevent;
 /* These are either module local, or the kernel's dummy ones. */
 extern int init_module(void);
 extern void cleanup_module(void);
+extern int delete_module(struct module *mod);
 
 #ifndef MODULE
 /**
diff --git a/kernel/module/main.c b/kernel/module/main.c
index e1e8a7a9d6c1..3b48ee66db41 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -695,12 +695,74 @@ EXPORT_SYMBOL(module_refcount);
 /* This exists whether we can unload or not */
 static void free_module(struct module *mod);
 
+static void __delete_module(struct module *mod)
+{
+	char buf[MODULE_FLAGS_BUF_SIZE];
+
+	WARN_ON_ONCE(mod->state != MODULE_STATE_GOING);
+
+	/* Final destruction now no one is using it. */
+	if (mod->exit != NULL)
+		mod->exit();
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
+	ftrace_release_mod(mod);
+
+	async_synchronize_full();
+
+	/* Store the name and taints of the last unloaded module for diagnostic purposes */
+	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
+	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false),
+		sizeof(last_unloaded_module.taints));
+
+	free_module(mod);
+	/* someone could wait for the module in add_unformed_module() */
+	wake_up_all(&module_wq);
+}
+
+int delete_module(struct module *mod)
+{
+	int ret;
+
+	mutex_lock(&module_mutex);
+	if (!list_empty(&mod->source_list)) {
+		/* Other modules depend on us: get rid of them first. */
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	/* Doing init or already dying? */
+	if (mod->state != MODULE_STATE_LIVE) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* If it has an init func, it must have an exit func to unload */
+	if (mod->init && !mod->exit) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (try_release_module_ref(mod) != 0) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+	mod->state = MODULE_STATE_GOING;
+	mutex_unlock(&module_mutex);
+	__delete_module(mod);
+	return 0;
+
+out:
+	mutex_unlock(&module_mutex);
+	return ret;
+}
+
 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		unsigned int, flags)
 {
 	struct module *mod;
 	char name[MODULE_NAME_LEN];
-	char buf[MODULE_FLAGS_BUF_SIZE];
 	int ret, forced = 0;
 
 	if (!capable(CAP_SYS_MODULE) || modules_disabled)
@@ -750,23 +812,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		goto out;
 	mutex_unlock(&module_mutex);
 
-	/* Final destruction now no one is using it. */
-	if (mod->exit != NULL)
-		mod->exit();
-	blocking_notifier_call_chain(&module_notify_list,
-				     MODULE_STATE_GOING, mod);
-	klp_module_going(mod);
-	ftrace_release_mod(mod);
-
-	async_synchronize_full();
-
-	/* Store the name and taints of the last unloaded module for diagnostic purposes */
-	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
-	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
-
-	free_module(mod);
-	/* someone could wait for the module in add_unformed_module() */
-	wake_up_all(&module_wq);
+	__delete_module(mod);
 	return 0;
 out:
 	mutex_unlock(&module_mutex);
-- 
2.39.1
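For reviewers, a minimal sketch of how a caller outside kernel/module/ might
use the new helper. The wrapper name example_unload() and the way the struct
module pointer was obtained are assumptions for illustration only, not part of
this patch:

#include <linux/module.h>
#include <linux/printk.h>

/*
 * Hypothetical caller in another subsystem (illustration only): "mod" is
 * assumed to be a valid struct module pointer the calling subsystem already
 * tracks; how that pointer stays valid is outside the scope of this sketch.
 */
static int example_unload(struct module *mod)
{
	int err;

	/*
	 * delete_module() takes module_mutex itself, so it must not be
	 * called with module_mutex held.  Per the patch it returns
	 * -EWOULDBLOCK while other modules or outstanding references still
	 * pin "mod", and -EBUSY if the module is not MODULE_STATE_LIVE or
	 * has an init function but no exit function.
	 */
	err = delete_module(mod);
	if (err)
		pr_warn("could not unload %s: %d\n", mod->name, err);

	return err;
}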