On Wed, Oct 16, 2019 at 08:51:27AM +0200, Miroslav Benes wrote: > On Tue, 15 Oct 2019, Joe Lawrence wrote: > > > On 10/15/19 10:13 AM, Miroslav Benes wrote: > > > Yes, it does. klp_module_coming() calls module_disable_ro() on all > > > patching modules which patch the coming module in order to call > > > apply_relocate_add(). New (patching) code for a module can be relocated > > > only when the relevant module is loaded. > > > > FWIW, would the LPC blue-sky2 model (ie, Steve's suggestion @ plumber's where > > livepatches only patch a single object and updates are kept on disk to handle > > coming module updates as they are loaded) eliminate those outstanding > > relocations and the need to perform this late permission flipping? > > Yes, it should, but we don't have to wait for it. PeterZ proposed a > different solution to this specific issue in > https://lore.kernel.org/lkml/20191015141111.GP2359@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/ > > It should not be a problem to create a live patch module like that and the > code in kernel/livepatch/ is almost ready. Something like > module_section_disable_ro(mod, section) (and similar for X protection) > should be enough. Module reloads would still require juggling with the > protections, but I think it is all feasible. Something a little like so.. completely fresh off the keyboard. 
--- --- a/include/linux/module.h +++ b/include/linux/module.h @@ -853,6 +853,18 @@ static inline void module_enable_ro(cons static inline void module_disable_ro(const struct module *mod) { } #endif +#if defined(CONFIG_STRICT_MODULE_RWX) && defined(CONFIG_LIVEPATCH) +extern void module_section_disable_ro(struct module *mod, const char *sec); +extern void module_section_enable_ro(struct module *mod, const char *sec); +extern void module_section_disable_x(struct module *mod, const char *sec); +extern void module_section_enable_x(struct module *mod, const char *sec); +#else +static inline void module_section_disable_ro(struct module *mod, const char *sec) { } +static inline void module_section_enable_ro(struct module *mod, const char *sec) { } +static inline void module_section_disable_x(struct module *mod, const char *sec) { } +static inline void module_section_enable_x(struct module *mod, const char *sec) { } +#endif + #ifdef CONFIG_GENERIC_BUG void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, struct module *); --- a/kernel/module.c +++ b/kernel/module.c @@ -2107,6 +2107,54 @@ static void free_module_elf(struct modul kfree(mod->klp_info->secstrings); kfree(mod->klp_info); } + +#ifdef CONFIG_STRICT_MODULE_RWX + +static void __frob_section(Elf_Shdr *sec, int (*set_memory)(unsigned long start, int num_pages)) +{ + BUG_ON((unsigned long)sec->sh_addr & (PAGE_SIZE-1)); + BUG_ON((unsigned long)sec->sh_size & (PAGE_SIZE-1)); + set_memory((unsigned long)sec->sh_addr, sec->sh_size >> PAGE_SHIFT); +} + +static void frob_section(struct module *mod, const char *section, + int (*set_memory)(unsigned long start, int num_pages)) +{ + struct klp_modinfo *info = mod->klp_info; + const char *secname; + Elf_Shdr *s; + + for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) { + secname = info->secstrings + s->sh_name; + if (strcmp(secname, section)) + continue; + + __frob_section(s, set_memory); + } +} + +void module_section_disable_ro(struct 
module *mod, const char *section) +{ + frob_section(mod, section, set_memory_rw); +} + +void module_section_enable_ro(struct module *mod, const char *section) +{ + frob_section(mod, section, set_memory_ro); +} + +void module_section_disable_x(struct module *mod, const char *section) +{ + frob_section(mod, section, set_memory_nx); +} + +void module_section_enable_x(struct module *mod, const char *section) +{ + frob_section(mod, section, set_memory_x); +} + +#endif /* CONFIG_STRICT_MODULE_RWX */ + #else /* !CONFIG_LIVEPATCH */ static int copy_module_elf(struct module *mod, struct load_info *info) {