at 12:58 PM, Andy Lutomirski <luto@xxxxxxxxxx> wrote:

> On Mon, Aug 27, 2018 at 12:43 PM, Nadav Amit <nadav.amit@xxxxxxxxx> wrote:
>> at 12:10 PM, Nadav Amit <nadav.amit@xxxxxxxxx> wrote:
>>
>>> at 11:58 AM, Andy Lutomirski <luto@xxxxxxxxxx> wrote:
>>>
>>>> On Mon, Aug 27, 2018 at 11:54 AM, Nadav Amit <nadav.amit@xxxxxxxxx> wrote:
>>>>>> On Mon, Aug 27, 2018 at 10:34 AM, Nadav Amit <nadav.amit@xxxxxxxxx> wrote:
>>>>>> What do you all think?
>>>>>
>>>>> I agree in general. But I think that current->mm would need to be loaded, as
>>>>> otherwise I am afraid it would break switch_mm_irqs_off().
>>>>
>>>> What breaks?
>>>
>>> Actually nothing. I just saw the IBPB stuff regarding tsk, but it should not
>>> matter.
>>
>> So here is what I got. It certainly needs some cleanup, but it boots.
>>
>> Let me know how crappy you find it...
>>
>>
>> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
>> index bbc796eb0a3b..336779650a41 100644
>> --- a/arch/x86/include/asm/mmu_context.h
>> +++ b/arch/x86/include/asm/mmu_context.h
>> @@ -343,4 +343,24 @@ static inline unsigned long __get_current_cr3_fast(void)
>>  	return cr3;
>>  }
>>
>> +typedef struct {
>> +	struct mm_struct *prev;
>> +} temporary_mm_state_t;
>> +
>> +static inline temporary_mm_state_t use_temporary_mm(struct mm_struct *mm)
>> +{
>> +	temporary_mm_state_t state;
>> +
>> +	lockdep_assert_irqs_disabled();
>> +	state.prev = this_cpu_read(cpu_tlbstate.loaded_mm);
>> +	switch_mm_irqs_off(NULL, mm, current);
>> +	return state;
>> +}
>> +
>> +static inline void unuse_temporary_mm(temporary_mm_state_t prev)
>> +{
>> +	lockdep_assert_irqs_disabled();
>> +	switch_mm_irqs_off(NULL, prev.prev, current);
>> +}
>> +
>>  #endif /* _ASM_X86_MMU_CONTEXT_H */
>> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
>> index 5715647fc4fe..ef62af9a0ef7 100644
>> --- a/arch/x86/include/asm/pgtable.h
>> +++ b/arch/x86/include/asm/pgtable.h
>> @@ -976,6 +976,10 @@ static inline void __meminit init_trampoline_default(void)
>>  	/* Default trampoline pgd value */
>>  	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
>>  }
>> +
>> +void __init patching_mm_init(void);
>> +#define patching_mm_init patching_mm_init
>> +
>>  # ifdef CONFIG_RANDOMIZE_MEMORY
>>  void __meminit init_trampoline(void);
>>  # else
>> diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
>> index 054765ab2da2..9f44262abde0 100644
>> --- a/arch/x86/include/asm/pgtable_64_types.h
>> +++ b/arch/x86/include/asm/pgtable_64_types.h
>> @@ -116,6 +116,9 @@ extern unsigned int ptrs_per_p4d;
>>  #define LDT_PGD_ENTRY		(pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
>>  #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
>>
>> +#define TEXT_POKE_PGD_ENTRY	-5UL
>> +#define TEXT_POKE_ADDR		(TEXT_POKE_PGD_ENTRY << PGDIR_SHIFT)
>> +
>>  #define __VMALLOC_BASE_L4	0xffffc90000000000UL
>>  #define __VMALLOC_BASE_L5	0xffa0000000000000UL
>>
>> diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
>> index 99fff853c944..840c72ec8c4f 100644
>> --- a/arch/x86/include/asm/pgtable_types.h
>> +++ b/arch/x86/include/asm/pgtable_types.h
>> @@ -505,6 +505,9 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>>  /* Install a pte for a particular vaddr in kernel space. */
>>  void set_pte_vaddr(unsigned long vaddr, pte_t pte);
>>
>> +struct mm_struct;
>> +void set_mm_pte_vaddr(struct mm_struct *mm, unsigned long vaddr, pte_t pte);
>> +
>>  #ifdef CONFIG_X86_32
>>  extern void native_pagetable_init(void);
>>  #else
>> diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
>> index 2ecd34e2d46c..cb364ea5b19d 100644
>> --- a/arch/x86/include/asm/text-patching.h
>> +++ b/arch/x86/include/asm/text-patching.h
>> @@ -38,4 +38,6 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
>>  extern int poke_int3_handler(struct pt_regs *regs);
>>  extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
>>
>> +extern struct mm_struct *patching_mm;
>> +
>>  #endif /* _ASM_X86_TEXT_PATCHING_H */
>> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
>> index a481763a3776..fd8a950b0d62 100644
>> --- a/arch/x86/kernel/alternative.c
>> +++ b/arch/x86/kernel/alternative.c
>> @@ -11,6 +11,7 @@
>>  #include <linux/stop_machine.h>
>>  #include <linux/slab.h>
>>  #include <linux/kdebug.h>
>> +#include <linux/mmu_context.h>
>>  #include <asm/text-patching.h>
>>  #include <asm/alternative.h>
>>  #include <asm/sections.h>
>> @@ -701,8 +702,36 @@ void *text_poke(void *addr, const void *opcode, size_t len)
>>  		WARN_ON(!PageReserved(pages[0]));
>>  		pages[1] = virt_to_page(addr + PAGE_SIZE);
>>  	}
>> -	BUG_ON(!pages[0]);
>> +
>>  	local_irq_save(flags);
>> +	BUG_ON(!pages[0]);
>> +
>> +	/*
>> +	 * During initial boot, it is hard to initialize patching_mm due to
>> +	 * dependencies in boot order.
>> +	 */
>> +	if (patching_mm) {
>> +		pte_t pte;
>> +		temporary_mm_state_t prev;
>> +
>> +		prev = use_temporary_mm(patching_mm);
>> +		pte = mk_pte(pages[0], PAGE_KERNEL);
>> +		set_mm_pte_vaddr(patching_mm, TEXT_POKE_ADDR, pte);
>> +		pte = mk_pte(pages[1], PAGE_KERNEL);
>> +		set_mm_pte_vaddr(patching_mm, TEXT_POKE_ADDR + PAGE_SIZE, pte);
>> +
>> +		memcpy((void *)(TEXT_POKE_ADDR | ((unsigned long)addr & ~PAGE_MASK)),
>> +		       opcode, len);
>> +
>> +		set_mm_pte_vaddr(patching_mm, TEXT_POKE_ADDR, __pte(0));
>> +		set_mm_pte_vaddr(patching_mm, TEXT_POKE_ADDR + PAGE_SIZE, __pte(0));
>> +		local_flush_tlb();
>
> Hmm. This stuff is busted on SMP, and it's IMO more complicated than
> needed. How about getting rid of all the weird TLB flushing stuff and
> instead putting the mapping at vaddr - __START_KERNEL_map or whatever
> it is? You *might* need to flush_tlb_mm_range() on module unload, but
> that's it.

I don’t see what’s wrong on SMP, since this entire piece of code should be
running under text_mutex.

I don’t quite understand your proposal. I really don’t want to have any
chance in which the page-tables for the poked address are not preallocated.

It is more complicated than needed, and there are redundant TLB flushes.
The reason I preferred to do it this way is to avoid calling other
functions that take locks during the software page-walk (see the sketch at
the end of this mail), and to avoid duplicating existing code. Yet,
duplication might be the way to go.

>> +		sync_core();
>
> I can't think of any case where sync_core() is needed. The mm switch
> serializes.

Good point!

> Also, is there any circumstance in which any of this is used before at
> least jump table init? All the early stuff is text_poke_early(),
> right?

Not before jump_label_init. However, I did not manage to get rid of the
two code-paths in text_poke(), since text_poke() is used relatively early
by x86_late_time_init(), and at that stage kmem_cache_alloc() - which is
needed to duplicate init_mm - still fails.
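For what it's worth, a late patching_mm_init() could be as small as the
sketch below. This is illustrative only, not the exact code in the patch:
mm_alloc() is the part that needs the kmem caches (it duplicates the kernel
half of init_mm into the new pgd on x86-64), and get_locked_pte() forces
the intermediate page-table levels to be preallocated so text_poke() never
allocates with IRQs disabled:

void __init patching_mm_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	/* This is exactly the allocation that still fails at x86_late_time_init() */
	patching_mm = mm_alloc();
	BUG_ON(!patching_mm);

	/*
	 * Walk (and thereby preallocate) the page-table levels for both
	 * poking pages; the ptes themselves stay clear until text_poke().
	 */
	ptep = get_locked_pte(patching_mm, TEXT_POKE_ADDR, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);

	ptep = get_locked_pte(patching_mm, TEXT_POKE_ADDR + PAGE_SIZE, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}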
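P.S. To make the "software page-walk" point above concrete: with every
level preallocated as above, set_mm_pte_vaddr() boils down to a plain
descent that takes no locks and allocates nothing. Again a sketch of what I
mean, not the exact helper in the patch:

void set_mm_pte_vaddr(struct mm_struct *mm, unsigned long vaddr, pte_t pte)
{
	/* Pure descent through preallocated levels - no locks, no allocation */
	pgd_t *pgd = pgd_offset(mm, vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	pte_t *ptep = pte_offset_kernel(pmd, vaddr);

	set_pte(ptep, pte);
	/* The caller does whatever TLB flush is needed (local_flush_tlb()) */
}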