+Paul

> On Mar 25, 2022, at 5:06 PM, Edgecombe, Rick P <rick.p.edgecombe@xxxxxxxxx> wrote:
> 
> On Fri, 2022-02-04 at 10:57 -0800, Song Liu wrote:
>> From: Song Liu <songliubraving@xxxxxx>
>> 
>> This enables module_alloc() to allocate huge pages for 2MB+ requests.
>> To check the effect of this change, we need to enable
>> CONFIG_PTDUMP_DEBUGFS and call module_alloc(2MB). Before the change,
>> /sys/kernel/debug/page_tables/kernel shows a pte for this map. With
>> the change, /sys/kernel/debug/page_tables/kernel shows a pmd for
>> this map.
>> 
>> Signed-off-by: Song Liu <songliubraving@xxxxxx>
>> ---
>>  arch/x86/Kconfig | 1 +
>>  1 file changed, 1 insertion(+)
> 
> Hi,
> 
> I just saw this upstream today. Glad to see this functionality, but I
> think turning on huge vmalloc pages for x86 needs a bit more. I'll
> describe a couple of possible failure modes I haven't actually tested.
> 
> One problem is that the direct map permission reset part in vmalloc
> assumes any special permissioned pages are mapped 4k on the direct
> map. Otherwise the operation could fail to reset a page RW if a PTE
> page allocation fails when it tries to split the page to toggle a 4k
> sized region NP/P. If you are not familiar, x86 CPA generally leaves
> the direct map page sizes mirroring the primary alias (vmalloc). So
> once vmalloc has huge pages, the special permissioned direct map
> aliases will have them too. This limitation of HAVE_ARCH_HUGE_VMALLOC
> is actually hinted at in the Kconfig comments, but I guess it wasn't
> made explicit that x86 has these properties.
> 
> I think to make the vmalloc resetting part safe:
> 1. set_direct_map_invalid/default() needs to support multiple pages
>    like this[0].
> 2. vm_remove_mappings() needs to call them with the correct page size
>    in the hpage case so they don't cause a split[1].
> 3. Then hibernate needs to be blocked during this operation so it
>    doesn't encounter the now sometimes huge NP pages, which it can't
>    handle. Not sure what the right way to do this is, but potentially
>    like in the diff below[1].
> 
> Another problem is that CPA will sometimes now split pages of vmalloc
> mappings in cases where it sets a region of an allocation to a
> different permission than the rest (for example, regular modules
> calling set_memory_x() on the text section). Before this change,
> these couldn't fail, since the module space mapping would never
> require a split. Module code doesn't check for failure there, so I'm
> thinking it would now proceed to try to execute NX memory if the
> split failed. It could only happen on allocation of especially large
> modules. Maybe it should just be avoided for now by having regular
> module allocations pass VM_NO_HUGE_VMAP on x86. And BPF could call
> __vmalloc_node_range() directly to get 2MB vmallocs.

I like this direction, but I am afraid it is not enough on its own.
Using VM_NO_HUGE_VMAP in module_alloc() will make sure we don't
allocate huge pages for modules. But other users of
__vmalloc_node_range(), such as vzalloc in Paul's report, may still
hit the issue. Maybe we need another flag, VM_FORCE_HUGE_VMAP, that
bypasses the vmap_allow_huge check, so that huge pages become opt-in
for callers like BPF.
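
For the module_alloc() side of your suggestion, I think the x86 change
would be something like this (untested sketch; I have dropped the hunk
offsets, and the context lines are from my tree, so they may not match
yours exactly):

diff --git i/arch/x86/kernel/module.c w/arch/x86/kernel/module.c
--- i/arch/x86/kernel/module.c
+++ w/arch/x86/kernel/module.c
@@ ... @@ void *module_alloc(unsigned long size)
 	p = __vmalloc_node_range(size, MODULE_ALIGN,
 				 MODULES_VADDR + get_module_load_offset(),
 				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
+				 VM_DEFER_KMEMLEAK | VM_NO_HUGE_VMAP,
+				 NUMA_NO_NODE,
 				 __builtin_return_address(0));

That keeps regular modules on 4k mappings regardless of what we do for
BPF.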
For VM_FORCE_HUGE_VMAP itself, something like the diff below (I think
get_module_load_offset() would also need to be made visible outside
arch/x86, but you get the idea). Would this work?

Thanks,
Song

diff --git i/include/linux/vmalloc.h w/include/linux/vmalloc.h
index 3b1df7da402d..a639405dab99 100644
--- i/include/linux/vmalloc.h
+++ w/include/linux/vmalloc.h
@@ -27,6 +27,7 @@ struct notifier_block;		/* in notifier.h */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
+#define VM_FORCE_HUGE_VMAP	0x00000800	/* force PMD_SIZE mapping (bypass vmap_allow_huge check) */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
 	!defined(CONFIG_KASAN_VMALLOC)
diff --git i/kernel/bpf/core.c w/kernel/bpf/core.c
index 13e9dbeeedf3..3cd0ff66d39c 100644
--- i/kernel/bpf/core.c
+++ w/kernel/bpf/core.c
@@ -851,13 +851,22 @@ static LIST_HEAD(pack_list);
 #define BPF_HPAGE_MASK PAGE_MASK
 #endif
 
+static void *bpf_prog_pack_vmalloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, MODULE_ALIGN,
+				    MODULES_VADDR + get_module_load_offset(),
+				    MODULES_END, GFP_KERNEL, PAGE_KERNEL,
+				    VM_DEFER_KMEMLEAK | VM_FORCE_HUGE_VMAP,
+				    NUMA_NO_NODE, __builtin_return_address(0));
+}
+
 static size_t select_bpf_prog_pack_size(void)
 {
 	size_t size;
 	void *ptr;
 
 	size = BPF_HPAGE_SIZE * num_online_nodes();
-	ptr = module_alloc(size);
+	ptr = bpf_prog_pack_vmalloc(size);
 
 	/* Test whether we can get huge pages. If not just use PAGE_SIZE
 	 * packs.
@@ -881,7 +890,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
 			       GFP_KERNEL);
 	if (!pack)
 		return NULL;
-	pack->ptr = module_alloc(bpf_prog_pack_size);
+	pack->ptr = bpf_prog_pack_vmalloc(bpf_prog_pack_size);
 	if (!pack->ptr) {
 		kfree(pack);
 		return NULL;
diff --git i/mm/vmalloc.c w/mm/vmalloc.c
index e163372d3967..df2dd6779fa8 100644
--- i/mm/vmalloc.c
+++ w/mm/vmalloc.c
@@ -3106,7 +3106,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		return NULL;
 	}
 
-	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
+	if ((vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) ||
+	    (vm_flags & VM_FORCE_HUGE_VMAP)) {
 		unsigned long size_per_node;
 
 		/*
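
The idea is that, with huge vmalloc otherwise kept off for x86, only
callers that pass the flag get PMD-sized mappings. Such a caller would
look roughly like this hypothetical helper (illustration only, not
part of the diff):

/* hypothetical: a vzalloc() variant that opts in to 2MB mappings */
void *vzalloc_huge(unsigned long size)
{
	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START,
				    VMALLOC_END, GFP_KERNEL | __GFP_ZERO,
				    PAGE_KERNEL, VM_FORCE_HUGE_VMAP,
				    NUMA_NO_NODE,
				    __builtin_return_address(0));
}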
> 
> [0]
> https://lore.kernel.org/lkml/20210208084920.2884-5-rppt@xxxxxxxxxx/#t
> 
> [1] Untested, but something like this possibly:
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 99e0f3e8d1a5..97c4ca3a29b1 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -42,6 +42,7 @@
>  #include <linux/sched/mm.h>
>  #include <asm/tlbflush.h>
>  #include <asm/shmparam.h>
> +#include <linux/suspend.h>
>  
>  #include "internal.h"
>  #include "pgalloc-track.h"
> @@ -2241,7 +2242,7 @@ EXPORT_SYMBOL(vm_map_ram);
>  
>  static struct vm_struct *vmlist __initdata;
>  
> -static inline unsigned int vm_area_page_order(struct vm_struct *vm)
> +static inline unsigned int vm_area_page_order(const struct vm_struct *vm)
>  {
>  #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
>  	return vm->page_order;
> @@ -2560,12 +2561,12 @@ struct vm_struct *remove_vm_area(const void *addr)
>  static inline void set_area_direct_map(const struct vm_struct *area,
>  				       int (*set_direct_map)(struct page *page))
>  {
> +	unsigned int page_order = vm_area_page_order(area);
>  	int i;
>  
> -	/* HUGE_VMALLOC passes small pages to set_direct_map */
> -	for (i = 0; i < area->nr_pages; i++)
> +	for (i = 0; i < area->nr_pages; i += 1U << page_order)
>  		if (page_address(area->pages[i]))
> -			set_direct_map(area->pages[i]);
> +			set_direct_map(area->pages[i], 1U << page_order);
>  }
>  
>  /* Handle removing and resetting vm mappings related to the vm_struct. */
> @@ -2592,6 +2593,10 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
>  		return;
>  	}
>  
> +	/* Hibernate can't handle large NP pages */
> +	if (page_order)
> +		lock_system_sleep();
> +
>  	/*
>  	 * If execution gets here, flush the vm mapping and reset the direct
>  	 * map. Find the start and end range of the direct mappings to make sure
> @@ -2617,6 +2622,9 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
>  	set_area_direct_map(area, set_direct_map_invalid_noflush);
>  	_vm_unmap_aliases(start, end, flush_dmap);
>  	set_area_direct_map(area, set_direct_map_default_noflush);
> +
> +	if (page_order)
> +		unlock_system_sleep();
>  }
>  
>  static void __vunmap(const void *addr, int deallocate_pages)
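
One note on the diff in [1]: it passes a page count to
set_direct_map(), so it assumes the multi-page
set_direct_map_*_noflush() interface from [0]. As I read that series,
the prototypes become roughly (sketch from the link, may not match the
final version):

/* from [0]: set_direct_map_* take a number of pages */
int set_direct_map_invalid_noflush(struct page *page, int numpages);
int set_direct_map_default_noflush(struct page *page, int numpages);

and the set_direct_map function pointer taken by set_area_direct_map()
would change to match.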