On 12/03/2024 at 23:28, Rick Edgecombe wrote:
> When memory is being placed, mmap() will take care to respect the guard
> gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and
> VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap()
> needs to consider two things:
> 1. That the new mapping isn't placed in any existing mapping's guard
> gaps.
> 2. That the new mapping isn't placed such that any existing mapping
> ends up in *its* guard gaps.
>
> The long-standing behavior of mmap() is to ensure 1, but not take any
> care around 2. So, for example, if there is a PAGE_SIZE free area and a
> PAGE_SIZE mapping of a type that has a guard gap is being placed,
> mmap() may place the shadow stack in the PAGE_SIZE free area. Then the
> mapping that is supposed to have a guard gap will not have a gap to the
> adjacent VMA.
>
> Add a THP implementation of the vm_flags variant of get_unmapped_area().
> Future changes will call this from mmap.c in the do_mmap() path to allow
> shadow stacks to be placed with consideration taken for the start guard
> gap. Shadow stack memory is always private and anonymous and so special
> guard gap logic is not needed in a lot of cases, but it can be mapped by
> THP, so it needs to be handled.
>
> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx>

Reviewed-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>

> ---
>  include/linux/huge_mm.h | 11 +++++++++++
>  mm/huge_memory.c        | 23 ++++++++++++++++-------
>  mm/mmap.c               | 12 +++++++-----
>  3 files changed, 34 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 5adb86af35fc..8744c808d380 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -262,6 +262,9 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
>
>  unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
>  		unsigned long len, unsigned long pgoff, unsigned long flags);
> +unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff, unsigned long flags,
> +		vm_flags_t vm_flags);
>
>  void folio_prep_large_rmappable(struct folio *folio);
>  bool can_split_folio(struct folio *folio, int *pextra_pins);
> @@ -416,6 +419,14 @@ static inline void folio_prep_large_rmappable(struct folio *folio) {}
>
>  #define thp_get_unmapped_area	NULL
>
> +static inline unsigned long
> +thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff,
> +		unsigned long flags, vm_flags_t vm_flags)
> +{
> +	return 0;
> +}
> +
>  static inline bool
>  can_split_folio(struct folio *folio, int *pextra_pins)
>  {
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index bc3bf441e768..349c93a1a7c3 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -806,7 +806,8 @@ static inline bool is_transparent_hugepage(struct folio *folio)
>
>  static unsigned long __thp_get_unmapped_area(struct file *filp,
>  		unsigned long addr, unsigned long len,
> -		loff_t off, unsigned long flags, unsigned long size)
> +		loff_t off, unsigned long flags, unsigned long size,
> +		vm_flags_t vm_flags)
>  {
>  	loff_t off_end = off + len;
>  	loff_t off_align = round_up(off, size);
> @@ -822,8 +823,8 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
>  	if (len_pad < len || (off + len_pad) < off)
>  		return 0;
>
> -	ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad,
> -				   off >> PAGE_SHIFT, flags);
> +	ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
> +					   off >> PAGE_SHIFT, flags, vm_flags);
>
>  	/*
>  	 * The failure might be due to length padding. The caller will retry
> @@ -848,17 +849,25 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
>  	return ret;
>  }
>
> -unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> -		unsigned long len, unsigned long pgoff, unsigned long flags)
> +unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff, unsigned long flags,
> +		vm_flags_t vm_flags)
>  {
>  	unsigned long ret;
>  	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
>
> -	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
> +	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
>  	if (ret)
>  		return ret;
>
> -	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
> +	return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
> +					    vm_flags);
> +}
> +
> +unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff, unsigned long flags)
> +{
> +	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
>  }
>  EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
>
> diff --git a/mm/mmap.c b/mm/mmap.c
> index a3128ed26676..68381b90f906 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1863,20 +1863,22 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
>  		 * so use shmem's get_unmapped_area in case it can be huge.
>  		 */
>  		get_area = shmem_get_unmapped_area;
> -	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> -		/* Ensures that larger anonymous mappings are THP aligned. */
> -		get_area = thp_get_unmapped_area;
>  	}
>
>  	/* Always treat pgoff as zero for anonymous memory. */
>  	if (!file)
>  		pgoff = 0;
>
> -	if (get_area)
> +	if (get_area) {
>  		addr = get_area(file, addr, len, pgoff, flags);
> -	else
> +	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> +		/* Ensures that larger anonymous mappings are THP aligned. */
> +		addr = thp_get_unmapped_area_vmflags(file, addr, len,
> +						     pgoff, flags, vm_flags);
> +	} else {
>  		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
>  						    pgoff, flags, vm_flags);
> +	}
>  	if (IS_ERR_VALUE(addr))
>  		return addr;
>
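As an aside for anyone following the guard-gap reasoning in the commit
message: below is a small, self-contained userspace sketch, not part of
this patch, of why a placement search has to account for the start gap
rather than only the mapping length. The names start_gap(), fits() and
FAKE_VM_SHADOW_STACK are made up for illustration only.

/*
 * Illustrative userspace sketch, not kernel code: models why a mapping
 * type that needs a start guard gap must not be squeezed into a free
 * area that only fits its length.  All names here are made up.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE		4096UL
#define FAKE_VM_SHADOW_STACK	0x1UL	/* stand-in for a shadow stack vm_flags bit */

/* Extra space to reserve below the mapping, keyed off the vm_flags. */
static unsigned long start_gap(unsigned long vm_flags)
{
	return (vm_flags & FAKE_VM_SHADOW_STACK) ? PAGE_SIZE : 0;
}

/*
 * Can a request of 'len' bytes with the given flags use a free area of
 * 'free_len' bytes?  Only if the area also covers the start gap.
 */
static bool fits(unsigned long free_len, unsigned long len,
		 unsigned long vm_flags)
{
	return free_len >= len + start_gap(vm_flags);
}

int main(void)
{
	/*
	 * A PAGE_SIZE hole and a PAGE_SIZE shadow stack request: the naive
	 * length-only check accepts it, leaving no guard gap to the
	 * neighbouring VMA; the gap-aware check rejects it.
	 */
	printf("naive placement ok?     %d\n", PAGE_SIZE >= PAGE_SIZE);
	printf("gap-aware placement ok? %d\n",
	       (int)fits(PAGE_SIZE, PAGE_SIZE, FAKE_VM_SHADOW_STACK));
	return 0;
}

Built with a plain cc, the first line prints 1 and the second 0, which is
the corner case the commit message describes: a hole that exactly matches
the mapping length looks usable until the start gap is taken into account.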