Hi, Xuerui, On Tue, Aug 29, 2023 at 1:05 AM WANG Xuerui <kernel@xxxxxxxxxx> wrote: > > On 8/28/23 23:25, Huacai Chen wrote: > > Both shm_align_mask and SHMLBA want to avoid cache aliasing. But they are > > inconsistent: shm_align_mask is (PAGE_SIZE - 1) while SHMLBA is SZ_64K, > > but PAGE_SIZE is not always equal to SZ_64K. > > > > This may cause problems when calling shmat() twice. Fix this problem by removing > > shm_align_mask and using SHMLBA (SHMLBA - 1, strctly) instead. > "strictly SHMLBA - 1"? > > > > Reported-by: Jiantao Shan <shanjiantao@xxxxxxxxxxx> > > Signed-off-by: Huacai Chen <chenhuacai@xxxxxxxxxxx> > > --- > > arch/loongarch/mm/cache.c | 1 - > > arch/loongarch/mm/mmap.c | 12 ++++-------- > > 2 files changed, 4 insertions(+), 9 deletions(-) > > > > diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c > > index 72685a48eaf0..6be04d36ca07 100644 > > --- a/arch/loongarch/mm/cache.c > > +++ b/arch/loongarch/mm/cache.c > > @@ -156,7 +156,6 @@ void cpu_cache_init(void) > > > > current_cpu_data.cache_leaves_present = leaf; > > current_cpu_data.options |= LOONGARCH_CPU_PREFETCH; > > - shm_align_mask = PAGE_SIZE - 1; > > } > > > > static const pgprot_t protection_map[16] = { > > diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c > > index fbe1a4856fc4..c99c8015651a 100644 > > --- a/arch/loongarch/mm/mmap.c > > +++ b/arch/loongarch/mm/mmap.c > > @@ -8,12 +8,8 @@ > > #include <linux/mm.h> > > #include <linux/mman.h> > > > > -unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ > > -EXPORT_SYMBOL(shm_align_mask); > > By removing this altogether, a lot of code duplication is introduced it > seems. Better make this a private #define so use sites remain nicely > symbolic: > > "#define SHM_ALIGN_MASK (SHMLBA - 1)" OK, thanks. 
Huacai > > > - > > -#define COLOUR_ALIGN(addr, pgoff) \ > > - ((((addr) + shm_align_mask) & ~shm_align_mask) + \ > > - (((pgoff) << PAGE_SHIFT) & shm_align_mask)) > > +#define COLOUR_ALIGN(addr, pgoff) \ > > + ((((addr) + (SHMLBA - 1)) & ~(SHMLBA - 1)) + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) > > > > enum mmap_allocation_direction {UP, DOWN}; > > > > @@ -40,7 +36,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, > > * cache aliasing constraints. > > */ > > if ((flags & MAP_SHARED) && > > - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) > > + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) > > return -EINVAL; > > return addr; > > } > > @@ -63,7 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, > > } > > > > info.length = len; > > - info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0; > > + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; > > info.align_offset = pgoff << PAGE_SHIFT; > > > > if (dir == DOWN) { > > -- > WANG "xen0n" Xuerui > > Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/ > >