On Thu, 5 Dec 2024 at 16:03, Ard Biesheuvel <ardb+git@xxxxxxxxxx> wrote:
>
> From: Ard Biesheuvel <ardb@xxxxxxxxxx>
>
> Currently, LPA2 kernel support implies support for up to 52 bits of
> physical addressing, and this is reflected in global definitions such as
> PHYS_MASK_SHIFT and MAX_PHYSMEM_BITS.
>
> This is potentially problematic, given that LPA2 hardware support is
> modeled as a CPU feature which can be overridden, and with LPA2 hardware
> support turned off, attempting to map physical regions with address bits
> [51:48] set (which may exist on LPA2 capable systems booting with
> arm64.nolva) will result in corrupted mappings with a truncated output
> address and bogus shareability attributes.
>
> This means that the accepted physical address range in the mapping
> routines should be at most 48 bits wide when LPA2 support is configured
> but not enabled at runtime.
>
> Fixes: 352b0395b505 ("arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs")
> Cc: <stable@xxxxxxxxxxxxxxx>
> Reviewed-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
> Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
> ---
>  arch/arm64/include/asm/pgtable-hwdef.h | 6 ------
>  arch/arm64/include/asm/pgtable-prot.h  | 7 +++++++
>  arch/arm64/include/asm/sparsemem.h     | 4 +++-
>  3 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
> index c78a988cca93..a9136cc551cc 100644
> --- a/arch/arm64/include/asm/pgtable-hwdef.h
> +++ b/arch/arm64/include/asm/pgtable-hwdef.h
> @@ -222,12 +222,6 @@
>   */
>  #define S1_TABLE_AP (_AT(pmdval_t, 3) << 61)
>
> -/*
> - * Highest possible physical address supported.
> - */
> -#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
> -#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
> -
>  #define TTBR_CNP_BIT (UL(1) << 0)
>
>  /*
> diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
> index 9f9cf13bbd95..a95f1f77bb39 100644
> --- a/arch/arm64/include/asm/pgtable-prot.h
> +++ b/arch/arm64/include/asm/pgtable-prot.h
> @@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared;
>  #define lpa2_is_enabled() false
>  #define PTE_MAYBE_SHARED PTE_SHARED
>  #define PMD_MAYBE_SHARED PMD_SECT_S
> +#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
>  #else
>  static inline bool __pure lpa2_is_enabled(void)
>  {
> @@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void)
>
>  #define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
>  #define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
> +#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
>  #endif
>
> +/*
> + * Highest possible physical address supported.
> + */
> +#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
> +
>  /*
>   * If we have userspace only BTI we don't want to mark kernel pages
>   * guarded even if the system does support BTI.
> diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
> index 8a8acc220371..035e0ca74e88 100644
> --- a/arch/arm64/include/asm/sparsemem.h
> +++ b/arch/arm64/include/asm/sparsemem.h
> @@ -5,7 +5,9 @@
>  #ifndef __ASM_SPARSEMEM_H
>  #define __ASM_SPARSEMEM_H
>
> -#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
> +#include <asm/pgtable-prot.h>
> +
> +#define MAX_PHYSMEM_BITS PHYS_MASK_SHIFT
>

This needs

--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -7,7 +7,8 @@

 #include <asm/pgtable-prot.h>

-#define MAX_PHYSMEM_BITS PHYS_MASK_SHIFT
+#define MAX_PHYSMEM_BITS PHYS_MASK_SHIFT
+#define MAX_POSSIBLE_PHYSMEM_BITS (52)

 /*
  * Section size must be at least 512MB for 64K base

applied on top to make the ZSMALLOC code happy.
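For anyone wondering why: IIRC, zsmalloc packs a PFN plus an object index
into a single unsigned long handle, and it sizes that split at build time
from MAX_POSSIBLE_PHYSMEM_BITS, falling back to MAX_PHYSMEM_BITS when the
former isn't defined. With this patch, MAX_PHYSMEM_BITS expands to the
runtime lpa2_is_enabled() check, which cannot appear in constant
expressions, hence the explicit 52. Roughly along these lines (simplified,
standalone sketch, not the kernel's exact macros; PFN_BITS/OBJ_IDX_BITS are
illustrative names):

/* Standalone illustration; builds with any C11 compiler. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4K pages, for illustration */
#define BITS_PER_LONG	64

/*
 * A compile-time constant upper bound on physical address bits, so the
 * PFN/object-index split of the handle can be fixed at build time. A
 * runtime expression like (lpa2_is_enabled() ? 52 : 48) would not work
 * in the constant contexts below.
 */
#define MAX_POSSIBLE_PHYSMEM_BITS	52

#define PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_IDX_BITS	(BITS_PER_LONG - PFN_BITS)

_Static_assert(PFN_BITS + OBJ_IDX_BITS == BITS_PER_LONG, "handle layout");

int main(void)
{
	printf("PFN bits: %d, object index bits: %d\n", PFN_BITS, OBJ_IDX_BITS);
	return 0;
}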
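And purely to illustrate the corruption the commit message describes (my
reading of the LPA2 descriptor format; the helper below is made up for the
example, not an existing kernel function): with LPA2, the 4K/16K block/page
descriptor carries OA[49:12] in bits [49:12] and OA[51:50] in bits [9:8],
whereas without LPA2 bits [9:8] are SH[1:0] and only OA[47:12] is used. So
encoding a >48-bit PA in the LPA2 layout on a CPU running without LPA2 both
truncates the address and sets bogus shareability:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical helper: build the output-address part of an LPA2-format
 * descriptor (4K/16K granule): OA[49:12] -> bits [49:12], OA[51:50] ->
 * bits [9:8].
 */
static uint64_t encode_lpa2_oa(uint64_t pa)
{
	uint64_t desc = pa & (((1ULL << 50) - 1) & ~((1ULL << 12) - 1));

	desc |= ((pa >> 50) & 0x3) << 8;
	return desc;
}

int main(void)
{
	uint64_t pa = 0x000fcba987654000ULL;	/* PA with bits [51:48] set */
	uint64_t desc = encode_lpa2_oa(pa);

	/*
	 * A CPU running without LPA2 interprets bits [9:8] as SH[1:0] and
	 * only uses OA[47:12], so this descriptor maps a truncated address
	 * with unintended shareability attributes.
	 */
	printf("desc = 0x%016llx, SH[1:0] seen without LPA2 = %llu\n",
	       (unsigned long long)desc,
	       (unsigned long long)((desc >> 8) & 0x3));
	return 0;
}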