On CPUs with hardware AF/DBM, initialising prefaulted PTEs as 'old'
improves vmscan behaviour and does not appear to introduce any overhead.

Implement arch_wants_old_faultaround_pte() to return 'true' if we detect
hardware access flag support at runtime. This can be extended in future
based on MIDR matching if necessary.

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Will Deacon <will@xxxxxxxxxx>
---
 arch/arm64/include/asm/cpufeature.h | 12 ++++++++++++
 arch/arm64/include/asm/pgtable.h    |  8 +++++++-
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index da250e4741bd..3424f5881390 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -764,6 +764,18 @@ static inline bool cpu_has_hw_af(void)
 					ID_AA64MMFR1_HADBS_SHIFT);
 }
 
+static inline bool system_has_hw_af(void)
+{
+	u64 mmfr1;
+
+	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
+		return false;
+
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr1,
+						ID_AA64MMFR1_HADBS_SHIFT);
+}
+
 #ifdef CONFIG_ARM64_AMU_EXTN
 /* Check whether the cpu supports the Activity Monitors Unit (AMU) */
 extern bool cpu_has_amu_feat(int cpu);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5628289b9d5e..d5c2a7625e9a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -974,7 +974,13 @@ static inline bool arch_faults_on_old_pte(void)
 
 	return !cpu_has_hw_af();
 }
-#define arch_faults_on_old_pte arch_faults_on_old_pte
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+
+/*
+ * Experimentally, it's cheap to set the access flag in hardware and we
+ * benefit from prefaulting mappings as 'old' to start with.
+ */
+#define arch_wants_old_faultaround_pte system_has_hw_af
 
 #endif /* !__ASSEMBLY__ */
-- 
2.29.2.576.ga3fc446d84-goog
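
For reference, the core-mm side is expected to consult the new hook when
installing prefaulted PTEs; a minimal sketch of such a call site is below.
The set_prefault_pte() helper, its 'prefault' argument and the fallback
define are made up for illustration; the actual generic-mm wiring is a
separate patch.

/*
 * Illustrative sketch only: a generic fault-around path could consult
 * arch_wants_old_faultaround_pte() like this when installing PTEs for
 * pages the task has not actually touched yet.
 */
#ifndef arch_wants_old_faultaround_pte
#define arch_wants_old_faultaround_pte()	(false)
#endif

static void set_prefault_pte(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, struct page *page, bool prefault)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	/*
	 * Prefaulted (speculatively mapped) pages start out 'old' when the
	 * CPU can set the access flag in hardware, so vmscan only sees
	 * 'young' entries for pages that were genuinely accessed.
	 */
	if (prefault && arch_wants_old_faultaround_pte())
		entry = pte_mkold(entry);

	set_pte_at(vma->vm_mm, addr, ptep, entry);
}

Because system_has_hw_af() is based on the sanitised value of
ID_AA64MMFR1_EL1, the decision is consistent system-wide rather than
depending on which CPU happens to take the fault.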