Aggregate all sanity-checks for sharing CPU page tables with the SMMU
under a single ARM_SMMU_FEAT_SVA bit. For PCIe SVA, users also need to
check FEAT_ATS and FEAT_PRI. For platform SVA, they will have to check
FEAT_STALLS.

Introduce ARM_SMMU_FEAT_BTM (Broadcast TLB Maintenance), but don't
enable it at the moment. Since the entire VMID space is shared with the
CPU, enabling DVM (by clearing SMMU_CR2.PTM) could result in
over-invalidation and affect performance of stage-2 mappings.

Cc: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
---
v7->v8: Use id_aa64mmfr0_parange_to_phys_shift()
---
 drivers/iommu/arm-smmu-v3.c | 48 +++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5506add42c9c8..e2d5171bfb7b9 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -654,6 +654,8 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
 #define ARM_SMMU_FEAT_VAX		(1 << 14)
 #define ARM_SMMU_FEAT_RANGE_INV		(1 << 15)
+#define ARM_SMMU_FEAT_BTM		(1 << 16)
+#define ARM_SMMU_FEAT_SVA		(1 << 17)
 	u32				features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -3894,6 +3896,49 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	return 0;
 }
 
+static bool arm_smmu_supports_sva(struct arm_smmu_device *smmu)
+{
+	unsigned long reg, fld;
+	unsigned long oas;
+	unsigned long asid_bits;
+
+	u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+
+	if ((smmu->features & feat_mask) != feat_mask)
+		return false;
+
+	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
+		return false;
+
+	/*
+	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
+	 * not even pretending to support AArch32 here. Abort if the MMU outputs
+	 * addresses larger than what we support.
+	 */
+	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
+	if (smmu->oas < oas)
+		return false;
+
+	/* We can support bigger ASIDs than the CPU, but not smaller */
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+	asid_bits = fld ? 16 : 8;
+	if (smmu->asid_bits < asid_bits)
+		return false;
+
+	/*
+	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
+	 * generally the maximum number of bindable processes.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		asid_bits--;
+	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
+		num_possible_cpus() - 2);
+
+	return true;
+}
+
 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
 	u32 reg;
@@ -4093,6 +4138,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
 	smmu->ias = max(smmu->ias, smmu->oas);
 
+	if (arm_smmu_supports_sva(smmu))
+		smmu->features |= ARM_SMMU_FEAT_SVA;
+
 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
 		 smmu->ias, smmu->oas, smmu->features);
 	return 0;
--
2.27.0
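
For illustration, a rough sketch of the check described in the first
paragraph of the commit message, i.e. how a caller might combine
FEAT_SVA with the transport-specific bits. The helper name and its
is_pci parameter are invented here for the sketch; only struct
arm_smmu_device and the ARM_SMMU_FEAT_* flags come from the driver.

/*
 * Illustrative only, not part of this patch: a hypothetical helper
 * showing how a caller could gate SVA per device. ARM_SMMU_FEAT_SVA
 * aggregates the page-table sharing checks; ATS/PRI and STALLS are the
 * existing transport-specific feature bits.
 */
static bool arm_smmu_sva_usable(struct arm_smmu_device *smmu, bool is_pci)
{
	/* Common requirements, aggregated by this patch */
	if (!(smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	if (is_pci)
		/* PCIe SVA needs ATS plus PRI for recoverable faults */
		return (smmu->features & ARM_SMMU_FEAT_ATS) &&
		       (smmu->features & ARM_SMMU_FEAT_PRI);

	/* Platform SVA instead stalls faulting transactions */
	return smmu->features & ARM_SMMU_FEAT_STALLS;
}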