With nested stage support, we will soon need to invalidate S1
contexts and ranges tagged with an unmanaged ASID, i.e. an ASID
allocated by the guest rather than by the host. Introduce two
helpers that allow invalidation with an externally managed ASID.

Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 35 +++++++++++++++++----
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 08ab0dd81049..73f7a56101dd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1679,9 +1679,9 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 }
 
 /* IO_PGTABLE API */
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain,
+				       int ext_asid)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd;
 
@@ -1692,7 +1692,11 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	 * insertion to guarantee those are observed before the TLBI. Do be
 	 * careful, 007.
 	 */
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+	if (ext_asid >= 0) { /* guest stage 1 invalidation */
+		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
+		cmd.tlbi.asid	= ext_asid;
+		cmd.tlbi.vmid	= smmu_domain->s2_cfg->vmid;
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg->cd.asid);
 	} else {
 		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
@@ -1703,9 +1707,17 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
 
-static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	__arm_smmu_tlb_inv_context(smmu_domain, -1);
+}
+
+static void __arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 				   size_t granule, bool leaf,
-				   struct arm_smmu_domain *smmu_domain)
+				   struct arm_smmu_domain *smmu_domain,
+				   int ext_asid)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
@@ -1720,7 +1732,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 	if (!size)
 		return;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+	if (ext_asid >= 0) { /* guest stage 1 invalidation */
+		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
+		cmd.tlbi.asid	= ext_asid;
+		cmd.tlbi.vmid	= smmu_domain->s2_cfg->vmid;
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
 		cmd.tlbi.asid	= smmu_domain->s1_cfg->cd.asid;
 	} else {
@@ -1780,6 +1796,13 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 	arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
 }
 
+static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
+				   size_t granule, bool leaf,
+				   struct arm_smmu_domain *smmu_domain)
+{
+	__arm_smmu_tlb_inv_range(iova, size, granule, leaf, smmu_domain, -1);
+}
+
 static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
 					 unsigned long iova, size_t granule,
 					 void *cookie)
-- 
2.21.3
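
For illustration only, not part of this patch: a minimal sketch of how
a guest-originated invalidation might be dispatched to the two new
helpers, presumably wired up by a later patch in the series. The
guest_tlbi_info layout and the arm_smmu_inv_guest_s1 name below are
hypothetical; only the two helper signatures come from this patch.

	/*
	 * Hypothetical dispatch for guest-originated stage-1
	 * invalidations. Passing a non-negative ext_asid makes the
	 * helpers emit CMDQ_OP_TLBI_NH_ASID / CMDQ_OP_TLBI_NH_VA
	 * scoped by the stage-2 VMID, instead of using the
	 * host-managed ASID from s1_cfg.
	 */
	struct guest_tlbi_info {		/* illustrative only */
		int		asid;		/* guest-allocated ASID */
		bool		whole_context;	/* ASID-wide invalidation? */
		unsigned long	iova;
		size_t		size;
		size_t		granule;
		bool		leaf;
	};

	static void arm_smmu_inv_guest_s1(struct arm_smmu_domain *smmu_domain,
					  struct guest_tlbi_info *info)
	{
		if (info->whole_context)
			__arm_smmu_tlb_inv_context(smmu_domain, info->asid);
		else
			__arm_smmu_tlb_inv_range(info->iova, info->size,
						 info->granule, info->leaf,
						 smmu_domain, info->asid);
	}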
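
Note the ext_asid < 0 convention: the existing io-pgtable callbacks
arm_smmu_tlb_inv_context() and arm_smmu_tlb_inv_range() become thin
wrappers that pass -1, so host-managed stage 1 and stage 2
invalidation take exactly the same path as before, while a
non-negative ext_asid selects the guest-managed route.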