On 7/9/2020 8:01 AM, Bjorn Andersson wrote:
> With many Qualcomm platforms not having functional S2CR BYPASS a
> temporary IOMMU domain, without translation, needs to be allocated in
> order to allow these memory transactions.
>
> Unfortunately the boot loader uses the first few context banks, so
> rather than overwriting a active bank the last context bank is used and
> streams are diverted here during initialization.
>
> This also performs the readback of SMR registers for the Qualcomm
> platform, to trigger the mechanism.
>
> This is based on prior work by Thierry Reding and Laurentiu Tudor.
>
> Signed-off-by: Bjorn Andersson <bjorn.andersson@xxxxxxxxxx>
> ---
>  drivers/iommu/arm-smmu-qcom.c | 11 +++++
>  drivers/iommu/arm-smmu.c      | 80 +++++++++++++++++++++++++++++++++--
>  drivers/iommu/arm-smmu.h      |  3 ++
>  3 files changed, 90 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
> index 86b1917459a4..397df27c1d69 100644
> --- a/drivers/iommu/arm-smmu-qcom.c
> +++ b/drivers/iommu/arm-smmu-qcom.c
> @@ -26,6 +26,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] = {
>  static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
>  {
>          unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
> +        u32 smr;
>          u32 reg;
>          int i;
>
> @@ -56,6 +57,16 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
>                  }
>          }
>
> +        for (i = 0; i < smmu->num_mapping_groups; i++) {
> +                smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
> +
> +                if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
> +                        smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
> +                        smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
> +                        smmu->smrs[i].valid = true;
> +                }
> +        }
> +
>          return 0;
>  }
>
> diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
> index e2d6c0aaf1ea..a7cb27c1a49e 100644
> --- a/drivers/iommu/arm-smmu.c
> +++ b/drivers/iommu/arm-smmu.c
> @@ -652,7 +652,8 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
>  }
>
>  static int arm_smmu_init_domain_context(struct iommu_domain *domain,
> -                                        struct arm_smmu_device *smmu)
> +                                        struct arm_smmu_device *smmu,
> +                                        bool boot_domain)
>  {
>          int irq, start, ret = 0;
>          unsigned long ias, oas;
> @@ -770,6 +771,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
>                  ret = -EINVAL;
>                  goto out_unlock;
>          }
> +
> +        /*
> +         * Use the last context bank for identity mappings during boot, to
> +         * avoid overwriting in-use bank configuration while we're setting up
> +         * the new mappings.
> +         */
> +        if (boot_domain)
> +                start = smmu->num_context_banks - 1;
> +
>          ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
>                                        smmu->num_context_banks);
>          if (ret < 0)
> @@ -1149,7 +1159,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
>          struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>          struct arm_smmu_master_cfg *cfg;
>          struct arm_smmu_device *smmu;
> +        bool free_identity_domain = false;
> +        int idx;
>          int ret;
> +        int i;
>
>          if (!fwspec || fwspec->ops != &arm_smmu_ops) {
>                  dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
> @@ -1174,7 +1187,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
>                  return ret;
>
>          /* Ensure that the domain is finalised */
> -        ret = arm_smmu_init_domain_context(domain, smmu);
> +        ret = arm_smmu_init_domain_context(domain, smmu, false);
>          if (ret < 0)
>                  goto rpm_put;
>
> @@ -1190,9 +1203,34 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
>                  goto rpm_put;
>          }
>
> +        /* Decrement use counter for any references to the identity domain */
> +        mutex_lock(&smmu->stream_map_mutex);
> +        if (smmu->identity) {
> +                struct arm_smmu_domain *identity = to_smmu_domain(smmu->identity);
> +
> +                for_each_cfg_sme(cfg, fwspec, i, idx) {
> +                        dev_err(smmu->dev, "%s() %#x\n", __func__, smmu->smrs[idx].id);

Debug leftovers?

Apart from that I plan to give this a go on some NXP chips. Will keep
you updated.

Thanks a lot Bjorn.

---
Best Regards, Laurentiu
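
For anyone skimming the thread, below is a small stand-alone sketch of the
SMR decode that the qcom_smmu_cfg_probe() hunk above performs. It is not
part of the patch: the helper macros, the simplified field layout (VALID in
bit 31, MASK in bits [30:16], ID in bits [15:0]) and the sample register
value are assumptions made purely so the snippet builds in user space, and
do not claim to match the exact ARM_SMMU_SMR_* definitions in arm-smmu.h.

/*
 * Illustration only: decode one raw SMMU SMR value into id/mask/valid,
 * mirroring the pattern used in the qcom_smmu_cfg_probe() hunk above.
 * BIT()/GENMASK()/FIELD_GET() are re-implemented locally so this builds
 * outside the kernel; field positions are a simplified assumption.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define GENMASK(h, l)           (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, val)    (((val) & (mask)) >> __builtin_ctz(mask))

#define SMR_VALID               BIT(31)
#define SMR_MASK                GENMASK(30, 16)
#define SMR_ID                  GENMASK(15, 0)

struct smr {
        uint16_t id;
        uint16_t mask;
        bool valid;
};

int main(void)
{
        /* Made-up example value: VALID set, mask 0x7f80, stream id 0x0c01. */
        uint32_t raw = SMR_VALID | (0x7f80u << 16) | 0x0c01u;
        struct smr s = { 0 };

        /* Only record the entry if the boot loader left it marked valid. */
        if (FIELD_GET(SMR_VALID, raw)) {
                s.id = FIELD_GET(SMR_ID, raw);
                s.mask = FIELD_GET(SMR_MASK, raw);
                s.valid = true;
        }

        printf("valid=%d id=%#x mask=%#x\n", s.valid, s.id, s.mask);
        return 0;
}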