+ break;
+ }
+ }
+ }
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct actlr_variant *actlrvar;
+ int cbndx = smmu_domain->cfg.cbndx;
struct adreno_smmu_priv *priv;
+ int i;
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
@@ -248,6 +280,18 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;
+ actlrvar = qsmmu->data->actlrvar;
+ if (!actlrvar)
+ return 0;
+
+ for (i = 0; i < qsmmu->data->num_smmu; i++) {
+ if (actlrvar[i].io_start == smmu->ioaddr) {
+ qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar[i].actlrcfg,
+ actlrvar[i].num_actlrcfg);
+ break;
+ }
+ }
+
return 0;
}
@@ -274,7 +318,24 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct actlr_variant *actlrvar;
+ int cbndx = smmu_domain->cfg.cbndx;
+ int i;
+
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ actlrvar = qsmmu->data->actlrvar;
+ if (!actlrvar)
+ return 0;
+
+ for (i = 0; i < qsmmu->data->num_smmu; i++) {
+ if (actlrvar[i].io_start == smmu->ioaddr) {
+ qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar[i].actlrcfg,
+ actlrvar[i].num_actlrcfg);
+ break;
+ }
+ }
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index f3b91963e234..3f651242de7c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ARM_SMMU_QCOM_H
@@ -24,8 +24,22 @@ struct qcom_smmu_config {
const u32 *reg_offset;
};
+struct actlr_config {
+ u16 sid;
+ u16 mask;
+ u32 actlr;
+};
+
+struct actlr_variant {
+ const resource_size_t io_start;
+ const struct actlr_config * const actlrcfg;
+ const size_t num_actlrcfg;
+};
+
struct qcom_smmu_match_data {
+ const struct actlr_variant * const actlrvar;
const struct qcom_smmu_config *cfg;
+ const size_t num_smmu;
const struct arm_smmu_impl *impl;
const struct arm_smmu_impl *adreno_impl;
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d6d1a2a55cc0..0c7f700b27dd 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -990,9 +990,8 @@ static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
* expect simply identical entries for this case, but there's
* no harm in accommodating the generalisation.
*/
- if ((mask & smrs[i].mask) == mask &&
- !((id ^ smrs[i].id) & ~smrs[i].mask))
+ if (smr_is_subset(&smrs[i], id, mask))
return i;
/*
* If the new entry has any other overlap with an existing one,
* though, then there always exists at least one stream ID
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 703fd5817ec1..2e4f65412c6b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -501,6 +501,11 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
}
+static inline bool smr_is_subset(struct arm_smmu_smr *smrs, u16 id, u16 mask)
+{
+ return (mask & smrs->mask) == mask && !((id ^ smrs->id) & ~smrs->mask);
+}
+
#define ARM_SMMU_GR0 0
#define ARM_SMMU_GR1 1
#define ARM_SMMU_CB(s, n) ((s)->numpage + (n))
--
2.17.1