1. Redefine the mtk_iommu_domain structure; it will include an iommu_group and an iommu_domain. Different mtk_iommu_domains can be distinguished by ID. When we implement multiple mtk_iommu_domains, each mtk_iommu_domain can describe one iova region. 2. In theory, every device has one iommu_group, so this patch gets the iommu_group by checking the device. All the devices currently belong to the same m4u_group, so they also use the same mtk_iommu_domain(id=0). Signed-off-by: Chao Hao <chao.hao@xxxxxxxxxxxx> --- drivers/iommu/mtk_iommu.c | 46 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index b34bd3abccf8..bf781f4d7364 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -111,12 +111,16 @@ #define MTK_M4U_TO_PORT(id) ((id) & 0x1f) struct mtk_iommu_domain { + u32 id; struct iommu_domain domain; + struct iommu_group *group; + struct list_head list; }; struct mtk_iommu_pgtable { struct io_pgtable_cfg cfg; struct io_pgtable_ops *iop; + struct list_head m4u_dom_v2; }; static struct mtk_iommu_pgtable *share_pgtable; @@ -167,6 +171,41 @@ static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void) return NULL; } +static u32 get_domain_id(void) +{ + /* only support one mtk_iommu_domain currently */ + return 0; +} + +static u32 mtk_iommu_get_domain_id(void) +{ + return get_domain_id(); +} + +static struct mtk_iommu_domain *get_mtk_domain(struct device *dev) +{ + struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv; + struct mtk_iommu_domain *dom; + u32 domain_id = mtk_iommu_get_domain_id(); + + list_for_each_entry(dom, &data->pgtable->m4u_dom_v2, list) { + if (dom->id == domain_id) + return dom; + } + return NULL; +} + +static struct iommu_group *mtk_iommu_get_group(struct device *dev) +{ + struct mtk_iommu_domain *dom; + + dom = get_mtk_domain(dev); + if (dom) + return dom->group; + + return NULL; +} + static struct mtk_iommu_pgtable *mtk_iommu_get_pgtable(void) { return 
share_pgtable; @@ -328,6 +367,8 @@ static struct mtk_iommu_pgtable *create_pgtable(struct mtk_iommu_data *data) if (!pgtable) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&pgtable->m4u_dom_v2); + pgtable->cfg = (struct io_pgtable_cfg) { .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | @@ -382,6 +423,7 @@ static int mtk_iommu_attach_pgtable(struct mtk_iommu_data *data, static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) { struct mtk_iommu_pgtable *pgtable = mtk_iommu_get_pgtable(); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); struct mtk_iommu_domain *dom; if (type != IOMMU_DOMAIN_DMA) @@ -399,12 +441,15 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) if (iommu_get_dma_cookie(&dom->domain)) goto free_dom; + dom->group = data->m4u_group; + dom->id = mtk_iommu_get_domain_id(); /* Update our support page sizes bitmap */ dom->domain.pgsize_bitmap = pgtable->cfg.pgsize_bitmap; dom->domain.geometry.aperture_start = 0; dom->domain.geometry.aperture_end = DMA_BIT_MASK(32); dom->domain.geometry.force_aperture = true; + list_add_tail(&dom->list, &pgtable->m4u_dom_v2); return &dom->domain; @@ -560,6 +605,7 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) } /* All the client devices are in the same m4u iommu-group */ + data->m4u_group = mtk_iommu_get_group(dev); if (!data->m4u_group) { data->m4u_group = iommu_group_alloc(); if (IS_ERR(data->m4u_group)) -- 2.18.0