From: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>

When removing a mapping from a domain, we need to send an invalidation to
all devices that might have stored it in their Address Translation Cache
(ATC). In addition, with SVM we'll need to invalidate the context
descriptors of all devices attached to a live domain.

Maintain a list of devices in each domain, protected by a spinlock. It is
updated every time we attach or detach devices to and from domains. It
needs to be a spinlock because we'll invalidate ATC entries from within
hardirq-safe contexts, but it may be possible to relax the read side with
RCU later.

Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>
---
 drivers/iommu/arm-smmu-v3.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 0d284029dc73..ce222705f52b 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -595,6 +595,11 @@ struct arm_smmu_device {
 struct arm_smmu_master_data {
         struct arm_smmu_device          *smmu;
         struct arm_smmu_strtab_ent      ste;
+
+        struct arm_smmu_domain          *domain;
+        struct list_head                list; /* domain->devices */
+
+        struct device                   *dev;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -619,6 +624,9 @@ struct arm_smmu_domain {
         };
 
         struct iommu_domain             domain;
+
+        struct list_head                devices;
+        spinlock_t                      devices_lock;
 };
 
 struct arm_smmu_option_prop {
@@ -1494,6 +1502,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
         }
 
         mutex_init(&smmu_domain->init_mutex);
+        INIT_LIST_HEAD(&smmu_domain->devices);
+        spin_lock_init(&smmu_domain->devices_lock);
+
         return &smmu_domain->domain;
 }
 
@@ -1714,6 +1725,16 @@ static void arm_smmu_detach_dev(struct device *dev)
 {
         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
         struct arm_smmu_master_data *master = fwspec->iommu_priv;
+        unsigned long flags;
+        struct arm_smmu_domain *smmu_domain = master->domain;
+
+        if (smmu_domain) {
+                spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+                list_del(&master->list);
+                spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+                master->domain = NULL;
+        }
 
         master->ste.assigned = false;
         arm_smmu_install_ste_for_dev(fwspec);
@@ -1723,6 +1744,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
         int ret = 0;
         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+        unsigned long flags;
         struct arm_smmu_device *smmu;
         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
         struct arm_smmu_master_data *master;
@@ -1758,6 +1780,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         }
 
         ste->assigned = true;
+        master->domain = smmu_domain;
+
+        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+        list_add(&master->list, &smmu_domain->devices);
+        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
         if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
                 ste->s1_cfg = NULL;
@@ -1884,6 +1911,7 @@ static int arm_smmu_add_device(struct device *dev)
                         return -ENOMEM;
 
                 master->smmu = smmu;
+                master->dev = dev;
                 fwspec->iommu_priv = master;
         }
 
-- 
2.17.2
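
For reference, this is roughly how the new devices list is meant to be
consumed. The sketch below is illustrative only; the real ATC invalidation
helper arrives in a later patch of the series, and the function name and
command details here are assumed, not part of this patch:

static void arm_smmu_atc_inv_domain_sketch(struct arm_smmu_domain *smmu_domain)
{
        unsigned long flags;
        struct arm_smmu_master_data *master;

        /*
         * Walk all masters currently attached to the domain. devices_lock
         * is taken with irqsave because ATC invalidations may be issued
         * from hardirq-safe contexts.
         */
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, list) {
                /*
                 * Build and queue an ATC invalidation command for each
                 * stream ID of master->dev here (details left to the
                 * later patch that adds ATC support).
                 */
        }
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}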