Suravee, On 18/10/2023 21:27, Joao Martins wrote: > @@ -2379,6 +2407,69 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) > return false; > } > > +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, > + bool enable) > +{ > + struct protection_domain *pdomain = to_pdomain(domain); > + struct dev_table_entry *dev_table; > + struct iommu_dev_data *dev_data; > + struct amd_iommu *iommu; > + unsigned long flags; > + u64 pte_root; > + > + spin_lock_irqsave(&pdomain->lock, flags); > + if (!(pdomain->dirty_tracking ^ enable)) { > + spin_unlock_irqrestore(&pdomain->lock, flags); > + return 0; > + } > + > + list_for_each_entry(dev_data, &pdomain->dev_list, list) { > + iommu = rlookup_amd_iommu(dev_data->dev); > + if (!iommu) > + continue; > + > + dev_table = get_dev_table(iommu); > + pte_root = dev_table[dev_data->devid].data[0]; > + > + pte_root = (enable ? > + pte_root | DTE_FLAG_HAD : pte_root & ~DTE_FLAG_HAD); > + > + /* Flush device DTE */ > + dev_table[dev_data->devid].data[0] = pte_root; > + device_flush_dte(dev_data); > + } > + > + /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ > + amd_iommu_domain_flush_tlb_pde(pdomain); > + amd_iommu_domain_flush_complete(pdomain); > + pdomain->dirty_tracking = enable; > + spin_unlock_irqrestore(&pdomain->lock, flags); > + > + return 0; > +} > + I'm adding the snippet below, considering some earlier discussion on the Intel driver. It only skips the domain flush when the domain has no devices (or rlookup didn't give an iommu). Technically this was code that was mistakenly deleted between rfcv1 and rfcv2 after your first review, so I'm still retaining your Reviewed-by; let me know if that's wrong.
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 6b4768ff66e1..c5be76e019bf 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2413,6 +2413,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, struct protection_domain *pdomain = to_pdomain(domain); struct dev_table_entry *dev_table; struct iommu_dev_data *dev_data; + bool domain_flush = false; struct amd_iommu *iommu; unsigned long flags; u64 pte_root; @@ -2437,11 +2438,14 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, /* Flush device DTE */ dev_table[dev_data->devid].data[0] = pte_root; device_flush_dte(dev_data); + domain_flush = true; } /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ - amd_iommu_domain_flush_tlb_pde(pdomain); - amd_iommu_domain_flush_complete(pdomain); + if (domain_flush) { + amd_iommu_domain_flush_tlb_pde(pdomain); + amd_iommu_domain_flush_complete(pdomain); + } pdomain->dirty_tracking = enable; spin_unlock_irqrestore(&pdomain->lock, flags);