Currently the GART driver writes one page entry at a time and flushes after
every write. It is more efficient to aggregate the writes and flush the bus
buffer once at the end; this gives a 10-40% map/unmap performance boost
(depending on the size of the mapping) compared to flushing after each page
entry update.

Signed-off-by: Dmitry Osipenko <digetx@xxxxxxxxx>
Acked-by: Thierry Reding <treding@xxxxxxxxxx>
---
 drivers/iommu/tegra-gart.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 37a76388ff7e..835fea461c59 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -290,7 +290,6 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		}
 	}
 	gart_set_pte(gart, iova, GART_PTE(pfn));
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return 0;
 }
@@ -307,7 +306,6 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	gart_set_pte(gart, iova, 0);
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return bytes;
 }
@@ -373,6 +371,14 @@ static int gart_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain)
+{
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
+
+	FLUSH_GART_REGS(gart);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
 	.capable	= gart_iommu_capable,
 	.domain_alloc	= gart_iommu_domain_alloc,
@@ -387,6 +393,8 @@ static const struct iommu_ops gart_iommu_ops = {
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
 	.of_xlate	= gart_iommu_of_xlate,
+	.iotlb_sync_map	= gart_iommu_sync,
+	.iotlb_sync	= gart_iommu_sync,
 };
 
 static int tegra_gart_suspend(struct device *dev)
-- 
2.20.0
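
[Editor's note] For readers less familiar with the pattern the patch relies on, below is a minimal,
standalone C sketch of "batch the page-entry writes, pay the flush cost once per
batch instead of once per entry". Everything in it (fake_gart, fake_set_pte,
fake_flush) is hypothetical and only models the roles of gart_set_pte(),
FLUSH_GART_REGS() and the new gart_iommu_sync() callback; it is not kernel code.

/*
 * Hypothetical userspace model of the batching idea: write N page
 * entries, flush once at the end. Not tegra-gart code.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_PAGES 8

struct fake_gart {
	unsigned long pte[NR_PAGES];	/* stand-in for the GART aperture */
	unsigned int flushes;		/* counts expensive flush operations */
};

/* Write one page entry; no flush here (plays the role of gart_set_pte()). */
static void fake_set_pte(struct fake_gart *gart, size_t idx, unsigned long pte)
{
	gart->pte[idx] = pte;
}

/* One expensive flush per batch (plays the role of FLUSH_GART_REGS()
 * issued from the iotlb_sync()-style hook). */
static void fake_flush(struct fake_gart *gart)
{
	gart->flushes++;
}

int main(void)
{
	struct fake_gart gart = { { 0 }, 0 };
	size_t i;

	/* Map a range: N page-entry writes ... */
	for (i = 0; i < NR_PAGES; i++)
		fake_set_pte(&gart, i, 0x1000 * (i + 1));

	/* ... but only one flush at the end of the batch. */
	fake_flush(&gart);

	printf("wrote %d PTEs with %u flush(es)\n", NR_PAGES, gart.flushes);
	return 0;
}

In the real driver the IOMMU core takes the place of main() above: it is
expected to call ->map()/->unmap() per page and then invoke
->iotlb_sync_map()/->iotlb_sync() once for the whole operation, which is why
dropping the per-page FLUSH_GART_REGS() from map/unmap is safe.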