Currently GART writes one page entry at a time. It is more efficient to
aggregate the writes and flush the bus write buffer at the end: this gives
a 10-40% map/unmap performance boost (depending on the size of the mapping)
compared to flushing after each page entry update.

Signed-off-by: Dmitry Osipenko <digetx@xxxxxxxxx>
---
 drivers/iommu/tegra-gart.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index bcdb8973a0ad..5e3fd32fce4a 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -293,7 +293,6 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		}
 	}
 	gart_set_pte(gart, iova, GART_PTE(pfn));
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return 0;
 }
@@ -310,7 +309,6 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	gart_set_pte(gart, iova, 0);
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return bytes;
 }
@@ -376,6 +374,14 @@ static int gart_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain)
+{
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
+
+	FLUSH_GART_REGS(gart);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
 	.capable	= gart_iommu_capable,
 	.domain_alloc	= gart_iommu_domain_alloc,
@@ -391,6 +397,8 @@ static const struct iommu_ops gart_iommu_ops = {
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
 	.of_xlate	= gart_iommu_of_xlate,
+	.iotlb_sync_map	= gart_iommu_sync,
+	.iotlb_sync	= gart_iommu_sync,
 };
 
 static int tegra_gart_suspend(struct device *dev)
-- 
2.18.0
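
For illustration, below is a minimal, self-contained sketch of the batching
pattern the patch relies on. It is not driver code: every identifier in it
(fake_gart, fake_set_pte, fake_flush, map_range) is a hypothetical stand-in.
It only demonstrates that once the flush is hoisted out of the per-page loop
into a single sync step at the end of the operation -- the role the
.iotlb_sync_map/.iotlb_sync callbacks take on in the patch -- an N-page
mapping costs one bus-buffer flush instead of N.

/*
 * Hypothetical, illustrative sketch of "write all entries, flush once".
 * Not the tegra-gart driver; plain userspace C for demonstration only.
 */
#include <stdio.h>
#include <stddef.h>

#define NPAGES 256

struct fake_gart {
	unsigned long pte[NPAGES];	/* stand-in for the GART aperture */
	unsigned int flushes;		/* counts bus-buffer flushes performed */
};

/* Stand-in for a gart_set_pte()-style write: no flush per entry. */
static void fake_set_pte(struct fake_gart *gart, size_t idx, unsigned long pte)
{
	gart->pte[idx] = pte;
}

/* Stand-in for a FLUSH_GART_REGS()-style read-back draining the buffer. */
static void fake_flush(struct fake_gart *gart)
{
	(void)gart->pte[0];
	gart->flushes++;
}

/* Batched mapping: all entries written back to back, one flush at the end. */
static void map_range(struct fake_gart *gart, size_t first, size_t count,
		      unsigned long pfn)
{
	size_t i;

	for (i = 0; i < count; i++)
		fake_set_pte(gart, first + i, pfn + i);

	fake_flush(gart);	/* single flush covers all writes above */
}

int main(void)
{
	struct fake_gart gart = { .flushes = 0 };

	map_range(&gart, 0, NPAGES, 0x80000000UL >> 12);
	printf("flushes for %d pages: %u (would be %d with a per-page flush)\n",
	       NPAGES, gart.flushes, NPAGES);
	return 0;
}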