From: Joerg Roedel <jroedel@xxxxxxx>

Make use of the new IOTLB flush-interface in the IOMMU-API. We don't
implement the iotlb_range_add() call-back for now, as this would put
too much pressure on the command buffer. Instead, we do a full TLB
flush in the iotlb_sync() call-back.

Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
---
 drivers/iommu/amd_iommu.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3a702c4..8804264 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3032,6 +3032,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *dom)
+{
+	struct protection_domain *domain = to_pdomain(dom);
+
+	domain_flush_tlb_pde(domain);
+	domain_flush_complete(domain);
+}
+
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			   size_t page_size)
 {
@@ -3045,9 +3053,6 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
-
 	return unmap_size;
 }
 
@@ -3174,6 +3179,8 @@ const struct iommu_ops amd_iommu_ops = {
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_sync = amd_iommu_flush_iotlb_all,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
-- 
2.7.4
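
Not part of the patch itself, just an illustrative sketch of how a consumer
of the IOMMU-API might use the deferred-flush interface these call-backs wire
up. It assumes the iommu_unmap_fast()/iommu_tlb_range_add()/iommu_tlb_sync()
helpers from the generic TLB-flush interface; the example_unmap_batch()
helper and its arguments are made up for the example:

	#include <linux/iommu.h>

	/*
	 * Tear down several mappings and flush the IOTLB only once.
	 * With this patch, iommu_tlb_range_add() has no effect for the AMD
	 * IOMMU (no iotlb_range_add call-back is provided), and
	 * iommu_tlb_sync() results in a single full TLB flush via
	 * amd_iommu_flush_iotlb_all().
	 */
	static void example_unmap_batch(struct iommu_domain *domain,
					unsigned long *iovas, size_t *sizes,
					int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			/* unmap without flushing the IOTLB per call */
			iommu_unmap_fast(domain, iovas[i], sizes[i]);
			/* record the range; may be a no-op for this driver */
			iommu_tlb_range_add(domain, iovas[i], sizes[i]);
		}

		/* one sync at the end -> one full TLB flush on AMD IOMMU */
		iommu_tlb_sync(domain);
	}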