From: Yong Wu <yong.wu@xxxxxxxxxxxx>

The tlb_flush_all touches the registers controlling tlb operations.
Protect it with the tlb_lock spinlock. This also requires the
range_sync func to release that spinlock before calling tlb_flush_all.

Signed-off-by: Yong Wu <yong.wu@xxxxxxxxxxxx>
[refactor commit log]
Signed-off-by: Dafna Hirschfeld <dafna.hirschfeld@xxxxxxxxxxxxx>
---
 drivers/iommu/mtk_iommu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index e30ac68fab48..195a411e3087 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,10 +210,14 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 
 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->tlb_lock, flags);
 	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
 		       data->base + data->plat_data->inv_sel_reg);
 	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
 	wmb(); /* Make sure the tlb flush all done */
+	spin_unlock_irqrestore(&data->tlb_lock, flags);
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
@@ -242,14 +246,16 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
 		/* tlb sync */
 		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
 						tmp, tmp != 0, 10, 1000);
+
+		/* Clear the CPE status */
+		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+		spin_unlock_irqrestore(&data->tlb_lock, flags);
+
 		if (ret) {
 			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
 			mtk_iommu_tlb_flush_all(data);
 		}
-		/* Clear the CPE status */
-		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
-		spin_unlock_irqrestore(&data->tlb_lock, flags);
 		pm_runtime_put(data->dev);
 	}
 }
-- 
2.17.1
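
A note on the lock ordering this patch establishes: mtk_iommu_tlb_flush_all()
now takes tlb_lock itself, and the spinlock is not recursive, so the
range_sync fallback path must drop the lock before calling it; otherwise the
timeout path would self-deadlock. Below is a minimal userspace C sketch of
that pattern (a pthread mutex stands in for the tlb_lock spinlock; demo_lock,
flush_all and flush_range are illustrative names, not the driver's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Takes the lock itself, like mtk_iommu_tlb_flush_all() after
	 * this patch. */
	static void flush_all(void)
	{
		pthread_mutex_lock(&demo_lock);  /* stands in for spin_lock_irqsave() */
		puts("full flush under lock");
		pthread_mutex_unlock(&demo_lock);
	}

	/* Mirrors the range_sync path: do the work under the lock, then
	 * release it *before* falling back to flush_all(), since the
	 * lock is not recursive and flush_all() acquires it again. */
	static void flush_range(int timed_out)
	{
		pthread_mutex_lock(&demo_lock);
		puts("partial flush under lock");
		pthread_mutex_unlock(&demo_lock);

		if (timed_out)
			flush_all();  /* safe: lock no longer held here */
	}

	int main(void)
	{
		flush_range(1);  /* simulate the timeout/fallback path */
		return 0;
	}

This is also why the hunk moves the CPE status clear above the unlock: the
register write must still happen with the lock held, while the flush_all()
fallback must happen only after it is released.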