Patch "iommu/arm-smmu-v3: Fix soft lockup triggered by arm_smmu_mm_invalidate_range" has been added to the 5.15-stable tree

This is a note to let you know that I've just added the patch titled

    iommu/arm-smmu-v3: Fix soft lockup triggered by arm_smmu_mm_invalidate_range

to the 5.15-stable tree, which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     iommu-arm-smmu-v3-fix-soft-lockup-triggered-by-arm_s.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit ae427060296521f33d2d7395a84a4d3361b6ec2b
Author: Nicolin Chen <nicolinc@xxxxxxxxxx>
Date:   Tue Oct 3 16:35:49 2023 -0700

    iommu/arm-smmu-v3: Fix soft lockup triggered by arm_smmu_mm_invalidate_range
    
    commit d5afb4b47e13161b3f33904d45110f9e6463bad6 upstream.
    
    When running an SVA case, the following soft lockup is triggered:
    --------------------------------------------------------------------
    watchdog: BUG: soft lockup - CPU#244 stuck for 26s!
    pstate: 83400009 (Nzcv daif +PAN -UAO +TCO +DIT -SSBS BTYPE=--)
    pc : arm_smmu_cmdq_issue_cmdlist+0x178/0xa50
    lr : arm_smmu_cmdq_issue_cmdlist+0x150/0xa50
    sp : ffff8000d83ef290
    x29: ffff8000d83ef290 x28: 000000003b9aca00 x27: 0000000000000000
    x26: ffff8000d83ef3c0 x25: da86c0812194a0e8 x24: 0000000000000000
    x23: 0000000000000040 x22: ffff8000d83ef340 x21: ffff0000c63980c0
    x20: 0000000000000001 x19: ffff0000c6398080 x18: 0000000000000000
    x17: 0000000000000000 x16: 0000000000000000 x15: ffff3000b4a3bbb0
    x14: ffff3000b4a30888 x13: ffff3000b4a3cf60 x12: 0000000000000000
    x11: 0000000000000000 x10: 0000000000000000 x9 : ffffc08120e4d6bc
    x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000048cfa
    x5 : 0000000000000000 x4 : 0000000000000001 x3 : 000000000000000a
    x2 : 0000000080000000 x1 : 0000000000000000 x0 : 0000000000000001
    Call trace:
     arm_smmu_cmdq_issue_cmdlist+0x178/0xa50
     __arm_smmu_tlb_inv_range+0x118/0x254
     arm_smmu_tlb_inv_range_asid+0x6c/0x130
     arm_smmu_mm_invalidate_range+0xa0/0xa4
     __mmu_notifier_invalidate_range_end+0x88/0x120
     unmap_vmas+0x194/0x1e0
     unmap_region+0xb4/0x144
     do_mas_align_munmap+0x290/0x490
     do_mas_munmap+0xbc/0x124
     __vm_munmap+0xa8/0x19c
     __arm64_sys_munmap+0x28/0x50
     invoke_syscall+0x78/0x11c
     el0_svc_common.constprop.0+0x58/0x1c0
     do_el0_svc+0x34/0x60
     el0_svc+0x2c/0xd4
     el0t_64_sync_handler+0x114/0x140
     el0t_64_sync+0x1a4/0x1a8
    --------------------------------------------------------------------
    
    The commit 06ff87bae8d3 ("arm64: mm: remove unused functions and variable
    protoypes") fixed a similar lockup on the CPU MMU side. Yet, the same
    lockup can occur on the SMMU side too, since arm_smmu_mm_invalidate_range()
    is typically called right after the MMU TLB flush function, e.g.
            tlb_flush_mmu_tlbonly {
                    tlb_flush {
                            __flush_tlb_range {
                                    // check MAX_TLBI_OPS
                            }
                    }
                    mmu_notifier_invalidate_range {
                            arm_smmu_mm_invalidate_range {
                                    // does not check MAX_TLBI_OPS
                            }
                    }
            }
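
    For reference, the MAX_TLBI_OPS check on the CPU side lives in
    __flush_tlb_range() in arch/arm64/include/asm/tlbflush.h and, ignoring
    the TLB range-ops path, looks approximately like this (a rough sketch,
    not a verbatim quote; stride is the per-entry step size):

            if ((end - start) >= (MAX_TLBI_OPS * stride)) {
                    /* Too many per-page ops: one ASID-wide flush instead. */
                    flush_tlb_mm(vma->vm_mm);
                    return;
            }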
    
    Clone a CMDQ_MAX_TLBI_OPS from the MAX_TLBI_OPS in tlbflush.h: since in
    an SVA case the SMMU shares the CPU page table, it makes sense to align
    with the tlbflush code. Then, if the request size hits this threshold,
    replace the per-page TLBI commands with a single per-asid TLBI command.
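
    For reference, MAX_TLBI_OPS evaluates to PTRS_PER_PTE, i.e.
    1 << (PAGE_SHIFT - 3), so with a 4KB granule the cloned threshold works
    out to 512 pages, or 2MB per invalidation request. A minimal stand-alone
    sketch of that arithmetic (plain user-space C; 4KB pages assumed):

            #include <stdio.h>

            int main(void)
            {
                    unsigned int page_shift = 12;    /* assuming 4KB pages */
                    unsigned long page_size = 1UL << page_shift;
                    unsigned long max_ops = 1UL << (page_shift - 3); /* 512 */

                    /* Ranges of max_ops pages or more become one TLBI. */
                    printf("threshold: %lu pages = %lu MB\n",
                           max_ops, (max_ops * page_size) >> 20);
                    return 0;
            }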
    
    Signed-off-by: Nicolin Chen <nicolinc@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20230920052257.8615-1-nicolinc@xxxxxxxxxx
    Signed-off-by: Will Deacon <will@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index e2e80eb2840ca..01748742c6842 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
 	}
 }
 
+/*
+ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
+ * is used as a threshold to replace per-page TLBI commands to issue in the
+ * command queue with an address-space TLBI command, when SMMU w/o a range
+ * invalidation feature handles too many per-page TLBI commands, which will
+ * otherwise result in a soft lockup.
+ */
+#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
+
 static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 					 struct mm_struct *mm,
 					 unsigned long start, unsigned long end)
@@ -200,10 +209,22 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 	 * range. So do a simple translation here by calculating size correctly.
 	 */
 	size = end - start;
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+			size = 0;
+	}
+
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
+		if (!size)
+			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
+					      smmu_mn->cd->asid);
+		else
+			arm_smmu_tlb_inv_range_asid(start, size,
+						    smmu_mn->cd->asid,
+						    PAGE_SIZE, false,
+						    smmu_domain);
+	}
 
-	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
-		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
-					    PAGE_SIZE, false, smmu_domain);
 	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
 }
 

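A note on the design choice in the hunk above: the patch reuses size == 0
as a sentinel meaning "invalidate the whole address space", which is why
the over-threshold case collapses to a single arm_smmu_tlb_inv_asid() call.
Below is a minimal stand-alone sketch of that decision flow, assuming a 4KB
granule; the ARM_SMMU_FEAT_BTM gate and the trailing ATC invalidation are
omitted, and the two helpers are hypothetical stand-ins for the real SMMU
command functions:

        #include <stdbool.h>
        #include <stdio.h>

        #define PAGE_SHIFT        12    /* assuming a 4KB granule */
        #define PAGE_SIZE         (1UL << PAGE_SHIFT)
        #define CMDQ_MAX_TLBI_OPS (1UL << (PAGE_SHIFT - 3))

        /* Hypothetical stand-in for arm_smmu_tlb_inv_asid(). */
        static void inv_asid(int asid)
        {
                printf("TLBI by ASID %d (whole address space)\n", asid);
        }

        /* Hypothetical stand-in for arm_smmu_tlb_inv_range_asid(). */
        static void inv_range(unsigned long start, unsigned long size,
                              int asid)
        {
                printf("TLBI range [%#lx, %#lx) for ASID %d\n",
                       start, start + size, asid);
        }

        /* Mirrors the patched arm_smmu_mm_invalidate_range() flow. */
        static void invalidate(unsigned long start, unsigned long end,
                               int asid, bool has_range_inv)
        {
                unsigned long size = end - start;

                /* Over the threshold, size = 0 means "flush everything". */
                if (!has_range_inv && size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
                        size = 0;

                if (!size)
                        inv_asid(asid);
                else
                        inv_range(start, size, asid);
        }

        int main(void)
        {
                invalidate(0x400000, 0x400000 + (1UL << 30), 5, false);
                invalidate(0x400000, 0x500000, 5, false);
                return 0;
        }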

