[PATCH 30/37] KVM: arm64: nVHE: Remove MMU assumption in speculative AT workaround

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Rather than making an assumption about the state of the host's MMU,
always force it on. This starts reducing trust and dependence on the
host state and sets the stage for a common path for both guests and the
host vcpu.

The EPDx bits must be set for the full duration that the MMU is being
enabled so that no S1 walks can occur if we are enabling an
uninitialized or unused MMU.

Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h |  6 ++++--
 arch/arm64/kvm/hyp/nvhe/tlb.c              | 15 +++++++++++----
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index c55b2d17ada8..0c24c922bae8 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -83,13 +83,15 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	} else	if (!ctxt->is_host) {
 		/*
 		 * Must only be done for guest registers, hence the context
-		 * test. We're coming from the host, so SCTLR.M is already
-		 * set. Pairs with nVHE's __activate_traps().
+		 * test. Pairs with nVHE's __activate_traps().
 		 */
 		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
 				  TCR_EPD1_MASK | TCR_EPD0_MASK),
 				 SYS_TCR);
 		isb();
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1) | SCTLR_ELx_M,
+				 SYS_SCTLR);
+		isb();
 	}
 
 	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1),	SYS_CPACR);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index e5f65f0da106..16fa06ff0554 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -10,6 +10,7 @@
 
 struct tlb_inv_context {
 	u64		tcr;
+	u64		sctlr;
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
@@ -21,14 +22,19 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 		/*
 		 * For CPUs that are affected by ARM 1319367, we need to
 		 * avoid a host Stage-1 walk while we have the guest's
-		 * VMID set in the VTTBR in order to invalidate TLBs.
-		 * We're guaranteed that the S1 MMU is enabled, so we can
-		 * simply set the EPD bits to avoid any further TLB fill.
+		 * VMID set in the VTTBR in order to invalidate TLBs. This
+		 * is done by setting the EPD bits in the TCR_EL1 register.
+		 * We also need to prevent TLB allocation from IPA->PA walks,
+		 * so we enable the S1 MMU.
 		 */
 		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
 		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
 		write_sysreg_el1(val, SYS_TCR);
 		isb();
+		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+		val |= SCTLR_ELx_M;
+		write_sysreg_el1(val, SYS_SCTLR);
+		isb();
 	}
 
 	/* __load_guest_stage2() includes an ISB for the workaround. */
@@ -43,7 +48,9 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Ensure write of the host VMID */
 		isb();
-		/* Restore the host's TCR_EL1 */
+		/* Restore the host's SCTLR and then TCR_EL1 */
+		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+		isb();
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 	}
 }
-- 
2.27.0.389.gc38d7665816-goog

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm



[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux