[PATCH v5 4/5] KVM: x86: emulation: Apply LAM mask when emulating data access in 64-bit mode

Emulate hardware LAM masking when emulating data accesses in 64-bit mode.

kvm_vcpu_lam_type() and kvm_lam_untag_addr() implement this: based on the
LAM bits in CR3/CR4, first check that the linear address is LAM-canonical,
i.e. that bit 63 matches the highest effective address bit (bit 56 for
LAM57, bit 47 for LAM48). Then mask out the metadata bits by sign-extending
the address from that bit. If the canonicality check fails, emulate a #GP
for the guest.
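For illustration, a minimal self-contained sketch of the two steps
(LAM-canonicality check, then untagging by sign-extension). The demo_*
helper names, the standalone main() and the example address are
hypothetical stand-ins mirroring lam_canonical() and
__canonical_address(), not the kernel code itself:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * LAM-canonical: bit 63 must equal the highest effective address
	 * bit (bit 56 for LAM57, bit 47 for LAM48).
	 */
	static int demo_lam_canonical(uint64_t addr, int effect_width)
	{
		return (addr >> 63) == ((addr >> effect_width) & 1);
	}

	/*
	 * Untag by sign-extending from the effective width (57 or 48
	 * bits), which clears the metadata bits while keeping the
	 * address canonical.
	 */
	static uint64_t demo_untag(uint64_t addr, int vaddr_bits)
	{
		int shift = 64 - vaddr_bits;

		return (uint64_t)(((int64_t)(addr << shift)) >> shift);
	}

	int main(void)
	{
		/* LAM_U57: user pointer with metadata in bits 62:57 */
		uint64_t tagged = 0x3e00007f12345678ULL;

		if (demo_lam_canonical(tagged, 56))
			printf("untagged: 0x%016llx\n",
			       (unsigned long long)demo_untag(tagged, 57));
		return 0;
	}

Here bit 63 and bit 56 are both 0, so the address passes the LAM_U57
check, and untagging yields 0x0000007f12345678.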

Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
---
 arch/x86/kvm/emulate.c | 13 ++++++++
 arch/x86/kvm/x86.h     | 70 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 83 insertions(+)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..77bd13f40711 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -700,6 +700,19 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		/* LAM applies only on data access */
+		if (!fetch && guest_cpuid_has(ctxt->vcpu, X86_FEATURE_LAM)) {
+			enum lam_type type;
+
+			type = kvm_vcpu_lam_type(la, ctxt->vcpu);
+			if (type == LAM_ILLEGAL) {
+				*linear = la;
+				goto bad;
+			} else {
+				la = kvm_lam_untag_addr(la, type);
+			}
+		}
+
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
 		if (!__is_canonical_address(la, va_bits))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6b6bfddc84e0..d992e5220602 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -201,6 +201,76 @@ static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
+enum lam_type {
+	LAM_ILLEGAL = -1,
+	LAM_U57,
+	LAM_U48,
+	LAM_S57,
+	LAM_S48,
+	LAM_NONE
+};
+
+#ifdef CONFIG_X86_64
+/*
+ * LAM Canonical Rule:
+ * LAM_U/S48 -- bit 63 == bit 47
+ * LAM_U/S57 -- bit 63 == bit 56
+ */
+static inline bool lam_canonical(u64 addr, int effect_width)
+{
+	return (addr >> 63) == ((addr >> effect_width) & BIT(0));
+}
+
+static inline enum lam_type kvm_vcpu_lam_type(u64 addr, struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!is_64_bit_mode(vcpu));
+
+	if (addr >> 63 == 0) {
+		if (kvm_read_cr3(vcpu) & X86_CR3_LAM_U57)
+			return lam_canonical(addr, 56) ?  LAM_U57 : LAM_ILLEGAL;
+		else if (kvm_read_cr3(vcpu) & X86_CR3_LAM_U48)
+			return lam_canonical(addr, 47) ?  LAM_U48 : LAM_ILLEGAL;
+	} else if (kvm_read_cr4_bits(vcpu, X86_CR4_LAM_SUP)) {
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_LA57))
+			return lam_canonical(addr, 56) ?  LAM_S57 : LAM_ILLEGAL;
+		else
+			return lam_canonical(addr, 47) ?  LAM_S48 : LAM_ILLEGAL;
+	}
+
+	return LAM_NONE;
+}
+
+/* untag addr for guest, according to vCPU's LAM config */
+static inline u64 kvm_lam_untag_addr(u64 addr, enum lam_type type)
+{
+	switch (type) {
+	case LAM_U57:
+	case LAM_S57:
+		addr = __canonical_address(addr, 57);
+		break;
+	case LAM_U48:
+	case LAM_S48:
+		addr = __canonical_address(addr, 48);
+		break;
+	case LAM_NONE:
+	default:
+		break;
+	}
+
+	return addr;
+}
+#else
+static inline enum lam_type kvm_vcpu_lam_type(u64 addr, struct kvm_vcpu *vcpu)
+{
+	return LAM_NONE;
+}
+
+static inline u64 kvm_lam_untag_addr(u64 addr, enum lam_type type)
+{
+	return addr;
+}
+#endif
+
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
 					gva_t gva, gfn_t gfn, unsigned access)
 {
-- 
2.31.1



