On 1/19/21 4:45 PM, Sean Christopherson wrote:
On Sat, Jan 16, 2021, Krish Sadhukhan wrote:
According to section "Canonicalization and Consistency Checks" in APM vol 2,
the following guest state is illegal:
"The MSR or IOIO intercept tables extend to a physical address that
is greater than or equal to the maximum supported physical address."
Also check that these addresses are aligned on a page boundary.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
arch/x86/kvm/svm/nested.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cb4c6ee10029..2419f392a13d 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -211,7 +211,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
return true;
}
-static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_control_area *control)
{
if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
return false;
@@ -223,10 +224,15 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
!npt_enabled)
return false;
+ if (!page_address_valid(vcpu, control->msrpm_base_pa))
+ return false;
+ if (!page_address_valid(vcpu, control->iopm_base_pa))
These checks are wrong. The MSRPM is 8kb in size, and the IOPM is 12kb, and the
APM explicitly states that the last byte is checked:
if the address of the last byte in the table is greater than or equal to the
maximum supported physical address, this is treated as illegal VMCB state and
causes a #VMEXIT(VMEXIT_INVALID).
KVM can't check just the last byte, as that would fail to detect a wrap of the
64-bit boundary. Might be worth adding yet another helper? I think this will
work, though I'm sure Paolo has a much more clever solution :-)
static inline bool page_range_valid(struct kvm_vcpu *vcpu, gpa_t gpa, int size)
{
	gpa_t last_page = gpa + size - PAGE_SIZE;

	/* Catch a wrap around the 64-bit boundary. */
	if (last_page < gpa)
		return false;

	return page_address_valid(vcpu, last_page);
}
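Usage in nested_vmcb_check_controls() would then be something like this
(untested, and assuming new MSRPM_SIZE/IOPM_SIZE defines for the architectural
8kb/12kb sizes):

	if (!page_range_valid(vcpu, control->msrpm_base_pa, MSRPM_SIZE))
		return false;

	if (!page_range_valid(vcpu, control->iopm_base_pa, IOPM_SIZE))
		return false;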
Note, the IOPM is 12kb in size, but KVM allocates and initializes 16kb, i.e.
using IOPM_ALLOC_ORDER for the check would be wrong. Maybe define the actual
size for both bitmaps and use get_order() instead of hardcoding the order? That
would make it easy to "fix" svm_hardware_setup() so that it doesn't initialize
unused memory.
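E.g. something like this (untested sketch, the *_SIZE defines are new):

/* Architectural sizes of the MSR and IOIO permission bitmaps. */
#define MSRPM_SIZE	(PAGE_SIZE * 2)
#define IOPM_SIZE	(PAGE_SIZE * 3)

and in svm_hardware_setup():

	iopm_pages = alloc_pages(GFP_KERNEL, get_order(IOPM_SIZE));
	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	/* Intercept all I/O ports, but only initialize the architectural 12kb. */
	memset(iopm_va, 0xff, IOPM_SIZE);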
Are there any issues with using alloc_pages_exact() instead of
alloc_pages() for allocating the IOPM bitmap?
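I.e. something like this (untested sketch):

	iopm_va = alloc_pages_exact(IOPM_SIZE, GFP_KERNEL);
	if (!iopm_va)
		return -ENOMEM;

	/* alloc_pages_exact() hands back a virtual address directly. */
	memset(iopm_va, 0xff, IOPM_SIZE);
	iopm_base = __pa(iopm_va);

with free_pages_exact(iopm_va, IOPM_SIZE) on the teardown path, so that only
the architectural 12kb is kept and initialized.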
+ return false;
+
return true;
}
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
{
bool vmcb12_lma;
@@ -255,10 +261,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
(vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
return false;
}
- if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ if (!kvm_is_valid_cr4(vcpu, vmcb12->save.cr4))
return false;
- return nested_vmcb_check_controls(&vmcb12->control);
+ return nested_vmcb_check_controls(vcpu, &vmcb12->control);
}
static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -485,7 +491,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(svm, vmcb12)) {
+ if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -1173,7 +1179,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
ret = -EINVAL;
- if (!nested_vmcb_check_controls(ctl))
+ if (!nested_vmcb_check_controls(vcpu, ctl))
goto out_free;
/*
--
2.27.0