On 03/02/21 01:40, Krish Sadhukhan wrote:
According to section "Canonicalization and Consistency Checks" in APM vol 2,
the following guest state is illegal:
"The MSR or IOIO intercept tables extend to a physical address that
is greater than or equal to the maximum supported physical address."
Also check that these addresses are aligned on a page boundary.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
arch/x86/kvm/svm/nested.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7a605ad8254d..caf285e643db 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -214,7 +214,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
return true;
}
-static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_control_area *control)
{
if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
return false;
@@ -226,10 +227,17 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
!npt_enabled)
return false;
+ if (!page_address_valid(vcpu, control->msrpm_base_pa +
+ MSRPM_ALLOC_ORDER * PAGE_SIZE))
+ return false;
+ if (!page_address_valid(vcpu, control->iopm_base_pa +
+ IOPM_ALLOC_ORDER * PAGE_SIZE))
There are four problems:
1) The value does not have to be page-aligned
2) You also have an off-by-one here: the value to be checked is the last
byte, which falls on the previous page
3) ORDER is a shift count, not a number of pages
4) there could be an overflow
1-3 can be fixed by something like this:
if (!page_address_valid(vcpu,
                        PAGE_ALIGN(control->xyz_pa) +
                        ((PAGE_SIZE << XYZ_ALLOC_ORDER) - 1)))
but it's even better to extract everything to a new function and not use
page_address_valid at all.
static inline nested_check_pa(struct kvm_vcpu *vcpu, uint64_t pa,
unsigned int order)
{
uint64_t last_pa = PAGE_ALIGN(pa) + (PAGE_SIZE << order) - 1;
return last_pa > pa && !(last_pa >> cpuid_maxphyaddr(vcpu));
}
Paolo
+ return false;
+
return true;
}
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
{
bool vmcb12_lma;
@@ -258,10 +266,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
(vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
return false;
}
- if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ if (!kvm_is_valid_cr4(vcpu, vmcb12->save.cr4))
return false;
- return nested_vmcb_check_controls(&vmcb12->control);
+ return nested_vmcb_check_controls(vcpu, &vmcb12->control);
}
static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -488,7 +496,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(svm, vmcb12)) {
+ if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -1176,7 +1184,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
ret = -EINVAL;
- if (!nested_vmcb_check_controls(ctl))
+ if (!nested_vmcb_check_controls(vcpu, ctl))
goto out_free;
/*