Per Intel's SDM, executing the vmcall instruction at CPL3 in non-VMX
operation triggers a #UD, and executing it at CPL3 in VMX root operation
triggers a #GP. So the hypervisor can inject these exceptions for a
guest at CPL3 in order to behave like the host. Per AMD's APM, there is
no CPL check for the vmmcall instruction, but using it on the host can
trigger a #UD, so it is suitable for the hypervisor to inject a #UD.

Fixes: 07708c4af1346 ("KVM: x86: Disallow hypercalls for guest callers in rings > 0")
Signed-off-by: Hou Wenlong <houwenlong93@xxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 +
 arch/x86/include/asm/kvm_host.h    | 1 +
 arch/x86/kvm/svm/svm.c             | 6 ++++++
 arch/x86/kvm/vmx/vmx.c             | 9 +++++++++
 arch/x86/kvm/x86.c                 | 6 +++---
 5 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index cefe1d81e2e8..00a8b8c80cb0 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -60,6 +60,7 @@ KVM_X86_OP_NULL(update_emulated_instruction)
 KVM_X86_OP(set_interrupt_shadow)
 KVM_X86_OP(get_interrupt_shadow)
 KVM_X86_OP(patch_hypercall)
+KVM_X86_OP(handle_hypercall_fail)
 KVM_X86_OP(set_irq)
 KVM_X86_OP(set_nmi)
 KVM_X86_OP(queue_exception)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f8f48a7ec577..3548c8047820 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1369,6 +1369,7 @@ struct kvm_x86_ops {
 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
+	void (*handle_hypercall_fail)(struct kvm_vcpu *vcpu);
 	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1a70e11f0487..a8048e5b2aff 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3944,6 +3944,11 @@ svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned
char *hypercall)
 	hypercall[2] = 0xd9;
 }
 
+static void svm_handle_hypercall_fail(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+}
+
 static int __init svm_check_processor_compat(void)
 {
 	return 0;
@@ -4563,6 +4568,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_interrupt_shadow = svm_set_interrupt_shadow,
 	.get_interrupt_shadow = svm_get_interrupt_shadow,
 	.patch_hypercall = svm_patch_hypercall,
+	.handle_hypercall_fail = svm_handle_hypercall_fail,
 	.set_irq = svm_set_irq,
 	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0c2c0d5ae873..3bd66eb46309 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4921,6 +4921,14 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
+static void vmx_handle_hypercall_fail(struct kvm_vcpu *vcpu)
+{
+	if (to_vmx(vcpu)->nested.vmxon)
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+	else
+		kvm_queue_exception(vcpu, UD_VECTOR);
+}
+
 /* called to set cr0 as appropriate for a mov-to-cr0 exit.
*/
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -7606,6 +7614,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_interrupt_shadow = vmx_set_interrupt_shadow,
 	.get_interrupt_shadow = vmx_get_interrupt_shadow,
 	.patch_hypercall = vmx_patch_hypercall,
+	.handle_hypercall_fail = vmx_handle_hypercall_fail,
 	.set_irq = vmx_inject_irq,
 	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 28ef14155726..4e2836b94a01 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8665,8 +8665,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	}
 
 	if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
-		ret = -KVM_EPERM;
-		goto out;
+		static_call(kvm_x86_handle_hypercall_fail)(vcpu);
+		return 1;
 	}
 
 	ret = -KVM_ENOSYS;
@@ -8727,7 +8727,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-out:
+
 	if (!op_64_bit)
 		ret = (u32)ret;
 	kvm_rax_write(vcpu, ret);
-- 
2.31.1