Push the injection of #GP up to the callers, so that they can just use
kvm_complete_insn_gp.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 11 ++++++-----
 arch/x86/kvm/vmx/vmx.c | 11 ++++++-----
 arch/x86/kvm/x86.c     |  9 +++------
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 65d70b9691b4..c0d41a6920f0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3057,6 +3057,7 @@ static int invpcid_interception(struct vcpu_svm *svm)
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	unsigned long type;
 	gva_t gva;
+	int err;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
@@ -3071,12 +3072,12 @@ static int invpcid_interception(struct vcpu_svm *svm)
 	type = svm->vmcb->control.exit_info_2;
 	gva = svm->vmcb->control.exit_info_1;
 
-	if (type > 3) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
+	if (type > 3)
+		err = 1;
+	else
+		err = kvm_handle_invpcid(vcpu, type, gva);
 
-	return kvm_handle_invpcid(vcpu, type, gva);
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 28daceb4f70d..a07fce6d0bbb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5559,6 +5559,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		u64 pcid;
 		u64 gla;
 	} operand;
+	int err = 1;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5568,10 +5569,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
-	if (type > 3) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
+	if (type > 3)
+		goto out;
 
 	/* According to the Intel instruction reference, the memory operand
 	 * is read even if it isn't needed (e.g., for type==all)
@@ -5581,7 +5580,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 				sizeof(operand), &gva))
 		return 1;
 
-	return kvm_handle_invpcid(vcpu, type, gva);
+	err = kvm_handle_invpcid(vcpu, type, gva);
+out:
+	return kvm_complete_insn_gp(vcpu, err);
 }
 
 static int handle_pml_full(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 08568c47337c..edbeb162012b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11375,7 +11375,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 		return kvm_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.pcid >> 12 != 0) {
-		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
@@ -11385,15 +11384,13 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 	case INVPCID_TYPE_INDIV_ADDR:
 		if ((!pcid_enabled && (operand.pcid != 0)) ||
 		    is_noncanonical_address(operand.gla, vcpu)) {
-			kvm_inject_gp(vcpu, 0);
 			return 1;
 		}
 		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
-		return kvm_skip_emulated_instruction(vcpu);
+		return 0;
 
 	case INVPCID_TYPE_SINGLE_CTXT:
 		if (!pcid_enabled && (operand.pcid != 0)) {
-			kvm_inject_gp(vcpu, 0);
 			return 1;
 		}
 
@@ -11414,7 +11411,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 		 * resync will happen anyway before switching to any other CR3.
 		 */
 
-		return kvm_skip_emulated_instruction(vcpu);
+		return 0;
 
 	case INVPCID_TYPE_ALL_NON_GLOBAL:
 		/*
@@ -11427,7 +11424,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 		fallthrough;
 	case INVPCID_TYPE_ALL_INCL_GLOBAL:
 		kvm_mmu_unload(vcpu);
-		return kvm_skip_emulated_instruction(vcpu);
+		return 0;
 
 	default:
 		BUG(); /* We have already checked above that type <= 3 */
-- 
2.26.2
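
For reference, the helper the callers now funnel through is the existing
kvm_complete_insn_gp() in arch/x86/kvm/x86.c; a sketch of its logic around
the time of this series (see the tree for the authoritative version):

	int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
	{
		/* Nonzero err: inject #GP(0), exit was handled in kernel. */
		if (err)
			kvm_inject_gp(vcpu, 0);
		else
			/* Success: advance RIP past the emulated instruction. */
			return kvm_skip_emulated_instruction(vcpu);

		return 1;
	}

This is why kvm_handle_invpcid() can simply return 1 for the #GP conditions
and 0 on success: the single kvm_complete_insn_gp(vcpu, err) call in each
caller then either injects the fault or skips the instruction.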