Use the #GP interception path to inject a #UD into the guest if the
guest has INVPCID disabled. This is required because for CPL > 0, #GP
takes precedence over the INVPCID intercept.

Note: INVPCID is the three-byte opcode 66 0F 38 82 /r, so the decoder
check must match on opcode_len == 3 as well as the opcode byte;
matching on ctxt->b == 0x82 alone would also catch unrelated
instructions whose last opcode byte is 0x82 (e.g. two-byte 0F 82, JB).

Signed-off-by: Bandan Das <bsd@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 754e07538b4a..0e8ce7adb815 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2170,6 +2170,7 @@ enum {
 	SVM_INSTR_VMRUN,
 	SVM_INSTR_VMLOAD,
 	SVM_INSTR_VMSAVE,
+	SVM_INSTR_INVPCID,
 };
 
 /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
@@ -2177,6 +2178,8 @@ static int svm_instr_opcode(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
 
+	if (ctxt->b == 0x82 && ctxt->opcode_len == 3)
+		return SVM_INSTR_INVPCID;
 	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
 		return NONE_SVM_INSTR;
 
@@ -2200,11 +2203,13 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
 		[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
 		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
 		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
+		[SVM_INSTR_INVPCID] = SVM_EXIT_EXCP_BASE + UD_VECTOR,
 	};
 	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
 		[SVM_INSTR_VMRUN] = vmrun_interception,
 		[SVM_INSTR_VMLOAD] = vmload_interception,
 		[SVM_INSTR_VMSAVE] = vmsave_interception,
+		[SVM_INSTR_INVPCID] = ud_interception,
 	};
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -2253,8 +2258,12 @@ static int gp_interception(struct kvm_vcpu *vcpu)
 		if (!is_guest_mode(vcpu))
 			return kvm_emulate_instruction(vcpu,
 				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
-	} else
+	} else {
+		if ((opcode == SVM_INSTR_INVPCID) &&
+		    guest_cpuid_has(vcpu, X86_FEATURE_INVPCID))
+			goto reinject;
 		return emulate_svm_instr(vcpu, opcode);
+	}
 
 reinject:
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
-- 
2.24.1