On Tue, Feb 02, 2021, Paolo Bonzini wrote:
> Push the injection of #GP up to the callers, so that they can just use
> kvm_complete_insn_gp.
>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---

...

> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 08568c47337c..edbeb162012b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -11375,7 +11375,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
>                  return kvm_handle_memory_failure(vcpu, r, &e);

This is broken.  kvm_handle_memory_failure() injects a #PF and returns 1,
which means an error here will trigger #PF->#GP->#DF.

>
>          if (operand.pcid >> 12 != 0) {
> -                kvm_inject_gp(vcpu, 0);
>                  return 1;
>          }

Curly braces can be dropped.

> @@ -11385,15 +11384,13 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
>          case INVPCID_TYPE_INDIV_ADDR:
>                  if ((!pcid_enabled && (operand.pcid != 0)) ||
>                      is_noncanonical_address(operand.gla, vcpu)) {
> -                        kvm_inject_gp(vcpu, 0);
>                          return 1;
>                  }
>                  kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
> -                return kvm_skip_emulated_instruction(vcpu);
> +                return 0;
>
>          case INVPCID_TYPE_SINGLE_CTXT:
>                  if (!pcid_enabled && (operand.pcid != 0)) {
> -                        kvm_inject_gp(vcpu, 0);
>                          return 1;
>                  }

Same here.

>
> @@ -11414,7 +11411,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
>                   * resync will happen anyway before switching to any other CR3.
>                   */
>
> -                return kvm_skip_emulated_instruction(vcpu);
> +                return 0;
>
>          case INVPCID_TYPE_ALL_NON_GLOBAL:
>                  /*
> @@ -11427,7 +11424,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
>                  fallthrough;
>          case INVPCID_TYPE_ALL_INCL_GLOBAL:
>                  kvm_mmu_unload(vcpu);
> -                return kvm_skip_emulated_instruction(vcpu);
> +                return 0;
>
>          default:
>                  BUG(); /* We have already checked above that type <= 3 */
> --
> 2.26.2

IMO, this isn't an improvement.  For flows that can't easily be consolidated
to x86.c, e.g. CRs (and DRs?), I agree it makes sense to use
kvm_complete_insn_gp(), but this feels forced.

What about a pure refactoring of kvm_handle_invpcid() to get a similar effect?

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef630f8d8bd2..b9f57f9f2422 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11363,28 +11363,23 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
         if (r != X86EMUL_CONTINUE)
                 return kvm_handle_memory_failure(vcpu, r, &e);

-        if (operand.pcid >> 12 != 0) {
-                kvm_inject_gp(vcpu, 0);
-                return 1;
-        }
+        if (operand.pcid >> 12 != 0)
+                goto inject_gp;

         pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

         switch (type) {
         case INVPCID_TYPE_INDIV_ADDR:
                 if ((!pcid_enabled && (operand.pcid != 0)) ||
-                    is_noncanonical_address(operand.gla, vcpu)) {
-                        kvm_inject_gp(vcpu, 0);
-                        return 1;
-                }
+                    is_noncanonical_address(operand.gla, vcpu))
+                        goto inject_gp;
+
                 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
-                return kvm_skip_emulated_instruction(vcpu);
+                break;

         case INVPCID_TYPE_SINGLE_CTXT:
-                if (!pcid_enabled && (operand.pcid != 0)) {
-                        kvm_inject_gp(vcpu, 0);
-                        return 1;
-                }
+                if (!pcid_enabled && (operand.pcid != 0))
+                        goto inject_gp;

                 if (kvm_get_active_pcid(vcpu) == operand.pcid) {
                         kvm_mmu_sync_roots(vcpu);
@@ -11402,8 +11397,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
                  * given PCID, then nothing needs to be done here because a
                  * resync will happen anyway before switching to any other CR3.
                  */
-
-                return kvm_skip_emulated_instruction(vcpu);
+                break;

         case INVPCID_TYPE_ALL_NON_GLOBAL:
                 /*
@@ -11416,11 +11410,17 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
                 fallthrough;
         case INVPCID_TYPE_ALL_INCL_GLOBAL:
                 kvm_mmu_unload(vcpu);
-                return kvm_skip_emulated_instruction(vcpu);
+                break;

         default:
                 BUG(); /* We have already checked above that type <= 3 */
         }
+
+        return kvm_skip_emulated_instruction(vcpu);
+
+inject_gp:
+        kvm_inject_gp(vcpu, 0);
+        return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
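
For reference, kvm_complete_insn_gp() is roughly the below (quoting from
memory, so the exact body may differ):

        int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
        {
                /*
                 * Non-zero err injects #GP without skipping the instruction;
                 * zero means success, so skip the emulated instruction and
                 * let the skip helper decide whether to exit to userspace.
                 */
                if (err)
                        kvm_inject_gp(vcpu, 0);
                else
                        return kvm_skip_emulated_instruction(vcpu);
                return 1;
        }

So, assuming the rest of the series converts the VMX/SVM exit handlers to
something along the lines of

        return kvm_complete_insn_gp(vcpu, kvm_handle_invpcid(vcpu, type, gva));

every non-zero return from kvm_handle_invpcid() gets a #GP, which is exactly
why the kvm_handle_memory_failure() case above ends up stacking #GP on top
of the just-injected #PF.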