On Mon, Jul 15, 2019 at 11:30:43PM +0300, Liran Alon wrote: > I think this name is more appropriate to what the x86_ops method does. > In addition, modify VMX callback to return true as #PF handler can > proceed to emulation in this case. This didn't result in a bug > only because the callback is called when DecodeAssist is supported > which is currently supported only on SVM. > > Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> > Signed-off-by: Liran Alon <liran.alon@xxxxxxxxxx> > --- > arch/x86/include/asm/kvm_host.h | 3 ++- > arch/x86/kvm/mmu.c | 2 +- > arch/x86/kvm/svm.c | 4 ++-- > arch/x86/kvm/vmx/vmx.c | 6 +++--- > 4 files changed, 8 insertions(+), 7 deletions(-) > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index 450d69a1e6fa..536fd56f777d 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -1201,7 +1201,8 @@ struct kvm_x86_ops { > uint16_t *vmcs_version); > uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); > > - bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu); > + /* Returns true if #PF handler can proceed to emulation */ > + bool (*handle_no_insn_on_page_fault)(struct kvm_vcpu *vcpu); The problem with this name is that it requires a comment to explain the boolean return value. The VMX implementation in particular would be inscrutable. "no insn" is also a misnomer, as the AMD quirk has an insn, it's the insn_len that's missing. What about something like force_emulation_on_zero_len_insn()? > }; > > struct kvm_arch_async_pf { > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c > index 1e9ba81accba..889de3ccf655 100644 > --- a/arch/x86/kvm/mmu.c > +++ b/arch/x86/kvm/mmu.c > @@ -5423,7 +5423,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, > * guest, with the exception of AMD Erratum 1096 which is unrecoverable. 
> */ > if (unlikely(insn && !insn_len)) { > - if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) > + if (!kvm_x86_ops->handle_no_insn_on_page_fault(vcpu)) > return 1; > } > > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c > index 79023a41f7a7..ab89bb0de8df 100644 > --- a/arch/x86/kvm/svm.c > +++ b/arch/x86/kvm/svm.c > @@ -7118,7 +7118,7 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, > return -ENODEV; > } > > -static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) > +static bool svm_handle_no_insn_on_page_fault(struct kvm_vcpu *vcpu) > { > bool is_user, smap; > > @@ -7291,7 +7291,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { > .nested_enable_evmcs = nested_enable_evmcs, > .nested_get_evmcs_version = nested_get_evmcs_version, > > - .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, > + .handle_no_insn_on_page_fault = svm_handle_no_insn_on_page_fault, > }; > > static int __init svm_init(void) > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index f64bcbb03906..088fc6d943e9 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -7419,9 +7419,9 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) > return 0; > } > > -static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) > +static bool vmx_handle_no_insn_on_page_fault(struct kvm_vcpu *vcpu) > { > - return 0; > + return true; Any functional change here should be done in a different patch. Given that we should never reach this point on VMX, a WARN and triple fault request seems in order. 
WARN_ON_ONCE(1); kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return false; > } > > static __init int hardware_setup(void) > @@ -7726,7 +7726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { > .set_nested_state = NULL, > .get_vmcs12_pages = NULL, > .nested_enable_evmcs = NULL, > - .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, > + .handle_no_insn_on_page_fault = vmx_handle_no_insn_on_page_fault, > }; > > static void vmx_cleanup_l1d_flush(void) > -- > 2.20.1 >