Re: [PATCH v2 15/18] KVM: nVMX: call kvm_skip_emulated_instruction in nested_vmx_{fail,succeed}

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Tue, Aug 28, 2018 at 9:04 AM, Sean Christopherson
<sean.j.christopherson@xxxxxxxxx> wrote:
> ... as every invocation of nested_vmx_{fail,succeed} is immediately
> followed by a call to kvm_skip_emulated_instruction().  This saves
> a bit of code and eliminates some silly paths, e.g. nested_vmx_run()
> ended up with a goto label purely used to call and return
> kvm_skip_emulated_instruction().
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> ---

Very nice.

Reviewed-by: Jim Mattson <jmattson@xxxxxxxxxx>

>  arch/x86/kvm/vmx.c | 199 +++++++++++++++++----------------------------
>  1 file changed, 76 insertions(+), 123 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 8ab028fb1f5f..1edf82632832 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8054,35 +8054,37 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
>
>  /*
>   * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
> - * set the success or error code of an emulated VMX instruction, as specified
> - * by Vol 2B, VMX Instruction Reference, "Conventions".
> + * set the success or error code of an emulated VMX instruction (as specified
> + * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
> + * instruction.
>   */
> -static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
> +static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
>  {
>         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
>                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
>                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
> +       return kvm_skip_emulated_instruction(vcpu);
>  }
>
> -static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
> +static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
>  {
>         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
>                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
>                             X86_EFLAGS_SF | X86_EFLAGS_OF))
>                         | X86_EFLAGS_CF);
> +       return kvm_skip_emulated_instruction(vcpu);
>  }
>
> -static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
> +static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
>                                         u32 vm_instruction_error)
>  {
> -       if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
> -               /*
> -                * failValid writes the error number to the current VMCS, which
> -                * can't be done there isn't a current VMCS.
> -                */
> -               nested_vmx_failInvalid(vcpu);
> -               return;
> -       }
> +       /*
> +        * failValid writes the error number to the current VMCS, which
> +        * can't be done if there isn't a current VMCS.
> +        */
> +       if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
> +               return nested_vmx_failInvalid(vcpu);
> +
>         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
>                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
>                             X86_EFLAGS_SF | X86_EFLAGS_OF))
> @@ -8092,6 +8094,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
>          * We don't need to force a shadow sync because
>          * VM_INSTRUCTION_ERROR is not shadowed
>          */
> +       return kvm_skip_emulated_instruction(vcpu);
>  }
>
>  static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
> @@ -8333,8 +8336,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>         }
>
>         if (vmx->nested.vmxon) {
> -               nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
>         }
>
>         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
> @@ -8354,21 +8357,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>          * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
>          * which replaces physical address width with 32
>          */
> -       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> -               nested_vmx_failInvalid(vcpu);
> -               return kvm_skip_emulated_instruction(vcpu);
> -       }
> +       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
> +               return nested_vmx_failInvalid(vcpu);
>
>         page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
> -       if (is_error_page(page)) {
> -               nested_vmx_failInvalid(vcpu);
> -               return kvm_skip_emulated_instruction(vcpu);
> -       }
> +       if (is_error_page(page))
> +               return nested_vmx_failInvalid(vcpu);
> +
>         if (*(u32 *)kmap(page) != VMCS12_REVISION) {
>                 kunmap(page);
>                 kvm_release_page_clean(page);
> -               nested_vmx_failInvalid(vcpu);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failInvalid(vcpu);
>         }
>         kunmap(page);
>         kvm_release_page_clean(page);
> @@ -8378,8 +8377,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>         if (ret)
>                 return ret;
>
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  /*
> @@ -8479,8 +8477,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
>         if (!nested_vmx_check_permission(vcpu))
>                 return 1;
>         free_nested(to_vmx(vcpu));
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  /* Emulate the VMCLEAR instruction */
> @@ -8497,13 +8494,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
>                 return 1;
>
>         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> -               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_VMCLEAR_INVALID_ADDRESS);
>         }
>
>         if (vmptr == vmx->nested.vmxon_ptr) {
> -               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_VMCLEAR_VMXON_POINTER);
>         }
>
>         if (vmptr == vmx->nested.current_vmptr)
> @@ -8513,8 +8510,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
>                         vmptr + offsetof(struct vmcs12, launch_state),
>                         &zero, sizeof(zero));
>
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
> @@ -8670,20 +8666,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
>         vmcs_load(vmx->loaded_vmcs->vmcs);
>  }
>
> -/*
> - * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
> - * used before) all generate the same failure when it is missing.
> - */
> -static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
> -{
> -       struct vcpu_vmx *vmx = to_vmx(vcpu);
> -       if (vmx->nested.current_vmptr == -1ull) {
> -               nested_vmx_failInvalid(vcpu);
> -               return 0;
> -       }
> -       return 1;
> -}
> -
>  static int handle_vmread(struct kvm_vcpu *vcpu)
>  {
>         unsigned long field;
> @@ -8696,8 +8678,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>         if (!nested_vmx_check_permission(vcpu))
>                 return 1;
>
> -       if (!nested_vmx_check_vmcs12(vcpu))
> -               return kvm_skip_emulated_instruction(vcpu);
> +       if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
> +               return nested_vmx_failInvalid(vcpu);
>
>         if (!is_guest_mode(vcpu))
>                 vmcs12 = get_vmcs12(vcpu);
> @@ -8706,10 +8688,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>                  * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
>                  * to shadowed-field sets the ALU flags for VMfailInvalid.
>                  */
> -               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
> -                       nested_vmx_failInvalid(vcpu);
> -                       return kvm_skip_emulated_instruction(vcpu);
> -               }
> +               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
> +                       return nested_vmx_failInvalid(vcpu);
>                 vmcs12 = get_shadow_vmcs12(vcpu);
>         }
>
> @@ -8717,9 +8697,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
>         /* Read the field, zero-extended to a u64 field_value */
>         if (vmcs12_read_any(vmcs12, field, &field_value) < 0) {
> -               nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_UNSUPPORTED_VMCS_COMPONENT);
>         }

Nit: Remove the braces.

> +
>         /*
>          * Now copy part of this value to register or memory, as requested.
>          * Note that the number of bits actually copied is 32 or 64 depending
> @@ -8737,8 +8718,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>                                             (is_long_mode(vcpu) ? 8 : 4), NULL);
>         }
>
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>
> @@ -8763,8 +8743,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>         if (!nested_vmx_check_permission(vcpu))
>                 return 1;
>
> -       if (!nested_vmx_check_vmcs12(vcpu))
> -               return kvm_skip_emulated_instruction(vcpu);
> +       if (vmx->nested.current_vmptr == -1ull)
> +               return nested_vmx_failInvalid(vcpu);
>
>         if (vmx_instruction_info & (1u << 10))
>                 field_value = kvm_register_readl(vcpu,
> @@ -8788,9 +8768,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>          */
>         if (vmcs_field_readonly(field) &&
>             !nested_cpu_has_vmwrite_any_field(vcpu)) {
> -               nested_vmx_failValid(vcpu,
> +               return nested_vmx_failValid(vcpu,
>                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
> -               return kvm_skip_emulated_instruction(vcpu);
>         }

Nit: Remove the braces.

>
>         if (!is_guest_mode(vcpu))
> @@ -8800,17 +8779,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>                  * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
>                  * to shadowed-field sets the ALU flags for VMfailInvalid.
>                  */
> -               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
> -                       nested_vmx_failInvalid(vcpu);
> -                       return kvm_skip_emulated_instruction(vcpu);
> -               }
> +               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
> +                       return nested_vmx_failInvalid(vcpu);
>                 vmcs12 = get_shadow_vmcs12(vcpu);
> -
>         }
>
>         if (vmcs12_write_any(vmcs12, field, field_value) < 0) {
> -               nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_UNSUPPORTED_VMCS_COMPONENT);
>         }

Nit: Remove the braces.

>
>         /*
> @@ -8833,8 +8809,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>                 }
>         }
>
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
> @@ -8863,32 +8838,30 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>                 return 1;
>
>         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> -               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_VMPTRLD_INVALID_ADDRESS);
>         }

Nit: Remove the braces.

>
>         if (vmptr == vmx->nested.vmxon_ptr) {
> -               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
> -               return kvm_skip_emulated_instruction(vcpu);
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_VMPTRLD_VMXON_POINTER);
>         }

Nit: Remove the braces.

>
>         if (vmx->nested.current_vmptr != vmptr) {
>                 struct vmcs12 *new_vmcs12;
>                 struct page *page;
>                 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
> -               if (is_error_page(page)) {
> -                       nested_vmx_failInvalid(vcpu);
> -                       return kvm_skip_emulated_instruction(vcpu);
> -               }
> +               if (is_error_page(page))
> +                       return nested_vmx_failInvalid(vcpu);
> +
>                 new_vmcs12 = kmap(page);
>                 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
>                     (new_vmcs12->hdr.shadow_vmcs &&
>                      !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
>                         kunmap(page);
>                         kvm_release_page_clean(page);
> -                       nested_vmx_failValid(vcpu,
> +                       return nested_vmx_failValid(vcpu,
>                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
> -                       return kvm_skip_emulated_instruction(vcpu);
>                 }
>
>                 nested_release_vmcs12(vmx);
> @@ -8903,8 +8876,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>                 set_current_vmptr(vmx, vmptr);
>         }
>
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  /* Emulate the VMPTRST instruction */
> @@ -8927,8 +8899,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
>                 kvm_inject_page_fault(vcpu, &e);
>                 return 1;
>         }
> -       nested_vmx_succeed(vcpu);
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  /* Emulate the INVEPT instruction */
> @@ -8959,9 +8930,8 @@ static int handle_invept(struct kvm_vcpu *vcpu)
>         types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
>
>         if (type >= 32 || !(types & (1 << type))) {
> -               nested_vmx_failValid(vcpu,
> +               return nested_vmx_failValid(vcpu,
>                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> -               return kvm_skip_emulated_instruction(vcpu);
>         }

Nit: Remove the braces.

>
>         /* According to the Intel VMX instruction reference, the memory
> @@ -8984,14 +8954,13 @@ static int handle_invept(struct kvm_vcpu *vcpu)
>         case VMX_EPT_EXTENT_CONTEXT:
>                 kvm_mmu_sync_roots(vcpu);
>                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> -               nested_vmx_succeed(vcpu);
>                 break;
>         default:
>                 BUG_ON(1);
>                 break;
>         }
>
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  static int handle_invvpid(struct kvm_vcpu *vcpu)
> @@ -9023,9 +8992,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
>                         VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
>
>         if (type >= 32 || !(types & (1 << type))) {
> -               nested_vmx_failValid(vcpu,
> +               return nested_vmx_failValid(vcpu,
>                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> -               return kvm_skip_emulated_instruction(vcpu);
>         }

Nit: Remove the braces.

>
>         /* according to the intel vmx instruction reference, the memory
> @@ -9039,19 +9007,18 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
>                 return 1;
>         }
>         if (operand.vpid >> 16) {
> -               nested_vmx_failValid(vcpu,
> +               return nested_vmx_failValid(vcpu,
>                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> -               return kvm_skip_emulated_instruction(vcpu);
>         }

Nit: Remove the braces.

>
>         switch (type) {
>         case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
>                 if (!operand.vpid ||
>                     is_noncanonical_address(operand.gla, vcpu)) {
> -                       nested_vmx_failValid(vcpu,
> +                       return nested_vmx_failValid(vcpu,
>                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> -                       return kvm_skip_emulated_instruction(vcpu);
>                 }

Nit: Remove the braces.

> +
>                 if (cpu_has_vmx_invvpid_individual_addr() &&
>                     vmx->nested.vpid02) {
>                         __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
> @@ -9062,9 +9029,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
>         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
>         case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
>                 if (!operand.vpid) {
> -                       nested_vmx_failValid(vcpu,
> +                       return nested_vmx_failValid(vcpu,
>                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> -                       return kvm_skip_emulated_instruction(vcpu);
>                 }

Nit: Remove the braces.

>                 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
>                 break;
> @@ -9076,9 +9042,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
>                 return kvm_skip_emulated_instruction(vcpu);
>         }
>
> -       nested_vmx_succeed(vcpu);
> -
> -       return kvm_skip_emulated_instruction(vcpu);
> +       return nested_vmx_succeed(vcpu);
>  }
>
>  static int handle_invpcid(struct kvm_vcpu *vcpu)
> @@ -12686,8 +12650,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>         if (!nested_vmx_check_permission(vcpu))
>                 return 1;
>
> -       if (!nested_vmx_check_vmcs12(vcpu))
> -               goto out;
> +       if (vmx->nested.current_vmptr == -1ull)
> +               return nested_vmx_failInvalid(vcpu);
>
>         vmcs12 = get_vmcs12(vcpu);
>
> @@ -12697,10 +12661,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>          * rather than RFLAGS.ZF, and no error number is stored to the
>          * VM-instruction error field.
>          */
> -       if (vmcs12->hdr.shadow_vmcs) {
> -               nested_vmx_failInvalid(vcpu);
> -               goto out;
> -       }
> +       if (vmcs12->hdr.shadow_vmcs)
> +               return nested_vmx_failInvalid(vcpu);
>
>         if (enable_shadow_vmcs)
>                 copy_shadow_to_vmcs12(vmx);
> @@ -12716,23 +12678,19 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>          * when using the merged vmcs02.
>          */
>         if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
> -               nested_vmx_failValid(vcpu,
> -                                    VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
> -               goto out;
> +               return nested_vmx_failValid(vcpu,
> +                       VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
>         }

Nit: Remove the braces.

>
>         if (vmcs12->launch_state == launch) {
> -               nested_vmx_failValid(vcpu,
> +               return nested_vmx_failValid(vcpu,
>                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
>                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
> -               goto out;
>         }

Nit: Remove the braces.



[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux