Re: [RFC 03/33] KVM: x86: hyper-v: Introduce XMM output support


 



On Wed, 2023-11-08 at 13:16 +0100, Alexander Graf wrote:
> On 08.11.23 13:11, Vitaly Kuznetsov wrote:
> > Alexander Graf <graf@xxxxxxxxxx> writes:
> > 
> > > On 08.11.23 12:17, Nicolas Saenz Julienne wrote:
> > > > Prepare infrastructure to be able to return data through the XMM
> > > > registers when Hyper-V hypercalls are issued in fast mode. The XMM
> > > > registers are exposed to user-space through KVM_EXIT_HYPERV_HCALL and
> > > > restored on successful hypercall completion.
> > > > 
> > > > Signed-off-by: Nicolas Saenz Julienne <nsaenz@xxxxxxxxxx>
> > > > ---
> > > >    arch/x86/include/asm/hyperv-tlfs.h |  2 +-
> > > >    arch/x86/kvm/hyperv.c              | 33 +++++++++++++++++++++++++++++-
> > > >    include/uapi/linux/kvm.h           |  6 ++++++
> > > >    3 files changed, 39 insertions(+), 2 deletions(-)
> > > > 
> > > > diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
> > > > index 2ff26f53cd62..af594aa65307 100644
> > > > --- a/arch/x86/include/asm/hyperv-tlfs.h
> > > > +++ b/arch/x86/include/asm/hyperv-tlfs.h
> > > > @@ -49,7 +49,7 @@
> > > >    /* Support for physical CPU dynamic partitioning events is available*/
> > > >    #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE  BIT(3)
> > > >    /*
> > > > - * Support for passing hypercall input parameter block via XMM
> > > > + * Support for passing hypercall input and output parameter block via XMM
> > > >     * registers is available
> > > >     */
> > > >    #define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE               BIT(4)
> > > > diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> > > > index 238afd7335e4..e1bc861ab3b0 100644
> > > > --- a/arch/x86/kvm/hyperv.c
> > > > +++ b/arch/x86/kvm/hyperv.c
> > > > @@ -1815,6 +1815,7 @@ struct kvm_hv_hcall {
> > > >       u16 rep_idx;
> > > >       bool fast;
> > > >       bool rep;
> > > > +    bool xmm_dirty;
> > > >       sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
> > > > 
> > > >       /*
> > > > @@ -2346,9 +2347,33 @@ static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
> > > >       return ret;
> > > >    }
> > > > 
> > > > +static void kvm_hv_write_xmm(struct kvm_hyperv_xmm_reg *xmm)
> > > > +{
> > > > +    int reg;
> > > > +
> > > > +    kvm_fpu_get();
> > > > +    for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++) {
> > > > +            const sse128_t data = sse128(xmm[reg].low, xmm[reg].high);
> > > > +            _kvm_write_sse_reg(reg, &data);
> > > > +    }
> > > > +    kvm_fpu_put();
> > > > +}
> > > > +
> > > > +static bool kvm_hv_is_xmm_output_hcall(u16 code)
> > > > +{
> > > > +    return false;
> > > > +}
> > > > +
> > > >    static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
> > > >    {
> > > > -    return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
> > > > +    bool fast = !!(vcpu->run->hyperv.u.hcall.input & HV_HYPERCALL_FAST_BIT);
> > > > +    u16 code = vcpu->run->hyperv.u.hcall.input & 0xffff;
> > > > +    u64 result = vcpu->run->hyperv.u.hcall.result;
> > > > +
> > > > +    if (kvm_hv_is_xmm_output_hcall(code) && hv_result_success(result) && fast)
> > > > +            kvm_hv_write_xmm(vcpu->run->hyperv.u.hcall.xmm);
> > > > +
> > > > +    return kvm_hv_hypercall_complete(vcpu, result);
> > > >    }
> > > > 
> > > >    static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
> > > > @@ -2623,6 +2648,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
> > > >               break;
> > > >       }
> > > > 
> > > > +    if ((ret & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS && hc.xmm_dirty)
> > > > +            kvm_hv_write_xmm((struct kvm_hyperv_xmm_reg*)hc.xmm);
> > > > +
> > > >    hypercall_complete:
> > > >       return kvm_hv_hypercall_complete(vcpu, ret);
> > > > 
> > > > @@ -2632,6 +2660,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
> > > >       vcpu->run->hyperv.u.hcall.input = hc.param;
> > > >       vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
> > > >       vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
> > > > +    if (hc.fast)
> > > > +            memcpy(vcpu->run->hyperv.u.hcall.xmm, hc.xmm, sizeof(hc.xmm));
> > > >       vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
> > > >       return 0;
> > > >    }
> > > > @@ -2780,6 +2810,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
> > > >                       ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
> > > > 
> > > >                       ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
> > > > +                    ent->edx |= HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE;
> > > 
> > > Shouldn't this be guarded by an ENABLE_CAP to make sure old user space
> > > that doesn't know about xmm outputs is still able to run with newer kernels?
> > > 
> > No, we don't do CAPs for new Hyper-V features anymore since we have
> > KVM_GET_SUPPORTED_HV_CPUID. Userspace is not supposed to simply copy
> > its output into guest-visible CPUIDs; it must only enable the features it
> > knows about. Even the 'hv_passthrough' option in QEMU doesn't pass unknown
> > features through.
> 
> Ah, nice :). That simplifies things.
> 
> 
> Alex
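
As Vitaly says, the expected flow is for userspace to query
KVM_GET_SUPPORTED_HV_CPUID and only turn on the bits it knows how to
handle, rather than relying on an ENABLE_CAP. Roughly like the sketch
below (not QEMU code; the function name, the 64-entry sizing and the
policy flag are made up for illustration, the defines mirror
hyperv-tlfs.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define HYPERV_CPUID_FEATURES			0x40000003
/* BIT(15) in EDX of the features leaf, as in hyperv-tlfs.h */
#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE	(1u << 15)

/* Made-up policy knob: does this VMM implement XMM output handling? */
static bool vmm_handles_xmm_output = true;

static int hv_filter_xmm_output(int vcpu_fd, struct kvm_cpuid2 *guest_cpuid)
{
	unsigned int nent = 64, i;
	struct kvm_cpuid2 *hv;
	uint32_t kvm_edx = 0;

	hv = calloc(1, sizeof(*hv) + nent * sizeof(struct kvm_cpuid_entry2));
	if (!hv)
		return -1;
	hv->nent = nent;

	/* Ask KVM which Hyper-V CPUID bits it is able to emulate. */
	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, hv) < 0) {
		free(hv);
		return -1;
	}

	for (i = 0; i < hv->nent; i++)
		if (hv->entries[i].function == HYPERV_CPUID_FEATURES)
			kvm_edx = hv->entries[i].edx;
	free(hv);

	/* Advertise the feature only if both KVM and the VMM support it. */
	for (i = 0; i < guest_cpuid->nent; i++) {
		if (guest_cpuid->entries[i].function != HYPERV_CPUID_FEATURES)
			continue;
		if (vmm_handles_xmm_output &&
		    (kvm_edx & HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE))
			guest_cpuid->entries[i].edx |=
				HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE;
	}
	return 0;
}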


Other remarks aside, I think this patch is reasonable on its own, and it
could probably be queued ahead of the main VSM series, assuming it comes
with a unit test so we don't end up with dead code in the kernel.
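
For reference, the userspace half that such a test (or a VMM) needs would
look roughly like the sketch below, assuming the kvm_run additions from
this patch (input, result and the new xmm[] array with .low/.high). The
hypercall code, the output value and the function name are placeholders I
made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <linux/kvm.h>

#define HV_HYPERCALL_FAST_BIT			(1ULL << 16)
#define HV_STATUS_SUCCESS			0
#define HV_STATUS_INVALID_HYPERCALL_CODE	2
#define HV_EXT_CALL_EXAMPLE			0x8001	/* placeholder code */

static void handle_hyperv_hcall(struct kvm_run *run)
{
	uint64_t input = run->hyperv.u.hcall.input;
	uint16_t code  = input & 0xffff;
	bool fast      = input & HV_HYPERCALL_FAST_BIT;

	switch (code) {
	case HV_EXT_CALL_EXAMPLE:
		if (fast) {
			/*
			 * Return data through the new xmm[] array; on a
			 * successful completion KVM writes it back into the
			 * vCPU's XMM registers (once the hypercall is listed
			 * in kvm_hv_is_xmm_output_hcall()).
			 */
			run->hyperv.u.hcall.xmm[0].low  = 0x1234;
			run->hyperv.u.hcall.xmm[0].high = 0;
		}
		run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
		break;
	default:
		run->hyperv.u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}
}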

Best regards,
	Maxim Levitsky
