Re: [RFC 03/33] KVM: x86: hyper-v: Introduce XMM output support

On 08.11.23 12:17, Nicolas Saenz Julienne wrote:
Prepare infrastructure to be able to return data through the XMM
registers when Hyper-V hypercalls are issued in fast mode. The XMM
registers are exposed to user-space through KVM_EXIT_HYPERV_HCALL and
restored on successful hypercall completion.

Signed-off-by: Nicolas Saenz Julienne <nsaenz@xxxxxxxxxx>
---
  arch/x86/include/asm/hyperv-tlfs.h |  2 +-
  arch/x86/kvm/hyperv.c              | 33 +++++++++++++++++++++++++++++-
  include/uapi/linux/kvm.h           |  6 ++++++
  3 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 2ff26f53cd62..af594aa65307 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -49,7 +49,7 @@
  /* Support for physical CPU dynamic partitioning events is available*/
  #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE	BIT(3)
  /*
- * Support for passing hypercall input parameter block via XMM
+ * Support for passing hypercall input and output parameter block via XMM
   * registers is available
   */
  #define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE		BIT(4)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238afd7335e4..e1bc861ab3b0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1815,6 +1815,7 @@ struct kvm_hv_hcall {
  	u16 rep_idx;
  	bool fast;
  	bool rep;
+	bool xmm_dirty;
  	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];

  	/*
@@ -2346,9 +2347,33 @@ static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
  	return ret;
  }
+static void kvm_hv_write_xmm(struct kvm_hyperv_xmm_reg *xmm)
+{
+	int reg;
+
+	kvm_fpu_get();
+	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++) {
+		const sse128_t data = sse128(xmm[reg].low, xmm[reg].high);
+		_kvm_write_sse_reg(reg, &data);
+	}
+	kvm_fpu_put();
+}
+
+static bool kvm_hv_is_xmm_output_hcall(u16 code)
+{
+	return false;
+}
+
  static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
  {
-	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
+	bool fast = !!(vcpu->run->hyperv.u.hcall.input & HV_HYPERCALL_FAST_BIT);
+	u16 code = vcpu->run->hyperv.u.hcall.input & 0xffff;
+	u64 result = vcpu->run->hyperv.u.hcall.result;
+
+	if (kvm_hv_is_xmm_output_hcall(code) && hv_result_success(result) && fast)
+		kvm_hv_write_xmm(vcpu->run->hyperv.u.hcall.xmm);
+
+	return kvm_hv_hypercall_complete(vcpu, result);
  }
  static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
@@ -2623,6 +2648,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  		break;
  	}
+	if ((ret & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS && hc.xmm_dirty)
+		kvm_hv_write_xmm((struct kvm_hyperv_xmm_reg*)hc.xmm);
+
  hypercall_complete:
  	return kvm_hv_hypercall_complete(vcpu, ret);
@@ -2632,6 +2660,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  	vcpu->run->hyperv.u.hcall.input = hc.param;
  	vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
  	vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
+	if (hc.fast)
+		memcpy(vcpu->run->hyperv.u.hcall.xmm, hc.xmm, sizeof(hc.xmm));
  	vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
  	return 0;
  }
@@ -2780,6 +2810,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
  			ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
  			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+			ent->edx |= HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE;


Shouldn't this be guarded by an ENABLE_CAP to make sure old user space that doesn't know about xmm outputs is still able to run with newer kernels?
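
Something along these lines is what I have in mind -- purely a sketch, with a made-up KVM_CAP_HYPERV_HCALL_XMM_OUTPUT capability name and a hypothetical xmm_output_enabled flag in struct kvm_hv, just to illustrate the opt-in:

	/*
	 * Hypothetical names (KVM_CAP_HYPERV_HCALL_XMM_OUTPUT,
	 * xmm_output_enabled) -- only to illustrate the idea.
	 */

	/* in kvm_vm_ioctl_enable_cap() */
	case KVM_CAP_HYPERV_HCALL_XMM_OUTPUT:
		/* User space opts in, so it knows the exit may carry XMM data. */
		to_kvm_hv(kvm)->xmm_output_enabled = true;
		return 0;

	/* in kvm_get_hv_cpuid() */
	ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
	if (to_kvm_hv(vcpu->kvm)->xmm_output_enabled)
		ent->edx |= HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE;

With that, only a VMM that explicitly asked for the capability would ever see the new exit layout or have the output bit advertised to its guests.
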


  			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
  			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index d7a01766bf21..5ce06a1eee2b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -192,6 +192,11 @@ struct kvm_s390_cmma_log {
  	__u64 values;
  };
+struct kvm_hyperv_xmm_reg {
+	__u64 low;
+	__u64 high;
+};
+
  struct kvm_hyperv_exit {
  #define KVM_EXIT_HYPERV_SYNIC          1
  #define KVM_EXIT_HYPERV_HCALL          2
@@ -210,6 +215,7 @@ struct kvm_hyperv_exit {
  			__u64 input;
  			__u64 result;
  			__u64 params[2];
+			struct kvm_hyperv_xmm_reg xmm[6];


Would this change the size of struct kvm_hyperv_exit? And if so, wouldn't that potentially be a UABI breakage?
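
For reference, a throwaway user-space check makes it easy to compare the ABI-visible sizes before and after the patch; nothing is assumed here beyond the uapi header:

	#include <stdio.h>
	#include <linux/kvm.h>

	int main(void)
	{
		/*
		 * struct kvm_run pads its union to 256 bytes, so the
		 * interesting numbers are kvm_hyperv_exit itself and the
		 * overall kvm_run size.
		 */
		printf("sizeof(struct kvm_hyperv_exit) = %zu\n",
		       sizeof(struct kvm_hyperv_exit));
		printf("sizeof(struct kvm_run)         = %zu\n",
		       sizeof(struct kvm_run));
		return 0;
	}

If the union in kvm_hyperv_exit grows, sizeof(struct kvm_hyperv_exit) changes, even though struct kvm_run itself should stay put thanks to its padding[256].
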


Alex




Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 149173 B
Sitz: Berlin
Ust-ID: DE 289 237 879





