VTL call/return hypercalls have their own entry points in the hypercall
page because they don't follow normal Hyper-V hypercall conventions.
Move the VTL call/return control input into ECX/RAX and set the
hypercall code into EAX/RCX before calling the hypercall instruction in
order to be able to use the Hyper-V hypercall entry function.

Guests can read an emulated code page offsets register to know the
offsets into the hypercall page for the VTL call/return entries.

Signed-off-by: Nicolas Saenz Julienne <nsaenz@xxxxxxxxxx>

---

My tree has the additional patch, we're still trying to understand
under what conditions Windows expects the offset to be fixed.

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 54f7f36a89bf..9f2ea8c34447 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -294,6 +294,7 @@ static int patch_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 
 	/* VTL call/return entries */
 	if (!kvm_xen_hypercall_enabled(kvm) && kvm_hv_vsm_enabled(kvm)) {
+		i = 22;
 #ifdef CONFIG_X86_64
 		if (is_64_bit_mode(vcpu)) {
 			/*

---
 arch/x86/include/asm/kvm_host.h   |  2 +
 arch/x86/kvm/hyperv.c             | 78 ++++++++++++++++++++++++++++++-
 include/asm-generic/hyperv-tlfs.h | 11 +++++
 3 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a2f224f95404..00cd21b09f8c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1105,6 +1105,8 @@ struct kvm_hv {
 	u64 hv_tsc_emulation_status;
 	u64 hv_invtsc_control;
 
+	union hv_register_vsm_code_page_offsets vsm_code_page_offsets;
+
 	/* How many vCPUs have VP index != vCPU index */
 	atomic_t num_mismatched_vp_indexes;
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 78d053042667..d4b1b53ea63d 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -259,7 +259,8 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 
 static int patch_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
-	u8 instructions[9];
+	struct kvm_hv *hv = to_kvm_hv(kvm);
+	u8 instructions[0x30];
 	int i = 0;
 	u64 addr;
 
@@ -285,6 +286,81 @@ static int patch_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 	/* ret */
 	((unsigned char *)instructions)[i++] = 0xc3;
 
+	/* VTL call/return entries */
+	if (!kvm_xen_hypercall_enabled(kvm) && kvm_hv_vsm_enabled(kvm)) {
+#ifdef CONFIG_X86_64
+		if (is_64_bit_mode(vcpu)) {
+			/*
+			 * VTL call 64-bit entry prologue:
+			 * 	mov %rcx, %rax
+			 * 	mov $0x11, %ecx
+			 * 	jmp 0:
+			 */
+			hv->vsm_code_page_offsets.vtl_call_offset = i;
+			instructions[i++] = 0x48;
+			instructions[i++] = 0x89;
+			instructions[i++] = 0xc8;
+			instructions[i++] = 0xb9;
+			instructions[i++] = 0x11;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0xeb;
+			instructions[i++] = 0xe0;
+			/*
+			 * VTL return 64-bit entry prologue:
+			 * 	mov %rcx, %rax
+			 * 	mov $0x12, %ecx
+			 * 	jmp 0:
+			 */
+			hv->vsm_code_page_offsets.vtl_return_offset = i;
+			instructions[i++] = 0x48;
+			instructions[i++] = 0x89;
+			instructions[i++] = 0xc8;
+			instructions[i++] = 0xb9;
+			instructions[i++] = 0x12;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0xeb;
+			instructions[i++] = 0xd6;
+		} else
+#endif
+		{
+			/*
+			 * VTL call 32-bit entry prologue:
+			 * 	mov %eax, %ecx
+			 * 	mov $0x11, %eax
+			 * 	jmp 0:
+			 */
+			hv->vsm_code_page_offsets.vtl_call_offset = i;
+			instructions[i++] = 0x89;
+			instructions[i++] = 0xc1;
+			instructions[i++] = 0xb8;
+			instructions[i++] = 0x11;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0xeb;
+			instructions[i++] = 0xf3;
+			/*
+			 * VTL return 32-bit entry prologue:
+			 * 	mov %eax, %ecx
+			 * 	mov $0x12, %eax
+			 * 	jmp 0:
+			 */
+			hv->vsm_code_page_offsets.vtl_return_offset = i;
+			instructions[i++] = 0x89;
+			instructions[i++] = 0xc1;
+			instructions[i++] = 0xb8;
+			instructions[i++] = 0x12;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0x00;
+			instructions[i++] = 0xeb;
+			instructions[i++] = 0xea;
+		}
+	}
 
 	addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
 	if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
 		return 1;
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index fdac4a1714ec..0e7643c1ef01 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -823,4 +823,15 @@ struct hv_mmio_write_input {
 	u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
 } __packed;
 
+/*
+ * VTL call/return hypercall page offsets register
+ */
+union hv_register_vsm_code_page_offsets {
+	u64 as_u64;
+	struct {
+		u64 vtl_call_offset:12;
+		u64 vtl_return_offset:12;
+		u64 reserved:40;
+	} __packed;
+};
 #endif
-- 
2.40.1