On 11/6/2024 11:30 PM, Sean Christopherson wrote:
> On Wed, Nov 06, 2024, Binbin Wu wrote:
>> On 11/6/2024 4:54 PM, Huang, Kai wrote:
>>> On Wed, 2024-11-06 at 16:32 +0800, Binbin Wu wrote:
>>>> static void kvm_complete_hypercall_exit(struct kvm_vcpu *vcpu, int ret_reg,
>>>>                                         unsigned long ret, bool op_64_bit)
>>>> {
>>>>         if (!op_64_bit)
>>>>                 ret = (u32)ret;
>>>>         kvm_register_write_raw(vcpu, ret_reg, ret);
>>>>         ++vcpu->stat.hypercalls;
>>>> }
>>> If this is going to be the final version, it would be better to make it
>>> public and export the symbol, so that TDX code can reuse it.
>>>
>>> Does making it 'static inline' and moving it to kvm_host.h work?
>> arch/x86/include/asm/kvm_host.h doesn't have a complete definition of
>> struct kvm_vcpu, and the code dereferences struct kvm_vcpu.
>>
>> Also, the definition of kvm_register_write_raw() is in
>> arch/x86/kvm/kvm_cache_regs.h, which makes it difficult to call it
>> from there.
> A way around that would be to move the declarations from asm/kvm_host.h to x86.h,
> and then kvm_complete_hypercall_exit() can be inlined (or not), without having to
> deal with the kvm_host.h ordering issues.
>
> IMO, KVM x86 would ideally put as much as possible in x86.h.  The vast majority
> of KVM x86's exports are intended only for the vendor modules.  Declaring those
> exports in kvm_host.h is effectively bleeding KVM internals to the broader kernel.
>
> I'll go that route for the series, assuming it works as I intend :-)
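To make the ordering issue concrete, here is a minimal sketch (hypothetical
code, not from any patch) of what a 'static inline' in asm/kvm_host.h would
run into:

/*
 * Within arch/x86/include/asm/kvm_host.h, struct kvm_vcpu is only a
 * forward declaration; its full definition lives in
 * include/linux/kvm_host.h, which includes this arch header first.
 */
struct kvm_vcpu;

static inline void kvm_complete_hypercall_exit(struct kvm_vcpu *vcpu, int ret_reg,
					       unsigned long ret, bool op_64_bit)
{
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_register_write_raw(vcpu, ret_reg, ret);	/* also not declared here */
	++vcpu->stat.hypercalls;	/* error: dereferencing an incomplete type */
}

x86.h avoids both problems because it is only included by KVM's own .c files,
after linux/kvm_host.h (and, typically, kvm_cache_regs.h) have already been
pulled in.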
Based on the previous discussions, is the below code what you expect?
If so, I'm going to use __kvm_emulate_hypercall() and kvm_complete_hypercall_exit()
in TDX code for handling KVM hypercalls (a rough usage sketch follows the diff).
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e51a95aba824..356620f74bb0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2205,10 +2205,6 @@ static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
 	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
 }
 
-unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-				      unsigned long a0, unsigned long a1,
-				      unsigned long a2, unsigned long a3,
-				      int op_64_bit, int cpl);
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b5b0ae3dd7e..05191c4af156 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10008,19 +10008,17 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 
 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
 {
-	u64 ret = vcpu->run->hypercall.ret;
+	kvm_complete_hypercall_exit(vcpu, VCPU_REGS_RAX,
+				    vcpu->run->hypercall.ret, is_64_bit_mode(vcpu));
 
-	if (!is_64_bit_mode(vcpu))
-		ret = (u32)ret;
-	kvm_rax_write(vcpu, ret);
-	++vcpu->stat.hypercalls;
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
-unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-				      unsigned long a0, unsigned long a1,
-				      unsigned long a2, unsigned long a3,
-				      int op_64_bit, int cpl)
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+			      unsigned long a0, unsigned long a1,
+			      unsigned long a2, unsigned long a3,
+			      int op_64_bit, int cpl, int ret_reg,
+			      int (*cui)(struct kvm_vcpu *vcpu))
 {
 	unsigned long ret;
 
@@ -10104,16 +10102,15 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
 	}
 
 out:
-	++vcpu->stat.hypercalls;
-	return ret;
+	kvm_complete_hypercall_exit(vcpu, ret_reg, ret, op_64_bit);
+
+	return 1;
 }
-EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
-	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit;
-	int cpl;
+	int r;
 
 	if (kvm_xen_hypercall_enabled(vcpu->kvm))
 		return kvm_xen_hypercall(vcpu);
@@ -10121,23 +10118,13 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	if (kvm_hv_hypercall_enabled(vcpu))
 		return kvm_hv_hypercall(vcpu);
 
-	nr = kvm_rax_read(vcpu);
-	a0 = kvm_rbx_read(vcpu);
-	a1 = kvm_rcx_read(vcpu);
-	a2 = kvm_rdx_read(vcpu);
-	a3 = kvm_rsi_read(vcpu);
-	op_64_bit = is_64_bit_hypercall(vcpu);
-	cpl = kvm_x86_call(get_cpl)(vcpu);
-
-	ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
-	if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
-		/* MAP_GPA tosses the request to the user space. */
+	r = __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
+				    is_64_bit_hypercall(vcpu),
+				    kvm_x86_call(get_cpl)(vcpu), RAX,
+				    complete_hypercall_exit);
+	if (r <= 0)
 		return 0;
 
-	if (!op_64_bit)
-		ret = (u32)ret;
-	kvm_rax_write(vcpu, ret);
-
 	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a84c48ef5278..5667d029e7f0 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -568,4 +568,25 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 			 unsigned int port, void *data, unsigned int count,
 			 int in);
 
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+			      unsigned long a0, unsigned long a1,
+			      unsigned long a2, unsigned long a3,
+			      int op_64_bit, int cpl, int ret_reg,
+			      int (*cui)(struct kvm_vcpu *vcpu));
+
+#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, ret, cui)	\
+	____kvm_emulate_hypercall(_vcpu,						\
+				  kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu),	\
+				  kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu),	\
+				  kvm_##a3##_read(_vcpu), op_64_bit, cpl,		\
+				  VCPU_REGS_##ret, cui)
+
+static inline void kvm_complete_hypercall_exit(struct kvm_vcpu *vcpu, int ret_reg,
+					       unsigned long ret, bool op_64_bit)
+{
+	if (!op_64_bit)
+		ret = (u32)ret;
+	kvm_register_write_raw(vcpu, ret_reg, ret);
+	++vcpu->stat.hypercalls;
+}
+
 #endif
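
For reference, a rough sketch (again hypothetical, not part of this diff) of
how the TDX side could consume these helpers.  The function names and the use
of R10-R14 as the TDVMCALL registers are illustrative assumptions:

/*
 * Hypothetical TDX-side usage, for illustration only.  Assumes the TDVMCALL
 * ABI passes the hypercall number and arguments in R10-R14 and returns the
 * status in R10, and that TDX guests are always 64-bit.
 */
static int complete_tdx_hypercall_exit(struct kvm_vcpu *vcpu)
{
	kvm_complete_hypercall_exit(vcpu, VCPU_REGS_R10,
				    vcpu->run->hypercall.ret, true);

	/* No instruction to skip; assume the TDX module advances the guest. */
	return 1;
}

static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu)
{
	/* cpl is assumed 0, as KVM can't read a TDX guest's actual CPL. */
	return __kvm_emulate_hypercall(vcpu, r10, r11, r12, r13, r14,
				       true, 0, R10,
				       complete_tdx_hypercall_exit);
}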