On 1/23/2024 7:52 AM, isaku.yamahata@xxxxxxxxx wrote:
> From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
>
> To match vmx_exit cleanup.

Do you mean vt_exit()?

Shouldn't vt_init() and vt_exit() be symmetric right from the beginning in
the refactor patch (006/121)?
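
To make sure we mean the same thing: with the ordering this patch ends up
with, the mirror-image vt_exit() would be something like the below (a sketch
of the ordering only, untested):

        /* Tear down in the exact reverse order of vt_init(). */
        static void __exit vt_exit(void)
        {
                kvm_exit();                /* undoes kvm_init() */
                kvm_x86_vendor_exit();     /* undoes kvm_x86_vendor_init() */
                vmx_exit();                /* undoes vmx_init() */
        }
        module_exit(vt_exit);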

And also, since the reorder of kvm_x86_vendor_init() and vmx_init() is going
to happen anyway, can we just skip moving the init of loaded_vmcss_on_cpu
around?
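
That is, if the reorder is folded into the refactor, vmx_init() could own the
initialization from day one; the loop below is the same one this patch ends
up adding, just introduced one patch earlier:

        for_each_possible_cpu(cpu) {
                INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
                pi_init_cpu(cpu);
        }

and the intermediate step of hoisting it into vt_init() never needs to exist.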

> Now that vmx_init() runs before kvm_x86_vendor_init(),
> vmx_init() can initialize loaded_vmcss_on_cpu. Opportunistically move it
> back into vmx_init().
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> ---
> v18:
> - move the loaded_vmcss_on_cpu initialization to vmx_init().
> - fix the error path of vt_init() (reported by Chao and Binbin)
> ---
>  arch/x86/kvm/vmx/main.c    | 17 +++++++----------
>  arch/x86/kvm/vmx/vmx.c     |  6 ++++--
>  arch/x86/kvm/vmx/x86_ops.h |  2 --
>  3 files changed, 11 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 18cecf12c7c8..443db8ec5cd5 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -171,7 +171,7 @@ struct kvm_x86_init_ops vt_init_ops __initdata = {
>  static int __init vt_init(void)
>  {
>          unsigned int vcpu_size, vcpu_align;
> -        int cpu, r;
> +        int r;
>  
>          if (!kvm_is_vmx_supported())
>                  return -EOPNOTSUPP;
> @@ -182,18 +182,14 @@ static int __init vt_init(void)
>           */
>          hv_init_evmcs();
>  
> -        /* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
> -        for_each_possible_cpu(cpu)
> -                INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> -
> -        r = kvm_x86_vendor_init(&vt_init_ops);
> -        if (r)
> -                return r;
> -
>          r = vmx_init();
>          if (r)
>                  goto err_vmx_init;
>  
> +        r = kvm_x86_vendor_init(&vt_init_ops);
> +        if (r)
> +                goto err_vendor_init;
> +
>          /*
>           * Common KVM initialization _must_ come last, after this, /dev/kvm is
>           * exposed to userspace!
> @@ -207,9 +203,10 @@ static int __init vt_init(void)
>          return 0;
>  
>  err_kvm_init:
> +        kvm_x86_vendor_exit();
> +err_vendor_init:
>          vmx_exit();
>  err_vmx_init:
> -        kvm_x86_vendor_exit();
>          return r;
>  }
>  
>  module_init(vt_init);
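
To spell out why I keep coming back to symmetry: after this patch, vt_init()
sets up and unwinds in strictly mirrored order. Reassembled from the hunks
above (the kvm_init() call is paraphrased from memory, so treat it as
approximate):

        r = vmx_init();
        if (r)
                goto err_vmx_init;

        r = kvm_x86_vendor_init(&vt_init_ops);
        if (r)
                goto err_vendor_init;

        r = kvm_init(vcpu_size, vcpu_align, THIS_MODULE);
        if (r)
                goto err_kvm_init;

        return 0;

err_kvm_init:
        kvm_x86_vendor_exit();
err_vendor_init:
        vmx_exit();
err_vmx_init:
        return r;

Each err_* label undoes only what had already succeeded, in reverse order,
which is exactly the shape vt_exit() should mirror.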
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 8efb956591d5..3f4dad3acb13 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -477,7 +477,7 @@ DEFINE_PER_CPU(struct vmcs *, current_vmcs);
>   * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
>   * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
>   */
> -DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
> +static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
>  
>  static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
>  static DEFINE_SPINLOCK(vmx_vpid_lock);
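
For readers following along: the reason this list must be initialized before
the hardware-disable paths can ever run is that both vmx_hardware_disable()
and the emergency callback walk it on the dying CPU. Roughly (paraphrased
from vmx.c, not verbatim):

        int cpu = raw_smp_processor_id();
        struct loaded_vmcs *v;

        /* VMCLEAR every VMCS still loaded on this CPU. */
        list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
                            loaded_vmcss_on_cpu_link)
                vmcs_clear(v->vmcs);

Per-CPU data starts out zeroed, so walking a list head that was never
INIT_LIST_HEAD()'d would dereference NULL.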
> @@ -8528,8 +8528,10 @@ int __init vmx_init(void)
>          if (r)
>                  return r;
>  
> -        for_each_possible_cpu(cpu)
> +        for_each_possible_cpu(cpu) {
> +                INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
>                  pi_init_cpu(cpu);
> +        }
>  
>          cpu_emergency_register_virt_callback(vmx_emergency_disable);
>  
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index b936388853ab..bca2d27b3dfd 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -14,8 +14,6 @@ static inline __init void hv_init_evmcs(void) {}
>  static inline void hv_reset_evmcs(void) {}
>  #endif /* IS_ENABLED(CONFIG_HYPERV) */
>  
> -DECLARE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
> -
>  bool kvm_is_vmx_supported(void);
>  int __init vmx_init(void);
>  void vmx_exit(void);