On 11/18/21 12:08, Lai Jiangshan wrote:

From: Lai Jiangshan <laijs@xxxxxxxxxxxxxxxxx>

Use the x86 core helpers x86_fsbase_read_cpu(), x86_gsbase_read_cpu_inactive()
and x86_gsbase_write_cpu_inactive() instead of open-coded rdmsrl()/wrmsrl()
when KVM VMX switches FS base and inactive GS base between host and guest.
These helpers use FSGSBASE instructions when enabled.

Cc: x86@xxxxxxxxxx
Signed-off-by: Lai Jiangshan <laijs@xxxxxxxxxxxxxxxxx>

This needs an ACK from the x86 maintainers.

I queued patches 2-4 and 6-14.

Paolo

---
 arch/x86/include/asm/kvm_host.h | 10 ----------
 arch/x86/kernel/process_64.c    |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 14 +++++++-------
 3 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1fcb345bc107..4cbb402f5636 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1808,16 +1808,6 @@ static inline void kvm_load_ldt(u16 sel)
 	asm("lldt %0" : : "rm"(sel));
 }
 
-#ifdef CONFIG_X86_64
-static inline unsigned long read_msr(unsigned long msr)
-{
-	u64 value;
-
-	rdmsrl(msr, value);
-	return value;
-}
-#endif
-
 static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 {
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
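
For reference, the read_msr() helper removed above was only a typed wrapper
around rdmsrl(), so every read it served was a full RDMSR. Its replacements
can take the FSGSBASE fast path instead. A paraphrase of x86_fsbase_read_cpu()
from arch/x86/kernel/process_64.c (a sketch only; the exact code varies
across kernel versions):

unsigned long x86_fsbase_read_cpu(void)
{
	unsigned long fsbase;

	if (boot_cpu_has(X86_FEATURE_FSGSBASE))
		/* FSGSBASE enabled: one cheap RDFSBASE instruction */
		fsbase = rdfsbase();
	else
		/* legacy path: full RDMSR of MSR_FS_BASE */
		rdmsrl(MSR_FS_BASE, fsbase);

	return fsbase;
}
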
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3402edec236c..296bd5c2e38b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -443,6 +443,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
 
 	return gsbase;
 }
+EXPORT_SYMBOL_GPL(x86_gsbase_read_cpu_inactive);
 
 void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
 {
@@ -456,6 +457,7 @@ void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
 		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
 	}
 }
+EXPORT_SYMBOL_GPL(x86_gsbase_write_cpu_inactive);
 
 unsigned long x86_fsbase_read_task(struct task_struct *task)
 {
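
Reading the *inactive* GS base is trickier than reading FS base: with
FSGSBASE the helper must SWAPGS so the inactive base becomes visible to
RDGSBASE, then swap back, with interrupts off so nothing runs while the
bases are swapped. A paraphrase of the read side of the helpers exported
above (a sketch; details differ by kernel version):

static noinstr unsigned long __rdgsbase_inactive(void)
{
	unsigned long gsbase;

	lockdep_assert_irqs_disabled();

	native_swapgs();		/* inactive GS base becomes active */
	gsbase = rdgsbase();		/* RDGSBASE instruction */
	native_swapgs();		/* restore the kernel GS base */

	return gsbase;
}

unsigned long x86_gsbase_read_cpu_inactive(void)
{
	unsigned long gsbase;

	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		local_irq_save(flags);
		gsbase = __rdgsbase_inactive();
		local_irq_restore(flags);
	} else {
		/* legacy path: MSR_KERNEL_GS_BASE holds the inactive base */
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}

	return gsbase;
}
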
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3127c66a1651..48a34d1a2989 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1156,11 +1156,11 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	} else {
 		savesegment(fs, fs_sel);
 		savesegment(gs, gs_sel);
-		fs_base = read_msr(MSR_FS_BASE);
-		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+		fs_base = x86_fsbase_read_cpu();
+		vmx->msr_host_kernel_gs_base = x86_gsbase_read_cpu_inactive();
 	}
 
-	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	x86_gsbase_write_cpu_inactive(vmx->msr_guest_kernel_gs_base);
 #else
 	savesegment(fs, fs_sel);
 	savesegment(gs, gs_sel);
@@ -1184,7 +1184,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	++vmx->vcpu.stat.host_state_reload;
 
 #ifdef CONFIG_X86_64
-	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	vmx->msr_guest_kernel_gs_base = x86_gsbase_read_cpu_inactive();
 #endif
 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
 		kvm_load_ldt(host_state->ldt_sel);
@@ -1204,7 +1204,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #endif
 	invalidate_tss_limit();
 #ifdef CONFIG_X86_64
-	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	x86_gsbase_write_cpu_inactive(vmx->msr_host_kernel_gs_base);
 #endif
 	load_fixmap_gdt(raw_smp_processor_id());
 	vmx->guest_state_loaded = false;
@@ -1216,7 +1216,7 @@ static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
 	preempt_disable();
 	if (vmx->guest_state_loaded)
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+		vmx->msr_guest_kernel_gs_base = x86_gsbase_read_cpu_inactive();
 	preempt_enable();
 	return vmx->msr_guest_kernel_gs_base;
 }
@@ -1225,7 +1225,7 @@ static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
 	preempt_disable();
 	if (vmx->guest_state_loaded)
-		wrmsrl(MSR_KERNEL_GS_BASE, data);
+		x86_gsbase_write_cpu_inactive(data);
 	preempt_enable();
 	vmx->msr_guest_kernel_gs_base = data;
 }
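
To summarize the invariant the vmx.c hunks maintain:
vmx->msr_guest_kernel_gs_base is only a cache. While guest state is
loaded, the authoritative value lives in the CPU's inactive GS base,
which is why the accessors above re-read or re-write it under
preempt_disable(). A condensed, illustrative view of the handoff (the
helper names below are hypothetical, not from the patch):

/* Illustrative only: what prepare_switch_to_guest/host do for
 * MSR_KERNEL_GS_BASE on CONFIG_X86_64, using the new helpers. */
static void switch_kernel_gs_base_to_guest(struct vcpu_vmx *vmx)
{
	/* Stash the host's inactive GS base, then install the guest's. */
	vmx->msr_host_kernel_gs_base = x86_gsbase_read_cpu_inactive();
	x86_gsbase_write_cpu_inactive(vmx->msr_guest_kernel_gs_base);
}

static void switch_kernel_gs_base_to_host(struct vcpu_vmx *vmx)
{
	/* The guest may have rewritten it; read it back before restoring. */
	vmx->msr_guest_kernel_gs_base = x86_gsbase_read_cpu_inactive();
	x86_gsbase_write_cpu_inactive(vmx->msr_host_kernel_gs_base);
}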