The current CPU's TSS base is a foregone conclusion, so there's no
need to parse it out of the segment tables.  This should save a couple
of cycles (as STR is surely microcoded and poorly optimized) but, more
importantly, it's a cleanup and it means that segment_base() will
never be called on 64-bit kernels.

Cc: Thomas Garnier <thgarnie@xxxxxxxxxx>
Cc: Jim Mattson <jmattson@xxxxxxxxxx>
Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
---
 arch/x86/kvm/vmx.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a236decb81e4..46420aaf1684 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2088,13 +2088,6 @@ static unsigned long segment_base(u16 selector)
 	return v;
 }
 
-static inline unsigned long kvm_read_tr_base(void)
-{
-	u16 tr;
-	asm("str %0" : "=g"(tr));
-	return segment_base(tr);
-}
-
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2294,10 +2287,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		/*
 		 * Linux uses per-cpu TSS and GDT, so set these when switching
-		 * processors.
+		 * processors.  See 22.2.4.
 		 */
-		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-		vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
+		vmcs_writel(HOST_TR_BASE,
+			    (unsigned long)this_cpu_ptr(&cpu_tss));
+		vmcs_writel(HOST_GDTR_BASE, gdt->address);
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp);	/* 22.2.3 */
-- 
2.9.3
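
For reviewers who want the idea in isolation, here is a minimal,
illustrative sketch (not the actual vmx.c code, and segment_base() is
vmx.c-local) of the two ways of obtaining the host TSS base that this
patch trades between.  It assumes cpu_tss is the per-cpu struct
tss_struct that x86 Linux already maintains for the running CPU:

	#include <linux/percpu.h>
	#include <asm/processor.h>	/* struct tss_struct, cpu_tss */

	/* Old approach: read TR with STR, then walk the descriptor tables. */
	static unsigned long tr_base_via_str(void)
	{
		u16 tr;

		asm("str %0" : "=g"(tr));
		return segment_base(tr);	/* GDT/LDT lookup, the slow path */
	}

	/* New approach: the host TSS is simply this CPU's cpu_tss. */
	static unsigned long tr_base_via_percpu(void)
	{
		return (unsigned long)this_cpu_ptr(&cpu_tss);
	}

Either way the result feeds vmcs_writel(HOST_TR_BASE, ...); the
per-cpu form just skips STR and the descriptor-table walk entirely.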