2017-11-15 5:54 GMT+08:00  <riel@xxxxxxxxxx>:
> From: Rik van Riel <riel@xxxxxxxxxx>
>
> Currently, every time a VCPU is scheduled out, the host kernel will
> first save the guest FPU/xstate context, then load the qemu userspace
> FPU context, only to then immediately save the qemu userspace FPU
> context back to memory. When scheduling in a VCPU, the same extraneous
> FPU loads and saves are done.
>
> This could be avoided by moving from a model where the guest FPU is
> loaded and stored with preemption disabled, to a model where the
> qemu userspace FPU is swapped out for the guest FPU context for
> the duration of the KVM_RUN ioctl.

What will happen if CONFIG_PREEMPT is enabled?

Regards,
Wanpeng Li

>
> This is done under the VCPU mutex, which is also taken when other
> tasks inspect the VCPU FPU context, so the code should already be
> safe for this change. That should come as no surprise, given that
> s390 already has this optimization.
>
> No performance changes were detected in quick ping-pong tests on
> my 4-socket system, which is expected since an FPU+xstate load is
> on the order of 0.1us, while ping-ponging between CPUs is on the
> order of 20us, and somewhat noisy.
>
> There may be other tests where performance changes are noticeable.
>
> Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
> Suggested-by: Christian Borntraeger <borntraeger@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h | 13 +++++++++++++
>  arch/x86/kvm/x86.c              | 34 +++++++++++++---------------------
>  include/linux/kvm_host.h        |  2 +-
>  3 files changed, 27 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 9d7d856b2d89..ffe54958491f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
>  	struct kvm_mmu_memory_cache mmu_page_cache;
>  	struct kvm_mmu_memory_cache mmu_page_header_cache;
>
> +	/*
> +	 * QEMU userspace and the guest each have their own FPU state.
> +	 * In vcpu_run, we switch between the user and guest FPU contexts.
> +	 * While running a VCPU, the VCPU thread will have the guest FPU
> +	 * context.
> +	 *
> +	 * Note that while the PKRU state lives inside the fpu registers,
> +	 * it is switched out separately at VMENTER and VMEXIT time. The
> +	 * "guest_fpu" state here contains the guest FPU context, with the
> +	 * host PKRU bits.
> +	 */
> +	struct fpu user_fpu;
>  	struct fpu guest_fpu;
> +
>  	u64 xcr0;
>  	u64 guest_supported_xcr0;
>  	u32 guest_xstate_size;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 03869eb7fcd6..aad5181ed4e9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2917,7 +2917,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>  	srcu_read_unlock(&vcpu->kvm->srcu, idx);
>  	pagefault_enable();
>  	kvm_x86_ops->vcpu_put(vcpu);
> -	kvm_put_guest_fpu(vcpu);
>  	vcpu->arch.last_host_tsc = rdtsc();
>  }
>
> @@ -5228,13 +5227,10 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
>
>  static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
>  {
> -	preempt_disable();
> -	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
>  }
>
>  static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
>  {
> -	preempt_enable();
>  }
>
>  static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
> @@ -6908,7 +6904,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	preempt_disable();
>
>  	kvm_x86_ops->prepare_guest_switch(vcpu);
> -	kvm_load_guest_fpu(vcpu);
>
>  	/*
>  	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
> @@ -7255,12 +7250,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  		}
>  	}
>
> +	kvm_load_guest_fpu(vcpu);
> +
>  	if (unlikely(vcpu->arch.complete_userspace_io)) {
>  		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
>  		vcpu->arch.complete_userspace_io = NULL;
>  		r = cui(vcpu);
>  		if (r <= 0)
> -			goto out;
> +			goto out_fpu;
>  	} else
>  		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
>
> @@ -7269,6 +7266,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  	else
>  		r = vcpu_run(vcpu);
>
> +out_fpu:
> +	kvm_put_guest_fpu(vcpu);
>  out:
>  	post_kvm_run_save(vcpu);
>  	if (vcpu->sigset_active)
> @@ -7663,32 +7662,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
>  	vcpu->arch.cr0 |= X86_CR0_ET;
>  }
>
> +/* Swap (qemu) user FPU context for the guest FPU context. */
>  void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
>  {
> -	if (vcpu->guest_fpu_loaded)
> -		return;
> -
> -	/*
> -	 * Restore all possible states in the guest,
> -	 * and assume host would use all available bits.
> -	 * Guest xcr0 would be loaded later.
> -	 */
> -	vcpu->guest_fpu_loaded = 1;
> -	__kernel_fpu_begin();
> +	preempt_disable();
> +	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
>  	/* PKRU is separately restored in kvm_x86_ops->run. */
>  	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
>  				~XFEATURE_MASK_PKRU);
> +	preempt_enable();
>  	trace_kvm_fpu(1);
>  }
>
> +/* When vcpu_run ends, restore user space FPU context. */
>  void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
>  {
> -	if (!vcpu->guest_fpu_loaded)
> -		return;
> -
> -	vcpu->guest_fpu_loaded = 0;
> +	preempt_disable();
>  	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
> -	__kernel_fpu_end();
> +	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
> +	preempt_enable();
>  	++vcpu->stat.fpu_reload;
>  	trace_kvm_fpu(0);
>  }
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6882538eda32..354608487b8d 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -232,7 +232,7 @@ struct kvm_vcpu {
>  	struct mutex mutex;
>  	struct kvm_run *run;
>
> -	int guest_fpu_loaded, guest_xcr0_loaded;
> +	int guest_xcr0_loaded;
>  	struct swait_queue_head wq;
>  	struct pid __rcu *pid;
>  	int sigset_active;
> --
> 2.9.4
>
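
For readers following the thread: condensed, the FPU handling around
KVM_RUN after this patch looks roughly like the sketch below. This is an
illustrative simplification, not the literal kernel code: the
complete_userspace_io and signal paths are elided, and the comment about
preemption reflects my reading of the patch (the normal task FPU
switching in the scheduler covering the live guest registers) rather
than anything stated in the changelog.

	/* Sketch: one user<->guest FPU swap per KVM_RUN ioctl. */
	int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	{
		int r;

		/* qemu fpregs -> user_fpu; guest_fpu -> fpregs */
		kvm_load_guest_fpu(vcpu);

		/*
		 * Any number of VMENTER/VMEXIT cycles. If the thread is
		 * preempted in this window, the guest state is live in
		 * the registers, so the scheduler's regular FPU context
		 * switching saves and restores it with the task, instead
		 * of KVM swapping user and guest FPU state on every
		 * sched-out/sched-in as before.
		 */
		r = vcpu_run(vcpu);

		/* guest fpregs -> guest_fpu; user_fpu -> fpregs */
		kvm_put_guest_fpu(vcpu);

		post_kvm_run_save(vcpu);
		return r;
	}

The load and put themselves run with preemption disabled (see the
preempt_disable()/preempt_enable() pairs in the hunks above), so each
individual swap is atomic with respect to the scheduler even with
CONFIG_PREEMPT.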