On Wed, Jun 30, 2010 at 12:25:16PM +0800, Sheng Yang wrote:
> It would buy us the ability to schedule compared to smp_call_function().
>
> Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
> ---
>
> But I am not sure it is worth the complexity. Anyway, WBINVD itself can't
> be interrupted, so the benefit would only go to the caller cpu, I think.
> And it would extend the waiting time for the caller cpu, since we still
> need to wait for the other cpus to complete their work.

Alright, this can be done later if it turns out to be necessary.

>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/x86.c              |   35 ++++++++++++++++++++++++++++-------
>  2 files changed, 29 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2bda624..6e0b793 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -16,6 +16,7 @@
>  #include <linux/mmu_notifier.h>
>  #include <linux/tracepoint.h>
>  #include <linux/cpumask.h>
> +#include <linux/workqueue.h>
>
>  #include <linux/kvm.h>
>  #include <linux/kvm_para.h>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eea75f5..13f7c88 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -153,6 +153,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>
>  u64 __read_mostly host_xcr0;
>
> +static struct workqueue_struct *wbinvd_wq;
> +static DEFINE_PER_CPU(struct work_struct, wbinvd_work);
> +
>  static inline u32 bit(int bitno)
>  {
>          return 1 << (bitno & 31);
> @@ -1783,7 +1786,7 @@ out:
>          return r;
>  }
>
> -static void wbinvd_ipi(void *garbage)
> +static void wbinvd_do_work(struct work_struct *work)
>  {
>          wbinvd();
>  }
> @@ -1800,9 +1803,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>          if (need_emulate_wbinvd(vcpu)) {
>                  if (kvm_x86_ops->has_wbinvd_exit())
>                          cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
> -                else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
> -                        smp_call_function_single(vcpu->cpu,
> -                                        wbinvd_ipi, NULL, 1);
> +                else if (vcpu->cpu != -1 && vcpu->cpu != cpu) {
> +                        queue_work_on(vcpu->cpu, wbinvd_wq,
> +                                        &per_cpu(wbinvd_work, vcpu->cpu));
> +                        flush_workqueue(wbinvd_wq);
> +                }

Can't schedule here: preemption must stay disabled during vcpu_load, and
flush_workqueue() sleeps.

>          }
>
>          kvm_x86_ops->vcpu_load(vcpu, cpu);
> @@ -3672,12 +3677,16 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
>
>  int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
>  {
> +        int cpu;
> +
>          if (!need_emulate_wbinvd(vcpu))
>                  return X86EMUL_CONTINUE;
>
>          if (kvm_x86_ops->has_wbinvd_exit()) {
> -                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
> -                                wbinvd_ipi, NULL, 1);
> +                for_each_cpu(cpu, vcpu->arch.wbinvd_dirty_mask)
> +                        queue_work_on(cpu, wbinvd_wq,
> +                                        &per_cpu(wbinvd_work, cpu));
> +                flush_workqueue(wbinvd_wq);
>                  cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
>          }
>          wbinvd();
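One way to keep the schedulable path where it is actually safe would be to
leave the synchronous IPI in vcpu_load() and only use the workqueue from
kvm_emulate_wbinvd(), which runs in a sleepable context. Untested sketch,
assuming wbinvd_wq and the per-cpu wbinvd_work are created during init as
in this patch (the init hunk is not shown above):

static void wbinvd_ipi(void *garbage)
{
        wbinvd();
}

static void wbinvd_do_work(struct work_struct *work)
{
        wbinvd();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        /* ... */
        if (need_emulate_wbinvd(vcpu)) {
                if (kvm_x86_ops->has_wbinvd_exit())
                        cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
                else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
                        /*
                         * Preemption is disabled here, so keep the
                         * non-sleeping IPI.
                         */
                        smp_call_function_single(vcpu->cpu,
                                        wbinvd_ipi, NULL, 1);
        }
        /* ... */
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
        int cpu;

        if (!need_emulate_wbinvd(vcpu))
                return X86EMUL_CONTINUE;

        if (kvm_x86_ops->has_wbinvd_exit()) {
                /* Sleepable context: queueing and flushing is fine. */
                for_each_cpu(cpu, vcpu->arch.wbinvd_dirty_mask)
                        queue_work_on(cpu, wbinvd_wq,
                                        &per_cpu(wbinvd_work, cpu));
                flush_workqueue(wbinvd_wq);
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
        }
        wbinvd();
        return X86EMUL_CONTINUE;
}

That way the vcpu_load() path stays atomic-safe, and only the emulation
path, which can already sleep, pays the workqueue cost.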