On 20.03.15 10:39, Paul Mackerras wrote:
> This uses msgsnd where possible for signalling other threads within
> the same core on POWER8 systems, rather than IPIs through the XICS
> interrupt controller. This includes waking secondary threads to run
> the guest, the interrupts generated by the virtual XICS, and the
> interrupts to bring the other threads out of the guest when exiting.
>
> Signed-off-by: Paul Mackerras <paulus@xxxxxxxxx>
> ---
>  arch/powerpc/kernel/asm-offsets.c       |  4 +++
>  arch/powerpc/kvm/book3s_hv.c            | 48 ++++++++++++++++++++++-----------
>  arch/powerpc/kvm/book3s_hv_rm_xics.c    | 11 ++++++++
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 41 ++++++++++++++++++++++++----
>  4 files changed, 83 insertions(+), 21 deletions(-)
>
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index fa7b57d..0ce2aa6 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -37,6 +37,7 @@
>  #include <asm/thread_info.h>
>  #include <asm/rtas.h>
>  #include <asm/vdso_datapage.h>
> +#include <asm/dbell.h>
>  #ifdef CONFIG_PPC64
>  #include <asm/paca.h>
>  #include <asm/lppaca.h>
> @@ -568,6 +569,7 @@ int main(void)
>          DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
>          DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
>          DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
> +        DEFINE(VCORE_PCPU, offsetof(struct kvmppc_vcore, pcpu));
>          DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
>          DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
>          DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
> @@ -757,5 +759,7 @@ int main(void)
>                  offsetof(struct paca_struct, subcore_sibling_mask));
>  #endif
>
> +        DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
> +
>          return 0;
>  }
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 03a8bb4..2c34bae 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -51,6 +51,7 @@
>  #include <asm/hvcall.h>
>  #include <asm/switch_to.h>
>  #include <asm/smp.h>
> +#include <asm/dbell.h>
>  #include <linux/gfp.h>
>  #include <linux/vmalloc.h>
>  #include <linux/highmem.h>
> @@ -84,9 +85,34 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
>  static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
>  static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
>
> +static bool kvmppc_ipi_thread(int cpu)
> +{
> +        /* On POWER8 for IPIs to threads in the same core, use msgsnd */
> +        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
> +                preempt_disable();
> +                if ((cpu & ~7) == (smp_processor_id() & ~7)) {
> +                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
> +                        msg |= cpu & 7;
> +                        smp_mb();
> +                        __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
> +                        preempt_enable();
> +                        return true;
> +                }
> +                preempt_enable();
> +        }
> +
> +#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
> +        if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
> +                xics_wake_cpu(cpu);
> +                return true;
> +        }
> +#endif
> +
> +        return false;
> +}
> +
>  static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
>  {
> -        int me;
>          int cpu = vcpu->cpu;
>          wait_queue_head_t *wqp;
>
> @@ -96,20 +122,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
>                  ++vcpu->stat.halt_wakeup;
>          }
>
> -        me = get_cpu();
> +        if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
> +                return;
>
>          /* CPU points to the first thread of the core */
> -        if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
> -#ifdef CONFIG_PPC_ICP_NATIVE
> -                int real_cpu = cpu + vcpu->arch.ptid;
> -                if (paca[real_cpu].kvm_hstate.xics_phys)
> -                        xics_wake_cpu(real_cpu);
> -                else
> -#endif
> -                if (cpu_online(cpu))
> -                        smp_send_reschedule(cpu);
> -        }
> -        put_cpu();
> +        if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
> +                smp_send_reschedule(cpu);
>  }
>
>  /*
> @@ -1754,10 +1772,8 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
>          /* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
>          smp_wmb();
>          tpaca->kvm_hstate.kvm_vcpu = vcpu;
> -#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
>          if (cpu != smp_processor_id())
> -                xics_wake_cpu(cpu);
> -#endif
> +                kvmppc_ipi_thread(cpu);
>  }
>
>  static void kvmppc_wait_for_nap(void)
> diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
> index 6dded8c..457a8b1 100644
> --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
> +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
> @@ -18,6 +18,7 @@
>  #include <asm/debug.h>
>  #include <asm/synch.h>
>  #include <asm/ppc-opcode.h>
> +#include <asm/dbell.h>
>
>  #include "book3s_xics.h"
>
> @@ -83,6 +84,16 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
>          /* In SMT cpu will always point to thread 0, we adjust it */
>          cpu += vcpu->arch.ptid;
>
> +        /* On POWER8 for IPIs to threads in the same core, use msgsnd */
> +        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
> +            (cpu & ~7) == (raw_smp_processor_id() & ~7)) {

Can we somehow encapsulate the secret knowledge that 8 threads mean one core?
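Something like cpu_first_thread_sibling() / cpu_thread_in_core() from asm/cputhreads.h, perhaps? A rough, untested sketch of what I mean, showing only the msgsnd half of kvmppc_ipi_thread() -- I have not checked whether threads_per_core is safe to reference from the real-mode icp_rm_set_vcpu_irq() path, so take this purely as an illustration:

        /*
         * Sketch only: same-core check and doorbell payload without the
         * open-coded "& ~7" / "& 7".  Assumes <asm/cputhreads.h> is
         * included and threads_per_core is valid in this context.
         */
        static bool kvmppc_ipi_thread(int cpu)
        {
                /* On POWER8, use msgsnd for a target thread in the same core */
                if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                        preempt_disable();
                        if (cpu_first_thread_sibling(cpu) ==
                            cpu_first_thread_sibling(smp_processor_id())) {
                                unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

                                /* thread number within the core, instead of "cpu & 7" */
                                msg |= cpu_thread_in_core(cpu);
                                smp_mb();
                                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                                preempt_enable();
                                return true;
                        }
                        preempt_enable();
                }

                /* otherwise fall back to the XICS IPI path as in your patch */
                return false;
        }

That way the assumption that a core is 8 threads lives in one place (threads_per_core) instead of being open-coded in both book3s_hv.c and book3s_hv_rm_xics.c.

Alex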