Now that all the irq routing and irqfd pieces are generic, we can expose
real irqchip support to all of KVM's internal helpers.

This allows us to use irqfd with the in-kernel MPIC.

Signed-off-by: Alexander Graf <agraf@xxxxxxx>

---
v2 -> v3:

  - make mpic pointer type safe
  - add wmb before setting global mpic variable
  - make eoi notification happen without holding the lock
  - add IRQ routing documentation
  - announce mpic availability after its creation
---
 Documentation/virtual/kvm/devices/mpic.txt |   11 +++
 arch/powerpc/include/asm/kvm_host.h        |    7 ++
 arch/powerpc/include/uapi/asm/kvm.h        |    1 +
 arch/powerpc/kvm/Kconfig                   |    3 +
 arch/powerpc/kvm/Makefile                  |    1 +
 arch/powerpc/kvm/irq.h                     |   17 ++++
 arch/powerpc/kvm/mpic.c                    |  113 ++++++++++++++++++++++++++++
 7 files changed, 153 insertions(+), 0 deletions(-)
 create mode 100644 arch/powerpc/kvm/irq.h

diff --git a/Documentation/virtual/kvm/devices/mpic.txt b/Documentation/virtual/kvm/devices/mpic.txt
index ce98e32..dadc1e0 100644
--- a/Documentation/virtual/kvm/devices/mpic.txt
+++ b/Documentation/virtual/kvm/devices/mpic.txt
@@ -35,3 +35,14 @@ Groups:
 
     "attr" is the IRQ number.  IRQ numbers for standard sources are the
     byte offset of the relevant IVPR from EIVPR0, divided by 32.
+
+IRQ Routing:
+
+  The MPIC emulation supports IRQ routing. Only a single MPIC device can
+  be instantiated. Once that device has been created, it's available as
+  irqchip id 0.
+
+  This irqchip 0 has 256 interrupt pins. These pins reflect the SRC pins
+  on the MPIC controller.
+
+  Access to non-SRC interrupts is not implemented through IRQ routing mechanisms.
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 36368c9..80f2004 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,10 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif
 
+/* These values are internal and can be increased later */
+#define KVM_NR_IRQCHIPS          1
+#define KVM_IRQCHIP_NUM_PINS     256
+
 #if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>
@@ -256,6 +260,9 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 #endif
+#ifdef CONFIG_KVM_MPIC
+	struct openpic *mpic;
+#endif
 };
 
 /*
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 36be2fe..3537bf3 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -25,6 +25,7 @@
 /* Select powerpc specific features in <linux/kvm.h> */
 #define __KVM_HAVE_SPAPR_TCE
 #define __KVM_HAVE_PPC_SMT
+#define __KVM_HAVE_IRQCHIP
 
 struct kvm_regs {
 	__u64 pc;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 938a729..a608570 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -154,6 +154,9 @@ config KVM_E500MC
 config KVM_MPIC
 	bool "KVM in-kernel MPIC emulation"
 	depends on KVM
+	select HAVE_KVM_IRQCHIP
+	select HAVE_KVM_IRQ_ROUTING
+	select HAVE_KVM_MSI
 	help
 	  Enable support for emulating MPIC devices inside the
 	  host kernel, rather than relying on userspace to emulate.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4a2277a..4eada0c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -104,6 +104,7 @@ kvm-book3s_32-objs := \
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
 
 kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
+kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(addprefix ../../../virt/kvm/, irqchip.o)
 
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
new file mode 100644
index 0000000..f1e27fd
--- /dev/null
+++ b/arch/powerpc/kvm/irq.h
@@ -0,0 +1,17 @@
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#include <linux/kvm_host.h>
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	int ret = 0;
+
+#ifdef CONFIG_KVM_MPIC
+	ret = ret || (kvm->arch.mpic != NULL);
+#endif
+	smp_rmb();
+	return ret;
+}
+
+#endif
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 10bc08a..d137df8 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1029,6 +1029,7 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
 	struct irq_source *src;
 	struct irq_dest *dst;
 	int s_IRQ, n_IRQ;
+	int notify_eoi = -1;
 
 	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
 		 addr, val);
@@ -1087,6 +1088,8 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
 		}
 
 		IRQ_resetbit(&dst->servicing, s_IRQ);
+		/* Notify listeners that the IRQ is over */
+		notify_eoi = s_IRQ;
 		/* Set up next servicing IRQ */
 		s_IRQ = IRQ_get_next(opp, &dst->servicing);
 		/* Check queued interrupts. */
@@ -1104,6 +1107,12 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
 		break;
 	}
 
+	if (notify_eoi != -1) {
+		spin_unlock_irq(&opp->lock);
+		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
+		spin_lock_irq(&opp->lock);
+	}
+
 	return 0;
 }
 
@@ -1639,14 +1648,42 @@ static void mpic_destroy(struct kvm_device *dev)
 		unmap_mmio(opp);
 	}
 
+	dev->kvm->arch.mpic = NULL;
 	kfree(opp);
 }
 
+static int mpic_set_default_irq_routing(struct openpic *opp)
+{
+	int i;
+	struct kvm_irq_routing_entry *routing;
+
+	/* XXX be more dynamic if we ever want to support multiple MPIC chips */
+	routing = kzalloc((sizeof(*routing) * opp->nb_irqs), GFP_KERNEL);
+	if (!routing)
+		return -ENOMEM;
+
+	for (i = 0; i < opp->nb_irqs; i++) {
+		routing[i].gsi = i;
+		routing[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+		routing[i].u.irqchip.irqchip = 0;
+		routing[i].u.irqchip.pin = i;
+	}
+
+	kvm_set_irq_routing(opp->kvm, routing, opp->nb_irqs, 0);
+
+	kfree(routing);
+	return 0;
+}
+
 static int mpic_create(struct kvm_device *dev, u32 type)
 {
 	struct openpic *opp;
 	int ret;
 
+	/* We only support one MPIC at a time for now */
+	if (dev->kvm->arch.mpic)
+		return -EINVAL;
+
 	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
 	if (!opp)
 		return -ENOMEM;
@@ -1691,7 +1728,15 @@ static int mpic_create(struct kvm_device *dev, u32 type)
 		goto err;
 	}
 
+	ret = mpic_set_default_irq_routing(opp);
+	if (ret)
+		goto err;
+
 	openpic_reset(opp);
+
+	smp_wmb();
+	dev->kvm->arch.mpic = opp;
+
 	return 0;
 
 err:
@@ -1761,3 +1806,71 @@ void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
 	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
 	kvm_device_put(opp->dev);
 }
+
+/*
+ * Return value:
+ *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
+ *  = 0   Interrupt was coalesced (previous irq is still pending)
+ *  > 0   Number of CPUs interrupt was delivered to
+ */
+static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
+			struct kvm *kvm, int irq_source_id, int level,
+			bool line_status)
+{
+	u32 irq = e->irqchip.pin;
+	struct openpic *opp = kvm->arch.mpic;
+
+	spin_lock_irq(&opp->lock);
+	openpic_set_irq(opp, irq, level);
+	spin_unlock_irq(&opp->lock);
+
+	/* All code paths we care about don't check for the return value */
+	return 0;
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+	struct openpic *opp = kvm->arch.mpic;
+	spin_lock_irq(&opp->lock);
+
+	/*
+	 * XXX We ignore the target address for now, as we only support
+	 *     a single MSI bank.
+	 */
+	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
+	spin_unlock_irq(&opp->lock);
+
+	/* All code paths we care about don't check for the return value */
+	return 0;
+}
+
+int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+			  struct kvm_kernel_irq_routing_entry *e,
+			  const struct kvm_irq_routing_entry *ue)
+{
+	int r = -EINVAL;
+
+	switch (ue->type) {
+	case KVM_IRQ_ROUTING_IRQCHIP:
+		e->set = mpic_set_irq;
+		e->irqchip.irqchip = ue->u.irqchip.irqchip;
+		e->irqchip.pin = ue->u.irqchip.pin;
+		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
+			goto out;
+		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
+		break;
+	case KVM_IRQ_ROUTING_MSI:
+		e->set = kvm_set_msi;
+		e->msi.address_lo = ue->u.msi.address_lo;
+		e->msi.address_hi = ue->u.msi.address_hi;
+		e->msi.data = ue->u.msi.data;
+		break;
+	default:
+		goto out;
+	}
+
+	r = 0;
+out:
+	return r;
+}
-- 
1.6.0.2
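
For reviewers who want the end-to-end picture, below is a rough userspace
sketch of what this enables: wiring a host eventfd to one of the MPIC's SRC
pins via KVM_IRQFD.  It assumes the device control API from this series
(KVM_CREATE_DEVICE with KVM_DEV_TYPE_FSL_MPIC_20); the helper name, the gsi
value and the omitted error/capability checks are purely illustrative and
not part of this patch.

  #include <sys/eventfd.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* vm_fd is an existing KVM VM file descriptor. */
  static int wire_irqfd_to_mpic(int vm_fd, unsigned int gsi)
  {
          struct kvm_create_device cd = {
                  .type = KVM_DEV_TYPE_FSL_MPIC_20,      /* in-kernel MPIC */
          };
          struct kvm_irqfd irqfd = { 0 };
          int efd;

          /* Creating the device also installs the default 1:1
           * gsi -> (irqchip 0, pin) routing set up by this patch. */
          if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                  return -1;

          /* Host-side eventfd that will trigger the guest interrupt. */
          efd = eventfd(0, 0);
          if (efd < 0)
                  return -1;

          irqfd.fd = efd;
          irqfd.gsi = gsi;        /* MPIC SRC pin, via the default routing */

          /* Attach the eventfd to the routed gsi. */
          return ioctl(vm_fd, KVM_IRQFD, &irqfd);
  }

With the default routing installed by mpic_set_default_irq_routing(), gsi n
maps straight to SRC pin n on irqchip 0, so a write to the eventfd (for
example from a vhost backend) raises that source in the guest without
bouncing through userspace.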