Plug the interrupt injection code. Interrupts can now be generated
from user space.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/include/asm/kvm_vgic.h |  7 +++++
 arch/arm/kvm/arm.c              | 35 +++++++++++++++++++++++-
 arch/arm/kvm/vgic.c             | 59 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 100 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index d682ee7..a420ff8 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -238,6 +238,7 @@ struct kvm_exit_mmio;
 #ifdef CONFIG_KVM_ARM_VGIC
 void kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, const struct kvm_irq_level *irq);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                       struct kvm_exit_mmio *mmio);
@@ -258,6 +259,12 @@ static inline void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu) {}
 
+static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
+                                      const struct kvm_irq_level *irq)
+{
+        return 0;
+}
+
 static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
         return 0;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index f0fa87b..4891105 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -691,6 +691,12 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
         bool set;
         int bit_index;
 
+        if (irqchip_in_kernel(kvm)) {
+                if (irq_level->irq < 32)
+                        return -EINVAL;
+                return kvm_vgic_inject_irq(kvm, 0, irq_level);
+        }
+
         vcpu_idx = irq_level->irq >> 1;
         if (vcpu_idx >= KVM_MAX_VCPUS)
                 return -EINVAL;
@@ -773,6 +779,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                         return -EFAULT;
                 return kvm_arm_set_msrs(vcpu, umsrs->entries, msrs.nmsrs);
         }
+#ifdef CONFIG_KVM_ARM_VGIC
+        case KVM_IRQ_LINE: {
+                struct kvm_irq_level irq_event;
+
+                if (copy_from_user(&irq_event, argp, sizeof irq_event))
+                        return -EFAULT;
+
+                if (!irqchip_in_kernel(vcpu->kvm))
+                        return -EINVAL;
+
+                if (irq_event.irq < 16 || irq_event.irq >= 32)
+                        return -EINVAL;
+                return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, &irq_event);
+        }
+#endif
         default:
                 return -EINVAL;
         }
@@ -786,7 +807,19 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 long kvm_arch_vm_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
 {
-        return -EINVAL;
+        struct kvm *kvm = filp->private_data;
+
+        switch (ioctl) {
+#ifdef CONFIG_KVM_ARM_VGIC
+        case KVM_CREATE_IRQCHIP:
+                if (vgic_present)
+                        return kvm_vgic_init(kvm);
+                else
+                        return -EINVAL;
+#endif
+        default:
+                return -EINVAL;
+        }
 }
 
 static void cpu_set_vector(void *vector)
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 3ed4d20..06c0e12 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -69,6 +69,7 @@
 #define ACCESS_WRITE_MASK(x)    ((x) & (3 << 1))
 
 static void vgic_update_state(struct kvm *kvm);
+static void vgic_kick_vcpus(struct kvm *kvm);
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
 
 /**
@@ -534,6 +535,9 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_exi
                 kvm_handle_mmio_return(vcpu, run);
         spin_unlock(&vcpu->kvm->arch.vgic.lock);
 
+        if (updated_state)
+                vgic_kick_vcpus(vcpu->kvm);
+
         return true;
 }
 
@@ -828,3 +832,58 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
         return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
 }
+
+static void vgic_kick_vcpus(struct kvm *kvm)
+{
+        struct kvm_vcpu *vcpu;
+        int c;
+
+        /*
+         * We've injected an interrupt, time to find out who deserves
+         * a good kick...
+         */
+        kvm_for_each_vcpu(c, vcpu, kvm) {
+                if (kvm_vgic_vcpu_pending_irq(vcpu))
+                        kvm_vcpu_kick(vcpu);
+        }
+}
+
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, const struct kvm_irq_level *irq)
+{
+        struct vgic_dist *dist = &kvm->arch.vgic;
+        int nrcpus = atomic_read(&kvm->online_vcpus);
+        int is_edge, state;
+        unsigned long flags;
+        bool updated_state = false;
+
+        if (cpuid >= nrcpus)
+                return -EINVAL;
+
+        /* Only PPIs or SPIs */
+        if (irq->irq >= VGIC_NR_IRQS || irq->irq < 16)
+                return -EINVAL;
+
+        kvm_debug("Inject IRQ%d\n", irq->irq);
+        spin_lock_irqsave(&dist->lock, flags);
+        is_edge = vgic_irq_is_edge(dist, irq->irq);
+        state = vgic_bitmap_get_irq_val(&dist->irq_state, cpuid, irq->irq);
+
+        /*
+         * Inject an interrupt if:
+         * - level triggered and we change level
+         * - edge triggered and we have a rising edge
+         */
+        if ((!is_edge && (state ^ !!irq->level)) ||
+            (is_edge && !state && irq->level)) {
+                vgic_bitmap_set_irq_val(&dist->irq_state, cpuid,
+                                        irq->irq, !!irq->level);
+                vgic_update_state(kvm);
+                updated_state = true;
+        }
+        spin_unlock_irqrestore(&dist->lock, flags);
+
+        if (updated_state)
+                vgic_kick_vcpus(kvm);
+
+        return 0;
+}
-- 
1.7.11.4
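
Not part of the patch, but for anyone wanting to poke at the new interface from user space, here is a rough sketch of how a VMM might drive it. The vm_fd variable, the inject_spi()/setup_and_pulse() helpers and SPI number 42 are invented for illustration, it assumes the VM file descriptor already exists, and error handling is reduced to perror():

/*
 * Illustrative userspace snippet only: inject an SPI through the
 * in-kernel VGIC. vm_fd is assumed to be an already-created KVM VM
 * file descriptor.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_spi(int vm_fd, unsigned int spi, int level)
{
        struct kvm_irq_level irq = {
                .irq   = spi,   /* SPIs are interrupt numbers 32 and up */
                .level = level, /* 1 = assert the line, 0 = deassert it */
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}

int setup_and_pulse(int vm_fd)
{
        /* Instantiate the in-kernel GIC model before injecting anything. */
        if (ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0) < 0) {
                perror("KVM_CREATE_IRQCHIP");
                return -1;
        }

        /* Pulse SPI 42: raise the line, then lower it again. */
        if (inject_spi(vm_fd, 42, 1) < 0 || inject_spi(vm_fd, 42, 0) < 0) {
                perror("KVM_IRQ_LINE");
                return -1;
        }

        return 0;
}

PPIs (interrupt numbers 16-31) are per-vcpu and would instead be injected through the KVM_IRQ_LINE ioctl on the vcpu file descriptor, as added to kvm_arch_vcpu_ioctl() above.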