Hi Marc,

On 3/4/20 9:33 PM, Marc Zyngier wrote:
> Most of the GICv3 emulation code that deals with SGIs now has to be
> aware of the v4.1 capabilities in order to benefit from it.
>
> Add such support, keyed on the interrupt having the hw flag set and
> being a SGI.
>
> Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
> ---
>  virt/kvm/arm/vgic/vgic-mmio-v3.c | 15 +++++-
>  virt/kvm/arm/vgic/vgic-mmio.c    | 88 ++++++++++++++++++++++++++++++--
>  2 files changed, 96 insertions(+), 7 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> index ebc218840fc2..de89da76a379 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> @@ -6,6 +6,7 @@
>  #include <linux/irqchip/arm-gic-v3.h>
>  #include <linux/kvm.h>
>  #include <linux/kvm_host.h>
> +#include <linux/interrupt.h>
>  #include <kvm/iodev.h>
>  #include <kvm/arm_vgic.h>
>
> @@ -942,8 +943,18 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
>  		 * generate interrupts of either group.
>  		 */
>  		if (!irq->group || allow_group1) {
> -			irq->pending_latch = true;
> -			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +			if (!irq->hw) {
> +				irq->pending_latch = true;
> +				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +			} else {
> +				/* HW SGI? Ask the GIC to inject it */
> +				int err;

nit: add a blank line after the declaration

> +				err = irq_set_irqchip_state(irq->host_irq,
> +							    IRQCHIP_STATE_PENDING,
> +							    true);
> +				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> +				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> +			}
>  		} else {
>  			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
>  		}
> diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
> index 97fb2a40e6ba..2199302597fa 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio.c
> @@ -5,6 +5,8 @@
>
>  #include <linux/bitops.h>
>  #include <linux/bsearch.h>
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
>  #include <linux/kvm.h>
>  #include <linux/kvm_host.h>
>  #include <kvm/iodev.h>
> @@ -59,6 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
>  	return value;
>  }
>
> +static void vgic_update_vsgi(struct vgic_irq *irq)
> +{
> +	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
> +}
> +
>  void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
>  			   unsigned int len, unsigned long val)
>  {
> @@ -71,7 +78,12 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
>  		irq->group = !!(val & BIT(i));
> -		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +			vgic_update_vsgi(irq);
> +			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> +		} else {
> +			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +		}
>
>  		vgic_put_irq(vcpu->kvm, irq);
>  	}
> @@ -113,7 +125,21 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
>  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
> -		if (vgic_irq_is_mapped_level(irq)) {
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +			if (!irq->enabled) {
> +				struct irq_data *data;
> +
> +				irq->enabled = true;
> +				data = &irq_to_desc(irq->host_irq)->irq_data;
> +				while (irqd_irq_disabled(data))
> +					enable_irq(irq->host_irq);

Could you explain why the while() is needed here?
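For my own education, the genirq behaviour I have in mind is the depth
accounting sketched below: disable_irq()/disable_irq_nosync() nest, and
enable_irq() only re-enables the line once the depth drops back to zero.
Is the loop there because the host SGI can end up with a depth greater
than one? The snippet is purely illustrative and force_enable_host_irq
is a name I just made up, it is not in the patch:

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	/*
	 * Illustration only: genirq keeps a per-descriptor disable depth.
	 * disable_irq_nosync() increments it (masking the line on the
	 * 0 -> 1 transition) and enable_irq() decrements it, unmasking
	 * only when the depth reaches 0 again. A single enable_irq() is
	 * therefore not enough if the line was disabled more than once.
	 */
	static void force_enable_host_irq(unsigned int host_irq)
	{
		struct irq_data *data = irq_get_irq_data(host_irq);

		/* undo nested disables until the line is actually enabled */
		while (irqd_irq_disabled(data))
			enable_irq(host_irq);
	}

If the depth can never exceed one here, wouldn't a single enable_irq() do?
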
> +			}
> +
> +			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> +			vgic_put_irq(vcpu->kvm, irq);
> +
> +			continue;
> +		} else if (vgic_irq_is_mapped_level(irq)) {
>  			bool was_high = irq->line_level;
>
>  			/*
> @@ -148,6 +174,8 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
>  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
> +			disable_irq_nosync(irq->host_irq);
>
>  		irq->enabled = false;
>
> @@ -167,10 +195,22 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
>  	for (i = 0; i < len * 8; i++) {
>  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
>  		unsigned long flags;
> +		bool val;
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
> -		if (irq_is_pending(irq))
> -			value |= (1U << i);
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +			int err;
> +
> +			val = false;
> +			err = irq_get_irqchip_state(irq->host_irq,
> +						    IRQCHIP_STATE_PENDING,
> +						    &val);
> +			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> +		} else {
> +			val = irq_is_pending(irq);
> +		}
> +
> +		value |= ((u32)val << i);
>  		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
>
>  		vgic_put_irq(vcpu->kvm, irq);
> @@ -215,6 +255,21 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
>  		}
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
> +
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +			/* HW SGI? Ask the GIC to inject it */
> +			int err;

nit: a blank line is missing after the declaration here as well

> +			err = irq_set_irqchip_state(irq->host_irq,
> +						    IRQCHIP_STATE_PENDING,
> +						    true);
> +			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> +
> +			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> +			vgic_put_irq(vcpu->kvm, irq);
> +
> +			continue;
> +		}
> +
>  		if (irq->hw)
>  			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
>  		else
> @@ -269,6 +324,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
>
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
>
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +			/* HW SGI? Ask the GIC to clear its pending bit */
> +			int err;
> +			err = irq_set_irqchip_state(irq->host_irq,
> +						    IRQCHIP_STATE_PENDING,
> +						    false);
> +			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> +
> +			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> +			vgic_put_irq(vcpu->kvm, irq);
> +
> +			continue;
> +		}
> +
>  		if (irq->hw)
>  			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
>  		else
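Not a blocker, but the same "ask the GIC to update the pending state"
sequence now appears in three places (vgic_v3_dispatch_sgi(), ISPENDR
and ICPENDR above). Would a small helper be worth it? Rough sketch only,
and vgic_hw_sgi_set_pending is just a name I made up, not something from
the patch:

	/*
	 * Sketch of a possible helper: forward a vSGI pending update to
	 * the GIC through the irqchip state interface and warn
	 * (ratelimited) if the host side rejects it.
	 */
	static void vgic_hw_sgi_set_pending(struct vgic_irq *irq, bool pending)
	{
		int err;

		err = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    pending);
		WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
	}

Feel free to ignore if you prefer keeping the call sites explicit.
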
> @@ -318,8 +387,15 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
>
>  	raw_spin_lock_irqsave(&irq->irq_lock, flags);
>
> -	if (irq->hw) {
> +	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
>  		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
> +	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
> +		/*
> +		 * GICv4.1 VSGI feature doesn't track an active state,
> +		 * so let's not kid ourselves, there is nothing we can
> +		 * do here.
> +		 */
> +		irq->active = false;
>  	} else {
>  		u32 model = vcpu->kvm->arch.vgic.vgic_model;
>  		u8 active_source;
> @@ -493,6 +569,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
>  		raw_spin_lock_irqsave(&irq->irq_lock, flags);
>  		/* Narrow the priority range to what we actually support */
>  		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
> +		if (irq->hw && vgic_irq_is_sgi(irq->intid))
> +			vgic_update_vsgi(irq);
>  		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
>
>  		vgic_put_irq(vcpu->kvm, irq);
>

Reviewed-by: Eric Auger <eric.auger@xxxxxxxxxx>

Thanks

Eric