On 17/04/2020 09:33, Marc Zyngier wrote:
> When a guest tries to read the active state of its interrupts,
> we currently just return whatever state we have in memory. This
> means that if such an interrupt lives in a List Register on another
> CPU, we fail to observe the latest active state for this interrupt.
>
> In order to remedy this, stop all the other vcpus so that they exit
> and we can observe the most recent value for the state. This is
> similar to what we are doing for the write side of the same
> registers, and results in new MMIO handlers for userspace (which
> do not need to stop the guest, as it is supposed to be stopped
> already).

Thanks for the changes!
I checked for other users of VGIC_NR_PRIVATE_IRQS, and made sure no
other ACTIVE bit register handlers were missed.

Looks good to me!

>
> Reported-by: Julien Grall <julien@xxxxxxx>
> Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>

Reviewed-by: Andre Przywara <andre.przywara@xxxxxxx>

Cheers,
Andre
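For other readers: below is a rough sketch of the userspace side that ends
up in the new uaccess handler on a GICv2 guest. This is illustration only,
not part of the patch; vgic_fd and the hard-coded GICD_ISACTIVER1 offset
(0x304) are assumptions made up for the example. This path is expected to
run with all VCPUs already stopped (e.g. during migration), which is why it
can skip the halt/resume dance:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Read the active bits of INTIDs 32-63 of a GICv2 guest through the
     * KVM device attribute interface.  vgic_fd is assumed to be the fd
     * returned by KVM_CREATE_DEVICE for a KVM_DEV_TYPE_ARM_VGIC_V2
     * device, and all VCPUs are assumed to be stopped already.
     */
    static int read_gicd_isactiver1(int vgic_fd, uint32_t *val)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                    /* (cpuid << shift) | offset; the cpuid is irrelevant for SPIs */
                    .attr  = ((uint64_t)0 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x304,
                    .addr  = (uint64_t)(unsigned long)val,
            };

            return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }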
> ---
>  virt/kvm/arm/vgic/vgic-mmio-v2.c |   4 +-
>  virt/kvm/arm/vgic/vgic-mmio-v3.c |  12 ++--
>  virt/kvm/arm/vgic/vgic-mmio.c    | 100 ++++++++++++++++++++-----------
>  virt/kvm/arm/vgic/vgic-mmio.h    |   3 +
>  4 files changed, 75 insertions(+), 44 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
> index 5945f062d749..d63881f60e1a 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
> @@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
>  		vgic_mmio_read_active, vgic_mmio_write_sactive,
> -		NULL, vgic_mmio_uaccess_write_sactive, 1,
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
>  		vgic_mmio_read_active, vgic_mmio_write_cactive,
> -		NULL, vgic_mmio_uaccess_write_cactive, 1,
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
>  		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
> diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> index e72dcc454247..f2b37a081f26 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> @@ -553,11 +553,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
>  		vgic_mmio_read_active, vgic_mmio_write_sactive,
> -		NULL, vgic_mmio_uaccess_write_sactive, 1,
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
>  		vgic_mmio_read_active, vgic_mmio_write_cactive,
> -		NULL, vgic_mmio_uaccess_write_cactive,
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
>  		1, VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
>  		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
> @@ -625,12 +625,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
>  		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
>  		vgic_mmio_read_active, vgic_mmio_write_sactive,
> -		NULL, vgic_mmio_uaccess_write_sactive,
> -		4, VGIC_ACCESS_32bit),
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
> +		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
>  		vgic_mmio_read_active, vgic_mmio_write_cactive,
> -		NULL, vgic_mmio_uaccess_write_cactive,
> -		4, VGIC_ACCESS_32bit),
> +		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
> +		VGIC_ACCESS_32bit),
>  	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
>  		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
>  		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
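One note for other readers, to spell out the condition used by
vgic_access_active_prepare()/vgic_access_active_finish() in the vgic-mmio.c
hunk below. This is a standalone illustration of the same check, not part of
the patch:

    /*
     * Illustration only: which trapped ACTIVE register accesses halt all
     * VCPUs.  VGIC_NR_PRIVATE_IRQS is 32 (SGIs 0-15 plus PPIs 16-31).
     */
    static bool active_access_halts_guest(u32 vgic_model, u32 intid)
    {
            /*
             * Shared interrupts, and any interrupt on GICv3, may require
             * the other VCPUs to be out of the guest; GICv2 private
             * interrupts can only be changed by their owning VCPU, so
             * there is nothing to do for them.
             */
            return vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
                   intid >= VGIC_NR_PRIVATE_IRQS;
    }

So on a GICv2 guest a read of GICD_ISACTIVER0 (INTIDs 0-31) stays cheap,
while the SPI ranges, and anything on GICv3, now pay for a halt/resume of
the whole VM.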
> diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
> index d085e047953f..b38e94e8f74a 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio.c
> @@ -348,8 +348,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
>  	}
>  }
>
> -unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
> -				    gpa_t addr, unsigned int len)
> +
> +/*
> + * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
> + * is not queued on some running VCPU's LRs, because then the change to the
> + * active state can be overwritten when the VCPU's state is synced coming back
> + * from the guest.
> + *
> + * For shared interrupts as well as GICv3 private interrupts, we have to
> + * stop all the VCPUs because interrupts can be migrated while we don't hold
> + * the IRQ locks and we don't want to be chasing moving targets.
> + *
> + * For GICv2 private interrupts we don't have to do anything because
> + * userspace accesses to the VGIC state already require all VCPUs to be
> + * stopped, and only the VCPU itself can modify its private interrupts
> + * active state, which guarantees that the VCPU is not running.
> + */
> +static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
> +{
> +	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
> +	    intid >= VGIC_NR_PRIVATE_IRQS)
> +		kvm_arm_halt_guest(vcpu->kvm);
> +}
> +
> +/* See vgic_access_active_prepare */
> +static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
> +{
> +	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
> +	    intid >= VGIC_NR_PRIVATE_IRQS)
> +		kvm_arm_resume_guest(vcpu->kvm);
> +}
> +
> +static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
> +					     gpa_t addr, unsigned int len)
>  {
>  	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
>  	u32 value = 0;
> @@ -359,6 +390,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
>  	for (i = 0; i < len * 8; i++) {
>  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
>
> +		/*
> +		 * Even for HW interrupts, don't evaluate the HW state as
> +		 * all the guest is interested in is the virtual state.
> +		 */
>  		if (irq->active)
>  			value |= (1U << i);
>
> @@ -368,6 +403,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
>  	return value;
>  }
>
> +unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
> +				    gpa_t addr, unsigned int len)
> +{
> +	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
> +	u32 val;
> +
> +	mutex_lock(&vcpu->kvm->lock);
> +	vgic_access_active_prepare(vcpu, intid);
> +
> +	val = __vgic_mmio_read_active(vcpu, addr, len);
> +
> +	vgic_access_active_finish(vcpu, intid);
> +	mutex_unlock(&vcpu->kvm->lock);
> +
> +	return val;
> +}
> +
> +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
> +				       gpa_t addr, unsigned int len)
> +{
> +	return __vgic_mmio_read_active(vcpu, addr, len);
> +}
> +
>  /* Must be called with irq->irq_lock held */
>  static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
>  				      bool active, bool is_uaccess)
> @@ -426,36 +484,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
>  	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
>  }
>
> -/*
> - * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
> - * is not queued on some running VCPU's LRs, because then the change to the
> - * active state can be overwritten when the VCPU's state is synced coming back
> - * from the guest.
> - *
> - * For shared interrupts, we have to stop all the VCPUs because interrupts can
> - * be migrated while we don't hold the IRQ locks and we don't want to be
> - * chasing moving targets.
> - *
> - * For private interrupts we don't have to do anything because userspace
> - * accesses to the VGIC state already require all VCPUs to be stopped, and
> - * only the VCPU itself can modify its private interrupts active state, which
> - * guarantees that the VCPU is not running.
> - */
> -static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
> -{
> -	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
> -	    intid >= VGIC_NR_PRIVATE_IRQS)
> -		kvm_arm_halt_guest(vcpu->kvm);
> -}
> -
> -/* See vgic_change_active_prepare */
> -static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
> -{
> -	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
> -	    intid >= VGIC_NR_PRIVATE_IRQS)
> -		kvm_arm_resume_guest(vcpu->kvm);
> -}
> -
>  static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
>  				      gpa_t addr, unsigned int len,
>  				      unsigned long val)
> @@ -477,11 +505,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
>  	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
>
>  	mutex_lock(&vcpu->kvm->lock);
> -	vgic_change_active_prepare(vcpu, intid);
> +	vgic_access_active_prepare(vcpu, intid);
>
>  	__vgic_mmio_write_cactive(vcpu, addr, len, val);
>
> -	vgic_change_active_finish(vcpu, intid);
> +	vgic_access_active_finish(vcpu, intid);
>  	mutex_unlock(&vcpu->kvm->lock);
>  }
>
> @@ -514,11 +542,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
>  	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
>
>  	mutex_lock(&vcpu->kvm->lock);
> -	vgic_change_active_prepare(vcpu, intid);
> +	vgic_access_active_prepare(vcpu, intid);
>
>  	__vgic_mmio_write_sactive(vcpu, addr, len, val);
>
> -	vgic_change_active_finish(vcpu, intid);
> +	vgic_access_active_finish(vcpu, intid);
>  	mutex_unlock(&vcpu->kvm->lock);
>  }
>
> diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
> index 5af2aefad435..30713a44e3fa 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio.h
> +++ b/virt/kvm/arm/vgic/vgic-mmio.h
> @@ -152,6 +152,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
>  unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
>  				    gpa_t addr, unsigned int len);
>
> +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
> +				       gpa_t addr, unsigned int len);
> +
>  void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
>  			     gpa_t addr, unsigned int len,
>  			     unsigned long val);
>

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm