Hi Julien,
Thanks for the heads up.
On 2020-04-06 14:16, Julien Grall wrote:
> Hi,
>
> The Xen community is currently reviewing a new implementation for
> reading the I{S,C}ACTIVER registers (see [1]).
>
> The implementation is based on vgic_mmio_read_active() in KVM, i.e. the
> active state of the interrupts is based on the vGIC state stored in
> memory.
>
> While reviewing the patch on xen-devel, I noticed a potential deadlock,
> at least with the Xen implementation. I know that the Xen vGIC and the
> KVM vGIC are quite different, so I looked at the KVM implementation to
> see how this is dealt with there. With my limited knowledge of KVM, I
> wasn't able to rule it out. I am curious to know if I missed anything.
>
> vCPU A may read the active state of an interrupt routed to vCPU B.
> When vCPU A reads the state, it will read the state stored in memory.
> The only way the memory state can get synced with the HW state is when
> vCPU B exits guest context.
>
> AFAICT, vCPU B will not exit when deactivating HW-mapped interrupts
> and virtual edge interrupts. So vCPU B may run for an arbitrarily long
> time before exiting and syncing the memory state with the HW state.
So while I agree that this is definitely not ideal, I don't think we
end up with a deadlock (or rather a livelock) either. That's because we
are guaranteed to exit eventually, if only because the kernel's own
timer interrupt (or any other host interrupt routed to the same
physical CPU) will fire and get us out of there. On its own, this is
enough to allow the polling vcpu to make forward progress.
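If it helps, here is a toy pthreads model of that argument (emphatically
not KVM code, just something to show that a periodic tick bounds how
stale the memory copy can get): one thread stands in for vCPU B, which
has already deactivated the interrupt in "HW" but only syncs the
in-memory copy when the tick kicks it out, while the main thread stands
in for vCPU A polling the memory copy.

/*
 * Toy model: vCPU A polls a memory snapshot that vCPU B only refreshes
 * on each forced exit. Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool hw_active = true;	/* state held in the LRs */
static atomic_bool mem_active = true;	/* snapshot vCPU A reads */
static atomic_bool stop;

static void *vcpu_b(void *arg)
{
	/* B deactivates the interrupt in "HW" immediately... */
	atomic_store(&hw_active, false);

	/* ...but only syncs the memory copy on each forced exit. */
	while (!atomic_load(&stop)) {
		usleep(4000);	/* host timer tick, ~HZ=250 */
		atomic_store(&mem_active, atomic_load(&hw_active));
	}
	return NULL;
}

int main(void)
{
	pthread_t b;

	pthread_create(&b, NULL, vcpu_b, NULL);

	/* vCPU A: poll the memory state, as the ISACTIVER read does. */
	while (atomic_load(&mem_active))
		;	/* spins for at most about one tick */

	atomic_store(&stop, true);
	pthread_join(b, NULL);
	printf("poll completed: memory state eventually synced\n");
	return 0;
}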
Now, it is obvious that we should improve on the current situation. I
just hacked together a patch that provides the same guarantee we
already have on the write side (kick all vcpus out of the guest,
snapshot the state, kick everyone back in). I boot-tested it, so it is
obviously perfect and won't eat your data at all! ;-)
Thanks,
M.
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+					     gpa_t addr, unsigned int len)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 value = 0;
@@ -359,6 +390,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/*
+		 * Even for HW interrupts, don't evaluate the HW state as
+		 * all the guest is interested in is the virtual state.
+		 */
 		if (irq->active)
 			value |= (1U << i);
 
@@ -368,6 +403,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 val;
+
+	mutex_lock(&vcpu->kvm->lock);
+	vgic_access_active_prepare(vcpu, intid);
+
+	val = __vgic_mmio_read_active(vcpu, addr, len);
+
+	vgic_access_active_finish(vcpu, intid);
+	mutex_unlock(&vcpu->kvm->lock);
+
+	return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+				       gpa_t addr, unsigned int len)
+{
+	return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				      bool active, bool is_uaccess)
@@ -426,36 +484,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_resume_guest(vcpu->kvm);
-}
-
 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 				      gpa_t addr, unsigned int len,
 				      unsigned long val)
@@ -477,11 +505,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
 
@@ -514,11 +542,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
--
Jazz is not dead. It just smells funny...