On 2020-11-23 06:54, Shenming Lu wrote:
After pausing all vCPUs and devices capable of interrupting, in order
^^^^^^^^^^^^^^^^^
See my comment below about this.
to save the information of all interrupts, besides flushing the pending
states in kvm’s vgic, we also try to flush the states of VLPIs in the
virtual pending tables into guest RAM, but we need to have GICv4.1 and
safely unmap the vPEs first.
Signed-off-by: Shenming Lu <lushenming@xxxxxxxxxx>
---
arch/arm64/kvm/vgic/vgic-v3.c | 62 +++++++++++++++++++++++++++++++----
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9cdf39a94a63..e1b3aa4b2b12 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
@@ -356,6 +358,39 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
 	return 0;
 }
 
+/*
+ * With GICv4.1, we can get the VLPI's pending state after unmapping
+ * the vPE. The deactivation of the doorbell interrupt will trigger
+ * the unmapping of the associated vPE.
+ */
+static void get_vlpi_state_pre(struct vgic_dist *dist)
+{
+	struct irq_desc *desc;
+	int i;
+
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		return;
+
+	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+	}
+}
+
+static void get_vlpi_state_post(struct vgic_dist *dist)
nit: the naming feels a bit... odd. Pre/post what? See the illustrative
rename sketched below the second helper.
+{
+	struct irq_desc *desc;
+	int i;
+
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		return;
+
+	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+	}
+}
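Something along these lines would describe the operation better (only an
illustration: the name is the suggestion, the body is exactly what you
already have):

static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	if (!kvm_vgic_global_state.has_gicv4_1)
		return;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		/* Deactivating the doorbell is what unmaps the vPE */
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

with a matching map_all_vpes() on the irq_domain_activate_irq() side.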
+
 /**
  * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
  * kvm lock and all vcpu lock must be held
@@ -365,14 +400,17 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq;
 	gpa_t last_ptr = ~(gpa_t)0;
-	int ret;
+	int ret = 0;
 	u8 val;
 
+	get_vlpi_state_pre(dist);
+
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
+		bool is_pending = irq->pending_latch;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
@@ -387,24 +425,36 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		if (ptr != last_ptr) {
 			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 			if (ret)
-				return ret;
+				goto out;
 			last_ptr = ptr;
 		}
 
 		stored = val & (1U << bit_nr);
-		if (stored == irq->pending_latch)
+
+		/* also flush hw pending state */
This comment looks out of place, as we aren't flushing anything.
+		if (irq->hw) {
+			WARN_RATELIMIT(irq_get_irqchip_state(irq->host_irq,
+					IRQCHIP_STATE_PENDING, &is_pending),
+				       "IRQ %d", irq->host_irq);
Isn't this going to warn like mad on a GICv4.0 system where this, by
definition, will generate an error?
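Only a sketch, but gating the HW read on the has_gicv4_1 flag you already
check in the helpers above would keep GICv4.0 quiet, and gives the comment
something accurate to say:

		/* read the HW-level pending state for directly injected LPIs */
		if (irq->hw && kvm_vgic_global_state.has_gicv4_1) {
			int err = irq_get_irqchip_state(irq->host_irq,
							IRQCHIP_STATE_PENDING,
							&is_pending);

			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		}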
+		}
+
+		if (stored == is_pending)
 			continue;
 
-		if (irq->pending_latch)
+		if (is_pending)
 			val |= 1 << bit_nr;
 		else
 			val &= ~(1 << bit_nr);
 
 		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
-			return ret;
+			goto out;
 	}
-	return 0;
+
+out:
+	get_vlpi_state_post(dist);
This bit worries me: you have unmapped the VPEs, so any interrupt that
has been generated during that phase is now forever lost (the GIC
doesn't have ownership of the pending tables).

Do you really expect the VM to be restartable from that point? I don't
see how this is possible.
+
+	return ret;
 }
 
 /**
Thanks,
M.
--
Jazz is not dead. It just smells funny...