[Android-virt] [PATCH 04/15] ARM: KVM: VGIC distributor handling

Add the GIC distributor emulation code. A number of the GIC features
are simply ignored as they are not required to boot a Linux guest.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm/include/asm/kvm_vgic.h |  101 +++++++++++++
 arch/arm/kvm/vgic.c             |  315 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 416 insertions(+), 0 deletions(-)
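
For reviewers: a minimal, stand-alone sketch (not part of the patch) of the
offset/shift arithmetic the generated vgic_bitmap accessors perform for
1-bit fields, assuming VGIC_NR_IRQS = 128. FIELD_SIZE below stands in for
the macro's "size" parameter. IRQs 0-31 live in the banked per-CPU block
(one u32 per CPU), IRQs 32-127 in the shared global block (three u32s):

#include <stdio.h>

#define VGIC_NR_IRQS	128
#define FIELD_SIZE	1	/* bits per interrupt, as in vgic_bitmap */

int main(void)
{
	int irq = 33;				/* an SPI, i.e. a global interrupt */
	int irq_per_u32 = 32 / FIELD_SIZE;	/* IRQs per 32-bit register: 32 */
	int glob_offset = 32 / irq_per_u32;	/* banked words per CPU: 1 */
	unsigned int offset = (irq / irq_per_u32) << 2;		/* byte offset: 4 */
	unsigned int shift = (irq % irq_per_u32) * FIELD_SIZE;	/* bit: 1 */
	unsigned int word = offset >> 2;	/* word index: 1 */

	if (word < (unsigned int)glob_offset)
		printf("IRQ%d -> percpu word %u, bit %u\n", irq, word, shift);
	else
		printf("IRQ%d -> global word %u, bit %u\n",
		       irq, word - glob_offset, shift);
	return 0;
}

The same arithmetic, with FIELD_SIZE set to 2 or 8, covers vgic_2bitmap
(ICFGRn) and vgic_bytemap (IPRIORITYRn/ITARGETSRn). The GICD_SGIR decoding
in vgic_dispatch_sgi() below follows the architected layout: bits [3:0] are
the SGI number, bits [23:16] the CPU target list and bits [25:24] the target
list filter (0: use the list, 1: all but self, 2: self only).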

diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index 34f5cc0..dca2d09 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -1,7 +1,108 @@
 #ifndef __ASM_ARM_KVM_VGIC_H
 #define __ASM_ARM_KVM_VGIC_H
 
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define VGIC_NR_IRQS	128	      /* Arbitrary number */
+#define VGIC_MAX_CPUS	KVM_MAX_VCPUS /* Same as the HW GIC */
+
+#if (VGIC_MAX_CPUS > 8)
+#error	Invalid number of CPU interfaces
+#endif
+
+/*
+ * The GIC registers describing interrupts have two parts:
+ * - 32 per-CPU interrupts (SGI + PPI)
+ * - a bunch of global interrupts (SPI)
+ * They can have 1-, 2- or 8-bit fields. Make this easier by providing a
+ * template to create the structures and the accessors.
+ */
+#define DEFINE_VGIC_MAP_STRUCT(typename, size)				  \
+struct typename {						  	  \
+	union {								  \
+		u32 reg[32 / (sizeof(u32) * 8 / size)];			  \
+		unsigned long reg_ul[0];				  \
+	} percpu[VGIC_MAX_CPUS];					  \
+	union {								  \
+		u32 reg[(VGIC_NR_IRQS - 32) / (sizeof(u32) * 8 / size)];  \
+		unsigned long reg_ul[0];				  \
+	} global;							  \
+};								  	  \
+static inline u32 *typename##_get_reg(struct typename *x,		  \
+				      int cpuid, u32 offset)		  \
+{									  \
+	static const int irq_per_u32 = sizeof(u32) * 8 / size;		  \
+	static const int glob_offset = 32 / irq_per_u32;		  \
+	offset >>= 2;							  \
+	BUG_ON(offset >= (VGIC_NR_IRQS / irq_per_u32));			  \
+	if (offset < glob_offset)					  \
+		return x->percpu[cpuid].reg + offset;			  \
+	else								  \
+		return x->global.reg + offset - glob_offset;		  \
+}									  \
+static inline int typename##_get_irq_val(struct typename *x,		  \
+					 int cpuid, int irq)		  \
+{									  \
+	static const int irq_per_u32 = sizeof(u32) * 8 / size;		  \
+	static const u32 mask = (1 << size) - 1;			  \
+	u32 *reg, offset, shift;					  \
+	offset = (irq / irq_per_u32) << 2;				  \
+	shift = (irq % irq_per_u32) * size;				  \
+	reg = typename##_get_reg(x, cpuid, offset);			  \
+	return (*reg >> shift) & mask;					  \
+}									  \
+static inline void typename##_set_irq_val(struct typename *x,		  \
+					 int cpuid, int irq, int val)	  \
+{									  \
+	static const int irq_per_u32 = sizeof(u32) * 8 / size;		  \
+	static const u32 mask = (1 << size) - 1;			  \
+	u32 *reg, offset, shift;					  \
+	offset = (irq / irq_per_u32) << 2;				  \
+	shift = (irq % irq_per_u32) * size;				  \
+	reg = typename##_get_reg(x, cpuid, offset);			  \
+	*reg &= ~(mask << shift);					  \
+	*reg |= (val & mask) << shift;					  \
+}									  \
+static inline unsigned long *typename##_get_cpu_map(struct typename *x,	  \
+						    int cpu_id)		  \
+{									  \
+	if (unlikely(cpu_id >= VGIC_MAX_CPUS))				  \
+		return NULL;						  \
+	return x->percpu[cpu_id].reg_ul;				  \
+}
+
+
+DEFINE_VGIC_MAP_STRUCT(vgic_bitmap, 1);
+DEFINE_VGIC_MAP_STRUCT(vgic_2bitmap, 2);
+DEFINE_VGIC_MAP_STRUCT(vgic_bytemap, 8);
+
 struct vgic_dist {
+#ifdef CONFIG_KVM_ARM_VGIC
+	spinlock_t		lock;
+
+	void __iomem		*vctrl_base;
+
+	u32			enabled;
+
+	struct vgic_bitmap	irq_enabled;
+	struct vgic_bitmap	irq_pending;
+	struct vgic_bitmap	irq_active; /* Not used yet. Useful? */
+
+	struct vgic_bytemap	irq_priority;/* Not used yet. Useful? */
+	struct vgic_bytemap	irq_target;
+
+	struct vgic_2bitmap	irq_cfg; /* Not used yet. Useful? */
+
+	u8			irq_sgi_sources[VGIC_MAX_CPUS][16];
+
+	struct vgic_bitmap	irq_spi_target[VGIC_MAX_CPUS];
+
+	atomic_t		irq_pending_on_cpu;
+#endif
 };
 
 struct vgic_cpu {
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index f7856a9..1ace859 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -21,6 +21,10 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 
+/* Temporary hacks, need to probe DT instead */
+#define VGIC_DIST_BASE		0x2c001000
+#define VGIC_DIST_SIZE		0x1000
+
 #define ACCESS_READ_VALUE	(1 << 0)
 #define ACCESS_READ_RAZ		(0 << 0)
 #define ACCESS_READ_MASK(x)	((x) & (1 << 0))
@@ -30,6 +34,9 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static void vgic_update_state(struct kvm *kvm);
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+
 static void mmio_do_copy(struct kvm_run *run, u32 *reg, u32 offset, int mode)
 {
 	int u32off = offset & 3;
@@ -83,6 +90,182 @@ static void mmio_do_copy(struct kvm_run *run, u32 *reg, u32 offset, int mode)
 	}
 }
 
+static void handle_mmio_misc(struct kvm_vcpu *vcpu,
+			     struct kvm_run *run, u32 offset)
+{
+	u32 reg;
+	u32 u32off = offset & 3;
+
+	switch (offset & ~3) {
+	case 0:			/* CTLR */
+		reg = vcpu->kvm->arch.vgic.enabled;
+		mmio_do_copy(run, &reg, u32off,
+			     ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+		if (run->mmio.is_write) {
+			vcpu->kvm->arch.vgic.enabled = reg & 1;
+			vgic_update_state(vcpu->kvm);
+		}
+		break;
+
+	case 4:			/* TYPER */
+		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+		reg |= (VGIC_NR_IRQS >> 5) - 1;
+		mmio_do_copy(run, &reg, u32off,
+			     ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		break;
+
+	case 8:			/* IIDR */
+		reg = 0x4B00043B;
+		mmio_do_copy(run, &reg, u32off,
+			     ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		break;
+	}
+}
+
+static void handle_mmio_group_reg(struct kvm_vcpu *vcpu,
+				  struct kvm_run *run, u32 offset)
+{
+	mmio_do_copy(run, NULL, offset,
+		     ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+}
+
+static void handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	if (run->mmio.is_write)
+		vgic_update_state(vcpu->kvm);
+}
+
+static void handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	if (run->mmio.is_write && offset < 4) /* Force SGI enabled */
+		*reg |= 0xffff;
+}
+
+static void handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+					struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	if (run->mmio.is_write)
+		vgic_update_state(vcpu->kvm);
+}
+
+static void handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+					  struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+}
+
+static void handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_active,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+}
+
+static void handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_active,
+				       vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+}
+
+static void handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+				     struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+					vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+}
+
+static void update_spi_target(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int c, i, nrcpus = atomic_read(&kvm->online_vcpus);
+	u8 targ;
+	unsigned long *bmap;
+
+	for (i = 32; i < VGIC_NR_IRQS; i++) {
+		targ = vgic_bytemap_get_irq_val(&dist->irq_target, 0, i);
+
+		for (c = 0; c < nrcpus; c++) {
+			bmap = dist->irq_spi_target[c].global.reg_ul;
+
+			if (targ & (1 << c))
+				set_bit(i - 32, bmap);
+			else
+				clear_bit(i - 32, bmap);
+		}
+	}
+}
+
+static void handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+				   struct kvm_run *run, u32 offset)
+{
+	u32 *reg;
+
+	/* We treat the banked interrupts targets as read-only */
+	if (offset < 32) {
+		u32 roreg = 1 << vcpu->vcpu_id;
+		roreg |= roreg << 8;
+		roreg |= roreg << 16;
+
+		mmio_do_copy(run, &roreg, offset,
+			     ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		return;
+	}
+
+	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_target,
+				   vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+	if (run->mmio.is_write) {
+		update_spi_target(vcpu->kvm);
+		vgic_update_state(vcpu->kvm);
+	}
+}
+
+static void handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+				struct kvm_run *run, u32 offset)
+{
+	u32 *reg = vgic_2bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+					vcpu->vcpu_id, offset);
+	mmio_do_copy(run, reg, offset,
+		     ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+}
+
+static void handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+				struct kvm_run *run, u32 offset)
+{
+	u32 reg;
+	mmio_do_copy(run, &reg, offset,
+		     ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+	if (run->mmio.is_write) {
+		vgic_dispatch_sgi(vcpu, reg);
+		vgic_update_state(vcpu->kvm);
+	}
+}
+
 /* All this should really be generic code... FIXME!!! */
 struct mmio_range {
 	unsigned long base;
@@ -92,6 +275,66 @@ struct mmio_range {
 };
 
 static const struct mmio_range vgic_ranges[] = {
+	{			/* CTLR, TYPER, IIDR */
+		.base		= VGIC_DIST_BASE,
+		.len		= 12,
+		.handle_mmio	= handle_mmio_misc,
+	},
+	{			/* IGROUPRn */
+		.base		= VGIC_DIST_BASE + 0x80,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_group_reg,
+	},
+	{			/* ISENABLERn */
+		.base		= VGIC_DIST_BASE + 0x100,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_set_enable_reg,
+	},
+	{			/* ICENABLERn */
+		.base		= VGIC_DIST_BASE + 0x180,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_clear_enable_reg,
+	},
+	{			/* ISPENDRn */
+		.base		= VGIC_DIST_BASE + 0x200,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_set_pending_reg,
+	},
+	{			/* ICPENDRn */
+		.base		= VGIC_DIST_BASE + 0x280,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_clear_pending_reg,
+	},
+	{			/* ISACTIVERn */
+		.base		= VGIC_DIST_BASE + 0x300,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_set_active_reg,
+	},
+	{			/* ICACTIVERn */
+		.base		= VGIC_DIST_BASE + 0x380,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_clear_active_reg,
+	},
+	{			/* IPRIORITYRn */
+		.base		= VGIC_DIST_BASE + 0x400,
+		.len		= VGIC_NR_IRQS,
+		.handle_mmio	= handle_mmio_priority_reg,
+	},
+	{			/* ITARGETSRn */
+		.base		= VGIC_DIST_BASE + 0x800,
+		.len		= VGIC_NR_IRQS,
+		.handle_mmio	= handle_mmio_target_reg,
+	},
+	{			/* ICFGRn */
+		.base		= VGIC_DIST_BASE + 0xC00,
+		.len		= VGIC_NR_IRQS / 4,
+		.handle_mmio	= handle_mmio_cfg_reg,
+	},
+	{			/* SGIRn */
+		.base		= VGIC_DIST_BASE + 0xF00,
+		.len		= 4,
+		.handle_mmio	= handle_mmio_sgi_reg,
+	},
 	{}
 };
 
@@ -121,10 +364,82 @@ int vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (!range || !range->handle_mmio)
 		return KVM_EXIT_MMIO;
 
+	spin_lock(&vcpu->kvm->arch.vgic.lock);
 	pr_debug("emulating %d %08llx %d\n", run->mmio.is_write,
 		 run->mmio.phys_addr, run->mmio.len);
 	range->handle_mmio(vcpu, run, run->mmio.phys_addr - range->base);
 	kvm_handle_mmio_return(vcpu, run);
+	spin_unlock(&vcpu->kvm->arch.vgic.lock);
 
 	return KVM_EXIT_UNKNOWN;
 }
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int nrcpus = atomic_read(&kvm->online_vcpus);
+	u8 target_cpus;
+	int sgi, mode, c, vcpu_id;
+
+	vcpu_id = vcpu->vcpu_id;
+
+	sgi = reg & 0xf;
+	target_cpus = (reg >> 16) & 0xff;
+	mode = (reg >> 24) & 3;
+
+	switch (mode) {
+	case 0:
+		if (!target_cpus)
+			return;
+		break;
+	case 1:
+		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+		break;
+
+	case 2:
+		target_cpus = 1 << vcpu_id;
+		break;
+	}
+
+	for (c = 0; c < nrcpus; c++) {
+		if (target_cpus & 1) {
+			/* Flag the SGI as pending */
+			vgic_bitmap_set_irq_val(&dist->irq_pending, c, sgi, 1);
+			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
+			pr_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+		}
+
+		target_cpus >>= 1;
+	}
+}
+
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+/*
+ * Update the interrupt state and determine which CPUs have pending
+ * interrupts. Must be called with distributor lock held.
+ */
+static void vgic_update_state(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int nrcpus = atomic_read(&kvm->online_vcpus);
+	int c;
+
+	if (!dist->enabled) {
+		atomic_set(&dist->irq_pending_on_cpu, 0);
+		return;
+	}
+
+	for (c = 0; c < nrcpus; c++) {
+		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, c);
+
+		if (compute_pending_for_cpu(vcpu)) {
+			pr_debug("CPU%d has pending interrupts\n", c);
+			atomic_or((1 << c), &dist->irq_pending_on_cpu);
+		}
+	}
+}
-- 
1.7.7.1




