Wire the initial in-kernel MMIO support code for the VGIC, used for
the distributor emulation.

Signed-off-by: Marc Zyngier <marc.zyngier at arm.com>
---
 arch/arm/include/asm/kvm_vgic.h |    5 +-
 arch/arm/kvm/Makefile           |    1 +
 arch/arm/kvm/mmu.c              |   25 +++++---
 arch/arm/kvm/vgic.c             |  127 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 150 insertions(+), 8 deletions(-)
 create mode 100644 arch/arm/kvm/vgic.c

diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index 8e8bd6d..59c2f60 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -12,7 +12,10 @@ struct kvm_vcpu;
 struct kvm_run;
 struct kvm_exit_mmio;
 
-#ifndef CONFIG_KVM_ARM_VGIC
+#ifdef CONFIG_KVM_ARM_VGIC
+int vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		     struct kvm_exit_mmio *mmio);
+#else
 static inline int kvm_vgic_hyp_init(void)
 {
 	return 0;
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 5f40c5a..c5c4a9f 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -15,3 +15,4 @@ kvm-arm-y += $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 kvm-arm-y += arm.o guest.o mmu.o emulate.o reset.o
 
 obj-$(CONFIG_KVM) += kvm-arm.o
+obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 4955a51..d63e05e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -641,10 +641,16 @@ static int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	/* Get instruction length in bytes */
 	instr_len = (vcpu->arch.hsr & HSR_IL) ? 4 : 2;
 
-	/* Export MMIO operations to user space */
-	run->mmio.is_write	= is_write;
-	run->mmio.phys_addr	= fault_ipa;
-	run->mmio.len		= len;
+	/*
+	 * Prepare MMIO operation. First stash it in a private
+	 * structure that we can use for in-kernel emulation. If the
+	 * kernel cannot handle it, copy it into run->mmio (as it is
+	 * guaranteed to be the same type) and let user space do its
+	 * magic.
+	 */
+	mmio.mmio.is_write	= is_write;
+	mmio.mmio.phys_addr	= fault_ipa;
+	mmio.mmio.len		= len;
 	vcpu->arch.mmio_sign_extend = sign_extend;
 	vcpu->arch.mmio_rd = rd;
 
@@ -653,9 +659,14 @@ static int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		len, fault_ipa, (is_write) ? *vcpu_reg(vcpu, rd) : 0);
 
 	if (is_write)
-		memcpy(run->mmio.data, vcpu_reg(vcpu, rd), len);
+		memcpy(mmio.mmio.data, vcpu_reg(vcpu, rd), len);
 
-	run->exit_reason = vgic_handle_mmio(vcpu, run);
+	run->exit_reason = vgic_handle_mmio(vcpu, run, &mmio);
+	switch (run->exit_reason) {
+	case KVM_EXIT_MMIO:
+		run->mmio = mmio.mmio;
+		break;
+	}
 
 	/*
 	 * The MMIO instruction is emulated and should not be re-executed
@@ -664,7 +675,7 @@ static int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	*vcpu_pc(vcpu) += instr_len;
 	kvm_adjust_itstate(vcpu);
 	vcpu->stat.mmio_exits++;
-	return KVM_EXIT_MMIO;
+	return run->exit_reason;
 }
 
 /**
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
new file mode 100644
index 0000000..0502212
--- /dev/null
+++ b/arch/arm/kvm/vgic.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier at arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <asm/kvm_emulate.h>
+
+#define ACCESS_READ_VALUE	(1 << 0)
+#define ACCESS_READ_RAZ		(0 << 0)
+#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED	(0 << 1)
+#define ACCESS_WRITE_SETBIT	(1 << 1)
+#define ACCESS_WRITE_CLEARBIT	(2 << 1)
+#define ACCESS_WRITE_VALUE	(3 << 1)
+#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
+
+static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, u32 offset, int mode)
+{
+	int word_offset = offset & 3;
+	int shift = word_offset * 8;
+	u32 mask;
+	u32 regval;
+
+	/*
+	 * Any alignment fault should have been delivered to the guest
+	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
+	 */
+
+	mask = (~0U) >> (word_offset * 8);
+	if (reg)
+		regval = *reg;
+	else {
+		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
+		regval = 0;
+	}
+
+	if (mmio->mmio.is_write) {
+		u32 data = (*((u32 *)mmio->mmio.data) & mask) << shift;
+		switch (ACCESS_WRITE_MASK(mode)) {
+		case ACCESS_WRITE_IGNORED:
+			return;
+
+		case ACCESS_WRITE_SETBIT:
+			regval |= data;
+			break;
+
+		case ACCESS_WRITE_CLEARBIT:
+			regval &= ~data;
+			break;
+
+		case ACCESS_WRITE_VALUE:
+			regval = (regval & ~(mask << shift)) | data;
+			break;
+		}
+		*reg = regval;
+	} else {
+		switch (ACCESS_READ_MASK(mode)) {
+		case ACCESS_READ_RAZ:
+			regval = 0;
+			/* fall through */
+
+		case ACCESS_READ_VALUE:
+			*((u32 *)mmio->mmio.data) = (regval >> shift) & mask;
+		}
+	}
+}
+
+/* All this should be handled by kvm_bus_io_*()... FIXME!!! */
+struct mmio_range {
+	unsigned long base;
+	unsigned long len;
+	void (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+			    u32 offset);
+};
+
+static const struct mmio_range vgic_ranges[] = {
+	{}
+};
+
+static const
+struct mmio_range *find_matching_range(const struct mmio_range *ranges,
+				       struct kvm_exit_mmio *mmio,
+				       unsigned long base)
+{
+	const struct mmio_range *r = ranges;
+	unsigned long addr = mmio->mmio.phys_addr - base;
+
+	while (r->len) {
+		if (addr >= r->base &&
+		    (addr + mmio->mmio.len) <= (r->base + r->len))
+			return r;
+		r++;
+	}
+
+	return NULL;
+}
+
+/**
+ * vgic_handle_mmio - handle an in-kernel MMIO access
+ * @vcpu:	pointer to the vcpu performing the access
+ * @mmio:	pointer to the data describing the access
+ *
+ * returns KVM_EXIT_UNKNOWN if the MMIO access has been performed
+ * in kernel space, and KVM_EXIT_MMIO if it needs to be emulated
+ * in user space.
+ */
+int vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_exit_mmio *mmio)
+{
+	return KVM_EXIT_MMIO;
+}
-- 
1.7.10.3
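
A rough usage sketch, for illustration only and not part of the patch: later
patches in the series are expected to populate vgic_ranges with per-register
handlers built on vgic_reg_access(), and to have vgic_handle_mmio() dispatch
through find_matching_range(). The register offset, the 'enabled' backing
field and the vgic_dist_base field below are hypothetical placeholders, not
definitions introduced by this patch.

	/* Hypothetical handler for a 32-bit read/write distributor register */
	static void handle_mmio_ctlr(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio, u32 offset)
	{
		/* 'enabled' is an assumed per-VM backing word, not defined here */
		vgic_reg_access(mmio, &vcpu->kvm->arch.vgic.enabled, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	}

	/* The handler is registered by its offset range within the distributor */
	static const struct mmio_range vgic_ranges[] = {
		{
			.base		= 0,	/* assumed register offset */
			.len		= 4,
			.handle_mmio	= handle_mmio_ctlr,
		},
		{}
	};

	int vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_exit_mmio *mmio)
	{
		const struct mmio_range *range;
		/* vgic_dist_base is an assumed per-VM distributor base address */
		unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

		range = find_matching_range(vgic_ranges, mmio, base);
		if (!range)			/* not ours, punt to user space */
			return KVM_EXIT_MMIO;

		range->handle_mmio(vcpu, mmio, mmio->mmio.phys_addr - base);
		return KVM_EXIT_UNKNOWN;	/* handled in kernel space */
	}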