On Thu, Jul 12, 2012 at 7:54 AM, Rusty Russell <rusty.russell@xxxxxxxxxx> wrote:
> Simple code move from emulate.c into coproc.c.
>
> Signed-off-by: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
> ---
> arch/arm/include/asm/kvm_coproc.h | 28 +++
> arch/arm/include/asm/kvm_emulate.h | 6 -
> arch/arm/kvm/Makefile | 2 +-
> arch/arm/kvm/arm.c | 1 +
> arch/arm/kvm/coproc.c | 377 ++++++++++++++++++++++++++++++++++++
> arch/arm/kvm/emulate.c | 352 ---------------------------------
> 6 files changed, 407 insertions(+), 359 deletions(-)
> create mode 100644 arch/arm/include/asm/kvm_coproc.h
> create mode 100644 arch/arm/kvm/coproc.c
>
> diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
> new file mode 100644
> index 0000000..6bed190
> --- /dev/null
> +++ b/arch/arm/include/asm/kvm_coproc.h
> @@ -0,0 +1,28 @@
> +/*
> + * Copyright (C) 2012 Rusty Russell IBM Corporation
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License, version 2, as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
> + */
> +
> +#ifndef __ARM_KVM_COPROC_H__
> +#define __ARM_KVM_COPROC_H__
> +#include <linux/kvm_host.h>
> +
> +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +#endif /* __ARM_KVM_COPROC_H__ */
> diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
> index 7774ec4..1a13319 100644
> --- a/arch/arm/include/asm/kvm_emulate.h
> +++ b/arch/arm/include/asm/kvm_emulate.h
> @@ -60,12 +60,6 @@ static inline enum vcpu_mode vcpu_mode(struct kvm_vcpu *vcpu)
> return mode;
> }
>
> -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> unsigned long instr);
> diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
> index 9bd4af4..6d0fce7 100644
> --- a/arch/arm/kvm/Makefile
> +++ b/arch/arm/kvm/Makefile
> @@ -12,7 +12,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += init.o interrupts.o exports.o
>
> kvm-arm-y += $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
>
> -kvm-arm-y += arm.o guest.o mmu.o emulate.o reset.o
> +kvm-arm-y += arm.o guest.o mmu.o emulate.o reset.o coproc.o
>
> obj-$(CONFIG_KVM) += kvm-arm.o
> obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
> diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
> index f648735..4cf8d3d 100644
> --- a/arch/arm/kvm/arm.c
> +++ b/arch/arm/kvm/arm.c
> @@ -43,6 +43,7 @@
> #include <asm/kvm_asm.h>
> #include <asm/kvm_mmu.h>
> #include <asm/kvm_emulate.h>
> +#include <asm/kvm_coproc.h>
>
> static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
>
> diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
> new file mode 100644
> index 0000000..b15c8b1
> --- /dev/null
> +++ b/arch/arm/kvm/coproc.c
> @@ -0,0 +1,377 @@
> +/*
> + * Copyright (C) 2012 - Virtual Open Systems and Columbia University
> + * Author: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License, version 2, as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
> + */
> +#include <linux/mm.h>
> +#include <asm/kvm_arm.h>
> +#include <asm/kvm_host.h>
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_coproc.h>
> +#include <trace/events/kvm.h>
> +
> +#include "trace.h"
> +
> +/******************************************************************************
> + * Co-processor emulation
> + *****************************************************************************/
> +
> +struct coproc_params {
> + unsigned long CRn;
> + unsigned long CRm;
> + unsigned long Op1;
> + unsigned long Op2;
> + unsigned long Rt1;
> + unsigned long Rt2;
> + bool is_64bit;
> + bool is_write;
> +};
> +
> +static void print_cp_instr(const struct coproc_params *p)
> +{
> + /* Look, we even formatted it for you to paste into the table! */
> + if (p->is_64bit) {
> + kvm_err(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
> + p->CRm, p->Op1, p->is_write ? "write" : "read");
> + } else {
> + kvm_err(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
> + " func_%s },\n",
> + p->CRn, p->CRm, p->Op1, p->Op2,
> + p->is_write ? "write" : "read");
> + }
> +}
> +
> +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + kvm_inject_undefined(vcpu);
> + return 0;
> +}
> +
> +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + kvm_inject_undefined(vcpu);
> + return 0;
> +}
> +
> +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + kvm_inject_undefined(vcpu);
> + return 0;
> +}
> +
> +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + kvm_inject_undefined(vcpu);
> + return 0;
> +}
> +
> +static bool ignore_write(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + if (arg)
> + trace_kvm_emulate_cp15_imp(p->Op1, p->Rt1, p->CRn, p->CRm,
> + p->Op2, p->is_write);
> + return true;
> +}
> +
> +static bool read_zero(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + if (arg)
> + trace_kvm_emulate_cp15_imp(p->Op1, p->Rt1, p->CRn, p->CRm,
> + p->Op2, p->is_write);
> + *vcpu_reg(vcpu, p->Rt1) = 0;
> + return true;
> +}
> +
> +static bool access_l2ctlr(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + u32 l2ctlr, ncores;
> +
> + if (p->is_write)
> + return false;
> +
> + switch (vcpu->arch.target) {
> + case KVM_ARM_TARGET_CORTEX_A15:
> + asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
> + l2ctlr &= ~(3 << 24);
> + ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
> + l2ctlr |= (ncores & 3) << 24;
> + *vcpu_reg(vcpu, p->Rt1) = l2ctlr;
> + return true;
> + default:
> + return false;
> + }
> +}
> +
> +static bool access_l2ectlr(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + if (p->is_write)
> + return false;
> +
> + switch (vcpu->arch.target) {
> + case KVM_ARM_TARGET_CORTEX_A15:
> + *vcpu_reg(vcpu, p->Rt1) = 0;
> + return true;
> + default:
> + return false;
> + }
> +}
> +
> +static bool access_cbar(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + if (p->is_write)
> + return false;
> + return read_zero(vcpu, p, 0);
> +}
> +
> +static bool access_actlr(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + u32 actlr;
> +
> + if (p->is_write)
> + return ignore_write(vcpu, p, 0);
> +
> + switch (vcpu->arch.target) {
> + case KVM_ARM_TARGET_CORTEX_A15:
> + asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
> + /* Make the SMP bit consistent with the guest configuration */
> + if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
> + actlr |= 1U << 6;
> + else
> + actlr &= ~(1U << 6);
> + *vcpu_reg(vcpu, p->Rt1) = actlr;
> + break;
> + default:
> + asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
> + *vcpu_reg(vcpu, p->Rt1) = actlr;
> + break;
> + }
> +
> + return true;
> +}
> +
> +static bool access_dcsw(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + u32 val;
> +
> + if (!p->is_write)
> + return false;
> +
> + val = *vcpu_reg(vcpu, p->Rt1);
> +
> + switch (p->CRm) {
> + case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
> + case 14: /* DCCISW */
> + asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
> + break;
> +
> + case 10: /* DCCSW */
> + asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
> + break;
> + }
> +
> + cpumask_setall(&vcpu->arch.require_dcache_flush);
> + cpumask_clear_cpu(vcpu->cpu, &vcpu->arch.require_dcache_flush);
> +
> + return true;
> +}
> +
> +/*
> + * We could trap ID_DFR0 and tell the guest we don't support performance
> + * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
> + * NAKed, so it will read the PMCR anyway.
> + *
> + * Therefore we tell the guest we have 0 counters. Unfortunately, we
> + * must always support PMCCNTR (the cycle counter): we just WI/RAZ for
> + * all PM registers, which doesn't crash the guest kernel at least.
> + */
> +static bool pm_fake(struct kvm_vcpu *vcpu,
> + const struct coproc_params *p,
> + unsigned long arg)
> +{
> + if (p->is_write)
> + return ignore_write(vcpu, p, arg);
> + else
> + return read_zero(vcpu, p, arg);
> +}
> +
> +#define access_pmcr pm_fake
> +#define access_pmcntenset pm_fake
> +#define access_pmcntenclr pm_fake
> +#define access_pmovsr pm_fake
> +#define access_pmselr pm_fake
> +#define access_pmceid0 pm_fake
> +#define access_pmceid1 pm_fake
> +#define access_pmccntr pm_fake
> +#define access_pmxevtyper pm_fake
> +#define access_pmxevcntr pm_fake
> +#define access_pmuserenr pm_fake
> +#define access_pmintenset pm_fake
> +#define access_pmintenclr pm_fake
> +
> +struct coproc_reg {
> + unsigned long CRn;
> + unsigned long CRm;
> + unsigned long Op1;
> + unsigned long Op2;
> +
> + bool is_64;
> +
> + bool (*access)(struct kvm_vcpu *,
> + const struct coproc_params *,
> + unsigned long);
> + unsigned long arg;
> +};
> +
> +#define CRn(_x) .CRn = _x
> +#define CRm(_x) .CRm = _x
> +#define Op1(_x) .Op1 = _x
> +#define Op2(_x) .Op2 = _x
> +#define is64 .is_64 = true
> +#define is32 .is_64 = false
> +
> +static const struct coproc_reg cp15_regs[] = {
> + /*
> + * ACTRL access:
> + *
> + * Ignore writes, and read returns the host settings.
> + */
> + { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, access_actlr},
> + /*
> + * DC{C,I,CI}SW operations:
> + */
> + { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
> + { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
> + { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
> + /*
> + * L2CTLR access (guest wants to know #CPUs).
> + */
> + { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, access_l2ctlr},
> + { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
> + /*
> + * Dummy performance monitor implementation.
> + */
> + { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
> + { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
> + { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
> + { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
> + { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
> + { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
> + { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
> + { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
> +
> + /* The Configuration Base Address Register (R/O). */
> + { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
> +};
> +
> +static int emulate_cp15(struct kvm_vcpu *vcpu,
> + const struct coproc_params *params)
> +{
> + unsigned long instr_len, i;
> +
> + for (i = 0; i < ARRAY_SIZE(cp15_regs); i++) {
> + const struct coproc_reg *r = &cp15_regs[i];
> +
> + if (params->is_64bit != r->is_64)
> + continue;
> + if (params->CRn != r->CRn)
> + continue;
> + if (params->CRm != r->CRm)
> + continue;
> + if (params->Op1 != r->Op1)
> + continue;
> + if (params->Op2 != r->Op2)
> + continue;
> +
> + /* If function fails, it should complain. */
> + if (!r->access(vcpu, params, r->arg))
> + goto undef;
> +
> + /* Skip instruction, since it was emulated */
> + instr_len = ((vcpu->arch.hsr >> 25) & 1) ? 4 : 2;
> + *vcpu_pc(vcpu) += instr_len;
> + kvm_adjust_itstate(vcpu);
> + return 0;
> + }
> +
> + kvm_err("Unsupported guest CP15 access at: %08x\n",
> + vcpu->arch.regs.pc);
> + print_cp_instr(params);
> +undef:
> + kvm_inject_undefined(vcpu);
> + return 0;
> +}
> +
> +/**
> + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + struct coproc_params params;
> +
> + params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
> + params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
> + params.is_write = ((vcpu->arch.hsr & 1) == 0);
> + params.is_64bit = true;
> +
> + params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
> + params.Op2 = 0;
> + params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
> + params.CRn = 0;
> +
> + return emulate_cp15(vcpu, &params);
> +}
> +
> +/**
> + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + struct coproc_params params;
> +
> + params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
> + params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
> + params.is_write = ((vcpu->arch.hsr & 1) == 0);
> + params.is_64bit = false;
> +
> + params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
> + params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
> + params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
> + params.Rt2 = 0;
> +
> + return emulate_cp15(vcpu, &params);
> +}
> diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
> index a17c6f4..a596bc4 100644
> --- a/arch/arm/kvm/emulate.c
> +++ b/arch/arm/kvm/emulate.c
> @@ -154,358 +154,6 @@ static int kvm_instr_index(u32 instr, u32 table[][2], int table_entries)
> return INSTR_NONE;
> }
>
> -/******************************************************************************
> - * Co-processor emulation
> - *****************************************************************************/
> -
> -struct coproc_params {
> - unsigned long CRn;
> - unsigned long CRm;
> - unsigned long Op1;
> - unsigned long Op2;
> - unsigned long Rt1;
> - unsigned long Rt2;
> - bool is_64bit;
> - bool is_write;
> -};
> -
> -static void print_cp_instr(const struct coproc_params *p)
> -{
> - /* Look, we even formatted it for you to paste into the table! */
> - if (p->is_64bit) {
> - kvm_err(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
> - p->CRm, p->Op1, p->is_write ? "write" : "read");
> - } else {
> - kvm_err(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
> - " func_%s },\n",
> - p->CRn, p->CRm, p->Op1, p->Op2,
> - p->is_write ? "write" : "read");
> - }
> -}
> -
> -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - kvm_inject_undefined(vcpu);
> - return 0;
> -}
> -
> -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - kvm_inject_undefined(vcpu);
> - return 0;
> -}
> -
> -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - kvm_inject_undefined(vcpu);
> - return 0;
> -}
> -
> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - kvm_inject_undefined(vcpu);
> - return 0;
> -}
> -
> -static bool ignore_write(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - if (arg)
> - trace_kvm_emulate_cp15_imp(p->Op1, p->Rt1, p->CRn, p->CRm,
> - p->Op2, p->is_write);
> - return true;
> -}
> -
> -static bool read_zero(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - if (arg)
> - trace_kvm_emulate_cp15_imp(p->Op1, p->Rt1, p->CRn, p->CRm,
> - p->Op2, p->is_write);
> - *vcpu_reg(vcpu, p->Rt1) = 0;
> - return true;
> -}
> -
> -static bool access_l2ctlr(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - u32 l2ctlr, ncores;
> -
> - if (p->is_write)
> - return false;
> -
> - switch (vcpu->arch.target) {
> - case KVM_ARM_TARGET_CORTEX_A15:
> - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
> - l2ctlr &= ~(3 << 24);
> - ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
> - l2ctlr |= (ncores & 3) << 24;
> - *vcpu_reg(vcpu, p->Rt1) = l2ctlr;
> - return true;
> - default:
> - return false;
> - }
> -}
> -
> -static bool access_l2ectlr(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - if (p->is_write)
> - return false;
> -
> - switch (vcpu->arch.target) {
> - case KVM_ARM_TARGET_CORTEX_A15:
> - *vcpu_reg(vcpu, p->Rt1) = 0;
> - return true;
> - default:
> - return false;
> - }
> -}
> -
> -static bool access_cbar(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - if (p->is_write)
> - return false;
> - return read_zero(vcpu, p, 0);
> -}
> -
> -static bool access_actlr(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - u32 actlr;
> -
> - if (p->is_write)
> - return ignore_write(vcpu, p, 0);
> -
> - switch (vcpu->arch.target) {
> - case KVM_ARM_TARGET_CORTEX_A15:
> - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
> - /* Make the SMP bit consistent with the guest configuration */
> - if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
> - actlr |= 1U << 6;
> - else
> - actlr &= ~(1U << 6);
> - *vcpu_reg(vcpu, p->Rt1) = actlr;
> - break;
> - default:
> - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
> - *vcpu_reg(vcpu, p->Rt1) = actlr;
> - break;
> - }
> -
> - return true;
> -}
> -
> -static bool access_dcsw(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - u32 val;
> -
> - if (!p->is_write)
> - return false;
> -
> - val = *vcpu_reg(vcpu, p->Rt1);
> -
> - switch (p->CRm) {
> - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
> - case 14: /* DCCISW */
> - asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
> - break;
> -
> - case 10: /* DCCSW */
> - asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
> - break;
> - }
> -
> - cpumask_setall(&vcpu->arch.require_dcache_flush);
> - cpumask_clear_cpu(vcpu->cpu, &vcpu->arch.require_dcache_flush);
> -
> - return true;
> -}
> -
> -/*
> - * We could trap ID_DFR0 and tell the guest we don't support performance
> - * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
> - * NAKed, so it will read the PMCR anyway.
> - *
> - * Therefore we tell the guest we have 0 counters. Unfortunately, we
> - * must always support PMCCNTR (the cycle counter): we just WI/RAZ for
> - * all PM registers, which doesn't crash the guest kernel at least.
> - */
> -static bool pm_fake(struct kvm_vcpu *vcpu,
> - const struct coproc_params *p,
> - unsigned long arg)
> -{
> - if (p->is_write)
> - return ignore_write(vcpu, p, arg);
> - else
> - return read_zero(vcpu, p, arg);
> -}
> -
> -#define access_pmcr pm_fake
> -#define access_pmcntenset pm_fake
> -#define access_pmcntenclr pm_fake
> -#define access_pmovsr pm_fake
> -#define access_pmselr pm_fake
> -#define access_pmceid0 pm_fake
> -#define access_pmceid1 pm_fake
> -#define access_pmccntr pm_fake
> -#define access_pmxevtyper pm_fake
> -#define access_pmxevcntr pm_fake
> -#define access_pmuserenr pm_fake
> -#define access_pmintenset pm_fake
> -#define access_pmintenclr pm_fake
> -
> -struct coproc_reg {
> - unsigned long CRn;
> - unsigned long CRm;
> - unsigned long Op1;
> - unsigned long Op2;
> -
> - bool is_64;
> -
> - bool (*access)(struct kvm_vcpu *,
> - const struct coproc_params *,
> - unsigned long);
> - unsigned long arg;
> -};
> -
> -#define CRn(_x) .CRn = _x
> -#define CRm(_x) .CRm = _x
> -#define Op1(_x) .Op1 = _x
> -#define Op2(_x) .Op2 = _x
> -#define is64 .is_64 = true
> -#define is32 .is_64 = false
> -
> -static const struct coproc_reg cp15_regs[] = {
> - /*
> - * ACTRL access:
> - *
> - * Ignore writes, and read returns the host settings.
> - */
> - { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, access_actlr},
> - /*
> - * DC{C,I,CI}SW operations:
> - */
> - { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
> - { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
> - { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
> - /*
> - * L2CTLR access (guest wants to know #CPUs).
> - */
> - { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, access_l2ctlr},
> - { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
> - /*
> - * Dummy performance monitor implementation.
> - */
> - { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
> - { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
> - { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
> - { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
> - { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
> - { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
> - { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
> - { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
> -
> - /* The Configuration Base Address Register (R/O). */
> - { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
> -};
> -
> -static int emulate_cp15(struct kvm_vcpu *vcpu,
> - const struct coproc_params *params)
> -{
> - unsigned long instr_len, i;
> -
> - for (i = 0; i < ARRAY_SIZE(cp15_regs); i++) {
> - const struct coproc_reg *r = &cp15_regs[i];
> -
> - if (params->is_64bit != r->is_64)
> - continue;
> - if (params->CRn != r->CRn)
> - continue;
> - if (params->CRm != r->CRm)
> - continue;
> - if (params->Op1 != r->Op1)
> - continue;
> - if (params->Op2 != r->Op2)
> - continue;
> -
> - /* If function fails, it should complain. */
> - if (!r->access(vcpu, params, r->arg))
> - goto undef;
> -
> - /* Skip instruction, since it was emulated */
> - instr_len = ((vcpu->arch.hsr >> 25) & 1) ? 4 : 2;
> - *vcpu_pc(vcpu) += instr_len;
> - kvm_adjust_itstate(vcpu);
> - return 0;
> - }
> -
> - kvm_err("Unsupported guest CP15 access at: %08x\n",
> - vcpu->arch.regs.pc);
> - print_cp_instr(params);
> -undef:
> - kvm_inject_undefined(vcpu);
> - return 0;
> -}
> -
> -/**
> - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
> - * @vcpu: The VCPU pointer
> - * @run: The kvm_run struct
> - */
> -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - struct coproc_params params;
> -
> - params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
> - params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
> - params.is_write = ((vcpu->arch.hsr & 1) == 0);
> - params.is_64bit = true;
> -
> - params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
> - params.Op2 = 0;
> - params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
> - params.CRn = 0;
> -
> - return emulate_cp15(vcpu, &params);
> -}
> -
> -/**
> - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
> - * @vcpu: The VCPU pointer
> - * @run: The kvm_run struct
> - */
> -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> - struct coproc_params params;
> -
> - params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
> - params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
> - params.is_write = ((vcpu->arch.hsr & 1) == 0);
> - params.is_64bit = false;
> -
> - params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
> - params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
> - params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
> - params.Rt2 = 0;
> -
> - return emulate_cp15(vcpu, &params);
> -}
> -
> /**
> * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
> * @vcpu: the vcpu pointer
> --
> 1.7.9.5
>

Good idea! Applied, but I may have created a slight mess by holding off on
the first patch about the target, so the merge may be a little painful,
sorry about that.

Thanks,
-Christoffer

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm