Abstract the MIPS KVM guest CP0 register access macros into inline functions
which are generated by macros. This allows them to be generated differently
for VZ, where they will usually need to access the hardware guest CP0 context
rather than the saved values in RAM.

Accessors for each individual register are generated using these macros:

- __BUILD_KVM_*_SW() for registers which are not present in the VZ hardware
  guest context, so kvm_{read,write}_c0_guest_##name() will access the saved
  value in RAM regardless of whether VZ is enabled.

- __BUILD_KVM_*_HW() for registers which are present in the VZ hardware guest
  context, so kvm_{read,write}_c0_guest_##name() will access the hardware
  register when VZ is enabled.

These build the underlying accessors using further macros:

- __BUILD_KVM_*_SAVED() builds e.g. kvm_{read,write}_sw_gc0_##name()
  functions for accessing the saved versions of the registers in RAM. This is
  used to implement the common kvm_{read,write}_c0_guest_##name() accessors
  with T&E, where registers are always stored in RAM; they are also available
  with VZ HW registers to allow them to be accessed while saved.

- __BUILD_KVM_*_VZ() builds e.g. kvm_{read,write}_vz_gc0_##name() functions
  for accessing the VZ hardware guest context registers directly. This is
  used to implement the common kvm_{read,write}_c0_guest_##name() accessors
  with VZ.

- __BUILD_KVM_*_WRAP() builds wrappers with different names, which allows the
  common kvm_{read,write}_c0_guest_##name() functions to be implemented using
  the VZ accessors while still having the SAVED accessors available too.

- __BUILD_KVM_SAVE_VZ() builds functions for saving and restoring VZ hardware
  guest context register state to RAM, improving the conciseness of VZ
  context saving and restoring.

Similar macros exist for generating modifiers (set, clear, change), either
with a normal unlocked read/modify/write, or using atomic LL/SC sequences.

These changes also alter the types of 32-bit registers to u32 instead of
unsigned long, which requires some changes to the printk() format strings in
MIPS KVM.
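To illustrate (hand-expanded here for review; this expansion is not itself
part of the patch), __BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0) generates
roughly the following:

  static inline u32 kvm_read_sw_gc0_count(struct mips_coproc *cop0)
  {
          return cop0->reg[MIPS_CP0_COUNT][0];
  }
  static inline void kvm_write_sw_gc0_count(struct mips_coproc *cop0, u32 val)
  {
          cop0->reg[MIPS_CP0_COUNT][0] = val;
  }
  /* wrappers from __BUILD_KVM_RW_WRAP(c0_guest_count, sw_gc0_count, 32) */
  static inline u32 kvm_read_c0_guest_count(struct mips_coproc *cop0)
  {
          return kvm_read_sw_gc0_count(cop0);
  }
  static inline void kvm_write_c0_guest_count(struct mips_coproc *cop0, u32 val)
  {
          kvm_write_sw_gc0_count(cop0, val);
  }

For a __BUILD_KVM_*_HW() register with VZ enabled, the common
kvm_{read,write}_c0_guest_##name() wrappers instead call
kvm_{read,write}_vz_gc0_##name(), which use read_gc0_##name() and
write_gc0_##name() on the hardware guest context, while the sw_gc0 accessors
and kvm_{save,restore}_gc0_##name() remain available for the copy in RAM.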
Signed-off-by: James Hogan <james.hogan@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: "Radim Krčmář" <rkrcmar@xxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: linux-mips@xxxxxxxxxxxxxx
Cc: kvm@xxxxxxxxxxxxxxx
---
 arch/mips/include/asm/kvm_host.h | 343 +++++++++++++++++++++++---------
 arch/mips/kvm/emulate.c          |   2 +-
 arch/mips/kvm/mips.c             |   6 +-
 arch/mips/kvm/trap_emul.c        |   8 +-
 4 files changed, 264 insertions(+), 95 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 3fee98e44f67..cff18b2156f9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -363,78 +363,6 @@ struct kvm_vcpu_arch {
 u8 msa_enabled;
 };
 
-
-#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
-#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
-#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
-#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
-#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
-#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
-#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
-#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
-#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
-#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
-#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
-#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
-#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
-#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
-#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
-#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
-#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
-#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
-#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
-#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
-
-/*
- * Some of the guest registers may be modified asynchronously (e.g. from a
- * hrtimer callback in hard irq context) and therefore need stronger atomicity
- * guarantees than other registers.
- */
-
 static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
 unsigned long val)
 {
@@ -485,26 +413,265 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
 } while (unlikely(!temp));
 }
 
-#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+/* Guest register types, used in accessor build below */
+#define __KVMT32 u32
+#define __KVMTl unsigned long
 
-/* Cause can be modified asynchronously from hardirq hrtimer callback */
-#define kvm_set_c0_guest_cause(cop0, val) \
- _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_clear_c0_guest_cause(cop0, val) \
- _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_change_c0_guest_cause(cop0, change, val) \
- _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
- change, val)
-
-#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
+/*
+ * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
+ * These operate on the saved guest C0 state in RAM.
+ */
+
+/* Generate saved context simple accessors */
+#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return cop0->reg[(_reg)][(sel)]; \
+} \
+static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] = val; \
+}
+
+/* Generate saved context bitwise modifiers */
+#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] |= val; \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] &= ~val; \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ unsigned long _mask = mask; \
+ cop0->reg[(_reg)][(sel)] &= ~_mask; \
+ cop0->reg[(_reg)][(sel)] |= val & _mask; \
+}
+
+/* Generate saved context atomic bitwise modifiers */
+#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
 { \
- kvm_clear_c0_guest_ebase(cop0, change); \
- kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+ _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
+ val); \
 }
+/*
+ * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
+ * These operate on the VZ guest C0 context in hardware.
+ */
+
+/* Generate VZ guest context simple accessors */
+#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return read_gc0_##name(); \
+} \
+static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ write_gc0_##name(val); \
+}
+
+/* Generate VZ guest context bitwise modifiers */
+#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ set_gc0_##name(val); \
+} \
+static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ clear_gc0_##name(val); \
+} \
+static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ change_gc0_##name(mask, val); \
+}
+
+/* Generate VZ guest context save/restore to/from saved context */
+#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
+static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
+} \
+static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
+}
+
+/*
+ * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
+ * These wrap a set of operations to provide them with a different name.
+ */
+
+/* Generate simple accessor wrapper */
+#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
+static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
+{ \
+ return kvm_read_##name2(cop0); \
+} \
+static inline void kvm_write_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_write_##name2(cop0, val); \
+}
+
+/* Generate bitwise modifier wrapper */
+#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
+static inline void kvm_set_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_set_##name2(cop0, val); \
+} \
+static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_clear_##name2(cop0, val); \
+} \
+static inline void kvm_change_##name1(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ kvm_change_##name2(cop0, mask, val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
+ * These generate accessors operating on the saved context in RAM, and wrap them
+ * with the common guest C0 accessors (for use by common emulation code).
+ */
+
+#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
+ __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#ifndef CONFIG_KVM_MIPS_VZ
+
+/*
+ * T&E (trap & emulate software based virtualisation)
+ * We generate the common accessors operating exclusively on the saved context
+ * in RAM.
+ */
+
+#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
+#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
+
+#else
+
+/*
+ * VZ (hardware assisted virtualisation)
+ * These macros use the active guest state in VZ mode (hardware registers),
+ */
+
+/*
+ * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
+ * These generate accessors operating on the VZ guest context in hardware, and
+ * wrap them with the common guest C0 accessors (for use by common emulation
+ * code).
+ *
+ * Accessors operating on the saved context in RAM are also generated to allow
+ * convenient explicit saving and restoring of the state.
+ */
+
+#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
+ __BUILD_KVM_SAVE_VZ(name, _reg, sel)
+
+#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
+
+/*
+ * We can't do atomic modifications of COP0 state if hardware can modify it.
+ * Races must be handled explicitly.
+ */
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
+
+#endif
+
+/*
+ * Define accessors for CP0 registers that are accessible to the guest. These
+ * are primarily used by common emulation code, which may need to access the
+ * registers differently depending on the implementation.
+ *
+ * fns_hw/sw name type reg num select
+ */
+__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
+__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
+__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
+__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
+__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
+__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
+__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
+__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
+__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
+__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
+__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
+__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
+__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
+__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
+__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
+__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
+__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
+__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
+__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
+__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
+__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
+__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
+__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
+__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
+__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
+__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
+__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
+__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)
+
+/* Bitwise operations (on HW state) */
+__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)
+
 /* Helpers */
 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index f09a161926e7..e6fce30eb440 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -881,7 +881,7 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 struct mips_coproc *cop0 = vcpu->arch.cop0;
 unsigned long pc = vcpu->arch.pc;
 
- kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ kvm_err("[%#lx] COP0_TLBR [%d]\n", pc, kvm_read_c0_guest_index(cop0));
 return EMULATE_FAIL;
 }
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9dd9905681ed..a500d2be2022 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1114,7 +1114,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
 cop0 = vcpu->arch.cop0;
- kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+ kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
 kvm_read_c0_guest_status(cop0),
 kvm_read_c0_guest_cause(cop0));
@@ -1289,7 +1289,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 break;
 
 case EXCCODE_TLBS:
- kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
 badvaddr);
@@ -1360,7 +1360,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 opc += 1;
 inst = 0;
 kvm_get_badinstr(opc, vcpu, &inst);
- kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
 exccode, opc, inst, badvaddr,
 kvm_read_c0_guest_status(vcpu->arch.cop0));
 kvm_arch_vcpu_dump_regs(vcpu);
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 1e039fb592e0..cc9a8667b80e 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -55,7 +55,7 @@ static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
 opc += 1;
 kvm_get_badinstr(opc, vcpu, &inst);
 
- kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
 exccode, opc, inst, badvaddr,
 kvm_read_c0_guest_status(vcpu->arch.cop0));
 kvm_arch_vcpu_dump_regs(vcpu);
@@ -936,10 +936,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 if (v & CAUSEF_DC) {
 /* disable timer first */
 kvm_mips_count_disable_cause(vcpu);
- kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
 } else {
 /* enable timer last */
- kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
 kvm_mips_count_enable_cause(vcpu);
 }
 } else {
--
git-series 0.8.10