Hi Andrew,

On Thu, 03 Sep 2020 14:53:01 +0100,
Andrew Scull <ascull@xxxxxxxxxx> wrote:
> 
> Save and restore the host context when switching to and from hyp. This
> gives hyp its own context that the host will not see as a step towards
> a full trust boundary between the two.
> 
> SP_EL0 and pointer authentication keys are currently shared between the
> host and hyp so don't need to be switched yet.
> 
> Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +
>  arch/arm64/kvm/hyp/nvhe/Makefile        |  2 +-
>  arch/arm64/kvm/hyp/nvhe/host.S          | 68 ++++++++++++++++++-------
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c      | 35 +++++++++++++
>  4 files changed, 88 insertions(+), 19 deletions(-)
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/hyp-main.c
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 821721b78ad9..4536b50ddc06 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -372,6 +372,8 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>  	ctxt_sys_reg(ctxt, key ## KEYHI_EL1)	= __val;	\
>  } while(0)
>  
> +DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
> +
>  static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpu_context *ctxt;
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index ddf98eb07b9d..46c89e8c30bc 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -6,7 +6,7 @@
>  asflags-y := -D__KVM_NVHE_HYPERVISOR__
>  ccflags-y := -D__KVM_NVHE_HYPERVISOR__
>  
> -obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
> +obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
>  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>  	 ../fpsimd.o ../hyp-entry.o
>  
> diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
> index d4e8b8084020..1062547853db 100644
> --- a/arch/arm64/kvm/hyp/nvhe/host.S
> +++ b/arch/arm64/kvm/hyp/nvhe/host.S
> @@ -12,6 +12,55 @@
>  
>  .text
>  
> +SYM_FUNC_START(__host_exit)
> +	stp	x0, x1, [sp, #-16]!
> +
> +	get_host_ctxt	x0, x1
> +
> +	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
> +
> +	/* Store the guest regs x2 and x3 */

These comments are massively confusing. Please stick with the
conventional KVM terminology, where the host isn't a guest.
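Something like this for the first one, for example (and similarly for
all the other save/restore comments in this file):

	/* Store the host regs x2 and x3 */
	stp	x2, x3, [x0, #CPU_XREG_OFFSET(2)]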
> +	stp	x2, x3, [x0, #CPU_XREG_OFFSET(2)]
> +
> +	/* Retrieve the guest regs x0-x1 from the stack */
> +	ldp	x2, x3, [sp], #16	// x0, x1
> +
> +	// Store the guest regs x0-x1 and x4-x17
> +	stp	x2, x3, [x0, #CPU_XREG_OFFSET(0)]
> +	stp	x4, x5, [x0, #CPU_XREG_OFFSET(4)]
> +	stp	x6, x7, [x0, #CPU_XREG_OFFSET(6)]
> +	stp	x8, x9, [x0, #CPU_XREG_OFFSET(8)]
> +	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
> +	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
> +	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
> +	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
> +
> +	/* Store the guest regs x18-x29, lr */
> +	save_callee_saved_regs x0
> +
> +	/* Save the host context pointer in x29 across the function call */
> +	mov	x29, x0
> +	bl	handle_trap
> +
> +	/* Restore guest regs x0-x17 */
> +	ldp	x0, x1, [x29, #CPU_XREG_OFFSET(0)]
> +	ldp	x2, x3, [x29, #CPU_XREG_OFFSET(2)]
> +	ldp	x4, x5, [x29, #CPU_XREG_OFFSET(4)]
> +	ldp	x6, x7, [x29, #CPU_XREG_OFFSET(6)]
> +	ldp	x8, x9, [x29, #CPU_XREG_OFFSET(8)]
> +	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
> +	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
> +	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
> +	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
> +
> +	/* Restore guest regs x18-x29, lr */
> +	restore_callee_saved_regs x29

This is a lot of save/restoring on each and every HVC. And I fear that
at some stage, you will want to restore some EL2-specific registers
too, adding even more to the overhead. I'll have a go at measuring by
how much we regress with this.

> +
> +	/* Do not touch any register after this! */
> +	eret
> +	sb
> +SYM_FUNC_END(__host_exit)
> +
>  SYM_FUNC_START(__hyp_do_panic)
>  	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
>  		      PSR_MODE_EL1h)
> @@ -35,7 +84,7 @@ SYM_FUNC_END(__hyp_do_panic)
>  
>  	/* Check for a stub HVC call */
>  	cmp	x0, #HVC_STUB_HCALL_NR
> -	b.hs	1f
> +	b.hs	__host_exit
>  
>  	/*
>  	 * Compute the idmap address of __kvm_handle_stub_hvc and
> 
>  	/* x5 = __pa(x5) */
>  	sub	x5, x5, x6
>  	br	x5
> -
> -1:
> -	/*
> -	 * Shuffle the parameters before calling the function
> -	 * pointed to in x0. Assumes parameters in x[1,2,3].
> -	 */
> -	kern_hyp_va	x0
> -	str	lr, [sp, #-16]!
> -	mov	lr, x0
> -	mov	x0, x1
> -	mov	x1, x2
> -	mov	x2, x3
> -	blr	lr
> -	ldr	lr, [sp], #16
> -
> -	eret
> -	sb
>  .endm
>  
>  .macro invalid_host_vect
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> new file mode 100644
> index 000000000000..c8938e09f585
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2020 - Google Inc
> + * Author: Andrew Scull <ascull@xxxxxxxxxx>
> + */
> +
> +#include <hyp/switch.h>
> +
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_host.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
> +
> +typedef unsigned long (*hypcall_fn_t)
> +	(unsigned long, unsigned long, unsigned long);
> +
> +void handle_trap(struct kvm_cpu_context *host_ctxt) {

Coding style.
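The opening brace of a function definition goes on its own line (see
Documentation/process/coding-style.rst), i.e.:

	void handle_trap(struct kvm_cpu_context *host_ctxt)
	{
		...
	}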
> + */ > + func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]); > + ret = func(host_ctxt->regs.regs[1], > + host_ctxt->regs.regs[2], > + host_ctxt->regs.regs[3]); > + host_ctxt->regs.regs[0] = ret; > +} > -- > 2.28.0.402.g5ffc5be6b7-goog > > Thanks, M. -- Without deviation from the norm, progress is not possible. _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm