On Thu, Jun 04, 2020 at 02:33:52PM +0100, Marc Zyngier wrote:
> When using the PtrAuth feature in a guest, we need to save the host's
> keys before allowing the guest to program them. For that, we dump
> them in a per-CPU data structure (the so called host context).
>
> But both call sites that do this are in preemptible context,
> which may end up in disaster should the vcpu thread get preempted
> before reentering the guest. Yuck!
> Instead, save the keys eagerly on each vcpu_load(). This has an
> increased overhead, but is at least safe.
>
> Cc: stable@xxxxxxxxxxxxxxx
> Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>

This looks sound to me, given that kvm_arch_vcpu_load() is surrounded by
get_cpu() .. put_cpu() and is also called again when a preempted vcpu
thread gets scheduled back in.
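For reference, both paths look roughly like this (a simplified sketch based
on virt/kvm/kvm_main.c; exact details vary between kernel versions), so the
eager key save always runs with preemption disabled:

  void vcpu_load(struct kvm_vcpu *vcpu)
  {
          int cpu = get_cpu();                    /* disables preemption */

          preempt_notifier_register(&vcpu->preempt_notifier);
          kvm_arch_vcpu_load(vcpu, cpu);          /* host ptrauth keys saved here */
          put_cpu();                              /* re-enables preemption */
  }

  static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
  {
          struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

          /* invoked with preemption disabled when a preempted vcpu
           * thread is scheduled back onto a CPU */
          kvm_arch_vcpu_load(vcpu, cpu);
  }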
Reviewed-by: Mark Rutland <mark.rutland@xxxxxxx>

Thanks,
Mark.

> ---
>  arch/arm64/include/asm/kvm_emulate.h |  6 ------
>  arch/arm64/kvm/arm.c                 | 18 +++++++++++++++++-
>  arch/arm64/kvm/handle_exit.c         | 19 ++-----------------
>  3 files changed, 19 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index a30b4eec7cb4..977843e4d5fb 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
>  	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
>  }
>
> -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
> -{
> -	if (vcpu_has_ptrauth(vcpu))
> -		vcpu_ptrauth_disable(vcpu);
> -}
> -
>  static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
>  {
>  	return vcpu->arch.vsesr_el2;
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index d6988401c22a..152049c5055d 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -337,6 +337,12 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
>  	preempt_enable();
>  }
>
> +#define __ptrauth_save_key(regs, key)						\
> +({										\
> +	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
> +	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
> +})
> +
>  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>  	int *last_ran;
> @@ -370,7 +376,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	else
>  		vcpu_set_wfx_traps(vcpu);
>
> -	vcpu_ptrauth_setup_lazy(vcpu);
> +	if (vcpu_has_ptrauth(vcpu)) {
> +		struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
> +
> +		__ptrauth_save_key(ctxt->sys_regs, APIA);
> +		__ptrauth_save_key(ctxt->sys_regs, APIB);
> +		__ptrauth_save_key(ctxt->sys_regs, APDA);
> +		__ptrauth_save_key(ctxt->sys_regs, APDB);
> +		__ptrauth_save_key(ctxt->sys_regs, APGA);
> +
> +		vcpu_ptrauth_disable(vcpu);
> +	}
>  }
>
>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index eb194696ef62..065251efa2e6 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	return 1;
>  }
>
> -#define __ptrauth_save_key(regs, key)						\
> -({										\
> -	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
> -	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
> -})
> -
>  /*
>   * Handle the guest trying to use a ptrauth instruction, or trying to access a
>   * ptrauth register.
>   */
>  void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_cpu_context *ctxt;
> -
> -	if (vcpu_has_ptrauth(vcpu)) {
> +	if (vcpu_has_ptrauth(vcpu))
>  		vcpu_ptrauth_enable(vcpu);
> -		ctxt = vcpu->arch.host_cpu_context;
> -		__ptrauth_save_key(ctxt->sys_regs, APIA);
> -		__ptrauth_save_key(ctxt->sys_regs, APIB);
> -		__ptrauth_save_key(ctxt->sys_regs, APDA);
> -		__ptrauth_save_key(ctxt->sys_regs, APDB);
> -		__ptrauth_save_key(ctxt->sys_regs, APGA);
> -	} else {
> +	else
>  		kvm_inject_undefined(vcpu);
> -	}
>  }
>
>  /*
> --
> 2.26.2
>
_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm