Although KVM couples API and APK for simplicity, the architecture
makes no such requirement, and the two can be independently set or
cleared.

Check for which of the two possible reasons we have trapped here, and
if the corresponding L1 control bit isn't set, delegate the handling
so that the trap gets forwarded. Otherwise, set this exact bit in
HCR_EL2 and resume the guest.

Of course, in the non-NV case, we keep setting both bits and are done
with it.

Note that the entry core already saves/restores the keys should any
of the two control bits be set.

This results in a bit of rework, and the removal of the (trivial)
vcpu_ptrauth_enable() helper.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_emulate.h    |  5 ----
 arch/arm64/kvm/hyp/include/hyp/switch.h | 32 +++++++++++++++++++++----
 2 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index debc3753d2ef..d2177bc77844 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -125,11 +125,6 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
-}
-
 static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f5f701f309a9..a0908d7a8f56 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -480,11 +480,35 @@ DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm_cpu_context *ctxt;
-	u64 val;
+	u64 enable = 0;
 
 	if (!vcpu_has_ptrauth(vcpu))
 		return false;
 
+	/*
+	 * NV requires us to handle API and APK independently, just in
+	 * case the hypervisor is totally nuts. Please barf >here<.
+	 */
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+		switch (ESR_ELx_EC(kvm_vcpu_get_esr(vcpu))) {
+		case ESR_ELx_EC_PAC:
+			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_API))
+				return false;
+
+			enable |= HCR_API;
+			break;
+
+		case ESR_ELx_EC_SYS64:
+			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_APK))
+				return false;
+
+			enable |= HCR_APK;
+			break;
+		}
+	} else {
+		enable = HCR_API | HCR_APK;
+	}
+
 	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
 	__ptrauth_save_key(ctxt, APIA);
 	__ptrauth_save_key(ctxt, APIB);
@@ -492,11 +516,9 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 	__ptrauth_save_key(ctxt, APDB);
 	__ptrauth_save_key(ctxt, APGA);
 
-	vcpu_ptrauth_enable(vcpu);
-	val = read_sysreg(hcr_el2);
-	val |= (HCR_API | HCR_APK);
-	write_sysreg(val, hcr_el2);
+	vcpu->arch.hcr_el2 |= enable;
+	sysreg_clear_set(hcr_el2, 0, enable);
 
 	return true;
 }
 
-- 
2.39.2
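
[Editor's note, not part of the patch: a minimal, illustrative sketch of
the routing decision the new switch() in kvm_hyp_handle_ptrauth()
implements, for readers skimming the diff. The helper name and its
standalone form are hypothetical; it assumes the usual kernel
definitions of HCR_API/HCR_APK and the ESR_ELx_EC_* exception classes.]

/*
 * Returns true if KVM can handle a ptrauth trap taken from L2 at L0
 * (save the host keys, set the relevant bit in the real HCR_EL2 and
 * resume L2), or false if the trap must instead be forwarded to the
 * L1 hypervisor, because L1 kept the corresponding control bit clear
 * and therefore asked to see this trap itself.
 */
static bool l0_handles_l2_ptrauth_trap(u64 l1_hcr_el2, unsigned int esr_ec)
{
	switch (esr_ec) {
	case ESR_ELx_EC_PAC:	/* PAuth instruction trap, gated by HCR_EL2.API */
		return l1_hcr_el2 & HCR_API;
	case ESR_ELx_EC_SYS64:	/* key register access trap, gated by HCR_EL2.APK */
		return l1_hcr_el2 & HCR_APK;
	default:
		return false;
	}
}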