So far, vcpu_has_ptrauth() is implemented in terms of system_supports_*_auth()
calls, which are declared "inline". In some specific conditions (clang and
SCS), the "inline" very much turns into an "out of line", which leads to
fireworks when this predicate is evaluated on a non-VHE system (right at the
beginning of __hyp_handle_ptrauth).

Instead, make sure vcpu_has_ptrauth gets expanded inline by directly using
the cpus_have_final_cap() helpers, which are __always_inline, generate much
better code, and are the only thing that makes sense when running at EL2 on
an nVHE system.

Fixes: 29eb5a3c57f7 ("KVM: arm64: Handle PtrAuth traps early")
Reported-by: Nathan Chancellor <natechancellor@xxxxxxxxx>
Reported-by: Nick Desaulniers <ndesaulniers@xxxxxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
Tested-by: Nathan Chancellor <natechancellor@xxxxxxxxx>
Reviewed-by: Nathan Chancellor <natechancellor@xxxxxxxxx>
Link: https://lore.kernel.org/r/20200722162231.3689767-1-maz@xxxxxxxxxx
---
 arch/arm64/include/asm/kvm_host.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..e21d4a01372f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu)	(system_supports_sve() && \
 				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
 
-#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
-				  system_supports_generic_auth()) && \
-				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define vcpu_has_ptrauth(vcpu)						\
+	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
+	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
+	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+#else
+#define vcpu_has_ptrauth(vcpu)	false
+#endif
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
 
-- 
2.27.0
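
[Editor's note] For readers less familiar with the distinction the fix relies on, here is a
minimal stand-alone user-space sketch. It is not part of the patch and not kernel code; the
names caps[], supports_auth_hint() and has_final_cap() are made up for illustration. The point
is that plain "inline" is only a hint, so the compiler may still emit an out-of-line copy and
call it through a branch, whereas an always_inline helper is guaranteed to be expanded at the
call site, which is what keeps the EL2/nVHE path free of calls into kernel (EL1) text.

/* Stand-alone illustration only; compile with a regular C compiler. */
#include <stdbool.h>
#include <stdio.h>

/* Roughly how the kernel spells "must be inlined". */
#define my_always_inline	inline __attribute__((always_inline))

static bool caps[2] = { true, false };	/* stand-in for finalized CPU capability bits */

/* Hint only: the compiler is free to keep an out-of-line copy and call it. */
static inline bool supports_auth_hint(void)
{
	return caps[0];
}

/* Forced: the capability test is always expanded at the call site. */
static my_always_inline bool has_final_cap(int cap)
{
	return caps[cap];
}

int main(void)
{
	/* Both forms return the same value; only the generated code differs. */
	printf("hint: %d, forced: %d\n",
	       supports_auth_hint(), has_final_cap(0));
	return 0;
}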