On Wed, Sep 22, 2021 at 01:46:55PM +0100, Fuad Tabba wrote:
> From: Marc Zyngier <maz@xxxxxxxxxx>
>
> Simplify the early exception handling by slicing the gigantic decoding
> tree into a more manageable set of functions, similar to what we have
> in handle_exit.c.
>
> This will also make the structure reusable for pKVM's own early exit
> handling.
>
> Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
> Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 160 ++++++++++++++----------
>  arch/arm64/kvm/hyp/nvhe/switch.c        |  17 +++
>  arch/arm64/kvm/hyp/vhe/switch.c         |  17 +++
>  3 files changed, 126 insertions(+), 68 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 54abc8298ec3..0397606c0951 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -136,16 +136,7 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
>
>  static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
>  {
> -	u8 ec;
> -	u64 esr;
> -
> -	esr = vcpu->arch.fault.esr_el2;
> -	ec = ESR_ELx_EC(esr);
> -
> -	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
> -		return true;
> -
> -	return __get_fault_info(esr, &vcpu->arch.fault);
> +	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
>  }
>
>  static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
> @@ -166,8 +157,13 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>  	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
>  }
>
> -/* Check for an FPSIMD/SVE trap and handle as appropriate */
> -static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
> +/*
> + * We trap the first access to the FP/SIMD to save the host context and
> + * restore the guest context lazily.
> + * If FP/SIMD is not implemented, handle the trap and inject an undefined
> + * instruction exception to the guest. Similarly for trapped SVE accesses.
> + */
> +static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	bool sve_guest, sve_host;
>  	u8 esr_ec;
> @@ -185,9 +181,6 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
>  	}
>
>  	esr_ec = kvm_vcpu_trap_get_class(vcpu);
> -	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
> -	    esr_ec != ESR_ELx_EC_SVE)
> -		return false;
>
>  	/* Don't handle SVE traps for non-SVE vcpus here: */
>  	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
> @@ -325,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>
>  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
>
> -static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
> +static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	struct kvm_cpu_context *ctxt;
>  	u64 val;
> @@ -350,6 +343,87 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
>  	return true;
>  }
>
> +static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
> +	    handle_tx2_tvm(vcpu))
> +		return true;
> +
> +	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
> +	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> +		return true;
> +
> +	return false;
> +}
> +
> +static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
> +	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> +		return true;

I think you're now calling this for the 64-bit CP15 access path, which I
don't think is correct.
Maybe have separate handlers for 32-bit vs 64-bit accesses?

Will
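
Something like the split below is roughly what I have in mind, but treat it
as a sketch rather than a tested change: the _32/_64 names and the choice to
leave the 64-bit path with no early handling are my guesses (IIRC the old
fixup_guest_exit() only performed the vgic-v3 cpuif emulation for SYS64 and
CP15_32 traps):

/*
 * Sketch only: keep the vgic-v3 cpuif emulation for 32-bit CP15
 * accesses, and give 64-bit CP15 accesses their own (empty) handler.
 */
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_cp15_64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Assumption: nothing to emulate early for 64-bit CP15 accesses */
	return false;
}

Alternatively, just don't wire up a handler for ESR_ELx_EC_CP15_64 at all.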