On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> There are a set of inline functions defined in kvm_emulate.h. Those
> functions read the ESR from the vCPU fault information struct and then
> operate on it, so they are tied to the vCPU fault information and the
> vCPU struct, which limits their usage scope.
>
> This detaches these functions from the vCPU struct by introducing
> another set of inline functions in esr.h to manipulate a specified
> ESR value. With that, the inline functions defined in kvm_emulate.h
> can call these inline functions (in esr.h) instead. This shouldn't
> cause any functional changes.
>
> Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
> ---
>  arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>  2 files changed, 51 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..950204c5fbe1 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>  	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>  }
>
> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> +static inline bool esr_is_##name(u32 esr)	\
> +{						\
> +	return !!(esr & (field));		\
> +}
> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> +static inline u32 esr_get_##name(u32 esr)	\
> +{						\
> +	return ((esr & (mask)) >> (shift));	\
> +}

Should these be named DEFINE rather than DECLARE, given that they also
include the function definitions?
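For illustration, a minimal sketch of that rename (the name is
hypothetical, just following the kernel's usual DEFINE_* convention for
macros that expand to a full definition):

#define ESR_DEFINE_CHECK_FUNC(name, field)	\
static inline bool esr_is_##name(u32 esr)	\
{						\
	return !!(esr & (field));		\
}

/* e.g. ESR_DEFINE_CHECK_FUNC(il_32bit, ESR_ELx_IL) emits esr_is_il_32bit() */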
> +
> +ESR_DECLARE_CHECK_FUNC(il_32bit, ESR_ELx_IL);
> +ESR_DECLARE_CHECK_FUNC(condition, ESR_ELx_CV);
> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> +ESR_DECLARE_CHECK_FUNC(dabt_sse, ESR_ELx_SSE);
> +ESR_DECLARE_CHECK_FUNC(dabt_sf, ESR_ELx_SF);
> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> +ESR_DECLARE_CHECK_FUNC(dabt_cm, ESR_ELx_CM);
> +
> +ESR_DECLARE_GET_FUNC(class, ESR_ELx_EC_MASK, ESR_ELx_EC_SHIFT);
> +ESR_DECLARE_GET_FUNC(fault, ESR_ELx_FSC, 0);
> +ESR_DECLARE_GET_FUNC(fault_type, ESR_ELx_FSC_TYPE, 0);
> +ESR_DECLARE_GET_FUNC(condition, ESR_ELx_COND_MASK, ESR_ELx_COND_SHIFT);
> +ESR_DECLARE_GET_FUNC(hvc_imm, ESR_ELx_xVC_IMM_MASK, 0);
> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> +ESR_DECLARE_GET_FUNC(dabt_rd, ESR_ELx_SRT_MASK, ESR_ELx_SRT_SHIFT);
> +ESR_DECLARE_GET_FUNC(dabt_as, ESR_ELx_SAS, ESR_ELx_SAS_SHIFT);
> +ESR_DECLARE_GET_FUNC(sys_rt, ESR_ELx_SYS64_ISS_RT_MASK,
> +		     ESR_ELx_SYS64_ISS_RT_SHIFT);
> +
>  const char *esr_get_class_string(u32 esr);
>  #endif /* __ASSEMBLY */
>
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..9337d90c517f 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>
>  static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -
> -	if (esr & ESR_ELx_CV)
> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
> -
> -	return -1;
> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>  }
>
>  static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>
>  static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>  }

It feels a little strange that the raw ESR helpers are generated with
macro magic while the vCPU wrappers here are all written out in full.
Is there a reason I'm missing, or is there a chance to apply a
consistent approach?

I'm not sure of the style preferences, but if this goes down the macro
path, the ESR field definitions could be reused with something
x-macro-like, so that the esr.h and kvm_emulate.h functions are both
generated from a single list of the ESR fields. A rough sketch follows
below.
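This is only a sketch of the x-macro idea, not a worked patch: the
field list is abbreviated, the list and wrapper macro names are
hypothetical, and it reuses the ESR_DEFINE_CHECK_FUNC sketch from
above:

/* Single list of single-bit ESR fields: X(name, mask) */
#define ESR_CHECK_FIELDS(X)			\
	X(il_32bit, ESR_ELx_IL)			\
	X(dabt_valid, ESR_ELx_ISV)		\
	X(dabt_write, ESR_ELx_WNR)

/* esr.h: expand the list into the esr_is_*() helpers */
ESR_CHECK_FIELDS(ESR_DEFINE_CHECK_FUNC)

/* kvm_emulate.h: expand the same list into vCPU-based wrappers */
#define ESR_DEFINE_VCPU_CHECK_FUNC(name, field)				\
static inline bool kvm_vcpu_is_##name(const struct kvm_vcpu *vcpu)	\
{									\
	return esr_is_##name(kvm_vcpu_get_esr(vcpu));			\
}
ESR_CHECK_FIELDS(ESR_DEFINE_VCPU_CHECK_FUNC)

One wrinkle: the existing kvm_emulate.h names (kvm_vcpu_dabt_isvalid(),
kvm_vcpu_trap_il_is32bit(), ...) don't follow a single pattern, so
generated names would either change all the callers or need
per-function aliases.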
>  static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
> +	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
> +	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
> +	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
> +	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
>  {
> -	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
> +	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
> +	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
> -		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
> +	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
> +	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
>  }
>
>  static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
> +	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
>  {
> -	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
> +	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
>  }
>
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
>  {
> -	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
>  }
>
>  static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
> +	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
> +	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
> @@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
>
>  static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -	return ESR_ELx_SYS64_ISS_RT(esr);
> +	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
>  }
>
>  static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> --
> 2.23.0