Hi Reiji,

On 11/17/21 7:43 AM, Reiji Watanabe wrote:
> This patch adds id_reg_info for ID_AA64DFR0_EL1 to make it writable
> by userspace.
>
> Return an error if userspace tries to set PMUVER field of the
> register to a value that conflicts with the PMU configuration.
>
> Since number of context-aware breakpoints must be no more than number
> of supported breakpoints according to Arm ARM, return an error
> if userspace tries to set CTX_CMPS field to such value.
>
> Signed-off-by: Reiji Watanabe <reijiw@xxxxxxxxxx>
> ---
>  arch/arm64/kvm/sys_regs.c | 84 ++++++++++++++++++++++++++++++++++-----
>  1 file changed, 73 insertions(+), 11 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 772e3d3067b2..0faf458b0efb 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -626,6 +626,45 @@ static int validate_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
>          return 0;
>  }
>
> +static bool id_reg_has_pmu(u64 val, u64 shift, unsigned int min)

I would rename the function, as the name currently is misleading: what it
actually does is validate the @val field at @shift against @min (see
sketch 1 at the end of this mail).

> +{
> +        unsigned int pmu = cpuid_feature_extract_unsigned_field(val, shift);
> +
> +        /*
> +         * Treat IMPLEMENTATION DEFINED functionality as unimplemented for
> +         * ID_AA64DFR0_EL1.PMUVer/ID_DFR0_EL1.PerfMon.
> +         */
> +        if (pmu == 0xf)
> +                pmu = 0;

Shouldn't we simply forbid userspace from setting 0xF (see sketch 2 at the
end of this mail)?

> +
> +        return (pmu >= min);
> +}
> +
> +static int validate_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> +                                    const struct id_reg_info *id_reg, u64 val)
> +{
> +        unsigned int brps, ctx_cmps;
> +        bool vcpu_pmu, dfr0_pmu;
> +
> +        brps = cpuid_feature_extract_unsigned_field(val, ID_AA64DFR0_BRPS_SHIFT);
> +        ctx_cmps = cpuid_feature_extract_unsigned_field(val, ID_AA64DFR0_CTX_CMPS_SHIFT);
> +
> +        /*
> +         * Number of context-aware breakpoints can be no more than number of
> +         * supported breakpoints.
> +         */
> +        if (ctx_cmps > brps)
> +                return -EINVAL;
> +
> +        vcpu_pmu = kvm_vcpu_has_pmu(vcpu);
> +        dfr0_pmu = id_reg_has_pmu(val, ID_AA64DFR0_PMUVER_SHIFT, ID_AA64DFR0_PMUVER_8_0);
> +        /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT */
> +        if (vcpu_pmu ^ dfr0_pmu)
> +                return -EPERM;
> +
> +        return 0;
> +}
> +
>  static void init_id_aa64pfr0_el1_info(struct id_reg_info *id_reg)
>  {
>          u64 limit = id_reg->vcpu_limit_val;
> @@ -669,6 +708,23 @@ static void init_id_aa64isar1_el1_info(struct id_reg_info *id_reg)
>          id_reg->vcpu_limit_val &= ~PTRAUTH_MASK;
>  }
>
> +static void init_id_aa64dfr0_el1_info(struct id_reg_info *id_reg)
> +{
> +        u64 limit = id_reg->vcpu_limit_val;
> +
> +        /* Limit guests to PMUv3 for ARMv8.4 */
> +        limit = cpuid_feature_cap_perfmon_field(limit, ID_AA64DFR0_PMUVER_SHIFT,
> +                                                ID_AA64DFR0_PMUVER_8_4);
> +        /* Limit debug to ARMv8.0 */
> +        limit &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
> +        limit |= (FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6));
> +
> +        /* Hide SPE from guests */
> +        limit &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
> +
> +        id_reg->vcpu_limit_val = limit;
> +}
> +
>  static u64 get_reset_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
>                                       const struct id_reg_info *idr)
>  {
> @@ -698,6 +754,14 @@ static u64 get_reset_id_aa64isar1_el1(struct kvm_vcpu *vcpu,
>                  idr->vcpu_limit_val : (idr->vcpu_limit_val & ~PTRAUTH_MASK);
>  }
>
> +static u64 get_reset_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> +                                     const struct id_reg_info *idr)
> +{
> +        return kvm_vcpu_has_pmu(vcpu) ?
> +                idr->vcpu_limit_val :
> +                (idr->vcpu_limit_val & ~(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER)));
> +}
> +
>  static struct id_reg_info id_aa64pfr0_el1_info = {
>          .sys_reg = SYS_ID_AA64PFR0_EL1,
>          .ftr_check_types = S_FCT(ID_AA64PFR0_ASIMD_SHIFT, FCT_LOWER_SAFE) |
> @@ -742,6 +806,14 @@ static struct id_reg_info id_aa64mmfr0_el1_info = {
>          .validate = validate_id_aa64mmfr0_el1,
>  };
>
> +static struct id_reg_info id_aa64dfr0_el1_info = {
> +        .sys_reg = SYS_ID_AA64DFR0_EL1,
> +        .ftr_check_types = S_FCT(ID_AA64DFR0_DOUBLELOCK_SHIFT, FCT_LOWER_SAFE),
> +        .init = init_id_aa64dfr0_el1_info,
> +        .validate = validate_id_aa64dfr0_el1,
> +        .get_reset_val = get_reset_id_aa64dfr0_el1,
> +};
> +
>  /*
>   * An ID register that needs special handling to control the value for the
>   * guest must have its own id_reg_info in id_reg_info_table.
> @@ -753,6 +825,7 @@ static struct id_reg_info id_aa64mmfr0_el1_info = {
>  static struct id_reg_info *id_reg_info_table[KVM_ARM_ID_REG_MAX_NUM] = {
>          [IDREG_IDX(SYS_ID_AA64PFR0_EL1)] = &id_aa64pfr0_el1_info,
>          [IDREG_IDX(SYS_ID_AA64PFR1_EL1)] = &id_aa64pfr1_el1_info,
> +        [IDREG_IDX(SYS_ID_AA64DFR0_EL1)] = &id_aa64dfr0_el1_info,
>          [IDREG_IDX(SYS_ID_AA64ISAR0_EL1)] = &id_aa64isar0_el1_info,
>          [IDREG_IDX(SYS_ID_AA64ISAR1_EL1)] = &id_aa64isar1_el1_info,
>          [IDREG_IDX(SYS_ID_AA64MMFR0_EL1)] = &id_aa64mmfr0_el1_info,
> @@ -1604,17 +1677,6 @@ static u64 __read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
>                          val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), gic_lim);
>                  }
>                  break;
> -        case SYS_ID_AA64DFR0_EL1:
> -                /* Limit debug to ARMv8.0 */
> -                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
> -                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
> -                /* Limit guests to PMUv3 for ARMv8.4 */
> -                val = cpuid_feature_cap_perfmon_field(val,
> -                                                      ID_AA64DFR0_PMUVER_SHIFT,
> -                                                      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
> -                /* Hide SPE from guests */
> -                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
> -                break;
>          case SYS_ID_DFR0_EL1:
>                  /* Limit guests to PMUv3 for ARMv8.4 */
>                  val = cpuid_feature_cap_perfmon_field(val,
>

Eric
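
P.S.: here are the two sketches referred to above. Both are rough and
untested, purely to illustrate what I mean; the names are only suggestions,
not the actual implementation.

1) Possible rename of id_reg_has_pmu(). The name id_reg_field_meets_min()
is just my pick; the body is the same logic as in your patch, reusing
cpuid_feature_extract_unsigned_field():

static bool id_reg_field_meets_min(u64 val, u64 shift, unsigned int min)
{
        unsigned int ftr = cpuid_feature_extract_unsigned_field(val, shift);

        /*
         * Treat IMPLEMENTATION DEFINED functionality as unimplemented for
         * ID_AA64DFR0_EL1.PMUVer/ID_DFR0_EL1.PerfMon.
         */
        if (ftr == 0xf)
                ftr = 0;

        return ftr >= min;
}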
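2) Rejecting 0xF in validate_id_aa64dfr0_el1() instead of folding it to 0.
Only a fragment of that function; pmuver would be a new local variable
there, and the other symbols are the ones your patch already uses:

        pmuver = cpuid_feature_extract_unsigned_field(val, ID_AA64DFR0_PMUVER_SHIFT);
        /* Refuse IMPLEMENTATION DEFINED PMU rather than treating it as "no PMU" */
        if (pmuver == 0xf)
                return -EINVAL;

        vcpu_pmu = kvm_vcpu_has_pmu(vcpu);
        dfr0_pmu = (pmuver >= ID_AA64DFR0_PMUVER_8_0);
        /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT */
        if (vcpu_pmu ^ dfr0_pmu)
                return -EPERM;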