Hi,

With the patch set you posted I get some kvm-unit-tests failures due to
being unable to update register values from userspace. I propose the
following patch as a solution:

[PATCH 1/2] KVM: arm64: Update id_reg limit value based on per vcpu flags

There are multiple features whose availability is enabled/disabled and
tracked on a per-vcpu level in vcpu->arch.flagset, e.g. SVE, ptrauth,
and the PMU. While the VM-wide value of the ID registers which advertise
the availability of these features is stored in the kvm struct's
id_regs, their value needs to be adjusted on a per-vcpu basis. This is
done at read time in kvm_arm_read_id_reg().

The value of these per-vcpu flags needs to be factored in when
calculating the id_reg limit value in arm64_check_features(), as
otherwise we can run into the following scenario:

[ running on a cpu which supports SVE ]

1. AA64PFR0.SVE is set in the id_reg by kvm_arm_init_id_regs() (the cpu
   supports it and so it is set in the value returned from
   read_sanitised_ftr_reg()).
2. vcpus are created without the SVE feature enabled.
3. The vmm reads AA64PFR0 and attempts to write the same value back
   (writing the same value back is allowed).
4. The write fails in arm64_check_features() as the limit has
   AA64PFR0.SVE set; however, it is not set in the value being written,
   and although a lower value is allowed for this feature it is not in
   the mask of bits which can be modified and so must match exactly.

Thus add a step in arm64_check_features() to update the limit returned
from id_reg->reset() with the per-vcpu features which may have been
enabled/disabled at vcpu creation time, after the id_regs were
initialised. Split this update out into a new function named
kvm_arm_update_id_reg() so it can be called from arm64_check_features()
as well as kvm_arm_read_id_reg(), to deduplicate the code.

While we're here, there are features masked in kvm_arm_update_id_reg()
which cannot change throughout a VM's lifecycle. Thus, rather than
masking them each time the register is read, mask them at id_reg init
time so that the value in the kvm id_reg reflects the state of support
for that feature.

Move masking of AA64PFR0_EL1.GIC and AA64PFR0_EL1.AMU into
read_sanitised_id_aa64pfr0_el1(). Create read_sanitised_id_aa64pfr1_el1()
and mask AA64PFR1_EL1.SME. Create
read_sanitised_id_[mmfr4|aa64mmfr2]_el1() and mask CCIDX.

Finally, remove set_id_aa64pfr0_el1() as all it does is mask
AA64PFR0_EL1.CSV[2|3]. The limit for these fields is already set
according to cpu support in read_sanitised_id_aa64pfr0_el1() and then
checked when writing the register in arm64_check_features(); as such
there is no need to perform the check twice.

Signed-off-by: Suraj Jitindar Singh <surajjs@xxxxxxxxxx>
---
 arch/arm64/kvm/sys_regs.c | 113 ++++++++++++++++++++++++--------------
 1 file changed, 73 insertions(+), 40 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index bec02ba45ee7..ca793cd692fe 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -42,6 +42,7 @@
  */
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 val);
+static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 id, u64 val);
 static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
 
 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
@@ -1241,6 +1242,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu,
 	/* For hidden and unallocated idregs without reset, only val = 0 is allowed. */
 	if (rd->reset) {
 		limit = rd->reset(vcpu, rd);
+		limit = kvm_arm_update_id_reg(vcpu, id, limit);
 		ftr_reg = get_arm64_ftr_reg(id);
 		if (!ftr_reg)
 			return -EINVAL;
@@ -1317,24 +1319,17 @@ static u64 general_read_kvm_sanitised_reg(struct kvm_vcpu *vcpu, const struct sy
 	return read_sanitised_ftr_reg(reg_to_encoding(rd));
 }
 
-static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+/* Provide an updated value for an ID register based on per vcpu flags */
+static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 id, u64 val)
 {
-	u64 val = IDREG(vcpu->kvm, id);
-
 	switch (id) {
 	case SYS_ID_AA64PFR0_EL1:
 		if (!vcpu_has_sve(vcpu))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
-		if (kvm_vgic_global_state.type == VGIC_V3) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
-			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
-		}
 		break;
 	case SYS_ID_AA64PFR1_EL1:
 		if (!kvm_has_mte(vcpu->kvm))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -1347,8 +1342,6 @@ static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
 		if (!vcpu_has_ptrauth(vcpu))
 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
 				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
-		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
 		break;
 	case SYS_ID_AA64DFR0_EL1:
 		/* Set PMUver to the required version */
@@ -1361,17 +1354,18 @@ static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
 				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
 		break;
-	case SYS_ID_AA64MMFR2_EL1:
-		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
-		break;
-	case SYS_ID_MMFR4_EL1:
-		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
-		break;
 	}
 
 	return val;
 }
 
+static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+{
+	u64 val = IDREG(vcpu->kvm, id);
+
+	return kvm_arm_update_id_reg(vcpu, id, val);
+}
+
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 {
@@ -1477,34 +1471,28 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
 	}
 
+	if (kvm_vgic_global_state.type == VGIC_V3) {
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
+	}
+
 	val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
 
 	return val;
 }
 
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
-			       const struct sys_reg_desc *rd,
-			       u64 val)
+static u64 read_sanitised_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+					  const struct sys_reg_desc *rd)
 {
-	u8 csv2, csv3;
+	u64 val;
+	u32 id = reg_to_encoding(rd);
 
-	/*
-	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
-	 * it doesn't promise more than what is actually provided (the
-	 * guest could otherwise be covered in ectoplasmic residue).
-	 */
-	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
-	if (csv2 > 1 ||
-	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
-		return -EINVAL;
+	val = read_sanitised_ftr_reg(id);
 
-	/* Same thing for CSV3 */
-	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
-	if (csv3 > 1 ||
-	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
-		return -EINVAL;
+	/* SME is not supported */
+	val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
 
-	return set_id_reg(vcpu, rd, val);
+	return val;
 }
 
 static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1680,6 +1668,34 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static u64 read_sanitised_id_mmfr4_el1(struct kvm_vcpu *vcpu,
+				       const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	/* CCIDX is not supported */
+	val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+
+	return val;
+}
+
+static u64 read_sanitised_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
+					   const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	/* CCIDX is not supported */
+	val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+
+	return val;
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -2089,7 +2105,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	AA32_ID_SANITISED(ID_ISAR3_EL1),
 	AA32_ID_SANITISED(ID_ISAR4_EL1),
 	AA32_ID_SANITISED(ID_ISAR5_EL1),
-	AA32_ID_SANITISED(ID_MMFR4_EL1),
+	{ SYS_DESC(SYS_ID_MMFR4_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .visibility = aa32_id_visibility,
+	  .reset = read_sanitised_id_mmfr4_el1,
+	  .val = 0, },
+
 	ID_HIDDEN(ID_AFR0_EL1),
 	AA32_ID_SANITISED(ID_ISAR6_EL1),
 	/* CRm=3 */
@@ -2107,10 +2130,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
 	  .access = access_id_reg,
 	  .get_user = get_id_reg,
-	  .set_user = set_id_aa64pfr0_el1,
+	  .set_user = set_id_reg,
 	  .reset = read_sanitised_id_aa64pfr0_el1,
 	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
-	ID_SANITISED(ID_AA64PFR1_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset = read_sanitised_id_aa64pfr1_el1,
+	  .val = 0, },
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
 	ID_SANITISED(ID_AA64ZFR0_EL1),
@@ -2146,7 +2174,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=7 */
 	ID_SANITISED(ID_AA64MMFR0_EL1),
 	ID_SANITISED(ID_AA64MMFR1_EL1),
-	ID_SANITISED(ID_AA64MMFR2_EL1),
+	{ SYS_DESC(SYS_ID_AA64MMFR2_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset = read_sanitised_id_aa64mmfr2_el1,
+	  .val = 0, },
 	ID_UNALLOCATED(7,3),
 	ID_UNALLOCATED(7,4),
 	ID_UNALLOCATED(7,5),
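
For reference, here is a minimal sketch of the userspace sequence that
trips over this. It is a hypothetical reproducer, not part of the patch:
the vcpu_fd and the roundtrip_id_aa64pfr0() helper are assumptions, and
it presumes a vcpu created without KVM_ARM_VCPU_SVE on an SVE-capable
host. It uses only the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG uapi.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in ARM64_SYS_REG() via <asm/kvm.h> on arm64 */

/* ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define REG_ID_AA64PFR0_EL1	ARM64_SYS_REG(3, 0, 0, 4, 0)

/* Read ID_AA64PFR0_EL1 and immediately write the identical value back. */
static int roundtrip_id_aa64pfr0(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = REG_ID_AA64PFR0_EL1,
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	/*
	 * The value read has AA64PFR0.SVE clear (the vcpu has no SVE), but
	 * without the patch above the limit in arm64_check_features() still
	 * has SVE set, so writing the unmodified value back fails (EINVAL).
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

With the limit run through kvm_arm_update_id_reg(), the write-back of
the unmodified value succeeds again.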