A feature's fractional field in an ID register cannot simply be
validated at KVM_SET_ONE_REG time, because its validity depends on the
value of its (main) feature field, which may live in a different ID
register (and might be set later). Validate fractional fields at the
first KVM_RUN instead.

Signed-off-by: Reiji Watanabe <reijiw@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |   1 +
 arch/arm64/kvm/arm.c              |   3 +
 arch/arm64/kvm/sys_regs.c         | 113 +++++++++++++++++++++++++++++-
 3 files changed, 114 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index dbed94e759a8..b85af83b4542 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -789,6 +789,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 void set_default_id_regs(struct kvm *kvm);
 int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval);
 void kvm_vcpu_breakpoint_config(struct kvm_vcpu *vcpu);
+int kvm_id_regs_check_frac_fields(const struct kvm_vcpu *vcpu);
 
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 04312f7ee0da..5c1cee04aa95 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -524,6 +524,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
+	if (!kvm_vm_is_protected(kvm) && kvm_id_regs_check_frac_fields(vcpu))
+		return -EPERM;
+
 	kvm_arm_vcpu_init_debug(vcpu);
 
 	if (likely(irqchip_in_kernel(kvm))) {
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1045319c474e..fc7a8f2539a4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -4028,6 +4028,100 @@ void kvm_sys_reg_table_init(void)
 	id_reg_desc_init_all();
 }
 
+/* Pairs an ID register's fractional field with its (main) feature field. */
+struct feature_frac {
+	u32	id;
+	u32	shift;
+	u32	frac_id;
+	u32	frac_shift;
+};
+
+static struct feature_frac feature_frac_table[] = {
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_RASFRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_RAS_SHIFT,
+	},
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_MPAMFRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_MPAM_SHIFT,
+	},
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_CSV2FRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_CSV2_SHIFT,
+	},
+};
+
+/*
+ * Return non-zero if the feature/fractional field pair is not
+ * supported. Return zero otherwise.
+ * This function validates only the fractional field, and relies
+ * on the (main) feature field having already been validated
+ * through arm64_check_features().
+ */
+static int vcpu_id_reg_feature_frac_check(const struct kvm_vcpu *vcpu,
+					   const struct feature_frac *ftr_frac)
+{
+	const struct id_reg_desc *id_reg;
+	u32 id;
+	u64 val, lim, mask;
+
+	/* Check whether the feature field value is the same as the limit */
+	id = ftr_frac->id;
+
+	mask = ARM64_FEATURE_FIELD_MASK << ftr_frac->shift;
+	id_reg = get_id_reg_desc(id);
+	val = __read_id_reg(vcpu, id_reg) & mask;
+	lim = id_reg->vcpu_limit_val & mask;
+
+	if (val != lim)
+		/*
+		 * The feature level is lower than the limit.
+		 * Any fractional version should be fine.
+		 */
+		return 0;
+
+	/* Check the fractional feature field */
+	id = ftr_frac->frac_id;
+
+	mask = ARM64_FEATURE_FIELD_MASK << ftr_frac->frac_shift;
+	id_reg = get_id_reg_desc(id);
+	val = __read_id_reg(vcpu, id_reg) & mask;
+	lim = id_reg->vcpu_limit_val & mask;
+
+	if (val == lim)
+		/*
+		 * Both the feature and fractional fields are the same
+		 * as the limit.
+		 */
+		return 0;
+
+	return arm64_check_features(id_reg->ftr_bits, val, lim);
+}
+
+int kvm_id_regs_check_frac_fields(const struct kvm_vcpu *vcpu)
+{
+	int i, err;
+	const struct feature_frac *frac;
+
+	/*
+	 * Check ID registers' fractional fields, which aren't checked
+	 * at KVM_SET_ONE_REG.
+	 */
+	for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+		frac = &feature_frac_table[i];
+		err = vcpu_id_reg_feature_frac_check(vcpu, frac);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 /*
  * Update the ID register's field with @fval for the guest.
  * The caller is expected to hold the kvm->lock.
@@ -4055,9 +4149,6 @@ static struct id_reg_desc id_aa64pfr0_el1_desc = {
 
 static struct id_reg_desc id_aa64pfr1_el1_desc = {
 	.reg_desc = ID_SANITISED(ID_AA64PFR1_EL1),
-	.ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR1_RASFRAC) |
-		       ARM64_FEATURE_MASK(ID_AA64PFR1_MPAMFRAC) |
-		       ARM64_FEATURE_MASK(ID_AA64PFR1_CSV2FRAC),
 	.init = init_id_aa64pfr1_el1_desc,
 	.validate = validate_id_aa64pfr1_el1,
 	.vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
@@ -4329,6 +4420,8 @@ static void id_reg_desc_init_all(void)
 {
 	int i;
 	struct id_reg_desc *id_reg;
+	struct feature_frac *frac;
+	u64 ftr_mask = ARM64_FEATURE_FIELD_MASK;
 
 	for (i = 0; i < ARRAY_SIZE(id_reg_desc_table); i++) {
 		id_reg = (struct id_reg_desc *)id_reg_desc_table[i];
@@ -4337,6 +4430,20 @@ static void id_reg_desc_init_all(void)
 
 		id_reg_desc_init(id_reg);
 	}
+
+	/*
+	 * Update the ignore_mask of ID registers based on the fractional
+	 * field information. Any ID register that has fractional fields
+	 * is expected to have its own id_reg_desc.
+	 */
+	for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+		frac = &feature_frac_table[i];
+		id_reg = get_id_reg_desc(frac->frac_id);
+		if (WARN_ON_ONCE(!id_reg))
+			continue;
+
+		id_reg->ignore_mask |= ftr_mask << frac->frac_shift;
+	}
 }
 
 /*
-- 
2.36.0.rc0.470.gd361397f0d-goog
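
For readers following along outside the kernel tree, below is a minimal,
self-contained sketch of the dependency rule the patch enforces: a
fractional field only needs checking against the host limit once its main
feature field is already at that limit. It is not part of the patch; the
struct, function and values are illustrative only, and the final unsigned
"<=" comparison stands in for what the kernel delegates to
arm64_check_features().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: one main feature field paired with its fractional field. */
struct frac_pair {
	uint8_t feat_val;	/* guest value of the main feature field */
	uint8_t feat_lim;	/* host limit for the main feature field */
	uint8_t frac_val;	/* guest value of the fractional field */
	uint8_t frac_lim;	/* host limit for the fractional field */
};

/* Return true if the (feature, fractional) pair is acceptable for the guest. */
static bool frac_pair_ok(const struct frac_pair *p)
{
	/*
	 * If the main field is not at the host limit (it is assumed to have
	 * already been validated not to exceed it), the guest is constrained
	 * to a lower feature level, so any fractional value is acceptable.
	 */
	if (p->feat_val != p->feat_lim)
		return true;

	/*
	 * The main field is at the limit, so the fractional field must not
	 * advertise more than the host supports (simplified unsigned check;
	 * the kernel defers to arm64_check_features() here).
	 */
	return p->frac_val <= p->frac_lim;
}

int main(void)
{
	struct frac_pair below = { .feat_val = 1, .feat_lim = 2, .frac_val = 3, .frac_lim = 0 };
	struct frac_pair over  = { .feat_val = 2, .feat_lim = 2, .frac_val = 1, .frac_lim = 0 };

	printf("main field below limit: %d\n", frac_pair_ok(&below));	/* 1: frac is don't-care */
	printf("frac field over limit:  %d\n", frac_pair_ok(&over));	/* 0: rejected at KVM_RUN */
	return 0;
}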