With per-guest ID registers, the PMUver setting from userspace can be
stored in its corresponding ID register.

No functional change intended.

Signed-off-by: Jing Zhang <jingzhangos@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h | 11 ++--
 arch/arm64/kvm/arm.c              |  6 --
 arch/arm64/kvm/id_regs.c          | 94 +++++++++++++++++++++++++------
 include/kvm/arm_pmu.h             |  5 +-
 4 files changed, 87 insertions(+), 29 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4699f6b829b2..009f6ff41078 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -236,6 +236,12 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_EL1_32BIT				4
 	/* PSCI SYSTEM_SUSPEND enabled for the guest */
 #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
+	/*
+	 * AA64DFR0_EL1.PMUver was set to ID_AA64DFR0_EL1_PMUVer_IMP_DEF
+	 * or DFR0_EL1.PerfMon was set to ID_DFR0_EL1_PerfMon_IMPDEF by
+	 * userspace for a vCPU without a PMU.
+	 */
+#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU		6
 
 	unsigned long flags;
 
@@ -248,11 +254,6 @@ struct kvm_arch {
 
 	cpumask_var_t supported_cpus;
 
-	struct {
-		u8 imp:4;
-		u8 unimp:4;
-	} dfr0_pmuver;
-
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0f71b10a2f05..9ecd0c5d0754 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_arm_init_hypercalls(kvm);
 	kvm_arm_init_id_regs(kvm);
 
-	/*
-	 * Initialise the default PMUver before there is a chance to
-	 * create an actual PMU.
-	 */
-	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
-
 	return 0;
 
 err_free_cpumask:
diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
index 5e0fd4c8b375..0a04a90a8676 100644
--- a/arch/arm64/kvm/id_regs.c
+++ b/arch/arm64/kvm/id_regs.c
@@ -21,9 +21,12 @@
 static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_has_pmu(vcpu))
-		return vcpu->kvm->arch.dfr0_pmuver.imp;
+		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+				 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
+	else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
+		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
 
-	return vcpu->kvm->arch.dfr0_pmuver.unimp;
+	return 0;
 }
 
 static u8 perfmon_to_pmuver(u8 perfmon)
@@ -244,8 +247,11 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       u64 val)
 {
+	struct kvm_arch *arch = &vcpu->kvm->arch;
 	u8 pmuver, host_pmuver;
 	bool valid_pmu;
+	u64 sval = val;
+	int ret = 0;
 
 	host_pmuver = kvm_arm_pmu_get_pmuver_limit();
 
@@ -265,26 +271,50 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 		return -EINVAL;
 
+	mutex_lock(&arch->config_lock);
 	/* We can only differ with PMUver, and anything else is an error */
 	val ^= read_id_reg(vcpu, rd);
 	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
-	if (val)
-		return -EINVAL;
+	if (val) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	if (valid_pmu)
-		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
-	else
-		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
+	/* Only allow userspace to change the idregs before the VM has run */
+	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
+		if (sval != read_id_reg(vcpu, rd))
+			ret = -EBUSY;
+	} else {
+		if (valid_pmu) {
+			val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
+			val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+			val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
+			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
+
+			val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
+			val &= ~ID_DFR0_EL1_PerfMon_MASK;
+			val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
+			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
+		} else {
+			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
+				   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
+		}
+	}
 
-	return 0;
+out:
+	mutex_unlock(&arch->config_lock);
+	return ret;
 }
 
 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_desc *rd, u64 val)
 {
+	struct kvm_arch *arch = &vcpu->kvm->arch;
 	u8 perfmon, host_perfmon;
 	bool valid_pmu;
+	u64 sval = val;
+	int ret = 0;
 
 	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
 
@@ -305,18 +335,39 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 		return -EINVAL;
 
+	mutex_lock(&arch->config_lock);
 	/* We can only differ with PerfMon, and anything else is an error */
 	val ^= read_id_reg(vcpu, rd);
 	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
-	if (val)
-		return -EINVAL;
+	if (val) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	if (valid_pmu)
-		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
-	else
-		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
+	/* Only allow userspace to change the idregs before the VM has run */
+	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
+		if (sval != read_id_reg(vcpu, rd))
+			ret = -EBUSY;
+	} else {
+		if (valid_pmu) {
+			val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
+			val &= ~ID_DFR0_EL1_PerfMon_MASK;
+			val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
+			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
+
+			val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
+			val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+			val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, perfmon_to_pmuver(perfmon));
+			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
+		} else {
+			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
+				   perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
+		}
+	}
 
-	return 0;
+out:
+	mutex_unlock(&arch->config_lock);
+	return ret;
 }
 
 /* sys_reg_desc initialiser for known cpufeature ID registers */
@@ -517,4 +568,15 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
 	}
 
 	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
+
+	/*
+	 * Initialise the default PMUver before there is a chance to
+	 * create an actual PMU.
+	 */
+	val = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
+
+	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+			  kvm_arm_pmu_get_pmuver_limit());
+
+	IDREG(kvm, SYS_ID_AA64DFR0_EL1) = val;
 }
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 628775334d5e..e486347b297d 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 /*
  * Evaluates as true when emulating PMUv3p5, and false otherwise.
  */
-#define kvm_pmu_is_3p5(vcpu)						\
-	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu)						\
+	(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),		\
+		   IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
-- 
2.40.1.495.gc816e09b53d-goog
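
For reference, a minimal userspace sketch (not part of the patch itself)
of how the write-once behaviour above can be exercised through the
existing KVM_GET_ONE_REG/KVM_SET_ONE_REG uapi. The vcpu fd setup is
assumed, set_pmuver() is a made-up helper, and the PMUver field layout
(ID_AA64DFR0_EL1 bits [11:8]) comes from the architecture:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* ID_AA64DFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=5, op2=0 */
	#define ID_AA64DFR0_EL1_REG	ARM64_SYS_REG(3, 0, 0, 5, 0)

	static int set_pmuver(int vcpu_fd, uint64_t pmuver)
	{
		uint64_t val;
		struct kvm_one_reg reg = {
			.id   = ID_AA64DFR0_EL1_REG,
			.addr = (uint64_t)&val,
		};

		/* Read the current register value first */
		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return -1;

		val &= ~(UINT64_C(0xf) << 8);	/* clear PMUver, bits [11:8] */
		val |= pmuver << 8;		/* e.g. 6 for PMUv3p5 */

		/*
		 * Succeeds before the first KVM_RUN; once the VM has run,
		 * writing a value that differs from the current one fails
		 * with errno == EBUSY, per the change above.
		 */
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}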