Hi Reiji,

On Wed, Mar 8, 2023 at 8:42 AM Reiji Watanabe <reijiw@xxxxxxxxxx> wrote:
>
> Hi Jing,
>
> On Mon, Feb 27, 2023 at 10:23 PM Jing Zhang <jingzhangos@xxxxxxxxxx> wrote:
> >
> > With per guest ID registers, PMUver settings from userspace
> > can be stored in its corresponding ID register.
> >
> > No functional change intended.
> >
> > Signed-off-by: Jing Zhang <jingzhangos@xxxxxxxxxx>
> > ---
> >  arch/arm64/include/asm/kvm_host.h | 11 ++++---
> >  arch/arm64/kvm/arm.c              |  6 ----
> >  arch/arm64/kvm/id_regs.c          | 52 ++++++++++++++++++++++++-------
> >  include/kvm/arm_pmu.h             |  6 ++--
> >  4 files changed, 51 insertions(+), 24 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index f64347eb77c2..effb61a9a855 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -218,6 +218,12 @@ struct kvm_arch {
> >  #define KVM_ARCH_FLAG_EL1_32BIT                        4
> >         /* PSCI SYSTEM_SUSPEND enabled for the guest */
> >  #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED           5
> > +       /*
> > +        * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> > +        * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
> > +        * userspace for VCPUs without PMU.
> > +        */
> > +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU             6
> >
> >         unsigned long flags;
> >
> > @@ -230,11 +236,6 @@ struct kvm_arch {
> >
> >         cpumask_var_t supported_cpus;
> >
> > -       struct {
> > -               u8 imp:4;
> > -               u8 unimp:4;
> > -       } dfr0_pmuver;
> > -
> >         /* Hypercall features firmware registers' descriptor */
> >         struct kvm_smccc_features smccc_feat;
> >
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index c78d68d011cb..fb2de2cb98cb 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >         kvm_arm_set_default_id_regs(kvm);
> >         kvm_arm_init_hypercalls(kvm);
> >
> > -       /*
> > -        * Initialise the default PMUver before there is a chance to
> > -        * create an actual PMU.
> > -        */
> > -       kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> > -
> >         return 0;
> >
> >  err_free_cpumask:
> > diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
> > index 36859e4caf02..21ec8fc10d79 100644
> > --- a/arch/arm64/kvm/id_regs.c
> > +++ b/arch/arm64/kvm/id_regs.c
> > @@ -21,9 +21,12 @@
> >  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
> >  {
> >         if (kvm_vcpu_has_pmu(vcpu))
> > -               return vcpu->kvm->arch.dfr0_pmuver.imp;
> > -
> > -       return vcpu->kvm->arch.dfr0_pmuver.unimp;
> > +               return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                                IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> > +       else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> > +               return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
> > +       else
> > +               return 0;
> >  }
> >
> >  static u8 perfmon_to_pmuver(u8 perfmon)
> > @@ -256,10 +259,19 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >         if (val)
> >                 return -EINVAL;
> >
> > -       if (valid_pmu)
> > -               vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> > -       else
> > -               vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> > +       if (valid_pmu) {
> > +               IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +               IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |=
> > +                       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), pmuver);
> > +
> > +               IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> > +               IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |=
> > +                       FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), pmuver);
> The pmuver must be converted to perfmon for ID_DFR0_EL1.

Yes, will fix it.

>
> Also, I think those registers should be updated atomically, although PMUver
> specified by userspace will be normally the same for all vCPUs with
> PMUv3 configured (I have the same comment for set_id_dfr0_el1()).
>
I think there is no race condition here. No corrupted data would be set
in the field, right? (I put a small user-space sketch of the
read-modify-write pattern we are discussing at the end of this mail.)

> > +       } else if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) {
> > +               set_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags);
> > +       } else {
> > +               clear_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags);
> > +       }
> >
> >         return 0;
> >  }
> > @@ -296,10 +308,19 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >         if (val)
> >                 return -EINVAL;
> >
> > -       if (valid_pmu)
> > -               vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> > -       else
> > -               vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> > +       if (valid_pmu) {
> > +               IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> > +               IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(
> > +                       ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), perfmon_to_pmuver(perfmon));
> The perfmon value should be set for ID_DFR0_EL1 (not pmuver).
>
Sure, will fix it.

> > +
> > +               IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +               IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(
> > +                       ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), perfmon_to_pmuver(perfmon));
> > +       } else if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
> > +               set_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags);
> > +       } else {
> > +               clear_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags);
> > +       }
> >
> >         return 0;
> >  }
> > @@ -543,4 +564,13 @@ void kvm_arm_set_default_id_regs(struct kvm *kvm)
> >         }
> >
> >         IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> > +
> > +       /*
> > +        * Initialise the default PMUver before there is a chance to
> > +        * create an actual PMU.
> > +        */
> > +       IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +       IDREG(kvm, SYS_ID_AA64DFR0_EL1) |=
> > +               FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                          kvm_arm_pmu_get_pmuver_limit());
> >  }
> > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > index 628775334d5e..eef67b7d9751 100644
> > --- a/include/kvm/arm_pmu.h
> > +++ b/include/kvm/arm_pmu.h
> > @@ -92,8 +92,10 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
> >  /*
> >   * Evaluates as true when emulating PMUv3p5, and false otherwise.
> >   */
> > -#define kvm_pmu_is_3p5(vcpu)                                           \
> > -       (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> > +#define kvm_pmu_is_3p5(vcpu)                                           \
> > +       (kvm_vcpu_has_pmu(vcpu) &&                                      \
> What is the reason for adding this kvm_vcpu_has_pmu() checking ?
> I don't think this patch's changes necessitated this.

For the same VM, is it possible that some vCPUs have a PMU while others
don't? That's why the kvm_vcpu_has_pmu() check is added here.

>
> > +        FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),          \
> > +                  IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> >
> >  u8 kvm_arm_pmu_get_pmuver_limit(void);
> >
> > --
> > 2.39.2.722.g9855ee24e9-goog
> >
> >
>
> Thank you,
> Reiji

Thanks,
Jing
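
P.S. On the atomicity point above: below is a minimal user-space sketch
(plain C with pthreads, not kernel code; the field layout, names and
values are made up for illustration) of the unlocked read-modify-write
pattern the new IDREG() updates follow. No field ever holds a torn or
corrupted value, which is the point I was making, but when two writers
update different fields of the same 64-bit value concurrently, one
whole-register store can silently drop the other writer's field.

/* Build with: cc -pthread rmw_race.c -o rmw_race */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t idreg;                    /* stands in for a shared IDREG() value */

#define FIELD_A(v)   ((uint64_t)(v) << 0) /* imagine PMUVer */
#define FIELD_B(v)   ((uint64_t)(v) << 8) /* imagine some other field */
#define MASK_A       FIELD_A(0xf)
#define MASK_B       FIELD_B(0xf)

static void *set_field_a(void *unused)
{
        uint64_t val;

        (void)unused;
        val = idreg;                      /* read the whole register */
        val &= ~MASK_A;                   /* clear only field A */
        val |= FIELD_A(6);                /* set field A */
        idreg = val;                      /* write back: may overwrite B's update */
        return NULL;
}

static void *set_field_b(void *unused)
{
        uint64_t val;

        (void)unused;
        val = idreg;
        val &= ~MASK_B;
        val |= FIELD_B(3);
        idreg = val;                      /* write back: may overwrite A's update */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, set_field_a, NULL);
        pthread_create(&b, NULL, set_field_b, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* Both updates kept: 0x0306.  A lost update: 0x0006 or 0x0300. */
        printf("idreg = 0x%04llx\n", (unsigned long long)idreg);
        return 0;
}

A single run will almost always print 0x0306 because the two threads
rarely overlap, but nothing rules out the 0x0006 / 0x0300 interleavings.
Whether two vCPU threads can actually reach set_id_aa64dfr0_el1() or
set_id_dfr0_el1() concurrently for the same VM is the part I still need
to double-check.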