Re: [PATCH v10 3/5] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer

On Sun, May 28, 2023 at 3:53 AM Marc Zyngier <maz@xxxxxxxxxx> wrote:
>
> On Mon, 22 May 2023 23:18:33 +0100,
> Jing Zhang <jingzhangos@xxxxxxxxxx> wrote:
> >
> > With per-guest ID registers, the PMUver setting from userspace
> > can be stored in the corresponding ID register.
> >
> > No functional change intended.
> >
> > Signed-off-by: Jing Zhang <jingzhangos@xxxxxxxxxx>
> > ---
> >  arch/arm64/include/asm/kvm_host.h |  12 ++--
> >  arch/arm64/kvm/arm.c              |   6 --
> >  arch/arm64/kvm/sys_regs.c         | 100 ++++++++++++++++++++++++------
> >  include/kvm/arm_pmu.h             |   5 +-
> >  4 files changed, 92 insertions(+), 31 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 8a2fde6c04c4..7b0f43373dbe 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -246,6 +246,13 @@ struct kvm_arch {
> >  #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE           7
> >       /* SMCCC filter initialized for the VM */
> >  #define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED                8
> > +     /*
> > +      * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> > +      * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
> > +      * userspace for VCPUs without PMU.
> > +      */
> > +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU           9
> > +
> >       unsigned long flags;
> >
> >       /*
> > @@ -257,11 +264,6 @@ struct kvm_arch {
> >
> >       cpumask_var_t supported_cpus;
> >
> > -     struct {
> > -             u8 imp:4;
> > -             u8 unimp:4;
> > -     } dfr0_pmuver;
> > -
> >       /* Hypercall features firmware registers' descriptor */
> >       struct kvm_smccc_features smccc_feat;
> >       struct maple_tree smccc_filter;
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 5114521ace60..ca18c09ccf82 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -148,12 +148,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >       kvm_arm_init_hypercalls(kvm);
> >       kvm_arm_init_id_regs(kvm);
> >
> > -     /*
> > -      * Initialise the default PMUver before there is a chance to
> > -      * create an actual PMU.
> > -      */
> > -     kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> > -
> >       return 0;
> >
> >  err_free_cpumask:
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index 9fb1c2f8f5a5..84d9e4baa4f8 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -1178,9 +1178,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
> >  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
> >  {
> >       if (kvm_vcpu_has_pmu(vcpu))
> > -             return vcpu->kvm->arch.dfr0_pmuver.imp;
> > +             return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                              IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> > +     else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> > +             return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
> >
> > -     return vcpu->kvm->arch.dfr0_pmuver.unimp;
> > +     return 0;
> >  }
> >
> >  static u8 perfmon_to_pmuver(u8 perfmon)
> > @@ -1403,8 +1406,12 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >                              const struct sys_reg_desc *rd,
> >                              u64 val)
> >  {
> > +     struct kvm_arch *arch = &vcpu->kvm->arch;
> > +     u64 old_val = read_id_reg(vcpu, rd);
> >       u8 pmuver, host_pmuver;
> > +     u64 new_val = val;
> >       bool valid_pmu;
> > +     int ret = 0;
> >
> >       host_pmuver = kvm_arm_pmu_get_pmuver_limit();
> >
> > @@ -1424,26 +1431,51 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >       if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
> >               return -EINVAL;
> >
> > +     mutex_lock(&arch->config_lock);
> >       /* We can only differ with PMUver, and anything else is an error */
> > -     val ^= read_id_reg(vcpu, rd);
> > +     val ^= old_val;
> >       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > -     if (val)
> > -             return -EINVAL;
> > +     if (val) {
> > +             ret = -EINVAL;
> > +             goto out;
> > +     }
> >
> > -     if (valid_pmu)
> > -             vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> > -     else
> > -             vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> > +     /* Only allow userspace to change the idregs before VM running */
> > +     if (kvm_vm_has_ran_once(vcpu->kvm)) {
> > +             if (new_val != old_val)
> > +                     ret = -EBUSY;
> > +     } else {
> > +             if (valid_pmu) {
> > +                     val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> > +                     val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +                     val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
> > +                     IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > +
> > +                     val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> > +                     val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +                     val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
> > +                     IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > +             } else {
> > +                     assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                                pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> > +             }
> > +     }
> >
> > -     return 0;
> > +out:
> > +     mutex_unlock(&arch->config_lock);
> > +     return ret;
> >  }
> >
> >  static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >                          const struct sys_reg_desc *rd,
> >                          u64 val)
> >  {
> > +     struct kvm_arch *arch = &vcpu->kvm->arch;
> > +     u64 old_val = read_id_reg(vcpu, rd);
> >       u8 perfmon, host_perfmon;
> > +     u64 new_val = val;
> >       bool valid_pmu;
> > +     int ret = 0;
> >
> >       host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
> >
> > @@ -1464,18 +1496,39 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >       if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
> >               return -EINVAL;
> >
> > +     mutex_lock(&arch->config_lock);
> >       /* We can only differ with PerfMon, and anything else is an error */
> > -     val ^= read_id_reg(vcpu, rd);
> > +     val ^= old_val;
> >       val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> > -     if (val)
> > -             return -EINVAL;
> > +     if (val) {
> > +             ret = -EINVAL;
> > +             goto out;
> > +     }
> >
> > -     if (valid_pmu)
> > -             vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> > -     else
> > -             vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> > +     /* Only allow userspace to change the idregs before VM running */
> > +     if (kvm_vm_has_ran_once(vcpu->kvm)) {
> > +             if (new_val != old_val)
> > +                     ret = -EBUSY;
> > +     } else {
> > +             if (valid_pmu) {
> > +                     val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> > +                     val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +                     val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> > +                     IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > +
> > +                     val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> > +                     val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +                     val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, perfmon_to_pmuver(perfmon));
> > +                     IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > +             } else {
> > +                     assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                                perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> > +             }
> > +     }
>
> This is the exact same code as for aa64dfr0. Make it a helper, please.
Will do.
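Something like the below, maybe (untested sketch, the helper name is just
a placeholder; assumes the caller holds config_lock and has already
validated the PMUVer/PerfMon value):

/*
 * Propagate a single PMUVer value to both the AArch64 and the AArch32
 * debug ID registers, or latch the IMP_DEF flag for VMs without a PMU.
 */
static void pmuver_update(struct kvm *kvm, u8 pmuver, bool valid_pmu)
{
	u64 val;

	if (valid_pmu) {
		/* Keep ID_AA64DFR0_EL1.PMUVer and ID_DFR0_EL1.PerfMon in sync */
		val = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
		val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
		IDREG(kvm, SYS_ID_AA64DFR0_EL1) = val;

		val = IDREG(kvm, SYS_ID_DFR0_EL1);
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
		IDREG(kvm, SYS_ID_DFR0_EL1) = val;
	} else {
		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &kvm->arch.flags,
			   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
	}
}

set_id_aa64dfr0_el1() would pass pmuver directly, and set_id_dfr0_el1()
would pass perfmon_to_pmuver(perfmon), relying on perfmon_to_pmuver()
mapping PerfMon_IMPDEF to PMUVer_IMP_DEF so the flag update stays the
same on both paths.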
>
> >
> > -     return 0;
> > +out:
> > +     mutex_unlock(&arch->config_lock);
> > +     return ret;
> >  }
> >
> >  /*
> > @@ -3422,6 +3475,17 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
> >       }
> >
> >       IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> > +     /*
> > +      * Initialise the default PMUver before there is a chance to
> > +      * create an actual PMU.
> > +      */
> > +     val = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
> > +
> > +     val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +     val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                       kvm_arm_pmu_get_pmuver_limit());
> > +
> > +     IDREG(kvm, SYS_ID_AA64DFR0_EL1) = val;
> >  }
> >
> >  int __init kvm_sys_reg_table_init(void)
> > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > index 1a6a695ca67a..8d70dbdc1e0a 100644
> > --- a/include/kvm/arm_pmu.h
> > +++ b/include/kvm/arm_pmu.h
> > @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
> >  /*
> >   * Evaluates as true when emulating PMUv3p5, and false otherwise.
> >   */
> > -#define kvm_pmu_is_3p5(vcpu)                                         \
> > -     (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> > +#define kvm_pmu_is_3p5(vcpu)                                                                 \
> > +      (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),                                 \
> > +                 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
>
> This is getting unreadable. How about something like:
>
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 8d70dbdc1e0a..ecb55d87fa36 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -92,9 +92,13 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
>  /*
>   * Evaluates as true when emulating PMUv3p5, and false otherwise.
>   */
> -#define kvm_pmu_is_3p5(vcpu)                                                                   \
> -        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),                                 \
> -                   IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> +#define kvm_pmu_is_3p5(vcpu)   ({                                      \
> +       u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);                \
> +       u8 v;                                                           \
> +                                                                       \
> +       v = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val); \
> +       v >= ID_AA64DFR0_EL1_PMUVer_V3P5;                               \
> +})
>
>  u8 kvm_arm_pmu_get_pmuver_limit(void);
Sure, will use your suggestion.
>
> Thanks,
>
>         M.
>
> --
> Without deviation from the norm, progress is not possible.

Thanks,
Jing
