Nicholas Piggin <npiggin@xxxxxxxxx> writes:

> Guest LPCR depends on hardware type, and future changes will add
> restrictions based on errata and guest MMU mode. Move this logic
> to a common function and use it for the cases where the guest
> wants to update its LPCR (or the LPCR of a nested guest).
>
> Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>

Reviewed-by: Fabiano Rosas <farosas@xxxxxxxxxxxxx>

> ---
>  arch/powerpc/include/asm/kvm_book3s.h |  2 +
>  arch/powerpc/kvm/book3s_hv.c          | 60 ++++++++++++++++++---------
>  arch/powerpc/kvm/book3s_hv_nested.c   |  3 +-
>  3 files changed, 45 insertions(+), 20 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index 2f5f919f6cd3..3eec3ef6f083 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -258,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
>  extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
>  			struct kvm_memory_slot *memslot,
>  			unsigned long *map);
> +extern unsigned long kvmppc_filter_lpcr_hv(struct kvmppc_vcore *vc,
> +			unsigned long lpcr);
>  extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
>  			unsigned long mask);
>  extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 13bad6bf4c95..c4539c38c639 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -1635,6 +1635,27 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>
> +/*
> + * Enforce limits on guest LPCR values based on hardware availability,
> + * guest configuration, and possibly hypervisor support and security
> + * concerns.
> + */
> +unsigned long kvmppc_filter_lpcr_hv(struct kvmppc_vcore *vc, unsigned long lpcr)
> +{
> +	/* On POWER8 and above, userspace can modify AIL */
> +	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
> +		lpcr &= ~LPCR_AIL;
> +
> +	/*
> +	 * On POWER9, allow userspace to enable large decrementer for the
> +	 * guest, whether or not the host has it enabled.
> +	 */
> +	if (!cpu_has_feature(CPU_FTR_ARCH_300))
> +		lpcr &= ~LPCR_LD;
> +
> +	return lpcr;
> +}
> +
>  static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
>  		bool preserve_top32)
>  {
> @@ -1643,6 +1664,23 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
>  	u64 mask;
>
>  	spin_lock(&vc->lock);
> +
> +	/*
> +	 * Userspace can only modify
> +	 * DPFD (default prefetch depth), ILE (interrupt little-endian),
> +	 * TC (translation control), AIL (alternate interrupt location),
> +	 * LD (large decrementer).
> +	 * These are subject to restrictions from kvmppc_filter_lpcr_hv().
> +	 */
> +	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;
> +
> +	/* Broken 32-bit version of LPCR must not clear top bits */
> +	if (preserve_top32)
> +		mask &= 0xFFFFFFFF;
> +
> +	new_lpcr = kvmppc_filter_lpcr_hv(vc,
> +			(vc->lpcr & ~mask) | (new_lpcr & mask));
> +
>  	/*
>  	 * If ILE (interrupt little-endian) has changed, update the
>  	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
> @@ -1661,25 +1699,8 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
>  		}
>  	}
>
> -	/*
> -	 * Userspace can only modify DPFD (default prefetch depth),
> -	 * ILE (interrupt little-endian) and TC (translation control).
> -	 * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
> -	 */
> -	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
> -	if (cpu_has_feature(CPU_FTR_ARCH_207S))
> -		mask |= LPCR_AIL;
> -	/*
> -	 * On POWER9, allow userspace to enable large decrementer for the
> -	 * guest, whether or not the host has it enabled.
> -	 */
> -	if (cpu_has_feature(CPU_FTR_ARCH_300))
> -		mask |= LPCR_LD;
> +	vc->lpcr = new_lpcr;
>
> -	/* Broken 32-bit version of LPCR must not clear top bits */
> -	if (preserve_top32)
> -		mask &= 0xFFFFFFFF;
> -	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
>  	spin_unlock(&vc->lock);
>  }
>
> @@ -4641,8 +4662,9 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
>  		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
>  		if (!vc)
>  			continue;
> +
>  		spin_lock(&vc->lock);
> -		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
> +		vc->lpcr = kvmppc_filter_lpcr_hv(vc, (vc->lpcr & ~mask) | lpcr);
>  		spin_unlock(&vc->lock);
>  		if (++cores_done >= kvm->arch.online_vcores)
>  			break;
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 2fe1fea4c934..f7b441b3eb17 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -142,7 +142,8 @@ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
>  	 */
>  	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
>  		LPCR_LPES | LPCR_MER;
> -	hr->lpcr = (vc->lpcr & ~mask) | (hr->lpcr & mask);
> +	hr->lpcr = kvmppc_filter_lpcr_hv(vc,
> +			(vc->lpcr & ~mask) | (hr->lpcr & mask));
>
>  	/*
>  	 * Don't let L1 enable features for L2 which we've disabled for L1,
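
One note on the changelog's "future changes will add restrictions based on
errata and guest MMU mode": I'm assuming those follow-ups will simply grow
kvmppc_filter_lpcr_hv() with more clauses. A rough sketch of the shape I'd
expect, purely illustrative (the hash-guest UPRT clearing below is my own
made-up example, not something from this series):

unsigned long kvmppc_filter_lpcr_hv(struct kvmppc_vcore *vc, unsigned long lpcr)
{
	/* ... existing AIL/LD filtering from this patch ... */

	/*
	 * Hypothetical MMU-mode restriction: don't let a hash guest run
	 * with the radix-related UPRT bit set.
	 */
	if (!kvm_is_radix(vc->kvm))
		lpcr &= ~LPCR_UPRT;

	return lpcr;
}

Keeping all of that policy in the one helper rather than in each caller seems
like the right call, since the nested path and kvmppc_update_lpcr() now go
through it as well.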