On 19.02.2018 09:53, Christian Borntraeger wrote:
> Some facilities should only be provided to the guest if they are
> enabled by a CPU model. This allows us to avoid capabilities and
> simply fall back to the CPU model for deciding about a facility,
> without enabling it for older QEMUs or QEMUs without a CPU
> model.
> 
> Signed-off-by: Christian Borntraeger <borntraeger@xxxxxxxxxx>
> ---
>  arch/s390/kvm/kvm-s390.c         | 40 ++++++++++++++++++++++++++++++++--------
>  arch/s390/kvm/kvm-s390.h         |  2 --
>  arch/s390/tools/gen_facilities.c | 20 ++++++++++++++++++++
>  3 files changed, 52 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 5b7fe80cda56..7d982690b1fc 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -151,13 +151,27 @@ static int nested;
>  module_param(nested, int, S_IRUGO);
>  MODULE_PARM_DESC(nested, "Nested virtualization support");
>  
> -/* upper facilities limit for kvm */
> -unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
> 
> -unsigned long kvm_s390_fac_list_mask_size(void)
> +/*
> + * For now we handle not more than 16 double words as this is what
> + * the s390 base kernel handles and stores in the prefix page. If we
> + * ever need to go beyond this, this requires changes to code, but
> + * the external uapi can stay.
> + */
> +#define SIZE_INTERNAL 16
> +
> +/* upper facilities limit for kvm */
> +static unsigned long kvm_s390_fac_list_mask[SIZE_INTERNAL] = { FACILITIES_KVM };
> +/*
> + * facility bits that are in the mask, but initially not in the list.
> + * Those can be activated by the CPU model
> + */
> +static unsigned long kvm_s390_fac_model[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

_list_mask vs _model

Can we make this more consistent? (one possible scheme is sketched
further down)

> +
> +static unsigned long kvm_s390_fac_size(void)
>  {
> -	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
> -	return ARRAY_SIZE(kvm_s390_fac_list_mask);
> +	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
> +	return SIZE_INTERNAL;
>  }
>  
>  /* available cpu features supported by kvm */
> @@ -1946,17 +1960,27 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
>  	       sizeof(S390_lowcore.stfle_fac_list));
>  	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
> -		if (i < kvm_s390_fac_list_mask_size())
> -			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
> +		if (i < kvm_s390_fac_size())
> +			kvm->arch.model.fac_mask[i] &= (kvm_s390_fac_list_mask[i] |
> +							kvm_s390_fac_model[i]);
>  		else
>  			kvm->arch.model.fac_mask[i] = 0UL;
>  	}
>  
> -	/* Populate the facility list initially. */
> +	/* Populate the facility list initially as a copy of the mask */
>  	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
>  	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
>  	       S390_ARCH_FAC_LIST_SIZE_BYTE);
> 

If you need a loop either way, why not simply drop the memcpy and
instead do it explicitly in a single loop?
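For illustration, a rough, untested sketch of such a single loop,
reusing only identifiers from the quoted patch (the stfle memcpy into
fac_mask above would stay as-is):

	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_size()) {
			/* mask: everything KVM can offer, incl. model-gated bits */
			kvm->arch.model.fac_mask[i] &= (kvm_s390_fac_list_mask[i] |
							kvm_s390_fac_model[i]);
			/* list: model-gated facilities start out disabled */
			kvm->arch.model.fac_list[i] = kvm->arch.model.fac_mask[i] &
						      ~kvm_s390_fac_model[i];
		} else {
			kvm->arch.model.fac_mask[i] = 0UL;
			kvm->arch.model.fac_list[i] = 0UL;
		}
	}

That would keep the mask/list relationship visible in one place instead
of spreading it over a loop, a memcpy and a second loop.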
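And to make the naming comment further up concrete, purely as a
suggestion (both names are invented here, pick whatever you prefer):

	/* facilities offered unconditionally */
	static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
	/* facilities only offered via the CPU model */
	static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };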
> +	/*
> +	 * Disable facilities in the list that need CPU model support.
> +	 * Those are now in the mask but not in the list. Userspace can
> +	 * re-enable those that are in the mask via the CPU model.
> +	 */
> +	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++)
> +		if (i < kvm_s390_fac_size())
> +			kvm->arch.model.fac_list[i] &= ~kvm_s390_fac_model[i];
> +
>  	/* we are always in czam mode - even on pre z14 machines */
>  	set_kvm_facility(kvm->arch.model.fac_mask, 138);
>  	set_kvm_facility(kvm->arch.model.fac_list, 138);
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 3c0a975c2477..75efb6835acc 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -295,8 +295,6 @@ void exit_sie(struct kvm_vcpu *vcpu);
>  void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
>  int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
>  void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
> -unsigned long kvm_s390_fac_list_mask_size(void);
> -extern unsigned long kvm_s390_fac_list_mask[];
>  void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
>  __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
>  
> diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
> index 424a1ba4f874..37a77a3f280c 100644
> --- a/arch/s390/tools/gen_facilities.c
> +++ b/arch/s390/tools/gen_facilities.c
> @@ -62,6 +62,13 @@ static struct facility_def facility_defs[] = {
>  		}
>  	},
>  	{
> +		/*
> +		 * FACILITIES_KVM contains the list of facilities that are
> +		 * part of the default facility mask and list passed to the
> +		 * initial CPU model. If no CPU model is used, this, together
> +		 * with the non-hypervisor-managed bits, is the maximum list
> +		 * of guest facilities supported by KVM.
> +		 */
>  		.name = "FACILITIES_KVM",
>  		.bits = (int[]){
>  			0, /* N3 instructions */
> @@ -89,6 +96,19 @@ static struct facility_def facility_defs[] = {
>  			-1 /* END */
>  		}
>  	},
> +	{
> +		/*
> +		 * FACILITIES_KVM_CPUMODEL contains the list of facilities
> +		 * that can be enabled by CPU model code if the host supports
> +		 * them. These facilities are not passed to the guest without
> +		 * CPU model support.
> +		 */
> +
> +		.name = "FACILITIES_KVM_CPUMODEL",
> +		.bits = (int[]){
> +			-1 /* END */
> +		}
> +	},
>  };
>  
>  static void print_facility_list(struct facility_def *def)
> 

General idea sounds sane. If old user space already uses CPU models, it
will only enable a sane subset - as they are filtered. New user space
can enable new CPU models. If user space doesn't use CPU models, only a
sane subset will be enabled.

-- 
Thanks,

David / dhildenb