>>> +	}
>>> +	mutex_unlock(&kvm->lock);
>>> +}
>>> +
>>> +static int kvm_s390_set_misc(struct kvm *kvm, struct kvm_device_attr *attr)
>>> +{
>>> +	int ret;
>>> +	u64 cpc;
>>> +
>>> +	switch (attr->attr) {
>>> +	case KVM_S390_VM_MISC_CPC:
>>> +		ret = -EFAULT;
>>> +		if (get_user(cpc, (u64 __user *)attr->addr))
>>> +			break;
>>> +		kvm_s390_set_cpc(kvm, cpc);
>>> +		ret = 0;
>>> +		break;
>>> +	default:
>>> +		ret = -ENXIO;
>>> +		break;
>>> +	}
>>> +	return ret;
>>> +}
>>> +
>>> +static int kvm_s390_get_cpc(struct kvm *kvm, struct kvm_device_attr *attr)
>>> +{
>>> +	u64 cpc = kvm->arch.diag318_info.cpc;
>>
>> We could have a possible race with a guest VCPU here, but I guess we
>> don't care.
>>
>
> Better safe than sorry? I'll see how another sync request looks here.
>

Sync requests most probably don't make sense here. I guess we can leave
the code as is. User space won't be able to recognize the difference
either way (as long as the cpc is updated in one shot). A VCPU could
update just before or just after the call.

-- 
Thanks,

David / dhildenb
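
[Editorial note: for illustration, a minimal sketch of how the get path
could stay tear-free without a sync request, assuming a READ_ONCE() of the
aligned 64-bit field is enough here. The READ_ONCE() and the put_user()
error handling are assumptions for the sketch; only the first line of the
body appears in the quoted patch.]

	static int kvm_s390_get_cpc(struct kvm *kvm, struct kvm_device_attr *attr)
	{
		/*
		 * One-shot read (assumption): a naturally aligned u64 is
		 * loaded in a single access on 64-bit, so a concurrent VCPU
		 * update lands entirely before or entirely after this load,
		 * and user space cannot tell the difference either way.
		 */
		u64 cpc = READ_ONCE(kvm->arch.diag318_info.cpc);

		if (put_user(cpc, (u64 __user *)attr->addr))
			return -EFAULT;
		return 0;
	}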