Re: [RFC PATCH v2 2/2] KVM: s390: Extend the USER_SIGP capability

On 02.11.21 20:46, Eric Farman wrote:
> With commit 2444b352c3ac ("KVM: s390: forward most SIGP orders to user
> space") we have a capability that allows the "fast" SIGP orders (as
> defined by the Programming Notes for the SIGNAL PROCESSOR instruction in
> the Principles of Operation) to be handled in-kernel, while all others are
> sent to userspace for processing.
> 
> This works fine but it creates a situation where, for example, a SIGP SENSE
> might return CC1 (STATUS STORED, and status bits indicating the vcpu is
> stopped), when in actuality userspace is still processing a SIGP STOP AND
> STORE STATUS order, and the vcpu is not yet actually stopped. Thus, the
> SIGP SENSE should actually be returning CC2 (busy) instead of CC1.
> 
> To fix this, add another CPU capability, dependent on the USER_SIGP one,
> that will mark a vcpu as "busy" processing a SIGP order, and a
> corresponding IOCTL that userspace can call to indicate it has finished
> its work and the SIGP operation is completed.
> 
> Signed-off-by: Eric Farman <farman@xxxxxxxxxxxxx>
> ---
>  arch/s390/include/asm/kvm_host.h |  2 ++
>  arch/s390/kvm/kvm-s390.c         | 18 ++++++++++++++
>  arch/s390/kvm/kvm-s390.h         | 10 ++++++++
>  arch/s390/kvm/sigp.c             | 40 ++++++++++++++++++++++++++++++++
>  4 files changed, 70 insertions(+)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index a604d51acfc8..bd202bb3acb5 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -746,6 +746,7 @@ struct kvm_vcpu_arch {
>  	__u64 cputm_start;
>  	bool gs_enabled;
>  	bool skey_enabled;
> +	atomic_t sigp_busy;
>  	struct kvm_s390_pv_vcpu pv;
>  	union diag318_info diag318_info;
>  };
> @@ -941,6 +942,7 @@ struct kvm_arch{
>  	int user_sigp;
>  	int user_stsi;
>  	int user_instr0;
> +	int user_sigp_busy;
>  	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
>  	wait_queue_head_t ipte_wq;
>  	int ipte_lock_count;
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 5f52e7eec02f..ff23a46288cc 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -564,6 +564,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>  	case KVM_CAP_S390_VCPU_RESETS:
>  	case KVM_CAP_SET_GUEST_DEBUG:
>  	case KVM_CAP_S390_DIAG318:
> +	case KVM_CAP_S390_USER_SIGP_BUSY:
>  		r = 1;
>  		break;
>  	case KVM_CAP_SET_GUEST_DEBUG2:
> @@ -706,6 +707,15 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
>  		kvm->arch.user_sigp = 1;
>  		r = 0;
>  		break;
> +	case KVM_CAP_S390_USER_SIGP_BUSY:
> +		r = -EINVAL;
> +		if (kvm->arch.user_sigp) {
> +			kvm->arch.user_sigp_busy = 1;
> +			r = 0;
> +		}
> +		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_USER_SIGP_BUSY %s",
> +			 r ? "(not available)" : "(success)");
> +		break;
>  	case KVM_CAP_S390_VECTOR_REGISTERS:
>  		mutex_lock(&kvm->lock);
>  		if (kvm->created_vcpus) {
> @@ -4825,6 +4835,14 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
>  			return -EINVAL;
>  		return kvm_s390_inject_vcpu(vcpu, &s390irq);
>  	}
> +	case KVM_S390_VCPU_RESET_SIGP_BUSY: {
> +		if (!vcpu->kvm->arch.user_sigp_busy)
> +			return -EFAULT;
> +
> +		VCPU_EVENT(vcpu, 3, "SIGP: CPU %x reset busy", vcpu->vcpu_id);
> +		kvm_s390_vcpu_clear_sigp_busy(vcpu);
> +		return 0;
> +	}
>  	}
>  	return -ENOIOCTLCMD;
>  }
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index c07a050d757d..9ce97832224b 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -82,6 +82,16 @@ static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
>  	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
>  }
>  
> +static inline bool kvm_s390_vcpu_set_sigp_busy(struct kvm_vcpu *vcpu)
> +{
> +	return (atomic_cmpxchg(&vcpu->arch.sigp_busy, 0, 1) == 0);
> +}
> +
> +static inline void kvm_s390_vcpu_clear_sigp_busy(struct kvm_vcpu *vcpu)
> +{
> +	atomic_set(&vcpu->arch.sigp_busy, 0);
> +}
> +
>  static inline int kvm_is_ucontrol(struct kvm *kvm)
>  {
>  #ifdef CONFIG_KVM_S390_UCONTROL
> diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
> index 5ad3fb4619f1..034ea72e098a 100644
> --- a/arch/s390/kvm/sigp.c
> +++ b/arch/s390/kvm/sigp.c
> @@ -341,9 +341,42 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
>  			   "sigp order %u -> cpu %x: handled in user space",
>  			   order_code, dst_vcpu->vcpu_id);
>  
> +	kvm_s390_vcpu_clear_sigp_busy(dst_vcpu);
> +
>  	return rc;
>  }
>  
> +static int handle_sigp_order_busy(struct kvm_vcpu *vcpu, u8 order_code,
> +				  u16 cpu_addr)
> +{
> +	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
> +
> +	if (!vcpu->kvm->arch.user_sigp_busy)
> +		return 0;
> +
> +	/*
> +	 * Just see if the target vcpu exists; the CC3 will be set wherever
> +	 * the SIGP order is processed directly.
> +	 */
> +	if (!dst_vcpu)
> +		return 0;
> +
> +	/* Reset orders will be accepted, regardless if target vcpu is busy */
> +	if (order_code == SIGP_INITIAL_CPU_RESET ||
> +	    order_code == SIGP_CPU_RESET)
> +		return 0;
> +
> +	/* Orders that affect multiple vcpus should not flag one vcpu busy */
> +	if (order_code == SIGP_SET_ARCHITECTURE)
> +		return 0;
> +
> +	/* If this fails, the vcpu is already busy processing another SIGP */
> +	if (!kvm_s390_vcpu_set_sigp_busy(dst_vcpu))
> +		return -EBUSY;
> +
> +	return 0;
> +}
> +
>  static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
>  					   u16 cpu_addr)
>  {
> @@ -408,6 +441,13 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
>  	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
> +
> +	rc = handle_sigp_order_busy(vcpu, order_code, cpu_addr);
> +	if (rc) {
> +		kvm_s390_set_psw_cc(vcpu, SIGP_CC_BUSY);
> +		return 0;
> +	}
> +
>  	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
>  		return -EOPNOTSUPP;


After looking at the QEMU side, I wonder if we should instead:

a) Let user space always set/reset SIGP busy. Don't set/reset it in the
   kernel automatically. All "heavy weight" SIGP orders are carried out
   in user space nowadays either way.
b) Reject all in-kernel SIGP orders targeting a CPU that user space
   marked BUSY (e.g., SIGP SENSE).
c) Don't reject, in the kernel, SIGP orders that will be handled in
   QEMU anyway. Just let user space deal with them -- especially the
   "problematic" ones like RESET and SET_ARCHITECTURE.

For example, we don't care about concurrent SIGP SENSE. We only care
about "lightweight" SIGP orders racing with concurrent "heavy weight"
ones -- see the sketch below.

This should simplify this code and avoid having to clear the BUSY
flag in QEMU (that might be bogus) when detecting another BUSY situation
(the trylock thingy, see my QEMU reply). The downside is that we have to
issue yet another IOCTL to set the CPU busy for SIGP -- not sure if we
really care.
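
The QEMU side would then be symmetric. KVM_S390_VCPU_SET_SIGP_BUSY is a
made-up name here, as the counterpart to the RESET ioctl from this patch:

	/* hypothetical: mark the target CPU busy before the heavy weight order */
	kvm_vcpu_ioctl(CPU(dst_cpu), KVM_S390_VCPU_SET_SIGP_BUSY, NULL);

	/* ... carry out e.g. SIGP STOP AND STORE STATUS in user space ... */

	/* done: in-kernel orders like SIGP SENSE no longer see CC2 (busy) */
	kvm_vcpu_ioctl(CPU(dst_cpu), KVM_S390_VCPU_RESET_SIGP_BUSY, NULL);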

-- 
Thanks,

David / dhildenb



