Re: [PATCH 2/5] drm/syncobj: add sync obj wait interface. (v4)

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Jun 01, 2017 at 11:06:40AM +1000, Dave Airlie wrote:
> From: Dave Airlie <airlied@xxxxxxxxxx>
> 
> This interface will allow sync object to be used to back
> Vulkan fences. This API is pretty much the vulkan fence waiting
> API, and I've ported the code from amdgpu.
> 
> v2: accept relative timeout, pass remaining time back
> to userspace.
> v3: return to absolute timeouts.
> v4: absolute zero = poll,
>     rewrite any/all code to have same operation for arrays
>     return -EINVAL for 0 fences.
> 
> Signed-off-by: Dave Airlie <airlied@xxxxxxxxxx>
> ---
>  drivers/gpu/drm/drm_internal.h |   2 +
>  drivers/gpu/drm/drm_ioctl.c    |   2 +
>  drivers/gpu/drm/drm_syncobj.c  | 129 +++++++++++++++++++++++++++++++++++++++++
>  include/uapi/drm/drm.h         |  14 +++++
>  4 files changed, 147 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
> index 3fdef2c..53e3f6b 100644
> --- a/drivers/gpu/drm/drm_internal.h
> +++ b/drivers/gpu/drm/drm_internal.h
> @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
>  				   struct drm_file *file_private);
>  int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
>  				   struct drm_file *file_private);
> +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
> +			   struct drm_file *file_private);
> diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
> index f1e5681..385ce74 100644
> --- a/drivers/gpu/drm/drm_ioctl.c
> +++ b/drivers/gpu/drm/drm_ioctl.c
> @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
>  		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
>  	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
>  		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
> +		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
>  };
>  
>  #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> index 23bb197..ec462bd 100644
> --- a/drivers/gpu/drm/drm_syncobj.c
> +++ b/drivers/gpu/drm/drm_syncobj.c
> @@ -1,5 +1,7 @@
>  /*
>   * Copyright 2017 Red Hat
> + * Parts ported from amdgpu (fence wait code).
> + * Copyright 2016 Advanced Micro Devices, Inc.
>   *
>   * Permission is hereby granted, free of charge, to any person obtaining a
>   * copy of this software and associated documentation files (the "Software"),
> @@ -31,6 +33,9 @@
>   * that contain an optional fence. The fence can be updated with a new
>   * fence, or be NULL.
>   *
> + * syncobj's can be waited upon, where it will wait for the underlying
> + * fence.
> + *
>   * syncobj's can be export to fd's and back, these fd's are opaque and
>   * have no other use case, except passing the syncobj between processes.
>   *
> @@ -380,3 +385,127 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
>  	return drm_syncobj_fd_to_handle(file_private, args->fd,
>  					&args->handle);
>  }
> +
> +
> +/**
> + * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
> + *
> + * @timeout_ns: timeout in ns, 0 for poll
> + *
> + * Calculate the timeout in jiffies from an absolute timeout in ns.
> + */
> +static unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns)
> +{
> +	unsigned long timeout_jiffies;
> +	ktime_t timeout;
> +
> +	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
> +	if (timeout_ns == 0)
> +		return 0;
> +
> +	/* clamp timeout if it's to large */

	/* Negative timeout means to wait forever */
> +	if (((int64_t)timeout_ns) < 0)
> +		return MAX_SCHEDULE_TIMEOUT;
> +
> +	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
> +	if (ktime_to_ns(timeout) < 0)
> +		return 0;
> +
> +	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
> +	/*  clamp timeout to avoid infinite timeout */
> +	if (timeout_jiffies >= MAX_SCHEDULE_TIMEOUT)
> +		return MAX_SCHEDULE_TIMEOUT - 1;
> +
> +	return timeout_jiffies + 1;

timeout_jiffies == MAX_SCHEDULE_TIMEOUT - 1 slips past the clamp above, so the
`timeout_jiffies + 1` here returns MAX_SCHEDULE_TIMEOUT, i.e. an infinite wait.

/* clamp timeout to avoid infinite timeout */
return min(timeout_jiffies + 1, MAX_SCHEDULE_TIMEOUT - 1);

Hmm, except that nsecs_to_jiffies doesn't apply a clamp so we may
overflow with timeout_jiffies + 1.

 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout
 * value.

Hmm, it is also EXPORT_SYMBOL_GPL which may cause a conflict with us
exporting syncobj as EXPORT_SYMBOL ? Funnily it is just a wrapper around
nsecs_to_jiffies64 which is !GPL compatible! So much for being scheduler
only.

> +}
> +
> +static int drm_syncobj_wait_fences(struct drm_device *dev,
> +				   struct drm_file *file_private,
> +				   struct drm_syncobj_wait *wait,
> +				   struct dma_fence **fences)
> +{
> +	unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
> +	int ret = 0;
> +	uint32_t first = ~0;
> +
> +	if (wait->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
> +		int i;
> +		for (i = 0; i < wait->count_handles; i++) {
> +			ret = dma_fence_wait_timeout(fences[i], true, timeout);
> +
> +			if (ret < 0)
> +				return ret;
> +			if (ret == 0)
> +				break;

With current dma_fence_default_wait and polling with timeout=0:
  1st fence busy, returns 1
  2+ fence idle, returns 1
wait->out_timeout_ns is set to 10ms

Or
  1st fence busy, returns 1
  2nd fence busy, returns 0
Takes 10ms for a poll.

Nevertheless the syncobj code is correct (just maybe appease checkpatch
about the unbalanced if {} else).

> +			timeout = ret;
> +		}
> +		first = 0;
> +	} else
> +		ret = dma_fence_wait_any_timeout(fences,
> +						 wait->count_handles,
> +						 true, timeout,
> +						 &first);
> +	if (ret < 0)
> +		return ret;
> +
> +	wait->out_timeout_ns = jiffies_to_nsecs(ret);
> +	wait->out_status = (ret > 0);
> +	wait->first_signaled = first;

> +	/* set return value 0 to indicate success */

Feels a little superfluous.

> +	return 0;
> +}
> +
> +int
> +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
> +		       struct drm_file *file_private)
> +{
> +	struct drm_syncobj_wait *args = data;
> +	uint32_t *handles;
> +	struct dma_fence **fences;
> +	int ret = 0;
> +	int i;
> +
> +	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
> +		return -ENODEV;
> +
> +	if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
> +		return -EINVAL;
> +
> +	if (args->count_handles == 0)
> +		return -EINVAL;
> +
> +	/* Get the handles from userspace */
> +	handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
> +				GFP_KERNEL);
> +	if (handles == NULL)
> +		return -ENOMEM;
> +
> +	if (copy_from_user(handles,
> +			   (void __user *)(unsigned long)(args->handles),

u64_to_user_ptr(args->handles)

> +			   sizeof(uint32_t) * args->count_handles)) {
> +		ret = -EFAULT;
> +		goto err_free_handles;
> +	}
> +
> +	fences = kcalloc(args->count_handles,
> +			 sizeof(struct dma_fence *), GFP_KERNEL);

if (!fences) /* blah */

> +
> +	for (i = 0; i < args->count_handles; i++) {
> +		ret = drm_syncobj_fence_get(file_private, handles[i],
> +					    &fences[i]);
> +		if (ret)
> +			goto err_free_fence_array;
> +	}
> +
> +	ret = drm_syncobj_wait_fences(dev, file_private,
> +				      args, fences);
> +
> +err_free_fence_array:
> +	for (i = 0; i < args->count_handles; i++)
> +		dma_fence_put(fences[i]);
> +	kfree(fences);
> +err_free_handles:
> +	kfree(handles);
> +
> +	return ret;
> +}

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel




[Index of Archives]     [Linux DRI Users]     [Linux Intel Graphics]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [XFree86]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux