+ /* If userqueues are active, suspend userqueues */
+ schedule_delayed_work(&uq_mgr->suspend_work, 0);
+ else
+ /* Else just signal and replace eviction fence */
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+
+ return true;
+}
+
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
.use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
};
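For orientation: the dma-fence core calls .enable_signaling the first time a
waiter blocks on the fence, so a TTM eviction attempt that waits on the
eviction fence is what kicks the suspend work above. A minimal sketch using
core dma-fence calls (illustrative, not part of this patch):

    /* any waiter enables signaling on the eviction fence; the callback
     * runs with the fence spinlock held, so it defers to a worker
     * instead of blocking */
    struct dma_fence *f = dma_fence_get(&ev_fence->base);

    dma_fence_enable_sw_signaling(f); /* invokes ops->enable_signaling once */
    dma_fence_wait(f, false);         /* also enables signaling, then sleeps */
    dma_fence_put(f);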
int amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr)
@@ -59,11 +203,14 @@ struct amdgpu_eviction_fence *
amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
struct amdgpu_eviction_fence *ev_fence;
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
if (!ev_fence)
return NULL;
+ ev_fence->uq_mgr = uq_mgr;
get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock);
dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
@@ -143,6 +290,8 @@ void amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
goto unlock;
}
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+
unlock:
spin_unlock(&evf_mgr->ev_fence_lock);
}
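Because .enable_signaling runs in atomic context (the fence spinlock is
held), the actual suspend work has to be deferred; the INIT_DELAYED_WORK /
schedule_delayed_work pairing added above is the standard pattern.
Condensed, with names from this patch:

    /* bind the handler once at init time ... */
    INIT_DELAYED_WORK(&evf_mgr->suspend_work,
                      amdgpu_eviction_fence_suspend_worker);
    /* ... then the atomic fence callback only queues it: */
    schedule_delayed_work(&evf_mgr->suspend_work, 0); /* 0 = run ASAP */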
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
index b47ab1307ec5..712fabf09fc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -37,6 +37,8 @@ struct amdgpu_eviction_fence_mgr {
atomic_t ev_fence_seq;
spinlock_t ev_fence_lock;
struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ bool eviction_allowed;
};
/* Eviction fence helper functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e7fb13e20197..88f3a885b1dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1434,6 +1434,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
u32 pasid;
@@ -1466,6 +1467,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(pd);
}
+ evf_mgr->eviction_allowed = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
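eviction_allowed is only set here, on file close; a plausible consumer (an
assumption, the real user is outside the quoted hunks) is the suspend path,
which can then skip the userqueue round-trip during teardown:

    /* hypothetical early-out in the suspend/eviction path */
    if (evf_mgr->eviction_allowed)
        return; /* fd is closing, queues are being destroyed anyway */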
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 614953b0fc19..4cf65aba9a9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -455,10 +455,18 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
if (r)
goto exec_fini;
- for (i = 0; i < num_bo_handles; i++)
+ /* Save the fence to wait for during suspend */
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+
+ for (i = 0; i < num_bo_handles; i++) {
+ if (!gobj || !gobj[i]->resv)
+ continue;
+
dma_resv_add_fence(gobj[i]->resv, fence,
dma_resv_usage_rw(args->bo_flags &
AMDGPU_USERQ_BO_WRITE));
+ }
/* Add the created fence to syncobj/BO's */
for (i = 0; i < num_syncobj_handles; i++)
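The put-then-get sequence added above is the usual dma-fence pointer
replacement idiom: dma_fence_put() is NULL-safe, so it also covers the very
first submission, and the extra reference keeps last_fence valid for the
suspend worker after the ioctl returns. The idiom in isolation:

    /* replace a cached fence: drop the old one (NULL-safe), hold the new */
    dma_fence_put(queue->last_fence);
    queue->last_fence = dma_fence_get(fence);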
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index ba986d55ceeb..979174f80993 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -22,6 +22,7 @@
*
*/
#include <drm/drm_syncobj.h>
+#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_userqueue.h"
@@ -282,6 +283,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unpin(queue->db_obj.obj);
amdgpu_bo_unref(&queue->db_obj.obj);
amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id);
+ uq_mgr->num_userqs--;
mutex_unlock(&uq_mgr->userq_mutex);
return 0;
}
@@ -369,6 +371,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
args->out.queue_id = qid;
+ uq_mgr->num_userqs++;
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
@@ -402,12 +405,109 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
return r;
}
+static int
+amdgpu_userqueue_suspend_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret = 0;
+
+ userq_funcs = adev->userq_funcs[AMDGPU_HW_IP_GFX];
+
+ /* Suspend all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ ret = userq_funcs->suspend(uq_mgr, queue);
+ if (ret)
+ DRM_ERROR("Failed to suspend queue\n");
+ }
+
+ return ret;
+}
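Two things worth noting above: the suspend hook is resolved through the
per-IP table adev->userq_funcs[AMDGPU_HW_IP_GFX], and idr_for_each_entry()
assumes the caller holds the lock guarding the IDR. The worker satisfies
that by taking userq_mutex before calling in:

    /* caller context (from the suspend worker below): */
    mutex_lock(&uq_mgr->userq_mutex);
    ret = amdgpu_userqueue_suspend_all(uq_mgr);
    mutex_unlock(&uq_mgr->userq_mutex);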
+
+static int
+amdgpu_userqueue_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f;
+ struct drm_exec exec;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ f = queue->last_fence;
+ drm_exec_retry_on_contention(&exec);
+ }
+ drm_exec_fini(&exec);
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ DRM_ERROR("Timed out waiting for fence f=%p\n", f);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
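For reference, dma_fence_wait_timeout() returns the remaining timeout in
jiffies (> 0) when the fence signals, 0 on timeout, and a negative errno
such as -ERESTARTSYS when an interruptible wait is interrupted; the
ret <= 0 test above therefore reports an interrupted wait as a timeout too.
The convention in isolation:

    long t = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));

    if (t == 0)
        return -ETIMEDOUT; /* genuine timeout */
    if (t < 0)
        return t;          /* e.g. -ERESTARTSYS: interrupted, not timed out */
    /* t > 0: fence signaled with t jiffies of the budget left */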
+
+static void
+amdgpu_userqueue_suspend_worker(struct work_struct *work)
+{
+ int ret;
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence to signal */
+ ret = amdgpu_userqueue_wait_for_signal(uq_mgr);
+ if (ret) {
+ DRM_ERROR("Not suspending userqueue, timeout waiting for
work\n");
+ return;
+ }
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ ret = amdgpu_userqueue_suspend_all(uq_mgr);
+ if (ret) {
+ DRM_ERROR("Failed to evict userqueue\n");
+ goto unlock;
+ }
+
+ /* Signal current eviction fence */
+ ret = amdgpu_eviction_fence_signal(evf_mgr);
+ if (ret) {
+ DRM_ERROR("Can't signal eviction fence to suspend\n");
+ goto unlock;
+ }
+
+ /* Cleanup old eviction fence entry */
+ amdgpu_eviction_fence_destroy(evf_mgr);
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
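Read top to bottom, the worker is the suspend half of the eviction
protocol, always in the same order; condensed restatement of the calls
above, error handling elided:

    amdgpu_userqueue_wait_for_signal(uq_mgr);  /* 1. drain last_fence */
    mutex_lock(&uq_mgr->userq_mutex);
    amdgpu_userqueue_suspend_all(uq_mgr);      /* 2. per-queue ->suspend() */
    amdgpu_eviction_fence_signal(evf_mgr);     /* 3. unblock fence waiters */
    amdgpu_eviction_fence_destroy(evf_mgr);    /* 4. retire the old fence */
    mutex_unlock(&uq_mgr->userq_mutex);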
+
int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_device *adev)
{
+ struct amdgpu_fpriv *fpriv;
+
mutex_init(&userq_mgr->userq_mutex);
idr_init_base(&userq_mgr->userq_idr, 1);
userq_mgr->adev = adev;
+ userq_mgr->num_userqs = 0;
+
+ fpriv = uq_mgr_to_fpriv(userq_mgr);
+ if (!fpriv->evf_mgr.ev_fence) {
+ DRM_ERROR("Eviction fence not initialized yet\n");
+ return -EINVAL;
+ }
+ /* This reference is required for suspend work */
+ fpriv->evf_mgr.ev_fence->uq_mgr = userq_mgr;
+ INIT_DELAYED_WORK(&userq_mgr->suspend_work, amdgpu_userqueue_suspend_worker);
return 0;
}
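The ev_fence check above creates an implicit ordering requirement at open
time: the eviction fence manager must be initialized before the userqueue
manager. Sketch of the expected caller (the exact open-path code is an
assumption, it is not quoted in this patch):

    /* hypothetical amdgpu_driver_open_kms() excerpt */
    amdgpu_eviction_fence_init(&fpriv->evf_mgr);        /* creates ev_fence */
    r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, adev);
    if (r)
        goto error_fence; /* ev_fence missing, userqueues unusable */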
diff --git a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
index 37be29048f42..8b3b50fa8b5b 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
@@ -27,6 +27,10 @@
#define AMDGPU_MAX_USERQ_COUNT 512
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+
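All three helpers rely on struct embedding: amdgpu_fpriv contains userq_mgr
(and evf_mgr) by value, so container_of() can recover the enclosing per-fd
object from a member pointer without any back-pointer field. Typical use,
as in the suspend worker above:

    /* from the work item back to the per-fd state */
    struct amdgpu_userq_mgr *uq_mgr =
        work_to_uq_mgr(work, suspend_work.work);
    struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);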
struct amdgpu_mqd_prop;
struct amdgpu_userq_obj {
@@ -50,6 +54,7 @@ struct amdgpu_usermode_queue {
struct amdgpu_userq_obj wptr_obj;
struct xarray uq_fence_drv_xa;
struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
};
struct amdgpu_userq_funcs {
@@ -69,6 +74,9 @@ struct amdgpu_userq_mgr {
struct idr userq_idr;
struct mutex userq_mutex;
struct amdgpu_device *adev;
+
+ struct delayed_work suspend_work;
+ int num_userqs;
};
int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct
drm_file *filp);
@@ -86,4 +94,6 @@ void amdgpu_userqueue_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
int amdgpu_userqueue_update_bo_mapping(struct drm_file *filp,
struct amdgpu_bo_va *bo_va,
uint32_t operation, uint32_t syncobj_handle,
uint64_t point);
+
+int amdgpu_userqueue_enable_signaling(struct dma_fence *f);
#endif