diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 65cb1d6a5d64..efa9572fc217 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -695,13 +695,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
- list_for_each_entry(req, &ring->request_list, list)
+ list_for_each_entry(req, &ring->request_list, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", ring->name, count);
- list_for_each_entry(req, &ring->request_list, list) {
+ list_for_each_entry(req, &ring->request_list, link) {
struct task_struct *task;
rcu_read_lock();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77c253ddf060..f314b3ea2726 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2183,7 +2183,7 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &ring->request_list, link) {
if (i915_gem_request_completed(request))
continue;
@@ -2208,7 +2208,7 @@ static void i915_gem_reset_ring_status(struct intel_engine_cs *ring)
i915_set_reset_status(dev_priv, request->ctx, ring_hung);
- list_for_each_entry_continue(request, &ring->request_list, list)
+ list_for_each_entry_continue(request, &ring->request_list, link)
i915_set_reset_status(dev_priv, request->ctx, false);
}
@@ -2255,7 +2255,7 @@ static void i915_gem_reset_ring_cleanup(struct intel_engine_cs *engine)
request = list_last_entry(&engine->request_list,
struct drm_i915_gem_request,
- list);
+ link);
i915_gem_request_retire_upto(request);
}
@@ -2317,7 +2317,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
- list);
+ link);
if (!i915_gem_request_completed(request))
break;
@@ -2336,7 +2336,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_object,
ring_list[ring->id]);
- if (!list_empty(&obj->last_read[ring->id].request->list))
+ if (!list_empty(&obj->last_read[ring->id].request->link))
break;
i915_gem_object_retire__read(obj, ring->id);
@@ -2449,7 +2449,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
+ if (list_empty(&req->link))
goto retire;
if (i915_gem_request_completed(req)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 01443d8d9224..7f38d8972721 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -333,7 +333,7 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
trace_i915_gem_request_retire(request);
- list_del_init(&request->list);
+ list_del_init(&request->link);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -355,12 +355,12 @@ i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
lockdep_assert_held(&engine->dev->struct_mutex);
- if (list_empty(&req->list))
+ if (list_empty(&req->link))
return;
do {
tmp = list_first_entry(&engine->request_list,
- typeof(*tmp), list);
+ typeof(*tmp), link);
i915_gem_request_retire(tmp);
} while (tmp != req);
@@ -451,7 +451,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->emitted_jiffies = jiffies;
request->previous_seqno = request->engine->last_submitted_seqno;
request->engine->last_submitted_seqno = request->fence.seqno;
- list_add_tail(&request->list, &request->engine->request_list);
+ list_add_tail(&request->link, &request->engine->request_list);
trace_i915_gem_request_add(request);
@@ -565,7 +565,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
might_sleep();
- if (list_empty(&req->list))
+ if (list_empty(&req->link))
return 0;
if (i915_gem_request_completed(req))
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 0a21986c332b..01d589be95fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -88,8 +88,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** global list entry for this request */
- struct list_head list;
+ /** engine->request_list entry for this request */
+ struct list_head link;
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5027636e3624..c812079bc25c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1056,7 +1056,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
- list_for_each_entry(request, &engine->request_list, list)
+ list_for_each_entry(request, &engine->request_list, link)
count++;
error->ring[i].num_requests = count;
@@ -1069,7 +1069,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(request, &engine->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, link) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d37cdb2f9073..213540f92c9d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2109,7 +2109,7 @@ int intel_engine_idle(struct intel_engine_cs *ring)
req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list);
+ link);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
@@ -2184,7 +2184,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
/* The whole point of reserving space is to not wait! */
WARN_ON(ring->reserved_in_use);
- list_for_each_entry(target, &engine->request_list, list) {
+ list_for_each_entry(target, &engine->request_list, link) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
@@ -2200,7 +2200,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
break;
}
- if (WARN_ON(&target->list == &engine->request_list))
+ if (WARN_ON(&target->link == &engine->request_list))
return -ENOSPC;
ret = i915_wait_request(target);
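
The rename above is purely mechanical: the embedded list_head in struct drm_i915_gem_request is now named for its role as a link in the per-engine engine->request_list, rather than the generic "list". As a quick illustration of the post-rename iteration pattern, here is a minimal sketch modelled on i915_gem_find_active_request() from the hunk above; it assumes the patched struct layout and the one-argument i915_gem_request_completed() shown in this series, and the helper name first_incomplete_request() is hypothetical, not part of the driver.

    /*
     * Sketch only: walk the per-engine request list via the renamed
     * req->link member and return the oldest request the GPU has not
     * yet completed. Assumes the i915 headers from the patched tree
     * (i915_drv.h pulls in <linux/list.h>).
     */
    static struct drm_i915_gem_request *
    first_incomplete_request(struct intel_engine_cs *engine)
    {
    	struct drm_i915_gem_request *req;

    	/* engine->request_list is chained through req->link after the rename */
    	list_for_each_entry(req, &engine->request_list, link) {
    		if (!i915_gem_request_completed(req))
    			return req;
    	}

    	return NULL;
    }

Requests retire in submission order, so the first entry that has not completed is the oldest outstanding one; callers that only care about idleness can simply test the return value against NULL.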