Re: [PATCH 27/62] drm/i915: Rename request->ringbuf to request->ring

On Mon, Jun 06, 2016 at 02:44:41PM +0100, Tvrtko Ursulin wrote:
> 
> On 03/06/16 17:36, Chris Wilson wrote:
> > Now that we have disambiguated ring and engine, we can use the clearer
> > and more consistent name for the intel_ringbuffer pointer in the
> > request.
> 
> This one needs all the stakeholders to agree on the rename. As before, I
> am not convinced it is better or worth it.

If we've indeed succeeded in eradicating all instances of calling an
intel_engine_cs a ring, then I think this makes sense.
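
For reference, a minimal sketch of the convention this lands on (stand-in
types for illustration only, not the real i915 definitions):

	struct intel_engine_cs;		/* the command streamer ("engine") */
	struct intel_ringbuffer {	/* the buffer commands are emitted into */
		unsigned int tail;
		unsigned int space;
	};

	struct drm_i915_gem_request {
		struct intel_engine_cs *engine;	/* executes the request */
		struct intel_ringbuffer *ring;	/* was ->ringbuf */
	};

	/* Emitters then read naturally, e.g.: */
	static void emit_example(struct drm_i915_gem_request *req)
	{
		struct intel_ringbuffer *ring = req->ring;	/* was req->ringbuf */

		/* intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); */
		(void)ring;
	}

i.e. "ring" in a function body always refers to the intel_ringbuffer, never
the engine.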
-Daniel

> 
> Regards,
> 
> Tvrtko
> 
> 
> > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> > ---
> >   drivers/gpu/drm/i915/i915_gem_context.c    |  4 +-
> >   drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +-
> >   drivers/gpu/drm/i915/i915_gem_gtt.c        |  6 +-
> >   drivers/gpu/drm/i915/i915_gem_request.c    | 16 +++---
> >   drivers/gpu/drm/i915/i915_gem_request.h    |  3 +-
> >   drivers/gpu/drm/i915/i915_gpu_error.c      | 20 +++----
> >   drivers/gpu/drm/i915/intel_display.c       | 10 ++--
> >   drivers/gpu/drm/i915/intel_lrc.c           | 57 +++++++++---------
> >   drivers/gpu/drm/i915/intel_mocs.c          | 36 ++++++------
> >   drivers/gpu/drm/i915/intel_overlay.c       |  8 +--
> >   drivers/gpu/drm/i915/intel_ringbuffer.c    | 92 +++++++++++++++---------------
> >   11 files changed, 126 insertions(+), 130 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> > index 899731f9a2c4..a7911f39f416 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> > @@ -514,7 +514,7 @@ static inline int
> >   mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> >   {
> >   	struct drm_i915_private *dev_priv = req->i915;
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 flags = hw_flags | MI_MM_SPACE_GTT;
> >   	const int num_rings =
> >   		/* Use an extended w/a on ivb+ if signalling from other rings */
> > @@ -614,7 +614,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> >   static int remap_l3(struct drm_i915_gem_request *req, int slice)
> >   {
> >   	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int i, ret;
> > 
> >   	if (!remap_info)
> > diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > index 99663e8429b3..246bd70c0c9f 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > @@ -1140,7 +1140,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
> >   static int
> >   i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret, i;
> > 
> >   	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
> > @@ -1270,7 +1270,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> > 
> >   	if (params->engine->id == RCS &&
> >   	    instp_mode != dev_priv->relative_constants_mode) {
> > -		struct intel_ringbuffer *ring = params->request->ringbuf;
> > +		struct intel_ringbuffer *ring = params->request->ring;
> > 
> >   		ret = intel_ring_begin(params->request, 4);
> >   		if (ret)
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > index 4b4e3de58ad9..b0a644cede20 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > @@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
> >   			  unsigned entry,
> >   			  dma_addr_t addr)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	BUG_ON(entry >= 4);
> > @@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
> >   static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
> >   			 struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	/* NB: TLBs must be flushed and invalidated before a switch */
> > @@ -1699,7 +1699,7 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
> >   static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
> >   			  struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	/* NB: TLBs must be flushed and invalidated before a switch */
> > diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> > index 059ba88e182e..c6a7a7984f1f 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_request.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> > @@ -351,7 +351,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
> >   	 * Note this requires that we are always called in request
> >   	 * completion order.
> >   	 */
> > -	request->ringbuf->last_retired_head = request->postfix;
> > +	request->ring->last_retired_head = request->postfix;
> > 
> >   	i915_gem_request_remove_from_client(request);
> > 
> > @@ -415,7 +415,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> >   			bool flush_caches)
> >   {
> >   	struct intel_engine_cs *engine;
> > -	struct intel_ringbuffer *ringbuf;
> > +	struct intel_ringbuffer *ring;
> >   	u32 request_start;
> >   	u32 reserved_tail;
> >   	int ret;
> > @@ -424,14 +424,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> >   		return;
> > 
> >   	engine = request->engine;
> > -	ringbuf = request->ringbuf;
> > +	ring = request->ring;
> > 
> >   	/*
> >   	 * To ensure that this call will not fail, space for its emissions
> >   	 * should already have been reserved in the ring buffer. Let the ring
> >   	 * know that it is time to use that space up.
> >   	 */
> > -	request_start = intel_ring_get_tail(ringbuf);
> > +	request_start = intel_ring_get_tail(ring);
> >   	reserved_tail = request->reserved_space;
> >   	request->reserved_space = 0;
> > 
> > @@ -478,21 +478,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> >   	 * GPU processing the request, we never over-estimate the
> >   	 * position of the head.
> >   	 */
> > -	request->postfix = intel_ring_get_tail(ringbuf);
> > +	request->postfix = intel_ring_get_tail(ring);
> > 
> >   	if (i915.enable_execlists)
> >   		ret = engine->emit_request(request);
> >   	else {
> >   		ret = engine->add_request(request);
> > 
> > -		request->tail = intel_ring_get_tail(ringbuf);
> > +		request->tail = intel_ring_get_tail(ring);
> >   	}
> >   	/* Not allowed to fail! */
> >   	WARN(ret, "emit|add_request failed: %d!\n", ret);
> >   	/* Sanity check that the reserved size was large enough. */
> > -	ret = intel_ring_get_tail(ringbuf) - request_start;
> > +	ret = intel_ring_get_tail(ring) - request_start;
> >   	if (ret < 0)
> > -		ret += ringbuf->size;
> > +		ret += ring->size;
> >   	WARN_ONCE(ret > reserved_tail,
> >   		  "Not enough space reserved (%d bytes) "
> >   		  "for adding the request (%d bytes)\n",
> > diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> > index a3cac13ab9af..913565fbb0e3 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_request.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> > @@ -59,7 +59,7 @@ struct drm_i915_gem_request {
> >   	 */
> >   	struct i915_gem_context *ctx;
> >   	struct intel_engine_cs *engine;
> > -	struct intel_ringbuffer *ringbuf;
> > +	struct intel_ringbuffer *ring;
> >   	struct intel_signal_node signaling;
> > 
> >   	unsigned reset_counter;
> > @@ -86,7 +86,6 @@ struct drm_i915_gem_request {
> >   	/** Preallocate space in the ringbuffer for the emitting the request */
> >   	u32 reserved_space;
> > 
> > -
> >   	/**
> >   	 * Context related to the previous request.
> >   	 * As the contexts are accessed by the hardware until the switch is
> > diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> > index d1667aa640ef..b934986bb117 100644
> > --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> > +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> > @@ -1089,7 +1089,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> >   		request = i915_gem_find_active_request(engine);
> >   		if (request) {
> >   			struct i915_address_space *vm;
> > -			struct intel_ringbuffer *rb;
> > +			struct intel_ringbuffer *ring;
> > 
> >   			vm = request->ctx && request->ctx->ppgtt ?
> >   				&request->ctx->ppgtt->base :
> > @@ -1107,7 +1107,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> >   			if (HAS_BROKEN_CS_TLB(dev_priv))
> >   				error->ring[i].wa_batchbuffer =
> >   					i915_error_ggtt_object_create(dev_priv,
> > -							     engine->scratch.obj);
> > +								      engine->scratch.obj);
> > 
> >   			if (request->pid) {
> >   				struct task_struct *task;
> > @@ -1123,23 +1123,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> > 
> >   			error->simulated |= request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
> > 
> > -			rb = request->ringbuf;
> > -			error->ring[i].cpu_ring_head = rb->head;
> > -			error->ring[i].cpu_ring_tail = rb->tail;
> > +			ring = request->ring;
> > +			error->ring[i].cpu_ring_head = ring->head;
> > +			error->ring[i].cpu_ring_tail = ring->tail;
> >   			error->ring[i].ringbuffer =
> >   				i915_error_ggtt_object_create(dev_priv,
> > -							      rb->obj);
> > +							      ring->obj);
> >   		}
> > 
> >   		error->ring[i].hws_page =
> >   			i915_error_ggtt_object_create(dev_priv,
> >   						      engine->status_page.obj);
> > 
> > -		if (engine->wa_ctx.obj) {
> > -			error->ring[i].wa_ctx =
> > -				i915_error_ggtt_object_create(dev_priv,
> > -							      engine->wa_ctx.obj);
> > -		}
> > +		error->ring[i].wa_ctx =
> > +			i915_error_ggtt_object_create(dev_priv,
> > +						      engine->wa_ctx.obj);
> > 
> >   		i915_gem_record_active_context(engine, error, &error->ring[i]);
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> > index 2cba91207d7e..2dafbfbc8134 100644
> > --- a/drivers/gpu/drm/i915/intel_display.c
> > +++ b/drivers/gpu/drm/i915/intel_display.c
> > @@ -11174,7 +11174,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
> >   				 struct drm_i915_gem_request *req,
> >   				 uint32_t flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >   	u32 flip_mask;
> >   	int ret;
> > @@ -11208,7 +11208,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
> >   				 struct drm_i915_gem_request *req,
> >   				 uint32_t flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >   	u32 flip_mask;
> >   	int ret;
> > @@ -11239,7 +11239,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
> >   				 struct drm_i915_gem_request *req,
> >   				 uint32_t flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct drm_i915_private *dev_priv = dev->dev_private;
> >   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >   	uint32_t pf, pipesrc;
> > @@ -11277,7 +11277,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
> >   				 struct drm_i915_gem_request *req,
> >   				 uint32_t flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct drm_i915_private *dev_priv = dev->dev_private;
> >   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >   	uint32_t pf, pipesrc;
> > @@ -11312,7 +11312,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
> >   				 struct drm_i915_gem_request *req,
> >   				 uint32_t flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >   	uint32_t plane_bit = 0;
> >   	int len, ret;
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index a1820d531e49..229545fc5b4a 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -692,7 +692,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
> >   			return ret;
> >   	}
> > 
> > -	request->ringbuf = ce->ringbuf;
> > +	request->ring = ce->ringbuf;
> > 
> >   	if (i915.enable_guc_submission) {
> >   		/*
> > @@ -748,11 +748,11 @@ err_unpin:
> >   static int
> >   intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> >   {
> > -	struct intel_ringbuffer *ringbuf = request->ringbuf;
> > +	struct intel_ringbuffer *ring = request->ring;
> >   	struct intel_engine_cs *engine = request->engine;
> > 
> > -	intel_ring_advance(ringbuf);
> > -	request->tail = ringbuf->tail;
> > +	intel_ring_advance(ring);
> > +	request->tail = ring->tail;
> > 
> >   	/*
> >   	 * Here we add two extra NOOPs as padding to avoid
> > @@ -760,9 +760,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> >   	 *
> >   	 * Caller must reserve WA_TAIL_DWORDS for us!
> >   	 */
> > -	intel_ring_emit(ringbuf, MI_NOOP);
> > -	intel_ring_emit(ringbuf, MI_NOOP);
> > -	intel_ring_advance(ringbuf);
> > +	intel_ring_emit(ring, MI_NOOP);
> > +	intel_ring_emit(ring, MI_NOOP);
> > +	intel_ring_advance(ring);
> > 
> >   	/* We keep the previous context alive until we retire the following
> >   	 * request. This ensures that any the context object is still pinned
> > @@ -805,7 +805,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> >   	struct drm_device       *dev = params->dev;
> >   	struct intel_engine_cs *engine = params->engine;
> >   	struct drm_i915_private *dev_priv = dev->dev_private;
> > -	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
> > +	struct intel_ringbuffer *ring = params->request->ring;
> >   	u64 exec_start;
> >   	int instp_mode;
> >   	u32 instp_mask;
> > @@ -817,7 +817,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> >   	case I915_EXEC_CONSTANTS_REL_GENERAL:
> >   	case I915_EXEC_CONSTANTS_ABSOLUTE:
> >   	case I915_EXEC_CONSTANTS_REL_SURFACE:
> > -		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
> > +		if (instp_mode != 0 && engine->id != RCS) {
> >   			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
> >   			return -EINVAL;
> >   		}
> > @@ -846,17 +846,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> >   	if (ret)
> >   		return ret;
> > 
> > -	if (engine == &dev_priv->engine[RCS] &&
> > +	if (engine->id == RCS &&
> >   	    instp_mode != dev_priv->relative_constants_mode) {
> >   		ret = intel_ring_begin(params->request, 4);
> >   		if (ret)
> >   			return ret;
> > 
> > -		intel_ring_emit(ringbuf, MI_NOOP);
> > -		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> > -		intel_ring_emit_reg(ringbuf, INSTPM);
> > -		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> > -		intel_ring_advance(ringbuf);
> > +		intel_ring_emit(ring, MI_NOOP);
> > +		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> > +		intel_ring_emit_reg(ring, INSTPM);
> > +		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
> > +		intel_ring_advance(ring);
> > 
> >   		dev_priv->relative_constants_mode = instp_mode;
> >   	}
> > @@ -1011,7 +1011,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
> >   {
> >   	int ret, i;
> >   	struct intel_engine_cs *engine = req->engine;
> > -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct i915_workarounds *w = &req->i915->workarounds;
> > 
> >   	if (w->count == 0)
> > @@ -1026,14 +1026,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
> >   	if (ret)
> >   		return ret;
> > 
> > -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
> > +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
> >   	for (i = 0; i < w->count; i++) {
> > -		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
> > -		intel_ring_emit(ringbuf, w->reg[i].value);
> > +		intel_ring_emit_reg(ring, w->reg[i].addr);
> > +		intel_ring_emit(ring, w->reg[i].value);
> >   	}
> > -	intel_ring_emit(ringbuf, MI_NOOP);
> > +	intel_ring_emit(ring, MI_NOOP);
> > 
> > -	intel_ring_advance(ringbuf);
> > +	intel_ring_advance(ring);
> > 
> >   	engine->gpu_caches_dirty = true;
> >   	ret = logical_ring_flush_all_caches(req);
> > @@ -1506,7 +1506,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
> >   static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> >   {
> >   	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
> >   	int i, ret;
> > 
> > @@ -1533,7 +1533,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> >   static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
> >   			      u64 offset, unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
> >   	int ret;
> > 
> > @@ -1590,8 +1590,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
> >   			   u32 invalidate_domains,
> >   			   u32 unused)
> >   {
> > -	struct intel_ringbuffer *ring = request->ringbuf;
> > -	struct intel_engine_cs *engine = ring->engine;
> > +	struct intel_ringbuffer *ring = request->ring;
> >   	uint32_t cmd;
> >   	int ret;
> > 
> > @@ -1610,7 +1609,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
> > 
> >   	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
> >   		cmd |= MI_INVALIDATE_TLB;
> > -		if (engine->id == VCS)
> > +		if (request->engine->id == VCS)
> >   			cmd |= MI_INVALIDATE_BSD;
> >   	}
> > 
> > @@ -1629,7 +1628,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
> >   				  u32 invalidate_domains,
> >   				  u32 flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = request->ringbuf;
> > +	struct intel_ringbuffer *ring = request->ring;
> >   	struct intel_engine_cs *engine = request->engine;
> >   	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> >   	bool vf_flush_wa = false;
> > @@ -1711,7 +1710,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
> > 
> >   static int gen8_emit_request(struct drm_i915_gem_request *request)
> >   {
> > -	struct intel_ringbuffer *ring = request->ringbuf;
> > +	struct intel_ringbuffer *ring = request->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
> > @@ -1734,7 +1733,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
> > 
> >   static int gen8_emit_request_render(struct drm_i915_gem_request *request)
> >   {
> > -	struct intel_ringbuffer *ring = request->ringbuf;
> > +	struct intel_ringbuffer *ring = request->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
> > diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> > index 8513bf06d4df..4b44bbcfd7cd 100644
> > --- a/drivers/gpu/drm/i915/intel_mocs.c
> > +++ b/drivers/gpu/drm/i915/intel_mocs.c
> > @@ -231,7 +231,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
> >   static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> >   				   const struct drm_i915_mocs_table *table)
> >   {
> > -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	enum intel_engine_id engine = req->engine->id;
> >   	unsigned int index;
> >   	int ret;
> > @@ -243,11 +243,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> >   	if (ret)
> >   		return ret;
> > 
> > -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> > +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> > 
> >   	for (index = 0; index < table->size; index++) {
> > -		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> > -		intel_ring_emit(ringbuf, table->table[index].control_value);
> > +		intel_ring_emit_reg(ring, mocs_register(engine, index));
> > +		intel_ring_emit(ring, table->table[index].control_value);
> >   	}
> > 
> >   	/*
> > @@ -259,12 +259,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> >   	 * that value to all the used entries.
> >   	 */
> >   	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
> > -		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> > -		intel_ring_emit(ringbuf, table->table[0].control_value);
> > +		intel_ring_emit_reg(ring, mocs_register(engine, index));
> > +		intel_ring_emit(ring, table->table[0].control_value);
> >   	}
> > 
> > -	intel_ring_emit(ringbuf, MI_NOOP);
> > -	intel_ring_advance(ringbuf);
> > +	intel_ring_emit(ring, MI_NOOP);
> > +	intel_ring_advance(ring);
> > 
> >   	return 0;
> >   }
> > @@ -291,7 +291,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
> >   static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> >   				const struct drm_i915_mocs_table *table)
> >   {
> > -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	unsigned int i;
> >   	int ret;
> > 
> > @@ -302,18 +302,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> >   	if (ret)
> >   		return ret;
> > 
> > -	intel_ring_emit(ringbuf,
> > +	intel_ring_emit(ring,
> >   			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
> > 
> >   	for (i = 0; i < table->size/2; i++) {
> > -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> > -		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
> > +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> > +		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
> >   	}
> > 
> >   	if (table->size & 0x01) {
> >   		/* Odd table size - 1 left over */
> > -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> > -		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
> > +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> > +		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
> >   		i++;
> >   	}
> > 
> > @@ -323,12 +323,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> >   	 * they are reserved by the hardware.
> >   	 */
> >   	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
> > -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> > -		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
> > +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> > +		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
> >   	}
> > 
> > -	intel_ring_emit(ringbuf, MI_NOOP);
> > -	intel_ring_advance(ringbuf);
> > +	intel_ring_emit(ring, MI_NOOP);
> > +	intel_ring_advance(ring);
> > 
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> > index be79c4497af5..f9c062fea39f 100644
> > --- a/drivers/gpu/drm/i915/intel_overlay.c
> > +++ b/drivers/gpu/drm/i915/intel_overlay.c
> > @@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
> > 
> >   	overlay->active = true;
> > 
> > -	ring = req->ringbuf;
> > +	ring = req->ring;
> >   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> >   	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
> >   	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> > @@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
> >   		return ret;
> >   	}
> > 
> > -	ring = req->ringbuf;
> > +	ring = req->ring;
> >   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> >   	intel_ring_emit(ring, flip_addr);
> >   	intel_ring_advance(ring);
> > @@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
> >   		return ret;
> >   	}
> > 
> > -	ring = req->ringbuf;
> > +	ring = req->ring;
> >   	/* wait for overlay to go idle */
> >   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> >   	intel_ring_emit(ring, flip_addr);
> > @@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
> >   			return ret;
> >   		}
> > 
> > -		ring = req->ringbuf;
> > +		ring = req->ring;
> >   		intel_ring_emit(ring,
> >   				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> >   		intel_ring_emit(ring, MI_NOOP);
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index ace455b2b2d6..0f13e9900bd6 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
> >   		       u32	invalidate_domains,
> >   		       u32	flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 cmd;
> >   	int ret;
> > 
> > @@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
> >   		       u32	invalidate_domains,
> >   		       u32	flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 cmd;
> >   	int ret;
> > 
> > @@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
> >   static int
> >   intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 scratch_addr =
> >   	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> >   	int ret;
> > @@ -224,7 +224,7 @@ static int
> >   gen6_render_ring_flush(struct drm_i915_gem_request *req,
> >   		       u32 invalidate_domains, u32 flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 scratch_addr =
> >   	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> >   	u32 flags = 0;
> > @@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
> >   static int
> >   gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 4);
> > @@ -299,7 +299,7 @@ static int
> >   gen7_render_ring_flush(struct drm_i915_gem_request *req,
> >   		       u32 invalidate_domains, u32 flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 scratch_addr =
> >   	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> >   	u32 flags = 0;
> > @@ -364,7 +364,7 @@ static int
> >   gen8_emit_pipe_control(struct drm_i915_gem_request *req,
> >   		       u32 flags, u32 scratch_addr)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 6);
> > @@ -680,7 +680,7 @@ err:
> > 
> >   static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct i915_workarounds *w = &req->i915->workarounds;
> >   	int ret, i;
> > 
> > @@ -1242,7 +1242,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> >   			   unsigned int num_dwords)
> >   {
> >   #define MBOX_UPDATE_DWORDS 8
> > -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> > +	struct intel_ringbuffer *signaller = signaller_req->ring;
> >   	struct drm_i915_private *dev_priv = signaller_req->i915;
> >   	struct intel_engine_cs *waiter;
> >   	enum intel_engine_id id;
> > @@ -1282,7 +1282,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
> >   			   unsigned int num_dwords)
> >   {
> >   #define MBOX_UPDATE_DWORDS 6
> > -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> > +	struct intel_ringbuffer *signaller = signaller_req->ring;
> >   	struct drm_i915_private *dev_priv = signaller_req->i915;
> >   	struct intel_engine_cs *waiter;
> >   	enum intel_engine_id id;
> > @@ -1319,7 +1319,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
> >   static int gen6_signal(struct drm_i915_gem_request *signaller_req,
> >   		       unsigned int num_dwords)
> >   {
> > -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> > +	struct intel_ringbuffer *signaller = signaller_req->ring;
> >   	struct drm_i915_private *dev_priv = signaller_req->i915;
> >   	struct intel_engine_cs *useless;
> >   	enum intel_engine_id id;
> > @@ -1363,7 +1363,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
> >   static int
> >   gen6_add_request(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	if (req->engine->semaphore.signal)
> > @@ -1387,7 +1387,7 @@ static int
> >   gen8_render_add_request(struct drm_i915_gem_request *req)
> >   {
> >   	struct intel_engine_cs *engine = req->engine;
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	if (engine->semaphore.signal)
> > @@ -1432,7 +1432,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
> >   	       struct intel_engine_cs *signaller,
> >   	       u32 seqno)
> >   {
> > -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> > +	struct intel_ringbuffer *waiter = waiter_req->ring;
> >   	struct drm_i915_private *dev_priv = waiter_req->i915;
> >   	struct i915_hw_ppgtt *ppgtt;
> >   	int ret;
> > @@ -1469,7 +1469,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
> >   	       struct intel_engine_cs *signaller,
> >   	       u32 seqno)
> >   {
> > -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> > +	struct intel_ringbuffer *waiter = waiter_req->ring;
> >   	u32 dw1 = MI_SEMAPHORE_MBOX |
> >   		  MI_SEMAPHORE_COMPARE |
> >   		  MI_SEMAPHORE_REGISTER;
> > @@ -1603,7 +1603,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
> >   	       u32     invalidate_domains,
> >   	       u32     flush_domains)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 2);
> > @@ -1619,7 +1619,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
> >   static int
> >   i9xx_add_request(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 4);
> > @@ -1697,7 +1697,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			 u64 offset, u32 length,
> >   			 unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 2);
> > @@ -1724,7 +1724,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			 u64 offset, u32 len,
> >   			 unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	u32 cs_offset = req->engine->scratch.gtt_offset;
> >   	int ret;
> > 
> > @@ -1786,7 +1786,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			 u64 offset, u32 len,
> >   			 unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 2);
> > @@ -2221,7 +2221,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
> >   	 */
> >   	request->reserved_space += LEGACY_REQUEST_SIZE;
> > 
> > -	request->ringbuf = request->engine->buffer;
> > +	request->ring = request->engine->buffer;
> > 
> >   	ret = intel_ring_begin(request, 0);
> >   	if (ret)
> > @@ -2233,12 +2233,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
> > 
> >   static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
> >   {
> > -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	struct intel_engine_cs *engine = req->engine;
> >   	struct drm_i915_gem_request *target;
> > 
> > -	intel_ring_update_space(ringbuf);
> > -	if (ringbuf->space >= bytes)
> > +	intel_ring_update_space(ring);
> > +	if (ring->space >= bytes)
> >   		return 0;
> > 
> >   	/*
> > @@ -2260,12 +2260,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
> >   		 * from multiple ringbuffers. Here, we must ignore any that
> >   		 * aren't from the ringbuffer we're considering.
> >   		 */
> > -		if (target->ringbuf != ringbuf)
> > +		if (target->ring != ring)
> >   			continue;
> > 
> >   		/* Would completion of this request free enough space? */
> > -		space = __intel_ring_space(target->postfix, ringbuf->tail,
> > -					   ringbuf->size);
> > +		space = __intel_ring_space(target->postfix, ring->tail,
> > +					   ring->size);
> >   		if (space >= bytes)
> >   			break;
> >   	}
> > @@ -2278,9 +2278,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
> > 
> >   int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> >   {
> > -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> > -	int remain_actual = ringbuf->size - ringbuf->tail;
> > -	int remain_usable = ringbuf->effective_size - ringbuf->tail;
> > +	struct intel_ringbuffer *ring = req->ring;
> > +	int remain_actual = ring->size - ring->tail;
> > +	int remain_usable = ring->effective_size - ring->tail;
> >   	int bytes = num_dwords * sizeof(u32);
> >   	int total_bytes, wait_bytes;
> >   	bool need_wrap = false;
> > @@ -2307,35 +2307,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> >   		wait_bytes = total_bytes;
> >   	}
> > 
> > -	if (wait_bytes > ringbuf->space) {
> > +	if (wait_bytes > ring->space) {
> >   		int ret = wait_for_space(req, wait_bytes);
> >   		if (unlikely(ret))
> >   			return ret;
> > 
> > -		intel_ring_update_space(ringbuf);
> > -		if (unlikely(ringbuf->space < wait_bytes))
> > +		intel_ring_update_space(ring);
> > +		if (unlikely(ring->space < wait_bytes))
> >   			return -EAGAIN;
> >   	}
> > 
> >   	if (unlikely(need_wrap)) {
> > -		GEM_BUG_ON(remain_actual > ringbuf->space);
> > -		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
> > +		GEM_BUG_ON(remain_actual > ring->space);
> > +		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
> > 
> >   		/* Fill the tail with MI_NOOP */
> > -		memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
> > -		ringbuf->tail = 0;
> > -		ringbuf->space -= remain_actual;
> > +		memset(ring->vaddr + ring->tail, 0, remain_actual);
> > +		ring->tail = 0;
> > +		ring->space -= remain_actual;
> >   	}
> > 
> > -	ringbuf->space -= bytes;
> > -	GEM_BUG_ON(ringbuf->space < 0);
> > +	ring->space -= bytes;
> > +	GEM_BUG_ON(ring->space < 0);
> >   	return 0;
> >   }
> > 
> >   /* Align the ring tail to a cacheline boundary */
> >   int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int num_dwords =
> >   	       	(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
> >   	int ret;
> > @@ -2429,7 +2429,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
> >   static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
> >   			       u32 invalidate, u32 flush)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	uint32_t cmd;
> >   	int ret;
> > 
> > @@ -2475,7 +2475,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			      u64 offset, u32 len,
> >   			      unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	bool ppgtt = USES_PPGTT(req->i915) &&
> >   			!(dispatch_flags & I915_DISPATCH_SECURE);
> >   	int ret;
> > @@ -2501,7 +2501,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			     u64 offset, u32 len,
> >   			     unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 2);
> > @@ -2526,7 +2526,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   			      u64 offset, u32 len,
> >   			      unsigned dispatch_flags)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	int ret;
> > 
> >   	ret = intel_ring_begin(req, 2);
> > @@ -2549,7 +2549,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> >   static int gen6_ring_flush(struct drm_i915_gem_request *req,
> >   			   u32 invalidate, u32 flush)
> >   {
> > -	struct intel_ringbuffer *ring = req->ringbuf;
> > +	struct intel_ringbuffer *ring = req->ring;
> >   	uint32_t cmd;
> >   	int ret;
> > 
> > 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




