[PATCH 054/190] drm/i915: Use the new rq->i915 field where appropriate

In a few frequent cases, having a direct pointer to the drm_i915_private
from the request is very useful: it replaces the repeated chase through
req->ring->dev->dev_private (or to_i915(ring->dev)) at the callsites
touched below.
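
To illustrate the pattern being removed, here is a minimal userspace
sketch of the before/after accessor shape. The struct layouts and names
(sketch_*) are purely illustrative assumptions, not the real kernel
definitions:

  #include <stdio.h>

  /* Illustrative layouts only; not the real i915 structures. */
  struct sketch_i915       { int interruptible; };
  struct sketch_drm_device { struct sketch_i915 *dev_private; };
  struct sketch_engine     { struct sketch_drm_device *dev; };
  struct sketch_request {
          struct sketch_engine *ring;
          struct sketch_i915 *i915;   /* cached when the request is created */
  };

  /* Before: three dependent loads to reach the device private. */
  static struct sketch_i915 *get_i915_old(struct sketch_request *req)
  {
          return req->ring->dev->dev_private;
  }

  /* After: a single load straight from the request. */
  static struct sketch_i915 *get_i915_new(struct sketch_request *req)
  {
          return req->i915;
  }

  int main(void)
  {
          struct sketch_i915 i915 = { .interruptible = 1 };
          struct sketch_drm_device dev = { .dev_private = &i915 };
          struct sketch_engine ring = { .dev = &dev };
          struct sketch_request req = { .ring = &ring, .i915 = &i915 };

          /* Both paths reach the same object; the second needs no chasing. */
          printf("%d %d\n", get_i915_old(&req)->interruptible,
                 get_i915_new(&req)->interruptible);
          return 0;
  }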

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c            |  7 +++---
 drivers/gpu/drm/i915/i915_gem_context.c    | 21 +++++++++---------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_request.c    |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  6 ++----
 drivers/gpu/drm/i915/intel_pm.c            |  3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 34 ++++++++++++------------------
 7 files changed, 32 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 31926a4fb42a..c2a1ec8abc11 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2568,7 +2568,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (!i915.semaphores) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		struct drm_i915_private *i915 = from_req->i915;
 		ret = __i915_wait_request(from_req,
 					  i915->mm.interruptible,
 					  NULL,
@@ -4069,12 +4069,11 @@ err:
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
 
-	if (!HAS_L3_DPF(dev) || !remap_info)
+	if (!HAS_L3_DPF(dev_priv) || !remap_info)
 		return 0;
 
 	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 361be1085a18..3e3b4bf3fed1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -524,7 +524,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
-		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		hweight32(INTEL_INFO(req->i915)->ring_mask) - 1 :
 		0;
 	int len, i, ret;
 
@@ -533,21 +533,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
+	if (IS_GEN6(req->i915)) {
 		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+	if (IS_HASWELL(req->i915) || INTEL_INFO(req->i915)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(ring->dev)->gen < 8)
+	else if (INTEL_INFO(req->i915)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(ring->dev)->gen >= 7)
+	if (INTEL_INFO(req->i915)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -555,13 +555,13 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(req->i915)->gen >= 7) {
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
+			for_each_ring(signaller, req->i915, i) {
 				if (signaller == ring)
 					continue;
 
@@ -581,12 +581,12 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(ring, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(req->i915)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
+			for_each_ring(signaller, req->i915, i) {
 				if (signaller == ring)
 					continue;
 
@@ -827,10 +827,9 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (req->ctx != ring->last_context) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dfabeee2ff0b..78b462956c78 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1099,7 +1099,6 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1126,7 +1125,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = req->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 01893d847dfd..619a9b063d9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -199,7 +199,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
 			   struct intel_context *ctx,
 			   struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = ring->i915;
 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	u32 seqno;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 527eaf59be25..a369aa041522 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -329,8 +329,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 {
 
 	struct intel_engine_cs *ring = rq0->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = rq0->i915;
 	uint64_t desc[2];
 
 	if (rq1) {
@@ -1094,8 +1093,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	int ret, i;
 	struct intel_engine_cs *ring = req->ring;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b340f2a1f110..a082b4577599 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7286,8 +7286,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req))
-		gen6_rps_boost(to_i915(req->ring->dev), NULL,
-			       req->emitted_jiffies);
+		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_put(req);
 	kfree(boost);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e143da96dcfa..d17dd33ee94c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -99,7 +99,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	flush_domains)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
 
@@ -138,7 +137,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_EXE_FLUSH;
 
 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(dev) || IS_GEN5(dev)))
+	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
 		cmd |= MI_INVALIDATE_ISP;
 
 	ret = intel_ring_begin(req, 2);
@@ -691,8 +690,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
@@ -1194,12 +1192,11 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1233,12 +1230,11 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1269,13 +1265,12 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1352,7 +1347,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       u32 seqno)
 {
 	struct intel_engine_cs *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	struct drm_i915_private *dev_priv = waiter_req->i915;
 	int ret;
 
 	ret = intel_ring_begin(waiter_req, 4);
@@ -2120,7 +2115,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   to_i915(ring->dev)->mm.interruptible,
+				   req->i915->mm.interruptible,
 				   NULL, NULL);
 }
 
@@ -2248,7 +2243,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 
 	WARN_ON(req == NULL);
 	ring = req->ring;
-	dev_priv = ring->dev->dev_private;
+	dev_priv = req->i915;
 
 	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
 	if (ret)
@@ -2383,7 +2378,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      unsigned dispatch_flags)
 {
 	struct intel_engine_cs *ring = req->ring;
-	bool ppgtt = USES_PPGTT(ring->dev) &&
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2457,7 +2452,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2466,7 +2460,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_INFO(req->i915)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2486,7 +2480,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(req->i915)->gen >= 8) {
 		intel_ring_emit(ring, 0); /* upper addr */
 		intel_ring_emit(ring, 0); /* value */
 	} else  {
-- 
2.7.0.rc3
