Now that we can, we should split them up in a way that makes some sense
and banishes the IS_ checks into init code.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c |   69 ++++++++++++++++++-------------
 1 files changed, 40 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2cb1c8f..3d32c51 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -808,7 +808,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
 	int ret;
 
@@ -826,37 +826,36 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 }
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 				u32 offset, u32 len)
 {
-	struct drm_device *dev = ring->dev;
 	int ret;
 
-	if (IS_I830(dev) || IS_845G(dev)) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, 0);
-	} else {
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
-		if (INTEL_INFO(dev)->gen >= 4) {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6) |
-					MI_BATCH_NON_SECURE_I965);
-			intel_ring_emit(ring, offset);
-		} else {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6));
-			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		}
-	}
+	return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 len)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6));
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -1326,7 +1325,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 6)
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
@@ -1367,7 +1373,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
@@ -1442,7 +1453,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_get = i9xx_ring_get_irq;
 			ring->irq_put = i9xx_ring_put_irq;
 		}
-		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
 
 	ring->init = init_ring_common;
-- 
1.7.7.5