On r6xx+ a newer fence mechanism was implemented to replace the old
wait_until plus scratch regs setup.  A single EOP event will flush the
destination caches, write a fence value, and generate an interrupt.
This is the recommended fence mechanism on r6xx+ asics.

This requires my previous writeback patch.

Signed-off-by: Alex Deucher <alexdeucher@xxxxxxxxx>
---
 drivers/gpu/drm/radeon/evergreen.c     |    1 +
 drivers/gpu/drm/radeon/r600.c          |   41 ++++++++++++++++++++-----------
 drivers/gpu/drm/radeon/r600d.h         |   21 ++++++++++++++++
 drivers/gpu/drm/radeon/radeon.h        |    2 +
 drivers/gpu/drm/radeon/radeon_device.c |    8 +++++-
 drivers/gpu/drm/radeon/radeon_fence.c  |    6 ++++-
 6 files changed, 62 insertions(+), 17 deletions(-)
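A quick sketch for reviewers (not part of the patch; the two helpers below are
purely illustrative and do not exist in the diff): with use_event set, the CP
writes the fence value into the writeback page at R600_WB_EVENT_OFFSET plus
the per-register offset, and the CPU polls that same slot instead of reading
the scratch register.  Assuming the writeback layout this patch adds to
radeon.h, the addressing on both sides lines up roughly like this:

/* Illustrative only -- mirrors the inline math in r600_fence_ring_emit()
 * and radeon_fence_poll_locked(); these helpers are not added by the patch.
 */
static u64 fence_wb_gpu_addr(struct radeon_device *rdev)
{
        /* byte offset of this fence's slot within the writeback page */
        u32 byte_off = rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;

        /* address the EVENT_WRITE_EOP packet points the CP at */
        return rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + (u64)byte_off;
}

static u32 fence_wb_cpu_read(struct radeon_device *rdev)
{
        u32 byte_off = rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;

        /* same slot, read back through the CPU mapping of the WB buffer */
        return rdev->wb.wb[(R600_WB_EVENT_OFFSET + byte_off) / 4];
}

Since the EOP packet uses DATA_SEL(1) (write the low 32 bits, i.e. fence->seq)
and INT_SEL(2) (interrupt once the data write is confirmed), the CP EOP
interrupt (source 181) only fires after the value has landed in the writeback
page, which is why the IH handlers gain a radeon_fence_process() call.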
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 6fe8ca5..8a88a0e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2399,6 +2399,7 @@ restart_ih:
                         break;
                 case 181: /* CP EOP event */
                         DRM_DEBUG("IH: CP EOP\n");
+                        radeon_fence_process(rdev);
                         break;
                 case 233: /* GUI IDLE */
                         DRM_DEBUG("IH: CP EOP\n");
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ff78265..160948f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2282,21 +2282,31 @@ int r600_ring_test(struct radeon_device *rdev)
 void r600_fence_ring_emit(struct radeon_device *rdev,
                           struct radeon_fence *fence)
 {
-        /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
-
-        radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-        radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
-        /* wait for 3D idle clean */
-        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-        radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-        radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-        /* Emit fence sequence & fire IRQ */
-        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-        radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-        radeon_ring_write(rdev, fence->seq);
-        /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
-        radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
-        radeon_ring_write(rdev, RB_INT_STAT);
+        if (rdev->wb.use_event) {
+                u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+                        (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+                /* EVENT_WRITE_EOP - flush caches, send int */
+                radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+                radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+                radeon_ring_write(rdev, addr & 0xffffffff);
+                radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+                radeon_ring_write(rdev, fence->seq);
+                radeon_ring_write(rdev, 0);
+        } else {
+                radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+                radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+                /* wait for 3D idle clean */
+                radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+                radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+                radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+                /* Emit fence sequence & fire IRQ */
+                radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+                radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+                radeon_ring_write(rdev, fence->seq);
+                /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+                radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+                radeon_ring_write(rdev, RB_INT_STAT);
+        }
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
@@ -3389,6 +3399,7 @@ restart_ih:
                         break;
                 case 181: /* CP EOP event */
                         DRM_DEBUG("IH: CP EOP\n");
+                        radeon_fence_process(rdev);
                         break;
                 case 233: /* GUI IDLE */
                         DRM_DEBUG("IH: CP EOP\n");
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 858a192..966a793 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -474,6 +474,7 @@
 #define VGT_VERTEX_REUSE_BLOCK_CNTL                     0x28C58
 #define         VTX_REUSE_DEPTH_MASK                    0x000000FF
 #define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS             (0x14 << 0)
 #       define CACHE_FLUSH_AND_INV_EVENT                (0x16 << 0)
 
 #define VM_CONTEXT0_CNTL                                0x1410
@@ -775,7 +776,27 @@
 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x)      ((x) << 16)
 #define PACKET3_COND_WRITE                              0x45
 #define PACKET3_EVENT_WRITE                             0x46
+#define         EVENT_TYPE(x)                           ((x) << 0)
+#define         EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                 * 1 - ZPASS_DONE
+                 * 2 - SAMPLE_PIPELINESTAT
+                 * 3 - SAMPLE_STREAMOUTSTAT*
+                 * 4 - *S_PARTIAL_FLUSH
+                 * 5 - TS events
+                 */
 #define PACKET3_EVENT_WRITE_EOP                         0x47
+#define         DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                 * 1 - send low 32bit data
+                 * 2 - send 64bit data
+                 * 3 - send 64bit counter value
+                 */
+#define         INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                 * 1 - interrupt only (DATA_SEL = 0)
+                 * 2 - interrupt when data write is confirmed
+                 */
 #define PACKET3_ONE_REG_WRITE                           0x57
 #define PACKET3_SET_CONFIG_REG                          0x68
 #define PACKET3_SET_CONFIG_REG_OFFSET                   0x00008000
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index bf3fe48..c6233b8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -595,11 +595,13 @@ struct radeon_wb {
         volatile uint32_t       *wb;
         uint64_t                gpu_addr;
         bool                    enabled;
+        bool                    use_event;
 };
 
 #define RADEON_WB_SCRATCH_OFFSET 0
 #define RADEON_WB_CP_RPTR_OFFSET 1024
 #define R600_WB_IH_WPTR_OFFSET   2048
+#define R600_WB_EVENT_OFFSET     3072
 
 /**
  * struct radeon_pm - power management datas
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cfc162d..659e206 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -208,6 +208,8 @@ int radeon_wb_init(struct radeon_device *rdev)
                 return r;
         }
 
+        /* disable event_write fences */
+        rdev->wb.use_event = false;
         /* disabled via module param */
         if (radeon_no_wb == 1)
                 rdev->wb.enabled = false;
@@ -215,8 +217,12 @@
                 /* often unreliable on AGP */
                 if (rdev->flags & RADEON_IS_AGP) {
                         rdev->wb.enabled = false;
-                } else
+                } else {
                         rdev->wb.enabled = true;
+                        /* event_write fences are only available on r600+ */
+                        if (rdev->family >= CHIP_R600)
+                                rdev->wb.use_event = true;
+                }
         }
 
         dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
"en" : "dis"); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 698a7ed..216392d 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -73,7 +73,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) unsigned long cjiffies; if (rdev->wb.enabled) { - u32 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + u32 scratch_index; + if (rdev->wb.use_event) + scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + else + scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; seq = rdev->wb.wb[scratch_index/4]; } else seq = RREG32(rdev->fence_drv.scratch_reg); -- 1.7.1.1 _______________________________________________ dri-devel mailing list dri-devel@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/dri-devel