From: Michel Dänzer <michel.daenzer@xxxxxxx>

This is to avoid submitting more flips while we are waiting for pending
ones to complete.

Signed-off-by: Michel Dänzer <michel.daenzer at amd.com>
---

v2:
* Rebased on top of new patch 2.5

 src/amdgpu_drm_queue.c | 41 +++++++++++++++++++++++++++++++++++++++--
 src/amdgpu_drm_queue.h |  1 +
 src/drmmode_display.c  | 18 ++++++++++++------
 src/drmmode_display.h  |  4 ++++
 4 files changed, 56 insertions(+), 8 deletions(-)

diff --git a/src/amdgpu_drm_queue.c b/src/amdgpu_drm_queue.c
index f8660828c..b13d28014 100644
--- a/src/amdgpu_drm_queue.c
+++ b/src/amdgpu_drm_queue.c
@@ -117,6 +117,30 @@ amdgpu_drm_vblank_handler(int fd, unsigned int frame, unsigned int sec,
 				  user_ptr);
 }
 
+/*
+ * Handle deferred DRM vblank events
+ *
+ * This function must be called after amdgpu_drm_wait_pending_flip, once
+ * it's safe to attempt queueing a flip again
+ */
+void
+amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
+{
+	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
+	struct amdgpu_drm_queue_entry *e, *tmp;
+
+	if (drmmode_crtc->wait_flip_nesting_level == 0 ||
+	    --drmmode_crtc->wait_flip_nesting_level > 0)
+		return;
+
+	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
+		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
+
+		if (drmmode_crtc->wait_flip_nesting_level == 0)
+			amdgpu_drm_queue_handle_one(e);
+	}
+}
+
 /*
  * Enqueue a potential drm response; when the associated response
  * appears, we've got data to pass to the handler from here
@@ -191,6 +215,13 @@ amdgpu_drm_abort_entry(uintptr_t seq)
 	if (seq == AMDGPU_DRM_QUEUE_ERROR)
 		return;
 
+	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
+		if (e->seq == seq) {
+			amdgpu_drm_abort_one(e);
+			return;
+		}
+	}
+
 	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
 		if (e->seq == seq) {
 			amdgpu_drm_abort_one(e);
@@ -229,8 +260,12 @@ amdgpu_drm_handle_event(int fd, drmEventContext *event_context)
 	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_flip_signalled, list)
 		amdgpu_drm_queue_handle_one(e);
 
-	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list)
-		amdgpu_drm_queue_handle_one(e);
+	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
+		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
+
+		if (drmmode_crtc->wait_flip_nesting_level == 0)
+			amdgpu_drm_queue_handle_one(e);
+	}
 
 	return r;
 }
@@ -244,6 +279,8 @@ void amdgpu_drm_wait_pending_flip(xf86CrtcPtr crtc)
 	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(crtc->scrn);
 	struct amdgpu_drm_queue_entry *e, *tmp;
 
+	drmmode_crtc->wait_flip_nesting_level++;
+
 	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_flip_signalled, list)
 		amdgpu_drm_queue_handle_one(e);
 
diff --git a/src/amdgpu_drm_queue.h b/src/amdgpu_drm_queue.h
index 48ba9ab6e..4e7c8f4c4 100644
--- a/src/amdgpu_drm_queue.h
+++ b/src/amdgpu_drm_queue.h
@@ -42,6 +42,7 @@ typedef void (*amdgpu_drm_handler_proc)(xf86CrtcPtr crtc, uint32_t seq,
 					uint64_t usec, void *data);
 typedef void (*amdgpu_drm_abort_proc)(xf86CrtcPtr crtc, void *data);
 
+void amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc);
 uintptr_t amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
 				 uint64_t id, void *data,
 				 amdgpu_drm_handler_proc handler,
diff --git a/src/drmmode_display.c b/src/drmmode_display.c
index 167b30091..9919c0923 100644
--- a/src/drmmode_display.c
+++ b/src/drmmode_display.c
@@ -305,6 +305,9 @@ drmmode_do_crtc_dpms(xf86CrtcPtr crtc, int mode)
 				nominal_frame_rate /= pix_in_frame;
 			drmmode_crtc->dpms_last_fps = nominal_frame_rate;
 		}
+
+		drmmode_crtc->dpms_mode = mode;
+		amdgpu_drm_queue_handle_deferred(crtc);
 	} else if (drmmode_crtc->dpms_mode != DPMSModeOn && mode == DPMSModeOn) {
 		/*
 		 * Off->On transition: calculate and accumulate the
@@ -322,8 +325,9 @@ drmmode_do_crtc_dpms(xf86CrtcPtr crtc, int mode)
 			drmmode_crtc->interpolated_vblanks += delta_seq;
 
 		}
+
+		drmmode_crtc->dpms_mode = DPMSModeOn;
 	}
-	drmmode_crtc->dpms_mode = mode;
 }
 
 static void
@@ -1415,6 +1419,7 @@ done:
 		}
 	}
 
+	amdgpu_drm_queue_handle_deferred(crtc);
 	return ret;
 }
 
@@ -2320,11 +2325,6 @@ drmmode_output_set_tear_free(AMDGPUEntPtr pAMDGPUEnt,
 	drmmode_output->tear_free = tear_free;
 
 	if (crtc) {
-		/* Wait for pending flips before drmmode_set_mode_major calls
-		 * drmmode_crtc_update_tear_free, to prevent a nested
-		 * drmHandleEvent call, which would hang
-		 */
-		amdgpu_drm_wait_pending_flip(crtc);
 		drmmode_set_mode_major(crtc, &crtc->mode, crtc->rotation,
 				       crtc->x, crtc->y);
 	}
@@ -3864,6 +3864,7 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
 	drmmode_crtc_private_ptr drmmode_crtc = config->crtc[0]->driver_private;
 	uint32_t flip_flags = flip_sync == FLIP_ASYNC ? DRM_MODE_PAGE_FLIP_ASYNC : 0;
 	drmmode_flipdata_ptr flipdata;
+	Bool handle_deferred = FALSE;
 	uintptr_t drm_queue_seq = 0;
 	struct drmmode_fb *fb;
 	int i = 0;
@@ -3946,6 +3947,7 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
 
 		if (drmmode_crtc->scanout_update_pending) {
 			amdgpu_drm_wait_pending_flip(crtc);
+			handle_deferred = TRUE;
 			amdgpu_drm_abort_entry(drmmode_crtc->scanout_update_pending);
 			drmmode_crtc->scanout_update_pending = 0;
 		}
@@ -3981,6 +3983,8 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
 		drm_queue_seq = 0;
 	}
 
+	if (handle_deferred)
+		amdgpu_drm_queue_handle_deferred(ref_crtc);
 	if (flipdata->flip_count > 0)
 		return TRUE;
 
@@ -4000,5 +4004,7 @@ error:
 	xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Page flip failed: %s\n",
 		   strerror(errno));
 
+	if (handle_deferred)
+		amdgpu_drm_queue_handle_deferred(ref_crtc);
 	return FALSE;
 }
diff --git a/src/drmmode_display.h b/src/drmmode_display.h
index 5618c6b40..27610d537 100644
--- a/src/drmmode_display.h
+++ b/src/drmmode_display.h
@@ -119,6 +119,10 @@ typedef struct {
 
 	/* Modeset needed for DPMS on */
 	Bool need_modeset;
+	/* For keeping track of nested calls to drm_wait_pending_flip /
+	 * drm_queue_handle_deferred
+	 */
+	int wait_flip_nesting_level;
 	/* A flip to this FB is pending for this CRTC */
 	struct drmmode_fb *flip_pending;
 	/* The FB currently being scanned out by this CRTC, if any */
-- 
2.18.0
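
For context, a minimal sketch (not part of the patch) of how the two helpers touched here are meant to be paired on a flip-submission path; example_submit_flip() and the "submit the new flip" step are hypothetical placeholders, and the usual driver headers are assumed:

#include "amdgpu_drm_queue.h"

/* Sketch only: bracket a flip submission so vblank events for this CRTC
 * are deferred while a flip may still be pending.
 */
static void example_submit_flip(xf86CrtcPtr crtc)
{
	/* Increments wait_flip_nesting_level and waits for any flip already
	 * pending on this CRTC; vblank events handled in the meantime stay
	 * parked on amdgpu_drm_vblank_signalled instead of running their
	 * handlers (which could try to queue another flip).
	 */
	amdgpu_drm_wait_pending_flip(crtc);

	/* ... submit the new flip here (hypothetical placeholder) ... */

	/* Decrements wait_flip_nesting_level; once it reaches zero, the
	 * deferred vblank events are processed and may queue flips again.
	 */
	amdgpu_drm_queue_handle_deferred(crtc);
}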