struct r500_irq_stat_regs {
u32 disp_int;
u32 hdmi0_status;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6e3d1c8f3483..8ad4e2cfae15 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -248,23 +248,34 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
for (i = 0; i < p->nrelocs; i++) {
struct reservation_object *resv;
struct fence *fence;
+ struct radeon_fence *rfence;
+ int r;
if (!p->relocs[i].robj)
continue;
resv = p->relocs[i].robj->tbo.resv;
fence = reservation_object_get_excl(resv);
+ if (!fence)
+ continue;
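+
+ /* Fences from other devices (e.g. BOs shared via PRIME) can't be
+  * handled with a semaphore, so wait for them on the CPU instead.
+  */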
+ rfence = to_radeon_fence(fence);
+ if (!rfence || rfence->rdev != p->rdev) {
+ r = fence_wait(fence, true);
+ if (r)
+ return r;
+ continue;
+ }
- radeon_semaphore_sync_to(p->ib.semaphore,
- (struct radeon_fence *)fence);
+ radeon_semaphore_sync_to(p->ib.semaphore, rfence);
}
+ return 0;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -474,13 +485,19 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
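+ /* -ERESTARTSYS just means the wait was interrupted by a signal */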
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ return r;
+ }
+
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
(parser->ring == TN_RING_TYPE_VCE2_INDEX))
radeon_vce_note_usage(rdev);
- radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
@@ -567,7 +584,12 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) {
goto out;
}
- radeon_cs_sync_rings(parser);
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ goto out;
+ }
radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bc894c17b2f9..715b2d95346c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -36,6 +36,17 @@
#include <linux/gcd.h>
+struct radeon_flip_work {
+ struct work_struct flip_work;
+ struct work_struct unpin_work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ uint64_t base;
+ struct drm_pending_vblank_event *event;
+ struct radeon_bo *old_rbo;
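+ /* exclusive fence of the new BO; may not be a radeon fence */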
+ struct fence *fence;
+};
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -402,14 +413,21 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
if (work->fence) {
- r = radeon_fence_wait(work->fence, false);
- if (r == -EDEADLK) {
- up_read(&rdev->exclusive_lock);
- do {
- r = radeon_gpu_reset(rdev);
- } while (r == -EAGAIN);
- down_read(&rdev->exclusive_lock);
- }
+ struct radeon_fence *fence;
+
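+ /* Only a fence emitted by this device can report a lockup with
+  * -EDEADLK and be recovered with a GPU reset; any other fence
+  * just gets a plain wait.
+  */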
+ fence = to_radeon_fence(work->fence);
+ if (fence && fence->rdev == rdev) {
+ r = radeon_fence_wait(fence, false);
+ if (r == -EDEADLK) {
+ up_read(&rdev->exclusive_lock);
+ do {
+ r = radeon_gpu_reset(rdev);
+ } while (r == -EAGAIN);
+ down_read(&rdev->exclusive_lock);
+ }
+ } else
+ r = fence_wait(work->fence, false);
+
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -418,7 +436,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- radeon_fence_unref(&work->fence);
+ fence_put(work->fence);
+ work->fence = NULL;
}
/* We borrow the event spin lock for protecting flip_status */
@@ -494,7 +513,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -576,7 +595,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
+ fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index af9f2d6bd7d0..0262fe2580d2 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -541,6 +541,9 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
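+ /* If this isn't really a radeon fence, fall back to the generic wait. */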
+ if (unlikely(!to_radeon_fence(&fence->base)))
+ return fence_wait(&fence->base, intr);
+
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
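
Note: the fallbacks above rely on to_radeon_fence() returning NULL for any fence that was not created by this driver. A minimal sketch of how that helper is assumed to look (the real definition lives in radeon.h):

static inline struct radeon_fence *to_radeon_fence(struct fence *f)
{
	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);

	/* only fences initialised with radeon's fence ops are ours */
	if (__f->base.ops == &radeon_fence_ops)
		return __f;

	return NULL;
}
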
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e27608c29c11..f6e9ee573de2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -40,6 +40,7 @@
struct radeon_bo;
struct radeon_device;
+struct radeon_flip_work;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)