Instead of relying on hardcoded indices into the static obj array
inside igt_spin_t, access it with a proper named index.

Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
---
 lib/igt_dummyload.c            | 32 +++++++++++++++-----------------
 lib/igt_dummyload.h            |  1 +
 tests/i915/gem_exec_schedule.c | 25 +++++++++++++++----------
 tests/i915/gem_softpin.c       |  2 +-
 tests/i915/gem_spin_batch.c    |  4 ++--
 tests/i915/i915_hangman.c      |  2 +-
 6 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 15d64fad..b2da222e 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -72,7 +72,6 @@ emit_recursive_batch(igt_spin_t *spin,
                       int fd, const struct igt_spin_factory *opts)
 {
 #define SCRATCH 0
-#define BATCH 1
         const int gen = intel_gen(intel_get_drm_devid(fd));
         struct drm_i915_gem_relocation_entry relocs[2], *r;
         struct drm_i915_gem_execbuffer2 *execbuf;
@@ -105,14 +104,14 @@ emit_recursive_batch(igt_spin_t *spin,
         obj = spin->obj;
         memset(relocs, 0, sizeof(relocs));
 
-        obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
-        batch = __gem_mmap__wc(fd, obj[BATCH].handle,
+        obj[SPIN_OBJ_BATCH].handle = gem_create(fd, BATCH_SIZE);
+        batch = __gem_mmap__wc(fd, obj[SPIN_OBJ_BATCH].handle,
                                0, BATCH_SIZE, PROT_WRITE);
         if (!batch)
-                batch = gem_mmap__gtt(fd, obj[BATCH].handle,
+                batch = gem_mmap__gtt(fd, obj[SPIN_OBJ_BATCH].handle,
                                       BATCH_SIZE, PROT_WRITE);
 
-        gem_set_domain(fd, obj[BATCH].handle,
+        gem_set_domain(fd, obj[SPIN_OBJ_BATCH].handle,
                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
         execbuf->buffer_count++;
         cs = batch;
@@ -120,7 +119,7 @@ emit_recursive_batch(igt_spin_t *spin,
         if (opts->dependency) {
                 igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
 
-                r = &relocs[obj[BATCH].relocation_count++];
+                r = &relocs[obj[SPIN_OBJ_BATCH].relocation_count++];
 
                 /* dummy write to dependency */
                 obj[SCRATCH].handle = opts->dependency;
@@ -133,7 +132,7 @@ emit_recursive_batch(igt_spin_t *spin,
 
                 execbuf->buffer_count++;
         } else if (opts->flags & IGT_SPIN_POLL_RUN) {
-                r = &relocs[obj[BATCH].relocation_count++];
+                r = &relocs[obj[SPIN_OBJ_BATCH].relocation_count++];
 
                 igt_assert(!opts->dependency);
 
@@ -182,7 +181,7 @@ emit_recursive_batch(igt_spin_t *spin,
                 execbuf->buffer_count++;
         }
 
-        spin->handle = obj[BATCH].handle;
+        spin->handle = obj[SPIN_OBJ_BATCH].handle;
 
         igt_assert_lt(cs - batch, LOOP_START_OFFSET / sizeof(*cs));
         spin->condition = batch + LOOP_START_OFFSET / sizeof(*cs);
@@ -207,8 +206,8 @@ emit_recursive_batch(igt_spin_t *spin,
                 cs += 1000;
 
         /* recurse */
-        r = &relocs[obj[BATCH].relocation_count++];
-        r->target_handle = obj[BATCH].handle;
+        r = &relocs[obj[SPIN_OBJ_BATCH].relocation_count++];
+        r->target_handle = obj[SPIN_OBJ_BATCH].handle;
         r->offset = (cs + 1 - batch) * sizeof(*cs);
         r->read_domains = I915_GEM_DOMAIN_COMMAND;
         r->delta = LOOP_START_OFFSET;
@@ -226,7 +225,7 @@ emit_recursive_batch(igt_spin_t *spin,
                 *cs = r->delta;
                 cs++;
         }
-        obj[BATCH].relocs_ptr = to_user_pointer(relocs);
+        obj[SPIN_OBJ_BATCH].relocs_ptr = to_user_pointer(relocs);
 
         execbuf->buffers_ptr = to_user_pointer(obj +
                                                (2 - execbuf->buffer_count));
@@ -261,12 +260,11 @@ emit_recursive_batch(igt_spin_t *spin,
         igt_assert_lt(cs - batch, BATCH_SIZE / sizeof(*cs));
 
         /* Make it easier for callers to resubmit. */
-
-        obj[BATCH].relocation_count = 0;
-        obj[BATCH].relocs_ptr = 0;
-
-        obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
-        obj[BATCH].flags = EXEC_OBJECT_PINNED;
+        for (i = 0; i < ARRAY_SIZE(spin->obj); i++) {
+                spin->obj[i].relocation_count = 0;
+                spin->obj[i].relocs_ptr = 0;
+                spin->obj[i].flags = EXEC_OBJECT_PINNED;
+        }
 
         spin->cmd_precondition = *spin->condition;
 
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 61a9f2fc..f7772b90 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -42,6 +42,7 @@ typedef struct igt_spin {
 
         int out_fence;
         struct drm_i915_gem_exec_object2 obj[2];
+#define SPIN_OBJ_BATCH 1
         struct drm_i915_gem_execbuffer2 execbuf;
         uint32_t poll_handle;
         uint32_t *poll;
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index c0438513..93168d10 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -234,7 +234,8 @@ static void independent(int fd, unsigned int engine)
         } else {
                 struct drm_i915_gem_execbuffer2 eb = {
                         .buffer_count = 1,
-                        .buffers_ptr = to_user_pointer(&spin->obj[1]),
+                        .buffers_ptr =
+                                to_user_pointer(&spin->obj[SPIN_OBJ_BATCH]),
                         .flags = other,
                 };
                 gem_execbuf(fd, &eb);
@@ -537,7 +538,8 @@ static void semaphore_resolve(int i915)
 
                 /* Then cancel the spinner */
                 *cs++ = MI_STORE_DWORD_IMM;
-                *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+                *cs++ = spin->obj[SPIN_OBJ_BATCH].offset +
+                        offset_in_page(spin->condition);
                 *cs++ = 0;
                 *cs++ = MI_BATCH_BUFFER_END;
 
@@ -548,7 +550,7 @@ static void semaphore_resolve(int i915)
 
                 /* First up is our spinning semaphore */
                 memset(obj, 0, sizeof(obj));
-                obj[0] = spin->obj[1];
+                obj[0] = spin->obj[SPIN_OBJ_BATCH];
                 obj[1].handle = semaphore;
                 obj[1].offset = SEMAPHORE_ADDR;
                 obj[1].flags = EXEC_OBJECT_PINNED;
@@ -562,7 +564,7 @@ static void semaphore_resolve(int i915)
                 memset(obj, 0, sizeof(obj));
                 obj[0].handle = handle;
                 obj[0].flags = EXEC_OBJECT_WRITE; /* always after semaphore */
-                obj[1] = spin->obj[1];
+                obj[1] = spin->obj[SPIN_OBJ_BATCH];
                 eb.buffer_count = 2;
                 eb.rsvd1 = 0;
                 gem_execbuf(i915, &eb);
@@ -638,11 +640,13 @@ static void semaphore_noskip(int i915)
                 /* Cancel the following spinner */
                 *cs++ = MI_STORE_DWORD_IMM;
                 if (gen >= 8) {
-                        *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+                        *cs++ = spin->obj[SPIN_OBJ_BATCH].offset +
+                                offset_in_page(spin->condition);
                         *cs++ = 0;
                 } else {
                         *cs++ = 0;
-                        *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+                        *cs++ = spin->obj[SPIN_OBJ_BATCH].offset +
+                                offset_in_page(spin->condition);
                 }
                 *cs++ = MI_BATCH_BUFFER_END;
 
@@ -651,9 +655,9 @@ static void semaphore_noskip(int i915)
 
                 /* port0: implicit semaphore from engine */
                 memset(obj, 0, sizeof(obj));
-                obj[0] = chain->obj[1];
+                obj[0] = chain->obj[SPIN_OBJ_BATCH];
                 obj[0].flags |= EXEC_OBJECT_WRITE;
-                obj[1] = spin->obj[1];
+                obj[1] = spin->obj[SPIN_OBJ_BATCH];
                 obj[2].handle = handle;
                 memset(&eb, 0, sizeof(eb));
                 eb.buffer_count = 3;
@@ -666,7 +670,7 @@ static void semaphore_noskip(int i915)
                 memset(obj, 0, sizeof(obj));
                 obj[0].handle = handle;
                 obj[0].flags = EXEC_OBJECT_WRITE;
-                obj[1] = spin->obj[1];
+                obj[1] = spin->obj[SPIN_OBJ_BATCH];
                 memset(&eb, 0, sizeof(eb));
                 eb.buffer_count = 2;
                 eb.buffers_ptr = to_user_pointer(obj);
@@ -842,7 +846,8 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
                 } else {
                         struct drm_i915_gem_execbuffer2 eb = {
                                 .buffer_count = 1,
-                                .buffers_ptr = to_user_pointer(&spin->obj[1]),
+                                .buffers_ptr =
+                                        to_user_pointer(&spin->obj[SPIN_OBJ_BATCH]),
                                 .rsvd1 = ctx,
                                 .flags = other,
                         };
diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
index 336008b8..ecbc9050 100644
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -360,7 +360,7 @@ static void test_evict_hang(int fd)
         execbuf.buffer_count = 1;
 
         hang = igt_hang_ctx(fd, 0, 0, 0);
-        expected = hang.spin->obj[1].offset;
+        expected = hang.spin->obj[SPIN_OBJ_BATCH].offset;
 
         /* Replace the hung batch with ourselves, forcing an eviction */
         object.offset = expected;
diff --git a/tests/i915/gem_spin_batch.c b/tests/i915/gem_spin_batch.c
index a92672b8..c3aa0dfb 100644
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -79,7 +79,7 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
 
         struct drm_i915_gem_execbuffer2 eb = {
                 .buffer_count = 1,
-                .buffers_ptr = to_user_pointer(&spin->obj[1]),
+                .buffers_ptr = to_user_pointer(&spin->obj[SPIN_OBJ_BATCH]),
                 .rsvd1 = ctx1,
         };
 
@@ -98,7 +98,7 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
 
         igt_spin_end(spin);
 
-        gem_sync(fd, spin->obj[1].handle);
+        gem_sync(fd, spin->handle);
 
         igt_spin_free(fd, spin);
 
diff --git a/tests/i915/i915_hangman.c b/tests/i915/i915_hangman.c
index 9a1d5889..2e77bbb9 100644
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -209,7 +209,7 @@ static void test_error_state_capture(unsigned ring_id,
         clear_error_state();
 
         hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
-        offset = hang.spin->obj[1].offset;
+        offset = hang.spin->obj[SPIN_OBJ_BATCH].offset;
 
         batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
         gem_set_domain(device, hang.spin->handle, I915_GEM_DOMAIN_CPU, 0);
-- 
2.17.1
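
For reference, the caller-side pattern this series converts the tests to:
resubmitting the spinner's batch goes through spin->obj[SPIN_OBJ_BATCH]
instead of the hardcoded spin->obj[1]. A minimal sketch of that usage
follows; it is not part of the patch, the helper name is invented for
illustration, and the usual IGT helpers (gem_execbuf, to_user_pointer,
igt_spin_t) are assumed to come in via igt.h.

#include "igt.h"

/*
 * Illustrative only: resubmit an existing spinner batch on another
 * context, referencing the batch object by its named index rather
 * than a magic obj[1].
 */
static void resubmit_spin_batch(int fd, igt_spin_t *spin, uint32_t ctx)
{
        struct drm_i915_gem_execbuffer2 eb = {
                .buffer_count = 1,
                .buffers_ptr = to_user_pointer(&spin->obj[SPIN_OBJ_BATCH]),
                .rsvd1 = ctx, /* context to resubmit on */
        };

        gem_execbuf(fd, &eb);
}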