[PATCH 1/3] lib: add igt_dummyload

Generalized from the auto-tuned GPU dummy workloads in gem_wait and kms_flip.

v2: Add recursive batch feature from Chris

Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
---
 lib/Makefile.sources |   2 +
 lib/igt.h            |   1 +
 lib/igt_dummyload.c  | 613 +++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/igt_dummyload.h  |  77 +++++++
 4 files changed, 693 insertions(+)
 create mode 100644 lib/igt_dummyload.c
 create mode 100644 lib/igt_dummyload.h

diff --git a/lib/Makefile.sources b/lib/Makefile.sources
index e8e277b..7fc5ec2 100644
--- a/lib/Makefile.sources
+++ b/lib/Makefile.sources
@@ -75,6 +75,8 @@ lib_source_list =	 	\
 	igt_draw.h		\
 	igt_pm.c		\
 	igt_pm.h		\
+	igt_dummyload.c		\
+	igt_dummyload.h		\
 	uwildmat/uwildmat.h	\
 	uwildmat/uwildmat.c	\
 	$(NULL)
diff --git a/lib/igt.h b/lib/igt.h
index d751f24..a0028d5 100644
--- a/lib/igt.h
+++ b/lib/igt.h
@@ -32,6 +32,7 @@
 #include "igt_core.h"
 #include "igt_debugfs.h"
 #include "igt_draw.h"
+#include "igt_dummyload.h"
 #include "igt_fb.h"
 #include "igt_gt.h"
 #include "igt_kms.h"
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
new file mode 100644
index 0000000..f7a64b7
--- /dev/null
+++ b/lib/igt_dummyload.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "igt.h"
+#include "igt_dummyload.h"
+#include <time.h>
+#include <signal.h>
+#include <sys/syscall.h>
+
+/**
+ * SECTION:igt_dummyload
+ * @short_description: Library for submitting GPU workloads
+ * @title: Dummyload
+ * @include: igt.h
+ *
+ * A lot of igt testcases need some GPU workload to make sure a race window is
+ * big enough. Unfortunately, a fixed amount of workload leads to spurious
+ * test failures or overly long runtimes on particularly fast or slow
+ * platforms.
+ * This library contains functionality to submit GPU workloads that should
+ * consume exactly a specific amount of time.
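+ *
+ * As a rough usage sketch (the fb_handle, fb_stride, fb_width and fb_height
+ * values are placeholders for whatever framebuffer object the test already
+ * holds; passing 0 iterations selects auto-calibration):
+ *
+ * |[
+ * igt_emit_dummy_load(bufmgr, batch, drm_fd, fb_handle, fb_stride,
+ *                     fb_width, fb_height, 0, 1, IGT_DUMMY_BLIT_COPY);
+ * ]|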
+ */
+
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+
+#define gettid() syscall(__NR_gettid)
+#define sigev_notify_thread_id _sigev_un._tid
+
+#define LOCAL_I915_EXEC_BSD_SHIFT      (13)
+#define LOCAL_I915_EXEC_BSD_MASK       (3 << LOCAL_I915_EXEC_BSD_SHIFT)
+
+#define ENGINE_MASK  (I915_EXEC_RING_MASK | LOCAL_I915_EXEC_BSD_MASK)
+
+
+/* Internal data structures to avoid having to pass tons of parameters
+ * around. */
+struct dummy_info {
+	drm_intel_bufmgr *bufmgr;
+	struct intel_batchbuffer *batch;
+	int drm_fd;
+	uint32_t buf_handle;
+	uint32_t buf_stride;
+	uint32_t buf_tiling;
+	int fb_width;
+	int fb_height;
+};
+
+static void blit_copy(struct intel_batchbuffer *batch,
+		      drm_intel_bo *dst, drm_intel_bo *src,
+		      unsigned int width, unsigned int height,
+		      unsigned int dst_pitch, unsigned int src_pitch)
+{
+	BLIT_COPY_BATCH_START(0);
+	OUT_BATCH((3 << 24) | /* 32 bits */
+		  (0xcc << 16) | /* copy ROP */
+		  dst_pitch);
+	OUT_BATCH(0 << 16 | 0);
+	OUT_BATCH(height << 16 | width);
+	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
+	OUT_BATCH(0 << 16 | 0);
+	OUT_BATCH(src_pitch);
+	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
+	ADVANCE_BATCH();
+
+	if (batch->gen >= 6) {
+		BEGIN_BATCH(3, 0);
+		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
+		OUT_BATCH(0);
+		OUT_BATCH(0);
+		ADVANCE_BATCH();
+	}
+}
+
+static void blit_fill(struct intel_batchbuffer *batch, drm_intel_bo *dst,
+		      unsigned int width, unsigned int height)
+{
+	COLOR_BLIT_COPY_BATCH_START(COLOR_BLT_WRITE_ALPHA |
+				    XY_COLOR_BLT_WRITE_RGB);
+	OUT_BATCH((3 << 24)	| /* 32 Bit Color */
+		  (0xF0 << 16)	| /* Raster OP copy background register */
+		  0);		  /* Dest pitch is 0 */
+	OUT_BATCH(0);
+	OUT_BATCH(width << 16	|
+		  height);
+	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
+	OUT_BATCH(rand()); /* random pattern */
+	ADVANCE_BATCH();
+}
+
+static int emit_dummy_load_blitcopy(struct dummy_info *d, int limit, int timeout)
+{
+	int i, ret = 0;
+	drm_intel_bo *src_bo, *dst_bo, *fb_bo;
+	struct intel_batchbuffer *batch = d->batch;
+	drm_intel_bufmgr *bufmgr = d->bufmgr;
+
+	igt_require(bufmgr);
+
+	src_bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+	igt_assert(src_bo);
+
+	dst_bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+	igt_assert(dst_bo);
+
+	fb_bo = gem_handle_to_libdrm_bo(bufmgr, d->drm_fd, "imported",
+				        d->buf_handle);
+	igt_assert(fb_bo);
+
+	for (i = 0; i < limit; i++) {
+		blit_copy(batch, dst_bo, src_bo,
+			  2048, 2048,
+			  2048*4, 2048*4);
+		igt_swap(src_bo, dst_bo);
+	}
+	blit_copy(batch, fb_bo, src_bo,
+		  min(d->fb_width, 2048), min(d->fb_height, 2048),
+		  d->buf_stride, 2048*4);
+
+	intel_batchbuffer_flush(batch);
+
+	if (timeout > 0)
+		ret = drm_intel_gem_bo_wait(fb_bo, timeout * NSEC_PER_SEC);
+	drm_intel_bo_unreference(src_bo);
+	drm_intel_bo_unreference(dst_bo);
+	drm_intel_bo_unreference(fb_bo);
+
+	return ret;
+}
+
+static int emit_dummy_load_blitfill(struct dummy_info *d, int limit, int timeout)
+{
+	int i, ret = 0;
+	struct intel_batchbuffer *batch = d->batch;
+	drm_intel_bufmgr *bufmgr = d->bufmgr;
+	drm_intel_bo *dst_bo;
+
+	igt_require(bufmgr);
+
+	dst_bo = gem_handle_to_libdrm_bo(bufmgr, d->drm_fd, "", d->buf_handle);
+	igt_assert(dst_bo);
+
+	for (i = 0; i < limit; i++) {
+		blit_fill(batch, dst_bo,
+			  min(d->fb_width, dst_bo->size/2),
+			  min(d->fb_height, dst_bo->size/2));
+	}
+	intel_batchbuffer_flush(batch);
+
+	if (timeout > 0)
+		ret = drm_intel_gem_bo_wait(dst_bo, timeout * NSEC_PER_SEC);
+	drm_intel_bo_unreference(dst_bo);
+
+	return ret;
+}
+
+static int emit_dummy_load_rendercopy(struct dummy_info *d, int limit, int timeout)
+{
+	struct intel_batchbuffer *batch = d->batch;
+	drm_intel_bufmgr *bufmgr = d->bufmgr;
+	static uint32_t devid = 0;
+	igt_render_copyfunc_t copyfunc;
+	struct igt_buf sb[3], *src, *dst, *fb;
+	int i, ret = 0;
+
+	igt_require(bufmgr);
+
+	if (!devid)
+		devid = intel_get_drm_devid(d->drm_fd);
+	copyfunc = igt_get_render_copyfunc(devid);
+	if (copyfunc == NULL)
+		return emit_dummy_load_blitfill(d, limit, timeout);
+
+	sb[0].bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+	igt_assert(sb[0].bo);
+	sb[0].size = sb[0].bo->size;
+	sb[0].tiling = I915_TILING_NONE;
+	sb[0].data = NULL;
+	sb[0].num_tiles = sb[0].bo->size;
+	sb[0].stride = 4 * 2048;
+
+	sb[1].bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+	igt_assert(sb[1].bo);
+	sb[1].size = sb[1].bo->size;
+	sb[1].tiling = I915_TILING_NONE;
+	sb[1].data = NULL;
+	sb[1].num_tiles = sb[1].bo->size;
+	sb[1].stride = 4 * 2048;
+
+	sb[2].bo = gem_handle_to_libdrm_bo(bufmgr, d->drm_fd, "imported",
+					   d->buf_handle);
+	igt_assert(sb[2].bo);
+	sb[2].size = sb[2].bo->size;
+	sb[2].tiling = d->buf_tiling;
+	sb[2].data = NULL;
+	sb[2].num_tiles = sb[2].bo->size;
+	sb[2].stride = d->buf_stride;
+
+	src = &sb[0];
+	dst = &sb[1];
+	fb = &sb[2];
+
+	for (i = 0; i < limit; i++) {
+		copyfunc(batch, NULL,
+			 src, 0, 0,
+			 2048, 2048,
+			 dst, 0, 0);
+
+		igt_swap(src, dst);
+	}
+	copyfunc(batch, NULL,
+		 src, 0, 0,
+		 min(d->fb_width, 2048), min(d->fb_height, 2048),
+		 fb, 0, 0);
+	intel_batchbuffer_flush(batch);
+
+	if (timeout > 0)
+		ret = drm_intel_gem_bo_wait(fb->bo, timeout * NSEC_PER_SEC);
+	drm_intel_bo_unreference(sb[0].bo);
+	drm_intel_bo_unreference(sb[1].bo);
+	drm_intel_bo_unreference(sb[2].bo);
+
+	return ret;
+}
+
+static unsigned long gettime_us(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+
+	return ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
+}
+
+static int calibrate_dummy_load(struct dummy_info *d,
+				const char *ring_name,
+				int enough_work_in_seconds,
+				int (*emit)(struct dummy_info *d, int limit, int timeout))
+{
+	unsigned long start;
+	int ops = 1;
+
+	do {
+		unsigned long diff;
+		int ret;
+		start = gettime_us();
+		ret = emit(d, ops, 10);
+		diff = gettime_us() - start;
+
+		if (ret || diff / USEC_PER_SEC > enough_work_in_seconds)
+			break;
+		ops += ops;
+	} while (ops < 100000);
+
+	igt_debug("%s dummy load calibrated: %d operations / second\n",
+		  ring_name, ops);
+
+	return ops;
+}
+
+static void igt_dummy_load_blitcopy(struct dummy_info* d, int seconds)
+{
+	static int ops_per_sec = 0;
+
+	if (ops_per_sec == 0)
+		ops_per_sec = calibrate_dummy_load(d, "bcs", seconds,
+						   emit_dummy_load_blitcopy);
+
+	emit_dummy_load_blitcopy(d, seconds * ops_per_sec, 0);
+}
+
+static void igt_dummy_load_blitfill(struct dummy_info* d, int seconds)
+{
+	static int ops_per_sec = 0;
+
+	if (ops_per_sec == 0)
+		ops_per_sec = calibrate_dummy_load(d, "bcs", seconds,
+						   emit_dummy_load_blitfill);
+
+	emit_dummy_load_blitfill(d, seconds * ops_per_sec, 0);
+}
+
+static void igt_dummy_load_rendercopy(struct dummy_info* d, int seconds)
+{
+	static int ops_per_sec = 0;
+
+	if (ops_per_sec == 0)
+		ops_per_sec = calibrate_dummy_load(d, "rcs", seconds,
+						   emit_dummy_load_rendercopy);
+
+	emit_dummy_load_rendercopy(d, seconds * ops_per_sec, 0);
+}
+
+/**
+ * igt_calibrate_dummy_load:
+ * @bufmgr: the libdrm bufmgr
+ * @batch: the batchbuffer
+ * @drm_fd: the DRM file descriptor
+ * @buf_handle: handle of the buffer the workload operates on. For
+ *              IGT_DUMMY_BLIT_COPY and IGT_DUMMY_RENDER_COPY this is the
+ *              destination buffer the final result is copied into
+ * @buf_stride: the stride of the buffer, ignored by IGT_DUMMY_BLIT_FILL
+ * @fb_width: width of the rectangle
+ * @fb_height: height of the rectangle
+ * @enough_work_in_seconds: target duration, in seconds, of the GPU workload
+ * @method: Type of GPU workload
+ *
+ * This function returns the number of operations the chosen GPU workload
+ * executes in @enough_work_in_seconds seconds.
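+ *
+ * A typical pattern (sketch only, with placeholder buffer parameters) is to
+ * calibrate once and then reuse the result as a fixed iteration count for
+ * igt_emit_dummy_load():
+ *
+ * |[
+ * int ops = igt_calibrate_dummy_load(bufmgr, batch, drm_fd, fb_handle,
+ *                                    fb_stride, fb_width, fb_height, 1,
+ *                                    IGT_DUMMY_BLIT_COPY);
+ * igt_emit_dummy_load(bufmgr, batch, drm_fd, fb_handle, fb_stride,
+ *                     fb_width, fb_height, ops, 0, IGT_DUMMY_BLIT_COPY);
+ * ]|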
+ */
+int igt_calibrate_dummy_load(drm_intel_bufmgr *bufmgr,
+			     struct intel_batchbuffer *batch,
+			     int drm_fd,
+			     uint32_t buf_handle,
+			     uint32_t buf_stride,
+			     int fb_width,
+			     int fb_height,
+			     int enough_work_in_seconds,
+			     enum igt_dummy_load_method method)
+{
+	struct dummy_info dummy_info = {
+		.bufmgr = bufmgr,
+		.batch = batch,
+		.drm_fd = drm_fd,
+		.buf_handle = buf_handle,
+		.buf_stride = buf_stride,
+		.fb_width = fb_width,
+		.fb_height = fb_height,
+	};
+
+	switch (method) {
+	case IGT_DUMMY_RENDER_COPY:
+		return calibrate_dummy_load(&dummy_info, "rcs",
+					    enough_work_in_seconds,
+					    emit_dummy_load_rendercopy);
+	case IGT_DUMMY_BLIT_COPY:
+		return calibrate_dummy_load(&dummy_info, "bcs",
+					    enough_work_in_seconds,
+					    emit_dummy_load_blitcopy);
+	case IGT_DUMMY_BLIT_FILL:
+		return calibrate_dummy_load(&dummy_info, "bcs",
+					    enough_work_in_seconds,
+					    emit_dummy_load_blitfill);
+	default:
+		igt_assert(false);
+	}
+}
+
+/**
+ * igt_emit_dummy_load:
+ * @bufmgr: the libdrm bufmgr
+ * @batch: the batchbuffer
+ * @drm_fd: the DRM file descriptor
+ * @buf_handle: handle of the buffer the workload operates on. For
+ *              IGT_DUMMY_BLIT_COPY and IGT_DUMMY_RENDER_COPY this is the
+ *              destination buffer the final result is copied into
+ * @buf_stride: the stride of the buffer, ignored by IGT_DUMMY_BLIT_FILL
+ * @fb_width: width of the rectangle
+ * @fb_height: height of the rectangle
+ * @iterations: number of operations the dummy load executes. If less than 1,
+ *              the number of iterations needed for @enough_work_in_seconds
+ *              seconds of GPU workload is determined automatically
+ * @enough_work_in_seconds: target duration, in seconds, of the GPU workload.
+ *              Ignored when auto-calibration is disabled (@iterations >= 1)
+ * @method: Type of GPU workload
+ *
+ * This function submits a dummy workload to the GPU, optionally auto-tuning it
+ * so that the workload consumes a specific amount of time.
+ */
+void igt_emit_dummy_load(drm_intel_bufmgr *bufmgr,
+			 struct intel_batchbuffer *batch,
+			 int drm_fd,
+			 uint32_t buf_handle,
+			 uint32_t buf_stride,
+			 int fb_width,
+			 int fb_height,
+			 int iterations,
+			 int enough_work_in_seconds,
+			 enum igt_dummy_load_method method)
+{
+	struct dummy_info dummy_info = {
+		.bufmgr = bufmgr,
+		.batch = batch,
+		.drm_fd = drm_fd,
+		.buf_handle = buf_handle,
+		.buf_stride = buf_stride,
+		.fb_width = fb_width,
+		.fb_height = fb_height,
+	};
+
+	switch (method) {
+	case IGT_DUMMY_RENDER_COPY:
+		if (iterations > 0)
+			emit_dummy_load_rendercopy(&dummy_info, iterations, 0);
+		else
+			igt_dummy_load_rendercopy(&dummy_info,
+						  enough_work_in_seconds);
+		break;
+	case IGT_DUMMY_BLIT_COPY:
+		if (iterations > 0)
+			emit_dummy_load_blitcopy(&dummy_info, iterations, 0);
+		else
+			igt_dummy_load_blitcopy(&dummy_info,
+						enough_work_in_seconds);
+		break;
+	case IGT_DUMMY_BLIT_FILL:
+		if (iterations > 0)
+			emit_dummy_load_blitfill(&dummy_info, iterations, 0);
+		else
+			igt_dummy_load_blitfill(&dummy_info,
+						enough_work_in_seconds);
+		break;
+	default:
+		igt_assert(false);
+		break;
+	}
+}
+
+static uint32_t *batch;
+
+static uint32_t emit_recursive_batch(int fd, int engine)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj;
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	unsigned engines[16];
+	unsigned nengine;
+	int i;
+
+	nengine = 0;
+	if (engine < 0) {
+		for_each_engine(fd, engine)
+			if (engine)
+				engines[nengine++] = engine;
+	} else {
+		igt_require(gem_has_ring(fd, engine));
+		engines[nengine++] = engine;
+	}
+	igt_require(nengine);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)&obj;
+	execbuf.buffer_count = 1;
+
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, 4096);
+
+	obj.relocs_ptr = (uintptr_t)&reloc;
+	obj.relocation_count = 1;
+	memset(&reloc, 0, sizeof(reloc));
+
+	batch = gem_mmap__gtt(fd, obj.handle, 4096, PROT_WRITE);
+	gem_set_domain(fd, obj.handle,
+			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	reloc.target_handle = obj.handle; /* recurse */
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = 0;
+	reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+	reloc.write_domain = 0;
+
+	i = 0;
+	batch[i] = MI_BATCH_BUFFER_START;
+	if (gen >= 8) {
+		batch[i] |= 1 << 8 | 1;
+		batch[++i] = 0;
+		batch[++i] = 0;
+	} else if (gen >= 6) {
+		batch[i] |= 1 << 8;
+		batch[++i] = 0;
+	} else {
+		batch[i] |= 2 << 6;
+		batch[++i] = 0;
+		if (gen < 4) {
+			batch[i] |= 1;
+			reloc.delta = 1;
+		}
+	}
+
+	for (i = 0; i < nengine; i++) {
+		execbuf.flags &= ~ENGINE_MASK;
+		execbuf.flags |= engines[i];
+		gem_execbuf(fd, &execbuf);
+	}
+
+	return obj.handle;
+}
+
+static void sigiter(int sig, siginfo_t *info, void *arg)
+{
+	*batch = MI_BATCH_BUFFER_END;
+	__sync_synchronize();
+}
+
+static timer_t setup_batch_exit_timer(int seconds)
+{
+	timer_t timer;
+	struct sigevent sev;
+	struct sigaction act;
+	struct itimerspec its;
+
+	memset(&sev, 0, sizeof(sev));
+	sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
+	sev.sigev_notify_thread_id = gettid();
+	sev.sigev_signo = SIGRTMIN + 1;
+	igt_assert(timer_create(CLOCK_MONOTONIC, &sev, &timer) == 0);
+	igt_assert(timer > 0);
+
+	memset(&act, 0, sizeof(act));
+	act.sa_sigaction = sigiter;
+	act.sa_flags = SA_SIGINFO;
+	igt_assert(sigaction(SIGRTMIN + 1, &act, NULL) == 0);
+
+	memset(&its, 0, sizeof(its));
+	its.it_value.tv_nsec = 0;
+	its.it_value.tv_sec = seconds;
+	igt_assert(timer_settime(timer, 0, &its, NULL) == 0);
+
+	return timer;
+}
+
+/**
+ * igt_spin_batch:
+ * @fd: open i915 drm file descriptor
+ * @seconds: how long, in seconds, the batch keeps executing before it
+ *           terminates. If the value is less than 0, the batch runs forever.
+ * @engine: Ring to execute batch OR'd with execbuf flags. If value is -1
+ *          execute on all available rings.
+ *
+ * Start a recursive batch on a ring that terminates after an exact amount of
+ * time has elapsed. The call returns immediately with an #igt_spin_t that
+ * contains the gem handle which can be waited upon. The returned structure
+ * must be passed to igt_post_spin_batch() for post-processing.
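+ *
+ * A minimal usage sketch: keep the render ring busy for one second while the
+ * test exercises its race window, then clean up:
+ *
+ * |[
+ * igt_spin_t spin = igt_spin_batch(fd, 1, I915_EXEC_RENDER);
+ * // the racy part of the test runs here
+ * igt_post_spin_batch(fd, spin);
+ * ]|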
+ *
+ * Returns:
+ * Structure with helper internal state for igt_post_spin_batch().
+ */
+igt_spin_t igt_spin_batch(int fd, int seconds, int engine)
+{
+	timer_t timer;
+	uint32_t handle = emit_recursive_batch(fd, engine);
+	int64_t wait_timeout = 0;
+	igt_assert_eq(gem_wait(fd, handle, &wait_timeout), -ETIME);
+
+	if (seconds < 1) {
+		if (seconds == 0) {
+			*batch = MI_BATCH_BUFFER_END;
+			__sync_synchronize();
+		}
+		return (igt_spin_t){ handle, batch, 0 };
+	}
+	timer = setup_batch_exit_timer(seconds);
+
+	return (igt_spin_t){ handle, batch, timer };
+}
+
+/**
+ * igt_post_spin_batch:
+ * @fd: open i915 drm file descriptor
+ * @arg: spin batch state from igt_spin_batch()
+ *
+ * This function does the necessary post-processing after starting a recursive
+ * batch with igt_spin_batch().
+ */
+void igt_post_spin_batch(int fd, igt_spin_t arg)
+{
+	if (arg.handle == 0)
+		return;
+
+	if (arg.timer > 0)
+		timer_delete(arg.timer);
+
+	gem_close(fd, arg.handle);
+	munmap(arg.batch, 4096);
+}
+
+/**
+ * igt_spin_batch_wait:
+ * @fd: open i915 drm file descriptor
+ * @seconds: how long, in seconds, the batch keeps executing before it
+ *           terminates. If the value is less than 0, the batch runs forever.
+ * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
+ *          than 0, execute on all available rings.
+ *
+ * This is similar to igt_spin_batch(), but waits for the recursive batch to
+ * finish instead of returning right away. The function also performs the
+ * necessary post-processing via igt_post_spin_batch() once the batch has
+ * completed.
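+ *
+ * For example (a sketch only), keeping every available ring busy for two
+ * seconds and returning once the dummy load has finished:
+ *
+ * |[
+ * igt_spin_batch_wait(fd, 2, -1);
+ * ]|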
+ */
+void igt_spin_batch_wait(int fd, int seconds, int engine)
+{
+	igt_spin_t spin = igt_spin_batch(fd, seconds, engine);
+	int64_t wait_timeout = (seconds + 0.5) * NSEC_PER_SEC;
+	igt_assert_eq(gem_wait(fd, spin.handle, &wait_timeout), 0);
+
+	igt_post_spin_batch(fd, spin);
+}
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
new file mode 100644
index 0000000..4b4d2b6
--- /dev/null
+++ b/lib/igt_dummyload.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __IGT_DUMMYLOAD_H__
+#define __IGT_DUMMYLOAD_H__
+
+/**
+ * igt_dummy_load_method:
+ * @IGT_DUMMY_BLIT_FILL: Use blitter engine to fill a buffer with random color
+ * @IGT_DUMMY_BLIT_COPY: Use blitter engine to copy between buffers
+ * @IGT_DUMMY_RENDER_COPY: Use render engine to copy between buffers
+ *
+ * Method to generate a GPU dummy load
+ */
+enum igt_dummy_load_method {
+	IGT_DUMMY_BLIT_FILL,
+	IGT_DUMMY_BLIT_COPY,
+	IGT_DUMMY_RENDER_COPY,
+};
+
+int igt_calibrate_dummy_load(drm_intel_bufmgr *bufmgr,
+			     struct intel_batchbuffer *batch,
+			     int drm_fd,
+			     uint32_t buf_handle,
+			     uint32_t buf_stride,
+			     int fb_width,
+			     int fb_height,
+			     int enough_work_in_seconds,
+			     enum igt_dummy_load_method method);
+
+void igt_emit_dummy_load(drm_intel_bufmgr *bufmgr,
+			 struct intel_batchbuffer *batch,
+			 int drm_fd,
+			 uint32_t buf_handle,
+			 uint32_t buf_stride,
+			 int fb_width,
+			 int fb_height,
+			 int iterations,
+			 int enough_work_in_seconds,
+			 enum igt_dummy_load_method method);
+
+typedef struct igt_spin {
+	unsigned handle;
+	uint32_t *batch;
+	timer_t timer;
+} igt_spin_t;
+
+
+igt_spin_t igt_spin_batch(int fd, int seconds, int engine);
+
+void igt_post_spin_batch(int fd, igt_spin_t arg);
+
+void igt_spin_batch_wait(int fd, int seconds, int engine);
+
+
+#endif /* __IGT_DUMMYLOAD_H__ */
-- 
2.7.0
