[PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT

From: Oscar Mateo <oscar.mateo@xxxxxxxxx>

With Full PPGTT, each new fd creates a new context and thus a new
PPGTT, so we have to reduce the number of simultaneous fds or face
OOM problems. Every new PPGTT stores its PDEs in the GGTT, which
imposes a limit of 1024 simultaneous contexts. We want to leave at
least 1/4 of the GGTT available for "important" stuff like scanout
buffers, so never open more than 768 fds at once.

Signed-off-by: Oscar Mateo <oscar.mateo@xxxxxxxxx>
---
 tests/gem_close_race.c |   39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)
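
A back-of-the-envelope sketch of where the 768 figure comes from (not part
of the patch itself; it assumes a 2 GiB GGTT and gen6/7-style PPGTTs whose
512 PDEs occupy GGTT PTE slots covering 512 * 4 KiB = 2 MiB of GGTT address
space each, neither of which is spelled out above):

	/* Illustrative calculation only; the sizes below are assumptions. */
	#include <stdio.h>

	int main(void)
	{
		const unsigned long ggtt_size = 2048UL << 20;	/* assumed 2 GiB GGTT */
		const unsigned long pd_size = 512UL * 4096;	/* assumed GGTT footprint per PPGTT */
		unsigned long max_contexts = ggtt_size / pd_size;	/* -> 1024 */
		unsigned long fd_budget = max_contexts * 3 / 4;	/* keep 1/4 of the GGTT free -> 768 */

		printf("max contexts: %lu, fd budget: %lu\n", max_contexts, fd_budget);
		return 0;
	}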

diff --git a/tests/gem_close_race.c b/tests/gem_close_race.c
index 6064c02..f658c90 100644
--- a/tests/gem_close_race.c
+++ b/tests/gem_close_race.c
@@ -49,6 +49,8 @@
 
 static char device[80];
 static uint32_t devid;
+static unsigned int num_children = 2000;
+static unsigned int num_fds = 32000;
 
 static void selfcopy(int fd, uint32_t handle, int loops)
 {
@@ -136,11 +138,10 @@ static void run(int child)
 		gem_read(fd, handle, 0, &handle, sizeof(handle));
 }
 
-#define NUM_FD 32000
-
 struct thread {
 	pthread_mutex_t mutex;
-	int fds[NUM_FD];
+	unsigned int num_fds;
+	int *fds;
 	int done;
 };
 
@@ -152,7 +153,7 @@ static void *thread_run(void *_data)
 	while (!t->done) {
 		pthread_mutex_unlock(&t->mutex);
 
-		for (int n = 0; n < NUM_FD; n++) {
+		for (int n = 0; n < t->num_fds; n++) {
 			struct drm_i915_gem_create create;
 
 			create.handle = 0;
@@ -185,7 +186,7 @@ static void *thread_busy(void *_data)
 
 		pthread_mutex_unlock(&t->mutex);
 
-		n  = rand() % NUM_FD;
+		n  = rand() % t->num_fds;
 
 		create.handle = 0;
 		create.size = OBJECT_SIZE;
@@ -213,16 +214,23 @@ igt_main
 {
 	igt_skip_on_simulation();
 
-	sprintf(device, "/dev/dri/card%d", drm_get_card());
-	{
-		int fd = open(device, O_RDWR);
+	igt_fixture {
+		int fd;
+		sprintf(device, "/dev/dri/card%d", drm_get_card());
+		fd = open(device, O_RDWR);
 		igt_assert(fd != -1);
 		devid = intel_get_drm_devid(fd);
+
+		if (gem_uses_full_ppgtt(fd)) {
+			/* Reduce the number of simultaneous fds or face OOM */
+			num_children = 768;
+			num_fds = 768;
+		}
 		close(fd);
 	}
 
 	igt_subtest("process-exit") {
-		igt_fork(child, 2000)
+		igt_fork(child, num_children)
 			run(child);
 		igt_waitchildren();
 	}
@@ -232,17 +240,21 @@ igt_main
 		struct thread *data = calloc(1, sizeof(struct thread));
 		int n;
 
 		igt_assert(data);
 
+		data->num_fds = num_fds;
+		data->fds = calloc(num_fds, sizeof(int));
+		igt_assert(data->fds);
+
 		pthread_mutex_init(&data->mutex, NULL);
-		for (n = 0; n < NUM_FD; n++)
+		for (n = 0; n < num_fds; n++)
 			data->fds[n] = open(device, O_RDWR);
 
 		pthread_create(&thread[0], NULL, thread_run, data);
 		pthread_create(&thread[1], NULL, thread_busy, data);
 
-		for (n = 0; n < 1000*NUM_FD; n++) {
-			int i = rand() % NUM_FD;
+		for (n = 0; n < 1000*num_fds; n++) {
+			int i = rand() % num_fds;
 			if (data->fds[i] == -1) {
 				data->fds[i] = open(device, O_RDWR);
 			} else{
@@ -258,8 +270,9 @@ igt_main
 		pthread_join(thread[1], NULL);
 		pthread_join(thread[0], NULL);
 
-		for (n = 0; n < NUM_FD; n++)
+		for (n = 0; n < num_fds; n++)
 			close(data->fds[n]);
+		free(data->fds);
 		free(data);
 	}
 }
-- 
1.7.9.5
