From: Artur Harasimiuk <artur.harasimiuk@xxxxxxxxx>

It's possible to trigger a race between vma eviction and closing the
object backing it when handling colliding addresses, resulting in
use-after-free.

Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Thomas Daniel <thomas.daniel@xxxxxxxxx>
Signed-off-by: Artur Harasimiuk <artur.harasimiuk@xxxxxxxxx>
Signed-off-by: Michał Winiarski <michal.winiarski@xxxxxxxxx>
---
 tests/gem_softpin.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index 1a9ef02..d4613bc 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -27,6 +27,7 @@
  */
 
 #include "igt.h"
+#include <pthread.h>
 
 #define EXEC_OBJECT_PINNED (1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
@@ -471,6 +472,74 @@ static void test_noreloc(int fd, enum sleep sleep)
 		gem_close(fd, object[i].handle);
 }
 
+#define SOFTPIN_STRESS_LOOPS 100000
+
+struct evict_close_thread_data {
+	int fd;
+	int pipefd[2];
+	bool stop;
+	pthread_mutex_t mutex;
+};
+
+static void *evict_close_thread(void *data)
+{
+	struct evict_close_thread_data *t = (struct evict_close_thread_data*)data;
+	uint32_t handle;
+
+	pthread_mutex_lock(&t->mutex);
+	while (!t->stop) {
+		pthread_mutex_unlock(&t->mutex);
+		read(t->pipefd[0], &handle, sizeof(handle));
+		gem_close(t->fd, handle);
+		pthread_mutex_lock(&t->mutex);
+	}
+	pthread_mutex_unlock(&t->mutex);
+
+	return NULL;
+}
+
+static void test_evict_close_race(int fd)
+{
+	pthread_t t;
+	struct evict_close_thread_data t_data;
+	unsigned int loops = SOFTPIN_STRESS_LOOPS;
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	memset(&object, 0, sizeof(object));
+
+	execbuf.buffers_ptr = (uintptr_t)&object;
+	execbuf.buffer_count = 1;
+	object.flags = EXEC_OBJECT_PINNED;
+	object.offset = 0;
+
+	memset(&t_data, 0, sizeof(t_data));
+	t_data.fd = fd;
+	igt_assert(pthread_mutex_init(&t_data.mutex, NULL) == 0);
+	igt_assert(pipe(t_data.pipefd) == 0);
+
+	igt_assert(pthread_create(&t, NULL, evict_close_thread, &t_data) == 0);
+
+	while (loops--) {
+		object.handle = gem_create(fd, 4096);
+		gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+		gem_execbuf(fd, &execbuf);
+		write(t_data.pipefd[1], &object.handle, sizeof(object.handle));
+	}
+
+	pthread_mutex_lock(&t_data.mutex);
+	t_data.stop = 1;
+	pthread_mutex_unlock(&t_data.mutex);
+
+	pthread_join(t, NULL);
+
+	close(t_data.pipefd[0]);
+	close(t_data.pipefd[1]);
+	pthread_mutex_destroy(&t_data.mutex);
+}
+
 igt_main
 {
 	int fd = -1;
@@ -507,6 +576,9 @@ igt_main
 	igt_subtest("evict-hang")
 		test_evict_hang(fd);
 
+	igt_subtest("stress-evict-close")
+		test_evict_close_race(fd);
+
 	igt_fixture
 		close(fd);
 }
-- 
2.8.0