On Fri, Apr 08, 2016 at 05:44:13PM +0100, Peter Antoine wrote:
> +static int create_read_batch(struct drm_i915_gem_relocation_entry *reloc,
> +			     uint32_t *batch,
> +			     uint32_t dst_handle,
> +			     uint32_t size,
> +			     uint32_t reg_base)
> +{
> +	unsigned int index;
> +	unsigned int offset = 0;
> +
> +	for (index = 0, offset = 0; index < size; index++, offset += 4)

offset = 0 twice

> +	{
> +		batch[offset] = MI_STORE_REGISTER_MEM_64_BIT_ADDR;
> +		batch[offset+1] = reg_base + (index * sizeof(uint32_t));
> +		batch[offset+2] = index * sizeof(uint32_t); /* reloc */
> +		batch[offset+3] = 0;
> +
> +		reloc[index].offset = (offset + 2) * sizeof(uint32_t);
> +		reloc[index].delta = index * sizeof(uint32_t);
> +		reloc[index].target_handle = dst_handle;

Missed something.

> +	}
> +
> +	batch[offset++] = MI_BATCH_BUFFER_END;
> +	batch[offset++] = 0;
> +
> +	return offset * sizeof(uint32_t);
> +}
> +
> +static void do_read_registers(int fd,
> +			      uint32_t ctx_id,
> +			      uint32_t dst_handle,
> +			      uint32_t reg_base,
> +			      uint32_t size,
> +			      uint32_t engine_id)
> +{
> +	struct drm_i915_gem_execbuffer2 execbuf;
> +	struct drm_i915_gem_exec_object2 gem_exec[2];
> +	struct drm_i915_gem_relocation_entry reloc[MAX_NUMBER_MOCS_REGISTERS];
> +	uint32_t batch[MAX_NUMBER_MOCS_REGISTERS * 4 + 4];
> +	uint32_t handle = gem_create(fd, 4096);
> +
> +	memset(reloc, 0, sizeof(reloc));
> +	memset(gem_exec, 0, sizeof(gem_exec));
> +	memset(&execbuf, 0, sizeof(execbuf));
> +
> +	gem_exec[0].handle = dst_handle;
> +	gem_set_domain(fd, dst_handle, I915_GEM_DOMAIN_CPU, 0);

??????

> +	gem_exec[1].handle = handle;
> +	gem_exec[1].relocation_count = size;
> +	gem_exec[1].relocs_ptr = (uintptr_t) reloc;
> +
> +	execbuf.buffers_ptr = (uintptr_t)gem_exec;
> +	execbuf.buffer_count = 2;
> +	execbuf.batch_len = create_read_batch(reloc,
> +					      batch,
> +					      dst_handle,
> +					      size,
> +					      reg_base);
> +
> +	gem_write(fd, handle, 0, batch, execbuf.batch_len);
> +
> +	if (ctx_id != 0)
> +		i915_execbuffer2_set_context_id(execbuf, ctx_id);

Just set it.

> +
> +	execbuf.flags = I915_EXEC_SECURE | engine_id;
> +
> +	gem_execbuf(fd, &execbuf);
> +	gem_sync(fd, handle);
	^ Papering over a bug in your code. Hint: did you tell anyone that
	you were writing into the buffer?
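To flesh out those last two comments: nothing in the patch marks dst_handle
as being written by the GPU, so the kernel cannot order the later CPU
readback against the MI_STORE_REGISTER_MEM writes, and the up-front
set-domain(CPU) plus the gem_sync() only paper over that. A sketch of
create_read_batch() with the relocation domains filled in (the choice of
I915_GEM_DOMAIN_INSTRUCTION is my assumption, any domain that declares the
write would do), and with the redundant offset initialiser dropped:

static int create_read_batch(struct drm_i915_gem_relocation_entry *reloc,
			     uint32_t *batch,
			     uint32_t dst_handle,
			     uint32_t size,
			     uint32_t reg_base)
{
	unsigned int index, offset = 0;

	for (index = 0; index < size; index++, offset += 4) {
		batch[offset] = MI_STORE_REGISTER_MEM_64_BIT_ADDR;
		batch[offset+1] = reg_base + (index * sizeof(uint32_t));
		batch[offset+2] = index * sizeof(uint32_t); /* reloc */
		batch[offset+3] = 0;

		reloc[index].offset = (offset + 2) * sizeof(uint32_t);
		reloc[index].delta = index * sizeof(uint32_t);
		reloc[index].target_handle = dst_handle;
		/* declare the GPU write through this reloc so the kernel
		 * can serialise the readback for us */
		reloc[index].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[index].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
	}

	batch[offset++] = MI_BATCH_BUFFER_END;
	batch[offset++] = 0;

	return offset * sizeof(uint32_t);
}

With the write declared, do_read_registers() should not need the
gem_set_domain(CPU) before the execbuf, nor the gem_sync() afterwards,
provided the readback path does a set-domain (or a pread) first. And since
ctx_id == 0 already names the default context,
i915_execbuffer2_set_context_id(execbuf, ctx_id) can simply be called
unconditionally.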
> +
> +	gem_close(fd, handle);
> +}
> +
> +#define LOCAL_MI_LOAD_REGISTER_IMM (0x22 << 23)
> +
> +static int create_write_batch(uint32_t *batch,
> +			      const uint32_t *values,
> +			      uint32_t size,
> +			      uint32_t reg_base)
> +{
> +	unsigned int i;
> +	unsigned int offset = 0;
> +
> +	batch[offset++] = LOCAL_MI_LOAD_REGISTER_IMM | (size * 2 - 1);
> +
> +	for (i = 0; i < size; i++) {
> +		batch[offset++] = reg_base + (i * 4);
> +		batch[offset++] = values[i];
> +	}
> +
> +	batch[offset++] = 0;
> +	batch[offset++] = MI_BATCH_BUFFER_END;
> +	batch[offset++] = 0;
> +
> +	return offset * sizeof(uint32_t);
> +}
> +
> +static void write_registers(int fd,
> +			    uint32_t ctx_id,
> +			    uint32_t reg_base,
> +			    const uint32_t *values,
> +			    uint32_t size,
> +			    uint32_t engine_id)
> +{
> +	struct drm_i915_gem_exec_object2 gem_exec;
> +	struct drm_i915_gem_execbuffer2 execbuf;
> +	uint32_t batch[MAX_NUMBER_MOCS_REGISTERS * 4 + 4];
> +	uint32_t handle = gem_create(fd, 4096);
> +
> +	memset(&gem_exec, 0, sizeof(gem_exec));
> +	memset(&execbuf, 0, sizeof(execbuf));
> +
> +	gem_exec.handle = handle;
> +
> +	execbuf.buffers_ptr = (uintptr_t)&gem_exec;
> +	execbuf.buffer_count = 1;
> +	execbuf.batch_len = create_write_batch(batch, values, size, reg_base);
> +
> +	gem_write(fd, handle, 0, batch, execbuf.batch_len);
> +
> +	if (ctx_id != 0)
> +		i915_execbuffer2_set_context_id(execbuf, ctx_id);
> +
> +	execbuf.flags = I915_EXEC_SECURE | engine_id;
> +
> +	gem_execbuf(fd, &execbuf);
> +	gem_sync(fd, handle);
> +
> +	gem_close(fd, handle);
> +}
> +
> +static void check_control_registers(int fd,
> +				    const struct intel_execution_engine *engine,
> +				    uint32_t ctx_id,
> +				    bool dirty)
> +{
> +	uint32_t dst_handle = gem_create(fd, 4096);
> +	uint32_t reg_base = get_engine_base(engine->exec_id | engine->flags);
> +	uint32_t *read_regs;
> +	struct mocs_table table;
> +
> +	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, dirty));
> +
> +	do_read_registers(fd,
> +			  ctx_id,
> +			  dst_handle,
> +			  reg_base,
> +			  table.size,
> +			  engine->exec_id | engine->flags);
> +
> +	read_regs = gem_mmap__gtt(fd, dst_handle, 4096, PROT_READ);

Missing set-domain.

GTT for readback! I know we want some variety in test cases, but we
shouldn't encourage people to use GTT for reads.
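On that last point, one possible shape for the readback that avoids the GTT
mapping entirely is the plain pread wrapper; the fixed-size local array is
my own simplification and it assumes the relocation write-domain fix above,
so treat this as a sketch rather than the required form:

	uint32_t read_regs[MAX_NUMBER_MOCS_REGISTERS];

	/* pread waits for the outstanding GPU write into dst_handle and
	 * copies the results out without going through the GTT */
	gem_read(fd, dst_handle, 0, read_regs,
		 table.size * sizeof(uint32_t));

	for (int index = 0; index < table.size; index++)
		igt_assert_eq(read_regs[index],
			      table.table[index].control_value);

	gem_close(fd, dst_handle);

The same shape would fit check_l3cc_registers() below, which also sidesteps
the missing munmap() noted there.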
> +	for (int index = 0; index < table.size; index++)
> +		igt_assert_eq(read_regs[index],
> +			      table.table[index].control_value);
> +
> +	gem_close(fd, dst_handle);
> +}
> +
> +static void check_l3cc_registers(int fd,
> +				 const struct intel_execution_engine *engine,
> +				 uint32_t ctx_id,
> +				 bool dirty)
> +{
> +	struct mocs_table table;
> +	uint32_t dst_handle = gem_create(fd, 4096);
> +	uint32_t *read_regs;
> +	int index;
> +
> +	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, dirty));
> +
> +	do_read_registers(fd,
> +			  ctx_id,
> +			  dst_handle,
> +			  GEN9_LNCFCMOCS0,
> +			  (table.size + 1) / 2,
> +			  engine->exec_id | engine->flags);
> +
> +	read_regs = gem_mmap__gtt(fd, dst_handle, 4096, PROT_READ);
> +
> +	for (index = 0; index < table.size / 2; index++) {
> +		igt_assert_eq(read_regs[index] & 0xffff,
> +			      table.table[index * 2].l3cc_value);
> +		igt_assert_eq(read_regs[index] >> 16,
> +			      table.table[index * 2 + 1].l3cc_value);
> +	}
> +
> +	if (table.size & 0x01)
> +		igt_assert_eq(read_regs[index] & 0xffff,
> +			      table.table[index * 2].l3cc_value);
> +

munmap()

> +	gem_close(fd, dst_handle);
> +}
> +
> +static void test_context_mocs_values(int fd,
> +				     const struct intel_execution_engine *engine,
> +				     bool use_default)
> +{
> +	uint32_t ctx_id = 0;
> +
> +	if (!use_default)
> +		ctx_id = gem_context_create(fd);
> +
> +	check_control_registers(fd, engine, ctx_id, false);
> +	check_l3cc_registers(fd, engine, ctx_id, false);
> +
> +	if (!use_default)
> +		gem_context_destroy(fd, ctx_id);
> +}
> +
> +static void non_context_tests(unsigned mode)
> +{
> +	int fd = drm_open_driver_master(DRIVER_INTEL);
> +	const int gen = intel_gen(intel_get_drm_devid(fd));
> +	const struct intel_execution_engine *e;
> +
> +	igt_require(gen >= 9);

Make this a function like has_mocs() so that the automatic message is a
little more explanatory.

> +
> +	igt_info("Testing Non/Default Context Engines\n");
> +	test_mocs_values(fd);
> +
> +	switch(mode) {
> +	case NONE: break;
> +	case RESET: igt_force_gpu_reset(); break;
> +	case SUSPEND: igt_system_suspend_autoresume(); break;
> +	case HIBERNATE: igt_system_hibernate_autoresume(); break;
> +	};
> +
> +	for (e = intel_execution_engines; e->name; e++)
> +
> +		if (e->exec_id != I915_EXEC_DEFAULT &&
> +		    gem_has_ring(fd, e->exec_id | e->flags)) {
> +			igt_info(" Engine: %s\n", e->name);

Both of these are debug, not info.

> +			test_context_mocs_values(fd, e, true);
> +		}
> +
> +	test_mocs_values(fd);
> +
> +	drmDropMaster(fd);

Superfluous; here and everywhere else.

> +	close(fd);
> +}
> +
> +static void context_save_restore_test(unsigned mode)
> +{
> +	int fd = drm_open_driver_master(DRIVER_INTEL);
> +	const int gen = intel_gen(intel_get_drm_devid(fd));
> +	const struct intel_execution_engine *e;
> +	uint32_t ctx_id = gem_context_create(fd);

Should we not apply a little more stress here? We need at least 2 active
contexts in order to have one unpinned. Same for fds (the default context
tests).
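For instance (a sketch only; the second context and the extra pass on it are
my additions, meant to force a switch away from ctx_id so it can actually be
unpinned before the reset/suspend, not something the patch defines):

	uint32_t ctx_id = gem_context_create(fd);
	uint32_t spare_ctx = gem_context_create(fd);

	/* first touch the context under test ... */
	for (e = intel_execution_engines; e->name; e++)
		if (e->exec_id == I915_EXEC_RENDER)
			check_control_registers(fd, e, ctx_id, false);

	/* ... then run on a second context so ctx_id is switched out and
	 * no longer pinned when the reset/suspend/hibernate happens */
	for (e = intel_execution_engines; e->name; e++)
		if (e->exec_id == I915_EXEC_RENDER)
			check_control_registers(fd, e, spare_ctx, false);

with spare_ctx destroyed alongside ctx_id at the end of the test. A second
drm fd opened before the mode switch would stress the default-context tests
in the same way.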
> +
> +	igt_require(gen >= 9);
> +
> +	igt_info("Testing Save Restore\n");
> +
> +	for (e = intel_execution_engines; e->name; e++)
> +		if (e->exec_id == I915_EXEC_RENDER) {
> +			check_control_registers(fd, e, ctx_id, false);
> +			check_l3cc_registers(fd, e, ctx_id, false);
> +		}
> +
> +	switch(mode) {
> +	case NONE: break;
> +	case RESET: igt_force_gpu_reset(); break;
> +	case SUSPEND: igt_system_suspend_autoresume(); break;
> +	case HIBERNATE: igt_system_hibernate_autoresume(); break;
> +	};
> +
> +	for (e = intel_execution_engines; e->name; e++)
> +		if (e->exec_id == I915_EXEC_RENDER) {
> +			check_control_registers(fd, e, ctx_id, false);
> +			check_l3cc_registers(fd, e, ctx_id, false);
> +		}
> +
> +	gem_context_destroy(fd, ctx_id);
> +	drmDropMaster(fd);
> +	close(fd);
> +}
-- 
Chris Wilson, Intel Open Source Technology Centre
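Two small helpers would also tidy up the points raised above; both names
(has_mocs, check_rcs_mocs) are mine and only sketch the idea:

static bool has_mocs(int fd)
{
	/* the programmable MOCS registers arrived with gen9 */
	return intel_gen(intel_get_drm_devid(fd)) >= 9;
}

static void check_rcs_mocs(int fd, uint32_t ctx_id)
{
	const struct intel_execution_engine *e;

	for (e = intel_execution_engines; e->name; e++)
		if (e->exec_id == I915_EXEC_RENDER) {
			check_control_registers(fd, e, ctx_id, false);
			check_l3cc_registers(fd, e, ctx_id, false);
		}
}

igt_require(has_mocs(fd)) then gives a self-explanatory skip message, and
the identical loops before and after the mode switch in
context_save_restore_test() collapse into two calls to check_rcs_mocs().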