The dynamic page allocation patch series added these trace events for
GEN6; this patch adds them for GEN8.

v2: Consolidate pagetable/page_directory events
v3: Multiple rebases.
v4: Rebase after s/page_tables/page_table/.

Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
Signed-off-by: Michel Thierry <michel.thierry@xxxxxxxxx> (v3+)
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/i915_trace.h   | 16 ++++++++++++++++
 2 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a950f26..dc33314f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -684,19 +684,24 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
 /* It's likely we'll map more than one pagetable at a time. This function will
  * save us unnecessary kmap calls, but do no more functionally than multiple
  * calls to map_pt. */
-static void gen8_map_pagetable_range(struct i915_page_directory *pd,
+static void gen8_map_pagetable_range(struct i915_address_space *vm,
+				     struct i915_page_directory *pd,
 				     uint64_t start,
-				     uint64_t length,
-				     struct drm_device *dev)
+				     uint64_t length)
 {
 	gen8_pde_t * const page_directory = kmap_atomic(pd->page);
 	struct i915_page_table *pt;
 	uint64_t temp, pde;
 
-	gen8_for_each_pde(pt, pd, start, length, temp, pde)
-		__gen8_do_map_pt(page_directory + pde, pt, dev);
+	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+		__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+		trace_i915_page_table_entry_map(vm, pde, pt,
+						gen8_pte_index(start),
+						gen8_pte_count(start, length),
+						GEN8_PTES);
+	}
 
-	if (!HAS_LLC(dev))
+	if (!HAS_LLC(vm->dev))
 		drm_clflush_virt_range(page_directory, PAGE_SIZE);
 
 	kunmap_atomic(page_directory);
@@ -790,6 +795,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 		gen8_initialize_pt(vm, pt);
 		pd->page_table[pde] = pt;
 		set_bit(pde, new_pts);
+		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
 	}
 
 	return 0;
@@ -853,6 +859,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 		gen8_initialize_pd(vm, pd);
 		pdp->page_directory[pdpe] = pd;
 		set_bit(pdpe, new_pds);
+		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
 	}
 
 	return 0;
@@ -982,7 +989,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 		}
 
 		set_bit(pdpe, pdp->used_pdpes);
-		gen8_map_pagetable_range(pd, start, length, dev);
+		gen8_map_pagetable_range(vm, pd, start, length);
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5..7f68ec3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -213,6 +213,22 @@ DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
 	   TP_ARGS(vm, pde, start, pde_shift)
 );
 
+DEFINE_EVENT_PRINT(i915_page_table_entry, i915_page_directory_entry_alloc,
+		   TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
+		   TP_ARGS(vm, pdpe, start, pdpe_shift),
+
+		   TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
+			     __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_page_table_entry, i915_page_directory_pointer_entry_alloc,
+		   TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
+		   TP_ARGS(vm, pml4e, start, pml4e_shift),
+
+		   TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
+			     __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
 /* Avoid extra math because we only support two sizes. The format is defined by
  * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
 #define TRACE_PT_SIZE(bits) \
--
2.4.0
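
A note for readers following the trace plumbing: both DEFINE_EVENT_PRINT entries
above reuse the existing i915_page_table_entry event class (introduced by the
GEN6 dynamic page allocation series) and only override its TP_printk() format,
which is why the pdpe/pml4e values are still read back through the class field
__entry->pde. The class itself is not part of this hunk; the sketch below shows
roughly what it is assumed to look like, reconstructed from the fields the new
callbacks consume (vm, pde, start, end). The exact end-address computation is an
assumption, not a quote from the real header.

/* Sketch only, assuming the GEN6 series defined the class along these lines. */
DECLARE_EVENT_CLASS(i915_page_table_entry,
	TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
	TP_ARGS(vm, pde, start, pde_shift),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, pde)
		__field(u64, start)
		__field(u64, end)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->pde = pde;
		__entry->start = start;
		/* assumed: last VA covered by this entry, derived from the shift */
		__entry->end = ((start + (1ULL << pde_shift)) &
				~((1ULL << pde_shift) - 1)) - 1;
	),

	TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->pde, __entry->start, __entry->end)
);

Because the events are defined through the standard TRACE_EVENT machinery, no
extra plumbing is needed to consume them; once the patch is applied they appear
under the i915 trace system and can be enabled via the usual tracefs/perf event
interfaces.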