Instead of implementing the full tracking + dynamic allocation, this patch
does a bit less than half of the work: it tracks PTE usage and warns on
unexpected conditions. The tracking follows which PTEs within a page table
are currently being used by objects. The next patch will build on this to
actually allocate the page tables only when necessary.

With the current patch there isn't much in the way of making a gen-agnostic
range allocation function. However, the next patch adds more gen-specific
behavior, which makes having separate functions a bit easier to manage.

Note that aliasing PPGTT is not managed here. The patch which actually
begins dynamic allocation/teardown explains the reasoning for this.

v2: s/pdp.pagedir/pdp.pagedirs
Make a scratch page allocation helper

Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 203 ++++++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 117 +++++++++++++--------
 2 files changed, 231 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 51fc036..b7a0232 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -66,10 +66,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
-
-static void ppgtt_bind_vma(struct i915_vma *vma,
-			   enum i915_cache_level cache_level,
-			   u32 flags);
+static int ppgtt_bind_vma(struct i915_vma *vma,
+			  enum i915_cache_level cache_level,
+			  u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
 
 static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
@@ -232,37 +231,78 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 			     (px)->page, 0, 4096, \
 			     PCI_DMA_BIDIRECTIONAL))
 
-static void free_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
+static void __free_pt_single(struct i915_pagetab *pt, struct drm_device *dev,
+			     int scratch)
 {
+	if (WARN(scratch ^ pt->scratch,
+		 "Tried to free scratch = %d. Is scratch = %d\n",
+		 scratch, pt->scratch))
+		return;
+
 	if (WARN_ON(!pt->page))
 		return;
+
+	if (!scratch) {
+		const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+			GEN8_PTES_PER_PT : GEN6_PTES_PER_PT;
+		WARN(!bitmap_empty(pt->used_ptes, count),
+		     "Free page table with %d used pages\n",
+		     bitmap_weight(pt->used_ptes, count));
+	}
+
 	i915_dma_unmap_single(pt, dev);
 	__free_page(pt->page);
+	kfree(pt->used_ptes);
 	kfree(pt);
 }
 
+#define free_pt_single(pt, dev) \
+	__free_pt_single(pt, dev, false)
+#define free_pt_scratch(pt, dev) \
+	__free_pt_single(pt, dev, true)
+
 static struct i915_pagetab *alloc_pt_single(struct drm_device *dev)
 {
 	struct i915_pagetab *pt;
-	int ret;
+	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+		GEN8_PTES_PER_PT : GEN6_PTES_PER_PT;
+	int ret = -ENOMEM;
 
 	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
 	if (!pt)
 		return ERR_PTR(-ENOMEM);
 
+	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+				GFP_KERNEL);
+
+	if (!pt->used_ptes)
+		goto fail_bitmap;
+
 	pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!pt->page) {
-		kfree(pt);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!pt->page)
+		goto fail_page;
 
 	ret = i915_dma_map_px_single(pt, dev);
-	if (ret) {
-		__free_page(pt->page);
-		kfree(pt);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto fail_dma;
+
+	return pt;
+
+fail_dma:
+	__free_page(pt->page);
+fail_page:
+	kfree(pt->used_ptes);
+fail_bitmap:
+	kfree(pt);
+
+	return ERR_PTR(ret);
+}
+
+static inline struct i915_pagetab *alloc_pt_scratch(struct drm_device *dev)
+{
+	struct i915_pagetab *pt = alloc_pt_single(dev);
+	if (!IS_ERR(pt))
+		pt->scratch = 1;
 
 	return pt;
 }
 
@@ -389,7 +429,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int used_pd = ppgtt->num_pd_entries / I915_PDES_PER_PD;
 
 	for (i = used_pd - 1; i >= 0; i--) {
-		dma_addr_t addr = ppgtt->pdp.pagedir[i]->daddr;
+		dma_addr_t addr = ppgtt->pdp.pagedirs[i]->daddr;
 		ret = gen8_write_pdp(ring, i, addr, synchronous);
 		if (ret)
 			return ret;
@@ -416,7 +456,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+		struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
 		struct i915_pagetab *pt = pd->page_tables[pde];
 		struct page *page_table = pt->page;
 
@@ -463,7 +503,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 			break;
 
 		if (pt_vaddr == NULL) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+			struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
 			struct i915_pagetab *pt = pd->page_tables[pde];
 			struct page *page_table = pt->page;
 			pt_vaddr = kmap_atomic(page_table);
@@ -509,8 +549,8 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	int i;
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+		gen8_free_page_tables(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
+		free_pd_single(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
 	}
 }
 
@@ -530,7 +570,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 	int i, ret;
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		ret = alloc_pt_range(ppgtt->pdp.pagedir[i],
+		ret = alloc_pt_range(ppgtt->pdp.pagedirs[i],
 				     0, I915_PDES_PER_PD, ppgtt->base.dev);
 		if (ret)
 			goto unwind_out;
@@ -540,7 +580,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 
 unwind_out:
 	while (i--)
-		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+		gen8_free_page_tables(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -551,8 +591,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
-		if (IS_ERR(ppgtt->pdp.pagedir[i]))
+		ppgtt->pdp.pagedirs[i] = alloc_pd_single(ppgtt->base.dev);
+		if (IS_ERR(ppgtt->pdp.pagedirs[i]))
 			goto unwind_out;
 	}
 
@@ -563,7 +603,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i],
+		free_pd_single(ppgtt->pdp.pagedirs[i],
 			       ppgtt->base.dev);
 
 	return -ENOMEM;
@@ -625,9 +665,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	 * will never need to touch the PDEs again. */
 	for (i = 0; i < max_pdp; i++) {
-		struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
+		struct i915_pagedir *pd = ppgtt->pdp.pagedirs[i];
 		gen8_ppgtt_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(ppgtt->pdp.pagedir[i]->page);
+		pd_vaddr = kmap_atomic(ppgtt->pdp.pagedirs[i]->page);
 		for (j = 0; j < I915_PDES_PER_PD; j++) {
 			struct i915_pagetab *pt = pd->page_tables[j];
 			dma_addr_t addr = pt->daddr;
@@ -726,15 +766,13 @@ static void gen6_map_single(struct i915_pagedir *pd,
 /* Map all the page tables found in the ppgtt structure to incrementing page
  * directories. */
 static void gen6_map_page_range(struct drm_i915_private *dev_priv,
-				struct i915_pagedir *pd, unsigned pde, size_t n)
+				struct i915_pagedir *pd, uint32_t start, uint32_t length)
 {
-	if (WARN_ON(pde + n > I915_PDES_PER_PD))
-		n = I915_PDES_PER_PD - pde;
-
-	n += pde;
+	struct i915_pagetab *pt;
+	uint32_t pde, temp;
 
-	for (; pde < n; pde++)
-		gen6_map_single(pd, pde, pd->page_tables[pde]);
+	gen6_for_each_pde(pt, pd, start, length, temp, pde)
+		gen6_map_single(pd, pde, pt);
 
 	/* Make sure write is complete before other code can use this page
 	 * table. Also require for WC mapped PTEs */
@@ -1023,6 +1061,51 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	kunmap_atomic(pt_vaddr);
 }
 
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagetab *pt;
+	uint32_t pde, temp;
+
+	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+		int j;
+
+		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES_PER_PT);
+		bitmap_zero(tmp_bitmap, GEN6_PTES_PER_PT);
+		bitmap_set(tmp_bitmap, gen6_pte_index(start),
+			   gen6_pte_count(start, length));
+
+		/* TODO: To be done in the next patch. Map the page/insert
+		 * entries here */
+		for_each_set_bit(j, tmp_bitmap, GEN6_PTES_PER_PT) {
+			if (test_bit(j, pt->used_ptes)) {
+				/* Check that we're changing cache levels */
+			}
+		}
+
+		bitmap_or(pt->used_ptes, pt->used_ptes, tmp_bitmap,
+			  GEN6_PTES_PER_PT);
+	}
+
+	return 0;
+}
+
+static void gen6_teardown_va_range(struct i915_address_space *vm,
+				   uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagetab *pt;
+	uint32_t pde, temp;
+
+	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+		bitmap_clear(pt->used_ptes, gen6_pte_index(start),
+			     gen6_pte_count(start, length));
+	}
+}
+
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
@@ -1030,6 +1113,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
+	free_pt_scratch(ppgtt->scratch_pt, ppgtt->base.dev);
 	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
@@ -1057,6 +1141,9 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
 	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+	ppgtt->scratch_pt = alloc_pt_scratch(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->scratch_pt))
+		return PTR_ERR(ppgtt->scratch_pt);
 alloc:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
 						  &ppgtt->node, GEN6_PD_SIZE,
@@ -1068,20 +1155,25 @@ alloc:
 						  GEN6_PD_SIZE, GEN6_PD_ALIGN,
 						  I915_CACHE_NONE, 0);
 		if (ret)
-			return ret;
+			goto err_out;
 
 		retried = true;
 		goto alloc;
 	}
 
 	if (ret)
-		return ret;
+		goto err_out;
+
 	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
 
 	ppgtt->num_pd_entries = I915_PDES_PER_PD;
 	return 0;
+
+err_out:
+	free_pt_scratch(ppgtt->scratch_pt, ppgtt->base.dev);
+	return ret;
 }
 
 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
@@ -1126,6 +1218,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
+	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
+	ppgtt->base.teardown_va_range = gen6_teardown_va_range;
 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
@@ -1139,7 +1233,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
 		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
 
-	gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->num_pd_entries);
+	gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
 
 	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
 			 ppgtt->node.size >> 20,
@@ -1174,13 +1268,25 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 	return 0;
 }
 
-static void
+static int
 ppgtt_bind_vma(struct i915_vma *vma,
 	       enum i915_cache_level cache_level,
 	       u32 flags)
 {
+	int ret;
+
+	WARN_ON(flags);
+	if (vma->vm->allocate_va_range) {
+		ret = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start,
+						 vma->node.size);
+		if (ret)
+			return ret;
+	}
+
 	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
 				cache_level);
+	return 0;
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1189,6 +1295,9 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 			     vma->node.start,
 			     vma->obj->base.size,
 			     true);
+	if (vma->vm->teardown_va_range)
+		vma->vm->teardown_va_range(vma->vm,
+					   vma->node.start, vma->node.size);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -1496,9 +1605,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 }
 
 
-static void i915_ggtt_bind_vma(struct i915_vma *vma,
-			       enum i915_cache_level cache_level,
-			       u32 unused)
+static int i915_ggtt_bind_vma(struct i915_vma *vma,
+			      enum i915_cache_level cache_level,
+			      u32 unused)
 {
 	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
@@ -1507,6 +1616,8 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 	BUG_ON(!i915_is_ggtt(vma->vm));
 	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
 	vma->obj->has_global_gtt_mapping = 1;
+
+	return 0;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1529,9 +1640,9 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
 	intel_gtt_clear_range(first, size);
 }
 
-static void ggtt_bind_vma(struct i915_vma *vma,
-			  enum i915_cache_level cache_level,
-			  u32 flags)
+static int ggtt_bind_vma(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags)
 {
 	struct drm_device *dev = vma->vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1559,7 +1670,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	}
 
 	if (!(flags & ALIASING_BIND))
-		return;
+		return 0;
 
 	if (dev_priv->mm.aliasing_ppgtt &&
 	    (!obj->has_aliasing_ppgtt_mapping ||
@@ -1571,6 +1682,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 				     cache_level);
 		vma->obj->has_aliasing_ppgtt_mapping = 1;
 	}
+
+	return 0;
 }
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index fea846d..1246df1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -169,9 +169,33 @@ struct i915_vma {
 #define GLOBAL_BIND (1<<0)
 /* Only use this if you know you want a strictly aliased binding */
 #define ALIASING_BIND (1<<1)
-	void (*bind_vma)(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags);
+	int (*bind_vma)(struct i915_vma *vma,
+			enum i915_cache_level cache_level,
+			u32 flags);
+};
+
+
+struct i915_pagetab {
+	struct page *page;
+	dma_addr_t daddr;
+
+	unsigned long *used_ptes;
+	unsigned int scratch:1;
+};
+
+struct i915_pagedir {
+	struct page *page; /* NULL for GEN6-GEN7 */
+	union {
+		uint32_t pd_offset;
+		dma_addr_t daddr;
+	};
+
+	struct i915_pagetab *page_tables[I915_PDES_PER_PD];
+};
+
+struct i915_pagedirpo {
+	/* struct page *page; */
+	struct i915_pagedir *pagedirs[GEN8_LEGACY_PDPES];
 };
 
 struct i915_address_space {
@@ -213,6 +237,12 @@ struct i915_address_space {
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level,
 				     bool valid); /* Create a valid PTE */
+	int (*allocate_va_range)(struct i915_address_space *vm,
+				 uint64_t start,
+				 uint64_t length);
+	void (*teardown_va_range)(struct i915_address_space *vm,
+				  uint64_t start,
+				  uint64_t length);
 	void (*clear_range)(struct i915_address_space *vm,
 			    uint64_t start,
 			    uint64_t length,
@@ -224,6 +254,30 @@ struct i915_address_space {
 	void (*cleanup)(struct i915_address_space *vm);
 };
 
+struct i915_hw_ppgtt {
+	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
+	unsigned num_pd_entries;
+	unsigned num_pd_pages; /* gen8+ */
+	union {
+		struct i915_pagedirpo pdp;
+		struct i915_pagedir pd;
+	};
+
+	struct i915_pagetab *scratch_pt;
+
+	struct i915_hw_context *ctx;
+
+	gen6_gtt_pte_t __iomem *pd_addr;
+
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct intel_ring_buffer *ring,
+			 bool synchronous);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a @@ -252,47 +306,22 @@ struct i915_gtt { unsigned long *mappable_end); }; -struct i915_pagetab { - struct page *page; - dma_addr_t daddr; -}; - -struct i915_pagedir { - struct page *page; /* NULL for GEN6-GEN7 */ - union { - uint32_t pd_offset; - dma_addr_t daddr; - }; - - struct i915_pagetab *page_tables[I915_PDES_PER_PD]; /* PDEs */ -}; - -struct i915_pagedirpo { - /* struct page *page; */ - struct i915_pagedir *pagedir[GEN8_LEGACY_PDPES]; -}; - -struct i915_hw_ppgtt { - struct i915_address_space base; - struct kref ref; - struct drm_mm_node node; - unsigned num_pd_entries; - unsigned num_pd_pages; /* gen8+ */ - union { - struct i915_pagedirpo pdp; - struct i915_pagedir pd; - }; - - struct i915_hw_context *ctx; - - gen6_gtt_pte_t __iomem *pd_addr; - - int (*enable)(struct i915_hw_ppgtt *ppgtt); - int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, - bool synchronous); - void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); -}; +/* For each pde iterates over every pde between from start until start + length. + * If start, and start+length are not perfectly divisible, the macro will round + * down, and up as needed. The macro modifies pde, start, and length. Dev is + * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, + * and length = 2G effectively iterates over every PDE in the system. On gen8+ + * it simply iterates over every page directory entry in a page directory. + * + * XXX: temp is not actually needed, but it saves doing the ALIGN operation. + */ +#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ + for (iter = gen6_pde_index(start), pt = (pd)->page_tables[iter]; \ + length > 0 && iter < I915_PDES_PER_PD; \ + pt = (pd)->page_tables[++iter], \ + temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ + temp = min(temp, (unsigned)length), \ + start += temp, length -= temp) static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) { -- 1.9.2 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx