Lay out the scratch page structure in a similar manner to the other
paging structures. This allows us to use the same tools for setup and
teardown.

Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 91 ++++++++++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  9 ++--
 2 files changed, 54 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c013a4c..ccdb35f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -301,11 +301,12 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
         return pte;
 }
 
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int __setup_page_dma(struct drm_device *dev,
+                            struct i915_page_dma *p, gfp_t flags)
 {
         struct device *device = &dev->pdev->dev;
 
-        p->page = alloc_page(GFP_KERNEL);
+        p->page = alloc_page(flags);
         if (!p->page)
                 return -ENOMEM;
 
@@ -320,6 +321,11 @@ static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
         return 0;
 }
 
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+        return __setup_page_dma(dev, p, GFP_KERNEL);
+}
+
 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
         if (WARN_ON(!p->page))
@@ -404,7 +410,7 @@ static void gen8_setup_scratch_pt(struct i915_address_space *vm)
                 container_of(vm, struct i915_hw_ppgtt, base);
         gen8_pte_t scratch_pte;
 
-        scratch_pte = gen8_pte_encode(vm->scratch.addr,
+        scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                       I915_CACHE_LLC, true);
 
         fill_px(vm->dev, ppgtt->scratch_pt, scratch_pte);
@@ -532,7 +538,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
         unsigned num_entries = length >> PAGE_SHIFT;
         unsigned last_pte, i;
 
-        scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+        scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
                                       I915_CACHE_LLC, use_scratch);
 
         while (num_entries) {
@@ -641,7 +647,7 @@ static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_dev
 {
         int i;
 
-        if (!pd->base.page)
+        if (!px_page(pd))
                 return;
 
         for_each_set_bit(i, pd->used_pdes, I915_PDES) {
@@ -989,7 +995,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
         uint32_t pte, pde, temp;
         uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, true, 0);
 
         gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
                 u32 expected;
@@ -1224,7 +1230,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
         unsigned first_pte = first_entry % GEN6_PTES;
         unsigned last_pte, i;
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                     I915_CACHE_LLC, true, 0);
 
         while (num_entries) {
                 last_pte = first_pte + num_entries;
@@ -1283,12 +1290,12 @@ static void gen6_setup_scratch_pt(struct i915_address_space *vm)
                 container_of(vm, struct i915_hw_ppgtt, base);
         gen6_pte_t scratch_pte;
 
-        WARN_ON(vm->scratch.addr == 0);
+        WARN_ON(px_dma(vm->scratch_page) == 0);
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr,
+        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
                                      I915_CACHE_LLC, true, 0);
 
-        fill_page_dma_32(vm->dev, &ppgtt->scratch_pt->base, scratch_pte);
+        fill32_px(vm->dev, ppgtt->scratch_pt, scratch_pte);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -1523,13 +1530,14 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
         struct drm_i915_private *dev_priv = dev->dev_private;
 
         ppgtt->base.dev = dev;
-        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+        ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
 
         if (INTEL_INFO(dev)->gen < 8)
                 return gen6_ppgtt_init(ppgtt);
         else
                 return gen8_ppgtt_init(ppgtt);
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1844,7 +1852,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                                      first_entry, num_entries, max_entries))
                 num_entries = max_entries;
 
-        scratch_pte = gen8_pte_encode(vm->scratch.addr,
+        scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                       I915_CACHE_LLC, use_scratch);
 
         for (i = 0; i < num_entries; i++)
@@ -1870,7 +1878,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                      first_entry, num_entries, max_entries))
                 num_entries = max_entries;
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                     I915_CACHE_LLC, use_scratch, 0);
 
         for (i = 0; i < num_entries; i++)
                 iowrite32(scratch_pte, &gtt_base[i]);
@@ -2127,42 +2136,40 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
         vm->cleanup(vm);
 }
 
-static int setup_scratch_page(struct drm_device *dev)
+static int alloc_scratch_page(struct i915_address_space *vm)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct page *page;
-        dma_addr_t dma_addr;
+        struct i915_page_scratch *sp;
+        int ret;
+
+        WARN_ON(vm->scratch_page);
 
-        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-        if (page == NULL)
+        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+        if (sp == NULL)
                 return -ENOMEM;
 
-        set_pages_uc(page, 1);
-#ifdef CONFIG_INTEL_IOMMU
-        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
-                                PCI_DMA_BIDIRECTIONAL);
-        if (pci_dma_mapping_error(dev->pdev, dma_addr)) {
-                __free_page(page);
-                return -EINVAL;
+        ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+        if (ret) {
+                kfree(sp);
+                return ret;
         }
-#else
-        dma_addr = page_to_phys(page);
-#endif
-        dev_priv->gtt.base.scratch.page = page;
-        dev_priv->gtt.base.scratch.addr = dma_addr;
+
+        set_pages_uc(px_page(sp), 1);
+
+        vm->scratch_page = sp;
 
         return 0;
 }
 
-static void teardown_scratch_page(struct drm_device *dev)
+static void free_scratch_page(struct i915_address_space *vm)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct page *page = dev_priv->gtt.base.scratch.page;
+        struct i915_page_scratch *sp = vm->scratch_page;
 
-        set_pages_wb(page, 1);
-        pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
-                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-        __free_page(page);
+        set_pages_wb(px_page(sp), 1);
+
+        cleanup_px(vm->dev, sp);
+        kfree(sp);
+
+        vm->scratch_page = NULL;
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2270,7 +2277,7 @@ static int ggtt_probe_common(struct drm_device *dev,
                 return -ENOMEM;
         }
 
-        ret = setup_scratch_page(dev);
+        ret = alloc_scratch_page(&dev_priv->gtt.base);
         if (ret) {
                 DRM_ERROR("Scratch setup failed\n");
                 /* iounmap will also get called at remove, but meh */
@@ -2449,7 +2456,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
         struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
         iounmap(gtt->gsm);
-        teardown_scratch_page(vm->dev);
+        free_scratch_page(vm);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
@@ -2513,13 +2520,13 @@ int i915_gem_gtt_init(struct drm_device *dev)
                 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
         }
 
+        gtt->base.dev = dev;
+
         ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
                              &gtt->mappable_base, &gtt->mappable_end);
         if (ret)
                 return ret;
 
-        gtt->base.dev = dev;
-
         /* GMADR is the PCI mmio aperture into the global GTT. */
         DRM_INFO("Memory usable by graphics device = %lluM\n",
                  gtt->base.total >> 20);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 006b839..1fd4041 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -217,6 +217,10 @@ struct i915_page_dma {
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
+struct i915_page_scratch {
+        struct i915_page_dma base;
+};
+
 struct i915_page_table {
         struct i915_page_dma base;
 
@@ -243,10 +247,7 @@ struct i915_address_space {
         u64 start;              /* Start offset always 0 for dri2 */
         u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
 
-        struct {
-                dma_addr_t addr;
-                struct page *page;
-        } scratch;
+        struct i915_page_scratch *scratch_page;
 
         /**
          * List of objects currently involved in rendering.
-- 
1.9.1
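
For readers who have not followed the earlier px_*() rework: the point of
giving the scratch page the same layout is that the embedded
struct i915_page_dma base lets one set of setup/fill/teardown helpers serve
scratch pages, page tables and page directories alike. Below is a minimal,
self-contained sketch of that embedding pattern; the types and helpers
(page_dma, page_scratch, page_table, setup_px) are simplified stand-ins for
illustration, not the driver's definitions.

/* Simplified stand-ins for illustration only -- not the i915 structures. */
#include <stdio.h>

struct page_dma {
        unsigned long daddr;            /* stand-in for the DMA address */
};

struct page_scratch {
        struct page_dma base;           /* same layout as the other page structs */
};

struct page_table {
        struct page_dma base;
        unsigned long used_ptes;        /* extra bookkeeping the helpers ignore */
};

/* The accessors only touch the embedded base, so they work for any of them. */
#define px_base(px)     (&(px)->base)
#define px_dma(px)      (px_base(px)->daddr)

/* One helper serves every structure that embeds page_dma first. */
static void setup_px(struct page_dma *p, unsigned long daddr)
{
        p->daddr = daddr;               /* placeholder for the real DMA mapping */
}

int main(void)
{
        struct page_scratch sp = { { 0 } };
        struct page_table pt = { { 0 }, 0 };

        setup_px(px_base(&sp), 0x1000); /* same tool for the scratch page... */
        setup_px(px_base(&pt), 0x2000); /* ...and for a page table */

        printf("scratch daddr=%#lx, pt daddr=%#lx\n", px_dma(&sp), px_dma(&pt));
        return 0;
}

With the scratch page carrying the same embedded base, alloc_scratch_page()
and free_scratch_page() in the patch can reuse __setup_page_dma() and
cleanup_px() instead of open-coding pci_map_page()/pci_unmap_page() as before.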