On Wed, 2014-02-12 at 14:28 -0800, Ben Widawsky wrote:
> Like cleanup in an earlier patch, the code becomes much more readable,
> and easier to extend if we extract out helper functions for the various
> stages of init.
>
> Note that with this patch it becomes really simple, and tempting to begin
> using the 'goto out' idiom with explicit free/fini semantics. I've
> kept the error path as similar as possible to the cleanup() function to
> make sure cleanup is as robust as possible
>
> v2: Remove comment "NB:From here on, ppgtt->base.cleanup() should
> function properly"
> Update commit message to reflect above
>
> Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 182 +++++++++++++++++++++++++-----------
>  1 file changed, 126 insertions(+), 56 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index c6c221c..8a5cad9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -366,91 +366,161 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>  	gen8_ppgtt_free(ppgtt);
>  }
>
> -/**
> - * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
> - * with a net effect resembling a 2-level page table in normal x86 terms. Each
> - * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
> - * space.
> - *
> - * FIXME: split allocation into smaller pieces. For now we only ever do this
> - * once, but with full PPGTT, the multiple contiguous allocations will be bad.
> - * TODO: Do something with the size parameter
> - */
> -static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
> +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
> +					   const int max_pdp)
>  {
>  	struct page *pt_pages;
> -	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
>  	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> -	int i, j, ret;
> -
> -	if (size % (1<<30))
> -		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
> -
> -	/* 1. Do all our allocations for page directories and page tables */
> -	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> -	if (!ppgtt->pd_pages)
> -		return -ENOMEM;
>
>  	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
> -	if (!pt_pages) {
> -		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> +	if (!pt_pages)
>  		return -ENOMEM;
> -	}
>
>  	ppgtt->gen8_pt_pages = pt_pages;
> -	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
>  	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
> -	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> -	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
>
> -	ppgtt->gen8_pt_dma_addr = kcalloc(max_pdp,
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
> +{
> +	int i;
> +
> +	ppgtt->gen8_pt_dma_addr = kcalloc(ppgtt->num_pd_entries,
>  					  sizeof(*ppgtt->gen8_pt_dma_addr),
>  					  GFP_KERNEL);
> -	if (!ppgtt->gen8_pt_dma_addr) {
> -		ret = -ENOMEM;
> -		goto bail;
> -	}
> +	if (!ppgtt->gen8_pt_dma_addr)
> +		return -ENOMEM;
>
> -	for (i = 0; i < max_pdp; i++) {
> +	for (i = 0; i < ppgtt->num_pd_entries; i++) {
>  		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
>  						     sizeof(dma_addr_t),
>  						     GFP_KERNEL);
>  		if (!ppgtt->gen8_pt_dma_addr[i]) {
> -			ret = -ENOMEM;
> -			goto bail;
> +			kfree(ppgtt->gen8_pt_dma_addr);
> +			while(i--)
> +				kfree(ppgtt->gen8_pt_dma_addr[i]);
> +
> +			return -ENOMEM;
>  		}
>  	}
>
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
> +						const int max_pdp)
> +{
> +	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> +	if (!ppgtt->pd_pages)
> +		return -ENOMEM;
> +
> +	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
> +			    const int max_pdp)
> +{
> +	int ret;
> +
> +	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
> +	if (ret)
> +		return ret;
> +
> +	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
> +	if (ret) {
> +		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> +		return ret;
> +	}
> +
> +	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> +	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);

This check belongs in gen8_ppgtt_allocate_page_directories().

> +
> +	ret = gen8_ppgtt_allocate_dma(ppgtt);
> +	if (ret)
> +		gen8_ppgtt_free(ppgtt);

Just for reference, this is the same ppgtt->gen8_pt_dma_addr NULL deref issue on the error path as in 2/9.
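To make that concrete, here is a rough, untested sketch (reusing the names from the quoted patch; not meant as a drop-in fix) of an error path that unwinds only what gen8_ppgtt_alloc() itself set up, so gen8_ppgtt_free() never sees a half-initialized ppgtt. It assumes gen8_ppgtt_allocate_dma() frees its own partial allocations on failure; as quoted it kfrees the outer gen8_pt_dma_addr array before walking it in the while loop, so those two frees look like they want swapping anyway.

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int ret;

	/* BUG_ON(num_pd_pages > GEN8_LEGACY_PDPS) would move into
	 * gen8_ppgtt_allocate_page_directories(), per the comment above. */
	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret)
		goto err_free_pd;

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		goto err_free_pt;

	return 0;

err_free_pt:
	/* Relies on gen8_ppgtt_allocate_dma() having freed its own arrays. */
	__free_pages(ppgtt->gen8_pt_pages,
		     get_order(num_pt_pages << PAGE_SHIFT));
err_free_pd:
	__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
	return ret;
}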
> +
> +	return ret;
> +}
> +
> +static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
> +					     const int pd)
> +{
> +	dma_addr_t pd_addr;
> +	int ret;
> +
> +	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
> +			       &ppgtt->pd_pages[pd], 0,
> +			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> +
> +	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> +	if (ret)
> +		return ret;
> +
> +	ppgtt->pd_dma_addr[pd] = pd_addr;
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
> +					const int pd,
> +					const int pt)
> +{
> +	dma_addr_t pt_addr;
> +	struct page *p;
> +	int ret;
> +
> +	p = &ppgtt->gen8_pt_pages[pd * GEN8_PDES_PER_PAGE + pt];
> +	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
> +			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> +	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
> +	if (ret)
> +		return ret;
> +
> +	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
> +
> +	return 0;
> +}
> +
> +/**
> + * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
> + * with a net effect resembling a 2-level page table in normal x86 terms. Each
> + * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
> + * space.
> + *
> + * FIXME: split allocation into smaller pieces. For now we only ever do this
> + * once, but with full PPGTT, the multiple contiguous allocations will be bad.
> + * TODO: Do something with the size parameter
> + */
> +static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
> +{
> +	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
> +	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> +	int i, j, ret;
> +
> +	if (size % (1<<30))
> +		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
> +
> +	/* 1. Do all our allocations for page directories and page tables. */
> +	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
> +	if (ret)
> +		return ret;
> +
>  	/*
> -	 * 2. Create all the DMA mappings for the page directories and page
> -	 * tables
> +	 * 2. Create DMA mappings for the page directories and page tables.
>  	 */
>  	for (i = 0; i < max_pdp; i++) {
> -		dma_addr_t pd_addr, pt_addr;
> -
> -		/* Get the page table mappings per page directory */
>  		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> -			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
> -
> -			pt_addr = pci_map_page(ppgtt->base.dev->pdev,
> -					       p, 0, PAGE_SIZE,
> -					       PCI_DMA_BIDIRECTIONAL);
> -			ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
> +			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
>  			if (ret)
>  				goto bail;
> -
> -			ppgtt->gen8_pt_dma_addr[i][j] = pt_addr;
>  		}
>
> -		/* And the page directory mappings */
> -		pd_addr = pci_map_page(ppgtt->base.dev->pdev,
> -				       &ppgtt->pd_pages[i], 0,
> -				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> -		ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> +		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
>  		if (ret)
>  			goto bail;

Again, just for reference: the same leaked mappings for page tables on the error path as in 2/9 (a rough sketch of one possible unwind follows below the quoted patch).

> -
> -		ppgtt->pd_dma_addr[i] = pd_addr;
>  	}
>
>  	/*
> @@ -488,7 +558,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
>  			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
>  	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
>  			 ppgtt->num_pt_pages,
> -			 (ppgtt->num_pt_pages - num_pt_pages) +
> +			 (ppgtt->num_pt_pages - min_pt_pages) +
>  			 size % (1<<30));
>  	return 0;
>
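For the record, this is the kind of unwind I have in mind for the mapping loop. Untested sketch only: gen8_ppgtt_unmap_partial() is a hypothetical helper, not something in the patch, and the field names are taken from the quoted code.

/* Unmap everything mapped before the failure so bail doesn't leak DMA
 * mappings. 'pd' is the page directory the loop failed on and 'pt' the
 * page table it failed on within that directory. */
static void gen8_ppgtt_unmap_partial(struct i915_hw_ppgtt *ppgtt,
				     int pd, int pt)
{
	struct pci_dev *pdev = ppgtt->base.dev->pdev;

	/* Page tables already mapped in the iteration that failed. */
	while (pt--)
		pci_unmap_page(pdev, ppgtt->gen8_pt_dma_addr[pd][pt],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	/* Directories mapped in full before it, their page tables included. */
	while (pd--) {
		pci_unmap_page(pdev, ppgtt->pd_dma_addr[pd],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		for (pt = GEN8_PDES_PER_PAGE; pt--; )
			pci_unmap_page(pdev, ppgtt->gen8_pt_dma_addr[pd][pt],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	}
}

The page-table failure would then call gen8_ppgtt_unmap_partial(ppgtt, i, j) and the page-directory failure gen8_ppgtt_unmap_partial(ppgtt, i, GEN8_PDES_PER_PAGE) before jumping to bail.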