Michel Thierry <michel.thierry@xxxxxxxxx> writes:

> From: Ben Widawsky <benjamin.widawsky@xxxxxxxxx>
>
> Move the remaining members over to the new page table structures.
>
> This can be squashed with the previous commit if desired. The reasoning
> is the same as that patch. I simply felt it is easier to review if split.
>
> v2: In lrc: s/ppgtt->pd_dma_addr[i]/ppgtt->pdp.page_directory[i].daddr/
> v3: Rebase.
>
> Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
> Signed-off-by: Michel Thierry <michel.thierry@xxxxxxxxx> (v2, v3)
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c |  2 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 85 +++++++++++++------------------------
>  drivers/gpu/drm/i915/i915_gem_gtt.h | 14 +++---
>  drivers/gpu/drm/i915/intel_lrc.c    | 16 +++----
>  4 files changed, 44 insertions(+), 73 deletions(-)
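For anyone cross-checking the conversions below, the member moves boil down
to the following old-to-new mapping. The helpers are only a review aid I
wrote for this reply; they are not part of the patch:

static inline dma_addr_t gen8_pd_daddr(struct i915_hw_ppgtt *ppgtt, int pd)
{
        /* was: ppgtt->pd_dma_addr[pd] */
        return ppgtt->pdp.page_directory[pd].daddr;
}

static inline dma_addr_t gen8_pt_daddr(struct i915_hw_ppgtt *ppgtt, int pd, int pt)
{
        /* was: ppgtt->gen8_pt_dma_addr[pd][pt] */
        return ppgtt->pdp.page_directory[pd].page_tables[pt].daddr;
}

static inline uint32_t gen6_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
        /* was: ppgtt->pd_offset */
        return ppgtt->pd.pd_offset;
}

static inline dma_addr_t gen6_pt_daddr(struct i915_hw_ppgtt *ppgtt, int pde)
{
        /* was: ppgtt->pt_dma_addr[pde] */
        return ppgtt->pd.page_tables[pde].daddr;
}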
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 63be374..4d07030 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2185,7 +2185,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
>                  struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
>
>                  seq_puts(m, "aliasing PPGTT:\n");
> -                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
> +                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
>
>                  ppgtt->debug_dump(ppgtt, m);
>          }
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 10026d3..eb0714c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -311,7 +311,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
>          int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
>
>          for (i = used_pd - 1; i >= 0; i--) {
> -                dma_addr_t addr = ppgtt->pd_dma_addr[i];
> +                dma_addr_t addr = ppgtt->pdp.page_directory[i].daddr;
>                  ret = gen8_write_pdp(ring, i, addr);
>                  if (ret)
>                          return ret;
> @@ -437,7 +437,6 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
>
>          for (i = 0; i < ppgtt->num_pd_pages; i++) {
>                  gen8_free_page_directory(&ppgtt->pdp.page_directory[i]);
> -                kfree(ppgtt->gen8_pt_dma_addr[i]);
>          }
>  }
>
> @@ -449,14 +448,14 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
>
>          for (i = 0; i < ppgtt->num_pd_pages; i++) {
>                  /* TODO: In the future we'll support sparse mappings, so this
>                   * will have to change. */
> -                if (!ppgtt->pd_dma_addr[i])
> +                if (!ppgtt->pdp.page_directory[i].daddr)
>                          continue;
>
> -                pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
> +                pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i].daddr, PAGE_SIZE,
>                                 PCI_DMA_BIDIRECTIONAL);
>
>                  for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> -                        dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
> +                        dma_addr_t addr = ppgtt->pdp.page_directory[i].page_tables[j].daddr;
>                          if (addr)
>                                  pci_unmap_page(hwdev, addr, PAGE_SIZE,
>                                                 PCI_DMA_BIDIRECTIONAL);
> @@ -473,32 +472,19 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>          gen8_ppgtt_free(ppgtt);
>  }
>
> -static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
> -{
> -        int i;
> -
> -        for (i = 0; i < ppgtt->num_pd_pages; i++) {
> -                ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
> -                                                     sizeof(dma_addr_t),
> -                                                     GFP_KERNEL);
> -                if (!ppgtt->gen8_pt_dma_addr[i])
> -                        return -ENOMEM;
> -        }
> -
> -        return 0;
> -}
> -
>  static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
>  {
>          int i, j;
>
>          for (i = 0; i < ppgtt->num_pd_pages; i++) {
> +                struct i915_page_directory_entry *pd = &ppgtt->pdp.page_directory[i];
>                  for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> -                        struct i915_page_table_entry *pt = &ppgtt->pdp.page_directory[i].page_tables[j];
> +                        struct i915_page_table_entry *pt = &pd->page_tables[j];
>
>                          pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
>                          if (!pt->page)
>                                  goto unwind_out;
> +

This hunk should have been in the previous patch, oh well..

>                  }
>          }
>
> @@ -561,10 +547,6 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
>
>          ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
>
> -        ret = gen8_ppgtt_allocate_dma(ppgtt);
> -        if (ret)
> -                goto err_out;
> -
>          return 0;
>
>  err_out:
> @@ -586,7 +568,7 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,

Here we are again setting only one page directory. But as it is not a
problem with this patch:

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
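To spell out the "one page directory" remark: the function still maps and
records a single entry of ppgtt->pdp per call, so I assume the init path
keeps walking the directories one at a time, roughly like the sketch below
(the caller is not visible in the quoted hunks, so treat this as an
assumption rather than a description of the patch):

        /* assumed caller, not shown in the quoted hunks */
        int i, ret;

        for (i = 0; i < ppgtt->num_pd_pages; i++) {
                ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
                if (ret)
                        return ret;
        }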
>          if (ret)
>                  return ret;
>
> -        ppgtt->pd_dma_addr[pd] = pd_addr;
> +        ppgtt->pdp.page_directory[pd].daddr = pd_addr;
>
>          return 0;
>  }
> @@ -596,17 +578,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
>                                          const int pt)
>  {
>          dma_addr_t pt_addr;
> -        struct page *p;
> +        struct i915_page_directory_entry *pdir = &ppgtt->pdp.page_directory[pd];
> +        struct i915_page_table_entry *ptab = &pdir->page_tables[pt];
> +        struct page *p = ptab->page;
>          int ret;
>
> -        p = ppgtt->pdp.page_directory[pd].page_tables[pt].page;
>          pt_addr = pci_map_page(ppgtt->base.dev->pdev,
>                                 p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
>          ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
>          if (ret)
>                  return ret;
>
> -        ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
> +        ptab->daddr = pt_addr;
>
>          return 0;
>  }
> @@ -662,7 +645,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
>                  gen8_ppgtt_pde_t *pd_vaddr;
>                  pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i].page);
>                  for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> -                        dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
> +                        dma_addr_t addr = ppgtt->pdp.page_directory[i].page_tables[j].daddr;
>                          pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
>                                                        I915_CACHE_LLC);
>                  }
> @@ -705,14 +688,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>          scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
>
>          pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
> -                ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
> +                ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
>
>          seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
> -                   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
> +                   ppgtt->pd.pd_offset,
> +                   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
>          for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
>                  u32 expected;
>                  gen6_gtt_pte_t *pt_vaddr;
> -                dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
> +                dma_addr_t pt_addr = ppgtt->pd.page_tables[pde].daddr;
>                  pd_entry = readl(pd_addr + pde);
>                  expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
>
> @@ -756,13 +740,13 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
>          uint32_t pd_entry;
>          int i;
>
> -        WARN_ON(ppgtt->pd_offset & 0x3f);
> +        WARN_ON(ppgtt->pd.pd_offset & 0x3f);
>          pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
> -                ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
> +                ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
>          for (i = 0; i < ppgtt->num_pd_entries; i++) {
>                  dma_addr_t pt_addr;
>
> -                pt_addr = ppgtt->pt_dma_addr[i];
> +                pt_addr = ppgtt->pd.page_tables[i].daddr;
>                  pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
>                  pd_entry |= GEN6_PDE_VALID;
>
> @@ -773,9 +757,9 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
>
>  static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
>  {
> -        BUG_ON(ppgtt->pd_offset & 0x3f);
> +        BUG_ON(ppgtt->pd.pd_offset & 0x3f);
>
> -        return (ppgtt->pd_offset / 64) << 16;
> +        return (ppgtt->pd.pd_offset / 64) << 16;
>  }
>
>  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
> @@ -988,19 +972,16 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
>  {
>          int i;
>
> -        if (ppgtt->pt_dma_addr) {
> -                for (i = 0; i < ppgtt->num_pd_entries; i++)
> -                        pci_unmap_page(ppgtt->base.dev->pdev,
> -                                       ppgtt->pt_dma_addr[i],
> -                                       4096, PCI_DMA_BIDIRECTIONAL);
> -        }
> +        for (i = 0; i < ppgtt->num_pd_entries; i++)
> +                pci_unmap_page(ppgtt->base.dev->pdev,
> +                               ppgtt->pd.page_tables[i].daddr,
> +                               4096, PCI_DMA_BIDIRECTIONAL);
>  }
>
>  static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
>  {
>          int i;
>
> -        kfree(ppgtt->pt_dma_addr);
>          for (i = 0; i < ppgtt->num_pd_entries; i++)
>                  __free_page(ppgtt->pd.page_tables[i].page);
>          kfree(ppgtt->pd.page_tables);
> @@ -1093,14 +1074,6 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
>                  return ret;
>          }
>
> -        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
> -                                     GFP_KERNEL);
> -        if (!ppgtt->pt_dma_addr) {
> -                drm_mm_remove_node(&ppgtt->node);
> -                gen6_ppgtt_free(ppgtt);
> -                return -ENOMEM;
> -        }
> -
>          return 0;
>  }
>
> @@ -1122,7 +1095,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
>                          return -EIO;
>                  }
>
> -                ppgtt->pt_dma_addr[i] = pt_addr;
> +                ppgtt->pd.page_tables[i].daddr = pt_addr;
>          }
>
>          return 0;
> @@ -1164,7 +1137,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>          ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
>          ppgtt->debug_dump = gen6_dump_ppgtt;
>
> -        ppgtt->pd_offset =
> +        ppgtt->pd.pd_offset =
>                  ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
>
>          ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
> @@ -1175,7 +1148,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>
>          gen6_write_pdes(ppgtt);
>          DRM_DEBUG("Adding PPGTT at offset %x\n",
> -                  ppgtt->pd_offset << 10);
> +                  ppgtt->pd.pd_offset << 10);
>
>          return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index d9bc375..6efeb18 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -189,10 +189,16 @@ struct i915_vma {
>
>  struct i915_page_table_entry {
>          struct page *page;
> +        dma_addr_t daddr;
>  };
>
>  struct i915_page_directory_entry {
>          struct page *page; /* NULL for GEN6-GEN7 */
> +        union {
> +                uint32_t pd_offset;
> +                dma_addr_t daddr;
> +        };
> +
>          struct i915_page_table_entry *page_tables;
>  };
>
> @@ -286,14 +292,6 @@ struct i915_hw_ppgtt {
>          unsigned num_pd_entries;
>          unsigned num_pd_pages; /* gen8+ */
>          union {
> -                uint32_t pd_offset;
> -                dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
> -        };
> -        union {
> -                dma_addr_t *pt_dma_addr;
> -                dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
> -        };
> -        union {
>                  struct i915_page_directory_pointer_entry pdp;
>                  struct i915_page_directory_entry pd;
>          };
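The old pd_offset/pd_dma_addr union reappears here as the pd_offset/daddr
union inside i915_page_directory_entry, so it is worth spelling out which
member is meaningful where. This is my summary of the patch, not something
the patch states itself:

/*
 *               .page    union member used
 *   gen6/7 pd:  NULL     pd_offset - location of the PD inside the GGTT
 *   gen8   pd:  valid    daddr     - pci_map_page() address of the PD page
 *
 * i915_page_table_entry.daddr is the pci_map_page() address of the PT page
 * on both paths, and is what gen6_ppgtt_unmap_pages() and
 * gen8_ppgtt_unmap_pages() hand back to pci_unmap_page().
 */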
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 1c65949..9e71992 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1735,14 +1735,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
>          reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
>          reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
>          reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
> -        reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
> -        reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
> -        reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
> -        reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
> -        reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
> -        reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
> -        reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
> -        reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
> +        reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3].daddr);
> +        reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3].daddr);
> +        reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2].daddr);
> +        reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2].daddr);
> +        reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1].daddr);
> +        reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1].daddr);
> +        reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0].daddr);
> +        reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0].daddr);
>          if (ring->id == RCS) {
>                  reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
>                  reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
> --
> 2.1.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx