[PATCH 01/11] drm/i915: Move gtt and ppgtt under address space umbrella

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Wed, Jul 10, 2013 at 09:36:58AM -0700, Ben Widawsky wrote:
> On Tue, Jul 09, 2013 at 08:37:45AM +0200, Daniel Vetter wrote:
> > On Mon, Jul 08, 2013 at 11:08:32PM -0700, Ben Widawsky wrote:
> > > The GTT and PPGTT can be thought of more generally as GPU address
> > > spaces. Many of their actions (insert entries), state (LRU lists) and
> > > many of their characteristics (size), can be shared. Do that.
> > > 
> > > The change itself doesn't actually impact most of the VMA/VM rework
> > > coming up, it just fits in with the grand scheme. GGTT will usually be a
> > > special case where we either know an object must be in the GGTT (display
> > > engine, workarounds, etc.).
> > 
> > Commit message cut off?
> > -Daniel
> 
> Maybe. I can't remember. Do you want me to add something else in
> particular.

I was just wondering since after the "either" I'd expect and "or". My
parser never found it though ;-)
-Daniel

> 
> > 
> > > 
> > > v2: Drop usage of i915_gtt_vm (Daniel)
> > > Make cleanup also part of the parent class (Ben)
> > > Modified commit msg
> > > Rebased
> > > 
> > > Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
> > > ---
> > >  drivers/gpu/drm/i915/i915_debugfs.c |   4 +-
> > >  drivers/gpu/drm/i915/i915_dma.c     |   4 +-
> > >  drivers/gpu/drm/i915/i915_drv.h     |  57 ++++++-------
> > >  drivers/gpu/drm/i915/i915_gem.c     |   4 +-
> > >  drivers/gpu/drm/i915/i915_gem_gtt.c | 162 ++++++++++++++++++++----------------
> > >  5 files changed, 121 insertions(+), 110 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> > > index c8059f5..d870f27 100644
> > > --- a/drivers/gpu/drm/i915/i915_debugfs.c
> > > +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> > > @@ -287,8 +287,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
> > >  		   count, size);
> > >  
> > >  	seq_printf(m, "%zu [%lu] gtt total\n",
> > > -		   dev_priv->gtt.total,
> > > -		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
> > > +		   dev_priv->gtt.base.total,
> > > +		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
> > >  
> > >  	seq_putc(m, '\n');
> > >  	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
> > > diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> > > index 0e22142..15bca96 100644
> > > --- a/drivers/gpu/drm/i915/i915_dma.c
> > > +++ b/drivers/gpu/drm/i915/i915_dma.c
> > > @@ -1669,7 +1669,7 @@ out_gem_unload:
> > >  out_mtrrfree:
> > >  	arch_phys_wc_del(dev_priv->gtt.mtrr);
> > >  	io_mapping_free(dev_priv->gtt.mappable);
> > > -	dev_priv->gtt.gtt_remove(dev);
> > > +	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
> > >  out_rmmap:
> > >  	pci_iounmap(dev->pdev, dev_priv->regs);
> > >  put_bridge:
> > > @@ -1764,7 +1764,7 @@ int i915_driver_unload(struct drm_device *dev)
> > >  	destroy_workqueue(dev_priv->wq);
> > >  	pm_qos_remove_request(&dev_priv->pm_qos);
> > >  
> > > -	dev_priv->gtt.gtt_remove(dev);
> > > +	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
> > >  
> > >  	if (dev_priv->slab)
> > >  		kmem_cache_destroy(dev_priv->slab);
> > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > > index c8d6104..d6d4d7d 100644
> > > --- a/drivers/gpu/drm/i915/i915_drv.h
> > > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > > @@ -446,6 +446,29 @@ enum i915_cache_level {
> > >  
> > >  typedef uint32_t gen6_gtt_pte_t;
> > >  
> > > +struct i915_address_space {
> > > +	struct drm_device *dev;
> > > +	unsigned long start;		/* Start offset always 0 for dri2 */
> > > +	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
> > > +
> > > +	struct {
> > > +		dma_addr_t addr;
> > > +		struct page *page;
> > > +	} scratch;
> > > +
> > > +	/* FIXME: Need a more generic return type */
> > > +	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
> > > +				     enum i915_cache_level level);
> > > +	void (*clear_range)(struct i915_address_space *vm,
> > > +			    unsigned int first_entry,
> > > +			    unsigned int num_entries);
> > > +	void (*insert_entries)(struct i915_address_space *vm,
> > > +			       struct sg_table *st,
> > > +			       unsigned int first_entry,
> > > +			       enum i915_cache_level cache_level);
> > > +	void (*cleanup)(struct i915_address_space *vm);
> > > +};
> > > +
> > >  /* The Graphics Translation Table is the way in which GEN hardware translates a
> > >   * Graphics Virtual Address into a Physical Address. In addition to the normal
> > >   * collateral associated with any va->pa translations GEN hardware also has a
> > > @@ -454,8 +477,7 @@ typedef uint32_t gen6_gtt_pte_t;
> > >   * the spec.
> > >   */
> > >  struct i915_gtt {
> > > -	unsigned long start;		/* Start offset of used GTT */
> > > -	size_t total;			/* Total size GTT can map */
> > > +	struct i915_address_space base;
> > >  	size_t stolen_size;		/* Total size of stolen memory */
> > >  
> > >  	unsigned long mappable_end;	/* End offset that we can CPU map */
> > > @@ -466,10 +488,6 @@ struct i915_gtt {
> > >  	void __iomem *gsm;
> > >  
> > >  	bool do_idle_maps;
> > > -	struct {
> > > -		dma_addr_t addr;
> > > -		struct page *page;
> > > -	} scratch;
> > >  
> > >  	int mtrr;
> > >  
> > > @@ -477,38 +495,17 @@ struct i915_gtt {
> > >  	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
> > >  			  size_t *stolen, phys_addr_t *mappable_base,
> > >  			  unsigned long *mappable_end);
> > > -	void (*gtt_remove)(struct drm_device *dev);
> > > -	void (*gtt_clear_range)(struct drm_device *dev,
> > > -				unsigned int first_entry,
> > > -				unsigned int num_entries);
> > > -	void (*gtt_insert_entries)(struct drm_device *dev,
> > > -				   struct sg_table *st,
> > > -				   unsigned int pg_start,
> > > -				   enum i915_cache_level cache_level);
> > > -	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
> > > -				     enum i915_cache_level level);
> > >  };
> > > -#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
> > > +#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
> > >  
> > >  struct i915_hw_ppgtt {
> > > -	struct drm_device *dev;
> > > +	struct i915_address_space base;
> > >  	unsigned num_pd_entries;
> > >  	struct page **pt_pages;
> > >  	uint32_t pd_offset;
> > >  	dma_addr_t *pt_dma_addr;
> > >  
> > > -	/* pte functions, mirroring the interface of the global gtt. */
> > > -	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
> > > -			    unsigned int first_entry,
> > > -			    unsigned int num_entries);
> > > -	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
> > > -			       struct sg_table *st,
> > > -			       unsigned int pg_start,
> > > -			       enum i915_cache_level cache_level);
> > > -	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
> > > -				     enum i915_cache_level level);
> > >  	int (*enable)(struct drm_device *dev);
> > > -	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
> > >  };
> > >  
> > >  struct i915_ctx_hang_stats {
> > > @@ -1124,7 +1121,7 @@ typedef struct drm_i915_private {
> > >  	enum modeset_restore modeset_restore;
> > >  	struct mutex modeset_restore_lock;
> > >  
> > > -	struct i915_gtt gtt;
> > > +	struct i915_gtt gtt; /* VMA representing the global address space */
> > >  
> > >  	struct i915_gem_mm mm;
> > >  
> > > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > > index af61be8..3ecedfd 100644
> > > --- a/drivers/gpu/drm/i915/i915_gem.c
> > > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > > @@ -181,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
> > >  			pinned += i915_gem_obj_ggtt_size(obj);
> > >  	mutex_unlock(&dev->struct_mutex);
> > >  
> > > -	args->aper_size = dev_priv->gtt.total;
> > > +	args->aper_size = dev_priv->gtt.base.total;
> > >  	args->aper_available_size = args->aper_size - pinned;
> > >  
> > >  	return 0;
> > > @@ -3070,7 +3070,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
> > >  	u32 size, fence_size, fence_alignment, unfenced_alignment;
> > >  	bool mappable, fenceable;
> > >  	size_t gtt_max = map_and_fenceable ?
> > > -		dev_priv->gtt.mappable_end : dev_priv->gtt.total;
> > > +		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
> > >  	int ret;
> > >  
> > >  	fence_size = i915_gem_get_gtt_size(dev,
> > > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > index 242d0f9..693115a 100644
> > > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > @@ -102,7 +102,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
> > >  
> > >  static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
> > >  {
> > > -	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
> > > +	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
> > >  	gen6_gtt_pte_t __iomem *pd_addr;
> > >  	uint32_t pd_entry;
> > >  	int i;
> > > @@ -181,18 +181,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
> > >  }
> > >  
> > >  /* PPGTT support for Sandybridge/Gen6 and later */
> > > -static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
> > > +static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
> > >  				   unsigned first_entry,
> > >  				   unsigned num_entries)
> > >  {
> > > -	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
> > > +	struct i915_hw_ppgtt *ppgtt =
> > > +		container_of(vm, struct i915_hw_ppgtt, base);
> > >  	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
> > >  	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
> > >  	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
> > >  	unsigned last_pte, i;
> > >  
> > > -	scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr,
> > > -					I915_CACHE_LLC);
> > > +	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
> > >  
> > >  	while (num_entries) {
> > >  		last_pte = first_pte + num_entries;
> > > @@ -212,11 +212,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
> > >  	}
> > >  }
> > >  
> > > -static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
> > > +static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
> > >  				      struct sg_table *pages,
> > >  				      unsigned first_entry,
> > >  				      enum i915_cache_level cache_level)
> > >  {
> > > +	struct i915_hw_ppgtt *ppgtt =
> > > +		container_of(vm, struct i915_hw_ppgtt, base);
> > >  	gen6_gtt_pte_t *pt_vaddr;
> > >  	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
> > >  	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
> > > @@ -227,7 +229,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
> > >  		dma_addr_t page_addr;
> > >  
> > >  		page_addr = sg_page_iter_dma_address(&sg_iter);
> > > -		pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level);
> > > +		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
> > >  		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
> > >  			kunmap_atomic(pt_vaddr);
> > >  			act_pt++;
> > > @@ -239,13 +241,15 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
> > >  	kunmap_atomic(pt_vaddr);
> > >  }
> > >  
> > > -static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
> > > +static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
> > >  {
> > > +	struct i915_hw_ppgtt *ppgtt =
> > > +		container_of(vm, struct i915_hw_ppgtt, base);
> > >  	int i;
> > >  
> > >  	if (ppgtt->pt_dma_addr) {
> > >  		for (i = 0; i < ppgtt->num_pd_entries; i++)
> > > -			pci_unmap_page(ppgtt->dev->pdev,
> > > +			pci_unmap_page(ppgtt->base.dev->pdev,
> > >  				       ppgtt->pt_dma_addr[i],
> > >  				       4096, PCI_DMA_BIDIRECTIONAL);
> > >  	}
> > > @@ -259,7 +263,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
> > >  
> > >  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
> > >  {
> > > -	struct drm_device *dev = ppgtt->dev;
> > > +	struct drm_device *dev = ppgtt->base.dev;
> > >  	struct drm_i915_private *dev_priv = dev->dev_private;
> > >  	unsigned first_pd_entry_in_global_pt;
> > >  	int i;
> > > @@ -271,17 +275,17 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
> > >  	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
> > >  
> > >  	if (IS_HASWELL(dev)) {
> > > -		ppgtt->pte_encode = hsw_pte_encode;
> > > +		ppgtt->base.pte_encode = hsw_pte_encode;
> > >  	} else if (IS_VALLEYVIEW(dev)) {
> > > -		ppgtt->pte_encode = byt_pte_encode;
> > > +		ppgtt->base.pte_encode = byt_pte_encode;
> > >  	} else {
> > > -		ppgtt->pte_encode = gen6_pte_encode;
> > > +		ppgtt->base.pte_encode = gen6_pte_encode;
> > >  	}
> > >  	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
> > >  	ppgtt->enable = gen6_ppgtt_enable;
> > > -	ppgtt->clear_range = gen6_ppgtt_clear_range;
> > > -	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
> > > -	ppgtt->cleanup = gen6_ppgtt_cleanup;
> > > +	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
> > > +	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
> > > +	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
> > >  	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
> > >  				  GFP_KERNEL);
> > >  	if (!ppgtt->pt_pages)
> > > @@ -312,8 +316,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
> > >  		ppgtt->pt_dma_addr[i] = pt_addr;
> > >  	}
> > >  
> > > -	ppgtt->clear_range(ppgtt, 0,
> > > -			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
> > > +	ppgtt->base.clear_range(&ppgtt->base, 0,
> > > +				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
> > >  
> > >  	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
> > >  
> > > @@ -346,7 +350,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
> > >  	if (!ppgtt)
> > >  		return -ENOMEM;
> > >  
> > > -	ppgtt->dev = dev;
> > > +	ppgtt->base.dev = dev;
> > >  
> > >  	if (INTEL_INFO(dev)->gen < 8)
> > >  		ret = gen6_ppgtt_init(ppgtt);
> > > @@ -369,7 +373,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
> > >  	if (!ppgtt)
> > >  		return;
> > >  
> > > -	ppgtt->cleanup(ppgtt);
> > > +	ppgtt->base.cleanup(&ppgtt->base);
> > >  	dev_priv->mm.aliasing_ppgtt = NULL;
> > >  }
> > >  
> > > @@ -377,17 +381,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
> > >  			    struct drm_i915_gem_object *obj,
> > >  			    enum i915_cache_level cache_level)
> > >  {
> > > -	ppgtt->insert_entries(ppgtt, obj->pages,
> > > -			      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > -			      cache_level);
> > > +	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
> > > +				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > +				   cache_level);
> > >  }
> > >  
> > >  void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
> > >  			      struct drm_i915_gem_object *obj)
> > >  {
> > > -	ppgtt->clear_range(ppgtt,
> > > -			   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > -			   obj->base.size >> PAGE_SHIFT);
> > > +	ppgtt->base.clear_range(&ppgtt->base,
> > > +				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > +				obj->base.size >> PAGE_SHIFT);
> > >  }
> > >  
> > >  extern int intel_iommu_gfx_mapped;
> > > @@ -434,8 +438,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
> > >  	struct drm_i915_gem_object *obj;
> > >  
> > >  	/* First fill our portion of the GTT with scratch pages */
> > > -	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
> > > -				      dev_priv->gtt.total / PAGE_SIZE);
> > > +	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > > +				       dev_priv->gtt.base.start / PAGE_SIZE,
> > > +				       dev_priv->gtt.base.total / PAGE_SIZE);
> > >  
> > >  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> > >  		i915_gem_clflush_object(obj);
> > > @@ -464,12 +469,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
> > >   * within the global GTT as well as accessible by the GPU through the GMADR
> > >   * mapped BAR (dev_priv->mm.gtt->gtt).
> > >   */
> > > -static void gen6_ggtt_insert_entries(struct drm_device *dev,
> > > +static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
> > >  				     struct sg_table *st,
> > >  				     unsigned int first_entry,
> > >  				     enum i915_cache_level level)
> > >  {
> > > -	struct drm_i915_private *dev_priv = dev->dev_private;
> > > +	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > >  	gen6_gtt_pte_t __iomem *gtt_entries =
> > >  		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
> > >  	int i = 0;
> > > @@ -478,8 +483,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
> > >  
> > >  	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
> > >  		addr = sg_page_iter_dma_address(&sg_iter);
> > > -		iowrite32(dev_priv->gtt.pte_encode(addr, level),
> > > -			  &gtt_entries[i]);
> > > +		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
> > >  		i++;
> > >  	}
> > >  
> > > @@ -490,8 +494,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
> > >  	 * hardware should work, we must keep this posting read for paranoia.
> > >  	 */
> > >  	if (i != 0)
> > > -		WARN_ON(readl(&gtt_entries[i-1])
> > > -			!= dev_priv->gtt.pte_encode(addr, level));
> > > +		WARN_ON(readl(&gtt_entries[i-1]) !=
> > > +			vm->pte_encode(addr, level));
> > >  
> > >  	/* This next bit makes the above posting read even more important. We
> > >  	 * want to flush the TLBs only after we're certain all the PTE updates
> > > @@ -501,11 +505,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
> > >  	POSTING_READ(GFX_FLSH_CNTL_GEN6);
> > >  }
> > >  
> > > -static void gen6_ggtt_clear_range(struct drm_device *dev,
> > > +static void gen6_ggtt_clear_range(struct i915_address_space *vm,
> > >  				  unsigned int first_entry,
> > >  				  unsigned int num_entries)
> > >  {
> > > -	struct drm_i915_private *dev_priv = dev->dev_private;
> > > +	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > >  	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
> > >  		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
> > >  	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
> > > @@ -516,15 +520,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
> > >  		 first_entry, num_entries, max_entries))
> > >  		num_entries = max_entries;
> > >  
> > > -	scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr,
> > > -					       I915_CACHE_LLC);
> > > +	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
> > >  	for (i = 0; i < num_entries; i++)
> > >  		iowrite32(scratch_pte, &gtt_base[i]);
> > >  	readl(gtt_base);
> > >  }
> > >  
> > >  
> > > -static void i915_ggtt_insert_entries(struct drm_device *dev,
> > > +static void i915_ggtt_insert_entries(struct i915_address_space *vm,
> > >  				     struct sg_table *st,
> > >  				     unsigned int pg_start,
> > >  				     enum i915_cache_level cache_level)
> > > @@ -536,7 +539,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
> > >  
> > >  }
> > >  
> > > -static void i915_ggtt_clear_range(struct drm_device *dev,
> > > +static void i915_ggtt_clear_range(struct i915_address_space *vm,
> > >  				  unsigned int first_entry,
> > >  				  unsigned int num_entries)
> > >  {
> > > @@ -549,10 +552,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
> > >  {
> > >  	struct drm_device *dev = obj->base.dev;
> > >  	struct drm_i915_private *dev_priv = dev->dev_private;
> > > +	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
> > >  
> > > -	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
> > > -					 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > -					 cache_level);
> > > +	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
> > > +					  entry,
> > > +					  cache_level);
> > >  
> > >  	obj->has_global_gtt_mapping = 1;
> > >  }
> > > @@ -561,10 +565,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
> > >  {
> > >  	struct drm_device *dev = obj->base.dev;
> > >  	struct drm_i915_private *dev_priv = dev->dev_private;
> > > +	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
> > >  
> > > -	dev_priv->gtt.gtt_clear_range(obj->base.dev,
> > > -				      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > > -				      obj->base.size >> PAGE_SHIFT);
> > > +	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > > +				       entry,
> > > +				       obj->base.size >> PAGE_SHIFT);
> > >  
> > >  	obj->has_global_gtt_mapping = 0;
> > >  }
> > > @@ -641,20 +646,23 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> > >  		obj->has_global_gtt_mapping = 1;
> > >  	}
> > >  
> > > -	dev_priv->gtt.start = start;
> > > -	dev_priv->gtt.total = end - start;
> > > +	dev_priv->gtt.base.start = start;
> > > +	dev_priv->gtt.base.total = end - start;
> > >  
> > >  	/* Clear any non-preallocated blocks */
> > >  	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
> > >  			     hole_start, hole_end) {
> > > +		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
> > >  		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
> > >  			      hole_start, hole_end);
> > > -		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
> > > -					      (hole_end-hole_start) / PAGE_SIZE);
> > > +		dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > > +					       hole_start / PAGE_SIZE,
> > > +					       count);
> > >  	}
> > >  
> > >  	/* And finally clear the reserved guard page */
> > > -	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
> > > +	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > > +				       end / PAGE_SIZE - 1, 1);
> > >  }
> > >  
> > >  static bool
> > > @@ -677,7 +685,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
> > >  	struct drm_i915_private *dev_priv = dev->dev_private;
> > >  	unsigned long gtt_size, mappable_size;
> > >  
> > > -	gtt_size = dev_priv->gtt.total;
> > > +	gtt_size = dev_priv->gtt.base.total;
> > >  	mappable_size = dev_priv->gtt.mappable_end;
> > >  
> > >  	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
> > > @@ -722,8 +730,8 @@ static int setup_scratch_page(struct drm_device *dev)
> > >  #else
> > >  	dma_addr = page_to_phys(page);
> > >  #endif
> > > -	dev_priv->gtt.scratch.page = page;
> > > -	dev_priv->gtt.scratch.addr = dma_addr;
> > > +	dev_priv->gtt.base.scratch.page = page;
> > > +	dev_priv->gtt.base.scratch.addr = dma_addr;
> > >  
> > >  	return 0;
> > >  }
> > > @@ -731,11 +739,13 @@ static int setup_scratch_page(struct drm_device *dev)
> > >  static void teardown_scratch_page(struct drm_device *dev)
> > >  {
> > >  	struct drm_i915_private *dev_priv = dev->dev_private;
> > > -	set_pages_wb(dev_priv->gtt.scratch.page, 1);
> > > -	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr,
> > > +	struct page *page = dev_priv->gtt.base.scratch.page;
> > > +
> > > +	set_pages_wb(page, 1);
> > > +	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
> > >  		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> > > -	put_page(dev_priv->gtt.scratch.page);
> > > -	__free_page(dev_priv->gtt.scratch.page);
> > > +	put_page(page);
> > > +	__free_page(page);
> > >  }
> > >  
> > >  static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
> > > @@ -798,17 +808,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
> > >  	if (ret)
> > >  		DRM_ERROR("Scratch setup failed\n");
> > >  
> > > -	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
> > > -	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
> > > +	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
> > > +	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
> > >  
> > >  	return ret;
> > >  }
> > >  
> > > -static void gen6_gmch_remove(struct drm_device *dev)
> > > +static void gen6_gmch_remove(struct i915_address_space *vm)
> > >  {
> > > -	struct drm_i915_private *dev_priv = dev->dev_private;
> > > -	iounmap(dev_priv->gtt.gsm);
> > > -	teardown_scratch_page(dev_priv->dev);
> > > +
> > > +	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
> > > +	iounmap(gtt->gsm);
> > > +	teardown_scratch_page(vm->dev);
> > >  }
> > >  
> > >  static int i915_gmch_probe(struct drm_device *dev,
> > > @@ -829,13 +840,13 @@ static int i915_gmch_probe(struct drm_device *dev,
> > >  	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
> > >  
> > >  	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
> > > -	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
> > > -	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
> > > +	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
> > > +	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
> > >  
> > >  	return 0;
> > >  }
> > >  
> > > -static void i915_gmch_remove(struct drm_device *dev)
> > > +static void i915_gmch_remove(struct i915_address_space *vm)
> > >  {
> > >  	intel_gmch_remove();
> > >  }
> > > @@ -848,25 +859,28 @@ int i915_gem_gtt_init(struct drm_device *dev)
> > >  
> > >  	if (INTEL_INFO(dev)->gen <= 5) {
> > >  		gtt->gtt_probe = i915_gmch_probe;
> > > -		gtt->gtt_remove = i915_gmch_remove;
> > > +		gtt->base.cleanup = i915_gmch_remove;
> > >  	} else {
> > >  		gtt->gtt_probe = gen6_gmch_probe;
> > > -		gtt->gtt_remove = gen6_gmch_remove;
> > > +		gtt->base.cleanup = gen6_gmch_remove;
> > >  		if (IS_HASWELL(dev))
> > > -			gtt->pte_encode = hsw_pte_encode;
> > > +			gtt->base.pte_encode = hsw_pte_encode;
> > >  		else if (IS_VALLEYVIEW(dev))
> > > -			gtt->pte_encode = byt_pte_encode;
> > > +			gtt->base.pte_encode = byt_pte_encode;
> > >  		else
> > > -			gtt->pte_encode = gen6_pte_encode;
> > > +			gtt->base.pte_encode = gen6_pte_encode;
> > >  	}
> > >  
> > > -	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
> > > +	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
> > >  			     &gtt->mappable_base, &gtt->mappable_end);
> > >  	if (ret)
> > >  		return ret;
> > >  
> > > +	gtt->base.dev = dev;
> > > +
> > >  	/* GMADR is the PCI mmio aperture into the global GTT. */
> > > -	DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20);
> > > +	DRM_INFO("Memory usable by graphics device = %zdM\n",
> > > +		 gtt->base.total >> 20);
> > >  	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
> > >  	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
> > >  
> > > -- 
> > > 1.8.3.2
> > > 
> > > _______________________________________________
> > > Intel-gfx mailing list
> > > Intel-gfx at lists.freedesktop.org
> > > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> > 
> > -- 
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> 
> -- 
> Ben Widawsky, Intel Open Source Technology Center

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch


[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]
  Powered by Linux