Re: [PATCH] drm/i915: Use a cached mapping for the physical HWS

On Wed, May 17, 2017 at 02:02:50PM +0100, Chris Wilson wrote:
> Older gens use a physical address for the hardware status page, for which
> we use cache-coherent writes. As the writes go into the CPU cache, we use
> a normal WB-mapped page to read the HWS, which we use for our seqno tracking.
> 
> Anecdotally, I observed lost breadcrumb writes into the HWS on i965gm,
> which so far have not recurred with this patch. How reliable that
> evidence is remains to be seen.
> 
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>

DMA is assumed to be coherent, so this should give you the same thing in
the end. Except that dma_map_page() can use bounce buffers, whereas
dma_alloc_coherent() makes sure you're not doing that. So I don't think
this makes things better (or changes anything, fwiw).
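
To spell out the difference, here is a minimal sketch of the two
allocation paths under standard kernel DMA API semantics; the
hws_alloc_* helpers are illustrative names, not anything from the patch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Coherent path: dma_alloc_coherent() hands back memory the device is
 * guaranteed to reach directly, so a bounce buffer is never involved. */
static void *hws_alloc_coherent(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}

/* Streaming path (what the patch does): map an ordinary WB page. If the
 * device cannot address that page directly, the DMA layer may quietly
 * substitute a bounce buffer, and CPU reads of the original page would
 * then miss the device's writes. */
static void *hws_alloc_streaming(struct device *dev, dma_addr_t *dma)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma)) {
		__free_page(page);
		return NULL;
	}

	return page_address(page);
}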

Getting rid of the drm_pci_alloc() stuff, OTOH, would be nice either way.
-Daniel
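
For context, drm_pci_alloc() is effectively a wrapper around
dma_alloc_coherent() that returns a handle carrying both the CPU and the
bus address. A minimal sketch of the legacy pattern the patch deletes,
reconstructed from the removed lines in the diff below:

	struct drm_dma_handle *dmah;

	dmah = drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dmah->vaddr; /* CPU mapping */
	I915_WRITE(HWS_PGA, dmah->busaddr);          /* bus address for the HW */
	/* ... use the status page ... */
	drm_pci_free(&dev_priv->drm, dmah);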
> ---
>  drivers/gpu/drm/i915/i915_drv.h         |  1 -
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 34 ++++++++++++++++++++-------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++++-
>  3 files changed, 25 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 8ec3bb2913d4..bc223471d391 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2061,7 +2061,6 @@ struct drm_i915_private {
>  	struct i915_gem_context *kernel_context;
>  	struct intel_engine_cs *engine[I915_NUM_ENGINES];
>  
> -	struct drm_dma_handle *status_page_dmah;
>  	struct resource mch_res;
>  
>  	/* protects the irq masks */
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index c32a4ba9579f..3d80cbcd5d94 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -342,9 +342,10 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	u32 addr;
>  
> -	addr = dev_priv->status_page_dmah->busaddr;
> +	addr = engine->status_page.dma_addr;
>  	if (INTEL_GEN(dev_priv) >= 4)
> -		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
> +		addr |= (engine->status_page.dma_addr >> 28) & 0xf0;
> +
>  	I915_WRITE(HWS_PGA, addr);
>  }
>  
> @@ -1000,12 +1001,14 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
>  
>  static void cleanup_phys_status_page(struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_private *dev_priv = engine->i915;
> -
> -	if (!dev_priv->status_page_dmah)
> +	if (!engine->status_page.page_addr)
>  		return;
>  
> -	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
> +	dma_unmap_page(engine->i915->drm.dev,
> +		       engine->status_page.dma_addr, PAGE_SIZE,
> +		       PCI_DMA_BIDIRECTIONAL);
> +
> +	__free_page(virt_to_page(engine->status_page.page_addr));
>  	engine->status_page.page_addr = NULL;
>  }
>  
> @@ -1091,17 +1094,22 @@ static int init_status_page(struct intel_engine_cs *engine)
>  
>  static int init_phys_status_page(struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_private *dev_priv = engine->i915;
> +	struct page *page;
>  
> -	GEM_BUG_ON(engine->id != RCS);
> +	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
> +	if (!page)
> +		return -ENOMEM;
>  
> -	dev_priv->status_page_dmah =
> -		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
> -	if (!dev_priv->status_page_dmah)
> +	engine->status_page.dma_addr =
> +		dma_map_page(engine->i915->drm.dev, page, 0, PAGE_SIZE,
> +			     PCI_DMA_BIDIRECTIONAL);
> +	if (dma_mapping_error(engine->i915->drm.dev,
> +			      engine->status_page.dma_addr)) {
> +		__free_page(page);
>  		return -ENOMEM;
> +	}
>  
> -	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
> -	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
> +	engine->status_page.page_addr = page_address(page);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 46927e9530a2..fc3a2ac8914e 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -18,9 +18,12 @@
>  #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
>  
>  struct intel_hw_status_page {
> -	struct i915_vma *vma;
>  	u32 *page_addr;
>  	u32 ggtt_offset;
> +	union {
> +		struct i915_vma *vma;
> +		dma_addr_t dma_addr;
> +	};
>  };
>  
>  #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
> -- 
> 2.11.0
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch