On Thu, Oct 08, 2015 at 01:39:54PM +0100, Chris Wilson wrote:
> If we have llc coherency, we can write directly into the ringbuffer
> using ordinary cached writes rather than forcing WC access.
>
> v2: An important consequence is that we can forgo the mappable request
> for WB ringbuffers, allowing for many more simultaneous contexts.

Fits in with my understanding of llc coherency.

Reviewed-by: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>

> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 70 ++++++++++++++++++++++++++-------
>  1 file changed, 56 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index d608be46ea6e..f81ec7785fac 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1966,11 +1966,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
>  
>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>  {
> -	iounmap(ringbuf->virtual_start);
> +	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> +		vunmap(ringbuf->virtual_start);
> +	else
> +		iounmap(ringbuf->virtual_start);
>  	ringbuf->virtual_start = NULL;
>  	i915_gem_object_ggtt_unpin(ringbuf->obj);
>  }
>  
> +static u32 *vmap_obj(struct drm_i915_gem_object *obj)
> +{
> +	struct sg_page_iter sg_iter;
> +	struct page **pages;
> +	void *addr;
> +	int i;
> +
> +	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> +	if (pages == NULL)
> +		return NULL;
> +
> +	i = 0;
> +	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> +		pages[i++] = sg_page_iter_page(&sg_iter);
> +
> +	addr = vmap(pages, i, 0, PAGE_KERNEL);
> +	drm_free_large(pages);
> +
> +	return addr;
> +}
> +
>  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  				     struct intel_ringbuffer *ringbuf)
>  {
> @@ -1978,21 +2002,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  	struct drm_i915_gem_object *obj = ringbuf->obj;
>  	int ret;
>  
> -	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
> -	if (ret)
> -		return ret;
> +	if (HAS_LLC(dev_priv) && !obj->stolen) {
> +		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
> +		if (ret)
> +			return ret;
>  
> -	ret = i915_gem_object_set_to_gtt_domain(obj, true);
> -	if (ret) {
> -		i915_gem_object_ggtt_unpin(obj);
> -		return ret;
> -	}
> +		ret = i915_gem_object_set_to_cpu_domain(obj, true);
> +		if (ret) {
> +			i915_gem_object_ggtt_unpin(obj);
> +			return ret;
> +		}
> +
> +		ringbuf->virtual_start = vmap_obj(obj);
> +		if (ringbuf->virtual_start == NULL) {
> +			i915_gem_object_ggtt_unpin(obj);
> +			return -ENOMEM;
> +		}
> +	} else {
> +		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
> +		if (ret)
> +			return ret;
>  
> -	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
> -					    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
> -	if (ringbuf->virtual_start == NULL) {
> -		i915_gem_object_ggtt_unpin(obj);
> -		return -EINVAL;
> +		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> +		if (ret) {
> +			i915_gem_object_ggtt_unpin(obj);
> +			return ret;
> +		}
> +
> +		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
> +						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
> +		if (ringbuf->virtual_start == NULL) {
> +			i915_gem_object_ggtt_unpin(obj);
> +			return -EINVAL;
> +		}
>  	}
>  
>  	return 0;
> -- 
> 2.6.1
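For anyone following along: the WB path above boils down to a plain vmap() of the
object's pages with a cacheable pgprot. A minimal self-contained sketch of the idea
(the helper names below are made up for illustration, they are not from the patch):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/*
	 * Map an array of pages with ordinary write-back caching. On LLC
	 * platforms the GPU snoops the CPU cache, so plain cached stores
	 * through this mapping stay coherent without WC or clflush.
	 */
	static void *map_pages_wb(struct page **pages, unsigned int npages)
	{
		return vmap(pages, npages, 0, PAGE_KERNEL);
	}

	static void unmap_pages_wb(void *addr)
	{
		vunmap(addr);
	}

And since this never goes through the GTT aperture, the object no longer needs
PIN_MAPPABLE, which is where the extra simultaneous contexts come from.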
-- 
Ville Syrjälä
Intel OTC