We can skip the set_page_dirty() calls if we already know that the entire object is dirty. Furthermore, the WARN is redundant (we'll crash shortly afterwards) but adds substantial overhead to the function (roughly increasing the relocation per-page cost by 10%). Fixes regression from commit 033908aed5a596f6202c848c6bbc8a40fb1a8490 Author: Dave Gordon <david.s.gordon@xxxxxxxxx> Date: Thu Dec 10 18:51:23 2015 +0000 drm/i915: mark GEM object pages dirty when mapped & written by the CPU Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/i915_drv.h | 8 +++++--- drivers/gpu/drm/i915/i915_gem.c | 14 +++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6827e26b5681..2f8b5e7f9320 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2807,16 +2807,18 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); -static inline int __sg_page_count(struct scatterlist *sg) +static inline int __sg_page_count(const struct scatterlist *sg) { return sg->length >> PAGE_SHIFT; } struct page * -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n); static inline struct page * -i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) +i915_gem_object_get_page(struct drm_i915_gem_object *obj, + unsigned int n) { if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) return NULL; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d452499ae5a9..9cd161645041 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4388,16 +4388,12 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, /* Like i915_gem_object_get_page(), but mark the returned page dirty */ struct page * 
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n) { - struct page *page; - - /* Only default objects have per-page dirty tracking */ - if (WARN_ON(obj->ops != &i915_gem_object_ops)) - return NULL; - - page = i915_gem_object_get_page(obj, n); - set_page_dirty(page); + struct page *page = i915_gem_object_get_page(obj, n); + if (!i915_gem_object_is_dirty(obj)) + set_page_dirty(page); return page; } -- 2.7.0.rc3 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx