Hi,

Much more likeable in general, some comments inline.

On 03/12/2015 11:10 AM, Joonas Lahtinen wrote:
GGTT views are only applicable when dealing with GGTT. Change the code
to reject ggtt_view where it should not be used and require it when it
should be.

v2:
- Dropped _ppgtt_ infixes, allow both types to be passed
- Disregard other but normal views when no view is specified
- More checks that valid parameters are passed
- More readable error checking

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h     | 102 +++++++++-------------
 drivers/gpu/drm/i915/i915_gem.c     | 169 +++++++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_gtt.c |  67 ++++++++++----
 3 files changed, 219 insertions(+), 119 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a80b15f..8b7d4f9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2620,20 +2620,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_GLOBAL 0x4
 #define PIN_OFFSET_BIAS 0x8
 #define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  uint32_t alignment,
-					  uint64_t flags,
-					  const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm,
-				     uint32_t alignment,
-				     uint64_t flags)
-{
-	return i915_gem_object_pin_view(obj, vm, alignment, flags,
-					&i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags);
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
@@ -2797,60 +2793,48 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 void i915_gem_restore_fences(struct drm_device *dev);
 
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
-				       struct i915_address_space *vm,
-				       enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-				  struct i915_address_space *vm)
+static inline bool i915_is_ggtt(struct i915_address_space *vm);
+
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+			      enum i915_ggtt_view_type view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+		    struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
-	return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+	return i915_gem_obj_ggtt_offset_view(o, I915_GGTT_VIEW_NORMAL);
 }
+
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
-			     struct i915_address_space *vm,
-			     enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+				  enum i915_ggtt_view_type view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+			struct i915_address_space *vm);
 
 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
-				       struct i915_address_space *vm,
-				       const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+			  const struct i915_ggtt_view *view);
 
-static inline
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
-{
-	return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
-						      &i915_ggtt_view_normal);
-}
+				  struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+				       const struct i915_ggtt_view *view);
 
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->pin_count > 0)
-			return true;
-	return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
 }
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
@@ -2873,13 +2857,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+	return i915_gem_obj_ggtt_bound_view(obj, I915_GGTT_VIEW_NORMAL);
 }
 
 static inline unsigned long
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0fe313d..b6c3b9f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3518,9 +3518,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
+			   const struct i915_ggtt_view *ggtt_view,
			   unsigned alignment,
-			   uint64_t flags,
-			   const struct i915_ggtt_view *view)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3532,6 +3532,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
+	if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return ERR_PTR(-EINVAL);
+
 	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
@@ -3570,7 +3573,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+			  i915_gem_obj_lookup_or_create_vma(obj, vm);
+
 	if (IS_ERR(vma))
 		goto err_unpin;
@@ -4167,12 +4172,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 	return false;
 }
 
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-			 struct i915_address_space *vm,
-			 uint32_t alignment,
-			 uint64_t flags,
-			 const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+		       struct i915_address_space *vm,
+		       const struct i915_ggtt_view *ggtt_view,
+		       uint32_t alignment,
+		       uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -4188,17 +4193,26 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
 		return -EINVAL;
 
-	vma = i915_gem_obj_to_vma_view(obj, vm, view);
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return -EINVAL;
+
+	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+			  i915_gem_obj_to_vma(obj, vm);
+
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, alignment, flags)) {
+			unsigned long offset;
+			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view->type) :
+					     i915_gem_obj_offset(obj, vm);
 			WARN(vma->pin_count,
-			     "bo is already pinned with incorrect alignment:"
+			     "bo is already pinned in %s with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
-			     i915_gem_obj_offset_view(obj, vm, view->type),
+			     ggtt_view ? "ggtt" : "ppgtt",
+			     offset,
			     alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
@@ -4212,8 +4226,8 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 
 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 flags, view);
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+						 flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}
@@ -4254,6 +4268,28 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags)
+{
+	return i915_gem_object_do_pin(obj, vm,
+				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+				      alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags)
+{
+	BUG_ON(!view);
I wouldn't crash the system here but just WARN_ONCE and fail the pin, since that failure path is handled well anyway.
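Something along these lines perhaps (untested sketch, just to illustrate the idea; the message text is made up):

int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags)
{
	/* Complain once about the broken caller, then fail the pin
	 * instead of taking the whole box down with BUG_ON(). */
	if (WARN_ONCE(!view, "no GGTT view provided for pinning\n"))
		return -EINVAL;

	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
				      alignment, flags);
}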
+	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+				      alignment, flags);
+}
+
 void
 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
@@ -4559,15 +4595,31 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm && vma->ggtt_view.type == view->type)
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
Micro-optimisation territory, but a view type other than normal implies GGTT, so maybe there is no need for i915_is_ggtt? It works because vma objects are zeroed on allocation, and I have (or had) a BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0) somewhere to back that up.
There are more instances of this pattern, but it's your call.
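For illustration, the loop in i915_gem_obj_to_vma() could then shrink to something like this (untested, relies on the zeroed-on-allocation invariant):

	/* Guarantee that a zeroed vma reads as the normal view. */
	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		/* Non-normal views only exist in the GGTT, so this single
		 * check is enough to skip them for any address space. */
		if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma;
	}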
+		if (vma->vm == vm)
 			return vma;
+	}
+
 	return NULL;
 }
 
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+					   const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct i915_vma *vma;
+
+	BUG_ON(!view);
This BUG_ON is kind of correct, but it is still nicer not to crash the box on programming errors if they can be avoided. In this case it looks like it would be easy to return ERR_PTR here and add handling to the caller(s). And WARN_ONCE here, of course.
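Roughly like this (untested sketch; the message text is made up):

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	/* Programming error, but no need to bring the machine down. */
	if (WARN_ONCE(!view, "no GGTT view provided\n"))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == ggtt && vma->ggtt_view.type == view->type)
			return vma;

	return NULL;
}

i915_gem_obj_lookup_or_create_ggtt_vma() would then need to pass the ERR_PTR through instead of treating it as "not found".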
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == ggtt && vma->ggtt_view.type == view->type)
+			return vma;
 
 	return NULL;
 }
 
@@ -5176,9 +5228,9 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 }
 
 /* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
-				       struct i915_address_space *vm,
-				       enum i915_ggtt_view_type view)
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+		    struct i915_address_space *vm)
 {
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -5186,23 +5238,58 @@ unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (vma->vm == vm && vma->ggtt_view.type == view)
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
 			return vma->node.start;
-	}
+	}
+
+	WARN(1, "%s vma for this object not found.\n",
+	     i915_is_ggtt(vm) ? "global" : "ppgtt");
 
 	return -1;
 }
 
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
-			     struct i915_address_space *vm,
-			     enum i915_ggtt_view_type view)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+			      enum i915_ggtt_view_type view)
+{
+	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (vma->vm == ggtt && vma->ggtt_view.type == view)
+			return vma->node.start;
+
+	WARN(1, "global vma for this object not found.\n");
+	return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+			return true;
+	}
+
+	return false;
+}
+
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+				  enum i915_ggtt_view_type view)
 {
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, vma_link)
-		if (vma->vm == vm &&
+		if (vma->vm == ggtt &&
 		    vma->ggtt_view.type == view &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
@@ -5231,10 +5318,13 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
 		if (vma->vm == vm)
 			return vma->node.size;
-
+	}
 	return 0;
 }
Oh wow, I just realised this function is actually very misleading, and I have a bug in my rotation code because of that.
It actually only has a single user - i915_gem_obj_ggtt_size - which kind of tells you what it does: it gets the size of an object in GGTT space.
i915_gem_obj_size, on the other hand, lies and should be called i915_gem_obj_gtt_size instead.
Because of the single user, I would vote to simply remove it, or in other words rename it to i915_gem_obj_ggtt_size.
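Folded into its only caller it would end up as something like this (untested sketch):

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

	BUG_ON(list_empty(&o->vma_list));

	/* Size of the GGTT allocation backing the normal view. */
	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;

	return 0;
}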
@@ -5334,15 +5424,16 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	return NOTIFY_DONE;
 }
 
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == ggtt &&
-		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-			return vma;
-
-	return NULL;
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->pin_count > 0)
+			return true;
+	}
+	return false;
 }
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2034f7c..d2f9041 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -67,8 +67,9 @@
  * i915_ggtt_view_type and struct i915_ggtt_view.
  *
  * A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
  *
  * As a helper for callers which are only interested in the normal view,
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -1726,11 +1727,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	struct drm_device *dev = vma->vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct sg_table *pages = obj->pages;
 
 	/* Currently applicable only to VLV */
 	if (obj->gt_ro)
 		flags |= PTE_READ_ONLY;
 
+	if (i915_is_ggtt(vma->vm))
+		pages = vma->ggtt_view.pages;
+
 	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have
	 * changed, set the global PTEs.
@@ -1745,7 +1750,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
 		if (!(vma->bound & GLOBAL_BIND) ||
 		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+			vma->vm->insert_entries(vma->vm, pages,
						vma->node.start,
						cache_level, flags);
 			vma->bound |= GLOBAL_BIND;
@@ -1756,8 +1761,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	    (!(vma->bound & LOCAL_BIND) ||
 	     (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-		appgtt->base.insert_entries(&appgtt->base,
-					    vma->ggtt_view.pages,
+		appgtt->base.insert_entries(&appgtt->base, pages,
					    vma->node.start,
					    cache_level, flags);
 		vma->bound |= LOCAL_BIND;
@@ -2331,23 +2335,28 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	return 0;
 }
 
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-					      struct i915_address_space *vm,
-					      const struct i915_ggtt_view *view)
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+		      struct i915_address_space *vm,
+		      const struct i915_ggtt_view *ggtt_view)
 {
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return ERR_PTR(-EINVAL);
+
 	INIT_LIST_HEAD(&vma->vma_link);
 	INIT_LIST_HEAD(&vma->mm_list);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
-	vma->ggtt_view = *view;
 
 	if (INTEL_INFO(vm->dev)->gen >= 6) {
 		if (i915_is_ggtt(vm)) {
+			vma->ggtt_view = *ggtt_view;
+
 			vma->unbind_vma = ggtt_unbind_vma;
 			vma->bind_vma = ggtt_bind_vma;
 		} else {
@@ -2356,6 +2365,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		}
 	} else {
 		BUG_ON(!i915_is_ggtt(vm));
+		vma->ggtt_view = *ggtt_view;
 		vma->unbind_vma = i915_ggtt_unbind_vma;
 		vma->bind_vma = i915_ggtt_bind_vma;
 	}
@@ -2368,21 +2378,40 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 }
 
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
-				       struct i915_address_space *vm,
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm,
+					    i915_is_ggtt(vm) ?
+					    &i915_ggtt_view_normal : NULL);
+
+	return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view)
 {
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;
 
-	vma = i915_gem_obj_to_vma_view(obj, vm, view);
+	if (WARN_ON(!view))
+		return ERR_PTR(-EINVAL);
+
+	vma = i915_gem_obj_to_ggtt_view(obj, view);
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, vm, view);
+		vma = __i915_gem_vma_create(obj, ggtt, view);
 
 	return vma;
+
 }
+
 static inline
-int i915_get_vma_pages(struct i915_vma *vma)
+int i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	if (vma->ggtt_view.pages)
 		return 0;
@@ -2394,7 +2423,7 @@ int i915_get_vma_pages(struct i915_vma *vma)
			vma->ggtt_view.type);
 
 	if (!vma->ggtt_view.pages) {
-		DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
 		return -EINVAL;
 	}
@@ -2415,10 +2444,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
 {
-	int ret = i915_get_vma_pages(vma);
+	if (i915_is_ggtt(vma->vm)) {
+		int ret = i915_get_ggtt_vma_pages(vma);
 
-	if (ret)
-		return ret;
+		if (ret)
+			return ret;
+	}
 
 	vma->bind_vma(vma, cache_level, flags);
Regards,

Tvrtko