On Mon, Jul 05, 2021 at 02:53:06PM +0100, Matthew Auld wrote:
> For discrete, users of pin_map() need to obey the same rules as the TTM
> backend, where we map system-only objects as WB and everything else as
> WC. The simplest approach for now is to just force the correct mapping
> type as per the new rules for discrete.
>
> Suggested-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
> Cc: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
> Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
> Cc: Ramalingam C <ramalingam.c@xxxxxxxxx>

Huge thanks for all the kerneldoc work for uapi you're doing in this
series; it should help a lot with umd conversions, since we can just
point people at the docs and tell them to please update their code.

Yes, I know there's no kerneldoc here, but I didn't see the cover
letter :-)

Cheers, Daniel
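As a rough illustration of the rule the commit message describes, here
is a minimal standalone sketch (not code from the patch; the type names
are made up for the example): system-only objects get WB CPU mappings,
while anything that can be placed in lmem gets WC.

/*
 * Illustrative sketch only: the WB/WC rule described above. The types
 * here are simplified stand-ins for intel_memory_type and the object's
 * placement list.
 */
enum mem_type { MEM_SYSTEM, MEM_LOCAL };
enum map_type { MAP_WB, MAP_WC };

struct obj_sketch {
	unsigned int n_placements;
	enum mem_type placements[2];
};

/* System-only objects map as WB; anything that can land in lmem maps as WC. */
static enum map_type pick_map_type(const struct obj_sketch *obj)
{
	unsigned int i;

	for (i = 0; i < obj->n_placements; i++) {
		if (obj->placements[i] == MEM_LOCAL)
			return MAP_WC;
	}

	return MAP_WB;
}

The patch below implements this check via
i915_gem_object_placement_possible() inside i915_gem_object_pin_map().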
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_object.c | 34 ++++++++++++++++++++++
>  drivers/gpu/drm/i915/gem/i915_gem_object.h |  4 +++
>  drivers/gpu/drm/i915/gem/i915_gem_pages.c  | 22 ++++++++++++--
>  3 files changed, 58 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 547cc9dad90d..9da7b288b7ed 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -625,6 +625,40 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
>  	return obj->ops->migrate(obj, mr);
>  }
>
> +/**
> + * i915_gem_object_placement_possible - Check whether the object can be
> + * placed at certain memory type
> + * @obj: Pointer to the object
> + * @type: The memory type to check
> + *
> + * Return: True if the object can be placed in @type. False otherwise.
> + */
> +bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
> +					enum intel_memory_type type)
> +{
> +	unsigned int i;
> +
> +	if (!obj->mm.n_placements) {
> +		switch (type) {
> +		case INTEL_MEMORY_LOCAL:
> +			return i915_gem_object_has_iomem(obj);
> +		case INTEL_MEMORY_SYSTEM:
> +			return i915_gem_object_has_pages(obj);
> +		default:
> +			/* Ignore stolen for now */
> +			GEM_BUG_ON(1);
> +			return false;
> +		}
> +	}
> +
> +	for (i = 0; i < obj->mm.n_placements; i++) {
> +		if (obj->mm.placements[i]->type == type)
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
>  void i915_gem_init__objects(struct drm_i915_private *i915)
>  {
>  	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index d423d8cac4f2..8be4fadeee48 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -12,6 +12,7 @@
>  #include <drm/drm_device.h>
>
>  #include "display/intel_frontbuffer.h"
> +#include "intel_memory_region.h"
>  #include "i915_gem_object_types.h"
>  #include "i915_gem_gtt.h"
>  #include "i915_gem_ww.h"
> @@ -607,6 +608,9 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
>  int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
>  				   unsigned int flags);
>
> +bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
> +					enum intel_memory_type type);
> +
>  #ifdef CONFIG_MMU_NOTIFIER
>  static inline bool
>  i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> index f2f850e31b8e..810a157a18f8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> @@ -321,8 +321,7 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
>  	dma_addr_t addr;
>  	void *vaddr;
>
> -	if (type != I915_MAP_WC)
> -		return ERR_PTR(-ENODEV);
> +	GEM_BUG_ON(type != I915_MAP_WC);
>
>  	if (n_pfn > ARRAY_SIZE(stack)) {
>  		/* Too big for stack -- allocate temporary array instead */
> @@ -374,6 +373,25 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>  	}
>  	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
>
> +	/*
> +	 * For discrete our CPU mappings needs to be consistent in order to
> +	 * function correctly on !x86. When mapping things through TTM, we use
> +	 * the same rules to determine the caching type.
> +	 *
> +	 * Internal users of lmem are already expected to get this right, so no
> +	 * fudging needed there.
> +	 */
> +	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
> +		if (type != I915_MAP_WC && !obj->mm.n_placements) {
> +			ptr = ERR_PTR(-ENODEV);
> +			goto err_unpin;
> +		}
> +
> +		type = I915_MAP_WC;
> +	} else if (IS_DGFX(to_i915(obj->base.dev))) {
> +		type = I915_MAP_WB;
> +	}
> +
>  	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
>  	if (ptr && has_type != type) {
>  		if (pinned) {
> --
> 2.26.3
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
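For completeness, a hypothetical, self-contained restatement of the
pin_map() hunk quoted above, spelling out what a caller sees on
discrete; the function and parameter names here are made up for the
example and are not the driver's API.

/*
 * Sketch only: the effective mapping-type decision for discrete.
 *
 * lmem_possible:  the object can be placed in device-local memory
 * has_placements: the object carries an explicit placement list
 * Returns the effective mapping type, or a negative errno.
 */
#include <errno.h>
#include <stdbool.h>

enum map_type { MAP_WB, MAP_WC };

static int effective_map_type(bool lmem_possible, bool has_placements,
			      enum map_type requested)
{
	if (lmem_possible) {
		/* Internal lmem-only objects must ask for WC explicitly. */
		if (requested != MAP_WC && !has_placements)
			return -ENODEV;
		/* Objects created with a placement list are coerced to WC. */
		return MAP_WC;
	}

	/* On discrete, system-only objects are always mapped WB. */
	return MAP_WB;
}

In other words, an object created with an lmem placement always comes
back write-combined regardless of the type the caller asked for, while
an internal lmem-only object requesting anything other than WC gets
-ENODEV.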