On devices with non-mappable LMEM, ensure we always allocate the pages
within the mappable portion. For now we assume that all LMEM buffers
will require CPU access, which is also in line with pretty much all
current kernel-internal users. In the next patch we will introduce a
new flag to override this behaviour.

Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4 ++++
 drivers/gpu/drm/i915/intel_region_ttm.c | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e1140ca3d9a0..d9a04c7d41b1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -128,6 +128,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
 
 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
 		place->flags = TTM_PL_FLAG_CONTIGUOUS;
+	if (mr->io_size && mr->io_size < mr->total) {
+		place->fpfn = 0;
+		place->lpfn = mr->io_size >> PAGE_SHIFT;
+	}
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index f2b888c16958..4689192d5e8d 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -199,6 +199,11 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
 	struct ttm_resource *res;
 	int ret;
 
+	if (mem->io_size && mem->io_size < mem->total) {
+		place.fpfn = 0;
+		place.lpfn = mem->io_size >> PAGE_SHIFT;
+	}
+
 	mock_bo.base.size = size;
 	place.flags = flags;
 
-- 
2.34.1
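
For reference, below is a small standalone sketch (not part of the patch)
that mirrors the fpfn/lpfn clamping in isolation. The names used here
(example_region, example_place, example_clamp_to_mappable) and the 4K
page-size assumption are made up purely for illustration:

#include <stddef.h>

#define EXAMPLE_PAGE_SHIFT 12 /* assume 4K pages for this sketch */

struct example_region {
	size_t io_size; /* CPU-mappable part of the region, in bytes */
	size_t total;   /* total size of the region, in bytes */
};

struct example_place {
	unsigned long fpfn; /* first allowed page frame number */
	unsigned long lpfn; /* last allowed page frame number, 0 = no limit */
};

/*
 * If only part of the region is CPU-mappable (io_size < total), restrict
 * the allowed placement window to [0, io_size) so that anything allocated
 * from it can still be mapped by the CPU.
 */
static void example_clamp_to_mappable(const struct example_region *mr,
				      struct example_place *place)
{
	if (mr->io_size && mr->io_size < mr->total) {
		place->fpfn = 0;
		place->lpfn = mr->io_size >> EXAMPLE_PAGE_SHIFT;
	}
}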