On 6/14/21 6:26 PM, Thomas Hellström wrote:
From: Ramalingam C <ramalingam.c@xxxxxxxxx>
Invoke the pipelined page migration through the blitter (blt) for
i915_ttm_move requests, covering both eviction and object clearing.
Signed-off-by: Ramalingam C <ramalingam.c@xxxxxxxxx>
---
v2:
- subfunction for accel_move (Thomas)
- engine_pm_get/put around context_move/clear (Thomas)
- Invalidation at accel_clear (Thomas)
v3:
- conflict resolution s/&bo->mem/bo->resource/g
---
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 87 +++++++++++++++++++++----
1 file changed, 74 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index bf33724bed5c..08b72c280cb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -15,6 +15,9 @@
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"
+#include "gt/intel_migrate.h"
+#include "gt/intel_engine_pm.h"
+
#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
@@ -282,6 +285,61 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
return intel_region_ttm_node_to_st(obj->mm.region, res);
}
+static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
+ struct ttm_resource *dst_mem,
+ struct sg_table *dst_st)
+{
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
+ struct ttm_resource_manager *src_man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct sg_table *src_st;
+ struct i915_request *rq;
+ int ret;
+
+ if (!i915->gt.migrate.context)
+ return -EINVAL;
+
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm)) {
+ if (bo->type == ttm_bo_type_kernel)
+ return -EINVAL;
+
+ if (bo->ttm &&
+ !(bo->ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+ return 0;
+
+ intel_engine_pm_get(i915->gt.migrate.context->engine);
+ ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
+ dst_st->sgl, I915_CACHE_NONE,
+ dst_mem->mem_type >= TTM_PL_PRIV,
Here we should probably use I915_PL_LMEM0 instead of TTM_PL_PRIV, but
since this test will be replaced by gpu_binds_iomem() in an upcoming
patch, it doesn't really matter.
+ 0, &rq);
+
+ if (!ret && rq) {
+ i915_request_wait(rq, 0, HZ);
Could this be MAX_SCHEDULE_TIMEOUT instead, to avoid surprises in case
the queue to the blitter gets long?
+ i915_request_put(rq);
+ }
+ intel_engine_pm_put(i915->gt.migrate.context->engine);
+ } else {
+ src_st = src_man->use_tt ? i915_ttm_tt_get_st(bo->ttm) :
+ obj->ttm.cached_io_st;
+
+ intel_engine_pm_get(i915->gt.migrate.context->engine);
+ ret = intel_context_migrate_copy(i915->gt.migrate.context,
+ NULL, src_st->sgl, I915_CACHE_NONE,
+ bo->resource->mem_type >= TTM_PL_PRIV,
+ dst_st->sgl, I915_CACHE_NONE,
+ dst_mem->mem_type >= TTM_PL_PRIV, &rq);
+ if (!ret && rq) {
+ i915_request_wait(rq, 0, HZ);
Same thing here.
With that fixed,
Reviewed-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>