From: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>

Introduce xe_bo_put_async() to put a bo from contexts where the bo
destructor can't run, due to lockdep problems or atomic context. If the
put is the final put, freeing will be done from a work item.

v5:
- Kernel doc for xe_bo_put_async (Thomas)

Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Reviewed-by: Matthew Brost <matthew.brost@xxxxxxxxx>
---
 drivers/gpu/drm/xe/xe_bo.c           | 25 +++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_bo.h           | 19 +++++++++++++++++++
 drivers/gpu/drm/xe/xe_device.c       |  3 +++
 drivers/gpu/drm/xe/xe_device_types.h |  8 ++++++++
 4 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index cd1c693c0b62..a2a924b531e5 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -2644,6 +2644,31 @@ void xe_bo_put_commit(struct llist_head *deferred)
 		drm_gem_object_free(&bo->ttm.base.refcount);
 }
 
+static void xe_bo_dev_work_func(struct work_struct *work)
+{
+	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
+
+	xe_bo_put_commit(&bo_dev->async_list);
+}
+
+/**
+ * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
+{
+	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
+}
+
+/**
+ * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
+{
+	flush_work(&bo_dev->async_free);
+}
+
 void xe_bo_put(struct xe_bo *bo)
 {
 	struct xe_tile *tile;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index f09b9315721b..9dfec438d1c7 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -322,6 +322,25 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
 
 void xe_bo_put_commit(struct llist_head *deferred);
 
+/**
+ * xe_bo_put_async() - Put BO async
+ * @bo: The bo to put.
+ *
+ * Put BO async; the final put is deferred to a worker so the free runs outside of IRQ context.
+ */
+static inline void
+xe_bo_put_async(struct xe_bo *bo)
+{
+	struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;
+
+	if (xe_bo_put_deferred(bo, &bo_device->async_list))
+		schedule_work(&bo_device->async_free);
+}
+
+void xe_bo_dev_init(struct xe_bo_dev *bo_device);
+
+void xe_bo_dev_fini(struct xe_bo_dev *bo_device);
+
 struct sg_table *xe_bo_sg(struct xe_bo *bo);
 
 /*
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 36d7ffb3b4d9..756099e870cd 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -388,6 +388,8 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
 {
 	struct xe_device *xe = to_xe_device(dev);
 
+	xe_bo_dev_fini(&xe->bo_device);
+
 	if (xe->preempt_fence_wq)
 		destroy_workqueue(xe->preempt_fence_wq);
 
@@ -425,6 +427,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	if (WARN_ON(err))
 		goto err;
 
+	xe_bo_dev_init(&xe->bo_device);
 	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
 	if (err)
 		goto err;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 833c29fed3a3..6a41f608a7a1 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -525,6 +525,14 @@ struct xe_device {
 		int mode;
 	} wedged;
 
+	/** @bo_device: Struct to control async free of BOs */
+	struct xe_bo_dev {
+		/** @async_free: Free worker */
+		struct work_struct async_free;
+		/** @async_list: List of BOs to be freed */
+		struct llist_head async_list;
+	} bo_device;
+
 	/** @pmu: performance monitoring unit */
 	struct xe_pmu pmu;
 
-- 
2.34.1
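
For reference, a minimal sketch of the intended call pattern (illustration
only, not part of the patch; the example_fence_cb / example_fence_signaled
names are hypothetical, everything else comes from the patch or existing
kernel API):

/*
 * Illustration only: a hypothetical dma-fence callback that holds the
 * last reference to a BO. Fence callbacks may run in hard-IRQ context,
 * where the ordinary xe_bo_put() destructor path must not run, so the
 * final put is routed through xe_bo_put_async() and the actual free is
 * performed by the xe_bo_dev worker added in this patch.
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>

#include "xe_bo.h"

struct example_fence_cb {
	struct dma_fence_cb base;
	struct xe_bo *bo;	/* reference held until the fence signals */
};

static void example_fence_signaled(struct dma_fence *fence,
				   struct dma_fence_cb *cb)
{
	struct example_fence_cb *ecb = container_of(cb, typeof(*ecb), base);

	/* Possibly atomic/IRQ context: defer the final free to a worker. */
	xe_bo_put_async(ecb->bo);
	kfree(ecb);
}

A caller would take an extra BO reference, register example_fence_signaled()
with dma_fence_add_callback(), and rely on the flush_work() in
xe_bo_dev_fini() at device teardown to ensure any pending frees have
completed.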