On 6/8/21 10:11 AM, Matthew Auld wrote:
On 08/06/2021 08:11, Thomas Hellström wrote:
On Mon, 2021-06-07 at 19:22 +0100, Matthew Auld wrote:
Add back our standalone i915_buddy allocator and integrate it into a
ttm_resource_manager. This will plug into our ttm backend for managing
device local-memory in the next couple of patches.
Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
---
Since the buddy + selftests have been part of the driver before, I
didn't review them separately, but for the TTM interface, some minor
comments below. With those fixed,
Acked-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
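As an aside for readers, the order math in i915_ttm_buddy_man_alloc()
below can be illustrated with a small standalone sketch (not part of the
patch; the helper and the example values are made up for illustration):

/*
 * Standalone illustration of the order math used in
 * i915_ttm_buddy_man_alloc() below; not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

/* Smallest order whose block (chunk_size << order) covers "size" bytes. */
static unsigned int order_for_size(uint64_t size, uint64_t chunk_size)
{
	unsigned int order = 0;

	while ((chunk_size << order) < size)
		order++;
	return order;
}

int main(void)
{
	uint64_t chunk_size = 4096;       /* mm->chunk_size */
	uint64_t page_alignment = 65536;  /* bo->page_alignment (64K) */
	uint64_t size = 3ull << 20;       /* 3 MiB request */
	uint64_t contig_size = 1;

	/* min_order = ilog2(page_alignment) - ilog2(chunk_size) -> 4 */
	unsigned int min_order = order_for_size(page_alignment, chunk_size);

	/*
	 * TTM_PL_FLAG_CONTIGUOUS: round the request up to a power of two so
	 * that a single buddy block of that order can satisfy it (4 MiB here).
	 */
	while (contig_size < size)
		contig_size <<= 1;

	printf("min_order=%u contiguous_order=%u\n",
	       min_order, order_for_size(contig_size, chunk_size));
	return 0;
}

With those numbers the non-contiguous path starts at order fls(n_pages) - 1
and never drops below min_order, while the contiguous path bumps min_order
so only a single block of the rounded-up size can be returned.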
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
new file mode 100644
index 000000000000..d7bf37be1932
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "i915_ttm_buddy_manager.h"
+
+#include "i915_buddy.h"
+#include "i915_gem.h"
+
+struct i915_ttm_buddy_manager {
+ struct ttm_resource_manager manager;
+ struct i915_buddy_mm mm;
+ struct list_head reserved;
+ struct mutex lock;
+};
+
+static inline struct i915_ttm_buddy_manager *
"inline" shouldn't be needed here.
+to_buddy_manager(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct i915_ttm_buddy_manager, manager);
+}
+
+static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct i915_ttm_buddy_resource *bman_res;
+ struct i915_buddy_mm *mm = &bman->mm;
+ unsigned long n_pages;
+ unsigned int min_order;
+ u64 size;
+ int err;
+
+ GEM_BUG_ON(place->fpfn || place->lpfn);
+ GEM_BUG_ON(bo->page_alignment < mm->chunk_size);
+
+ bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
+ if (!bman_res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, &bman_res->base);
+ INIT_LIST_HEAD(&bman_res->blocks);
+ bman_res->mm = mm;
+
+ GEM_BUG_ON(!bman_res->base.num_pages);
+ size = bman_res->base.num_pages << PAGE_SHIFT;
+
+ min_order = ilog2(bo->page_alignment) - ilog2(mm->chunk_size);
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_order = ilog2(size) - ilog2(mm->chunk_size);
+ }
+
+ if (size > mm->size) {
+ err = -E2BIG;
+ goto err_free_res;
+ }
+
+ n_pages = size >> ilog2(mm->chunk_size);
+
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mm->max_order);
+ GEM_BUG_ON(order < min_order);
+
+ do {
+ mutex_lock(&bman->lock);
+ block = i915_buddy_alloc(mm, order);
+ mutex_unlock(&bman->lock);
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order) {
+ err = -ENXIO;
IIRC, TTM relies on -ENOSPC to retry with evictions.
Ah, right. We convert that back to -ENXIO in the upper levels somewhere?
Yes, that's done in the ttm bo backend after ttm_bo_validate() and bo
initialization.
/Thomas
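
For anyone following along later: the conversion Thomas describes would
boil down to something like the sketch below. The helper name is
illustrative only and not taken from this series; the point is that the
resource manager reports -ENOSPC so ttm_bo_validate() can retry with
evictions, and only the i915 side of the backend turns the final failure
into -ENXIO:

/*
 * Illustrative only -- not the actual i915 helper. The manager returns
 * -ENOSPC so TTM retries with eviction; once validation has finally
 * failed, the backend maps that onto the -ENXIO the GEM layer expects.
 */
static int example_ttm_err_to_gem(int err)
{
	if (err == -ENOSPC)
		return -ENXIO;
	return err;
}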