On 05.02.21 at 09:06, John Stultz wrote:
This adds a shrinker-controlled page pool, closely
following the ttm_pool logic, which is abstracted out
a bit so it can be used by other non-ttm drivers.
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Christian Koenig <christian.koenig@xxxxxxx>
Cc: Sumit Semwal <sumit.semwal@xxxxxxxxxx>
Cc: Liam Mark <lmark@xxxxxxxxxxxxxx>
Cc: Chris Goldsworthy <cgoldswo@xxxxxxxxxxxxxx>
Cc: Laura Abbott <labbott@xxxxxxxxxx>
Cc: Brian Starkey <Brian.Starkey@xxxxxxx>
Cc: Hridya Valsaraju <hridya@xxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Sandeep Patil <sspatil@xxxxxxxxxx>
Cc: Daniel Mentz <danielmentz@xxxxxxxxxx>
Cc: Ørjan Eide <orjan.eide@xxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Ezequiel Garcia <ezequiel@xxxxxxxxxxxxx>
Cc: Simon Ser <contact@xxxxxxxxxxx>
Cc: James Jones <jajones@xxxxxxxxxx>
Cc: linux-media@xxxxxxxxxxxxxxx
Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
Signed-off-by: John Stultz <john.stultz@xxxxxxxxxx>
---
drivers/gpu/drm/Kconfig | 4 +
drivers/gpu/drm/Makefile | 1 +
drivers/gpu/drm/page_pool.c | 220 ++++++++++++++++++++++++++++++++++++
include/drm/page_pool.h | 54 +++++++++
4 files changed, 279 insertions(+)
create mode 100644 drivers/gpu/drm/page_pool.c
create mode 100644 include/drm/page_pool.h
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0973f408d75f..d16bf340ed2e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -174,6 +174,10 @@ config DRM_DP_CEC
Note: not all adapters support this feature, and even for those
that do support this they often do not hook up the CEC pin.
+config DRM_PAGE_POOL
+ bool
+ depends on DRM
+
config DRM_TTM
tristate
depends on DRM && MMU
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fefaff4c832d..877e0111ed34 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -32,6 +32,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm-$(CONFIG_DRM_PAGE_POOL) += page_pool.o
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
diff --git a/drivers/gpu/drm/page_pool.c b/drivers/gpu/drm/page_pool.c
new file mode 100644
index 000000000000..2139f86e6ca7
--- /dev/null
+++ b/drivers/gpu/drm/page_pool.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
Please use a BSD/MIT-compatible license if you want to copy this from
the TTM code.
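The header added below already carries the dual tag, so matching it here
would be enough, e.g.:

// SPDX-License-Identifier: GPL-2.0 OR MIT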
+/*
+ * DRM page pool system
+ *
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ * As well as the ttm_pool code
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/freezer.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/sched/signal.h>
+#include <drm/page_pool.h>
+
+static LIST_HEAD(pool_list);
+static DEFINE_MUTEX(pool_list_lock);
+static atomic_long_t total_pages;
+static unsigned long page_pool_max;
+MODULE_PARM_DESC(page_pool_max, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_max, ulong, 0644);
+
+void drm_page_pool_set_max(unsigned long max)
+{
+ /* only write once */
+ if (!page_pool_max)
+ page_pool_max = max;
+}
+
+unsigned long drm_page_pool_get_max(void)
+{
+ return page_pool_max;
+}
+
+unsigned long drm_page_pool_get_total(void)
+{
+ return atomic_long_read(&total_pages);
+}
+
+int drm_page_pool_get_size(struct drm_page_pool *pool)
+{
+ int ret;
+
+ spin_lock(&pool->lock);
+ ret = pool->count;
+ spin_unlock(&pool->lock);
Maybe use an atomic for the count instead? See the sketch after the
function.
+ return ret;
+}
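
Rough, untested sketch of what I mean, assuming the count field in
struct drm_page_pool becomes an atomic_t:

/* With an atomic counter the getter needs no locking at all */
int drm_page_pool_get_size(struct drm_page_pool *pool)
{
	return atomic_read(&pool->count);
}

drm_page_pool_add()/drm_page_pool_remove() would then just use
atomic_inc()/atomic_dec() while still holding the list lock.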
+
+static inline unsigned int drm_page_pool_free_pages(struct drm_page_pool *pool,
+ struct page *page)
+{
+ return pool->free(page, pool->order);
+}
+
+static int drm_page_pool_shrink_one(void);
+
+void drm_page_pool_add(struct drm_page_pool *pool, struct page *page)
+{
+ spin_lock(&pool->lock);
+ list_add_tail(&page->lru, &pool->items);
+ pool->count++;
+ atomic_long_add(1 << pool->order, &total_pages);
+ spin_unlock(&pool->lock);
+
+ mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+ 1 << pool->order);
Huh, what? What is that supposed to be good for?
+
+ /* make sure we don't grow too large */
+ while (page_pool_max && atomic_long_read(&total_pages) > page_pool_max)
+ drm_page_pool_shrink_one();
+}
+EXPORT_SYMBOL_GPL(drm_page_pool_add);
+
+static struct page *drm_page_pool_remove(struct drm_page_pool *pool)
+{
+ struct page *page;
+
+ if (!pool->count)
+ return NULL;
Better to use list_first_entry_or_null() instead of checking the count;
that way you can also pull the lock into the function. See the sketch
after this function.
+
+ page = list_first_entry(&pool->items, struct page, lru);
+ pool->count--;
+ atomic_long_sub(1 << pool->order, &total_pages);
+
+ list_del(&page->lru);
+ mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+ -(1 << pool->order));
+ return page;
+}
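
Rough, untested sketch with the lock pulled in and the callers'
spin_lock()/spin_unlock() pairs dropped:

static struct page *drm_page_pool_remove(struct drm_page_pool *pool)
{
	struct page *page;

	spin_lock(&pool->lock);
	/* NULL when the pool is empty, no need to look at the count */
	page = list_first_entry_or_null(&pool->items, struct page, lru);
	if (page) {
		list_del(&page->lru);
		pool->count--;
		atomic_long_sub(1 << pool->order, &total_pages);
		mod_node_page_state(page_pgdat(page),
				    NR_KERNEL_MISC_RECLAIMABLE,
				    -(1 << pool->order));
	}
	spin_unlock(&pool->lock);

	return page;
}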
+
+struct page *drm_page_pool_fetch(struct drm_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ if (!pool) {
+ WARN_ON(!pool);
+ return NULL;
+ }
+
+ spin_lock(&pool->lock);
+ page = drm_page_pool_remove(pool);
+ spin_unlock(&pool->lock);
+
+ return page;
+}
+EXPORT_SYMBOL_GPL(drm_page_pool_fetch);
+
+struct drm_page_pool *drm_page_pool_create(unsigned int order,
+ int (*free_page)(struct page *p, unsigned int order))
+{
+ struct drm_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
Why not make this an embedded object? We should not see much dynamic
pool creation. A possible init helper is sketched after this function.
+
+ if (!pool)
+ return NULL;
+
+ pool->count = 0;
+ INIT_LIST_HEAD(&pool->items);
+ pool->order = order;
+ pool->free = free_page;
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->list);
+
+ mutex_lock(&pool_list_lock);
+ list_add(&pool->list, &pool_list);
+ mutex_unlock(&pool_list_lock);
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(drm_page_pool_create);
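
What I would rather expect is something along these lines, with the
caller embedding struct drm_page_pool; drm_page_pool_init() is just a
placeholder name (untested):

void drm_page_pool_init(struct drm_page_pool *pool, unsigned int order,
			int (*free_page)(struct page *p, unsigned int order))
{
	/* The caller provides the storage, so nothing can fail here */
	pool->count = 0;
	INIT_LIST_HEAD(&pool->items);
	pool->order = order;
	pool->free = free_page;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	mutex_lock(&pool_list_lock);
	list_add(&pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);
}

drm_page_pool_destroy() would then drop the kfree() at the end and only
unlink and drain the pool.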
+
+void drm_page_pool_destroy(struct drm_page_pool *pool)
+{
+ struct page *page;
+
+ /* Remove us from the pool list */
+ mutex_lock(&pool_list_lock);
+ list_del(&pool->list);
+ mutex_unlock(&pool_list_lock);
+
+ /* Free any remaining pages in the pool */
+ spin_lock(&pool->lock);
Locking should be unnecessary when the pool is destroyed anyway; see the
sketch after this function.
+ while (pool->count) {
+ page = drm_page_pool_remove(pool);
+ spin_unlock(&pool->lock);
+ drm_page_pool_free_pages(pool, page);
+ spin_lock(&pool->lock);
+ }
+ spin_unlock(&pool->lock);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(drm_page_pool_destroy);
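
I.e. the drain loop could be reduced to something like this, assuming
drm_page_pool_remove() stays as it is and no other users remain when
destroy is called:

	/* No concurrent users left, so no need to take the spinlock here */
	while (pool->count) {
		page = drm_page_pool_remove(pool);
		drm_page_pool_free_pages(pool, page);
	}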
+
+static int drm_page_pool_shrink_one(void)
+{
+ struct drm_page_pool *pool;
+ struct page *page;
+ int nr_freed = 0;
+
+ mutex_lock(&pool_list_lock);
+ pool = list_first_entry(&pool_list, typeof(*pool), list);
+
+ spin_lock(&pool->lock);
+ page = drm_page_pool_remove(pool);
+ spin_unlock(&pool->lock);
+
+ if (page)
+ nr_freed = drm_page_pool_free_pages(pool, page);
+
+ list_move_tail(&pool->list, &pool_list);
Better to move this up, directly after the list_first_entry(); see the
sketch below the function.
+ mutex_unlock(&pool_list_lock);
+
+ return nr_freed;
+}
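
I.e. something like this ordering for the function body (untested):

	mutex_lock(&pool_list_lock);
	pool = list_first_entry(&pool_list, typeof(*pool), list);
	/* rotate right away so the next shrink starts with another pool */
	list_move_tail(&pool->list, &pool_list);

	spin_lock(&pool->lock);
	page = drm_page_pool_remove(pool);
	spin_unlock(&pool->lock);

	if (page)
		nr_freed = drm_page_pool_free_pages(pool, page);
	mutex_unlock(&pool_list_lock);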
+
+static unsigned long drm_page_pool_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long count = atomic_long_read(&total_pages);
+
+ return count ? count : SHRINK_EMPTY;
+}
+
+static unsigned long drm_page_pool_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ int to_scan = sc->nr_to_scan;
+ int nr_total = 0;
+
+ if (to_scan == 0)
+ return 0;
+
+ do {
+ int nr_freed = drm_page_pool_shrink_one();
+
+ to_scan -= nr_freed;
+ nr_total += nr_freed;
+ } while (to_scan >= 0 && atomic_long_read(&total_pages));
+
+ return nr_total;
+}
+
+static struct shrinker pool_shrinker = {
+ .count_objects = drm_page_pool_shrink_count,
+ .scan_objects = drm_page_pool_shrink_scan,
+ .seeks = 1,
+ .batch = 0,
+};
+
+int drm_page_pool_init_shrinker(void)
+{
+ return register_shrinker(&pool_shrinker);
+}
+module_init(drm_page_pool_init_shrinker);
+MODULE_LICENSE("GPL v2");
diff --git a/include/drm/page_pool.h b/include/drm/page_pool.h
new file mode 100644
index 000000000000..47e240b2bc69
--- /dev/null
+++ b/include/drm/page_pool.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _DRM_PAGE_POOL_H_
+#define _DRM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+
+struct drm_page_pool {
+ int count;
+ struct list_head items;
+
+ int order;
+ int (*free)(struct page *p, unsigned int order);
+
+ spinlock_t lock;
+ struct list_head list;
+};
+
+void drm_page_pool_set_max(unsigned long max);
+unsigned long drm_page_pool_get_max(void);
+unsigned long drm_page_pool_get_total(void);
+int drm_page_pool_get_size(struct drm_page_pool *pool);
+struct page *drm_page_pool_fetch(struct drm_page_pool *pool);
+void drm_page_pool_add(struct drm_page_pool *pool, struct page *page);
+struct drm_page_pool *drm_page_pool_create(unsigned int order,
+ int (*free_page)(struct page *p, unsigned int order));
+void drm_page_pool_destroy(struct drm_page_pool *pool);
+
+#endif