On 05.03.21 00:20, John Stultz wrote:
> This patch provides infrastructure for deferring buffer frees. This
> is a feature ION provided which, when used with some form of a page
> pool, provides a nice performance boost in an allocation
> microbenchmark. The reason it helps is that it allows the page
> zeroing to be done outside the normal allocation/free path and
> pushed off to a kthread.
In general that's a nice idea, but to be honest this implementation looks broken and rather inefficient.
You should probably integrate that into the DRM pool core functionality instead, which is currently clearing all freed pages anyway.
I would also use a work item per pool instead of a kthread; that would help with data locality.
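For illustration, a per-pool work item that also takes care of the page clearing could look roughly like the sketch below. This is only a sketch under assumed names: example_pool, example_pool_free_work() and example_pool_defer_free() are made up here, not the existing DRM/TTM pool code.

#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_pool {
	struct list_head free_pages;	/* pages waiting to be cleared */
	spinlock_t lock;
	struct work_struct free_work;	/* one work item per pool */
};

static void example_pool_free_work(struct work_struct *work)
{
	struct example_pool *pool = container_of(work, struct example_pool,
						 free_work);
	struct page *page, *tmp;
	unsigned long flags;
	LIST_HEAD(pages);

	/* Take the whole list so the lock is not held while clearing. */
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(&pool->free_pages, &pages);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		clear_highpage(page);	/* zeroing happens off the hot path */
		__free_page(page);	/* or keep the page in the pool */
	}
}

/* Called from the buffer free path instead of freeing synchronously. */
static void example_pool_defer_free(struct example_pool *pool,
				    struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&page->lru, &pool->free_pages);
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_work(&pool->free_work);
}

Pool setup would do INIT_WORK(&pool->free_work, example_pool_free_work). Since each work item belongs to one pool, the deferred clearing runs on pages that pool already owns, which is where the data-locality benefit comes from.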
Regards,
Christian.
> As not all heaps will find this useful, it's implemented as an
> optional helper library that heaps can utilize.
>
> Cc: Daniel Vetter <daniel@xxxxxxxx>
> Cc: Christian Koenig <christian.koenig@xxxxxxx>
> Cc: Sumit Semwal <sumit.semwal@xxxxxxxxxx>
> Cc: Liam Mark <lmark@xxxxxxxxxxxxxx>
> Cc: Chris Goldsworthy <cgoldswo@xxxxxxxxxxxxxx>
> Cc: Laura Abbott <labbott@xxxxxxxxxx>
> Cc: Brian Starkey <Brian.Starkey@xxxxxxx>
> Cc: Hridya Valsaraju <hridya@xxxxxxxxxx>
> Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
> Cc: Sandeep Patil <sspatil@xxxxxxxxxx>
> Cc: Daniel Mentz <danielmentz@xxxxxxxxxx>
> Cc: Ørjan Eide <orjan.eide@xxxxxxx>
> Cc: Robin Murphy <robin.murphy@xxxxxxx>
> Cc: Ezequiel Garcia <ezequiel@xxxxxxxxxxxxx>
> Cc: Simon Ser <contact@xxxxxxxxxxx>
> Cc: James Jones <jajones@xxxxxxxxxx>
> Cc: linux-media@xxxxxxxxxxxxxxx
> Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
> Signed-off-by: John Stultz <john.stultz@xxxxxxxxxx>
> ---
> v2:
> * Fix sleep in atomic issue from using a mutex, by switching to a
>   spinlock as Reported-by: kernel test robot <oliver.sang@xxxxxxxxx>
> * Cleanup API to use a reason enum for clarity and add some
>   documentation comments as suggested by Suren Baghdasaryan.
> v3:
> * Minor tweaks so it can be built as a module
> * A few small fixups suggested by Daniel Mentz
> v4:
> * Tweak from Daniel Mentz to make sure the shrinker count/freed
>   values are tracked in pages not bytes
> v5:
> * Fix up page count tracking as suggested by Suren Baghdasaryan
> v7:
> * Rework accounting to use nr_pages rather than size, as suggested
>   by Suren Baghdasaryan
> ---
>  drivers/dma-buf/heaps/Kconfig                |   3 +
>  drivers/dma-buf/heaps/Makefile               |   1 +
>  drivers/dma-buf/heaps/deferred-free-helper.c | 138 +++++++++++++++++++
>  drivers/dma-buf/heaps/deferred-free-helper.h |  55 ++++++++
>  4 files changed, 197 insertions(+)
>  create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.c
>  create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.h
>
> diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
> index a5eef06c4226..f7aef8bc7119 100644
> --- a/drivers/dma-buf/heaps/Kconfig
> +++ b/drivers/dma-buf/heaps/Kconfig
> @@ -1,3 +1,6 @@
> +config DMABUF_HEAPS_DEFERRED_FREE
> +	tristate
> +
>  config DMABUF_HEAPS_SYSTEM
>  	bool "DMA-BUF System Heap"
>  	depends on DMABUF_HEAPS
> diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
> index 974467791032..4e7839875615 100644
> --- a/drivers/dma-buf/heaps/Makefile
> +++ b/drivers/dma-buf/heaps/Makefile
> @@ -1,3 +1,4 @@
>  # SPDX-License-Identifier: GPL-2.0
> +obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
>  obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
>  obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
> diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c
> new file mode 100644
> index 000000000000..e19c8b68dfeb
> --- /dev/null
> +++ b/drivers/dma-buf/heaps/deferred-free-helper.c
> @@ -0,0 +1,138 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Deferred dmabuf freeing helper
> + *
> + * Copyright (C) 2020 Linaro, Ltd.
> + *
> + * Based on the ION page pool code
> + * Copyright (C) 2011 Google, Inc.
> + */
> +
> +#include <linux/freezer.h>
> +#include <linux/list.h>
> +#include <linux/slab.h>
> +#include <linux/swap.h>
> +#include <linux/sched/signal.h>
> +
> +#include "deferred-free-helper.h"
> +
> +static LIST_HEAD(free_list);
> +static size_t list_nr_pages;
> +wait_queue_head_t freelist_waitqueue;
> +struct task_struct *freelist_task;
> +static DEFINE_SPINLOCK(free_list_lock);
> +
> +void deferred_free(struct deferred_freelist_item *item,
> +		   void (*free)(struct deferred_freelist_item*,
> +				enum df_reason),
> +		   size_t nr_pages)
> +{
> +	unsigned long flags;
> +
> +	INIT_LIST_HEAD(&item->list);
> +	item->nr_pages = nr_pages;
> +	item->free = free;
> +
> +	spin_lock_irqsave(&free_list_lock, flags);
> +	list_add(&item->list, &free_list);
> +	list_nr_pages += nr_pages;
> +	spin_unlock_irqrestore(&free_list_lock, flags);
> +	wake_up(&freelist_waitqueue);
> +}
> +EXPORT_SYMBOL_GPL(deferred_free);
> +
> +static size_t free_one_item(enum df_reason reason)
> +{
> +	unsigned long flags;
> +	size_t nr_pages;
> +	struct deferred_freelist_item *item;
> +
> +	spin_lock_irqsave(&free_list_lock, flags);
> +	if (list_empty(&free_list)) {
> +		spin_unlock_irqrestore(&free_list_lock, flags);
> +		return 0;
> +	}
> +	item = list_first_entry(&free_list, struct deferred_freelist_item, list);
> +	list_del(&item->list);
> +	nr_pages = item->nr_pages;
> +	list_nr_pages -= nr_pages;
> +	spin_unlock_irqrestore(&free_list_lock, flags);
> +
> +	item->free(item, reason);
> +	return nr_pages;
> +}
> +
> +static unsigned long get_freelist_nr_pages(void)
> +{
> +	unsigned long nr_pages;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&free_list_lock, flags);
> +	nr_pages = list_nr_pages;
> +	spin_unlock_irqrestore(&free_list_lock, flags);
> +	return nr_pages;
> +}
> +
> +static unsigned long freelist_shrink_count(struct shrinker *shrinker,
> +					   struct shrink_control *sc)
> +{
> +	return get_freelist_nr_pages();
> +}
> +
> +static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
> +					  struct shrink_control *sc)
> +{
> +	unsigned long total_freed = 0;
> +
> +	if (sc->nr_to_scan == 0)
> +		return 0;
> +
> +	while (total_freed < sc->nr_to_scan) {
> +		size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
> +
> +		if (!pages_freed)
> +			break;
> +
> +		total_freed += pages_freed;
> +	}
> +
> +	return total_freed;
> +}
> +
> +static struct shrinker freelist_shrinker = {
> +	.count_objects = freelist_shrink_count,
> +	.scan_objects = freelist_shrink_scan,
> +	.seeks = DEFAULT_SEEKS,
> +	.batch = 0,
> +};
> +
> +static int deferred_free_thread(void *data)
> +{
> +	while (true) {
> +		wait_event_freezable(freelist_waitqueue,
> +				     get_freelist_nr_pages() > 0);
> +
> +		free_one_item(DF_NORMAL);
> +	}
> +
> +	return 0;
> +}
> +
> +static int deferred_freelist_init(void)
> +{
> +	list_nr_pages = 0;
> +
> +	init_waitqueue_head(&freelist_waitqueue);
> +	freelist_task = kthread_run(deferred_free_thread, NULL,
> +				    "%s", "dmabuf-deferred-free-worker");
> +	if (IS_ERR(freelist_task)) {
> +		pr_err("Creating thread for deferred free failed\n");
> +		return -1;
> +	}
> +	sched_set_normal(freelist_task, 19);
> +
> +	return register_shrinker(&freelist_shrinker);
> +}
> +module_init(deferred_freelist_init);
> +MODULE_LICENSE("GPL v2");
> +
> diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
> new file mode 100644
> index 000000000000..11940328ce3f
> --- /dev/null
> +++ b/drivers/dma-buf/heaps/deferred-free-helper.h
> @@ -0,0 +1,55 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef DEFERRED_FREE_HELPER_H
> +#define DEFERRED_FREE_HELPER_H
> +
> +/**
> + * df_reason - enum for reason why item was freed
> + *
> + * This provides a reason for why the free function was called
> + * on the item. This is useful when deferred_free is used in
> + * combination with a pagepool, so under pressure the page can
> + * be immediately freed.
> + *
> + * DF_NORMAL:         Normal deferred free
> + *
> + * DF_UNDER_PRESSURE: Free was called because the system
> + *                    is under memory pressure. Usually
> + *                    from a shrinker. Avoid allocating
> + *                    memory in the free call, as it may
> + *                    fail.
> + */
> +enum df_reason {
> +	DF_NORMAL,
> +	DF_UNDER_PRESSURE,
> +};
> +
> +/**
> + * deferred_freelist_item - item structure for deferred freelist
> + *
> + * This is to be added to the structure for whatever you want to
> + * defer freeing on.
> + *
> + * @nr_pages: number of pages used by item to be freed
> + * @free: function pointer to be called when freeing the item
> + * @list: list entry for the deferred list
> + */
> +struct deferred_freelist_item {
> +	size_t nr_pages;
> +	void (*free)(struct deferred_freelist_item *i,
> +		     enum df_reason reason);
> +	struct list_head list;
> +};
> +
> +/**
> + * deferred_free - call to add item to the deferred free list
> + *
> + * @item: Pointer to deferred_freelist_item field of a structure
> + * @free: Function pointer to the free call
> + * @nr_pages: number of pages to be freed
> + */
> +void deferred_free(struct deferred_freelist_item *item,
> +		   void (*free)(struct deferred_freelist_item *i,
> +				enum df_reason reason),
> +		   size_t nr_pages);
> +#endif
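For reference, a heap would typically hook into this helper along the lines of the sketch below. Only deferred_free(), struct deferred_freelist_item and enum df_reason come from the patch above; the example_heap_buffer structure and the example_heap_* functions are hypothetical names used for illustration.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "deferred-free-helper.h"

/* Hypothetical heap buffer embedding the freelist item. */
struct example_heap_buffer {
	struct sg_table sg_table;
	size_t nr_pages;
	struct deferred_freelist_item deferred_free;
};

/* Invoked later by the helper, from its kthread or its shrinker. */
static void example_heap_buf_free(struct deferred_freelist_item *item,
				  enum df_reason reason)
{
	struct example_heap_buffer *buffer =
		container_of(item, struct example_heap_buffer, deferred_free);
	struct sg_page_iter piter;

	for_each_sgtable_page(&buffer->sg_table, &piter, 0) {
		/*
		 * With a page pool, DF_NORMAL would return the page to the
		 * pool, while DF_UNDER_PRESSURE would release it right away
		 * without allocating anything.
		 */
		__free_page(sg_page_iter_page(&piter));
	}
	sg_free_table(&buffer->sg_table);
	kfree(buffer);
}

/* Called from the heap's dma-buf release op instead of freeing inline. */
static void example_heap_release(struct example_heap_buffer *buffer)
{
	deferred_free(&buffer->deferred_free, example_heap_buf_free,
		      buffer->nr_pages);
}

The free callback runs later, either from the helper's kthread (DF_NORMAL) or from its shrinker under memory pressure (DF_UNDER_PRESSURE), so per the header documentation it should avoid allocating memory in the DF_UNDER_PRESSURE case.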