[merged mm-stable] dmapool-create-destroy-cleanup.patch removed from -mm tree

The quilt patch titled
     Subject: dmapool: create/destroy cleanup
has been removed from the -mm tree.  Its filename was
     dmapool-create-destroy-cleanup.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Keith Busch <kbusch@xxxxxxxxxx>
Subject: dmapool: create/destroy cleanup
Date: Thu, 26 Jan 2023 13:51:25 -0800

Set the 'empty' bool directly from the result of the function that
determines its value, rather than deriving it with extra conditional logic.
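
As an illustration of the idiom (a sketch only, not a quote from the diff
below; the names follow the dma_pool_destroy() context in dmapool.c):

	/* before: initialise the flag, then conditionally override it */
	bool empty = false;

	if (list_empty(&pool->dev->dma_pools))
		empty = true;

	/* after: assign the predicate's result directly */
	bool empty = list_empty(&pool->dev->dma_pools);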

Link: https://lkml.kernel.org/r/20230126215125.4069751-13-kbusch@xxxxxxxx
Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Tony Battersby <tonyb@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---


--- a/mm/dmapool.c~dmapool-create-destroy-cleanup
+++ a/mm/dmapool.c
@@ -15,7 +15,7 @@
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
  * allocated pages.  Each page in the page_list is split into blocks of at
  * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
- * list of free blocks across all pages.  Used blocks aren't tracked, but we
+ * list of free blocks within the page.  Used blocks aren't tracked, but we
  * keep a count of how many are currently allocated from each page.
  */
 
@@ -40,22 +40,13 @@
 #define DMAPOOL_DEBUG 1
 #endif
 
-struct dma_block {
-	struct dma_block *next_block;
-	dma_addr_t dma;
-};
-
 struct dma_pool {		/* the pool */
 	struct list_head page_list;
 	spinlock_t lock;
-	struct dma_block *next_block;
-	size_t nr_blocks;
-	size_t nr_active;
-	size_t nr_pages;
+	size_t size;
 	struct device *dev;
-	unsigned int size;
-	unsigned int allocation;
-	unsigned int boundary;
+	size_t allocation;
+	size_t boundary;
 	char name[32];
 	struct list_head pools;
 };
@@ -64,6 +55,8 @@ struct dma_page {		/* cacheable header f
 	struct list_head page_list;
 	void *vaddr;
 	dma_addr_t dma;
+	unsigned int in_use;
+	unsigned int offset;
 };
 
 static DEFINE_MUTEX(pools_lock);
@@ -71,132 +64,45 @@ static DEFINE_MUTEX(pools_reg_lock);
 
 static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_pool *pool;
+	unsigned temp;
 	unsigned size;
-
-	size = sysfs_emit(buf, "poolinfo - 0.1\n");
-
-	mutex_lock(&pools_lock);
-	list_for_each_entry(pool, &dev->dma_pools, pools) {
-		/* per-pool info, no real statistics yet */
-		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
-				      pool->name, pool->nr_active,
-				      pool->nr_blocks, pool->size,
-				      pool->nr_pages);
-	}
-	mutex_unlock(&pools_lock);
-
-	return size;
-}
-
-static DEVICE_ATTR_RO(pools);
-
-#ifdef DMAPOOL_DEBUG
-static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
-			     gfp_t mem_flags)
-{
-	u8 *data = (void *)block;
-	int i;
-
-	for (i = sizeof(struct dma_block); i < pool->size; i++) {
-		if (data[i] == POOL_POISON_FREED)
-			continue;
-		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
-			pool->name, block);
-
-		/*
-		 * Dump the first 4 bytes even if they are not
-		 * POOL_POISON_FREED
-		 */
-		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
-				data, pool->size, 1);
-		break;
-	}
-
-	if (!want_init_on_alloc(mem_flags))
-		memset(block, POOL_POISON_ALLOCATED, pool->size);
-}
-
-static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
-{
+	char *next;
 	struct dma_page *page;
+	struct dma_pool *pool;
 
-	list_for_each_entry(page, &pool->page_list, page_list) {
-		if (dma < page->dma)
-			continue;
-		if ((dma - page->dma) < pool->allocation)
-			return page;
-	}
-	return NULL;
-}
+	next = buf;
+	size = PAGE_SIZE;
 
-static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
-	struct dma_block *block = pool->next_block;
-	struct dma_page *page;
+	temp = scnprintf(next, size, "poolinfo - 0.1\n");
+	size -= temp;
+	next += temp;
 
-	page = pool_find_page(pool, dma);
-	if (!page) {
-		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
-			__func__, pool->name, vaddr, &dma);
-		return true;
-	}
+	mutex_lock(&pools_lock);
+	list_for_each_entry(pool, &dev->dma_pools, pools) {
+		unsigned pages = 0;
+		unsigned blocks = 0;
 
-	while (block) {
-		if (block != vaddr) {
-			block = block->next_block;
-			continue;
+		spin_lock_irq(&pool->lock);
+		list_for_each_entry(page, &pool->page_list, page_list) {
+			pages++;
+			blocks += page->in_use;
 		}
-		dev_err(pool->dev, "%s %s, dma %pad already free\n",
-			__func__, pool->name, &dma);
-		return true;
-	}
-
-	memset(vaddr, POOL_POISON_FREED, pool->size);
-	return false;
-}
-
-static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
-{
-	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-}
-#else
-static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
-			     gfp_t mem_flags)
-{
-}
-
-static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
-	if (want_init_on_free())
-		memset(vaddr, 0, pool->size);
-	return false;
-}
-
-static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
-{
-}
-#endif
-
-static struct dma_block *pool_block_pop(struct dma_pool *pool)
-{
-	struct dma_block *block = pool->next_block;
+		spin_unlock_irq(&pool->lock);
 
-	if (block) {
-		pool->next_block = block->next_block;
-		pool->nr_active++;
+		/* per-pool info, no real statistics yet */
+		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
+				 pool->name, blocks,
+				 pages * (pool->allocation / pool->size),
+				 pool->size, pages);
+		size -= temp;
+		next += temp;
 	}
-	return block;
-}
+	mutex_unlock(&pools_lock);
 
-static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
-			    dma_addr_t dma)
-{
-	block->dma = dma;
-	block->next_block = pool->next_block;
-	pool->next_block = block;
+	return PAGE_SIZE - size;
 }
 
+static DEVICE_ATTR_RO(pools);
 
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
@@ -228,18 +134,15 @@ struct dma_pool *dma_pool_create(const c
 	size_t allocation;
 	bool empty = false;
 
-	if (!dev)
-		return NULL;
-
 	if (align == 0)
 		align = 1;
 	else if (align & (align - 1))
 		return NULL;
 
-	if (size == 0 || size > INT_MAX)
+	if (size == 0)
 		return NULL;
-	if (size < sizeof(struct dma_block))
-		size = sizeof(struct dma_block);
+	else if (size < 4)
+		size = 4;
 
 	size = ALIGN(size, align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
@@ -249,9 +152,7 @@ struct dma_pool *dma_pool_create(const c
 	else if ((boundary < size) || (boundary & (boundary - 1)))
 		return NULL;
 
-	boundary = min(boundary, allocation);
-
-	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
+	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
 	if (!retval)
 		return retval;
 
@@ -264,6 +165,7 @@ struct dma_pool *dma_pool_create(const c
 	retval->size = size;
 	retval->boundary = boundary;
 	retval->allocation = allocation;
+
 	INIT_LIST_HEAD(&retval->pools);
 
 	/*
@@ -300,36 +202,18 @@ EXPORT_SYMBOL(dma_pool_create);
 
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
-	unsigned int next_boundary = pool->boundary, offset = 0;
-	struct dma_block *block, *first = NULL, *last = NULL;
+	unsigned int offset = 0;
+	unsigned int next_boundary = pool->boundary;
 
-	pool_init_page(pool, page);
-	while (offset + pool->size <= pool->allocation) {
-		if (offset + pool->size > next_boundary) {
-			offset = next_boundary;
+	do {
+		unsigned int next = offset + pool->size;
+		if (unlikely((next + pool->size) >= next_boundary)) {
+			next = next_boundary;
 			next_boundary += pool->boundary;
-			continue;
 		}
-
-		block = page->vaddr + offset;
-		block->dma = page->dma + offset;
-		block->next_block = NULL;
-
-		if (last)
-			last->next_block = block;
-		else
-			first = block;
-		last = block;
-
-		offset += pool->size;
-		pool->nr_blocks++;
-	}
-
-	last->next_block = pool->next_block;
-	pool->next_block = first;
-
-	list_add(&page->page_list, &pool->page_list);
-	pool->nr_pages++;
+		*(int *)(page->vaddr + offset) = next;
+		offset = next;
+	} while (offset < pool->allocation);
 }
 
 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
@@ -339,17 +223,39 @@ static struct dma_page *pool_alloc_page(
 	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-
 	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
 					 &page->dma, mem_flags);
-	if (!page->vaddr) {
+	if (page->vaddr) {
+#ifdef	DMAPOOL_DEBUG
+		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+		pool_initialise_page(pool, page);
+		page->in_use = 0;
+		page->offset = 0;
+	} else {
 		kfree(page);
-		return NULL;
+		page = NULL;
 	}
-
 	return page;
 }
 
+static inline bool is_page_busy(struct dma_page *page)
+{
+	return page->in_use != 0;
+}
+
+static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
+{
+	dma_addr_t dma = page->dma;
+
+#ifdef	DMAPOOL_DEBUG
+	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	list_del(&page->page_list);
+	kfree(page);
+}
+
 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
@@ -361,7 +267,7 @@ static struct dma_page *pool_alloc_page(
 void dma_pool_destroy(struct dma_pool *pool)
 {
 	struct dma_page *page, *tmp;
-	bool empty = false, busy = false;
+	bool empty = false;
 
 	if (unlikely(!pool))
 		return;
@@ -369,24 +275,26 @@ void dma_pool_destroy(struct dma_pool *p
 	mutex_lock(&pools_reg_lock);
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
-	if (list_empty(&pool->dev->dma_pools))
+	if (pool->dev && list_empty(&pool->dev->dma_pools))
 		empty = true;
 	mutex_unlock(&pools_lock);
 	if (empty)
 		device_remove_file(pool->dev, &dev_attr_pools);
 	mutex_unlock(&pools_reg_lock);
 
-	if (pool->nr_active) {
-		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
-		busy = true;
-	}
-
 	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
-		if (!busy)
-			dma_free_coherent(pool->dev, pool->allocation,
-					  page->vaddr, page->dma);
-		list_del(&page->page_list);
-		kfree(page);
+		if (is_page_busy(page)) {
+			if (pool->dev)
+				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
+					pool->name, page->vaddr);
+			else
+				pr_err("%s %s, %p busy\n", __func__,
+				       pool->name, page->vaddr);
+			/* leak the still-in-use consistent memory */
+			list_del(&page->page_list);
+			kfree(page);
+		} else
+			pool_free_page(pool, page);
 	}
 
 	kfree(pool);
@@ -406,40 +314,84 @@ EXPORT_SYMBOL(dma_pool_destroy);
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle)
 {
-	struct dma_block *block;
-	struct dma_page *page;
 	unsigned long flags;
+	struct dma_page *page;
+	size_t offset;
+	void *retval;
 
 	might_alloc(mem_flags);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	block = pool_block_pop(pool);
-	if (!block) {
-		/*
-		 * pool_alloc_page() might sleep, so temporarily drop
-		 * &pool->lock
-		 */
-		spin_unlock_irqrestore(&pool->lock, flags);
+	list_for_each_entry(page, &pool->page_list, page_list) {
+		if (page->offset < pool->allocation)
+			goto ready;
+	}
 
-		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
-		if (!page)
-			return NULL;
+	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+	spin_unlock_irqrestore(&pool->lock, flags);
 
-		spin_lock_irqsave(&pool->lock, flags);
-		pool_initialise_page(pool, page);
-		block = pool_block_pop(pool);
+	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
+	if (!page)
+		return NULL;
+
+	spin_lock_irqsave(&pool->lock, flags);
+
+	list_add(&page->page_list, &pool->page_list);
+ ready:
+	page->in_use++;
+	offset = page->offset;
+	page->offset = *(int *)(page->vaddr + offset);
+	retval = offset + page->vaddr;
+	*handle = offset + page->dma;
+#ifdef	DMAPOOL_DEBUG
+	{
+		int i;
+		u8 *data = retval;
+		/* page->offset is stored in first 4 bytes */
+		for (i = sizeof(page->offset); i < pool->size; i++) {
+			if (data[i] == POOL_POISON_FREED)
+				continue;
+			if (pool->dev)
+				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+					__func__, pool->name, retval);
+			else
+				pr_err("%s %s, %p (corrupted)\n",
+					__func__, pool->name, retval);
+
+			/*
+			 * Dump the first 4 bytes even if they are not
+			 * POOL_POISON_FREED
+			 */
+			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
+					data, pool->size, 1);
+			break;
+		}
 	}
+	if (!(mem_flags & __GFP_ZERO))
+		memset(retval, POOL_POISON_ALLOCATED, pool->size);
+#endif
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	*handle = block->dma;
-	pool_check_block(pool, block, mem_flags);
 	if (want_init_on_alloc(mem_flags))
-		memset(block, 0, pool->size);
+		memset(retval, 0, pool->size);
 
-	return block;
+	return retval;
 }
 EXPORT_SYMBOL(dma_pool_alloc);
 
+static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
+{
+	struct dma_page *page;
+
+	list_for_each_entry(page, &pool->page_list, page_list) {
+		if (dma < page->dma)
+			continue;
+		if ((dma - page->dma) < pool->allocation)
+			return page;
+	}
+	return NULL;
+}
+
 /**
  * dma_pool_free - put block back into dma pool
  * @pool: the dma pool holding the block
@@ -451,14 +403,65 @@ EXPORT_SYMBOL(dma_pool_alloc);
  */
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
-	struct dma_block *block = vaddr;
+	struct dma_page *page;
 	unsigned long flags;
+	unsigned int offset;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	if (!pool_block_err(pool, vaddr, dma)) {
-		pool_block_push(pool, block, dma);
-		pool->nr_active--;
+	page = pool_find_page(pool, dma);
+	if (!page) {
+		spin_unlock_irqrestore(&pool->lock, flags);
+		if (pool->dev)
+			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+				__func__, pool->name, vaddr, &dma);
+		else
+			pr_err("%s %s, %p/%pad (bad dma)\n",
+			       __func__, pool->name, vaddr, &dma);
+		return;
+	}
+
+	offset = vaddr - page->vaddr;
+	if (want_init_on_free())
+		memset(vaddr, 0, pool->size);
+#ifdef	DMAPOOL_DEBUG
+	if ((dma - page->dma) != offset) {
+		spin_unlock_irqrestore(&pool->lock, flags);
+		if (pool->dev)
+			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+				__func__, pool->name, vaddr, &dma);
+		else
+			pr_err("%s %s, %p (bad vaddr)/%pad\n",
+			       __func__, pool->name, vaddr, &dma);
+		return;
 	}
+	{
+		unsigned int chain = page->offset;
+		while (chain < pool->allocation) {
+			if (chain != offset) {
+				chain = *(int *)(page->vaddr + chain);
+				continue;
+			}
+			spin_unlock_irqrestore(&pool->lock, flags);
+			if (pool->dev)
+				dev_err(pool->dev, "%s %s, dma %pad already free\n",
+					__func__, pool->name, &dma);
+			else
+				pr_err("%s %s, dma %pad already free\n",
+				       __func__, pool->name, &dma);
+			return;
+		}
+	}
+	memset(vaddr, POOL_POISON_FREED, pool->size);
+#endif
+
+	page->in_use--;
+	*(int *)vaddr = page->offset;
+	page->offset = offset;
+	/*
+	 * Resist a temptation to do
+	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
+	 * Better have a few empty pages hang around.
+	 */
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 EXPORT_SYMBOL(dma_pool_free);
_
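
A note on the free-block encoding visible on the '+' side of the hunks
above: free blocks within a page are chained by offset.  The first 4 bytes
of each free block hold the offset of the next free block (which is why
dma_pool_create() rounds size up to at least 4), page->offset points at the
head of the chain, and an offset >= pool->allocation marks the end.
dma_pool_alloc() pops the head; dma_pool_free() locates the owning page
with the linear pool_find_page() walk and pushes the block's offset back
on, LIFO.  Below is a minimal standalone model of that encoding (userspace
C with hypothetical toy_* names; it deliberately omits the kernel's
locking, 'boundary' handling, DMA mapping, and debug poisoning, and
assumes allocation is a multiple of size):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the in-page freelist: each free block's first
 * 4 bytes store the offset of the next free block.  An offset
 * equal to 'allocation' means the chain is exhausted.
 */
struct toy_page {
	char *vaddr;
	unsigned int offset;	/* head of the free chain */
	unsigned int in_use;
};

static void toy_init(struct toy_page *page, unsigned int size,
		     unsigned int allocation)
{
	unsigned int offset = 0;

	page->vaddr = malloc(allocation);
	page->offset = 0;
	page->in_use = 0;
	do {	/* chain every block to its successor */
		unsigned int next = offset + size;
		*(unsigned int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < allocation);
}

static void *toy_alloc(struct toy_page *page, unsigned int allocation)
{
	unsigned int offset = page->offset;

	if (offset >= allocation)
		return NULL;	/* no free block left in this page */
	/* pop: the free block stored its successor's offset */
	page->offset = *(unsigned int *)(page->vaddr + offset);
	page->in_use++;
	return page->vaddr + offset;
}

static void toy_free(struct toy_page *page, void *vaddr)
{
	unsigned int offset = (unsigned int)((char *)vaddr - page->vaddr);

	/* push: store the old head in the block, point the head at it */
	*(unsigned int *)vaddr = page->offset;
	page->offset = offset;
	page->in_use--;
}

int main(void)
{
	struct toy_page page;
	void *a, *b;

	toy_init(&page, 64, 4096);
	a = toy_alloc(&page, 4096);
	b = toy_alloc(&page, 4096);
	printf("a=%p b=%p in_use=%u\n", a, b, page.in_use);
	toy_free(&page, a);
	a = toy_alloc(&page, 4096);	/* LIFO: same block comes back */
	printf("a=%p in_use=%u\n", a, page.in_use);
	free(page.vaddr);
	return 0;
}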

Patches currently in -mm which might be from kbusch@xxxxxxxxxx are