+ dmapool-rename-fields-in-dma_page.patch added to -mm tree

The patch titled
     Subject: mm/dmapool.c: rename fields in struct dma_page
has been added to the -mm tree.  Its filename is
     dmapool-rename-fields-in-dma_page.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/dmapool-rename-fields-in-dma_page.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/dmapool-rename-fields-in-dma_page.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days.

------------------------------------------------------
From: Tony Battersby <tonyb@xxxxxxxxxxxxxxx>
Subject: mm/dmapool.c: rename fields in struct dma_page

Rename fields in 'struct dma_page' in preparation for moving them into
'struct page'.  No functional changes.

in_use -> dma_in_use
offset -> dma_free_off

Link: http://lkml.kernel.org/r/4ac76051-74fc-0a70-4d17-7618823d24c3@xxxxxxxxxxxxxxx
Signed-off-by: Tony Battersby <tonyb@xxxxxxxxxxxxxxx>
Acked-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Andy Shevchenko <andy.shevchenko@xxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: John Garry <john.garry@xxxxxxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/dmapool.c |   34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

--- a/mm/dmapool.c~dmapool-rename-fields-in-dma_page
+++ a/mm/dmapool.c
@@ -65,8 +65,8 @@ struct dma_page {		/* cacheable header f
 	struct list_head dma_list;
 	void *vaddr;
 	dma_addr_t dma;
-	unsigned int in_use;
-	unsigned int offset;
+	unsigned int dma_in_use;
+	unsigned int dma_free_off;
 };
 
 static DEFINE_MUTEX(pools_lock);
@@ -101,7 +101,7 @@ show_pools(struct device *dev, struct de
 					    &pool->page_list[list_idx],
 					    dma_list) {
 				pages++;
-				blocks += page->in_use;
+				blocks += page->dma_in_use;
 			}
 		}
 		spin_unlock_irq(&pool->lock);
@@ -248,8 +248,8 @@ static struct dma_page *pool_alloc_page(
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
 		pool_initialise_page(pool, page);
-		page->in_use = 0;
-		page->offset = 0;
+		page->dma_in_use = 0;
+		page->dma_free_off = 0;
 	} else {
 		kfree(page);
 		page = NULL;
@@ -259,7 +259,7 @@ static struct dma_page *pool_alloc_page(
 
 static inline bool is_page_busy(struct dma_page *page)
 {
-	return page->in_use != 0;
+	return page->dma_in_use != 0;
 }
 
 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
@@ -362,10 +362,10 @@ void *dma_pool_alloc(struct dma_pool *po
 
 	list_add(&page->dma_list, &pool->page_list[POOL_AVAIL_IDX]);
  ready:
-	page->in_use++;
-	offset = page->offset;
-	page->offset = *(int *)(page->vaddr + offset);
-	if (page->offset >= pool->allocation)
+	page->dma_in_use++;
+	offset = page->dma_free_off;
+	page->dma_free_off = *(int *)(page->vaddr + offset);
+	if (page->dma_free_off >= pool->allocation)
 		/* Move page from the "available" list to the "full" list. */
 		list_move_tail(&page->dma_list,
 			       &pool->page_list[POOL_FULL_IDX]);
@@ -375,8 +375,8 @@ void *dma_pool_alloc(struct dma_pool *po
 	{
 		int i;
 		u8 *data = retval;
-		/* page->offset is stored in first 4 bytes */
-		for (i = sizeof(page->offset); i < pool->size; i++) {
+		/* page->dma_free_off is stored in first 4 bytes */
+		for (i = sizeof(page->dma_free_off); i < pool->size; i++) {
 			if (data[i] == POOL_POISON_FREED)
 				continue;
 			dev_err(pool->dev,
@@ -458,7 +458,7 @@ void dma_pool_free(struct dma_pool *pool
 		return;
 	}
 	{
-		unsigned int chain = page->offset;
+		unsigned int chain = page->dma_free_off;
 		while (chain < pool->allocation) {
 			if (chain != offset) {
 				chain = *(int *)(page->vaddr + chain);
@@ -474,12 +474,12 @@ void dma_pool_free(struct dma_pool *pool
 	memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif
 
-	page->in_use--;
-	if (page->offset >= pool->allocation)
+	page->dma_in_use--;
+	if (page->dma_free_off >= pool->allocation)
 		/* Move page from the "full" list to the "available" list. */
 		list_move(&page->dma_list, &pool->page_list[POOL_AVAIL_IDX]);
-	*(int *)vaddr = page->offset;
-	page->offset = offset;
+	*(int *)vaddr = page->dma_free_off;
+	page->dma_free_off = offset;
 	/*
 	 * Resist a temptation to do
 	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
_
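
For readers unfamiliar with dmapool internals, the two renamed fields
implement the pool page's embedded free list: dma_free_off holds the offset
of the first free block in the page, and the first 4 bytes of each free
block hold the offset of the next one, with any offset >= pool->allocation
terminating the chain (see dma_pool_alloc() and dma_pool_free() in the diff
above).  The standalone sketch below is illustrative only and is not part
of the patch; names such as demo_page and demo_alloc are made up, and a
plain malloc()ed buffer stands in for the DMA-coherent page.

	/* Illustrative userspace sketch of dmapool's offset-chained free
	 * list.  Not part of the patch; demo_* names are hypothetical. */
	#include <stdio.h>
	#include <stdlib.h>

	#define ALLOCATION 4096		/* pool->allocation (page size) */
	#define BLOCK_SIZE 64		/* pool->size (block size) */

	struct demo_page {
		void *vaddr;
		unsigned int dma_in_use;	/* blocks currently handed out */
		unsigned int dma_free_off;	/* offset of first free block */
	};

	static void demo_init(struct demo_page *page)
	{
		unsigned int off = 0;

		page->vaddr = malloc(ALLOCATION);
		page->dma_in_use = 0;
		page->dma_free_off = 0;
		/* Each free block's first 4 bytes hold the offset of the next
		 * free block; an offset >= ALLOCATION terminates the chain. */
		while (off < ALLOCATION) {
			unsigned int next = off + BLOCK_SIZE;

			*(int *)((char *)page->vaddr + off) = next;
			off = next;
		}
	}

	static void *demo_alloc(struct demo_page *page)
	{
		unsigned int offset;

		if (page->dma_free_off >= ALLOCATION)
			return NULL;		/* page is full */
		page->dma_in_use++;
		offset = page->dma_free_off;
		page->dma_free_off = *(int *)((char *)page->vaddr + offset);
		return (char *)page->vaddr + offset;
	}

	static void demo_free(struct demo_page *page, void *vaddr)
	{
		/* Push the block back onto the front of the free chain. */
		page->dma_in_use--;
		*(int *)vaddr = page->dma_free_off;
		page->dma_free_off = (char *)vaddr - (char *)page->vaddr;
	}

	int main(void)
	{
		struct demo_page page;
		void *a, *b;

		demo_init(&page);
		a = demo_alloc(&page);
		b = demo_alloc(&page);
		printf("in_use=%u free_off=%u\n", page.dma_in_use, page.dma_free_off);
		demo_free(&page, a);
		demo_free(&page, b);
		printf("in_use=%u free_off=%u\n", page.dma_in_use, page.dma_free_off);
		free(page.vaddr);
		return 0;
	}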

Patches currently in -mm which might be from tonyb@xxxxxxxxxxxxxxx are

dmapool-fix-boundary-comparison.patch
dmapool-remove-checks-for-dev-==-null.patch
dmapool-cleanup-dma_pool_destroy.patch
dmapool-improve-scalability-of-dma_pool_alloc.patch
dmapool-rename-fields-in-dma_page.patch
dmapool-improve-scalability-of-dma_pool_free.patch
dmapool-cleanup-integer-types.patch
dmapool-improve-accuracy-of-debug-statistics.patch
dmapool-debug-prevent-endless-loop-in-case-of-corruption.patch
