To represent the size of a single allocation, dmapool currently uses
'unsigned int' in some places and 'size_t' in other places.  Standardize
on 'unsigned int' to reduce overhead, but use 'size_t' when counting all
the blocks in the entire pool.

Signed-off-by: Tony Battersby <tonyb@xxxxxxxxxxxxxxx>
---

Changes since v5: moved 'size' down in struct dma_pool to be near the
other 32-bit ints.  blks_per_alloc will fill out the hole in a later
patch.

This puts an upper bound on 'size' of INT_MAX to avoid overflowing the
following comparison in pool_initialise_page():

	unsigned int offset = 0;
	unsigned int next = offset + pool->size;
	if (unlikely((next + pool->size) > ...

'boundary' is passed in as a size_t but gets stored as an unsigned int.
'boundary' values >= 'allocation' do not have any effect, so clipping
'boundary' to 'allocation' keeps it within the range of unsigned int
without affecting anything else.  A few lines above (not in the diff)
you can see that if 'boundary' is passed in as 0 then it is set to
'allocation', so it is nothing new.  For reference, here is the
relevant code after being patched:

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

 mm/dmapool.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index 1829291f5d70..f85d6bde2205 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -43,10 +43,10 @@
 struct dma_pool {		/* the pool */
 	struct list_head page_list;
 	spinlock_t lock;
-	size_t size;
 	struct device *dev;
-	size_t allocation;
-	size_t boundary;
+	unsigned int size;
+	unsigned int allocation;
+	unsigned int boundary;
 	char name[32];
 	struct list_head pools;
 };
@@ -73,7 +73,7 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 	mutex_lock(&pools_lock);
 	list_for_each_entry(pool, &dev->dma_pools, pools) {
 		unsigned pages = 0;
-		unsigned blocks = 0;
+		size_t blocks = 0;
 
 		spin_lock_irq(&pool->lock);
 		list_for_each_entry(page, &pool->page_list, page_list) {
@@ -83,9 +83,10 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 		spin_unlock_irq(&pool->lock);
 
 		/* per-pool info, no real statistics yet */
-		size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n",
+		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
 				      pool->name, blocks,
-				      pages * (pool->allocation / pool->size),
+				      (size_t) pages *
+				      (pool->allocation / pool->size),
 				      pool->size, pages);
 	}
 	mutex_unlock(&pools_lock);
@@ -130,7 +131,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	else if (align & (align - 1))
 		return NULL;
 
-	if (size == 0)
+	if (size == 0 || size > INT_MAX)
 		return NULL;
 	else if (size < 4)
 		size = 4;
@@ -143,6 +144,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	else if ((boundary < size) || (boundary & (boundary - 1)))
 		return NULL;
 
+	boundary = min(boundary, allocation);
+
 	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
 	if (!retval)
 		return retval;
@@ -303,7 +306,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 {
 	unsigned long flags;
 	struct dma_page *page;
-	size_t offset;
+	unsigned int offset;
 	void *retval;
 
 	might_alloc(mem_flags);
-- 
2.25.1
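
P.S. In case a standalone illustration of the overflow being guarded
against is useful to reviewers: the sketch below is not kernel code and
is not part of the patch; it just mimics the unsigned int arithmetic
from pool_initialise_page() with made-up values (and assumes a 32-bit
unsigned int) to show why 'size' is now capped at INT_MAX:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical size just above INT_MAX, which the patched
		 * dma_pool_create() now rejects */
		unsigned int size = (unsigned int)INT_MAX + 2;	/* 0x80000001 */
		unsigned int allocation = size;	/* allocation >= size */
		unsigned int offset = 0;
		unsigned int next = offset + size;

		/* next + size is 0x100000002, which wraps to 2, so the
		 * comparison wrongly concludes a second block still fits */
		if ((next + size) > allocation)
			printf("second block does not fit (expected)\n");
		else
			printf("wrapped: check passed although 2 * size > allocation\n");

		return 0;
	}

With 'size' capped at INT_MAX, 'next + size' in this example stays at
or below 0xfffffffe and cannot wrap.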