On Thu, Feb 12, 2015 at 05:26:47PM -0500, Sasha Levin wrote: > Provides a userspace interface to trigger a CMA allocation. > > Usage: > > echo [pages] > alloc > > This would provide testing/fuzzing access to the CMA allocation paths. > > Signed-off-by: Sasha Levin <sasha.levin@xxxxxxxxxx> > --- > mm/cma.c | 6 ++++++ > mm/cma.h | 4 ++++ > mm/cma_debug.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- > 3 files changed, 64 insertions(+), 2 deletions(-) > > diff --git a/mm/cma.c b/mm/cma.c > index e093b53..9e3d44a 100644 > --- a/mm/cma.c > +++ b/mm/cma.c > @@ -121,6 +121,12 @@ static int __init cma_activate_area(struct cma *cma) > } while (--i); > > mutex_init(&cma->lock); > + > +#ifdef CONFIG_CMA_DEBUGFS > + INIT_HLIST_HEAD(&cma->mem_head); > + spin_lock_init(&cma->mem_head_lock); > +#endif > + > return 0; > > err: > diff --git a/mm/cma.h b/mm/cma.h > index 4141887..1132d73 100644 > --- a/mm/cma.h > +++ b/mm/cma.h > @@ -7,6 +7,10 @@ struct cma { > unsigned long *bitmap; > unsigned int order_per_bit; /* Order of pages represented by one bit */ > struct mutex lock; > +#ifdef CONFIG_CMA_DEBUGFS > + struct hlist_head mem_head; > + spinlock_t mem_head_lock; > +#endif > }; > > extern struct cma cma_areas[MAX_CMA_AREAS]; > diff --git a/mm/cma_debug.c b/mm/cma_debug.c > index 3a25413..5bd6863 100644 > --- a/mm/cma_debug.c > +++ b/mm/cma_debug.c > @@ -7,9 +7,18 @@ > > #include <linux/debugfs.h> > #include <linux/cma.h> > +#include <linux/list.h> > +#include <linux/kernel.h> > +#include <linux/slab.h> > > #include "cma.h" > > +struct cma_mem { > + struct hlist_node node; > + struct page *p; > + unsigned long n; > +}; > + > static struct dentry *cma_debugfs_root; > > static int cma_debugfs_get(void *data, u64 *val) > @@ -23,8 +32,48 @@ static int cma_debugfs_get(void *data, u64 *val) > > DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n"); > > -static void cma_debugfs_add_one(struct cma *cma, int idx) > +static void 
cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem) > +{ > + spin_lock(&cma->mem_head_lock); > + hlist_add_head(&mem->node, &cma->mem_head); > + spin_unlock(&cma->mem_head_lock); > +} > + > +static int cma_alloc_mem(struct cma *cma, int count) > +{ > + struct cma_mem *mem; > + struct page *p; > + > + mem = kzalloc(sizeof(*mem), GFP_KERNEL); > + if (!mem) > + return -ENOMEM; > + > + p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT); The CONFIG_CMA_ALIGNMENT argument has been resurrected here; please change the alignment to 0. Other than that, Acked-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx> Thanks. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx"> email@xxxxxxxxx </a>