In the following patches, the total count of reserved CMA pages is needed
to initialize ZONE_CMA. This is a preparation step for that: introduce
cma_total_pages(), which returns the number of CMA pages reserved within
a given PFN range.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
 include/linux/cma.h |    9 +++++++++
 mm/cma.c            |   17 +++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index a93438b..aeaea90 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,6 +15,9 @@
 
 struct cma;
 
+#ifdef CONFIG_CMA
+extern unsigned long cma_total_pages(unsigned long node_start_pfn,
+				unsigned long node_end_pfn);
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
@@ -27,4 +30,10 @@ extern int cma_init_reserved_mem(phys_addr_t base,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
+
+#else
+static inline unsigned long cma_total_pages(unsigned long node_start_pfn,
+				unsigned long node_end_pfn) { return 0; }
+
+#endif /* CONFIG_CMA */
 #endif

diff --git a/mm/cma.c b/mm/cma.c
index c35ceef..f817b91 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -40,6 +40,23 @@ struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
 
+unsigned long cma_total_pages(unsigned long node_start_pfn,
+				unsigned long node_end_pfn)
+{
+	int i;
+	unsigned long total_pages = 0;
+
+	for (i = 0; i < cma_area_count; i++) {
+		struct cma *cma = &cma_areas[i];
+
+		if (node_start_pfn <= cma->base_pfn &&
+			cma->base_pfn < node_end_pfn)
+			total_pages += cma->count;
+	}
+
+	return total_pages;
+}
+
 phys_addr_t cma_get_base(struct cma *cma)
 {
 	return PFN_PHYS(cma->base_pfn);
-- 
1.7.9.5
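
(Not part of the patch: a minimal sketch of the intended usage. The
commit message says later patches in the series use this total to
initialize ZONE_CMA; a per-node caller could look something like the
following. The helper name init_zone_cma_size() and the zones_size[]
layout are illustrative assumptions, not code from this series.)

/*
 * Hypothetical caller in the node init path: size ZONE_CMA from the
 * CMA areas whose base PFN falls inside this node's PFN range.
 */
static void __init init_zone_cma_size(unsigned long node_start_pfn,
				unsigned long node_end_pfn,
				unsigned long *zones_size)
{
	/* Number of pages reserved by CMA within [start, end) */
	unsigned long cma_pages = cma_total_pages(node_start_pfn,
						node_end_pfn);

	/* With !CONFIG_CMA the stub returns 0 and ZONE_CMA stays empty */
	zones_size[ZONE_CMA] = cma_pages;
}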