[PATCH] mm/cma: convert cma_alloc() to return folio

Change cma_alloc() to return a struct folio instead of a struct page,
and convert all callers. Callers that still need a struct page take it
from the returned folio. This further increases folio usage in
mm/hugetlb, where alloc_gigantic_folio() can now use the returned folio
directly instead of converting it with page_folio().

Signed-off-by: Jianfeng Wang <jianfeng.w.wang@xxxxxxxxxx>
---
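Notes: as a sketch of the caller-side pattern this conversion applies
(example_cma_alloc_pages() below is illustrative, not one of the
converted callers), code that still hands out a struct page takes it
from the returned folio and keeps the NULL check explicit:

	#include <linux/cma.h>
	#include <linux/mm.h>

	static struct page *example_cma_alloc_pages(struct cma *cma,
						    unsigned long count,
						    unsigned int align)
	{
		struct folio *folio;

		/* cma_alloc() now returns the folio of the first page, or NULL */
		folio = cma_alloc(cma, count, align, false);
		if (!folio)
			return NULL;

		/* legacy consumers keep operating on struct page */
		return &folio->page;
	}

cma_release() is unchanged and still takes the first struct page of the
range, so the matching free path passes &folio->page together with the
original count.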
 arch/powerpc/kvm/book3s_hv_builtin.c |  7 +++++--
 drivers/dma-buf/heaps/cma_heap.c     | 18 +++++++++---------
 drivers/s390/char/vmcp.c             |  8 ++++----
 include/linux/cma.h                  |  4 +++-
 kernel/dma/contiguous.c              |  9 +++++++--
 mm/cma.c                             |  4 ++--
 mm/cma_debug.c                       |  8 ++++----
 mm/hugetlb.c                         | 13 +++++++------
 8 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fa0e3a22cac0..f6cd8a55bb1d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -57,10 +57,13 @@ early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
 struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
+	struct folio *folio;
+
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
-			 false);
+	folio = cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+			  false);
+	return folio ? &folio->page : NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 4a63567e93ba..526f7de50759 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -283,7 +283,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	size_t size = PAGE_ALIGN(len);
 	pgoff_t pagecount = size >> PAGE_SHIFT;
 	unsigned long align = get_order(size);
-	struct page *cma_pages;
+	struct folio *cma_folio;
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
 	pgoff_t pg;
@@ -299,14 +299,14 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
-	if (!cma_pages)
+	cma_folio = cma_alloc(cma_heap->cma, pagecount, align, false);
+	if (!cma_folio)
 		goto free_buffer;
 
 	/* Clear the cma pages */
-	if (PageHighMem(cma_pages)) {
+	if (folio_test_highmem(cma_folio)) {
 		unsigned long nr_clear_pages = pagecount;
-		struct page *page = cma_pages;
+		struct page *page = &cma_folio->page;
 
 		while (nr_clear_pages > 0) {
 			void *vaddr = kmap_atomic(page);
@@ -323,7 +323,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 			nr_clear_pages--;
 		}
 	} else {
-		memset(page_address(cma_pages), 0, size);
+		memset(folio_address(cma_folio), 0, size);
 	}
 
 	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
@@ -333,9 +333,9 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	}
 
 	for (pg = 0; pg < pagecount; pg++)
-		buffer->pages[pg] = &cma_pages[pg];
+		buffer->pages[pg] = folio_page(cma_folio, pg);
 
-	buffer->cma_pages = cma_pages;
+	buffer->cma_pages = &cma_folio->page;
 	buffer->heap = cma_heap;
 	buffer->pagecount = pagecount;
 
@@ -355,7 +355,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 free_pages:
 	kfree(buffer->pages);
 free_cma:
-	cma_release(cma_heap->cma, cma_pages, pagecount);
+	cma_release(cma_heap->cma, &cma_folio->page, pagecount);
 free_buffer:
 	kfree(buffer);
 
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index eb0520a9d4af..b23147e4dba4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -59,7 +59,7 @@ void __init vmcp_cma_reserve(void)
 
 static void vmcp_response_alloc(struct vmcp_session *session)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	int nr_pages, order;
 
 	order = get_order(session->bufsize);
@@ -70,9 +70,9 @@ static void vmcp_response_alloc(struct vmcp_session *session)
 	 * anymore the system won't work anyway.
 	 */
 	if (order > 2)
-		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
-	if (page) {
-		session->response = (char *)page_to_virt(page);
+		folio = cma_alloc(vmcp_cma, nr_pages, 0, false);
+	if (folio) {
+		session->response = (char *)page_to_virt(&folio->page);
 		session->cma_alloc = 1;
 		return;
 	}
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 63873b93deaa..4c6234787fd8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -25,6 +25,8 @@
 #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
 
 struct cma;
+struct folio;
+struct page;
 
 extern unsigned long totalcma_pages;
 extern phys_addr_t cma_get_base(const struct cma *cma);
@@ -48,7 +50,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
 					struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
+extern struct folio *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
 extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index f005c66f378c..af00d96fba7b 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -310,10 +310,13 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 				       unsigned int align, bool no_warn)
 {
+	struct folio *folio;
+
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+	folio = cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+	return folio ? &folio->page : NULL;
 }
 
 /**
@@ -334,9 +337,11 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
 {
+	struct folio *folio;
 	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);
 
-	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
+	folio = cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
+	return folio ? &folio->page : NULL;
 }
 
 /**
diff --git a/mm/cma.c b/mm/cma.c
index 7c09c47e530b..44db112b8aa5 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -426,7 +426,7 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, unsigned long count,
+struct folio *cma_alloc(struct cma *cma, unsigned long count,
 		       unsigned int align, bool no_warn)
 {
 	unsigned long mask, offset;
@@ -525,7 +525,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 			cma_sysfs_account_fail_pages(cma, count);
 	}
 
-	return page;
+	return page ? page_folio(page) : NULL;
 }
 
 bool cma_pages_valid(struct cma *cma, const struct page *pages,
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 602fff89b15f..703a6b93d964 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -131,19 +131,19 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
 static int cma_alloc_mem(struct cma *cma, int count)
 {
 	struct cma_mem *mem;
-	struct page *p;
+	struct folio *folio;
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0, false);
-	if (!p) {
+	folio = cma_alloc(cma, count, 0, false);
+	if (!folio) {
 		kfree(mem);
 		return -ENOMEM;
 	}
 
-	mem->p = p;
+	mem->p = &folio->page;
 	mem->n = count;
 
 	spin_lock(&cma_mem_head_lock);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d4..22a3741e6e04 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1571,13 +1571,14 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
 
 #ifdef CONFIG_CMA
 	{
+		struct folio *folio;
 		int node;
 
 		if (hugetlb_cma[nid]) {
-			page = cma_alloc(hugetlb_cma[nid], nr_pages,
+			folio = cma_alloc(hugetlb_cma[nid], nr_pages,
 					huge_page_order(h), true);
-			if (page)
-				return page_folio(page);
+			if (folio)
+				return folio;
 		}
 
 		if (!(gfp_mask & __GFP_THISNODE)) {
@@ -1585,10 +1586,10 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
 				if (node == nid || !hugetlb_cma[node])
 					continue;
 
-				page = cma_alloc(hugetlb_cma[node], nr_pages,
+				folio = cma_alloc(hugetlb_cma[node], nr_pages,
 						huge_page_order(h), true);
-				if (page)
-					return page_folio(page);
+				if (folio)
+					return folio;
 			}
 		}
 	}
-- 
2.42.1