This patch adds a memblock_free() call to also release the reserved memblock
region, so that the CMA pages are no longer shown as reserved memory in the
/sys/kernel/debug/memblock/reserved debugfs file.

Signed-off-by: Yalin Wang <yalin.wang@xxxxxxxxxxxxxx>
---
 mm/cma.c        | 6 +++++-
 mm/page_alloc.c | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index c17751c..ec69c69 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -196,7 +196,11 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
 		return -EINVAL;
 
-	/* Reserve memory */
+	/*
+	 * Reserve memory. The region is marked as reserved by the memblock
+	 * allocator; remember to clear that reserved status when these CMA
+	 * pages are freed, see init_cma_reserved_pageblock().
+	 */
 	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18cee0d..fffbb84 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -836,8 +836,8 @@ void __init init_cma_reserved_pageblock(struct page *page)
 		set_page_refcounted(page);
 		__free_pages(page, pageblock_order);
 	}
-
 	adjust_managed_page_count(page, pageblock_nr_pages);
+	memblock_free(page_to_phys(page), pageblock_nr_pages << PAGE_SHIFT);
 }
 #endif
 
-- 
2.1.0
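
For reference, below is a sketch of init_cma_reserved_pageblock() as it would
read with this patch applied. The surrounding body is paraphrased from my
reading of the 3.18-era mm/page_alloc.c and is only illustrative; of the lines
shown, only the memblock_free() call comes from this patch, and the rest may
differ slightly from the tree the patch is based on.

#ifdef CONFIG_CMA
/* Free the whole pageblock to the buddy allocator and mark it MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	/* Clear the PG_reserved flag that memblock-reserved pages boot with. */
	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		/* Hand the block to the buddy allocator in MAX_ORDER-1 chunks. */
		i = pageblock_nr_pages;
		p = page;

		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	/* Account the pages as managed by the buddy allocator. */
	adjust_managed_page_count(page, pageblock_nr_pages);
	/*
	 * Added by this patch: drop the region from memblock's reserved list
	 * so it no longer appears in /sys/kernel/debug/memblock/reserved.
	 */
	memblock_free(page_to_phys(page), pageblock_nr_pages << PAGE_SHIFT);
}
#endif

Note that memblock_free() here only updates memblock's reserved-region
bookkeeping (and hence the debugfs view); the pages themselves have already
been handed to the buddy allocator above, which is why
adjust_managed_page_count() is still needed.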