On Fri, Oct 01, 2021 at 10:52:07AM -0700, Mike Kravetz wrote:
> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
> +		      unsigned long count)
> +{
> +	unsigned long pfn;
> +
> +	if (!cma || !pages)
> +		return false;
> +
> +	pfn = page_to_pfn(pages);
> +
> +	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> +		return false;
> +
> +	return true;
> +}
> +
>  /**
>   * cma_release() - release allocated pages
>   * @cma:   Contiguous memory region for which the allocation is performed.
> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
>  {
>  	unsigned long pfn;
>  
> -	if (!cma || !pages)
> +	if (!cma_pages_valid(cma, pages, count))
>  		return false;
>  
>  	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>  
> -	pfn = page_to_pfn(pages);
>  
> -	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> -		return false;
> -

After this patch, the timing of the debug statement changes, as we back
off earlier: pages that fail the validity check no longer reach the
pr_debug() call. You might want to point that out in the changelog in
case someone wonders why.

-- 
Oscar Salvador
SUSE Labs
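
[Editor's illustration: a rough sketch of how cma_release() reads once the
quoted hunk is applied, reconstructed from the diff above; the tail of the
function is elided here. It shows why the pr_debug() is only reached for
pages that pass cma_pages_valid().]

bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	/*
	 * The NULL and range checks now live in cma_pages_valid(), so
	 * invalid arguments bail out here, before any debug output.
	 */
	if (!cma_pages_valid(cma, pages, count))
		return false;

	/* Only reached for pages that lie inside this CMA area. */
	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	/* ... remainder of cma_release() is unchanged by this hunk ... */
}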