If CONFIG_ZONE_DMA32 is disabled so that DMA memory is selected from the
range above 4GB, the dma_alloc_coherent() call inside
qcom_scm_pas_init_image(), which uses the 32-bit SCM device, fails when
the passed data size is smaller than PAGE_SIZE: the allocation falls back
to the buddy pool, and that allocation fails. Align the size to PAGE_SIZE
before it is passed to dma_alloc_coherent(), so that coherent memory in
the lower 4GB is obtained from the Linux CMA region.

Signed-off-by: Mukesh Ojha <quic_mojha@xxxxxxxxxxx>
---
 drivers/firmware/qcom/qcom_scm.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 029ee5edbea6..6616048f1c33 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -562,6 +562,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 			    struct qcom_scm_pas_metadata *ctx)
 {
+	size_t page_aligned_size;
 	dma_addr_t mdata_phys;
 	void *mdata_buf;
 	int ret;
@@ -579,7 +580,8 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 	 * data blob, so make sure it's physically contiguous, 4K aligned and
 	 * non-cachable to avoid XPU violations.
 	 */
-	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+	page_aligned_size = PAGE_ALIGN(size + PAGE_SIZE);
+	mdata_buf = dma_alloc_coherent(__scm->dev, page_aligned_size, &mdata_phys,
 				       GFP_KERNEL);
 	if (!mdata_buf)
 		return -ENOMEM;
@@ -604,11 +606,11 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 
 out:
 	if (ret < 0 || !ctx) {
-		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+		dma_free_coherent(__scm->dev, page_aligned_size, mdata_buf, mdata_phys);
 	} else if (ctx) {
 		ctx->ptr = mdata_buf;
 		ctx->phys = mdata_phys;
-		ctx->size = size;
+		ctx->size = page_aligned_size;
 	}
 
 	return ret ? : res.result[0];
-- 
2.7.4
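
For reference only (placed after the signature, so it is not applied with
the patch): a minimal user-space sketch of the rounding the change relies
on, assuming 4 KiB pages. PAGE_ALIGN() rounds a value up to the next
multiple of PAGE_SIZE, so the size handed to dma_alloc_coherent() is
always at least one full page.

/*
 * Standalone illustration (not kernel code) of the page rounding used in
 * the patch; PAGE_SIZE and the macros below are redefined here purely
 * for the example and assume 4 KiB pages.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x)	ALIGN((x), PAGE_SIZE)

int main(void)
{
	size_t sizes[] = { 100, 4096, 5000 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %5zu -> PAGE_ALIGN(size + PAGE_SIZE) = %lu\n",
		       sizes[i],
		       (unsigned long)PAGE_ALIGN(sizes[i] + PAGE_SIZE));

	return 0;
}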