From: Oded Gabbay <oded.gabbay@xxxxxxxxx>

[ Upstream commit d724170160f800fa8dfd3c0cdebb8b093570b504 ]

This patch fixes a bug in the mmu code that checks whether we can use
huge page mappings for host pages.

The code is supposed to enable huge page mappings only if ALL DMA
addresses are aligned to 2MB AND the number of pages in each DMA chunk
is a multiple of the number of pages in 2MB. However, the code ignored
the first requirement for the first DMA chunk.

This patch fixes that issue by making sure the requirement of address
alignment is validated against all DMA chunks.

Signed-off-by: Oded Gabbay <oded.gabbay@xxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 drivers/misc/habanalabs/memory.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index fadaf557603f..425442819d31 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -675,11 +675,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
 		total_npages += npages;
 
-		if (first) {
-			first = false;
-			dma_addr &= PAGE_MASK_2MB;
-		}
-
 		if ((npages % PGS_IN_2MB_PAGE) ||
 				(dma_addr & (PAGE_SIZE_2MB - 1)))
 			is_huge_page_opt = false;
@@ -704,7 +699,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	phys_pg_pack->total_size = total_npages * page_size;
 
 	j = 0;
-	first = true;
 	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
 		npages = get_sg_info(sg, &dma_addr);
 
-- 
2.20.1
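
[Editor's illustration, not part of the patch: a minimal sketch of the
invariant the fix enforces, written as a standalone helper. Huge (2MB)
mappings are allowed only if EVERY DMA chunk starts on a 2MB boundary
AND spans a whole multiple of 2MB. PAGE_SIZE_2MB, PGS_IN_2MB_PAGE and
get_sg_info() are assumed to match their definitions in
drivers/misc/habanalabs/memory.c; the helper name can_use_huge_pages()
is hypothetical.]

#include <linux/scatterlist.h>

static bool can_use_huge_pages(struct sg_table *sgt)
{
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u32 npages;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* Chunk must span a whole number of 2MB pages ... */
		if (npages % PGS_IN_2MB_PAGE)
			return false;

		/* ... and start on a 2MB boundary, the first chunk included */
		if (dma_addr & (PAGE_SIZE_2MB - 1))
			return false;
	}

	return true;
}

[With the special case removed, the first chunk's address is no longer
masked with PAGE_MASK_2MB before the check, so an unaligned first chunk
now correctly disables the huge-page optimization.]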