From: Danit Goldberg <danitg@xxxxxxxxxxxx>

After a page is allocated for the wqe pointer, the pointer is shifted
while the WQE is processed, so it is necessary to keep the original
address and free that at the end.

Fixes: 0f51427bd097 ("RDMA/mlx5: Cleanup WQE page fault handler")
Signed-off-by: Danit Goldberg <danitg@xxxxxxxxxxxx>
Reviewed-by: Yishai Hadas <yishaih@xxxxxxxxxxxx>
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/odp.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 0a59912a4cef..416e750808eb 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1205,7 +1205,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 {
 	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
 	u16 wqe_index = pfault->wqe.wqe_index;
-	void *wqe = NULL, *wqe_end = NULL;
+	void *wqe, *wqe_start = NULL, *wqe_end = NULL;
 	u32 bytes_mapped, total_wqe_bytes;
 	struct mlx5_core_rsc_common *res;
 	int resume_with_error = 1;
@@ -1226,12 +1226,13 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 		goto resolve_page_fault;
 	}
 
-	wqe = (void *)__get_free_page(GFP_KERNEL);
-	if (!wqe) {
+	wqe_start = (void *)__get_free_page(GFP_KERNEL);
+	if (!wqe_start) {
 		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
 		goto resolve_page_fault;
 	}
 
+	wqe = wqe_start;
 	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
 	if (qp && sq) {
 		ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
@@ -1286,7 +1287,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 		    pfault->wqe.wq_num, resume_with_error,
 		    pfault->type);
 	mlx5_core_res_put(res);
-	free_page((unsigned long)wqe);
+	free_page((unsigned long)wqe_start);
 }
 
 static int pages_in_range(u64 address, u32 length)
-- 
2.20.1
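
As an aside, below is a minimal userspace sketch of the pattern the patch
fixes, for illustration only (not driver code): malloc()/free() stand in
for __get_free_page()/free_page(), and the buffer size and the 64-byte
"segment" walk are invented. The working pointer is advanced during
parsing, so the original allocation address is kept separately and is the
one that gets freed, exactly as wqe_start is kept for free_page() above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 4096

int main(void)
{
	char *buf_start, *cur;
	size_t consumed = 0;

	/* Keep the original allocation address in buf_start. */
	buf_start = malloc(BUF_SIZE);
	if (!buf_start)
		return 1;
	memset(buf_start, 0, BUF_SIZE);

	/* Work on a separate cursor that is free to move. */
	cur = buf_start;

	/* Pretend to consume three 64-byte "WQE segments". */
	while (consumed < 3 * 64) {
		cur += 64;		/* cursor shifts away from the base */
		consumed += 64;
	}

	printf("parsed %zu bytes, cursor offset %td\n",
	       consumed, cur - buf_start);

	/* Free the original pointer, not the shifted cursor. */
	free(buf_start);
	return 0;
}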