From: Qiao Ma <mqaio@xxxxxxxxxxxxxxxxx>

commit 52b2abef450a78e25d485ac61e32f4ce86a87701 upstream.

If the wq has only one page, we need to check whether the wqe rolls
over the page boundary by comparing end_idx and curr_idx, and then
copy the wqe to the shadow wqe to avoid an out-of-bound access.
This check is already done in hinic_get_wqe(), but is missing from
hinic_read_wqe().

This patch fixes it, and removes the unnecessary MASKED_WQE_IDX().

Fixes: 7dd29ee12865 ("hinic: add sriov feature support")
Signed-off-by: Qiao Ma <mqaio@xxxxxxxxxxxxxxxxx>
Reviewed-by: Xunlei Pang <xlpang@xxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/282817b0e1ae2e28fdf3ed8271a04e77f57bf42e.1651148587.git.mqaio@xxxxxxxxxxxxxxxxx
Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struc
 	/* If we only have one page, still need to get shadown wqe when
 	 * wqe rolling-over page
 	 */
-	if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+	if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
 		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

 		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(stru

 	*cons_idx = curr_cons_idx;

-	if (curr_pg != end_pg) {
+	/* If we only have one page, still need to get shadown wqe when
+	 * wqe rolling-over page
+	 */
+	if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
 		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

 		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
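
For readers unfamiliar with the mechanism being patched, here is a
minimal, self-contained C sketch of why "end_idx < curr_idx" signals a
wrap on a single-page queue and why the entry must then be served from
a shadow buffer. This is NOT the driver code; the names (DEMO_Q_DEPTH,
demo_read_entry, etc.) and the layout are hypothetical simplifications
of the ring-buffer idea the commit message describes.

	/* Hypothetical sketch, not hinic driver code: a ring of fixed-size
	 * basic blocks living in a single page.  When a multi-block entry
	 * straddles the end of the page, it is not contiguous in memory,
	 * so we stitch it together in a shadow buffer before returning it.
	 */
	#include <stdio.h>
	#include <string.h>

	#define DEMO_Q_DEPTH   8     /* blocks per page, power of two */
	#define DEMO_BLOCK_SZ  4     /* bytes per basic block         */

	static unsigned char page[DEMO_Q_DEPTH * DEMO_BLOCK_SZ];   /* the one page   */
	static unsigned char shadow[DEMO_Q_DEPTH * DEMO_BLOCK_SZ]; /* shadow buffer  */

	static void *demo_read_entry(unsigned int idx, unsigned int num)
	{
		unsigned int curr_idx = idx & (DEMO_Q_DEPTH - 1);
		unsigned int end_idx  = (idx + num - 1) & (DEMO_Q_DEPTH - 1);

		if (end_idx < curr_idx) {
			/* Masked end index wrapped past the page end: copy the
			 * tail of the page, then the head, into the shadow
			 * buffer so the caller sees one contiguous entry.
			 */
			unsigned int head = (DEMO_Q_DEPTH - curr_idx) * DEMO_BLOCK_SZ;

			memcpy(shadow, &page[curr_idx * DEMO_BLOCK_SZ], head);
			memcpy(&shadow[head], page, num * DEMO_BLOCK_SZ - head);
			return shadow;
		}

		return &page[curr_idx * DEMO_BLOCK_SZ]; /* contiguous, no copy */
	}

	int main(void)
	{
		/* 3 blocks starting at slot 6 of an 8-slot page wrap to slot 0 */
		printf("%s\n", demo_read_entry(6, 3) == shadow
		       ? "wrapped: served from shadow"
		       : "contiguous: served in place");
		return 0;
	}

In these simplified terms, hinic_get_wqe() already performed the
"end_idx < curr_idx" wrap test before handing out a pointer, while
hinic_read_wqe() only compared pages, so on a one-page wq a wrapping
read could walk off the end of the page; the hunk above adds the same
test on the consumer side.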