The variable cur_qp is used only inside the function mlx4_ib_poll_one(); it is not referenced anywhere outside that function. Move its declaration from mlx4_ib_poll_cq() into mlx4_ib_poll_one() and drop the now-unneeded function parameter. Signed-off-by: Zhu Yanjun <yanjun.zhu@xxxxxxxxxx> --- drivers/infiniband/hw/mlx4/cq.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 43512347b4f0..6cedca504873 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -669,7 +669,6 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, } static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, - struct mlx4_ib_qp **cur_qp, struct ib_wc *wc) { struct mlx4_cqe *cqe; @@ -683,6 +682,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, u32 g_mlpath_rqpn; u16 wqe_ctr; unsigned tail = 0; + struct mlx4_ib_qp *cur_qp = NULL; repoll: cqe = next_cqe_sw(cq); @@ -720,8 +720,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, goto repoll; } - if (!*cur_qp || - (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { + if (!cur_qp || + (be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_QPN_MASK) != cur_qp->mqp.qpn) { /* * We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed @@ -729,10 +729,10 @@ */ mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, be32_to_cpu(cqe->vlan_my_qpn)); - *cur_qp = to_mibqp(mqp); + cur_qp = to_mibqp(mqp); } - wc->qp = &(*cur_qp)->ibqp; + wc->qp = &cur_qp->ibqp; if (wc->qp->qp_type == IB_QPT_XRC_TGT) { u32 srq_num; @@ -744,15 +744,15 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, } if (is_send) { - wq = &(*cur_qp)->sq; - if (!(*cur_qp)->sq_signal_bits) { + wq = &cur_qp->sq; + if (!cur_qp->sq_signal_bits) { wqe_ctr = be16_to_cpu(cqe->wqe_index); wq->tail += (u16) (wqe_ctr - (u16) wq->tail); } wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; - } 
else if ((*cur_qp)->ibqp.srq) { - srq = to_msrq((*cur_qp)->ibqp.srq); + } else if (cur_qp->ibqp.srq) { + srq = to_msrq(cur_qp->ibqp.srq); wqe_ctr = be16_to_cpu(cqe->wqe_index); wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); @@ -762,7 +762,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else { - wq = &(*cur_qp)->rq; + wq = &cur_qp->rq; tail = wq->tail & (wq->wqe_cnt - 1); wc->wr_id = wq->wrid[tail]; ++wq->tail; @@ -847,13 +847,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, } is_eth = (rdma_port_get_link_layer(wc->qp->device, - (*cur_qp)->port) == + cur_qp->port) == IB_LINK_LAYER_ETHERNET); if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { - if ((*cur_qp)->mlx4_ib_qp_type & + if (cur_qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { - use_tunnel_data(*cur_qp, cq, wc, tail, cqe, + use_tunnel_data(cur_qp, cq, wc, tail, cqe, is_eth); return 0; } @@ -891,7 +891,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct mlx4_ib_cq *cq = to_mcq(ibcq); - struct mlx4_ib_qp *cur_qp = NULL; unsigned long flags; int npolled; struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); @@ -903,7 +902,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) } for (npolled = 0; npolled < num_entries; ++npolled) { - if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled)) + if (mlx4_ib_poll_one(cq, wc + npolled)) break; } -- 2.17.1