From: Mark Bloch <markb@xxxxxxxxxxxx>

A user can create a QP which is allowed to accept loopback traffic, but
that's not enough on its own: loopback must also be enabled on the vport.
Currently, vport loopback is enabled only when more than one user is
using the IB device. Update the logic to also consider whether a QP which
supports loopback was created, and if so, enable vport loopback even when
there is only a single user.

Signed-off-by: Mark Bloch <markb@xxxxxxxxxxxx>
Reviewed-by: Yishai Hadas <yishaih@xxxxxxxxxxxx>
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/main.c    | 40 +++++++++++++++++++++++++-----------
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  4 ++++
 drivers/infiniband/hw/mlx5/qp.c      | 34 +++++++++++++++++++++++-------
 3 files changed, 59 insertions(+), 19 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b64861ba2c42..7d5fcf76466f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1571,28 +1571,44 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
 	mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
 }
 
-static int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	int err = 0;
 
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td++;
-
-	if (dev->lb.user_td == 2)
-		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+	if (td)
+		dev->lb.user_td++;
+	if (qp)
+		dev->lb.qps++;
+
+	if (dev->lb.user_td == 2 ||
+	    dev->lb.qps == 1) {
+		if (!dev->lb.enabled) {
+			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+			dev->lb.enabled = true;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 
 	return err;
 }
 
-static void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev)
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td--;
-
-	if (dev->lb.user_td < 2)
-		mlx5_nic_vport_update_local_lb(dev->mdev, false);
+	if (td)
+		dev->lb.user_td--;
+	if (qp)
+		dev->lb.qps--;
+
+	if (dev->lb.user_td == 1 &&
+	    dev->lb.qps == 0) {
+		if (dev->lb.enabled) {
+			mlx5_nic_vport_update_local_lb(dev->mdev, false);
+			dev->lb.enabled = false;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 }
 
@@ -1613,7 +1629,7 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return err;
 
-	return mlx5_ib_enable_lb(dev);
+	return mlx5_ib_enable_lb(dev, true, false);
 }
 
 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
@@ -1628,7 +1644,7 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return;
 
-	mlx5_ib_disable_lb(dev);
+	mlx5_ib_disable_lb(dev, true, false);
 }
 
 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b258adb93097..99c853c56d31 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -882,6 +882,8 @@ struct mlx5_ib_lb_state {
 	/* protect the user_td */
 	struct mutex		mutex;
 	u32			user_td;
+	int			qps;
+	bool			enabled;
 };
 
 struct mlx5_ib_dev {
@@ -1040,6 +1042,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
 int mlx5_ib_destroy_srq(struct ib_srq *srq);
 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 428e417e01da..1f318a47040c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1256,6 +1256,16 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
 		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
 }
 
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_rq *rq,
+				      u32 qp_flags_en)
+{
+	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
+	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+}
+
 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				    struct mlx5_ib_rq *rq, u32 tdn,
 				    u32 *qp_flags_en)
@@ -1293,17 +1303,17 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			destroy_raw_packet_qp_tir(dev, rq, 0);
+	}
 	kvfree(in);
 
 	return err;
 }
 
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_rq *rq)
-{
-	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				u32 *in, size_t inlen,
 				struct ib_pd *pd)
@@ -1372,7 +1382,7 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 
 	if (qp->rq.wqe_cnt) {
-		destroy_raw_packet_qp_tir(dev, rq);
+		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en);
 		destroy_raw_packet_qp_rq(dev, rq);
 	}
 
@@ -1396,6 +1406,9 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 
 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
+	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
 	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
 }
 
@@ -1606,6 +1619,13 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 create_tir:
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+	}
+
 	if (err)
 		goto err;

-- 
2.14.4
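
For context, "a QP which supports loopback" here is one created with the
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC/_MC flags in its create_qp udata; with
this patch, the first such QP bumps dev->lb.qps to 1 and turns on vport
loopback even for a single user. What follows is a minimal user-space
sketch (not part of the patch) of creating such a QP, assuming
rdma-core's mlx5dv direct-verbs API (mlx5dv_create_qp() with
MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC); the helper name, CQ wiring
and queue sizes are illustrative only:

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Create a raw packet QP whose TIR accepts self unicast loopback
 * traffic. Error handling is trimmed for brevity.
 */
static struct ibv_qp *create_self_lb_qp(struct ibv_context *ctx,
					struct ibv_pd *pd,
					struct ibv_cq *cq)
{
	struct ibv_qp_init_attr_ex attr = {
		.qp_type = IBV_QPT_RAW_PACKET,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr = 1,
			.max_recv_wr = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.comp_mask = IBV_QP_INIT_ATTR_PD,
		.pd = pd,
	};
	struct mlx5dv_qp_init_attr dv = {
		.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
		/* Translated by the kernel into
		 * MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC in qp_flags_en.
		 */
		.create_flags = MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC,
	};

	return mlx5dv_create_qp(ctx, &attr, &dv);
}

Destroying the QP decrements dev->lb.qps again; once it drops back to
zero and at most one user context still holds a transport domain,
mlx5_ib_disable_lb() switches vport loopback off.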