Re: [PATCH rdma-next v1 3/3] RDMA/mlx5: Use restrack allocation PD scheme

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Tue, Jan 29, 2019 at 10:38:35AM -0600, Steve Wise wrote:
>
> On 1/29/2019 1:25 AM, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@xxxxxxxxxxxx>
> >
> > Adapt mlx5 to share restrack ID instead of local variable.
> >
> > Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
>
>
> Hey Leon, By not doing this for the other drivers, are they losing some
> restrack functionality?  Shouldn't all the drivers be changed?

From a functionality point of view, unconverted drivers will work exactly
as they worked before. The main differences between converted and
unconverted drivers are:
1. Objects which are created by the driver won't be visible to the user through rdmatool.
2. The numbers (PDN, CQN, etc.) presented to the user won't be in sync with those visible to the FW.

Anyway, I'm planning to convert all drivers, but I need acceptance of this series
from Jason/Doug before I invest more time in the other drivers.

Thanks

>
> Steve.
>
>
> > ---
> >  drivers/infiniband/hw/mlx5/main.c    | 37 +++++++++++++++++++++++-----
> >  drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 -
> >  drivers/infiniband/hw/mlx5/mr.c      | 16 ++++++------
> >  drivers/infiniband/hw/mlx5/qp.c      | 21 ++++++++++------
> >  drivers/infiniband/hw/mlx5/srq.c     |  2 +-
> >  5 files changed, 53 insertions(+), 24 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
> > index cde451b8bfdc..0af6f343716b 100644
> > --- a/drivers/infiniband/hw/mlx5/main.c
> > +++ b/drivers/infiniband/hw/mlx5/main.c
> > @@ -2289,7 +2289,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
> >  	int err;
> >  	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
> >  	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
> > -	u16 uid = 0;
> > +	u16 pdn, uid = 0;
> >
> >  	uid = context ? to_mucontext(context)->devx_uid : 0;
> >  	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
> > @@ -2299,17 +2299,28 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
> >  	if (err)
> >  		return err;
> >
> > -	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
> > +	pdn = MLX5_GET(alloc_pd_out, out, pd);
> > +	rdma_rt_set_id(&pd->ibpd.res, pdn);
> > +	err = rdma_restrack_add(&pd->ibpd.res);
> > +	if (err)
> > +		goto err_add;
> > +
> >  	pd->uid = uid;
> >  	if (context) {
> > -		resp.pdn = pd->pdn;
> > +		resp.pdn = pdn;
> >  		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
> > -			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
> > -			return -EFAULT;
> > +			err = -EFAULT;
> > +			goto err_copy;
> >  		}
> >  	}
> >
> >  	return 0;
> > +
> > +err_copy:
> > +	mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pdn, uid);
> > +err_add:
> > +	rdma_restrack_del(&pd->ibpd.res);
> > +	return err;
> >  }
> >
> >  static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
> > @@ -2317,7 +2328,8 @@ static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
> >  	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
> >  	struct mlx5_ib_pd *mpd = to_mpd(pd);
> >
> > -	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
> > +	mlx5_cmd_dealloc_pd(mdev->mdev, rdma_res_to_id(&pd->res), mpd->uid);
> > +	rdma_restrack_del(&pd->res);
> >  }
> >
> >  enum {
> > @@ -4688,9 +4700,20 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
> >  	devr->p0->uobject = NULL;
> >  	atomic_set(&devr->p0->usecnt, 0);
> >
> > +	rdma_rt_set_type(&devr->p0->res, RDMA_RESTRACK_PD);
> > +	rdma_restrack_set_task(&devr->p0->res, KBUILD_MODNAME);
> >  	ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
> >  	if (ret)
> >  		goto error0;
> > +	/*
> > +	 * The restrack object is added as part of mlx5_ib_alloc_pd()
> > +	 * and this part is needed to mark that entry as a kernel object and
> > +	 * make it visible. It can't fail, the entry already exists.
> > +	 *
> > +	 * The same goes for cleanup, mlx5_ib_dealloc_pd() is responsible
> > +	 * to clean it.
> > +	 */
> > +	rdma_restrack_kadd(&devr->p0->res);
> >
> >  	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
> >  	if (IS_ERR(devr->c0)) {
> > @@ -6538,6 +6561,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
> >  	if (!dev)
> >  		return NULL;
> >
> > +	/* FW managed allocations */
> > +	rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_PD, 0, 0);
> >  	dev->mdev = mdev;
> >  	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
> >  			     MLX5_CAP_GEN(mdev, num_vhca_ports));
> > diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
> > index f9817284c7a3..e98b4044b1fa 100644
> > --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
> > +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
> > @@ -143,7 +143,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
> >
> >  struct mlx5_ib_pd {
> >  	struct ib_pd		ibpd;
> > -	u32			pdn;
> >  	u16			uid;
> >  };
> >
> > diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
> > index 705a79cd21da..97a28d59cf47 100644
> > --- a/drivers/infiniband/hw/mlx5/mr.c
> > +++ b/drivers/infiniband/hw/mlx5/mr.c
> > @@ -742,7 +742,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
> >  	MLX5_SET(mkc, mkc, lr, 1);
> >
> >  	MLX5_SET(mkc, mkc, length64, 1);
> > -	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(mkc, mkc, qpn, 0xffffff);
> >  	MLX5_SET64(mkc, mkc, start_addr, 0);
> >
> > @@ -892,7 +892,7 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
> >  	mr->desc_size = sizeof(struct mlx5_mtt);
> >  	mr->mmkey.iova = virt_addr;
> >  	mr->mmkey.size = len;
> > -	mr->mmkey.pd = to_mpd(pd)->pdn;
> > +	mr->mmkey.pd = rdma_res_to_id(&pd->res);
> >
> >  	return mr;
> >  }
> > @@ -1113,7 +1113,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
> >
> >  	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
> >  	MLX5_SET64(mkc, mkc, len, length);
> > -	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
> >  	MLX5_SET(mkc, mkc, translations_octword_size,
> >  		 get_octo_len(virt_addr, length, page_shift));
> > @@ -1192,7 +1192,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
> >  	MLX5_SET(mkc, mkc, lr, 1);
> >
> >  	MLX5_SET64(mkc, mkc, len, length);
> > -	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(mkc, mkc, qpn, 0xffffff);
> >  	MLX5_SET64(mkc, mkc, start_addr,
> >  		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));
> > @@ -1467,7 +1467,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
> >  		mr->access_flags = access_flags;
> >  		mr->mmkey.iova = addr;
> >  		mr->mmkey.size = len;
> > -		mr->mmkey.pd = to_mpd(pd)->pdn;
> > +		mr->mmkey.pd = rdma_res_to_id(&pd->res);
> >
> >  		if (flags & IB_MR_REREG_TRANS) {
> >  			upd_flags = MLX5_IB_UPD_XLT_ADDR;
> > @@ -1645,7 +1645,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
> >  	MLX5_SET(mkc, mkc, free, 1);
> >  	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
> >  	MLX5_SET(mkc, mkc, qpn, 0xffffff);
> > -	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
> >
> >  	if (mr_type == IB_MR_TYPE_MEM_REG) {
> >  		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
> > @@ -1678,7 +1678,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
> >  		}
> >
> >  		/* create mem & wire PSVs */
> > -		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
> > +		err = mlx5_core_create_psv(dev->mdev, rdma_res_to_id(&pd->res),
> >  					   2, psv_index);
> >  		if (err)
> >  			goto err_free_sig;
> > @@ -1776,7 +1776,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
> >
> >  	MLX5_SET(mkc, mkc, free, 1);
> >  	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
> > -	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(mkc, mkc, umr_en, 1);
> >  	MLX5_SET(mkc, mkc, lr, 1);
> >  	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
> > diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
> > index 8cde49de9b2e..94f157c8f546 100644
> > --- a/drivers/infiniband/hw/mlx5/qp.c
> > +++ b/drivers/infiniband/hw/mlx5/qp.c
> > @@ -2136,10 +2136,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
> >  	MLX5_SET(qpc, qpc, st, mlx5_st);
> >  	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
> >
> > -	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
> > -		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
> > -	else
> > +	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
> > +		struct ib_pd *t = pd ? pd : devr->p0;
> > +
> > +		MLX5_SET(qpc, qpc, pd, rdma_res_to_id(&t->res));
> > +	} else {
> >  		MLX5_SET(qpc, qpc, latency_sensitive, 1);
> > +	}
> >
> >
> >  	if (qp->wq_sig)
> > @@ -2529,7 +2532,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
> >  	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
> >  	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
> >  	qp->qp_sub_type = MLX5_IB_QPT_DCT;
> > -	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(dctc, dctc, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
> >  	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
> >  	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
> > @@ -3455,7 +3458,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
> >  	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
> >  		&send_cq, &recv_cq);
> >
> > -	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
> > +	context->flags_pd =
> > +		cpu_to_be32(pd ? rdma_res_to_id(&pd->ibpd.res) :
> > +				 rdma_res_to_id(&dev->devr.p0->res));
> >  	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
> >  	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
> >  	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
> > @@ -4246,7 +4251,7 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
> >
> >  	seg->flags = convert_access(umrwr->access_flags);
> >  	if (umrwr->pd)
> > -		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
> > +		seg->flags_pd = cpu_to_be32(rdma_res_to_id(&umrwr->pd->res));
> >  	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
> >  	    !umrwr->length)
> >  		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
> > @@ -4593,7 +4598,7 @@ static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
> >  {
> >  	const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
> >  	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
> > -	u32 pdn = get_pd(qp)->pdn;
> > +	u32 pdn = rdma_res_to_id(&get_pd(qp)->ibpd.res);
> >  	u32 xlt_size;
> >  	int region_len, ret;
> >
> > @@ -5745,7 +5750,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
> >  			 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
> >  	}
> >  	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
> > -	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
> > +	MLX5_SET(wq, wq, pd, rdma_res_to_id(&pd->res));
> >  	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
> >  	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
> >  	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
> > diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
> > index 22bd774e0b4e..57c7df0dabdf 100644
> > --- a/drivers/infiniband/hw/mlx5/srq.c
> > +++ b/drivers/infiniband/hw/mlx5/srq.c
> > @@ -297,7 +297,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
> >  	else
> >  		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
> >
> > -	in.pd = to_mpd(pd)->pdn;
> > +	in.pd = rdma_res_to_id(&pd->res);
> >  	in.db_record = srq->db.dma;
> >  	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
> >  	kvfree(in.pas);

Attachment: signature.asc
Description: PGP signature


[Index of Archives]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Photo]     [Yosemite News]     [Yosemite Photos]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux