Re: [PATCH rdma-next v1] RDMA/mlx5: Clean WQE page fault handler

On Mon, Mar 04, 2019 at 03:39:34PM -0400, Jason Gunthorpe wrote:
> On Mon, Feb 25, 2019 at 08:56:14AM +0200, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@xxxxxxxxxxxx>
> >
> > Refactor the page fault handler to be more readable and extensible.
> > This cleanup was triggered by the error reported below: the code
> > structure made it unclear to automatic tools that such a flow is not
> > possible in real life, because "requestor != NULL" means that
> > "qp != NULL" too.
> >
> >     drivers/infiniband/hw/mlx5/odp.c:1254 mlx5_ib_mr_wqe_pfault_handler()
> >     error: we previously assumed 'qp' could be null (see line 1230)
> >
> > Fixes: 08100fad5cac ("IB/mlx5: Add ODP SRQ support")
> > Reported-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
> > Reviewed-by: Moni Shoua <monis@xxxxxxxxxxxx>
> > Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
> > ---
> > Changes v0->v1:
> >  * Instead of a simple fix, rewrote the whole function to be clearer.
> > ---
> >  drivers/infiniband/hw/mlx5/odp.c | 121 ++++++++++++++-----------------
> >  1 file changed, 55 insertions(+), 66 deletions(-)
> >
> > --
> > 2.19.1
> >
> > diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> > index c20bfc41ecf1..31caced2e8c2 100644
> > --- a/drivers/infiniband/hw/mlx5/odp.c
> > +++ b/drivers/infiniband/hw/mlx5/odp.c
> > @@ -919,7 +919,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
> >  				   struct mlx5_pagefault *pfault,
> >  				   void *wqe,
> >  				   void *wqe_end, u32 *bytes_mapped,
> > -				   u32 *total_wqe_bytes, int receive_queue)
> > +				   u32 *total_wqe_bytes, bool receive_queue)
> >  {
> >  	int ret = 0, npages = 0;
> >  	u64 io_virt;
> > @@ -1199,17 +1199,15 @@ static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
> >  static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
> >  					  struct mlx5_pagefault *pfault)
> >  {
> > -	int ret;
> > -	void *wqe, *wqe_end;
> > +	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
> > +	u16 wqe_index = pfault->wqe.wqe_index;
> > +	void *wqe = NULL, *wqe_end = NULL;
> >  	u32 bytes_mapped, total_wqe_bytes;
> > -	char *buffer = NULL;
> > +	struct mlx5_core_rsc_common *res;
> >  	int resume_with_error = 1;
> > -	u16 wqe_index = pfault->wqe.wqe_index;
> > -	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
> > -	struct mlx5_core_rsc_common *res = NULL;
> > -	struct mlx5_ib_qp *qp = NULL;
> > -	struct mlx5_ib_srq *srq = NULL;
> > +	struct mlx5_ib_qp *qp;
> >  	size_t bytes_copied;
> > +	int ret;
> >
> >  	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
> >  	if (!res) {
> > @@ -1217,87 +1215,78 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
> >  		return;
> >  	}
> >
> > -	switch (res->res) {
> > -	case MLX5_RES_QP:
> > -		qp = res_to_qp(res);
> > -		break;
> > -	case MLX5_RES_SRQ:
> > -	case MLX5_RES_XSRQ:
> > -		srq = res_to_srq(res);
> > -		break;
> > -	default:
> > -		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
> > +	if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
> > +	    res->res != MLX5_RES_XSRQ) {
> > +		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
> > +			    pfault->type);
> >  		goto resolve_page_fault;
> >  	}
> >
> > -	buffer = (char *)__get_free_page(GFP_KERNEL);
> > -	if (!buffer) {
> > +	wqe = (void *)__get_free_page(GFP_KERNEL);
> > +	if (!wqe) {
> >  		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
> >  		goto resolve_page_fault;
> >  	}
> >
> > -	if (qp) {
> > -		if (requestor) {
> > -			ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index,
> > -					buffer, PAGE_SIZE,
> > -					&bytes_copied);
> > -		} else {
> > -			ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index,
> > -					buffer, PAGE_SIZE,
> > -					&bytes_copied);
> > -		}
> > -	} else {
> > -		ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index,
> > -						buffer, PAGE_SIZE,
> > -						&bytes_copied);
> > +	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
> > +	if (qp && sq) {
> > +		ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
> > +					       &bytes_copied);
> > +		if (ret)
> > +			goto read_user;
> > +		ret = mlx5_ib_mr_initiator_pfault_handler(
> > +			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
> >  	}
> >
> > -	if (ret) {
> > -		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
> > -			    ret, wqe_index, pfault->token);
> > -		goto resolve_page_fault;
> > +	if (qp && !sq) {
>
> Why not
>
>   else if (..)
>
> In each of these branches? Only one can run, right?

I don't like "else if ()" construction and see it is as dangerous, due
to the fact that it hides code flow. I intentionally avoid "else if" in
my patches.
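
To make the point concrete, here is a rough standalone sketch of the two
styles (this is not the mlx5 code; the struct and the handle_* helpers are
made up for illustration only):

/* Illustrative sketch: "else if" chain vs. the sequential, fully-guarded
 * ifs used in the v1 patch, where every branch spells out its complete
 * condition. */
#include <stdbool.h>
#include <stdio.h>

struct fault_ctx {
	bool has_qp;	/* resource resolved to a QP */
	bool sq;	/* requestor/send-queue style flag */
};

static void handle_sq(void)  { puts("send queue fault"); }
static void handle_rq(void)  { puts("receive queue fault"); }
static void handle_srq(void) { puts("SRQ fault"); }

/* Style A: "else if" chain -- each branch's real precondition depends on
 * the branches above it having been tested and rejected. */
static void dispatch_chained(const struct fault_ctx *c)
{
	if (c->has_qp && c->sq)
		handle_sq();
	else if (c->has_qp)
		handle_rq();
	else
		handle_srq();
}

/* Style B: sequential ifs -- every branch carries its full condition, so
 * each block can be read (and analyzed) in isolation. */
static void dispatch_sequential(const struct fault_ctx *c)
{
	if (c->has_qp && c->sq)
		handle_sq();
	if (c->has_qp && !c->sq)
		handle_rq();
	if (!c->has_qp)
		handle_srq();
}

int main(void)
{
	struct fault_ctx c = { .has_qp = true, .sq = false };

	dispatch_chained(&c);	 /* prints "receive queue fault" */
	dispatch_sequential(&c); /* same result, conditions fully spelled out */
	return 0;
}

Both dispatch the same way; the sequential form just never relies on the
reader (or a static checker) tracking which earlier conditions failed.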

Thanks

>
> Jason


