> Subject: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
>
> From: Zhu Yanjun <yanjun.zhu@xxxxxxxxx>
>
> Split the source codes related with CQ handling into a new function.
>
> Signed-off-by: Zhu Yanjun <yanjun.zhu@xxxxxxxxx>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
>  1 file changed, 35 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index e90eba73c396..b4befbafb830 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>          return err;
>  }
>
> +static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
> +                                     struct irdma_mr *iwmr,
> +                                     struct ib_udata *udata,
> +                                     struct irdma_mem_reg_req req)

I would keep the order of these API args the same as in
irdma_reg_user_mr_type_qp.

> +{
> +        int err = 0;

No need to initialize err.

> +        u8 shadow_pgcnt = 1;
> +        bool use_pbles = false;

No need to initialize use_pbles.

> +        struct irdma_ucontext *ucontext;
> +        unsigned long flags;
> +        u32 total;
> +        struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +        if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> +                shadow_pgcnt = 0;
> +        total = req.cq_pages + shadow_pgcnt;
> +        if (total > iwmr->page_cnt)
> +                return -EINVAL;
> +
> +        use_pbles = (req.cq_pages > 1);
> +        err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +        if (err)
> +                return err;
> +
> +        ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +                                             ibucontext);
> +        spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> +        list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> +        iwpbl->on_list = true;
> +        spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
> +
> +        return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  {
>  #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>          struct irdma_device *iwdev = to_iwdev(pd->device);
> -        struct irdma_ucontext *ucontext;
> -        struct irdma_pbl *iwpbl;
>          struct irdma_mr *iwmr;
>          struct ib_umem *region;
>          struct irdma_mem_reg_req req;
> -        u32 total;
> -        u8 shadow_pgcnt = 1;
> -        bool use_pbles = false;
> -        unsigned long flags;
>          int err = -EINVAL;

Do we need to initialize err here too? Probably separate from this patch,
but it could be cleaned up (rough idea sketched at the end of this mail).
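To pull my comments on the new helper together, something like the below is
what I had in mind. Untested sketch only; the body is the same as in your
patch, just with the argument order following irdma_reg_user_mr_type_qp and
the err/use_pbles initializers dropped, since both are assigned before first
use:

static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
                                     struct irdma_device *iwdev,
                                     struct ib_udata *udata,
                                     struct irdma_mr *iwmr)
{
        int err;
        u8 shadow_pgcnt = 1;
        bool use_pbles;
        struct irdma_ucontext *ucontext;
        unsigned long flags;
        u32 total;
        struct irdma_pbl *iwpbl = &iwmr->iwpbl;

        if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
                shadow_pgcnt = 0;
        total = req.cq_pages + shadow_pgcnt;
        if (total > iwmr->page_cnt)
                return -EINVAL;

        /* use_pbles and err are assigned here before first use */
        use_pbles = (req.cq_pages > 1);
        err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
        if (err)
                return err;

        ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
                                             ibucontext);
        spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
        list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
        iwpbl->on_list = true;
        spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);

        return err;
}

The call site in the IRDMA_MEMREG_TYPE_CQ case would then read
err = irdma_reg_user_mr_type_cq(req, iwdev, udata, iwmr); which mirrors the
IRDMA_MEMREG_TYPE_QP case.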
>
>          if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
> @@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>                  return (struct ib_mr *)iwmr;
>          }
>
> -        iwpbl = &iwmr->iwpbl;
> -
>          switch (req.reg_type) {
>          case IRDMA_MEMREG_TYPE_QP:
>                  err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
> @@ -2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>
>                  break;
>          case IRDMA_MEMREG_TYPE_CQ:
> -                if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> -                        shadow_pgcnt = 0;
> -                total = req.cq_pages + shadow_pgcnt;
> -                if (total > iwmr->page_cnt) {
> -                        err = -EINVAL;
> -                        goto error;
> -                }
> -
> -                use_pbles = (req.cq_pages > 1);
> -                err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +                err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
>                  if (err)
>                          goto error;
> -
> -                ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> -                                                     ibucontext);
> -                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> -                list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> -                iwpbl->on_list = true;
> -                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>                  break;
>          case IRDMA_MEMREG_TYPE_MEM:
>                  err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
> --
> 2.31.1
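On the pre-existing err = -EINVAL in irdma_reg_user_mr mentioned above: as a
separate cleanup it could probably become a plain declaration, with err set
explicitly on whichever paths currently rely on the initializer before
jumping to the error label. Rough, untested idea only; I have not audited
every path and am assuming the default case of the reg_type switch is one of
them:

        int err;
        ...
        default:
                err = -EINVAL; /* no longer implied by the initializer */
                goto error;
        }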