> From: longli@xxxxxxxxxxxxxxxxx <longli@xxxxxxxxxxxxxxxxx>
> Sent: Wednesday, June 15, 2022 7:07 PM
> ...
> +int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
> +				 mana_handle_t *gdma_region, u64 page_sz)
> +{
> +	...
> +	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
> +				   sizeof(create_resp), &create_resp);
> +	kfree(create_req);
> +
> +	if (err || create_resp.hdr.status) {
> +		ibdev_err(&dev->ib_dev,
> +			  "Failed to create DMA region: %d, 0x%x\n", err,
> +			  create_resp.hdr.status);

'err' can be 0 here while create_resp.hdr.status is non-zero, in which case
we'd return 0 for a failed request. Suggest: if (!err) err = -EPROTO;

> +		goto error;
> +	}
> +	...
> +			err = mana_gd_send_request(gc, add_req_msg_size,
> +						   add_req, sizeof(add_resp),
> +						   &add_resp);
> +			if (!err || add_resp.hdr.status != expected_status) {
> +				ibdev_err(&dev->ib_dev,
> +					  "Failed put DMA pages %u: %d,0x%x\n",
> +					  i, err, add_resp.hdr.status);
> +				err = -EPROTO;

Should we try to undo what has been done by calling GDMA_DESTROY_DMA_REGION?

> +				goto free_req;
> +			}
> +
> +			num_pages_cur += num_pages_to_handle;
> +			num_pages_to_handle =
> +				min_t(size_t, num_pages_total - num_pages_cur,
> +				      max_pgs_add_cmd);
> +			add_req_msg_size = sizeof(*add_req) +
> +					   num_pages_to_handle * sizeof(u64);
> +		}
> +free_req:
> +		kfree(add_req);
> +	}
> +
> +error:
> +	return err;
> +}
> + ...
> +int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
> +			 struct gdma_create_mr_params *mr_params)
> +{
> +	struct gdma_create_mr_response resp = {};
> +	struct gdma_create_mr_request req = {};
> +	struct gdma_dev *mdev = dev->gdma_dev;
> +	struct gdma_context *gc;
> +	int err;
> +
> +	gc = mdev->gdma_context;
> +
> +	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
> +			     sizeof(resp));
> +	req.pd_handle = mr_params->pd_handle;
> +
> +	switch (mr_params->mr_type) {
> +	case GDMA_MR_TYPE_GVA:
> +		req.mr_type = GDMA_MR_TYPE_GVA;
> +		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
> +		req.gva.virtual_address = mr_params->gva.virtual_address;
> +		req.gva.access_flags = mr_params->gva.access_flags;
> +		break;
> +
> +	case GDMA_MR_TYPE_GPA:
> +		req.mr_type = GDMA_MR_TYPE_GPA;
> +		req.gpa.access_flags = mr_params->gpa.access_flags;
> +		break;
> +
> +	case GDMA_MR_TYPE_FMR:
> +		req.mr_type = GDMA_MR_TYPE_FMR;
> +		req.fmr.page_size = mr_params->fmr.page_size;
> +		req.fmr.reserved_pte_count = mr_params->fmr.reserved_pte_count;
> +		break;
> +
> +	default:
> +		ibdev_dbg(&dev->ib_dev,
> +			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
> +			  req.mr_type);

Here req.mr_type is always 0. We should remove the three "req.mr_type = ..."
lines above and add "req.mr_type = mr_params->mr_type;" before the "switch"
line.

> +		err = -EINVAL;
> +		goto error;
> +	}
> +
> +	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> +
> +	if (err || resp.hdr.status) {
> +		ibdev_err(&dev->ib_dev, "Failed to create mr %d, %u", err,
> +			  resp.hdr.status);

Same as above: if (!err) err = -EPROTO;

> +		goto error;
> +	}
> +
> +	mr->ibmr.lkey = resp.lkey;
> +	mr->ibmr.rkey = resp.rkey;
> +	mr->mr_handle = resp.mr_handle;
> +
> +	return 0;
> +error:
> +	return err;
> +}
> + ...
> +static int mana_ib_probe(struct auxiliary_device *adev,
> +			 const struct auxiliary_device_id *id)
> +{
> +	struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
> +	struct gdma_dev *mdev = madev->mdev;
> +	struct mana_context *mc;
> +	struct mana_ib_dev *dev;
> +	int ret = 0;

No need to initialize 'ret' to 0.
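
To make the two mana_ib_gd_create_mr() suggestions above concrete, the switch
and the error check could end up looking roughly like this (untested sketch,
using only the fields visible in the quoted patch):

	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;

	case GDMA_MR_TYPE_GPA:
		req.gpa.access_flags = mr_params->gpa.access_flags;
		break;

	case GDMA_MR_TYPE_FMR:
		req.fmr.page_size = mr_params->fmr.page_size;
		req.fmr.reserved_pte_count = mr_params->fmr.reserved_pte_count;
		break;

	default:
		/* req.mr_type now reports the caller's (invalid) value */
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		err = -EINVAL;
		goto error;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		ibdev_err(&dev->ib_dev, "Failed to create mr %d, %u", err,
			  resp.hdr.status);
		/* the request went through but the device reported failure */
		if (!err)
			err = -EPROTO;
		goto error;
	}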
> +int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
> +{
> +	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
> +	struct ib_device *ibdev = ibmr->device;
> +	struct mana_ib_dev *dev;
> +	int err;
> +
> +	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> +
> +	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
> +	if (err)

Should we return here without calling ib_umem_release() and kfree(mr)?

> +		return err;
> +
> +	if (mr->umem)
> +		ib_umem_release(mr->umem);
> +
> +	kfree(mr);
> +
> +	return 0;
> +}
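
If the answer is that the bookkeeping should be freed even when the GDMA
request fails, the tail of the function could be reshaped along these lines
(sketch only; whether it is safe to free the MR while the device may still
reference it is exactly the open question):

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);

	/*
	 * Sketch: release the umem and free 'mr' regardless of 'err' so the
	 * failure path does not leak them. This assumes the device can no
	 * longer reference the MR once mana_ib_gd_destroy_mr() returns.
	 */
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return err;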