This patch adds RNIC creation and destruction. If RNIC creation fails, only
RAW QPs are supported, as they are served by the Ethernet driver.

Signed-off-by: Konstantin Taranov <kotaranov@xxxxxxxxxxxxxxxxxxx>
---
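Notes (informational only, ignored by git am): below is a minimal sketch, not
part of this series, of how a QP-creation path could use the new
rnic_is_enabled() helper to fall back to RAW-QP-only operation when the RNIC
adapter was not created. The attr and mdev variables and the enclosing
function are assumed context, not code from this patch:

	/* RAW QPs are served by the Ethernet driver and keep working even
	 * if mana_ib_gd_create_rnic_adapter() failed; any other QP type
	 * needs the RNIC adapter handle.
	 */
	if (attr->qp_type != IB_QPT_RAW_PACKET && !rnic_is_enabled(mdev))
		return -EOPNOTSUPP;
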
 drivers/infiniband/hw/mana/main.c    | 31 +++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mana/mana_ib.h | 29 +++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index c64d569..33cd69e 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -581,14 +581,31 @@ static void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
 
 void mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
 {
+	struct mana_rnic_create_adapter_resp resp = {};
+	struct mana_rnic_create_adapter_req req = {};
+	struct gdma_context *gc = mdev_to_gc(mdev);
 	int err;
 
+	mdev->adapter_handle = INVALID_MANA_HANDLE;
+
 	err = mana_ib_create_eqs(mdev);
 	if (err) {
 		ibdev_err(&mdev->ib_dev, "Failed to create EQs for RNIC err %d", err);
 		goto cleanup;
 	}
 
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
+	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.notify_eq_id = mdev->fatal_err_eq->id;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+		goto cleanup;
+	}
+	mdev->adapter_handle = resp.adapter;
+
 	return;
 
 cleanup:
@@ -599,5 +616,19 @@ void mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
 
 void mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
 {
+	struct mana_rnic_destroy_adapter_resp resp = {};
+	struct mana_rnic_destroy_adapter_req req = {};
+	struct gdma_context *gc;
+
+	if (!rnic_is_enabled(mdev))
+		return;
+
+	gc = mdev_to_gc(mdev);
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+
+	mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	mdev->adapter_handle = INVALID_MANA_HANDLE;
 	mana_ib_destroy_eqs(mdev);
 }
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index a4b94ee..96454cf 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -48,6 +48,7 @@ struct mana_ib_adapter_caps {
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
+	mana_handle_t adapter_handle;
 	struct gdma_queue *fatal_err_eq;
 	struct mana_ib_adapter_caps adapter_caps;
 };
@@ -115,6 +116,8 @@ struct mana_ib_rwq_ind_table {
 
 enum mana_ib_command_code {
 	MANA_IB_GET_ADAPTER_CAP = 0x30001,
+	MANA_IB_CREATE_ADAPTER = 0x30002,
+	MANA_IB_DESTROY_ADAPTER = 0x30003,
 };
 
 struct mana_ib_query_adapter_caps_req {
@@ -143,6 +146,32 @@ struct mana_ib_query_adapter_caps_resp {
 	u32 max_inline_data_size;
 }; /* HW Data */
 
+struct mana_rnic_create_adapter_req {
+	struct gdma_req_hdr hdr;
+	u32 notify_eq_id;
+	u32 reserved;
+	u64 feature_flags;
+}; /* HW Data */
+
+struct mana_rnic_create_adapter_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+static inline bool rnic_is_enabled(struct mana_ib_dev *mdev)
+{
+	return mdev->adapter_handle != INVALID_MANA_HANDLE;
+}
+
 static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
 {
 	return mdev->gdma_dev->gdma_context;
-- 
1.8.3.1