From: Shiraz Saleem <shiraz.saleem@xxxxxxxxx>

Introduce support for 64-byte CQEs in GEN3 devices. Additionally,
implement GEN3-specific CQE opcode decoding.

Signed-off-by: Shiraz Saleem <shiraz.saleem@xxxxxxxxx>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@xxxxxxxxx>
---
 drivers/infiniband/hw/irdma/verbs.c | 19 +++++++++++++++----
 drivers/infiniband/hw/irdma/verbs.h | 13 +++++++++++++
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 70652ab..593dbbd 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2114,6 +2114,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	unsigned long flags;
 	int err_code;
 	int entries = attr->cqe;
+	bool cqe_64byte_ena;
 
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
@@ -2137,6 +2138,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	info.dev = dev;
 	ukinfo->cq_size = max(entries, 4);
 	ukinfo->cq_id = cq_num;
+	cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
+			 true : false;
+	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
 	if (attr->comp_vector < rf->ceqs_count)
 		info.ceq_id = attr->comp_vector;
@@ -2212,11 +2216,14 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 		}
 
 		entries++;
-		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 			entries *= 2;
 		ukinfo->cq_size = entries;
 
-		rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+		if (cqe_64byte_ena)
+			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+		else
+			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
 		iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
 		iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
 						   iwcq->kmem.size,
@@ -3774,8 +3781,12 @@ static void irdma_process_cqe(struct ib_wc *entry,
 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
 		set_ib_wc_op_sq(cq_poll_info, entry);
 	} else {
-		set_ib_wc_op_rq(cq_poll_info, entry,
-				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+		if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+			set_ib_wc_op_rq(cq_poll_info, entry,
+					qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+					true : false);
+		else
+			set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
 		    cq_poll_info->stag_invalid_set) {
 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index cfa140b..fcb163c 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -267,6 +267,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
 	}
 }
 
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+					 struct ib_wc *entry)
+{
+	switch (info->op_type) {
+	case IRDMA_OP_TYPE_RDMA_WRITE:
+	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		break;
+	default:
+		entry->opcode = IB_WC_RECV;
+	}
+}
+
 static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
 				   struct ib_wc *entry, bool send_imm_support)
 {
-- 
1.8.3.1