> Subject: Re: [PATCH -next] RDMA/irdma: Silence the warnings in
> irdma_uk_rdma_write()
>
> On Fri, Aug 11, 2023 at 02:22:15PM +0800, Ruan Jinjie wrote:
> > Remove sparse warnings introduced by commit 272bba19d631 ("RDMA:
> > Remove unnecessary ternary operators"):
> >
> > drivers/infiniband/hw/irdma/uk.c:285:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:285:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:285:24: sparse: got restricted __le32 [usertype] *push_db
> > drivers/infiniband/hw/irdma/uk.c:386:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:386:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:386:24: sparse: got restricted __le32 [usertype] *push_db
> > drivers/infiniband/hw/irdma/uk.c:471:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:471:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:471:24: sparse: got restricted __le32 [usertype] *push_db
> > drivers/infiniband/hw/irdma/uk.c:723:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:723:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:723:24: sparse: got restricted __le32 [usertype] *push_db
> > drivers/infiniband/hw/irdma/uk.c:797:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:797:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:797:24: sparse: got restricted __le32 [usertype] *push_db
> > drivers/infiniband/hw/irdma/uk.c:875:24: sparse: sparse: incorrect type in assignment (different base types) @@ expected bool [usertype] push_wqe:1 @@ got restricted __le32 [usertype] *push_db @@
> > drivers/infiniband/hw/irdma/uk.c:875:24: sparse: expected bool [usertype] push_wqe:1
> > drivers/infiniband/hw/irdma/uk.c:875:24: sparse: got restricted __le32 [usertype] *push_db
> >
> > Signed-off-by: Ruan Jinjie <ruanjinjie@xxxxxxxxxx>
> > Reported-by: kernel test robot <lkp@xxxxxxxxx>
> > Closes: https://lore.kernel.org/oe-kbuild-all/202308110251.BV6BcwUR-lkp@intel.com/
> > ---
> >  drivers/infiniband/hw/irdma/uk.c | 12 ++++++------
> >  1 file changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
> > index a0739503140d..363c67c18924 100644
> > --- a/drivers/infiniband/hw/irdma/uk.c
> > +++ b/drivers/infiniband/hw/irdma/uk.c
> > @@ -282,7 +282,7 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
> >          bool read_fence = false;
> >          u16 quanta;
> >
> > -        info->push_wqe = qp->push_db;
> > +        info->push_wqe = !!qp->push_db;
>
> Shiraz, push_db is declared as a pointer, but I don't see where it is
> allocated. The current code only works because push_db is always one
> entry.
>
>   316 struct irdma_qp_uk {
>   ...
>   324         __le32 *push_db;
>
> and
>
>   156                 set_32bit_val(qp->push_db, 0,
>   157                               FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
>
> Such variable use is not great. Can you please fix it?
> Can Ruan use a "qp->push_mode" check instead of "qp->push_db"?

Hi Leon - Thanks for bringing this to my attention. It seems we don't have
all aspects of a kernel push implementation, and yes, the push DB never
being mapped renders it void. And this code is also in the kernel fast
path :/

Kernel push is not the plan of record at this point, and the patch below
cleans it up. I can send it to the mailing list.

I am fine if we want to spot-fix the sparse issue through Ruan's patch
here.
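For reference, the warning is just sparse objecting to a pointer being
assigned into a one-bit bool field. A minimal reproduction (made-up names,
not driver code):

#include <linux/types.h>        /* __le32, bool */

struct repro {
        __le32 *db;             /* stands in for qp->push_db */
        bool flag:1;            /* stands in for info->push_wqe */
};

static inline void set_flag(struct repro *r)
{
        r->flag = r->db;        /* sparse: incorrect type in assignment */
        r->flag = !!r->db;      /* ok: !! collapses the pointer to 0/1 */
}

So the !! in the hunk above silences sparse without changing behavior.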
qp->push_mode and qp->push_db are not really equivalent. The latter is
constant once the db is mapped; the former is transient per QP, it comes
and goes. But it's all moot anyway as it stands now.
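To spell out the difference, a condensed sketch of the two fields'
lifetimes (hypothetical helpers written against the pre-removal user.h,
not literal driver code):

static void map_push_page(struct irdma_qp_uk *qp, __le32 *page)
{
        qp->push_db = page;     /* written once at map time, then constant */
}

static void on_push_wqe(struct irdma_qp_uk *qp)
{
        qp->push_mode = true;   /* comes: set when a WQE is pushed */
}

static void on_push_drop(struct irdma_qp_uk *qp)
{
        qp->push_mode = false;  /* goes: cleared when a push drop is polled */
}

A qp->push_mode check would track that transient state, while
!!qp->push_db only says whether a push doorbell was ever mapped.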
Or we can just use my removal patch to fix the sparse issue as well.

diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index b8e96992f238..55421a92882c 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1307,7 +1307,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,

         sq_info.wr_id = info->wr_id;
         sq_info.signaled = info->signaled;
-        sq_info.push_wqe = info->push_wqe;

         wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
                                          IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
@@ -1341,7 +1340,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
               FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
               FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
               FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -1352,13 +1350,9 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,

         print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
                              wqe, IRDMA_QP_WQE_MIN_SIZE, false);
-        if (sq_info.push_wqe) {
-                irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
-                                  wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(&qp->qp_uk);
-        }
+
+        if (post_sq)
+                irdma_uk_qp_post_wr(&qp->qp_uk);

         return 0;
 }
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 16ada4c2ced0..bee9609f4be7 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -1017,7 +1017,6 @@ struct irdma_fast_reg_stag_info {
         bool local_fence:1;
         bool read_fence:1;
         bool signaled:1;
-        bool push_wqe:1;
         bool use_hmc_fcn_index:1;
         u8 hmc_fcn_index;
         bool use_pf_rid:1;
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 6f9238c4fe20..e803c30d88d9 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -127,10 +127,7 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
         hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
         sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
         if (sw_sq_head != qp->initial_ring.head) {
-                if (qp->push_dropped) {
-                        writel(qp->qp_id, qp->wqe_alloc_db);
-                        qp->push_dropped = false;
-                } else if (sw_sq_head != hw_sq_tail) {
+                if (sw_sq_head != hw_sq_tail) {
                         if (sw_sq_head > qp->initial_ring.head) {
                                 if (hw_sq_tail >= qp->initial_ring.head &&
                                     hw_sq_tail < sw_sq_head)
@@ -147,38 +144,6 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
 }

 /**
- * irdma_qp_ring_push_db - ring qp doorbell
- * @qp: hw qp ptr
- * @wqe_idx: wqe index
- */
-static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
-{
-        set_32bit_val(qp->push_db, 0,
-                      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
-        qp->initial_ring.head = qp->sq_ring.head;
-        qp->push_mode = true;
-        qp->push_dropped = false;
-}
-
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
-                       u32 wqe_idx, bool post_sq)
-{
-        __le64 *push;
-
-        if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
-            IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
-            !qp->push_mode) {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        } else {
-                push = (__le64 *)((uintptr_t)qp->push_wqe +
-                                  (wqe_idx & 0x7) * 0x20);
-                memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
-                irdma_qp_ring_push_db(qp, wqe_idx);
-        }
-}
-
-/**
  * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
  * @qp: hw qp ptr
  * @wqe_idx: return wqe index
@@ -214,9 +179,6 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
                         irdma_nop_1(qp);
                         IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
                 }
-                if (qp->push_db && info->push_wqe)
-                        irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
-                                          avail_quanta, nop_wqe_idx, true);
         }

         *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
@@ -282,8 +244,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         bool read_fence = false;
         u16 quanta;

-        info->push_wqe = qp->push_db;
-
         op_info = &info->op.rdma_write;
         if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                 return -EINVAL;
@@ -344,7 +304,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
               FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
               FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
               FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -353,12 +312,9 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         dma_wmb(); /* make sure WQE is populated before valid bit is set */

         set_64bit_val(wqe, 24, hdr);
-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -383,8 +339,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         u16 quanta;
         u64 hdr;

-        info->push_wqe = qp->push_db;
-
         op_info = &info->op.rdma_read;
         if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
                 return -EINVAL;
@@ -431,7 +385,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
               FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
               FIELD_PREP(IRDMAQPSQ_OPCODE,
                          (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -440,12 +393,9 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         dma_wmb(); /* make sure WQE is populated before valid bit is set */

         set_64bit_val(wqe, 24, hdr);
-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -468,8 +418,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         bool read_fence = false;
         u16 quanta;

-        info->push_wqe = qp->push_db;
-
         op_info = &info->op.send;
         if (qp->max_sq_frag_cnt < op_info->num_sges)
                 return -EINVAL;
@@ -530,7 +478,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
               FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
               FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
               FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -541,12 +488,9 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
         dma_wmb(); /* make sure WQE is populated before valid bit is set */

         set_64bit_val(wqe, 24, hdr);
-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -720,7 +664,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
         u32 i, total_size = 0;
         u16 quanta;

-        info->push_wqe = qp->push_db;
         op_info = &info->op.rdma_write;

         if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
@@ -750,7 +693,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
               FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
               FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
               FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -767,12 +709,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,

         set_64bit_val(wqe, 24, hdr);

-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -794,7 +732,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
         u32 i, total_size = 0;
         u16 quanta;

-        info->push_wqe = qp->push_db;
         op_info = &info->op.send;

         if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
@@ -827,7 +764,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
                          (info->imm_data_valid ? 1 : 0)) |
               FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
               FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -845,12 +781,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,

         set_64bit_val(wqe, 24, hdr);

-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -872,7 +804,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
         bool local_fence = false;
         struct ib_sge sge = {};

-        info->push_wqe = qp->push_db;
         op_info = &info->op.inv_local_stag;
         local_fence = info->local_fence;

@@ -889,7 +820,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
         set_64bit_val(wqe, 16, 0);

         hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
-              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
               FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
               FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -899,13 +829,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,

         set_64bit_val(wqe, 24, hdr);

-        if (info->push_wqe) {
-                irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
-                                  post_sq);
-        } else {
-                if (post_sq)
-                        irdma_uk_qp_post_wr(qp);
-        }
+        if (post_sq)
+                irdma_uk_qp_post_wr(qp);

         return 0;
 }
@@ -1124,7 +1049,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
         info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
         info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
-        info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
         info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
         if (info->error) {
                 info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
@@ -1213,11 +1137,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
                                 return irdma_uk_cq_poll_cmpl(cq, info);
                         }
                 }
-                /*cease posting push mode on push drop*/
-                if (info->push_dropped) {
-                        qp->push_mode = false;
-                        qp->push_dropped = true;
-                }
                 if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
                         info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                         if (!info->comp_status)
@@ -1521,7 +1440,6 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
         qp->wqe_alloc_db = info->wqe_alloc_db;
         qp->qp_id = info->qp_id;
         qp->sq_size = info->sq_size;
-        qp->push_mode = false;
         qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
         sq_ring_size = qp->sq_size << info->sq_shift;
         IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
@@ -1616,7 +1534,6 @@ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
         u32 wqe_idx;
         struct irdma_post_sq_info info = {};

-        info.push_wqe = false;
         info.wr_id = wr_id;
         wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                          0, &info);
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index dd145ec72a91..36feca57b274 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -216,7 +216,6 @@ struct irdma_post_sq_info {
         bool local_fence:1;
         bool inline_data:1;
         bool imm_data_valid:1;
-        bool push_wqe:1;
         bool report_rtt:1;
         bool udp_hdr:1;
         bool defer_flag:1;
@@ -248,7 +247,6 @@ struct irdma_cq_poll_info {
         u8 op_type;
         u8 q_type;
         bool stag_invalid_set:1; /* or L_R_Key set */
-        bool push_dropped:1;
         bool error:1;
         bool solicited_event:1;
         bool ipv4:1;
@@ -321,8 +319,6 @@ struct irdma_qp_uk {
         struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
         u64 *rq_wrid_array;
         __le64 *shadow_area;
-        __le32 *push_db;
-        __le64 *push_wqe;
         struct irdma_ring sq_ring;
         struct irdma_ring rq_ring;
         struct irdma_ring initial_ring;
@@ -342,8 +338,6 @@ struct irdma_qp_uk {
         u8 rq_wqe_size;
         u8 rq_wqe_size_multiplier;
         bool deferred_flag:1;
-        bool push_mode:1; /* whether the last post wqe was pushed */
-        bool push_dropped:1;
         bool first_sq_wq:1;
         bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
         bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
@@ -415,7 +409,5 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
                       u32 *wqdepth);
 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
                       u32 *wqdepth);
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
-                       u32 wqe_idx, bool post_sq);
 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
 #endif /* IRDMA_USER_H */
--
1.8.3.1