On 3/18/22 7:13 PM, Wenpeng Liang wrote:
On 2022/3/14 14:47, Cheng Xu wrote:
<...>
+static int erdma_cmdq_eq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_eq *eq = &cmdq->eq;
+ u32 buf_size;
+
+ eq->depth = cmdq->max_outstandings;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
This patchset initializes and increments event_num, but never calls any
consuming interface such as atomic64_dec_and_test() on it, so the
variable appears to be redundant within this patchset. Will subsequent
patches extend the use of event_num?
The same applies to notify_num and armed_num.
Yes, we plan to expose these counters through the
ib_device_ops.get_hw_stats interface in later patches.
Thanks
<...>
+
+static void erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq,
+ __be32 *cqe)
+{
+ struct erdma_comp_wait *comp_wait;
+ u16 sqe_idx, ctx_id;
+ u64 *sqe;
+ int i;
+ u32 hdr0 = __be32_to_cpu(*cqe);
+
+ sqe_idx = __be32_to_cpu(*(cqe + 1));
+ sqe = (u64 *)get_cmdq_sqe(cmdq, sqe_idx);
get_cmdq_sqe() returns "void *", so the explicit cast
is unnecessary.
Will fix.
<...>
+
+/* Drain all hardware-owned CQEs from the cmdq completion queue under
+ * cq.lock, dispatch each one, then advance the consumer index and
+ * re-arm the CQ when running in event (interrupt) mode.
+ */
+static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
+{
+ u32 hdr;
+ __be32 *cqe;
+ unsigned long flags;
+ u16 comp_num = 0;
+ u8 owner, expect_owner;
+ u16 cqe_idx;
+
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+
+ /* Masking with (depth - 1) assumes cq.depth is a power of two
+ * -- verify at queue-creation time.
+ */
+ expect_owner = cmdq->cq.owner;
+ cqe_idx = cmdq->cq.ci & (cmdq->cq.depth - 1);
+
+ while (1) {
+ cqe = (__be32 *)get_cmdq_cqe(cmdq, cqe_idx);
+ hdr = __be32_to_cpu(READ_ONCE(*cqe));
+
+ /* Stop at the first CQE whose owner (phase) bit does not
+ * match the expected phase: hardware has not written it yet.
+ */
+ owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK, hdr);
+ if (owner != expect_owner)
+ break;
+
+ /* Order the owner-bit check before reading the CQE payload. */
+ dma_rmb();
+ erdma_poll_single_cmd_completion(cmdq, cqe);
+ comp_num++;
+ /* Wrapping past the end of the ring flips the expected
+ * owner bit for the next pass over the queue.
+ */
+ if (cqe_idx == cmdq->cq.depth - 1) {
+ cqe_idx = 0;
+ expect_owner = !expect_owner;
+ } else {
+ cqe_idx++;
+ }
+ }
+
+ if (comp_num) {
+ cmdq->cq.ci += comp_num;
+ cmdq->cq.owner = expect_owner;
+
+ /* Event mode: request the next completion notification. */
+ if (cmdq->use_event)
+ arm_cmdq_cq(cmdq);
+ }
+
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+}
The logic for judging whether a CQE is valid is too complicated;
you can refer to the function get_sw_cqe_v2() in the hns RoCE
driver. I hope it will help you.
I will check this.
Thanks,
Cheng Xu
Thanks,
Wenpeng