Re: [PATCH for-next v3 06/12] RDMA/erdma: Add event queue implementation

On 2/22/22 3:23 PM, Ma Ca wrote:

On 2022/2/17 11:01, Cheng Xu wrote:
From: Cheng Xu <chengyou@xxxxxxxxxxxxxxxxx>


<...>

+    eq->qbuf = dma_alloc_coherent(&dev->pdev->dev,
+                      WARPPED_BUFSIZE(buf_size),
+                      &eq->qbuf_dma_addr, GFP_KERNEL);
+    if (!eq->qbuf)
+        return -ENOMEM;
+
+    memset(eq->qbuf, 0, WARPPED_BUFSIZE(buf_size));
It's better to replace the memset() with __GFP_ZERO.

It is better; I will go through all the code and replace them.

Thanks.
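
For reference, the suggested change would look roughly like this (a sketch
of the reviewer's suggestion, not the posted patch; __GFP_ZERO asks the
allocator to hand back zeroed memory, making the memset() unnecessary):

    eq->qbuf = dma_alloc_coherent(&dev->pdev->dev,
                                  WARPPED_BUFSIZE(buf_size),
                                  &eq->qbuf_dma_addr,
                                  GFP_KERNEL | __GFP_ZERO);
    if (!eq->qbuf)
        return -ENOMEM;
    /* no memset() needed: the allocation is already zeroed */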

+
+    spin_lock_init(&eq->lock);
+    atomic64_set(&eq->event_num, 0);
+    atomic64_set(&eq->notify_num, 0);
+
+    eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+    eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
+    eq->db_record = (u64 *)(eq->qbuf + buf_size);
+    eq->owner = 1;
+

<...>

+    erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+                CMDQ_OPCODE_CREATE_EQ);
+    req.eqn = eqn;
+    req.depth = ilog2(eq->depth);
+    req.qbuf_addr = eq->qbuf_dma_addr;
+    req.qtype = 1; /* CEQ */
+    /* Vector index is the same sa EQN. */
same sa -> same as

Will fix it.

+    req.vector_idx = eqn;
+    db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
+    req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
+    req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+
+    return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
+                   sizeof(struct erdma_cmdq_create_eq_req),
+                   NULL, NULL);
+}
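
For context on the address arithmetic above: the doorbell record shares one
coherent allocation with the EQE ring, sitting right behind it. A layout
sketch (record size inferred from the u64 type of eq->db_record;
WARPPED_BUFSIZE() presumably pads the allocation for it):

    /*
     *  qbuf_dma_addr                  qbuf_dma_addr + (depth << EQE_SHIFT)
     *  |                              |
     *  [ EQE 0 | EQE 1 | ... EQE n-1 ][ 8-byte doorbell record ]
     *
     * so the device-side db_dma_addr passed in the create request is
     * qbuf_dma_addr + (depth << EQE_SHIFT), matching the CPU-side
     * pointer eq->db_record = eq->qbuf + buf_size.
     */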
+
+static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
+{
+    struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+    u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+    int ret;
+
+    eq->qbuf = dma_alloc_coherent(&dev->pdev->dev,
+                      WARPPED_BUFSIZE(buf_size),
+                      &eq->qbuf_dma_addr, GFP_KERNEL);
+    if (!eq->qbuf)
+        return -ENOMEM;
+
+    memset(eq->qbuf, 0, WARPPED_BUFSIZE(buf_size));
ditto
<...>

+static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
+{
+    struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+    u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+    struct erdma_cmdq_destroy_eq_req req;
+    int err;
+
+    dev->ceqs[ceqn].ready = 0;
+
+    erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+                CMDQ_OPCODE_DESTROY_EQ);
+    /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
+    req.eqn = ceqn + 1;
+    req.qtype = 1;
+    req.vector_idx = ceqn + 1;
+
+    err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+                  NULL);
+    if (err)
+        return;
+
+    dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+              eq->qbuf_dma_addr);
A memory leak may occur here if the post fails.

If the command fails, it means that the EQ is still owned by the hardware,
and the queue buffer may be written by the hardware after it is freed. I
think the current implementation (leaking the buffer) is safer than freeing
it when the command fails.
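
A sketch of that defensive pattern, with a hypothetical dev_warn() added so
the deliberate leak at least leaves a trace in the log (not part of the
posted patch):

    err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
                              NULL);
    if (err) {
        /* The hardware may still DMA into qbuf; freeing it now would
         * risk a device-side use-after-free, so leak it on purpose.
         */
        dev_warn(&dev->pdev->dev,
                 "DESTROY_EQ failed (%d), leaking EQ buffer\n", err);
        return;
    }

    dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
                      eq->qbuf, eq->qbuf_dma_addr);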

Thanks,
Cheng Xu




