Re: [PATCH rdma-next 08/13] RDMA/efa: Implement functions that submit and complete admin commands

On 05-Dec-18 22:41, Hefty, Sean wrote:
>> +static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) {
> 
> I noticed several places in this file where the open and close braces are on the wrong line.

Missed that, will fix.
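
For reference, kernel coding style puts the opening brace of a function
definition on its own line; only statement blocks inside a function keep
the brace on the same line. A minimal before/after sketch (the function
name is made up for illustration):

	/* As in the patch: brace fused to the signature line */
	static int efa_example(void) {
		return 0;
	}

	/* Kernel style: function brace on its own line */
	static int efa_example(void)
	{
		return 0;
	}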

> 
>> +	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
>> +	struct efa_admin_mmio_req_read_less_resp *read_resp;
>> +	unsigned long exp_time;
>> +	u32 mmio_read_reg;
>> +	u32 err;
>> +
>> +	read_resp = mmio_read->read_resp;
>> +
>> +	spin_lock(&mmio_read->lock);
>> +	mmio_read->seq_num++;
>> +
>> +	/* trash DMA req_id to identify when hardware is done */
>> +	read_resp->req_id = mmio_read->seq_num + 0x9aL;
>> +	mmio_read_reg  = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
>> +			 EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
>> +	mmio_read_reg |= mmio_read->seq_num &
>> +			 EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;
>> +
>> +	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);
>> +
>> +	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
>> +	do {
>> +		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
>> +			break;
>> +		udelay(1);
>> +	} while (time_is_after_jiffies(exp_time));
>> +
>> +	if (unlikely(read_resp->req_id != mmio_read->seq_num)) {
>> +		pr_err("Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
>> +		       mmio_read->seq_num, offset, read_resp->req_id,
>> +		       read_resp->reg_off);
>> +		err = EFA_MMIO_READ_INVALID;
>> +		goto out;
>> +	}
>> +
>> +	if (read_resp->reg_off != offset) {
>> +		pr_err("Reading register failed: wrong offset provided");
>> +		err = EFA_MMIO_READ_INVALID;
>> +		goto out;
>> +	}
>> +
>> +	err = read_resp->reg_val;
>> +out:
>> +	spin_unlock(&mmio_read->lock);
>> +	return err;
>> +}
>> +
>> +static int efa_com_admin_init_sq(struct efa_com_dev *edev) {
>> +	struct efa_com_admin_queue *queue = &edev->admin_queue;
>> +	struct efa_com_admin_sq *sq = &queue->sq;
>> +	u16 size = ADMIN_SQ_SIZE(queue->depth);
>> +	u32 addr_high;
>> +	u32 addr_low;
>> +	u32 aq_caps;
>> +
>> +	sq->entries = dma_zalloc_coherent(queue->dmadev, size, &sq->dma_addr,
>> +					  GFP_KERNEL);
>> +	if (!sq->entries)
>> +		return -ENOMEM;
>> +
>> +	spin_lock_init(&sq->lock);
>> +
>> +	sq->cc = 0;
>> +	sq->pc = 0;
>> +	sq->phase = 1;
>> +
>> +	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
>> +
>> +	pr_debug("init sq dma_addr....\n");
>> +	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
>> +	addr_low  = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);
>> +
>> +	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
>> +	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
>> +
>> +	aq_caps = 0;
>> +	aq_caps |= queue->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
>> +	aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
>> +			EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
>> +			EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
>> +
>> +	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);
>> +
>> +	return 0;
>> +}
>> +
>> +static int efa_com_admin_init_cq(struct efa_com_dev *edev) {
>> +	struct efa_com_admin_queue *queue = &edev->admin_queue;
>> +	struct efa_com_admin_cq *cq = &queue->cq;
>> +	u16 size = ADMIN_CQ_SIZE(queue->depth);
>> +	u32 addr_high;
>> +	u32 addr_low;
>> +	u32 acq_caps;
>> +
>> +	cq->entries = dma_zalloc_coherent(queue->dmadev, size, &cq->dma_addr,
>> +					  GFP_KERNEL);
>> +	if (!cq->entries)
>> +		return -ENOMEM;
>> +
>> +	spin_lock_init(&cq->lock);
>> +
>> +	cq->cc = 0;
>> +	cq->phase = 1;
>> +
>> +	pr_debug("init cq dma_addr....\n");
>> +	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr);
>> +	addr_low  = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr);
>> +
>> +	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
>> +	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
>> +
>> +	acq_caps = 0;
>> +	acq_caps |= queue->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
>> +	acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
>> +			EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
>> +			EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
>> +	acq_caps |= (queue->msix_vector_idx <<
>> +			EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
>> +			EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;
>> +
>> +	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);
>> +
>> +	return 0;
>> +}
>> +
>> +static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
>> +				   struct efa_aenq_handlers *aenq_handlers) {
>> +	struct efa_com_aenq *aenq = &edev->aenq;
>> +	u32 addr_low, addr_high, aenq_caps;
>> +	u16 size;
>> +
>> +	pr_debug("init aenq...\n");
>> +
>> +	if (unlikely(!aenq_handlers)) {
>> +		pr_err("aenq handlers pointer is NULL\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	size = ADMIN_AENQ_SIZE(EFA_ASYNC_QUEUE_DEPTH);
>> +	aenq->entries = dma_zalloc_coherent(edev->dmadev, size, &aenq->dma_addr,
>> +					    GFP_KERNEL);
>> +	if (!aenq->entries)
>> +		return -ENOMEM;
>> +
>> +	aenq->aenq_handlers = aenq_handlers;
>> +	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
>> +	aenq->cc = 0;
>> +	aenq->phase = 1;
>> +
>> +	addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
>> +	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
>> +
>> +	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
>> +	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
>> +
>> +	aenq_caps = 0;
>> +	aenq_caps |= aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
>> +	aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
>> +		EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
>> +		EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
>> +	aenq_caps |= (aenq->msix_vector_idx
>> +		      << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
>> +		     EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
>> +	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);
>> +
>> +	/*
>> +	 * Init cons_db to mark that all entries in the queue
>> +	 * are initially available
>> +	 */
>> +	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
>> +
>> +	pr_debug("init aenq succeeded\n");
>> +
>> +	return 0;
>> +}
>> +
>> +/* ID to be used with efa_com_get_comp_ctx */
>> +static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *admin_queue) {
>> +	u16 ctx_id;
>> +
>> +	spin_lock(&admin_queue->comp_ctx_lock);
>> +	ctx_id = admin_queue->comp_ctx_pool[admin_queue->comp_ctx_pool_next];
>> +	admin_queue->comp_ctx_pool_next++;
>> +	spin_unlock(&admin_queue->comp_ctx_lock);
>> +
>> +	return ctx_id;
>> +}
>> +
>> +static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *queue,
>> +					struct efa_comp_ctx *comp_ctx)
>> +{
>> +	u16 comp_id = comp_ctx->user_cqe->acq_common_descriptor.command &
>> +		      EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
>> +
>> +	pr_debug("Putting completion command_id %d", comp_id);
>> +	comp_ctx->occupied = false;
>> +	spin_lock(&queue->comp_ctx_lock);
>> +	queue->comp_ctx_pool_next--;
>> +	queue->comp_ctx_pool[queue->comp_ctx_pool_next] = comp_id;
>> +	spin_unlock(&queue->comp_ctx_lock);
>> +	up(&queue->avail_cmds);
>> +}
>> +
>> +static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *queue,
>> +						 u16 command_id, bool capture)
>> +{
>> +	if (unlikely(command_id >= queue->depth)) {
>> +		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
>> +		       command_id, queue->depth);
>> +		return NULL;
>> +	}
>> +
>> +	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
>> +		pr_err("Completion context is occupied\n");
>> +		return NULL;
>> +	}
>> +
>> +	if (capture) {
>> +		queue->comp_ctx[command_id].occupied = true;
>> +		pr_debug("Taking completion ctxt command_id %d", command_id);
>> +	}
>> +
>> +	return &queue->comp_ctx[command_id];
>> +}
>> +
>> +static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *admin_queue,
>> +						       struct efa_admin_aq_entry *cmd,
>> +						       size_t cmd_size_in_bytes,
>> +						       struct efa_admin_acq_entry *comp,
>> +						       size_t comp_size_in_bytes)
>> +{
>> +	struct efa_comp_ctx *comp_ctx;
>> +	u16 queue_size_mask;
>> +	u16 ctx_id;
>> +	u16 pi;
>> +
>> +	queue_size_mask = admin_queue->depth - 1;
>> +	pi = admin_queue->sq.pc & queue_size_mask;
>> +
>> +	ctx_id = efa_com_alloc_ctx_id(admin_queue);
>> +
>> +	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
>> +		EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
>> +
>> +	cmd->aq_common_descriptor.command_id |= ctx_id &
>> +		EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
>> +
>> +	comp_ctx = efa_com_get_comp_ctx(admin_queue, ctx_id, true);
>> +	if (unlikely(!comp_ctx))
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	comp_ctx->status = EFA_CMD_SUBMITTED;
>> +	comp_ctx->comp_size = comp_size_in_bytes;
>> +	comp_ctx->user_cqe = comp;
>> +	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
>> +
>> +	reinit_completion(&comp_ctx->wait_event);
>> +
>> +	memcpy(&admin_queue->sq.entries[pi], cmd, cmd_size_in_bytes);
>> +
>> +	admin_queue->sq.pc++;
>> +	admin_queue->stats.submitted_cmd++;
> 
> Is synchronization needed around these increments?  This occurs in several places.

submitted_cmd and completed_cmd are protected by the sq/cq lock. Looks like we
have a problem with no_completion and aborted_cmd though. I'll synchronize them.
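
For example, one way to close that hole (an illustrative sketch only, not
the final patch; the helper name is made up and assumes the stats stay
plain u64 counters):

	/* Bump the currently-unsynchronized counter under the
	 * completion-queue lock that already serializes completions:
	 */
	static void efa_com_stats_inc_no_completion(struct efa_com_admin_queue *aq)
	{
		spin_lock(&aq->cq.lock);
		aq->stats.no_completion++;
		spin_unlock(&aq->cq.lock);
	}

Alternatively, the stats fields could become atomic64_t and be bumped with
atomic64_inc(), which avoids taking a lock on the error paths.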

> 
> - Sean
> 


