Re: [PATCH v1 02/10] crypto: octeontx2: add SGv2 support for CN10KB or CN10KA B0

On Fri, Nov 03, 2023 at 11:02:58AM +0530, Srujana Challa wrote:

Hi Srujana,

some minor feedback from my side.

> Scatter Gather input format for CPT has changed on CN10KB/CN10KA B0 HW
> to make it comapatible with NIX Scatter Gather format to support SG mode

nit: compatible

> for inline IPsec. This patch modifies the code to make the driver works
> for the same. This patch also enables CPT firmware load for these chips.
> 
> Signed-off-by: Srujana Challa <schalla@xxxxxxxxxxx>

...

> diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h

...

> +static inline int sgv2io_components_setup(struct pci_dev *pdev,
> +					  struct otx2_cpt_buf_ptr *list,
> +					  int buf_count, u8 *buffer)
> +{
> +	struct cn10kb_cpt_sglist_component *sg_ptr = NULL;
> +	int ret = 0, i, j;
> +	int components;
> +
> +	if (unlikely(!list)) {
> +		dev_err(&pdev->dev, "Input list pointer is NULL\n");
> +		return -EFAULT;
> +	}
> +
> +	for (i = 0; i < buf_count; i++) {
> +		if (unlikely(!list[i].vptr))
> +			continue;
> +		list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
> +						  list[i].size,
> +						  DMA_BIDIRECTIONAL);
> +		if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
> +			dev_err(&pdev->dev, "Dma mapping failed\n");
> +			ret = -EIO;
> +			goto sg_cleanup;
> +		}
> +	}
> +	components = buf_count / 3;
> +	sg_ptr = (struct cn10kb_cpt_sglist_component *)buffer;
> +	for (i = 0; i < components; i++) {
> +		sg_ptr->len0 = list[i * 3 + 0].size;
> +		sg_ptr->len1 = list[i * 3 + 1].size;
> +		sg_ptr->len2 = list[i * 3 + 2].size;
> +		sg_ptr->ptr0 = list[i * 3 + 0].dma_addr;
> +		sg_ptr->ptr1 = list[i * 3 + 1].dma_addr;
> +		sg_ptr->ptr2 = list[i * 3 + 2].dma_addr;
> +		sg_ptr->valid_segs = 3;
> +		sg_ptr++;
> +	}
> +	components = buf_count % 3;
> +
> +	sg_ptr->valid_segs = components;
> +	switch (components) {
> +	case 2:
> +		sg_ptr->len1 = list[i * 3 + 1].size;
> +		sg_ptr->ptr1 = list[i * 3 + 1].dma_addr;
> +		fallthrough;
> +	case 1:
> +		sg_ptr->len0 = list[i * 3 + 0].size;
> +		sg_ptr->ptr0 = list[i * 3 + 0].dma_addr;
> +		break;
> +	default:
> +		break;
> +	}
> +	return ret;

The above fields of sg_ptr all have big-endian types
but are being assigned values in host byte-order.

As flagged by Sparse.
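For example, a rough sketch of what endian-clean assignments could look
like, assuming the len fields are __be16 and the ptr fields __be64
(please adjust the conversion helpers to the actual field widths
declared in cn10kb_cpt_sglist_component):

	/* assumed: len* are __be16, ptr* are __be64 */
	sg_ptr->len0 = cpu_to_be16(list[i * 3 + 0].size);
	sg_ptr->len1 = cpu_to_be16(list[i * 3 + 1].size);
	sg_ptr->len2 = cpu_to_be16(list[i * 3 + 2].size);
	sg_ptr->ptr0 = cpu_to_be64(list[i * 3 + 0].dma_addr);
	sg_ptr->ptr1 = cpu_to_be64(list[i * 3 + 1].dma_addr);
	sg_ptr->ptr2 = cpu_to_be64(list[i * 3 + 2].dma_addr);

Alternatively the fields could be kept in host order and converted in
one place just before the instruction is submitted, whichever fits the
rest of the driver better.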

> +
> +sg_cleanup:
> +	for (j = 0; j < i; j++) {
> +		if (list[j].dma_addr) {
> +			dma_unmap_single(&pdev->dev, list[j].dma_addr,
> +					 list[j].size, DMA_BIDIRECTIONAL);
> +		}
> +
> +		list[j].dma_addr = 0;
> +	}
> +	return ret;
> +}
> +
> +static inline struct otx2_cpt_inst_info *cn10k_sgv2_info_create(struct pci_dev *pdev,
> +					      struct otx2_cpt_req_info *req,
> +					      gfp_t gfp)

nit: I think it would be nicer to format the above in a way that the
indentation isn't pushed so far to the right that aligning the
continuation lines with the opening parenthesis becomes impossible:

static inline struct otx2_cpt_inst_info *
cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
		       gfp_t gfp)

Running scripts/checkpatch.pl --strict over this patch-set might also be useful.
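For instance (run from the top of the kernel tree; the patch file name
below is just a placeholder):

	./scripts/checkpatch.pl --strict 0002-crypto-octeontx2-add-SGv2-support.patch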

...



