[PATCH net-next 09/15] qed: add support for different page sizes for chains

Extend the current infrastructure to store the chain page size in
struct qed_chain and use it in all functions instead of the fixed
QED_CHAIN_PAGE_SIZE. The value still defaults to QED_CHAIN_PAGE_SIZE,
but can now be overridden via qed_chain_init_params::page_size before
chain allocation.
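
For example (a minimal sketch, not part of this patch), a caller that
already has a struct qed_dev *cdev and a struct qed_chain chain could
request 8K chain pages as shown below; the element count and size are
illustrative only:

    struct qed_chain_init_params params = {
        .mode          = QED_CHAIN_MODE_PBL,
        .intended_use  = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
        .cnt_type      = QED_CHAIN_CNT_TYPE_U16,
        .num_elems     = 4096,          /* illustrative */
        .elem_size     = 64,            /* illustrative */
        .page_size     = SZ_8K,         /* overrides QED_CHAIN_PAGE_SIZE */
    };
    int rc;

    /* Leaving .page_size at 0 keeps the default QED_CHAIN_PAGE_SIZE (SZ_4K). */
    rc = qed_chain_alloc(cdev, &chain, &params);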

Signed-off-by: Alexander Lobakin <alobakin@xxxxxxxxxxx>
Signed-off-by: Igor Russkikh <irusskikh@xxxxxxxxxxx>
Signed-off-by: Michal Kalderon <michal.kalderon@xxxxxxxxxxx>
---
 drivers/infiniband/hw/qedr/verbs.c          |  2 ++
 drivers/net/ethernet/qlogic/qed/qed_chain.c | 28 +++++++++++++--------
 include/linux/qed/qed_chain.h               | 21 ++++++++++------
 3 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6737895a0d68..49b8a43e3fa2 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1960,9 +1960,11 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
 
 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
 						     QEDR_SQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
 						     QEDR_RQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 
 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index 6e8dc6cefa5d..ce13ee8af4f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -16,8 +16,10 @@ static void qed_chain_init(struct qed_chain *chain,
 	chain->mode = params->mode;
 	chain->cnt_type = params->cnt_type;
 
-	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size);
+	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
+					      params->page_size);
 	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+						       params->page_size,
 						       params->mode);
 	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
 						       params->mode);
@@ -26,6 +28,7 @@ static void qed_chain_init(struct qed_chain *chain,
 	chain->next_page_mask = chain->usable_per_page &
 				chain->elem_per_page_mask;
 
+	chain->page_size = params->page_size;
 	chain->page_cnt = page_cnt;
 	chain->capacity = chain->usable_per_page * page_cnt;
 	chain->size = chain->elem_per_page * page_cnt;
@@ -80,7 +83,7 @@ static void qed_chain_free_next_ptr(struct qed_dev *cdev,
 		virt_next = next->next_virt;
 		phys_next = HILO_DMA_REGPAIR(next->next_phys);
 
-		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);
+		dma_free_coherent(dev, chain->page_size, virt, phys);
 
 		virt = virt_next;
 		phys = phys_next;
@@ -93,7 +96,7 @@ static void qed_chain_free_single(struct qed_dev *cdev,
 	if (!chain->p_virt_addr)
 		return;
 
-	dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+	dma_free_coherent(&cdev->pdev->dev, chain->page_size,
 			  chain->p_virt_addr, chain->p_phys_addr);
 }
 
@@ -111,7 +114,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
 		if (!entry->virt_addr)
 			break;
 
-		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
+		dma_free_coherent(dev, chain->page_size, entry->virt_addr,
 				  entry->dma_map);
 	}
 
@@ -156,7 +159,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
 {
 	u64 chain_size;
 
-	chain_size = ELEMS_PER_PAGE(params->elem_size);
+	chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
 	chain_size *= page_cnt;
 
 	if (!chain_size)
@@ -199,7 +202,7 @@ static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
 	u32 i;
 
 	for (i = 0; i < chain->page_cnt; i++) {
-		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
 					  GFP_KERNEL);
 		if (!virt)
 			return -ENOMEM;
@@ -230,7 +233,7 @@ static int qed_chain_alloc_single(struct qed_dev *cdev,
 	dma_addr_t phys;
 	void *virt;
 
-	virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+	virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
 				  &phys, GFP_KERNEL);
 	if (!virt)
 		return -ENOMEM;
@@ -280,7 +283,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
 
 alloc_pages:
 	for (i = 0; i < page_cnt; i++) {
-		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
 					  GFP_KERNEL);
 		if (!virt)
 			return -ENOMEM;
@@ -316,11 +319,15 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
 	u32 page_cnt;
 	int rc;
 
+	if (!params->page_size)
+		params->page_size = QED_CHAIN_PAGE_SIZE;
+
 	if (params->mode == QED_CHAIN_MODE_SINGLE)
 		page_cnt = 1;
 	else
 		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
 					      params->elem_size,
+					      params->page_size,
 					      params->mode);
 
 	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
@@ -328,9 +335,10 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
 		DP_NOTICE(cdev,
 			  "Cannot allocate a chain with the given arguments:\n");
 		DP_NOTICE(cdev,
-			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
 			  params->intended_use, params->mode, params->cnt_type,
-			  params->num_elems, params->elem_size);
+			  params->num_elems, params->elem_size,
+			  params->page_size);
 		return rc;
 	}
 
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index f5cfee0934e5..8a96c361cc19 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -11,6 +11,7 @@
 #include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/qed/common_hsi.h>
 
@@ -120,6 +121,8 @@ struct qed_chain {
 	 * but isn't involved in regular functionality.
 	 */
 
+	u32						page_size;
+
 	/* Base address of a pre-allocated buffer for pbl */
 	struct {
 		__le64					*table_virt;
@@ -147,6 +150,7 @@ struct qed_chain_init_params {
 	enum qed_chain_use_mode				intended_use;
 	enum qed_chain_cnt_type				cnt_type;
 
+	u32						page_size;
 	u32						num_elems;
 	size_t						elem_size;
 
@@ -154,22 +158,23 @@ struct qed_chain_init_params {
 	dma_addr_t					ext_pbl_phys;
 };
 
-#define QED_CHAIN_PAGE_SIZE				0x1000
+#define QED_CHAIN_PAGE_SIZE				SZ_4K
 
-#define ELEMS_PER_PAGE(elem_size)					     \
-	(QED_CHAIN_PAGE_SIZE / (elem_size))
+#define ELEMS_PER_PAGE(elem_size, page_size)				     \
+	((page_size) / (elem_size))
 
 #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)			     \
 	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?				     \
 	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) :     \
 	 0)
 
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode)				     \
-	((u32)(ELEMS_PER_PAGE(elem_size) -				     \
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)		     \
+	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) -		     \
 	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
 
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)			     \
-	DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode)))
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)	     \
+	DIV_ROUND_UP((elem_cnt),					     \
+		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
 
 #define is_chain_u16(p)							     \
 	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
@@ -604,7 +609,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
 
 	for (i = 0; i < page_cnt; i++)
 		memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
-		       QED_CHAIN_PAGE_SIZE);
+		       p_chain->page_size);
 }
 
 #endif
-- 
2.25.1



