[PATCH rdma-next 2/6] RDMA/umem: Add API to find best driver supported page size in an MR

This helper iterates through the SG list to find the best page size to use
from a bitmap of HW-supported page sizes. Drivers that support multiple page
sizes, but cannot mix page sizes within a single MR, can use this API.
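
For illustration, a driver might consume the result along these lines
(sketch only; the function name, the page_shift output and the 4K/64K/2M
mask are made-up examples, not part of this series):

static int mydrv_select_mr_page_size(struct ib_umem *umem, u64 virt_addr,
				     unsigned int *page_shift)
{
	/* HW that can do 4K, 64K and 2M pages, but only one size per MR */
	unsigned long hw_pg_sizes = SZ_4K | SZ_64K | SZ_2M;
	unsigned long pg_sz;

	pg_sz = ib_umem_find_single_pg_size(umem, hw_pg_sizes, virt_addr);
	if (!pg_sz)
		return -EINVAL;

	*page_shift = order_base_2(pg_sz);
	return 0;
}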

Suggested-by: Jason Gunthorpe <jgg@xxxxxxxx>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@xxxxxxxxx>
Signed-off-by: Shiraz Saleem <shiraz.saleem@xxxxxxxxx>
---
 drivers/infiniband/core/umem.c | 86 ++++++++++++++++++++++++++++++++++++++++++
 include/rdma/ib_umem.h         |  9 +++++
 2 files changed, 95 insertions(+)

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 64bacc5..b2f2d75 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,6 +119,92 @@ static void ib_umem_add_sg_table(struct scatterlist **cur,
 }
 
 /**
+ * ib_umem_find_pg_bit - Find the page bit to use for phyaddr
+ *
+ * @phyaddr: Physical address after DMA translation
+ * @supported_pgsz: bitmask of HW supported page sizes
+ */
+static unsigned int ib_umem_find_pg_bit(unsigned long phyaddr,
+					unsigned long supported_pgsz)
+{
+	unsigned long num_zeroes;
+	unsigned long pgsz;
+
+	/* Trailing zero bits in the address */
+	num_zeroes = __ffs(phyaddr);
+
+	/* Find page bit such that phyaddr is aligned to the highest supported
+	 * HW page size
+	 */
+	pgsz = supported_pgsz & (BIT_ULL(num_zeroes + 1) - 1);
+	if (!pgsz)
+		return __ffs(supported_pgsz);
+
+	return (fls64(pgsz) - 1);
+}
+
+/**
+ * ib_umem_find_single_pg_size - Find best HW page size to use for this MR
+ * @umem: umem struct
+ * @supported_pgsz: bitmask of HW supported page sizes
+ * @uvirt_addr: user-space virtual MR base address
+ *
+ * This helper is intended for HW that supports multiple page
+ * sizes but can use only a single page size per MR.
+ */
+unsigned long ib_umem_find_single_pg_size(struct ib_umem *umem,
+					  unsigned long supported_pgsz,
+					  unsigned long uvirt_addr)
+{
+	struct scatterlist *sg;
+	unsigned int pg_bit_sg, min_pg_bit, best_pg_bit;
+	int i;
+
+	if (!supported_pgsz)
+		return 0;
+
+	min_pg_bit = __ffs(supported_pgsz);
+	best_pg_bit = fls64(supported_pgsz) - 1;
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+		unsigned long dma_addr_start, dma_addr_end;
+
+		dma_addr_start = sg_dma_address(sg);
+		dma_addr_end = sg_dma_address(sg) + sg_dma_len(sg);
+		if (!i) {
+			pg_bit_sg = ib_umem_find_pg_bit(dma_addr_end, supported_pgsz);
+
+			/* The offset of the MR within the first _large_ page
+			 * must match for the user-space virtual buffer and the
+			 * physical buffer in order to upgrade the page bit.
+			 */
+			if (pg_bit_sg > PAGE_SHIFT) {
+				unsigned int uvirt_pg_bit;
+
+				uvirt_pg_bit = ib_umem_find_pg_bit(uvirt_addr + sg_dma_len(sg),
+								   supported_pgsz);
+				pg_bit_sg = min_t(unsigned int, uvirt_pg_bit, pg_bit_sg);
+			}
+		} else if (i == (umem->nmap - 1)) {
+			/* last SGE: Does not matter if MR ends at an
+			 * unaligned offset.
+			 */
+			pg_bit_sg = ib_umem_find_pg_bit(dma_addr_start, supported_pgsz);
+		} else {
+			pg_bit_sg = ib_umem_find_pg_bit(dma_addr_start,
+				supported_pgsz & (BIT_ULL(__ffs(sg_dma_len(sg)) + 1) - 1));
+		}
+
+		best_pg_bit = min_t(unsigned int, best_pg_bit, pg_bit_sg);
+		if (best_pg_bit == min_pg_bit)
+			break;
+	}
+
+	return BIT_ULL(best_pg_bit);
+}
+EXPORT_SYMBOL(ib_umem_find_single_pg_size);
+
+/**
  * ib_umem_get - Pin and DMA map userspace memory.
  *
  * If access flags indicate ODP memory, avoid pinning. Instead, stores
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 5d3755e..3e8e1ed 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -86,6 +86,9 @@ void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 		      size_t length);
+unsigned long ib_umem_find_single_pg_size(struct ib_umem *umem,
+					  unsigned long supported_pgsz,
+					  unsigned long uvirt_addr);
 
 #else /* CONFIG_INFINIBAND_USER_MEM */
 
@@ -102,6 +105,12 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
 		      		    size_t length) {
 	return -EINVAL;
 }
+static inline unsigned long ib_umem_find_single_pg_size(struct ib_umem *umem,
+							 unsigned long supported_pgsz,
+							 unsigned long uvirt_addr) {
+	return 0;
+}
+
 #endif /* CONFIG_INFINIBAND_USER_MEM */
 
 #endif /* IB_UMEM_H */
-- 
2.8.3
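
For reference, the page-bit selection in ib_umem_find_pg_bit() boils down to
the bit math below; this is a standalone user-space sketch that substitutes
compiler builtins for the kernel helpers, and the address/mask values are
made-up examples rather than anything taken from this patch:

#include <stdio.h>

static unsigned int find_pg_bit(unsigned long long addr,
				unsigned long long supported_pgsz)
{
	/* trailing zero bits give the largest alignment of the address */
	unsigned int num_zeroes = __builtin_ctzll(addr);
	unsigned long long pgsz;

	/* keep only the supported sizes the address is aligned to */
	pgsz = supported_pgsz & ((1ULL << (num_zeroes + 1)) - 1);
	if (!pgsz)
		return __builtin_ctzll(supported_pgsz);	/* fall back to smallest */

	return 63 - __builtin_clzll(pgsz);		/* largest aligned size */
}

int main(void)
{
	/* 4K | 64K | 2M */
	unsigned long long mask = (1ULL << 12) | (1ULL << 16) | (1ULL << 21);

	/* 2M-aligned -> 21, 64K-aligned -> 16, only 4K-aligned -> 12 */
	printf("%u %u %u\n",
	       find_pg_bit(0x200000ULL, mask),
	       find_pg_bit(0x30000ULL, mask),
	       find_pg_bit(0x31000ULL, mask));
	return 0;
}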