ULPs will use this information to determine the maximal data transfer size
per IO operation.

Signed-off-by: Max Gurtovoy <maxg@xxxxxxxxxxxx>
---
 drivers/infiniband/core/rw.c | 14 ++++++++++++--
 include/rdma/rw.h            |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4fad732..edc9bee 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -56,8 +56,17 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
 	return false;
 }
 
-static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
-		bool pi_support)
+/**
+ * rdma_rw_fr_page_list_len - return the max number of pages mapped by 1 MR
+ * @dev: RDMA device that will eventually create a PD for the needed MRs
+ * @pi_support: whether MRs will be created for protection information offload
+ *
+ * Returns the number of pages that one MR can map for an RDMA operation on the
+ * given device. Callers can derive the maximal data transfer size from the
+ * result of this function, or choose to use multiple MRs for the RDMA
+ * operation instead.
+ */
+u32 rdma_rw_fr_page_list_len(struct ib_device *dev, bool pi_support)
 {
 	u32 max_pages;
 
@@ -69,6 +78,7 @@ static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
 	/* arbitrary limit to avoid allocating gigantic resources */
 	return min_t(u32, max_pages, 256);
 }
+EXPORT_SYMBOL(rdma_rw_fr_page_list_len);
 
 static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
 {
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 6ad9dc8..a9bbda7 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -69,5 +69,6 @@ unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
 void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
 int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
 void rdma_rw_cleanup_mrs(struct ib_qp *qp);
+u32 rdma_rw_fr_page_list_len(struct ib_device *dev, bool pi_support);
 
 #endif /* _RDMA_RW_H */
-- 
1.8.3.1
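
For context, a minimal sketch of how a ULP could turn the result of the new
export into a per-IO transfer-size cap. This is illustration only, not part of
the patch; the helper name ulp_max_transfer_size() is made up, and the usual
<rdma/rw.h> include is assumed:

#include <rdma/rw.h>

/*
 * Hypothetical example: cap the per-IO payload to what a single
 * fast-registration MR can map on this device.
 */
static u32 ulp_max_transfer_size(struct ib_device *dev, bool pi_support)
{
	/* Number of pages one MR can map (bounded to 256 by the core). */
	u32 max_pages = rdma_rw_fr_page_list_len(dev, pi_support);

	/* Convert pages to bytes; an IO using multiple MRs could exceed this. */
	return max_pages << PAGE_SHIFT;
}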