Make it clearer what is going on by adding a function to go back from the
"virtual" dma_addr to a struct page. This is used in the
ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).

Call it instead of a naked virt_to_page() when working with dma_addr
values encoded by the various ib_map functions.

This also fixes the virt_to_page() casting problem Linus Walleij has
been chasing.

Cc: Linus Walleij <linus.walleij@xxxxxxxxxx>
Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    | 16 ++++++++--------
 drivers/infiniband/sw/siw/siw_qp_tx.c | 12 +++---------
 include/rdma/ib_verbs.h               | 13 +++++++++++++
 3 files changed, 24 insertions(+), 17 deletions(-)

Maybe this will be clearer overall

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1e17f8086d59a8..0e538fafcc20ff 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -210,10 +210,10 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 	return err;
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
-	struct page *page = virt_to_page(iova & mr->page_mask);
+	struct page *page = ib_virt_dma_to_page(dma_addr);
 	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
 	int err;
 
@@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
 	return 0;
 }
 
-static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
 			    unsigned int length, enum rxe_mr_copy_dir dir)
 {
-	unsigned int page_offset = iova & (PAGE_SIZE - 1);
+	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
 	unsigned int bytes;
 	struct page *page;
 	u8 *va;
 
 	while (length) {
-		page = virt_to_page(iova & mr->page_mask);
+		page = ib_virt_dma_to_page(dma_addr);
 		bytes = min_t(unsigned int, length,
 				PAGE_SIZE - page_offset);
 		va = kmap_local_page(page);
@@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
 		kunmap_local(va);
 
 		page_offset = 0;
-		iova += bytes;
+		dma_addr += bytes;
 		addr += bytes;
 		length -= bytes;
 	}
@@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
@@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 6bb9e9e81ff4ca..108985af49723b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
-		return virt_to_page((void *)(uintptr_t)paddr);
+		return ib_virt_dma_to_page(paddr);
 
 	return NULL;
 }
@@ -537,15 +537,9 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 			 * Cast to an uintptr_t to preserve all 64 bits
 			 * in sge->laddr.
 			 */
-			uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+			u64 va = (uintptr_t)(sge->laddr + sge_off);
 
-			/*
-			 * virt_to_page() takes a (void *) pointer
-			 * so cast to a (void *) meaning it will be 64
-			 * bits on a 64 bit platform and 32 bits on a
-			 * 32 bit platform.
-			 */
-			page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+			page_array[seg] = ib_virt_dma_to_page(va);
 			if (do_crc)
 				crypto_shash_update(
 					c_tx->mpa_crc_hd,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 949cf4ffc536c5..4e8868171de68c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4035,6 +4035,19 @@ static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
 	return dma_pci_p2pdma_supported(dev->dma_device);
 }
 
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address to convert
+ *
+ * Used by ib_uses_virt_dma() style drivers to get back to the struct page
+ * after going through the dma_addr marshalling.
+ */
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
+{
+	/* virt_dma mode maps the kva's directly into the dma addr */
+	return virt_to_page((void *)(uintptr_t)dma_addr);
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created

base-commit: 78b26a335310a097d6b22581b706050db42f196c
-- 
2.40.0
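P.S. for reviewers: the cast sequence the helper depends on can be
sanity-checked in plain userspace C. The sketch below is illustrative
only; encode_dma()/decode_dma() are hypothetical stand-ins for the
ib_map-style encoding, not kernel API. It just shows that narrowing a
u64 back through uintptr_t round-trips a pointer cleanly on both 32-
and 64-bit builds, which is the casting problem being fixed.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ib_map-style encoding: the virtual address itself
 * is used as the "dma address", widened to 64 bits. */
static uint64_t encode_dma(void *va)
{
	return (uint64_t)(uintptr_t)va;
}

/* Mirrors ib_virt_dma_to_page()'s cast sequence: narrow through
 * uintptr_t first, so the conversion back to a pointer is well
 * defined on 32-bit platforms too (casting a u64 straight to void *
 * there warns about a different-sized integer and truncates). */
static void *decode_dma(uint64_t dma_addr)
{
	return (void *)(uintptr_t)dma_addr;
}

int main(void)
{
	int payload = 42;
	uint64_t dma_addr = encode_dma(&payload);
	int *back = decode_dma(dma_addr);

	printf("round-trip ok: %d\n", *back);
	return 0;
}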