Implement the ib device operation 'reg_user_mr_dmabuf'. Generate an
rxe_map from the memory space linked to the passed dma-buf.

Signed-off-by: Shunsuke Mie <mie@xxxxxxxxxx>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |   2 +
 drivers/infiniband/sw/rxe/rxe_mr.c    | 118 ++++++++++++++++++++++++++
 drivers/infiniband/sw/rxe/rxe_verbs.c |  34 ++++++++
 drivers/infiniband/sw/rxe/rxe_verbs.h |   2 +
 4 files changed, 156 insertions(+)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ca43b859d80..8bc19ea1a376 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -75,6 +75,8 @@ u8 rxe_get_next_key(u32 last_key);
 void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct rxe_mr *mr);
+int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
+			    u64 iova, int access, struct rxe_mr *mr);
 int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum rxe_mr_copy_dir dir);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 53271df10e47..af6ef671c3a5 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -4,6 +4,7 @@
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  */

+#include <linux/dma-buf.h>
 #include "rxe.h"
 #include "rxe_loc.h"
@@ -245,6 +246,120 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
	return err;
 }

+static int rxe_map_dmabuf_mr(struct rxe_mr *mr,
+			     struct ib_umem_dmabuf *umem_dmabuf)
+{
+	struct rxe_map_set *set;
+	struct rxe_phys_buf *buf = NULL;
+	struct rxe_map **map;
+	void *vaddr, *vaddr_end;
+	int num_buf = 0;
+	int err;
+	size_t remain;
+
+	mr->dmabuf_map = kzalloc(sizeof(*mr->dmabuf_map), GFP_KERNEL);
+	if (!mr->dmabuf_map) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = dma_buf_vmap(umem_dmabuf->dmabuf, mr->dmabuf_map);
+	if (err)
+		goto err_free_dmabuf_map;
+
+	set = mr->cur_map_set;
+	set->page_shift = PAGE_SHIFT;
+	set->page_mask = PAGE_SIZE - 1;
+
+	map = set->map;
+	buf = map[0]->buf;
+
+	vaddr = mr->dmabuf_map->vaddr;
+	vaddr_end = vaddr + umem_dmabuf->dmabuf->size;
+	remain = umem_dmabuf->dmabuf->size;
+
+	for (; remain; vaddr += PAGE_SIZE) {
+		if (num_buf >= RXE_BUF_PER_MAP) {
+			map++;
+			buf = map[0]->buf;
+			num_buf = 0;
+		}
+
+		buf->addr = (uintptr_t)vaddr;
+		if (remain >= PAGE_SIZE)
+			buf->size = PAGE_SIZE;
+		else
+			buf->size = remain;
+		remain -= buf->size;
+
+		num_buf++;
+		buf++;
+	}
+
+	return 0;
+
+err_free_dmabuf_map:
+	kfree(mr->dmabuf_map);
+err_out:
+	return err;
+}
+
+static void rxe_unmap_dmabuf_mr(struct rxe_mr *mr)
+{
+	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+
+	dma_buf_vunmap(umem_dmabuf->dmabuf, mr->dmabuf_map);
+	kfree(mr->dmabuf_map);
+}
+
+int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
+			    u64 iova, int access, struct rxe_mr *mr)
+{
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct rxe_map_set *set;
+	int err;
+
+	umem_dmabuf = ib_umem_dmabuf_get(pd->ibpd.device, start, length, fd,
+					 access, NULL);
+	if (IS_ERR(umem_dmabuf)) {
+		err = PTR_ERR(umem_dmabuf);
+		goto err_out;
+	}
+
+	rxe_mr_init(access, mr);
+
+	err = rxe_mr_alloc(mr, ib_umem_num_pages(&umem_dmabuf->umem), 0);
+	if (err) {
+		pr_warn("%s: Unable to allocate memory for map\n", __func__);
+		goto err_release_umem;
+	}
+
+	mr->ibmr.pd = &pd->ibpd;
+	mr->umem = &umem_dmabuf->umem;
+	mr->access = access;
+	mr->state = RXE_MR_STATE_VALID;
+	mr->type = IB_MR_TYPE_USER;
+
+	set = mr->cur_map_set;
+	set->length = length;
+	set->iova = iova;
+	set->va = start;
+	set->offset = ib_umem_offset(mr->umem);
+
+	err = rxe_map_dmabuf_mr(mr, umem_dmabuf);
+	if (err)
+		goto err_free_map_set;
+
+	return 0;
+
+err_free_map_set:
+	rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+err_release_umem:
+	ib_umem_release(&umem_dmabuf->umem);
+err_out:
+	return err;
+}
+
 int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 {
	int err;
@@ -703,6 +818,9 @@ void rxe_mr_cleanup(struct rxe_pool_entry *arg)
 {
	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);

+	if (mr->umem && mr->umem->is_dmabuf)
+		rxe_unmap_dmabuf_mr(mr);
+
	ib_umem_release(mr->umem);

	if (mr->cur_map_set)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9d0bb9aa7514..6191bb4f434d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -916,6 +916,39 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
	return ERR_PTR(err);
 }

+static struct ib_mr *rxe_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+					    u64 length, u64 iova, int fd,
+					    int access, struct ib_udata *udata)
+{
+	int err;
+	struct rxe_dev *rxe = to_rdev(ibpd->device);
+	struct rxe_pd *pd = to_rpd(ibpd);
+	struct rxe_mr *mr;
+
+	mr = rxe_alloc(&rxe->mr_pool);
+	if (!mr) {
+		err = -ENOMEM;
+		goto err2;
+	}
+
+	rxe_add_index(mr);
+
+	rxe_add_ref(pd);
+
+	err = rxe_mr_dmabuf_init_user(pd, fd, start, length, iova, access, mr);
+	if (err)
+		goto err3;
+
+	return &mr->ibmr;
+
+err3:
+	rxe_drop_ref(pd);
+	rxe_drop_index(mr);
+	rxe_drop_ref(mr);
+err2:
+	return ERR_PTR(err);
+}
+
 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
 {
@@ -1081,6 +1114,7 @@ static const struct ib_device_ops rxe_dev_ops = {
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
+	.reg_user_mr_dmabuf = rxe_reg_user_mr_dmabuf,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index c807639435eb..0aa95ab06b6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -334,6 +334,8 @@ struct rxe_mr {

	struct rxe_map_set *cur_map_set;
	struct rxe_map_set *next_map_set;
+
+	struct dma_buf_map *dmabuf_map;
 };

 enum rxe_mw_state {
--
2.17.1
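
P.S. Not part of the patch itself: for anyone who wants to exercise the
new verb, below is a minimal userspace sketch using rdma-core's
ibv_reg_dmabuf_mr(). Here get_dmabuf_fd() is a hypothetical helper
standing in for whatever exporter (GPU driver, dma-buf heap, etc.)
actually hands out the file descriptor; offset and iova are both 0,
i.e. the whole buffer is registered from its start.

#include <stddef.h>
#include <infiniband/verbs.h>

/* Hypothetical exporter hook, not a real API: returns a dma-buf fd. */
extern int get_dmabuf_fd(size_t length);

static struct ibv_mr *register_dmabuf(struct ibv_pd *pd, size_t length)
{
	struct ibv_mr *mr;
	int fd = get_dmabuf_fd(length);

	if (fd < 0)
		return NULL;

	/* offset = 0, iova = 0: register the whole dma-buf from its start */
	mr = ibv_reg_dmabuf_mr(pd, 0, length, 0, fd,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_READ |
			       IBV_ACCESS_REMOTE_WRITE);
	if (!mr)
		return NULL;	/* on rxe this is expected to fail without this patch */

	/* mr->lkey / mr->rkey are now usable in work requests as usual */
	return mr;
}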