In the shared object model an ib_pd can belong to one or more
ib_ucontexts. Fix the nldev code so that it can report multiple context
ids for a single PD.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@xxxxxxxxxx>
---
 drivers/infiniband/core/nldev.c | 93 +++++++++++++++++++++++++++++++--
 1 file changed, 88 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cbd712f5f8b2..f4cc92b897ff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -41,6 +41,9 @@
 #include "core_priv.h"
 #include "cma_priv.h"
 #include "restrack.h"
+#include "uverbs.h"
+
+static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res);
 
 static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
@@ -584,11 +587,80 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
 err:	return -EMSGSIZE;
 }
 
+struct context_id {
+	struct list_head list;
+	u32 id;
+};
+
+static void pd_context(struct ib_pd *pd, struct list_head *list)
+{
+	struct ib_device *device = pd->device;
+	struct rdma_restrack_entry *res;
+	struct rdma_restrack_root *rt;
+	struct ib_uverbs_file *ufile;
+	struct ib_ucontext *ucontext;
+	struct ib_uobject *uobj;
+	unsigned long flags;
+	unsigned long id;
+	bool found;
+
+	rt = &device->res[RDMA_RESTRACK_CTX];
+
+	xa_lock(&rt->xa);
+
+	xa_for_each(&rt->xa, id, res) {
+		if (!is_visible_in_pid_ns(res))
+			continue;
+
+		if (!rdma_restrack_get(res))
+			continue;
+
+		xa_unlock(&rt->xa);
+
+		ucontext = container_of(res, struct ib_ucontext, res);
+		ufile = ucontext->ufile;
+		found = false;
+
+		/* See locking requirements in struct ib_uverbs_file */
+		down_read(&ufile->hw_destroy_rwsem);
+		spin_lock_irqsave(&ufile->uobjects_lock, flags);
+
+		list_for_each_entry(uobj, &ufile->uobjects, list) {
+			if (uobj->object == pd) {
+				found = true;
+				goto found;
+			}
+		}
+
+found:		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
+		up_read(&ufile->hw_destroy_rwsem);
+
+		if (found) {
+			struct context_id *ctx_id =
+				kmalloc(sizeof(*ctx_id), GFP_KERNEL);
+
+			if (WARN_ON_ONCE(!ctx_id))
+				goto next;
+
+			ctx_id->id = ucontext->res.id;
+			list_add(&ctx_id->list, list);
+		}
+
+next:		rdma_restrack_put(res);
+		xa_lock(&rt->xa);
+	}
+
+	xa_unlock(&rt->xa);
+}
+
 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 			     struct rdma_restrack_entry *res, uint32_t port)
 {
 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
 	struct ib_device *dev = pd->device;
+	struct context_id *ctx_id;
+	struct context_id *tmp;
+	LIST_HEAD(pd_context_ids);
 
 	if (has_cap_net_admin) {
 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
@@ -606,10 +678,14 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
 		goto err;
 
-	if (!rdma_is_kernel_res(res) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
-			pd->uobject->context->res.id))
-		goto err;
+	if (!rdma_is_kernel_res(res)) {
+		pd_context(pd, &pd_context_ids);
+		list_for_each_entry(ctx_id, &pd_context_ids, list) {
+			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+					ctx_id->id))
+				goto err;
+		}
+	}
 
 	if (fill_res_name_pid(msg, res))
 		goto err;
@@ -617,9 +693,16 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (fill_res_entry(dev, msg, res))
 		goto err;
 
+	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
+		kfree(ctx_id);
+
 	return 0;
 
-err:	return -EMSGSIZE;
+err:
+	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
+		kfree(ctx_id);
+
+	return -EMSGSIZE;
 }
 
 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
-- 
2.20.1
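
P.S. With this change a single PD entry in a dump can carry
RDMA_NLDEV_ATTR_RES_CTXN more than once, and nla_parse() keeps only the
last duplicate of an attribute type, so a dumper that wants every
context id of a shared PD has to walk the attributes itself. A minimal
userspace sketch, assuming libnl-3 and the rdma_netlink uapi header
(dump_pd_ctx_ids is a hypothetical helper, not part of this patch):

	/*
	 * Hypothetical consumer sketch, assuming libnl-3: collect every
	 * RDMA_NLDEV_ATTR_RES_CTXN instance from one PD entry instead of
	 * letting nla_parse() overwrite duplicates in the tb[] array.
	 */
	#include <stdio.h>
	#include <netlink/attr.h>
	#include <rdma/rdma_netlink.h>

	static void dump_pd_ctx_ids(struct nlattr *head, int len)
	{
		struct nlattr *nla;
		int rem;

		nla_for_each_attr(nla, head, len, rem) {
			if (nla_type(nla) == RDMA_NLDEV_ATTR_RES_CTXN)
				printf("ctxn %u\n", nla_get_u32(nla));
		}
	}

The same walk works in-kernel with the identically named helpers from
<net/netlink.h>.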