2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@xxxxxxxxx>:
> Set the rdma capability using ib_client, because
> ib_device_get_by_netdev() returns NULL for some devices.
>
> Signed-off-by: Hyunchul Lee <hyc.lee@xxxxxxxxx>
> ---
>  fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
>  1 file changed, 83 insertions(+), 6 deletions(-)
>
> diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> index 7e57cbb0bb35..0820c6a9d75e 100644
> --- a/fs/ksmbd/transport_rdma.c
> +++ b/fs/ksmbd/transport_rdma.c
> @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024 * 1024;
>
>  static int smb_direct_max_outstanding_rw_ops = 8;
>
> +static LIST_HEAD(smb_direct_device_list);
> +static DEFINE_RWLOCK(smb_direct_device_lock);
> +
> +struct smb_direct_device {
> +	struct ib_device	*ib_dev;
> +	struct list_head	list;
> +};
> +
>  static struct smb_direct_listener {
>  	struct rdma_cm_id	*cm_id;
>  } smb_direct_listener;
> @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
>  	return ret;
>  }
>
> +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
> +{
> +	struct smb_direct_device *smb_dev;
> +
> +	if (!ib_dev->ops.get_netdev ||
> +	    ib_dev->node_type != RDMA_NODE_IB_CA ||
> +	    !rdma_frwr_is_supported(&ib_dev->attrs))
> +		return 0;
> +
> +	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
> +	if (!smb_dev)
> +		return -ENOMEM;
> +	smb_dev->ib_dev = ib_dev;
> +
> +	write_lock(&smb_direct_device_lock);
> +	list_add(&smb_dev->list, &smb_direct_device_list);
> +	write_unlock(&smb_direct_device_lock);
> +
> +	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
> +	return 0;
> +}
> +
> +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
> +					void *client_data)
> +{

When is this function called? Is RDMA shutdown needed here?

> +	struct smb_direct_device *smb_dev, *tmp;
> +
> +	write_lock(&smb_direct_device_lock);
> +	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
> +		if (smb_dev->ib_dev == ib_dev) {
> +			list_del(&smb_dev->list);
> +			kfree(smb_dev);
> +			break;
> +		}
> +	}
> +	write_unlock(&smb_direct_device_lock);
> +}
> +
> +static struct ib_client smb_direct_ib_client = {
> +	.name	= "ksmbd_smb_direct_ib",

Move smb_direct_ib_client_add and smb_direct_ib_client_remove here.

> +};
> +
>  int ksmbd_rdma_init(void)
>  {
>  	int ret;
>
>  	smb_direct_listener.cm_id = NULL;
>
> +	smb_direct_ib_client.add = smb_direct_ib_client_add;
> +	smb_direct_ib_client.remove = smb_direct_ib_client_remove;
> +	ret = ib_register_client(&smb_direct_ib_client);
> +	if (ret) {
> +		pr_err("failed to ib_register_client\n");
> +		return ret;
> +	}
> +
>  	/* When a client is running out of send credits, the credits are
>  	 * granted by the server's sending a packet using this queue.
>  	 * This avoids the situation that a clients cannot send packets
> @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
>  		destroy_workqueue(smb_direct_wq);
>  		smb_direct_wq = NULL;
>  	}
> +
> +	if (smb_direct_ib_client.add)
> +		ib_unregister_client(&smb_direct_ib_client);
> +	smb_direct_ib_client.add = NULL;

Why?
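
To be concrete: if the callbacks are wired up in the static definition, as
suggested above, the runtime assignments in ksmbd_rdma_init() and the use of
.add as a "registered" flag both go away. Untested sketch:

/* Untested: set the callbacks at definition time so ksmbd_rdma_init()
 * need not assign them and ksmbd_rdma_destroy() need not test .add.
 */
static struct ib_client smb_direct_ib_client = {
	.name	= "ksmbd_smb_direct_ib",
	.add	= smb_direct_ib_client_add,
	.remove	= smb_direct_ib_client_remove,
};

Assuming ksmbd_rdma_destroy() only runs after a successful ksmbd_rdma_init(),
it can then call ib_unregister_client() unconditionally.
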
>  	return 0;
>  }
>
>  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
>  {
> -	struct ib_device *ibdev;
> +	struct smb_direct_device *smb_dev;
> +	int i;
>  	bool rdma_capable = false;
>
> -	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
> -	if (ibdev) {
> -		if (rdma_frwr_is_supported(&ibdev->attrs))
> -			rdma_capable = true;
> -		ib_device_put(ibdev);
> +	read_lock(&smb_direct_device_lock);
> +	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
> +		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
> +			struct net_device *ndev;
> +
> +			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
> +							       i + 1);
> +			if (!ndev)
> +				continue;
> +
> +			if (ndev == netdev) {
> +				dev_put(ndev);
> +				rdma_capable = true;
> +				goto out;
> +			}
> +			dev_put(ndev);
> +		}
>  	}
> +out:
> +	read_unlock(&smb_direct_device_lock);
>  	return rdma_capable;
>  }
>
> --
> 2.25.1
>
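
One more nit on ksmbd_rdma_capable_netdev(): the open-coded "i + 1" port
indexing could come from rdma_for_each_port() instead. Rough, untested sketch
of a hypothetical helper (the name ib_dev_has_netdev is mine; it relies on
ops.get_netdev having been checked non-NULL in smb_direct_ib_client_add()):

static bool ib_dev_has_netdev(struct ib_device *ib_dev,
			      struct net_device *netdev)
{
	unsigned int port;
	bool found = false;

	/* rdma_for_each_port() walks the device's valid 1-based port
	 * numbers, so no manual "i + 1" conversion is needed.
	 */
	rdma_for_each_port(ib_dev, port) {
		struct net_device *ndev = ib_dev->ops.get_netdev(ib_dev, port);

		if (!ndev)
			continue;
		found = (ndev == netdev);
		dev_put(ndev);
		if (found)
			break;
	}
	return found;
}

ksmbd_rdma_capable_netdev() would then just walk smb_direct_device_list under
the read lock and return true on the first matching device, without the goto.
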