On Tue, Jan 4, 2022 at 9:45 AM, Namjae Jeon <linkinjeon@xxxxxxxxxx> wrote:
>
> 2022-01-03 9:02 GMT+09:00, Hyunchul Lee <hyc.lee@xxxxxxxxx>:
> > if CONFIG_LOCKDEP is enabled, the following
> > kernel warning message is generated because
> > rdma_accept() is called not under CM(Connection
> > Manager) handler.
> >
> > [ 63.211405 ] WARNING: CPU: 1 PID: 345 at
> > drivers/infiniband/core/cma.c:4405 rdma_accept+0x17a/0x350
> > [ 63.212080 ] RIP: 0010:rdma_accept+0x17a/0x350
> > ...
> > [ 63.214036 ] Call Trace:
> > [ 63.214098 ]  <TASK>
> > [ 63.214185 ]  smb_direct_accept_client+0xb4/0x170 [ksmbd]
> > [ 63.214412 ]  smb_direct_prepare+0x322/0x8c0 [ksmbd]
> > [ 63.214555 ]  ? rcu_read_lock_sched_held+0x3a/0x70
> > [ 63.214700 ]  ksmbd_conn_handler_loop+0x63/0x270 [ksmbd]
> > [ 63.214826 ]  ? ksmbd_conn_alive+0x80/0x80 [ksmbd]
> > [ 63.214952 ]  kthread+0x171/0x1a0
> > [ 63.215039 ]  ? set_kthread_struct+0x40/0x40
> > [ 63.215128 ]  ret_from_fork+0x22/0x30
> I could not understand why lockdep triggers this warning.
> Could you elaborate more?
>

rdma_accept() checks whether the handler_mutex is held; if it is not, this
warning is generated. The CM holds the handler_mutex before ksmbd's handler
callback is called. (A rough sketch of the check is included further below.)

> >
> > To avoid this, move creating a queue pair and accepting
> > a client from transport_ops->prepare() to
> > smb_direct_handle_connect_request().
> >
> > Signed-off-by: Hyunchul Lee <hyc.lee@xxxxxxxxx>
> > ---
> >  fs/ksmbd/transport_rdma.c | 97 +++++++++++++++++++++++----------------
> >  1 file changed, 57 insertions(+), 40 deletions(-)
> >
> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> > index f89b64e27836..b37e4b9580ae 100644
> > --- a/fs/ksmbd/transport_rdma.c
> > +++ b/fs/ksmbd/transport_rdma.c
> > @@ -568,6 +568,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
> >  		}
> >  		t->negotiation_requested = true;
> >  		t->full_packet_received = true;
> > +		enqueue_reassembly(t, recvmsg, 0);
> Is this a fix related to this patch?

Yes, this is required to receive the negotiation request from a client.
Before this patch, posting the buffer and waiting for the request were done
in the same function, so that function held a reference to the buffer and
did not need to append it to the queue. Now the wait happens later, in
smb_direct_prepare(), which takes the request from the reassembly queue.
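Coming back to the lockdep question above: the check in rdma_accept() looks
roughly like the sketch below. This is written from memory and simplified
(the struct is trimmed to the one field that matters here); it is not a
verbatim copy of drivers/infiniband/core/cma.c.

/*
 * Simplified sketch, NOT the real cma.c code.
 */
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <rdma/rdma_cm.h>

struct rdma_id_private {
	struct rdma_cm_id id;
	struct mutex handler_mutex;
	/* other members omitted */
};

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	/*
	 * With CONFIG_LOCKDEP=y this becomes a WARN_ON() when
	 * handler_mutex is not held -- the warning in the trace above.
	 * The CM core acquires handler_mutex before it invokes the
	 * cm_id event handler, so rdma_accept() is quiet when called
	 * from that handler; calling it from ksmbd's own kthread
	 * (smb_direct_prepare -> smb_direct_accept_client) trips it.
	 */
	lockdep_assert_held(&id_priv->handler_mutex);

	/* the actual accept work is omitted in this sketch */
	return 0;
}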
> >  		wake_up_interruptible(&t->wait_status);
> >  		break;
> >  	case SMB_DIRECT_MSG_DATA_TRANSFER: {
> > @@ -1594,19 +1595,13 @@ static int smb_direct_accept_client(struct smb_direct_transport *t)
> >  		pr_err("error at rdma_accept: %d\n", ret);
> >  		return ret;
> >  	}
> > -
> > -	wait_event_interruptible(t->wait_status,
> > -				 t->status != SMB_DIRECT_CS_NEW);
> > -	if (t->status != SMB_DIRECT_CS_CONNECTED)
> > -		return -ENOTCONN;
> >  	return 0;
> >  }
> >
> > -static int smb_direct_negotiate(struct smb_direct_transport *t)
> > +static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
> >  {
> >  	int ret;
> >  	struct smb_direct_recvmsg *recvmsg;
> > -	struct smb_direct_negotiate_req *req;
> >
> >  	recvmsg = get_free_recvmsg(t);
> >  	if (!recvmsg)
> > @@ -1616,44 +1611,20 @@ static int smb_direct_negotiate(struct smb_direct_transport *t)
> >  	ret = smb_direct_post_recv(t, recvmsg);
> >  	if (ret) {
> >  		pr_err("Can't post recv: %d\n", ret);
> > -		goto out;
> > +		goto out_err;
> >  	}
> >
> >  	t->negotiation_requested = false;
> >  	ret = smb_direct_accept_client(t);
> >  	if (ret) {
> >  		pr_err("Can't accept client\n");
> > -		goto out;
> > +		goto out_err;
> >  	}
> >
> >  	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
> > -
> > -	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
> > -	ret = wait_event_interruptible_timeout(t->wait_status,
> > -					       t->negotiation_requested ||
> > -					       t->status == SMB_DIRECT_CS_DISCONNECTED,
> > -					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
> > -	if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
> > -		ret = ret < 0 ? ret : -ETIMEDOUT;
> > -		goto out;
> > -	}
> > -
> > -	ret = smb_direct_check_recvmsg(recvmsg);
> > -	if (ret == -ECONNABORTED)
> > -		goto out;
> > -
> > -	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
> > -	t->max_recv_size = min_t(int, t->max_recv_size,
> > -				 le32_to_cpu(req->preferred_send_size));
> > -	t->max_send_size = min_t(int, t->max_send_size,
> > -				 le32_to_cpu(req->max_receive_size));
> > -	t->max_fragmented_send_size =
> > -		le32_to_cpu(req->max_fragmented_size);
> > -
> > -	ret = smb_direct_send_negotiate_response(t, ret);
> > -out:
> > -	if (recvmsg)
> > -		put_recvmsg(t, recvmsg);
> > +	return 0;
> > +out_err:
> > +	put_recvmsg(t, recvmsg);
> >  	return ret;
> >  }
> >
> > @@ -1890,6 +1861,47 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
> >  static int smb_direct_prepare(struct ksmbd_transport *t)
> >  {
> >  	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
> > +	struct smb_direct_recvmsg *recvmsg;
> > +	struct smb_direct_negotiate_req *req;
> > +	int ret;
> > +
> > +	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
> > +	ret = wait_event_interruptible_timeout(st->wait_status,
> > +					       st->negotiation_requested ||
> > +					       st->status == SMB_DIRECT_CS_DISCONNECTED,
> > +					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
> > +	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
> > +		return ret < 0 ? ret : -ETIMEDOUT;
> > +
> > +	recvmsg = get_first_reassembly(st);
> > +	if (!recvmsg)
> > +		return -ECONNABORTED;
> > +
> > +	ret = smb_direct_check_recvmsg(recvmsg);
> > +	if (ret == -ECONNABORTED)
> > +		goto out;
> > +
> > +	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
> > +	st->max_recv_size = min_t(int, st->max_recv_size,
> > +				  le32_to_cpu(req->preferred_send_size));
> > +	st->max_send_size = min_t(int, st->max_send_size,
> > +				  le32_to_cpu(req->max_receive_size));
> > +	st->max_fragmented_send_size =
> > +		le32_to_cpu(req->max_fragmented_size);
> > +
> > +	ret = smb_direct_send_negotiate_response(st, ret);
> > +out:
> > +	spin_lock_irq(&st->reassembly_queue_lock);
> > +	st->reassembly_queue_length--;
> > +	list_del(&recvmsg->list);
> > +	spin_unlock_irq(&st->reassembly_queue_lock);
> > +	put_recvmsg(st, recvmsg);
> > +
> > +	return ret;
> > +}
> > +
> > +static int smb_direct_connect(struct smb_direct_transport *st)
> > +{
> >  	int ret;
> >  	struct ib_qp_cap qp_cap;
> >
> > @@ -1911,13 +1923,11 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
> >  		return ret;
> >  	}
> >
> > -	ret = smb_direct_negotiate(st);
> > +	ret = smb_direct_prepare_negotiation(st);
> >  	if (ret) {
> >  		pr_err("Can't negotiate: %d\n", ret);
> >  		return ret;
> >  	}
> > -
> > -	st->status = SMB_DIRECT_CS_CONNECTED;
> >  	return 0;
> >  }
> >
> > @@ -1933,6 +1943,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
> >  static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
> >  {
> >  	struct smb_direct_transport *t;
> > +	int ret;
> >
> >  	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
> >  		ksmbd_debug(RDMA,
> > @@ -1945,11 +1956,17 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
> >  	if (!t)
> >  		return -ENOMEM;
> >
> > +	ret = smb_direct_connect(t);
> > +	if (ret) {
> > +		free_transport(t);
> > +		return ret;
> I think that it is better to use a goto statement to out after
> freeing the transport structure.

Okay, I will change it (roughly as in the sketch at the end of this mail).

> > +	}
> > +
> >  	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
> >  					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
> >  					      smb_direct_port);
> >  	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
> > -		int ret = PTR_ERR(KSMBD_TRANS(t)->handler);
> > +		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
> >
> >  		pr_err("Can't start thread\n");
> >  		free_transport(t);
> > --
> > 2.25.1
> >
> >

--
Thanks,
Hyunchul
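P.S. Regarding the goto comment above: for v2 I plan to restructure the
error path roughly like the sketch below. Only the changed tail of
smb_direct_handle_connect_request() is shown (the allocation and FRWR
capability check stay as they are today); this is a sketch, not the final
patch.

	ret = smb_direct_connect(t);
	if (ret)
		goto out_err;

	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
					      smb_direct_port);
	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
		pr_err("Can't start thread\n");
		goto out_err;
	}
	return 0;

out_err:
	/* free the transport in one place for both failure paths */
	free_transport(t);
	return ret;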