Add virtio_vsock_vqs_init() and virtio_vsock_vqs_del() with the code
that was in virtio_vsock_probe() and virtio_vsock_remove() to
initialize and delete VQs. These new functions will be used in the
next commit to support device suspend/resume.

Signed-off-by: Stefano Garzarella <sgarzare@xxxxxxxxxx>
---
 net/vmw_vsock/virtio_transport.c | 150 +++++++++++++++++--------------
 1 file changed, 84 insertions(+), 66 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index ba1c8cc0c467..31f4f6f40614 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -566,67 +566,28 @@ static void virtio_transport_rx_work(struct work_struct *work)
 	mutex_unlock(&vsock->rx_lock);
 }
 
-static int virtio_vsock_probe(struct virtio_device *vdev)
+static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
 {
-	vq_callback_t *callbacks[] = {
-		virtio_vsock_rx_done,
-		virtio_vsock_tx_done,
-		virtio_vsock_event_done,
-	};
+	struct virtio_device *vdev = vsock->vdev;
 	static const char * const names[] = {
 		"rx",
 		"tx",
 		"event",
 	};
-	struct virtio_vsock *vsock = NULL;
+	vq_callback_t *callbacks[] = {
+		virtio_vsock_rx_done,
+		virtio_vsock_tx_done,
+		virtio_vsock_event_done,
+	};
 	int ret;
 
-	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
-	if (ret)
-		return ret;
-
-	/* Only one virtio-vsock device per guest is supported */
-	if (rcu_dereference_protected(the_virtio_vsock,
-				      lockdep_is_held(&the_virtio_vsock_mutex))) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
-	if (!vsock) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	vsock->vdev = vdev;
-
-	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
-			      vsock->vqs, callbacks, names,
+	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
 			      NULL);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	virtio_vsock_update_guest_cid(vsock);
 
-	vsock->rx_buf_nr = 0;
-	vsock->rx_buf_max_nr = 0;
-	atomic_set(&vsock->queued_replies, 0);
-
-	mutex_init(&vsock->tx_lock);
-	mutex_init(&vsock->rx_lock);
-	mutex_init(&vsock->event_lock);
-	spin_lock_init(&vsock->send_pkt_list_lock);
-	INIT_LIST_HEAD(&vsock->send_pkt_list);
-	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
-	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
-	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
-	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
-
-	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
-		vsock->seqpacket_allow = true;
-
-	vdev->priv = vsock;
-
 	virtio_device_ready(vdev);
 
 	mutex_lock(&vsock->tx_lock);
@@ -643,30 +604,15 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	vsock->event_run = true;
 	mutex_unlock(&vsock->event_lock);
 
-	rcu_assign_pointer(the_virtio_vsock, vsock);
-
-	mutex_unlock(&the_virtio_vsock_mutex);
-
 	return 0;
-
-out:
-	kfree(vsock);
-	mutex_unlock(&the_virtio_vsock_mutex);
-	return ret;
 }
 
-static void virtio_vsock_remove(struct virtio_device *vdev)
+static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
 {
-	struct virtio_vsock *vsock = vdev->priv;
+	struct virtio_device *vdev = vsock->vdev;
 	struct virtio_vsock_pkt *pkt;
 
-	mutex_lock(&the_virtio_vsock_mutex);
-
-	vdev->priv = NULL;
-	rcu_assign_pointer(the_virtio_vsock, NULL);
-	synchronize_rcu();
-
-	/* Reset all connected sockets when the device disappear */
+	/* Reset all connected sockets when the VQs disappear */
 	vsock_for_each_connected_socket(&virtio_transport.transport,
 					virtio_vsock_reset_sock);
 
@@ -711,6 +657,78 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	/* Delete virtqueues and flush outstanding callbacks if any */
 	vdev->config->del_vqs(vdev);
+}
+
+static int virtio_vsock_probe(struct virtio_device *vdev)
+{
+	struct virtio_vsock *vsock = NULL;
+	int ret;
+
+	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
+	if (ret)
+		return ret;
+
+	/* Only one virtio-vsock device per guest is supported */
+	if (rcu_dereference_protected(the_virtio_vsock,
+				      lockdep_is_held(&the_virtio_vsock_mutex))) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
+	if (!vsock) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	vsock->vdev = vdev;
+
+	vsock->rx_buf_nr = 0;
+	vsock->rx_buf_max_nr = 0;
+	atomic_set(&vsock->queued_replies, 0);
+
+	mutex_init(&vsock->tx_lock);
+	mutex_init(&vsock->rx_lock);
+	mutex_init(&vsock->event_lock);
+	spin_lock_init(&vsock->send_pkt_list_lock);
+	INIT_LIST_HEAD(&vsock->send_pkt_list);
+	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+
+	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
+		vsock->seqpacket_allow = true;
+
+	vdev->priv = vsock;
+
+	ret = virtio_vsock_vqs_init(vsock);
+	if (ret < 0)
+		goto out;
+
+	rcu_assign_pointer(the_virtio_vsock, vsock);
+
+	mutex_unlock(&the_virtio_vsock_mutex);
+
+	return 0;
+
+out:
+	kfree(vsock);
+	mutex_unlock(&the_virtio_vsock_mutex);
+	return ret;
+}
+
+static void virtio_vsock_remove(struct virtio_device *vdev)
+{
+	struct virtio_vsock *vsock = vdev->priv;
+
+	mutex_lock(&the_virtio_vsock_mutex);
+
+	vdev->priv = NULL;
+	rcu_assign_pointer(the_virtio_vsock, NULL);
+	synchronize_rcu();
+
+	virtio_vsock_vqs_del(vsock);
 
 	/* Other works can be queued before 'config->del_vqs()', so we flush
 	 * all works before to free the vsock object to avoid use after free.
-- 
2.35.1
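
For context, the commit message says the next patch will use these helpers for
device suspend/resume. A rough, illustrative sketch of how freeze/restore
callbacks could build on them is below. This is not taken from the actual
follow-up patch: the callback names, the CONFIG_PM_SLEEP guard, and the exact
locking are assumptions mirroring the probe/remove paths above, and the code
is meant to live in virtio_transport.c so it relies on that file's existing
includes and globals.

	#ifdef CONFIG_PM_SLEEP
	/* Sketch only: quiesce the device by unpublishing it and tearing
	 * down the VQs with the new helper, keeping vdev->priv alive so
	 * restore can reuse the same virtio_vsock instance.
	 */
	static int virtio_vsock_freeze(struct virtio_device *vdev)
	{
		struct virtio_vsock *vsock = vdev->priv;

		mutex_lock(&the_virtio_vsock_mutex);

		/* Hide the device from new users while it is suspended */
		rcu_assign_pointer(the_virtio_vsock, NULL);
		synchronize_rcu();

		virtio_vsock_vqs_del(vsock);

		mutex_unlock(&the_virtio_vsock_mutex);

		return 0;
	}

	/* Sketch only: re-create the VQs with the new helper and publish
	 * the device again on resume.
	 */
	static int virtio_vsock_restore(struct virtio_device *vdev)
	{
		struct virtio_vsock *vsock = vdev->priv;
		int ret;

		mutex_lock(&the_virtio_vsock_mutex);

		ret = virtio_vsock_vqs_init(vsock);
		if (ret < 0)
			goto out;

		rcu_assign_pointer(the_virtio_vsock, vsock);

	out:
		mutex_unlock(&the_virtio_vsock_mutex);
		return ret;
	}
	#endif /* CONFIG_PM_SLEEP */

The point of the refactor is visible here: both callbacks can reuse the same
VQ setup/teardown code as probe/remove instead of duplicating it.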