With concurrency managed workqueues, use of dedicated workqueues can be replaced by using system_wq. Drop usb_tx_wq and usb_rx_wq by using system_wq. Since there are multiple work items per udev but different udevs do not need to be ordered, the increase in concurrency level from switching to system_wq should not break anything. cancel_work_sync() is used to ensure that work is not pending or executing on any CPU. Lastly, since all devices are suspended, which shuts down the work items before the driver can be unregistered, it is guaranteed that no work item is pending or executing by the time the exit path runs. Signed-off-by: Amitoj Kaur Chawla <amitoj1606@xxxxxxxxx> Acked-by: Tejun Heo <tj@xxxxxxxxxx> --- Only compile tested. drivers/staging/gdm724x/gdm_usb.c | 34 ++++++++-------------------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c index 92ea1a1..9db9b90 100644 --- a/drivers/staging/gdm724x/gdm_usb.c +++ b/drivers/staging/gdm724x/gdm_usb.c @@ -55,9 +55,6 @@ static const struct usb_device_id id_table[] = { MODULE_DEVICE_TABLE(usb, id_table); -static struct workqueue_struct *usb_tx_wq; -static struct workqueue_struct *usb_rx_wq; - static void do_tx(struct work_struct *work); static void do_rx(struct work_struct *work); @@ -476,7 +473,7 @@ static void gdm_usb_rcv_complete(struct urb *urb) if (!urb->status && r->callback) { spin_lock_irqsave(&rx->to_host_lock, flags); list_add_tail(&r->to_host_list, &rx->to_host_list); - queue_work(usb_rx_wq, &udev->work_rx.work); + schedule_work(&udev->work_rx.work); spin_unlock_irqrestore(&rx->to_host_lock, flags); } else { if (urb->status && udev->usb_state == PM_NORMAL) @@ -568,7 +565,7 @@ static void gdm_usb_send_complete(struct urb *urb) spin_lock_irqsave(&tx->lock, flags); udev->send_complete = 1; - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); } @@ -759,7 +756,7 
@@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len, spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t_sdu->list, &tx->sdu_list); - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); if (no_spc) @@ -796,7 +793,7 @@ static int gdm_usb_hci_send(void *priv_dev, void *data, int len, spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t->list, &tx->hci_list); - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; @@ -944,6 +941,9 @@ static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg) } spin_unlock_irqrestore(&rx->submit_lock, flags); + cancel_work_sync(&udev->work_tx.work); + cancel_work_sync(&udev->work_rx.work); + return 0; } @@ -981,7 +981,7 @@ static int gdm_usb_resume(struct usb_interface *intf) tx = &udev->tx; spin_lock_irqsave(&tx->lock, flags); - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; @@ -1005,14 +1005,6 @@ static int __init gdm_usb_lte_init(void) return -1; } - usb_tx_wq = create_workqueue("usb_tx_wq"); - if (!usb_tx_wq) - return -1; - - usb_rx_wq = create_workqueue("usb_rx_wq"); - if (!usb_rx_wq) - return -1; - return usb_register(&gdm_usb_lte_driver); } @@ -1021,16 +1013,6 @@ static void __exit gdm_usb_lte_exit(void) gdm_lte_event_exit(); usb_deregister(&gdm_usb_lte_driver); - - if (usb_tx_wq) { - flush_workqueue(usb_tx_wq); - destroy_workqueue(usb_tx_wq); - } - - if (usb_rx_wq) { - flush_workqueue(usb_rx_wq); - destroy_workqueue(usb_rx_wq); - } } module_init(gdm_usb_lte_init); -- 1.9.1 _______________________________________________ devel mailing list devel@xxxxxxxxxxxxxxxxxxxxxx http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel