On Thu, 2024-06-20 at 22:05 -0700, Allen Pais wrote: > Migrate tasklet APIs to the new bottom half workqueue mechanism. It > replaces all occurrences of tasklet usage with the appropriate workqueue > APIs throughout the jme driver. This transition ensures compatibility > with the latest design and enhances performance. > > Signed-off-by: Allen Pais <allen.lkml@xxxxxxxxx> > --- > drivers/net/ethernet/jme.c | 72 +++++++++++++++++++------------------- > drivers/net/ethernet/jme.h | 8 ++--- > 2 files changed, 40 insertions(+), 40 deletions(-) > > diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c > index b06e24562973..b1a92b851b3b 100644 > --- a/drivers/net/ethernet/jme.c > +++ b/drivers/net/ethernet/jme.c > @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme) > > if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { > if (dpi->attempt < dpi->cur) > - tasklet_schedule(&jme->rxclean_task); > + queue_work(system_bh_wq, &jme->rxclean_bh_work); > jme_set_rx_pcc(jme, dpi->attempt); > dpi->cur = dpi->attempt; > dpi->cnt = 0; > @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme) > } > > static void > -jme_pcc_tasklet(struct tasklet_struct *t) > +jme_pcc_bh_work(struct work_struct *work) > { > - struct jme_adapter *jme = from_tasklet(jme, t, pcc_task); > + struct jme_adapter *jme = from_work(jme, work, pcc_bh_work); > struct net_device *netdev = jme->dev; > > if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work) > jme_stop_shutdown_timer(jme); > > jme_stop_pcc_timer(jme); > - tasklet_disable(&jme->txclean_task); > - tasklet_disable(&jme->rxclean_task); > - tasklet_disable(&jme->rxempty_task); > + disable_work_sync(&jme->txclean_bh_work); > + disable_work_sync(&jme->rxclean_bh_work); > + disable_work_sync(&jme->rxempty_bh_work); > > if (netif_carrier_ok(netdev)) { > jme_disable_rx_engine(jme); > @@ -1304,7 +1304,7 @@ static void 
jme_link_change_work(struct work_struct *work) > rc = jme_setup_rx_resources(jme); > if (rc) { > pr_err("Allocating resources for RX error, Device STOPPED!\n"); > - goto out_enable_tasklet; > + goto out_enable_bh_work; > } > > rc = jme_setup_tx_resources(jme); > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work) > jme_start_shutdown_timer(jme); > } > > - goto out_enable_tasklet; > + goto out_enable_bh_work; > > err_out_free_rx_resources: > jme_free_rx_resources(jme); > -out_enable_tasklet: > - tasklet_enable(&jme->txclean_task); > - tasklet_enable(&jme->rxclean_task); > - tasklet_enable(&jme->rxempty_task); > +out_enable_bh_work: > + enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work); > + enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work); > + enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work); This will unconditionally schedule the rxempty_bh_work and is, AFAICS, a different behavior from that prior to this patch. In turn the rxempty_bh_work() will emit (almost unconditionally) the 'RX Queue Full!' message, so the change should be visible to the user. I think you should queue the work only if it was queued at cancel time. You likely need additional state to do that. Thanks, Paolo