On Mon, Aug 17, 2020 at 07:53:55PM +0530, Sumit Garg wrote:
> On Mon, 17 Aug 2020 at 19:27, Doug Anderson <dianders@xxxxxxxxxxxx> wrote:
> >
> > Hi,
> >
> > On Mon, Aug 17, 2020 at 5:27 AM Sumit Garg <sumit.garg@xxxxxxxxxx> wrote:
> > >
> > > Thanks for your suggestion, irq_work_schedule() looked even better
> > > without any overhead, see below:
> > >
> > > diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
> > > index 3082378..1eade89 100644
> > > --- a/include/linux/irq_work.h
> > > +++ b/include/linux/irq_work.h
> > > @@ -3,6 +3,7 @@
> > >  #define _LINUX_IRQ_WORK_H
> > >
> > >  #include <linux/smp_types.h>
> > > +#include <linux/workqueue.h>
> > >
> > >  /*
> > >   * An entry can be in one of four states:
> > > @@ -24,6 +25,11 @@ struct irq_work {
> > >         void (*func)(struct irq_work *);
> > >  };
> > >
> > > +struct irq_work_schedule {
> > > +       struct irq_work work;
> > > +       struct work_struct *sched_work;
> > > +};
> > > +
> > >  static inline
> > >  void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
> > >  {
> > > @@ -39,6 +45,7 @@ void init_irq_work(struct irq_work *work, void
> > > (*func)(struct irq_work *))
> > >
> > >  bool irq_work_queue(struct irq_work *work);
> > >  bool irq_work_queue_on(struct irq_work *work, int cpu);
> > > +bool irq_work_schedule(struct work_struct *sched_work);
> > >
> > >  void irq_work_tick(void);
> > >  void irq_work_sync(struct irq_work *work);
> > > diff --git a/kernel/irq_work.c b/kernel/irq_work.c
> > > index eca8396..3880316 100644
> > > --- a/kernel/irq_work.c
> > > +++ b/kernel/irq_work.c
> > > @@ -24,6 +24,8 @@
> > >  static DEFINE_PER_CPU(struct llist_head, raised_list);
> > >  static DEFINE_PER_CPU(struct llist_head, lazy_list);
> > >
> > > +static struct irq_work_schedule irq_work_sched;
> > > +
> > >  /*
> > >   * Claim the entry so that no one else will poke at it.
> > >   */
> > > @@ -79,6 +81,25 @@ bool irq_work_queue(struct irq_work *work)
> > >  }
> > >  EXPORT_SYMBOL_GPL(irq_work_queue);
> > >
> > > +static void irq_work_schedule_fn(struct irq_work *work)
> > > +{
> > > +       struct irq_work_schedule *irq_work_sched =
> > > +               container_of(work, struct irq_work_schedule, work);
> > > +
> > > +       if (irq_work_sched->sched_work)
> > > +               schedule_work(irq_work_sched->sched_work);
> > > +}
> > > +
> > > +/* Schedule work via irq work queue */
> > > +bool irq_work_schedule(struct work_struct *sched_work)
> > > +{
> > > +       init_irq_work(&irq_work_sched.work, irq_work_schedule_fn);
> > > +       irq_work_sched.sched_work = sched_work;
> > > +
> > > +       return irq_work_queue(&irq_work_sched.work);
> > > +}
> > > +EXPORT_SYMBOL_GPL(irq_work_schedule);
> >
> > Wait, howzat work?  There's a single global variable that you stash
> > the "sched_work" into with no locking?  What if two people schedule
> > work at the same time?
>
> This API is intended to be invoked from NMI context only, so I think
> there will be a single user at a time.

How can you possibly know that? This is library code, not a helper in a
driver.

Daniel.

> And we can make that explicit as well:
>
> +/* Schedule work via irq work queue */
> +bool irq_work_schedule(struct work_struct *sched_work)
> +{
> +       if (in_nmi()) {
> +               init_irq_work(&irq_work_sched.work, irq_work_schedule_fn);
> +               irq_work_sched.sched_work = sched_work;
> +
> +               return irq_work_queue(&irq_work_sched.work);
> +       }
> +
> +       return false;
> +}
> +EXPORT_SYMBOL_GPL(irq_work_schedule);
>
> -Sumit
>
> >
> > -Doug
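
[Editor's note] To make the objection above concrete, here is a minimal sketch of the caller-owned alternative Doug's question points toward: each user embeds its own irq_work next to its work_struct, so two concurrent callers never touch shared state. This is not part of the patch under review, and the names (struct nmi_deferred_work, nmi_deferred_init(), nmi_deferred_schedule()) are hypothetical; only init_irq_work(), irq_work_queue(), INIT_WORK() and schedule_work() are existing kernel APIs.

#include <linux/irq_work.h>
#include <linux/workqueue.h>

/* Caller-owned state: one instance per user, no global to race on. */
struct nmi_deferred_work {
	struct irq_work irq_work;
	struct work_struct work;
};

static void nmi_deferred_irq_fn(struct irq_work *iw)
{
	struct nmi_deferred_work *dw =
		container_of(iw, struct nmi_deferred_work, irq_work);

	/* Runs in hard-irq context, where scheduling work is allowed. */
	schedule_work(&dw->work);
}

static void nmi_deferred_init(struct nmi_deferred_work *dw, work_func_t fn)
{
	init_irq_work(&dw->irq_work, nmi_deferred_irq_fn);
	INIT_WORK(&dw->work, fn);
}

/*
 * Safe to call from NMI context; since every caller passes its own
 * nmi_deferred_work, concurrent users do not clobber each other.
 */
static bool nmi_deferred_schedule(struct nmi_deferred_work *dw)
{
	return irq_work_queue(&dw->irq_work);
}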