From: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>

v4.14.292-rt138-rc2 stable review patch.
If anyone has any objections, please let me know.

-----------

This reverts the PREEMPT_RT related changes to workqueue. It reverts the
extra locking, needed to protect the worker, which will soon become
obsolete. The sched/core.c changes, which were introduced in the original
commit, must remain because the following code will rely on them.

This is a preparation to pull in the PREEMPT_RT related changes which
were merged upstream.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Message-Id: <20220819092446.980320-5-bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@xxxxxxxxxx>
---
 kernel/workqueue.c | 60 ++++++++++------------------------------------
 1 file changed, 13 insertions(+), 47 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 856a2089f8632..cfca5027c3a21 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,11 +124,6 @@ enum {
  *    cpu or grabbing pool->lock is enough for read access.  If
  *    POOL_DISASSOCIATED is set, it's identical to L.
  *
- *    On RT we need the extra protection via rt_lock_idle_list() for
- *    the list manipulations against read access from
- *    wq_worker_sleeping(). All other places are nicely serialized via
- *    pool->lock.
- *
  * A: pool->attach_mutex protected.
  *
  * PL: wq_pool_mutex protected.
@@ -434,31 +429,6 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
                 if (({ assert_rcu_or_wq_mutex(wq); false; })) { }      \
                 else

-#ifdef CONFIG_PREEMPT_RT_BASE
-static inline void rt_lock_idle_list(struct worker_pool *pool)
-{
-        preempt_disable();
-}
-static inline void rt_unlock_idle_list(struct worker_pool *pool)
-{
-        preempt_enable();
-}
-static inline void sched_lock_idle_list(struct worker_pool *pool) { }
-static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
-#else
-static inline void rt_lock_idle_list(struct worker_pool *pool) { }
-static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
-static inline void sched_lock_idle_list(struct worker_pool *pool)
-{
-        spin_lock_irq(&pool->lock);
-}
-static inline void sched_unlock_idle_list(struct worker_pool *pool)
-{
-        spin_unlock_irq(&pool->lock);
-}
-#endif
-
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK

 static struct debug_obj_descr work_debug_descr;
@@ -865,16 +835,10 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
-        struct worker *worker;
-
-        rt_lock_idle_list(pool);
-
-        worker = first_idle_worker(pool);
+        struct worker *worker = first_idle_worker(pool);

         if (likely(worker))
                 wake_up_process(worker->task);
-
-        rt_unlock_idle_list(pool);
 }

 /**
@@ -903,7 +867,7 @@ void wq_worker_running(struct task_struct *task)
  */
 void wq_worker_sleeping(struct task_struct *task)
 {
-        struct worker *worker = kthread_data(task);
+        struct worker *next, *worker = kthread_data(task);
         struct worker_pool *pool;

         /*
@@ -920,18 +884,26 @@ void wq_worker_sleeping(struct task_struct *task)
                 return;

         worker->sleeping = 1;
+        spin_lock_irq(&pool->lock);

         /*
          * The counterpart of the following dec_and_test, implied mb,
          * worklist not empty test sequence is in insert_work().
          * Please read comment there.
+         *
+         * NOT_RUNNING is clear.  This means that we're bound to and
+         * running on the local cpu w/ rq lock held and preemption
+         * disabled, which in turn means that none else could be
+         * manipulating idle_list, so dereferencing idle_list without pool
+         * lock is safe.
          */
         if (atomic_dec_and_test(&pool->nr_running) &&
             !list_empty(&pool->worklist)) {
-                sched_lock_idle_list(pool);
-                wake_up_worker(pool);
-                sched_unlock_idle_list(pool);
+                next = first_idle_worker(pool);
+                if (next)
+                        wake_up_process(next->task);
         }
+        spin_unlock_irq(&pool->lock);
 }

 /**
@@ -1662,9 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
         worker->last_active = jiffies;

         /* idle_list is LIFO */
-        rt_lock_idle_list(pool);
         list_add(&worker->entry, &pool->idle_list);
-        rt_unlock_idle_list(pool);

         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
@@ -1697,9 +1667,7 @@ static void worker_leave_idle(struct worker *worker)
                 return;
         worker_clr_flags(worker, WORKER_IDLE);
         pool->nr_idle--;
-        rt_lock_idle_list(pool);
         list_del_init(&worker->entry);
-        rt_unlock_idle_list(pool);
 }

 static struct worker *alloc_worker(int node)
@@ -1865,9 +1833,7 @@ static void destroy_worker(struct worker *worker)
         pool->nr_workers--;
         pool->nr_idle--;

-        rt_lock_idle_list(pool);
         list_del_init(&worker->entry);
-        rt_unlock_idle_list(pool);
         worker->flags |= WORKER_DIE;
         wake_up_process(worker->task);
 }
-- 
2.37.3
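
For readers without the kernel tree at hand, here is a standalone userspace
sketch of the locking shape this revert restores in wq_worker_sleeping():
the last running worker takes the single pool lock and, if work is still
queued, wakes the first idle worker directly, with no separate
rt_lock_idle_list()/sched_lock_idle_list() helpers. This is an illustration
only, not kernel code; the toy_pool/toy_worker/toy_worker_sleeping names and
the pthread/C11-atomics plumbing are invented for the example.

/* Standalone sketch (not kernel code): the "wake the first idle worker
 * under one pool lock" pattern that the revert restores. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_worker {
        struct toy_worker *next;        /* LIFO idle list link */
        pthread_cond_t wake;            /* stand-in for wake_up_process() */
};

struct toy_pool {
        pthread_mutex_t lock;           /* plays the role of pool->lock */
        atomic_int nr_running;          /* workers currently running work */
        struct toy_worker *idle_list;   /* first idle worker, if any */
        int pending_work;               /* stand-in for pool->worklist */
};

/* Called when a worker is about to block.  If it was the last running
 * worker and work is still queued, wake the first idle worker.  The idle
 * list is only ever touched under pool->lock, so no extra idle-list
 * locking is required. */
static void toy_worker_sleeping(struct toy_pool *pool)
{
        pthread_mutex_lock(&pool->lock);
        /* atomic_fetch_sub() returns the previous value, so "== 1" means
         * the counter just dropped to zero (like atomic_dec_and_test()). */
        if (atomic_fetch_sub(&pool->nr_running, 1) == 1 &&
            pool->pending_work > 0) {
                struct toy_worker *idle = pool->idle_list;
                if (idle)
                        pthread_cond_signal(&idle->wake);
        }
        pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
        struct toy_pool pool = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .pending_work = 1,
        };
        struct toy_worker idle = { .wake = PTHREAD_COND_INITIALIZER };

        atomic_init(&pool.nr_running, 1);
        pool.idle_list = &idle;

        toy_worker_sleeping(&pool);     /* would signal 'idle' here */
        printf("nr_running is now %d\n", atomic_load(&pool.nr_running));
        return 0;
}

The point of the sketch is only the shape, not the details: one lock
(standing in for pool->lock) covers both the "last running worker" test and
the idle-list dereference, which is exactly what makes the RT-specific
idle-list locking unnecessary.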