Hello,

On Sat, May 19, 2018 at 02:16:06PM +0800, Lai Jiangshan wrote:
> pinning the task doesn't stop the kthread_data from
> getting freed, especially when worker_thread()
> free kthread_data by itself.

Ah, yeah, right, I was thinking about kthread struct itself.  I
applied the following to wq/for-4.18.

------ 8< ------
>From 197f6accacdaf9a0cf4da3c4ac8dd788633c0e38 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@xxxxxxxxxx>
Date: Mon, 21 May 2018 08:04:35 -0700
Subject: [PATCH] workqueue: Make sure struct worker is accessible for
 wq_worker_comm()

The worker struct could already be freed when wq_worker_comm() tries
to access it for reporting.  This patch protects PF_WQ_WORKER
modifications with wq_pool_attach_mutex and makes wq_worker_comm()
test the flag before dereferencing worker from kthread_data(), which
ensures that it only dereferences when the worker struct is valid.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reported-by: Lai Jiangshan <jiangshanlai@xxxxxxxxx>
Fixes: 6b59808bfe48 ("workqueue: Show the latest workqueue name in /proc/PID/{comm,stat,status}")
---
 kernel/workqueue.c | 58 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 24 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b4a39a1..60d6fd2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2213,6 +2213,16 @@ static void process_scheduled_works(struct worker *worker)
 	}
 }
 
+static void set_pf_worker(bool val)
+{
+	mutex_lock(&wq_pool_attach_mutex);
+	if (val)
+		current->flags |= PF_WQ_WORKER;
+	else
+		current->flags &= ~PF_WQ_WORKER;
+	mutex_unlock(&wq_pool_attach_mutex);
+}
+
 /**
  * worker_thread - the worker thread function
  * @__worker: self
@@ -2231,7 +2241,7 @@ static int worker_thread(void *__worker)
 	struct worker_pool *pool = worker->pool;
 
 	/* tell the scheduler that this is a workqueue worker */
-	worker->task->flags |= PF_WQ_WORKER;
+	set_pf_worker(true);
 woke_up:
 	spin_lock_irq(&pool->lock);
 
@@ -2239,7 +2249,7 @@ static int worker_thread(void *__worker)
 	if (unlikely(worker->flags & WORKER_DIE)) {
 		spin_unlock_irq(&pool->lock);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
-		worker->task->flags &= ~PF_WQ_WORKER;
+		set_pf_worker(false);
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_simple_remove(&pool->worker_ida, worker->id);
@@ -2342,7 +2352,7 @@ static int rescuer_thread(void *__rescuer)
 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
 	 * doesn't participate in concurrency management.
 	 */
-	rescuer->task->flags |= PF_WQ_WORKER;
+	set_pf_worker(true);
 repeat:
 	set_current_state(TASK_IDLE);
 
@@ -2434,7 +2444,7 @@ static int rescuer_thread(void *__rescuer)
 
 	if (should_stop) {
 		__set_current_state(TASK_RUNNING);
-		rescuer->task->flags &= ~PF_WQ_WORKER;
+		set_pf_worker(false);
 		return 0;
 	}
 
@@ -4580,8 +4590,6 @@ void show_workqueue_state(void)
 /* used to show worker information through /proc/PID/{comm,stat,status} */
 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
 {
-	struct worker *worker;
-	struct worker_pool *pool;
 	int off;
 
 	/* always show the actual comm */
@@ -4589,28 +4597,30 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
 	if (off < 0)
 		return;
 
-	/* stabilize worker pool association */
+	/* stabilize PF_WQ_WORKER and worker pool association */
 	mutex_lock(&wq_pool_attach_mutex);
 
-	worker = kthread_data(task);
-	pool = worker->pool;
+	if (task->flags & PF_WQ_WORKER) {
+		struct worker *worker = kthread_data(task);
+		struct worker_pool *pool = worker->pool;
 
-	if (pool) {
-		spin_lock_irq(&pool->lock);
-		/*
-		 * ->desc tracks information (wq name or set_worker_desc())
-		 * for the latest execution.  If current, prepend '+',
-		 * otherwise '-'.
-		 */
-		if (worker->desc[0] != '\0') {
-			if (worker->current_work)
-				scnprintf(buf + off, size - off, "+%s",
-					  worker->desc);
-			else
-				scnprintf(buf + off, size - off, "-%s",
-					  worker->desc);
+		if (pool) {
+			spin_lock_irq(&pool->lock);
+			/*
+			 * ->desc tracks information (wq name or
+			 * set_worker_desc()) for the latest execution.  If
+			 * current, prepend '+', otherwise '-'.
+			 */
+			if (worker->desc[0] != '\0') {
+				if (worker->current_work)
+					scnprintf(buf + off, size - off, "+%s",
+						  worker->desc);
+				else
+					scnprintf(buf + off, size - off, "-%s",
+						  worker->desc);
+			}
+			spin_unlock_irq(&pool->lock);
 		}
-		spin_unlock_irq(&pool->lock);
 	}
 
 	mutex_unlock(&wq_pool_attach_mutex);
-- 
2.9.5
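As a side note, the ordering the patch depends on can be modeled entirely in
userspace.  The sketch below is illustrative only (plain pthreads, not kernel
code; attach_mutex, struct fake_worker, worker_data and the *_model names are
all invented here): the worker clears its flag under the mutex before freeing
its own struct, and the reader checks that flag under the same mutex before
dereferencing, which mirrors what set_pf_worker() and wq_worker_comm() now do
around wq_pool_attach_mutex and PF_WQ_WORKER.

/*
 * Userspace model of the synchronization above -- NOT kernel code.
 * attach_mutex stands in for wq_pool_attach_mutex, pf_wq_worker for
 * PF_WQ_WORKER and worker_data for kthread_data().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_worker {
	char desc[32];
};

static pthread_mutex_t attach_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool pf_wq_worker;			/* models PF_WQ_WORKER   */
static struct fake_worker *worker_data;		/* models kthread_data() */

static void *worker_thread_model(void *arg)
{
	struct fake_worker *worker = calloc(1, sizeof(*worker));

	(void)arg;
	snprintf(worker->desc, sizeof(worker->desc), "events");

	/* models set_pf_worker(true): publish the flag under the mutex */
	pthread_mutex_lock(&attach_mutex);
	worker_data = worker;
	pf_wq_worker = true;
	pthread_mutex_unlock(&attach_mutex);

	usleep(1000);				/* "run work items" */

	/* models set_pf_worker(false): clear the flag before freeing */
	pthread_mutex_lock(&attach_mutex);
	pf_wq_worker = false;
	pthread_mutex_unlock(&attach_mutex);
	free(worker);				/* worker_data now dangles, but nobody will chase it */
	return NULL;
}

static void wq_worker_comm_model(void)
{
	/* models wq_worker_comm(): only dereference while the flag is set */
	pthread_mutex_lock(&attach_mutex);
	if (pf_wq_worker)
		printf("kworker -%s\n", worker_data->desc);
	else
		printf("not (or no longer) a workqueue worker\n");
	pthread_mutex_unlock(&attach_mutex);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker_thread_model, NULL);
	for (int i = 0; i < 5; i++) {
		wq_worker_comm_model();
		usleep(400);
	}
	pthread_join(&t, NULL);
	return 0;
}

Built with "cc -pthread", the reader should print the worker description while
the worker is alive and fall back to the plain message once the worker has
exited, without ever touching the freed struct.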