> +static inline bool task_is_traced(struct task_struct *task)
> +{
> +	bool traced = false;
> +
> +	if (task->state & __TASK_TRACED)
> +		return true;
> +#ifdef CONFIG_PREEMPT_RT_FULL
> +	/* in case the task is sleeping on tasklist_lock */
> +	raw_spin_lock_irq(&task->pi_lock);
> +	if (task->state & __TASK_TRACED)
> +		traced = true;
> +	else if (task->saved_state & __TASK_TRACED)
> +		traced = true;
> +	raw_spin_unlock_irq(&task->pi_lock);
> +#endif
> +	return traced;
> +}

Since this is a low-level function, maybe it's better to use
raw_spin_lock_irqsave()? That way a future caller can safely invoke
task_is_traced() with interrupts already disabled. Otherwise looks good.

Still, this is only half of the solution: the patch doesn't solve the race
in wait_task_inactive() (and all the other places which test both ->state
and ->saved_state without holding ->pi_lock).
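For illustration, a minimal sketch of the irqsave variant I have in mind,
assuming the same ->state / ->saved_state layout as in the quoted patch
(not a tested patch, just to show the idea):

static inline bool task_is_traced(struct task_struct *task)
{
	bool traced = false;

	if (task->state & __TASK_TRACED)
		return true;
#ifdef CONFIG_PREEMPT_RT_FULL
	{
		unsigned long flags;

		/* in case the task is sleeping on tasklist_lock */
		raw_spin_lock_irqsave(&task->pi_lock, flags);
		if ((task->state & __TASK_TRACED) ||
		    (task->saved_state & __TASK_TRACED))
			traced = true;
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}
#endif
	return traced;
}

The only difference is that the saved IRQ state is restored on unlock
instead of unconditionally re-enabling interrupts, so callers that already
run with interrupts off are not broken.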