Move task_rq_lock/unlock() to sched.h so they can be used elsewhere.

The livepatch code needs to lock each task's rq in order to safely
examine its stack and switch it to a new patch universe.

Signed-off-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
---
 kernel/sched/core.c  | 32 --------------------------------
 kernel/sched/sched.h | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b7..78d91e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -326,44 +326,12 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	}
 }
 
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	raw_spin_unlock(&rq->lock);
 }
 
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c..ae514c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1542,6 +1542,39 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 
 #endif
 
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p,
+				      unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p,
+				  unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 extern void print_cfs_stats(struct seq_file *m, int cpu);
-- 
2.1.0
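
For context, a minimal sketch (not part of this patch) of how a livepatch-side
caller might use task_rq_lock()/task_rq_unlock() once they are visible outside
kernel/sched/core.c. The klp_check_task_stack() and klp_switch_task_universe()
helper names, and the assumption that kernel/sched/sched.h is reachable from
the caller, are illustrative only:

	/*
	 * Illustrative only: klp_check_task_stack() and
	 * klp_switch_task_universe() are hypothetical helpers, and the
	 * caller is assumed to be able to see kernel/sched/sched.h.
	 */
	static int klp_try_switch_task(struct task_struct *task)
	{
		unsigned long flags;
		struct rq *rq;
		int ret;

		/*
		 * With rq->lock held, @task cannot be scheduled in or out,
		 * nor migrated to another rq.
		 */
		rq = task_rq_lock(task, &flags);

		/* A task that is not currently running has a stable stack. */
		ret = task_curr(task) ? -EBUSY : klp_check_task_stack(task);
		if (!ret)
			klp_switch_task_universe(task);

		task_rq_unlock(rq, task, &flags);

		return ret;
	}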