On Thu, Jan 6, 2011 at 12:57 AM, Mike Galbraith <efault@xxxxxx> wrote:
> sched: Add yield_to(task, preempt) functionality.
>
> Currently only implemented for fair class tasks.
>
> Add a yield_to_task() method to the fair scheduling class, allowing the
> caller of yield_to() to accelerate another thread in its thread group,
> task group, and sched class toward either its cpu, or potentially the
> caller's own cpu if the 'preempt' argument is also passed.
>
> Implemented via a scheduler hint, using cfs_rq->next to encourage the
> target being selected.
>
> Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
> Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Signed-off-by: Mike Galbraith <efault@xxxxxx>
>
> ---
>  include/linux/sched.h |    1
>  kernel/sched.c        |   56 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  kernel/sched_fair.c   |   52 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 109 insertions(+)
>
> Index: linux-2.6/include/linux/sched.h
> ===================================================================
> --- linux-2.6.orig/include/linux/sched.h
> +++ linux-2.6/include/linux/sched.h
> @@ -1056,6 +1056,7 @@ struct sched_class {
> 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
> 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
> 	void (*yield_task) (struct rq *rq);
> +	int (*yield_to_task) (struct task_struct *p, int preempt);
>
> 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
>
> Index: linux-2.6/kernel/sched.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched.c
> +++ linux-2.6/kernel/sched.c
> @@ -5327,6 +5327,62 @@ void __sched yield(void)
>  }
>  EXPORT_SYMBOL(yield);
>
> +/**
> + * yield_to - yield the current processor to another thread in
> + * your thread group, or accelerate that thread toward the
> + * processor it's on.
> + *
> + * It's the caller's job to ensure that the target task struct
> + * can't go away on us before we can do any checks.
> + */
> +void __sched yield_to(struct task_struct *p, int preempt)
> +{
> +	struct task_struct *curr = current;
> +	struct rq *rq, *p_rq;
> +	unsigned long flags;
> +	int yield = 0;
> +
> +	local_irq_save(flags);
> +	rq = this_rq();
> +
> +again:
> +	p_rq = task_rq(p);
> +	double_rq_lock(rq, p_rq);
> +	while (task_rq(p) != p_rq) {
> +		double_rq_unlock(rq, p_rq);
> +		goto again;
> +	}
> +
> +	if (!curr->sched_class->yield_to_task)
> +		goto out;
> +
> +	if (curr->sched_class != p->sched_class)
> +		goto out;
> +

to be clearer?

	if (task_running(p_rq, p) || p->state != TASK_RUNNING)

> +		goto out;
> +
> +	if (!same_thread_group(p, curr))
> +		goto out;
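
For context on how this would be consumed, here is a minimal, hypothetical caller sketch, not part of the patch: a thread that believes another thread in its group holds a contended lock hands over its timeslice. The name boost_lock_holder() and the 'holder' bookkeeping are assumptions for illustration; only yield_to() itself, with the void-returning signature quoted above, comes from the patch.

#include <linux/sched.h>

/*
 * Illustrative sketch only.  The caller is assumed to already hold a
 * reference that keeps 'holder' from going away, as the kerneldoc for
 * yield_to() requires.
 */
static void boost_lock_holder(struct task_struct *holder)
{
	/*
	 * Per the changelog: without 'preempt' this accelerates 'holder'
	 * toward the cpu it is already on; with 'preempt' it may instead
	 * be pulled toward the caller's own cpu.  The checks inside
	 * yield_to() make this a no-op if the two tasks do not share a
	 * thread group and sched class.
	 */
	yield_to(holder, 1);
}

In a virtualization setting (this is the kvm list, after all), 'holder' would presumably be the task backing the vCPU believed to hold a contended guest lock.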