The patch titled

     sched: cleanup, remove task_t, convert to struct task_struct

has been added to the -mm tree.  Its filename is

     sched-cleanup-remove-task_t-convert-to-struct-task_struct.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt
to find out what to do about this

------------------------------------------------------
Subject: sched: cleanup, remove task_t, convert to struct task_struct
From: Ingo Molnar <mingo@xxxxxxx>

cleanup: remove task_t and convert all the uses to struct task_struct.

I introduced it for the scheduler years ago and it was a mistake.

Conversion was mostly scripted; the result was reviewed and all
secondary whitespace and style impact (if any) was fixed up by hand.

Depends on the "sched: clean up fallout of recent changes" patch.

Goes to the tail of the scheduler queue.
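
The scripted part of the conversion boils down to a whole-word
replacement along these lines (an illustrative sketch only, not the
exact script that was used):

	# replace the old typedef with its underlying struct type;
	# \b keeps other identifiers such as migration_req_t untouched
	sed -i -e 's/\btask_t\b/struct task_struct/g' $(grep -rl '\btask_t\b' .)

The remaining hand-editing is the re-wrapping of prototypes that no
longer fit in 80 columns, which is where the whitespace-only hunks
below come from.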
Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 arch/alpha/kernel/process.c      |    2 
 arch/ia64/kernel/mca.c           |   10 -
 arch/ia64/kernel/smpboot.c       |    2 
 arch/mips/kernel/entry.S         |    2 
 arch/mips/kernel/mips-mt.c       |    6 
 arch/um/kernel/tt/process_kern.c |    2 
 drivers/char/tty_io.c            |    2 
 fs/eventpoll.c                   |    4 
 include/asm-ia64/thread_info.h   |    2 
 include/asm-m32r/system.h        |    2 
 include/asm-sh/system.h          |    2 
 include/linux/sched.h            |   45 +++---
 kernel/capability.c              |    8 -
 kernel/exit.c                    |   24 ++-
 kernel/fork.c                    |   18 +-
 kernel/hrtimer.c                 |    2 
 kernel/pid.c                     |    6 
 kernel/ptrace.c                  |    6 
 kernel/sched.c                   |  188 +++++++++++++++--------------
 kernel/timer.c                   |    2 
 kernel/workqueue.c               |    2 
 mm/oom_kill.c                    |    8 -
 22 files changed, 178 insertions(+), 167 deletions(-)

diff -puN arch/alpha/kernel/process.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/alpha/kernel/process.c
--- a/arch/alpha/kernel/process.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/alpha/kernel/process.c
@@ -475,7 +475,7 @@ out:
  */
 
 unsigned long
-thread_saved_pc(task_t *t)
+thread_saved_pc(struct task_struct *t)
 {
 	unsigned long base = (unsigned long)task_stack_page(t);
 	unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
diff -puN arch/ia64/kernel/mca.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/ia64/kernel/mca.c
--- a/arch/ia64/kernel/mca.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/ia64/kernel/mca.c
@@ -679,7 +679,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *t
  */
 
 static void
-ia64_mca_modify_comm(const task_t *previous_current)
+ia64_mca_modify_comm(const struct task_struct *previous_current)
 {
 	char *p, comm[sizeof(current->comm)];
 	if (previous_current->pid)
@@ -710,7 +710,7 @@ ia64_mca_modify_comm(const task_t *previ
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
-static task_t *
+static struct task_struct *
 ia64_mca_modify_original_stack(struct pt_regs *regs,
 		const struct switch_stack *sw,
 		struct ia64_sal_os_state *sos,
@@ -720,7 +720,7 @@ ia64_mca_modify_original_stack(struct pt
 	ia64_va va;
 	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
 	const pal_min_state_area_t *ms = sos->pal_min_state;
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	struct pt_regs *old_regs;
 	struct switch_stack *old_sw;
 	unsigned size = sizeof(struct pt_regs) +
@@ -1024,7 +1024,7 @@ ia64_mca_handler(struct pt_regs *regs, s
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
 		&sos->proc_state_param;
 	int recover, cpu = smp_processor_id();
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
@@ -1353,7 +1353,7 @@ ia64_init_handler(struct pt_regs *regs,
 {
 	static atomic_t slaves;
 	static atomic_t monarchs;
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	int cpu = smp_processor_id();
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
diff -puN arch/ia64/kernel/smpboot.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/ia64/kernel/smpboot.c
--- a/arch/ia64/kernel/smpboot.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/ia64/kernel/smpboot.c
@@ -125,7 +125,7 @@ extern void __devinit calibrate_delay (v
 extern void start_ap (void);
 extern unsigned long ia64_iobase;
 
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
 
 /*
 * State for each CPU
diff -puN arch/mips/kernel/entry.S~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/mips/kernel/entry.S
--- a/arch/mips/kernel/entry.S~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/mips/kernel/entry.S
@@ -66,7 +66,7 @@ need_resched:
 #endif
 
 FEXPORT(ret_from_fork)
-	jal	schedule_tail		# a0 = task_t *prev
+	jal	schedule_tail		# a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
 	local_irq_disable		# make sure need_resched and
diff -puN arch/mips/kernel/mips-mt.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/mips/kernel/mips-mt.c
--- a/arch/mips/kernel/mips-mt.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/mips/kernel/mips-mt.c
@@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0;
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
 
@@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffi
 	cpumask_t new_mask;
 	cpumask_t effective_mask;
 	int retval;
-	task_t *p;
+	struct task_struct *p;
 
 	if (len < sizeof(new_mask))
 		return -EINVAL;
@@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffi
 	unsigned int real_len;
 	cpumask_t mask;
 	int retval;
-	task_t *p;
+	struct task_struct *p;
 
 	real_len = sizeof(mask);
 	if (len < real_len)
diff -puN arch/um/kernel/tt/process_kern.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct arch/um/kernel/tt/process_kern.c
--- a/arch/um/kernel/tt/process_kern.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/arch/um/kernel/tt/process_kern.c
@@ -119,7 +119,7 @@ void suspend_new_thread(int fd)
 		panic("read failed in suspend_new_thread, err = %d", -err);
 }
 
-void schedule_tail(task_t *prev);
+void schedule_tail(struct task_struct *prev);
 
 static void new_thread_handler(int sig)
 {
diff -puN drivers/char/tty_io.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct drivers/char/tty_io.c
--- a/drivers/char/tty_io.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/drivers/char/tty_io.c
@@ -2344,7 +2344,7 @@ static int fionbio(struct file *file, in
 
 static int tiocsctty(struct tty_struct *tty, int arg)
 {
-	task_t *p;
+	struct task_struct *p;
 
 	if (current->signal->leader &&
 	    (current->signal->session == tty->session))
diff -puN fs/eventpoll.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct fs/eventpoll.c
--- a/fs/eventpoll.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/fs/eventpoll.c
@@ -120,7 +120,7 @@ struct epoll_filefd {
 */
 struct wake_task_node {
 	struct list_head llink;
-	task_t *task;
+	struct task_struct *task;
 	wait_queue_head_t *wq;
 };
 
@@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll
 {
 	int wake_nests = 0;
 	unsigned long flags;
-	task_t *this_task = current;
+	struct task_struct *this_task = current;
 	struct list_head *lsthead = &psw->wake_task_list, *lnk;
 	struct wake_task_node *tncur;
 	struct wake_task_node tnode;
diff -puN include/asm-ia64/thread_info.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct include/asm-ia64/thread_info.h
--- a/include/asm-ia64/thread_info.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/include/asm-ia64/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct()	((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif /* !__ASSEMBLY */
diff -puN include/asm-m32r/system.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct include/asm-m32r/system.h
--- a/include/asm-m32r/system.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/include/asm-m32r/system.h
@@ -18,7 +18,7 @@
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 *
- * `next' and `prev' should be task_t, but it isn't always defined
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
 */
 
 #define switch_to(prev, next, last)  do { \
diff -puN include/asm-sh/system.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct include/asm-sh/system.h
--- a/include/asm-sh/system.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/include/asm-sh/system.h
@@ -12,7 +12,7 @@
 */
 
 #define switch_to(prev, next, last) do {				\
-	task_t *__last;							\
+	struct task_struct *__last;					\
 	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
 	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
 	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
diff -puN include/linux/sched.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct include/linux/sched.h
--- a/include/linux/sched.h~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/include/linux/sched.h
@@ -182,11 +182,9 @@ extern unsigned long weighted_cpuload(co
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
-
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
@@ -381,7 +379,7 @@ struct signal_struct {
 	wait_queue_head_t wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t *curr_target;
+	struct task_struct *curr_target;
 
 	/* shared signal handling: */
 	struct sigpending shared_pending;
@@ -1039,9 +1037,9 @@ static inline void put_task_struct(struc
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1050,7 +1048,8 @@ static inline int set_cpus_allowed(task_
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1066,16 +1065,16 @@ static inline void idle_task_exit(void)
 #endif
 
 extern void sched_idle_next(void);
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
@@ -1132,8 +1131,8 @@ extern void FASTCALL(wake_up_new_task(st
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
 
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1238,17 +1237,17 @@ extern NORET_TYPE void do_group_exit(int
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p)	do { } while (0)
 #endif
@@ -1274,13 +1273,13 @@ extern void wait_task_inactive(task_t *
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
diff -puN kernel/capability.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/capability.c
--- a/kernel/capability.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/capability.c
@@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_head
 	int ret = 0;
 	pid_t pid;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	struct __user_cap_data_struct data;
 
 	if (get_user(version, &header->version))
@@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, k
 			      kernel_cap_t *inheritable,
 			      kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap
 			       kernel_cap_t *inheritable,
 			       kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_head
 {
 	kernel_cap_t inheritable, permitted, effective;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	int ret;
 	pid_t pid;
 
diff -puN kernel/exit.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/exit.c
--- a/kernel/exit.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/exit.c
@@ -138,12 +138,12 @@ static void delayed_put_task_struct(stru
 void release_task(struct task_struct * p)
 {
 	int zap_leader;
-	task_t *leader;
+	struct task_struct *leader;
 repeat:
 	atomic_dec(&p->user->processes);
 	write_lock_irq(&tasklist_lock);
 	ptrace_unlink(p);
-	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
+	BUG_ON(!list_empty(&p->ptrace_list)||!list_empty(&p->ptrace_children));
 	__exit_signal(p);
 
 	/*
@@ -212,7 +212,7 @@ out:
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
 
@@ -585,7 +585,8 @@ static void exit_mm(struct task_struct *
 	mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 {
 	/*
 	 * Make sure we're not reparenting to ourselves and that
@@ -595,7 +596,8 @@ static inline void choose_new_parent(tas
 	p->real_parent = reaper;
 }
 
-static void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -1012,7 +1014,7 @@ asmlinkage void sys_exit_group(int error
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1053,7 +1055,7 @@ static int eligible_child(pid_t pid, int
 	return 1;
 }
 
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
@@ -1083,7 +1085,7 @@ static int wait_noreap_copyout(task_t *p
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1245,8 +1247,8 @@ static int wait_task_zombie(task_t *p, i
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
-			     struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+			     int noreap, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1360,7 +1362,7 @@ bail_ref:
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
diff -puN kernel/fork.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/fork.c
--- a/kernel/fork.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/fork.c
@@ -922,13 +922,13 @@ asmlinkage long sys_set_tid_address(int
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
-static task_t *copy_process(unsigned long clone_flags,
-			    unsigned long stack_start,
-			    struct pt_regs *regs,
-			    unsigned long stack_size,
-			    int __user *parent_tidptr,
-			    int __user *child_tidptr,
-			    int pid)
+static struct task_struct *copy_process(unsigned long clone_flags,
+					unsigned long stack_start,
+					struct pt_regs *regs,
+					unsigned long stack_size,
+					int __user *parent_tidptr,
+					int __user *child_tidptr,
+					int pid)
 {
 	int retval;
 	struct task_struct *p = NULL;
@@ -1255,9 +1255,9 @@ struct pt_regs * __devinit __attribute__
 	return regs;
 }
 
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
 {
-	task_t *task;
+	struct task_struct *task;
 	struct pt_regs regs;
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
diff -puN kernel/hrtimer.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/hrtimer.c
--- a/kernel/hrtimer.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/hrtimer.c
@@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer
 	return HRTIMER_NORESTART;
 }
 
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
diff -puN kernel/pid.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/pid.c
--- a/kernel/pid.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/pid.c
@@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr)
 	return NULL;
 }
 
-int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
 {
 	struct pid_link *link;
 	struct pid *pid;
 
@@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, en
 	return 0;
 }
 
-void fastcall detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid_link *link;
 	struct pid *pid;
 
@@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(s
 /*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
-task_t *find_task_by_pid_type(int type, int nr)
+struct task_struct *find_task_by_pid_type(int type, int nr)
 {
 	return pid_task(find_pid(nr), type);
 }
diff -puN kernel/ptrace.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/ptrace.c
--- a/kernel/ptrace.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/ptrace.c
@@ -28,7 +28,7 @@
 *
 * Must be called with the tasklist lock write-held.
 */
-void __ptrace_link(task_t *child, task_t *new_parent)
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
 	BUG_ON(!list_empty(&child->ptrace_list));
 	if (child->parent == new_parent)
@@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
-void ptrace_untrace(task_t *child)
+void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
 	if (child->state == TASK_TRACED) {
@@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child)
 *
 * Must be called with the tasklist lock write-held.
 */
-void __ptrace_unlink(task_t *child)
+void __ptrace_unlink(struct task_struct *child)
 {
 	BUG_ON(!child->ptrace);
 
diff -puN kernel/sched.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/sched.c
--- a/kernel/sched.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/sched.c
@@ -179,7 +179,7 @@ static unsigned int static_prio_timeslic
 	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
-static inline unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(struct task_struct *p)
 {
 	return static_prio_timeslice(p->static_prio);
 }
@@ -236,7 +236,7 @@ struct runqueue {
 	unsigned long expired_timestamp;
 	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
+	struct task_struct *curr, *idle;
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int best_expired_prio;
@@ -249,7 +249,7 @@ struct runqueue {
 	int active_balance;
 	int push_cpu;
 
-	task_t *migration_thread;
+	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
 
@@ -299,16 +299,16 @@ static DEFINE_PER_CPU(struct runqueue, r
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 	return rq->curr == p;
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
@@ -318,7 +318,7 @@ static inline void finish_lock_switch(ru
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 #ifdef CONFIG_SMP
 	return p->oncpu;
@@ -327,7 +327,7 @@ static inline int task_running(runqueue_
 #endif
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -344,7 +344,7 @@ static inline void prepare_lock_switch(r
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -366,7 +366,7 @@ static inline void finish_lock_switch(ru
 * interrupts.  Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
 
@@ -546,7 +546,7 @@ static inline runqueue_t *this_rq_lock(v
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
-static inline void sched_info_dequeued(task_t *t)
+static inline void sched_info_dequeued(struct task_struct *t)
 {
 	t->sched_info.last_queued = 0;
 }
 
@@ -556,7 +556,7 @@ static inline void sched_info_dequeued(t
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
-static void sched_info_arrive(task_t *t)
+static void sched_info_arrive(struct task_struct *t)
 {
 	unsigned long now = jiffies, delta_jiffies = 0;
 
@@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t)
 * the timestamp if it is already not set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
-static inline void sched_info_queued(task_t *t)
+static inline void sched_info_queued(struct task_struct *t)
 {
 	if (unlikely(sched_info_on()))
 		if (!t->sched_info.last_queued)
@@ -597,7 +597,7 @@ static inline void sched_info_queued(tas
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 */
-static inline void sched_info_depart(task_t *t)
+static inline void sched_info_depart(struct task_struct *t)
 {
 	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
 
@@ -610,7 +610,8 @@ static inline void sched_info_depart(tas
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
-static inline void __sched_info_switch(task_t *prev, task_t *next)
+static inline void
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct runqueue *rq = task_rq(prev);
 
@@ -625,14 +626,17 @@ static inline void __sched_info_switch(t
 	if (next != rq->idle)
 		sched_info_arrive(next);
 }
-static inline void sched_info_switch(task_t *prev, task_t *next)
+
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	if (unlikely(sched_info_on()))
 		__sched_info_switch(prev, next);
 }
+
 #else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
+# define sched_info_queued(t)		do { } while (0)
+# define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
@@ -686,7 +690,7 @@ static inline void enqueue_task_head(str
 *
 * Both properties are important to certain workloads.
 */
-static int effective_prio(task_t *p)
+static int effective_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
@@ -725,7 +729,7 @@ static int effective_prio(task_t *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
 	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static void set_load_weight(task_t *p)
+static void set_load_weight(struct task_struct *p)
 {
 	if (rt_task(p)) {
 #ifdef CONFIG_SMP
@@ -743,23 +747,25 @@ static void set_load_weight(task_t *p)
 	p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
 }
 
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load += p->load_weight;
 }
 
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
 
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running++;
 	inc_raw_weighted_load(rq, p);
 }
 
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
 	dec_raw_weighted_load(rq, p);
@@ -768,7 +774,7 @@ static inline void dec_nr_running(task_t
 /*
 * __activate_task - move a task to the runqueue.
 */
-static void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(struct task_struct *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
@@ -781,13 +787,13 @@ static void __activate_task(task_t *p, r
 /*
 * __activate_idle_task - move idle task to the _front_ of runqueue.
 */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)
 {
 	enqueue_task_head(p, rq->active);
 	inc_nr_running(p, rq);
 }
 
-static int recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
 	/* Caller must always ensure 'now >= p->timestamp' */
 	unsigned long sleep_time = now - p->timestamp;
@@ -859,7 +865,7 @@ static int recalc_task_prio(task_t *p, u
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
 */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
 {
 	unsigned long long now;
 
@@ -926,7 +932,7 @@ static void deactivate_task(struct task_
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 #endif
 
-static void resched_task(task_t *p)
+static void resched_task(struct task_struct *p)
 {
 	int cpu;
 
@@ -947,7 +953,7 @@ static void resched_task(task_t *p)
 		smp_send_reschedule(cpu);
 }
 #else
-static inline void resched_task(task_t *p)
+static inline void resched_task(struct task_struct *p)
 {
 	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
@@ -958,7 +964,7 @@ static inline void resched_task(task_t *
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
-inline int task_curr(const task_t *p)
+inline int task_curr(const struct task_struct *p)
 {
 	return cpu_curr(task_cpu(p)) == p;
 }
@@ -973,7 +979,7 @@ unsigned long weighted_cpuload(const int
 typedef struct {
 	struct list_head list;
 
-	task_t *task;
+	struct task_struct *task;
 	int dest_cpu;
 
 	struct completion done;
@@ -983,7 +989,8 @@ typedef struct {
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int
+migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
 {
 	runqueue_t *rq = task_rq(p);
 
@@ -1013,7 +1020,7 @@ static int migrate_task(task_t *p, int d
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
-void wait_task_inactive(task_t *p)
+void wait_task_inactive(struct task_struct *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
 
@@ -1047,7 +1054,7 @@ repeat:
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
-void kick_process(task_t *p)
+void kick_process(struct task_struct *p)
 {
 	int cpu;
 
@@ -1250,7 +1257,7 @@ nextlevel:
 * Returns the CPU we should wake onto.
 */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(int cpu, struct task_struct *p)
 {
 	cpumask_t tmp;
 	struct sched_domain *sd;
 
@@ -1273,7 +1280,7 @@ static int wake_idle(int cpu, task_t *p)
 	return cpu;
 }
 #else
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(int cpu, struct task_struct *p)
 {
 	return cpu;
 }
@@ -1293,7 +1300,7 @@ static inline int wake_idle(int cpu, tas
 *
 * returns failure only if the task is already active.
 */
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
 	int cpu, this_cpu, success = 0;
 	unsigned long flags;
 
@@ -1451,14 +1458,14 @@ out:
 	return success;
 }
 
-int fastcall wake_up_process(task_t *p)
+int fastcall wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
 			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -1467,7 +1474,7 @@ int fastcall wake_up_state(task_t *p, un
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
 
@@ -1530,7 +1537,7 @@ void fastcall sched_fork(task_t *p, int
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	int this_cpu, cpu;
 
@@ -1613,7 +1620,7 @@ void fastcall wake_up_new_task(task_t *p
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
-void fastcall sched_exit(task_t *p)
+void fastcall sched_exit(struct task_struct *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
 
@@ -1647,7 +1654,7 @@ void fastcall sched_exit(task_t *p)
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)
 {
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
}
@@ -1668,7 +1675,7 @@ static inline void prepare_task_switch(r
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
 
@@ -1706,7 +1713,7 @@ static inline void finish_task_switch(ru
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
-asmlinkage void schedule_tail(task_t *prev)
+asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	runqueue_t *rq = this_rq();
 
@@ -1723,8 +1730,9 @@ asmlinkage void schedule_tail(task_t *pr
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+static inline struct task_struct *
+context_switch(runqueue_t *rq, struct task_struct *prev,
+	       struct task_struct *next)
 {
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
@@ -1885,7 +1893,7 @@ static void double_lock_balance(runqueue
 * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
 * the cpu_allowed mask is restored.
 */
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 {
 	migration_req_t req;
 	runqueue_t *rq;
 
@@ -1928,9 +1936,9 @@ void sched_exec(void)
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
-static
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+static void pull_task(runqueue_t *src_rq, prio_array_t *src_array,
+		      struct task_struct *p, runqueue_t *this_rq,
+		      prio_array_t *this_array, int this_cpu)
 {
 	dequeue_task(p, src_array);
 	dec_nr_running(p, src_rq);
 
@@ -1951,7 +1959,7 @@ void pull_task(runqueue_t *src_rq, prio_
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
 static
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,
		     struct sched_domain *sd, enum idle_type idle,
		     int *all_pinned)
 {
@@ -2000,8 +2008,8 @@ static int move_tasks(runqueue_t *this_r
	    best_prio_seen, skip_for_load;
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
+	struct task_struct *tmp;
 	long rem_load_move;
-	task_t *tmp;
 
 	if (max_nr_move == 0 || max_load_move == 0)
 		goto out;
@@ -2053,7 +2061,7 @@ skip_bitmap:
 	head = array->queue + idx;
 	curr = head->prev;
 skip_queue:
-	tmp = list_entry(curr, task_t, run_list);
+	tmp = list_entry(curr, struct task_struct, run_list);
 
 	curr = curr->prev;
 
@@ -2767,7 +2775,7 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 * Bank in p->sched_time the ns elapsed since the last tick or switch.
 */
 static inline void
-update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)
+update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now)
 {
 	p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
 }
@@ -2776,7 +2784,7 @@ update_cpu_clock(task_t *p, runqueue_t *
 * Return current->sched_time plus any more ns on the sched_clock
 * that have not yet been banked.
 */
-unsigned long long current_sched_time(const task_t *p)
+unsigned long long current_sched_time(const struct task_struct *p)
 {
 	unsigned long long ns;
 	unsigned long flags;
 
@@ -2893,9 +2901,9 @@ void account_steal_time(struct task_stru
 void scheduler_tick(void)
 {
 	unsigned long long now = sched_clock();
+	struct task_struct *p = current;
 	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
-	task_t *p = current;
 
 	update_cpu_clock(p, rq, now);
 
@@ -3027,7 +3035,8 @@ static void wake_sleeping_dependent(int
 * utilize, if another task runs on a sibling. This models the
 * slowdown effect of other tasks running on siblings:
 */
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+static inline unsigned long
+smt_slice(struct task_struct *p, struct sched_domain *sd)
 {
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
@@ -3038,7 +3047,8 @@ static inline unsigned long smt_slice(ta
 * acquire their lock. As we only trylock the normal locking order does not
 * need to be obeyed.
 */
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+static int
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int ret = 0, i;
 
@@ -3058,8 +3068,8 @@ static int dependent_sleeper(int this_cp
 		return 0;
 
 	for_each_cpu_mask(i, sd->span) {
+		struct task_struct *smt_curr;
 		runqueue_t *smt_rq;
-		task_t *smt_curr;
 
 		if (i == this_cpu)
 			continue;
@@ -3105,7 +3115,7 @@ static inline void wake_sleeping_depende
 {
 }
 static inline int
-dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
 {
 	return 0;
 }
@@ -3154,11 +3164,11 @@ static inline int interactive_sleep(enum
 */
 asmlinkage void __sched schedule(void)
 {
+	struct task_struct *prev, *next;
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
 	int cpu, idx, new_prio;
-	task_t *prev, *next;
 	prio_array_t *array;
 	long *switch_count;
 	runqueue_t *rq;
 
@@ -3251,7 +3261,7 @@ need_resched_nonpreemptible:
 
 	idx = sched_find_first_bit(array->bitmap);
 	queue = array->queue + idx;
-	next = list_entry(queue->next, task_t, run_list);
+	next = list_entry(queue->next, struct task_struct, run_list);
 
 	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
 		unsigned long long delta = now - next->timestamp;
 
@@ -3706,7 +3716,7 @@ long fastcall __sched sleep_on_timeout(w
 }
 EXPORT_SYMBOL(sleep_on_timeout);
 
-void set_user_nice(task_t *p, long nice)
+void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, new_prio, delta;
 	unsigned long flags;
 
@@ -3763,7 +3773,7 @@ EXPORT_SYMBOL(set_user_nice);
 * @p: task
 * @nice: nice value
 */
-int can_nice(const task_t *p, const int nice)
+int can_nice(const struct task_struct *p, const int nice)
 {
 	/* convert nice value [19,-20] to rlimit style value [1,40] */
 	int nice_rlim = 20 - nice;
 
@@ -3822,7 +3832,7 @@ asmlinkage long sys_nice(int increment)
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
-int task_prio(const task_t *p)
+int task_prio(const struct task_struct *p)
 {
 	return p->prio - MAX_RT_PRIO;
 }
@@ -3831,7 +3841,7 @@ int task_prio(const task_t *p)
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
-int task_nice(const task_t *p)
+int task_nice(const struct task_struct *p)
 {
 	return TASK_NICE(p);
 }
@@ -3850,7 +3860,7 @@ int idle_cpu(int cpu)
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
-task_t *idle_task(int cpu)
+struct task_struct *idle_task(int cpu)
 {
 	return cpu_rq(cpu)->idle;
 }
 
@@ -3859,7 +3869,7 @@ task_t *idle_task(int cpu)
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
 
@@ -4038,8 +4048,8 @@ asmlinkage long sys_sched_setparam(pid_t
 */
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
+	struct task_struct *p;
 	int retval = -EINVAL;
-	task_t *p;
 
 	if (pid < 0)
 		goto out_nounlock;
 
@@ -4066,8 +4076,8 @@ out_nounlock:
 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
 	struct sched_param lp;
+	struct task_struct *p;
 	int retval = -EINVAL;
-	task_t *p;
 
 	if (!param || pid < 0)
 		goto out_nounlock;
 
@@ -4100,9 +4110,9 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 {
-	task_t *p;
-	int retval;
 	cpumask_t cpus_allowed;
+	struct task_struct *p;
+	int retval;
 
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
 
@@ -4188,8 +4198,8 @@ cpumask_t cpu_possible_map __read_mostly
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
 {
+	struct task_struct *p;
 	int retval;
-	task_t *p;
 
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
 
@@ -4470,9 +4480,9 @@ asmlinkage long sys_sched_get_priority_m
 asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
+	struct task_struct *p;
 	int retval = -EINVAL;
 	struct timespec t;
-	task_t *p;
 
 	if (pid < 0)
 		goto out_nounlock;
 
@@ -4519,12 +4529,13 @@ static inline struct task_struct *younge
 	return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t *p)
+static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+
+static void show_task(struct task_struct *p)
 {
-	task_t *relative;
-	unsigned state;
+	struct task_struct *relative;
 	unsigned long free = 0;
-	static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+	unsigned state;
 
 	printk("%-13.13s ", p->comm);
 	state = p->state ? __ffs(p->state) + 1 : 0;
@@ -4575,7 +4586,7 @@ static void show_task(task_t *p)
 
 void show_state(void)
 {
-	task_t *g, *p;
+	struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
 	printk("\n"
@@ -4608,7 +4619,7 @@ void show_state(void)
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
-void __devinit init_idle(task_t *idle, int cpu)
+void __devinit init_idle(struct task_struct *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
 
@@ -4671,7 +4682,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
-int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	unsigned long flags;
 	migration_req_t req;
 
@@ -4938,7 +4949,7 @@ void idle_task_exit(void)
 	mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *p)
+static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
 	struct runqueue *rq = cpu_rq(dead_cpu);
 
@@ -4973,9 +4984,8 @@ static void migrate_dead_tasks(unsigned
 			struct list_head *list = &rq->arrays[arr].queue[i];
 
 			while (!list_empty(list))
-				migrate_dead(dead_cpu,
-					     list_entry(list->next, task_t,
-							run_list));
+				migrate_dead(dead_cpu, list_entry(list->next,
+					     struct task_struct, run_list));
 		}
 	}
 }
@@ -6675,7 +6685,7 @@ void normalize_rt_tasks(void)
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
-task_t *curr_task(int cpu)
+struct task_struct *curr_task(int cpu)
 {
 	return cpu_curr(cpu);
 }
 
@@ -6695,7 +6705,7 @@ task_t *curr_task(int cpu)
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
-void set_curr_task(int cpu, task_t *p)
+void set_curr_task(int cpu, struct task_struct *p)
 {
 	cpu_curr(cpu) = p;
 }
diff -puN kernel/timer.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/timer.c
--- a/kernel/timer.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/timer.c
@@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void)
 
 static void process_timeout(unsigned long __data)
 {
-	wake_up_process((task_t *)__data);
+	wake_up_process((struct task_struct *)__data);
 }
 
 /**
diff -puN kernel/workqueue.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct kernel/workqueue.c
--- a/kernel/workqueue.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/kernel/workqueue.c
@@ -51,7 +51,7 @@ struct cpu_workqueue_struct {
 	wait_queue_head_t work_done;
 
 	struct workqueue_struct *wq;
-	task_t *thread;
+	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
diff -puN mm/oom_kill.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct mm/oom_kill.c
--- a/mm/oom_kill.c~sched-cleanup-remove-task_t-convert-to-struct-task_struct
+++ a/mm/oom_kill.c
@@ -225,7 +225,7 @@ static struct task_struct *select_bad_pr
 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
 * we select a process with CAP_SYS_RAW_IO set).
 */
-static void __oom_kill_task(task_t *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, const char *message)
 {
 	if (p->pid == 1) {
 		WARN_ON(1);
 
@@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, c
 	force_sig(SIGKILL, p);
 }
 
-static int oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(struct task_struct *p, const char *message)
 {
 	struct mm_struct *mm;
-	task_t * g, * q;
+	struct task_struct *g, *q;
 
 	mm = p->mm;
 
@@ -316,7 +316,7 @@ static int oom_kill_process(struct task_
 */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	task_t *p;
+	struct task_struct *p;
 	unsigned long points = 0;
 
 	if (printk_ratelimit()) {
_

Patches currently in -mm which might be from mingo@xxxxxxx are

origin.patch
work-around-ppc64-bootup-bug-by-making-mutex-debugging-save-restore-irqs.patch
kernel-kernel-cpuc-to-mutexes.patch
time-clocksource-infrastructure.patch
fix-ide-deadlock-in-error-reporting-code.patch
disable-debugging-version-of-write_lock.patch
fix-drivers-mfd-ucb1x00-corec-irq-probing-bug.patch
cifs-remove-f_ownerlock-use.patch
lock-validator-fix-ns83820c-irq-flags-bug.patch
revert-gregkh-pci-pci-test-that-drivers-properly-call-pci_set_master.patch
x86-re-enable-generic-numa.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-tidy.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-arch_vma_name-fix.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-vs-x86_64-mm-reliable-stack-trace-support-i386.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-vs-x86_64-mm-reliable-stack-trace-support-i386-2.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-vs-x86_64-mm-reliable-stack-trace-support-i386-2-revert-maxmem-change.patch
spin-rwlock-init-cleanups.patch
sched-fix-smt-nice-lock-contention-and-optimization.patch
sched-fix-smt-nice-lock-contention-and-optimization-tidy.patch
sched-comment-bitmap-size-accounting.patch
sched-fix-interactive-ceiling-code.patch
sched-cpu-hotplug-race-vs-set_cpus_allowed.patch
sched-implement-smpnice.patch
sched-protect-calculation-of-max_pull-from-integer-wrap.patch
sched-store-weighted-load-on-up.patch
sched-add-discrete-weighted-cpu-load-function.patch
sched-prevent-high-load-weight-tasks-suppressing-balancing.patch
sched-improve-stability-of-smpnice-load-balancing.patch
sched-improve-smpnice-load-balancing-when-load-per-task.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing-fix.patch
sched-modify-move_tasks-to-improve-load-balancing-outcomes.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks-fix-2.patch
sched_domain-handle-kmalloc-failure.patch
sched_domain-handle-kmalloc-failure-fix.patch
sched_domain-dont-use-gfp_atomic.patch
sched_domain-use-kmalloc_node.patch
sched_domain-allocate-sched_group-structures-dynamically.patch
sched-mc-smt-power-savings-sched-policy.patch
sched-uninline-task_rq_lock.patch
bug-if-setscheduler-is-called-from-interrupt-context.patch
sched-clean-up-fallout-of-recent-changes.patch
sched-cleanup-remove-task_t-convert-to-struct-task_struct.patch
sched-cleanup-convert-schedc-internal-typedefs-to-structs.patch
sched-add-above-background-load-function.patch
mm-implement-swap-prefetching.patch
pi-futex-futex-code-cleanups.patch
pi-futex-robust-futex-docs-fix.patch
pi-futex-introduce-debug_check_no_locks_freed.patch
pi-futex-introduce-warn_on_smp.patch
pi-futex-add-plist-implementation.patch
pi-futex-scheduler-support-for-pi.patch
pi-futex-rt-mutex-core.patch
pi-futex-rt-mutex-docs.patch
pi-futex-rt-mutex-docs-update.patch
pi-futex-rt-mutex-debug.patch
pi-futex-rt-mutex-tester.patch
pi-futex-rt-mutex-tester-fix.patch
pi-futex-rt-mutex-futex-api.patch
pi-futex-futex_lock_pi-futex_unlock_pi-support.patch
pi-futex-futex_lock_pi-futex_unlock_pi-support-fix.patch
fix-rt-mutex-defaults-and-dependencies.patch
drop-tasklist-lock-in-do_sched_setscheduler.patch
rtmutex-modify-rtmutex-tester-to-test-the-setscheduler.patch
rtmutex-propagate-priority-settings-into-pi-lock-chains.patch
rtmutex-propagate-priority-settings-into-pi-lock-chains-fix.patch
futex_requeue-optimization.patch
genirq-rename-desc-handler-to-desc-chip.patch
genirq-rename-desc-handler-to-desc-chip-power-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix-2.patch
genirq-rename-desc-handler-to-desc-chip-terminate_irqs-fix.patch
genirq-sem2mutex-probe_sem-probing_active.patch
genirq-cleanup-merge-irq_affinity-into-irq_desc.patch
genirq-cleanup-remove-irq_descp.patch
genirq-cleanup-remove-irq_descp-fix.patch
genirq-cleanup-remove-fastcall.patch
genirq-cleanup-misc-code-cleanups.patch
genirq-cleanup-reduce-irq_desc_t-use-mark-it-obsolete.patch
genirq-cleanup-include-linux-irqh.patch
genirq-cleanup-merge-irq_dir-smp_affinity_entry-into-irq_desc.patch
genirq-cleanup-merge-pending_irq_cpumask-into-irq_desc.patch
genirq-cleanup-turn-arch_has_irq_per_cpu-into-config_irq_per_cpu.patch
genirq-debug-better-debug-printout-in-enable_irq.patch
genirq-add-retrigger-irq-op-to-consolidate-hw_irq_resend.patch
genirq-doc-comment-include-linux-irqh-structures.patch
genirq-doc-handle_irq_event-and-__do_irq-comments.patch
genirq-cleanup-no_irq_type-cleanups.patch
genirq-doc-add-design-documentation.patch
genirq-add-genirq-sw-irq-retrigger.patch
genirq-add-irq_noprobe-support.patch
genirq-add-irq_norequest-support.patch
genirq-add-irq_noautoen-support.patch
genirq-update-copyrights.patch
genirq-core.patch
genirq-core-revert-noisiness-on-spurious-interrupts.patch
genirq-msi-fixes-2.patch
genirq-add-irq-chip-support.patch
genirq-add-irq-chip-support-fix.patch
genirq-add-irq-chip-support-misroute-irq-dont-call-desc-chip-end.patch
genirq-add-handle_bad_irq.patch
genirq-add-irq-wake-power-management-support.patch
genirq-add-sa_trigger-support.patch
genirq-cleanup-no_irq_type-no_irq_chip-rename.patch
genirq-more-verbose-debugging-on-unexpected-irq-vectors.patch
genirq-ia64-build-fix.patch
genirq-add-irq_type_sense_mask.patch
genirq-add-irq-chip-support-fasteoi-handler-handle-interrupt-disabling.patch
genirq-irq-document-what-an-irq-is.patch
genirq-add-chip-eoi-fastack-fasteoi-core.patch
genirq-add-chip-eoi-fastack-fasteoi-fix.patch
lockdep-floppyc-irq-release-fix.patch
lockdep-acpi-locking-fix.patch
lockdep-console_init-after-local_irq_enable.patch
lockdep-add-is_module_address.patch
lockdep-add-print_ip_sym.patch
lockdep-add-per_cpu_offset.patch
lockdep-add-disable-enable_irq_lockdep-api.patch
lockdep-add-local_irq_enable_in_hardirq-api.patch
lockdep-add-declare_completion_onstack-api.patch
lockdep-clean-up-rwsems.patch
lockdep-rename-debug_warn_on.patch
lockdep-remove-debug_bug_on.patch
lockdep-remove-mutex-deadlock-checking-code.patch
lockdep-better-lock-debugging.patch
lockdep-mutex-section-binutils-workaround.patch
lockdep-locking-init-debugging-improvement.patch
lockdep-beautify-x86_64-stacktraces.patch
lockdep-x86_64-document-stack-frame-internals.patch
lockdep-i386-remove-multi-entry-backtraces.patch
lockdep-stacktrace-subsystem-core.patch
lockdep-s390-config_frame_pointer-support.patch
lockdep-stacktrace-subsystem-i386-support.patch
lockdep-stacktrace-subsystem-x86_64-support.patch
lockdep-stacktrace-subsystem-s390-support.patch
lockdep-irqtrace-subsystem-core.patch
lockdep-irqtrace-subsystem-docs.patch
lockdep-irqtrace-subsystem-i386-support.patch
lockdep-irqtrace-subsystem-x86_64-support.patch
lockdep-irqtrace-subsystem-s390-support.patch
lockdep-locking-api-self-tests.patch
lockdep-irqtrace-cleanup-of-include-asm-i386-irqflagsh.patch
lockdep-irqtrace-cleanup-of-include-asm-x86_64-irqflagsh.patch
lockdep-core.patch
lockdep-design-docs.patch
lockdep-procfs.patch
lockdep-prove-rwsem-locking-correctness.patch
lockdep-prove-spinlock-rwlock-locking-correctness.patch
lockdep-prove-mutex-locking-correctness.patch
lockdep-kconfig.patch
lockdep-print-all-lock-classes-on-sysrq-d.patch
lockdep-x86_64-early-init.patch
lockdep-x86-smp-alternatives-workaround.patch
lockdep-do-not-recurse-in-printk.patch
lockdep-fix-rt_hash_lock_sz.patch
lockdep-s390-turn-validator-off-in-machine-check-handler.patch
lockdep-enable-on-i386.patch
lockdep-enable-on-x86_64.patch
lockdep-enable-on-s390.patch
lockdep-annotate-direct-io.patch
lockdep-annotate-serial.patch
lockdep-annotate-dcache.patch
lockdep-annotate-i_mutex.patch
lockdep-annotate-futex.patch
lockdep-annotate-genirq.patch
lockdep-annotate-waitqueues.patch
lockdep-annotate-mm.patch
lockdep-annotate-serio.patch
lockdep-annotate-skb_queue_head_init.patch
lockdep-annotate-timer-base-locks.patch
lockdep-annotate-scheduler-runqueue-locks.patch
lockdep-annotate-hrtimer-base-locks.patch
lockdep-annotate-sock_lock_init.patch
lockdep-annotate-af_unix-locking.patch
lockdep-annotate-bh_lock_sock.patch
lockdep-annotate-ieee1394-skb-queue-head-locking.patch
lockdep-annotate-mmap_sem.patch
lockdep-annotate-sunrpc-code.patch
lockdep-annotate-ntfs-locking-rules.patch
lockdep-annotate-the-quota-code.patch
lockdep-annotate-usbfs.patch
lockdep-annotate-sound-core-seq-seq_portsc.patch
lockdep-annotate-sound-core-seq-seq_devicec.patch
lockdep-annotate-8390c-disable_irq.patch
lockdep-annotate-3c59xc-disable_irq.patch
lockdep-annotate-forcedethc-disable_irq.patch
lockdep-annotate-enable_in_hardirq.patch
lockdep-annotate-on-stack-completions.patch
lockdep-annotate-qeth-driver.patch
lockdep-annotate-s_lock.patch
lockdep-annotate-sb-s_umount.patch
lockdep-annotate-slab-code.patch
lockdep-annotate-blkdev-nesting.patch