There are two functions for handling robust lists during task exit:
exit_robust_list() and compat_exit_robust_list(). The first one handles
either 64bit or 32bit lists, depending on whether the kernel is 64bit or
32bit. compat_exit_robust_list() only exists in 64bit kernels that
support 32bit syscalls, and handles 32bit lists.

For the new syscall set_robust_list2(), 64bit kernels need to be able to
handle 32bit lists whether or not they support 32bit syscalls, so make
compat_exit_robust_list() exist regardless of CONFIG_COMPAT. Also, use
explicitly sized types; otherwise, on a 32bit kernel, both
exit_robust_list() and compat_exit_robust_list() would be exactly the
same function, with neither of them dealing with 64bit robust lists.

Signed-off-by: André Almeida <andrealmeid@xxxxxxxxxx>
---
 include/linux/compat.h  | 12 +----------
 include/linux/futex.h   | 11 +++++++++++
 include/linux/sched.h   |  2 +-
 kernel/futex/core.c     | 44 ++++++++++++++++++++++++++---------------
 kernel/futex/syscalls.c |  4 ++--
 5 files changed, 43 insertions(+), 30 deletions(-)

diff --git a/include/linux/compat.h b/include/linux/compat.h
index 56cebaff0c91..968a9135ff48 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -385,16 +385,6 @@ struct compat_ifconf {
 	compat_caddr_t	ifcbuf;
 };
 
-struct compat_robust_list {
-	compat_uptr_t			next;
-};
-
-struct compat_robust_list_head {
-	struct compat_robust_list	list;
-	compat_long_t			futex_offset;
-	compat_uptr_t			list_op_pending;
-};
-
 #ifdef CONFIG_COMPAT_OLD_SIGACTION
 struct compat_old_sigaction {
 	compat_uptr_t			sa_handler;
@@ -672,7 +662,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
 		struct compat_siginfo __user *, int,
 		struct compat_rusage __user *);
 asmlinkage long
-compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+compat_sys_set_robust_list(struct robust_list_head32 __user *head,
 			   compat_size_t len);
 asmlinkage long
 compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b70df27d7e85..8217b5ebdd9c 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -53,6 +53,17 @@ union futex_key {
 #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
 
 #ifdef CONFIG_FUTEX
+
+struct robust_list32 {
+	u32 next;
+};
+
+struct robust_list_head32 {
+	struct robust_list32 list;
+	s32 futex_offset;
+	u32 list_op_pending;
+};
+
 enum {
 	FUTEX_STATE_OK,
 	FUTEX_STATE_EXITING,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 66b311fbd5d6..da573c16e93e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,7 +1292,7 @@ struct task_struct {
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
-	struct compat_robust_list_head __user *compat_robust_list;
+	struct robust_list_head32 __user *compat_robust_list;
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index ebdd76b4ecbb..cc2d0f00cd6b 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -768,13 +768,14 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 	return 0;
 }
 
+#ifdef CONFIG_64BIT
 /*
  * Walk curr->robust_list (very carefully, it's a userspace list!)
  * and mark any locks found there dead, and notify any waiters.
  *
 * We silently return on any sign of list-walking problem.
 */
-static void exit_robust_list(struct task_struct *curr)
+static void exit_robust_list64(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
@@ -835,8 +836,13 @@ static void exit_robust_list(struct task_struct *curr)
 				   curr, pip, HANDLE_DEATH_PENDING);
 	}
 }
+#else
+static void exit_robust_list64(struct task_struct *curr)
+{
+	pr_warn("32bit kernel should not allow ROBUST_LIST_64BIT\n");
+}
+#endif
 
-#ifdef CONFIG_COMPAT
 static void __user *futex_uaddr(struct robust_list __user *entry,
 				compat_long_t futex_offset)
 {
@@ -850,13 +856,13 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
  */
 static inline int
-compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-		compat_uptr_t __user *head, unsigned int *pi)
+fetch_robust_entry32(u32 *uentry, struct robust_list __user **entry,
+		u32 __user *head, unsigned int *pi)
 {
 	if (get_user(*uentry, head))
 		return -EFAULT;
 
-	*entry = compat_ptr((*uentry) & ~1);
+	*entry = (void __user *)(unsigned long)((*uentry) & ~1);
 	*pi = (unsigned int)(*uentry) & 1;
 
 	return 0;
@@ -868,21 +874,21 @@ compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **ent
  *
  * We silently return on any sign of list-walking problem.
  */
-static void compat_exit_robust_list(struct task_struct *curr)
+static void exit_robust_list32(struct task_struct *curr,
+			       struct robust_list_head32 __user *head)
 {
-	struct compat_robust_list_head __user *head = curr->compat_robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
 	unsigned int next_pi;
-	compat_uptr_t uentry, next_uentry, upending;
-	compat_long_t futex_offset;
+	u32 uentry, next_uentry, upending;
+	s32 futex_offset;
 	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
 	 * sys_set_robust_list()):
 	 */
-	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+	if (fetch_robust_entry32(&uentry, &entry, &head->list.next, &pi))
 		return;
 	/*
 	 * Fetch the relative futex offset:
@@ -893,7 +899,7 @@ static void compat_exit_robust_list(struct task_struct *curr)
 	 * Fetch any possibly pending lock-add first, and handle it
 	 * if it exists:
 	 */
-	if (compat_fetch_robust_entry(&upending, &pending,
+	if (fetch_robust_entry32(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
 
@@ -903,8 +909,8 @@ static void compat_exit_robust_list(struct task_struct *curr)
 		 * Fetch the next entry in the list before calling
 		 * handle_futex_death:
 		 */
-		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
-					(compat_uptr_t __user *)&entry->next, &next_pi);
+		rc = fetch_robust_entry32(&next_uentry, &next_entry,
+					(u32 __user *)&entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
 		 */
@@ -935,7 +941,6 @@ static void compat_exit_robust_list(struct task_struct *curr)
 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
 	}
 }
-#endif
 
 #ifdef CONFIG_FUTEX_PI
@@ -1018,14 +1023,21 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
 
 static void futex_cleanup(struct task_struct *tsk)
 {
+#ifdef CONFIG_64BIT
 	if (unlikely(tsk->robust_list)) {
-		exit_robust_list(tsk);
+		exit_robust_list64(tsk);
 		tsk->robust_list = NULL;
 	}
+#else
+	if (unlikely(tsk->robust_list)) {
+		exit_robust_list32(tsk, (struct robust_list_head32 __user *)tsk->robust_list);
+		tsk->robust_list = NULL;
+	}
+#endif
 
 #ifdef CONFIG_COMPAT
 	if (unlikely(tsk->compat_robust_list)) {
-		compat_exit_robust_list(tsk);
+		exit_robust_list32(tsk, tsk->compat_robust_list);
 		tsk->compat_robust_list = NULL;
 	}
 #endif
diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
index 4b6da9116aa6..dba193dfd216 100644
--- a/kernel/futex/syscalls.c
+++ b/kernel/futex/syscalls.c
@@ -440,7 +440,7 @@ SYSCALL_DEFINE4(futex_requeue,
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE2(set_robust_list,
-		struct compat_robust_list_head __user *, head,
+		struct robust_list_head32 __user *, head,
 		compat_size_t, len)
 {
 	if (unlikely(len != sizeof(*head)))
@@ -455,7 +455,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
 			compat_uptr_t __user *, head_ptr,
 			compat_size_t __user *, len_ptr)
 {
-	struct compat_robust_list_head __user *head;
+	struct robust_list_head32 __user *head;
 	unsigned long ret;
 	struct task_struct *p;
 
-- 
2.47.1
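
As an aside for reviewers, here is a minimal userspace sketch of the ABI
that this exit path consumes. It is illustrative only and not part of
the patch: set_robust_list() has no glibc wrapper, so it is invoked
through syscall(2), and the structs come from the uapi <linux/futex.h>.
Built as 32bit code, this head has exactly the layout that the new
struct robust_list_head32 mirrors on the kernel side:

	#include <linux/futex.h>	/* struct robust_list_head */
	#include <stddef.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * One list head per task. An empty list points back at itself;
	 * the kernel's exit-time walk stops when an entry equals
	 * &head.list.
	 */
	static struct robust_list_head head = {
		.list		 = { .next = &head.list },
		.futex_offset	 = 0,	/* lock word offset inside each entry */
		.list_op_pending = NULL,
	};

	static long register_robust_list(void)
	{
		/* No glibc wrapper exists; issue the syscall directly. */
		return syscall(SYS_set_robust_list, &head, sizeof(head));
	}

Note that glibc already registers a robust list per thread for
PTHREAD_MUTEX_ROBUST mutexes, so a real program would rarely make this
call itself; the sketch only shows the structure that
exit_robust_list64()/exit_robust_list32() walk at task exit.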