Until now, pagefault_disable()/pagefault_enable() used the preempt count
to track whether we are in an environment with pagefaults disabled
(which can be queried via in_atomic()).

This patch introduces a separate counter in task_struct to count the
level of pagefault_disable() calls. We keep manipulating the preempt
count to retain compatibility with existing pagefault handlers.

It is now possible to verify whether we are in a pagefault_disable()
environment by calling pagefault_disabled(). In contrast to in_atomic(),
it will not be influenced by preempt_enable()/preempt_disable().

This patch is based on a patch from Ingo Molnar.

Signed-off-by: David Hildenbrand <dahi@xxxxxxxxxxxxxxxxxx>
---
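Not part of this patch, just an illustration for reviewers: a minimal
sketch of what the two counters look like from a caller's point of view
(example() is a made-up name):

	#include <linux/preempt.h>
	#include <linux/uaccess.h>

	static void example(void)
	{
		preempt_disable();
		/* simple atomic context:
		 * in_atomic() == true, pagefault_disabled() == false */
		preempt_enable();

		pagefault_disable();
		/* real pagefault_disable() context: in_atomic() == true,
		 * because the preempt count is still raised for
		 * compatibility, and pagefault_disabled() == true */
		pagefault_enable();
	}

The preempt count is still manipulated so that existing pagefault
handlers, which check in_atomic(), keep working unchanged.
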
 include/linux/sched.h   |  1 +
 include/linux/uaccess.h | 36 +++++++++++++++++++++++++++++-------
 kernel/fork.c           |  3 +++
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ede26ca..75778cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1724,6 +1724,7 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long task_state_change;
 #endif
+	int pagefault_disabled;
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ecd3319..23290cc 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,20 +2,36 @@
 #define __LINUX_UACCESS_H__
 
 #include <linux/preempt.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
+static __always_inline void pagefault_disabled_inc(void)
+{
+	current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+	current->pagefault_disabled--;
+	WARN_ON(current->pagefault_disabled < 0);
+}
+
 /*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
+ *
+ * We increase the preempt and the pagefault count, to be able to distinguish
+ * whether we run in simple atomic context or in a real pagefault_disable()
+ * context.
+ *
+ * For now, after pagefault_disable() has been called, we run in atomic
+ * context. User access methods will not sleep.
  *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
  */
 static inline void pagefault_disable(void)
 {
 	preempt_count_inc();
+	pagefault_disabled_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
 	 * can hit.
@@ -25,18 +41,24 @@ static inline void pagefault_disable(void)
 
 static inline void pagefault_enable(void)
 {
-#ifndef CONFIG_PREEMPT
 	/*
 	 * make sure to issue those last loads/stores before enabling
 	 * the pagefault handler again.
 	 */
 	barrier();
+	pagefault_disabled_dec();
+#ifndef CONFIG_PREEMPT
 	preempt_count_dec();
 #else
 	preempt_enable();
 #endif
 }
 
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
diff --git a/kernel/fork.c b/kernel/fork.c
index 03c1eaa..c344d27 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1396,6 +1396,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+
+	p->pagefault_disabled = 0;
+
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0;	/* no locks held yet */
 	p->curr_chain_key = 0;
--
2.1.4