From: Hou Tao <houtao1@xxxxxxxxxx>

Move is_vsyscall_vaddr() into mm_internal.h to make it available for
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c.

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
 arch/x86/mm/fault.c       | 11 ++---------
 arch/x86/mm/mm_internal.h | 13 +++++++++++++
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe241c..69e007761d9a9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -38,6 +38,8 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
+#include "mm_internal.h"
+
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
@@ -798,15 +800,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 	show_opcodes(regs, loglvl);
 }
 
-/*
- * The (legacy) vsyscall page is the long page in the kernel portion
- * of the address space that has user-accessible permissions.
- */
-static bool is_vsyscall_vaddr(unsigned long vaddr)
-{
-	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
-}
-
 static void
 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, u32 pkey, int si_code)
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 3f37b5c80bb32..4ebf6051e1ed7 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -2,6 +2,10 @@
 #ifndef __X86_MM_INTERNAL_H
 #define __X86_MM_INTERNAL_H
 
+#include <uapi/asm/vsyscall.h>
+
+#include <asm/page_types.h>
+
 void *alloc_low_pages(unsigned int num);
 static inline void *alloc_low_page(void)
 {
@@ -25,4 +29,13 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
 
 extern unsigned long tlb_single_page_flush_ceiling;
 
+/*
+ * The (legacy) vsyscall page is the long page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
 #endif /* __X86_MM_INTERNAL_H */
-- 
2.29.2
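
For reference, the point of sharing the helper is the follow-up change to
arch/x86/mm/maccess.c, where copy_from_kernel_nofault_allowed() can reject
the vsyscall page the same way it rejects ordinary user addresses. A minimal
sketch of that caller follows; the TASK_SIZE_MAX guard and the comments are
assumptions about the surrounding code, not part of this patch:

	/* arch/x86/mm/maccess.c -- sketch only, not part of this patch */
	#include <linux/uaccess.h>
	#include <linux/kernel.h>

	#include "mm_internal.h"

	bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
	{
		unsigned long vaddr = (unsigned long)unsafe_src;

		/*
		 * Reject normal user addresses and the user guard page
		 * (assumed existing check in maccess.c).
		 */
		if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
			return false;

		/*
		 * The vsyscall page lies above TASK_SIZE_MAX but has
		 * user-accessible permissions, so treat it as a user
		 * address too and refuse nofault kernel reads from it.
		 */
		return !is_vsyscall_vaddr(vaddr);
	}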