I have been trying to build and boot the latest version of linux-next. During the build I get a series of "has no CRC!" warnings for a number of exported symbols. As a result I end up with the following lines in Module.symvers:

[qlambert@sloth linux-next]$ grep 0x00000 Module.symvers
0x00000000	phys_base	vmlinux	EXPORT_SYMBOL
0x00000000	memmove	vmlinux	EXPORT_SYMBOL
0x00000000	__copy_user_nocache	vmlinux	EXPORT_SYMBOL
0x00000000	__get_user_4	vmlinux	EXPORT_SYMBOL
0x00000000	__put_user_4	vmlinux	EXPORT_SYMBOL
0x00000000	__memcpy	vmlinux	EXPORT_SYMBOL
0x00000000	__fentry__	vmlinux	EXPORT_SYMBOL
0x00000000	memset	vmlinux	EXPORT_SYMBOL
0x00000000	__sw_hweight64	vmlinux	EXPORT_SYMBOL
0x00000000	__sw_hweight32	vmlinux	EXPORT_SYMBOL
0x00000000	memcpy_mcsafe_unrolled	vmlinux	EXPORT_SYMBOL_GPL
0x00000000	memcpy	vmlinux	EXPORT_SYMBOL
0x00000000	copy_user_enhanced_fast_string	vmlinux	EXPORT_SYMBOL
0x00000000	clear_page	vmlinux	EXPORT_SYMBOL
0x00000000	__put_user_2	vmlinux	EXPORT_SYMBOL
0x00000000	__get_user_2	vmlinux	EXPORT_SYMBOL
0x00000000	copy_page	vmlinux	EXPORT_SYMBOL
0x00000000	copy_user_generic_string	vmlinux	EXPORT_SYMBOL
0x00000000	_copy_to_user	vmlinux	EXPORT_SYMBOL
0x00000000	___preempt_schedule	vmlinux	EXPORT_SYMBOL
0x00000000	__memmove	vmlinux	EXPORT_SYMBOL
0x00000000	empty_zero_page	vmlinux	EXPORT_SYMBOL
0x00000000	__get_user_8	vmlinux	EXPORT_SYMBOL
0x00000000	__put_user_8	vmlinux	EXPORT_SYMBOL
0x00000000	_copy_from_user	vmlinux	EXPORT_SYMBOL
0x00000000	native_load_gs_index	vmlinux	EXPORT_SYMBOL
0x00000000	__memset	vmlinux	EXPORT_SYMBOL
0x00000000	___preempt_schedule_notrace	vmlinux	EXPORT_SYMBOL
0x00000000	__put_user_1	vmlinux	EXPORT_SYMBOL
0x00000000	__get_user_1	vmlinux	EXPORT_SYMBOL
0x00000000	copy_user_generic_unrolled	vmlinux	EXPORT_SYMBOL

If I ignore these and boot the resulting kernel, I am left with:

    ERROR: Unable to find root device 'UUID=<omitted>'

and I am then dropped to a recovery shell where the keyboard does not respond. The UUID is valid and is the same one I use when booting the HEAD of the linux tree.

Before it fails to find the root device, the kernel prints a pair of messages for each of the symbols listed above:

    scsi_mod: no symbol version for <foo>
    scsi_mod: Unknown symbol <foo> (err -22)

so I suspect the missing-CRC warnings are linked to the boot failure.

I have git bisected this behavior to commit 784d5699edd. Simply reverting that patch trades one problem for another: the errors caused by the missing CRCs go away, but I then get a different error at boot time:

    failed command: WRITE LOG DMA EXT

I am mostly asking for help on what to do next. Is it ok for me to send the reverting patch (included below) with an RFC tag to start the discussion? Could someone point me toward documentation that would help me understand and fix this issue?
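In case it is useful, below is the quick check I have been using to see whether the CRCs were generated at all. It assumes CONFIG_MODVERSIONS=y, in which case each exported symbol should come with a matching __crc_<symbol> entry in vmlinux and a non-zero CRC in the first column of Module.symvers (memcpy here is just one example taken from the list above):

    # does Module.symvers record a CRC for memcpy? (first column should be non-zero)
    grep -w memcpy Module.symvers
    # is there a __crc_memcpy symbol in vmlinux at all?
    nm vmlinux | grep __crc_memcpy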
---
 arch/x86/entry/entry_32.S        |  2 -
 arch/x86/entry/entry_64.S        |  2 -
 arch/x86/entry/thunk_32.S        |  3 --
 arch/x86/entry/thunk_64.S        |  3 --
 arch/x86/include/asm/export.h    |  4 --
 arch/x86/kernel/Makefile         |  4 +-
 arch/x86/kernel/head_32.S        |  2 -
 arch/x86/kernel/head_64.S        |  3 --
 arch/x86/kernel/i386_ksyms_32.c  | 47 ++++++++++++++++++++++
 arch/x86/kernel/mcount_64.S      |  2 -
 arch/x86/kernel/x8664_ksyms_64.c | 85 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/lib/checksum_32.S       |  3 --
 arch/x86/lib/clear_page_64.S     |  2 -
 arch/x86/lib/cmpxchg8b_emu.S     |  2 -
 arch/x86/lib/copy_page_64.S      |  2 -
 arch/x86/lib/copy_user_64.S      |  8 ----
 arch/x86/lib/csum-partial_64.c   |  1 -
 arch/x86/lib/getuser.S           |  5 ---
 arch/x86/lib/hweight.S           |  3 --
 arch/x86/lib/memcpy_64.S         |  4 --
 arch/x86/lib/memmove_64.S        |  3 --
 arch/x86/lib/memset_64.S         |  3 --
 arch/x86/lib/putuser.S           |  5 ---
 arch/x86/lib/strstr_32.c         |  3 +-
 arch/x86/um/Makefile             |  2 +-
 arch/x86/um/checksum_32.S        |  2 -
 arch/x86/um/ksyms.c              | 13 ++++++
 27 files changed, 150 insertions(+), 68 deletions(-)
 delete mode 100644 arch/x86/include/asm/export.h
 create mode 100644 arch/x86/kernel/i386_ksyms_32.c
 create mode 100644 arch/x86/kernel/x8664_ksyms_64.c
 create mode 100644 arch/x86/um/ksyms.c

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21b352a11b49..b75a8bcd2d23 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,7 +44,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>

 .section .entry.text, "ax"

@@ -992,7 +991,6 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b9ca6b34b6c5..c0373d667674 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -35,7 +35,6 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
-#include <asm/export.h>
 #include <linux/err.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -832,7 +831,6 @@ ENTRY(native_load_gs_index)
 	popfq
 	ret
 END(native_load_gs_index)
-EXPORT_SYMBOL(native_load_gs_index)

 	_ASM_EXTABLE(.Lgs_change, bad_gs)
 	.section .fixup, "ax"
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index fee6bc79b987..e5a17114a8c4 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -6,7 +6,6 @@
  */
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm/export.h>

 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -37,7 +36,5 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
-	EXPORT_SYMBOL(___preempt_schedule)
-	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif

diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index be36bf4e0957..627ecbcb2e62 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,7 +8,6 @@
 #include <linux/linkage.h>
 #include "calling.h"
 #include <asm/asm.h>
-#include <asm/export.h>

 	/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -50,8 +49,6 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
-	EXPORT_SYMBOL(___preempt_schedule)
-	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif

 #if defined(CONFIG_TRACE_IRQFLAGS) \
diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h
deleted file mode 100644
index 138de56b13eb..000000000000
--- a/arch/x86/include/asm/export.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifdef CONFIG_64BIT
-#define KSYM_ALIGN 16
-#endif
-#include <asm-generic/export.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 92fd50c77875..4f656fe156fd 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -46,7 +46,9 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)	+= irq_work.o
 obj-y			+= probe_roms.o
-obj-$(CONFIG_X86_64)	+= sys_x86_64.o mcount_64.o
+obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
+obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
+obj-$(CONFIG_X86_64)	+= mcount_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..5f401262f12d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -23,7 +23,6 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include <asm/bootparam.h>
-#include <asm/export.h>

 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -674,7 +673,6 @@ ENTRY(empty_zero_page)
 	.fill 4096,1,0
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
-EXPORT_SYMBOL(empty_zero_page)

 /*
  * This starts the data section.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b4421cc191b0..c98a559c346e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -21,7 +21,6 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include "../entry/calling.h"
-#include <asm/export.h>

 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -487,12 +486,10 @@ early_gdt_descr_base:
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad 0x0000000000000000
-EXPORT_SYMBOL(phys_base)

 #include "../../x86/xen/xen-head.S"

 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)

diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
new file mode 100644
index 000000000000..1f9b878ef5ef
--- /dev/null
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -0,0 +1,47 @@
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
+
+#include <asm/checksum.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+/* mcount is defined in assembly */
+EXPORT_SYMBOL(mcount);
+#endif
+
+/*
+ * Note, this is a prototype to get at the symbol for
+ * the export, but dont use it from C code, it is used
+ * by assembly code and is not using C calling convention!
+ */
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+EXPORT_SYMBOL(cmpxchg8b_emu);
+#endif
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PREEMPT
+EXPORT_SYMBOL(___preempt_schedule);
+EXPORT_SYMBOL(___preempt_schedule_notrace);
+#endif
+
+EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index efe73aacf966..61924222a9e1 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
-#include <asm/export.h>

 	.code64

@@ -295,7 +294,6 @@ trace:
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(function_hook)
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
new file mode 100644
index 000000000000..b2cee3d19477
--- /dev/null
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -0,0 +1,85 @@
+/* Exports for assembly files.
+   All C exports should go in the respective C files. */
+
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
+#include <linux/smp.h>
+
+#include <net/checksum.h>
+
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+/* mcount and __fentry__ are defined in assembly */
+#ifdef CC_USING_FENTRY
+EXPORT_SYMBOL(__fentry__);
+#else
+EXPORT_SYMBOL(mcount);
+#endif
+#endif
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(copy_user_generic_string);
+EXPORT_SYMBOL(copy_user_generic_unrolled);
+EXPORT_SYMBOL(copy_user_enhanced_fast_string);
+EXPORT_SYMBOL(__copy_user_nocache);
+EXPORT_SYMBOL(_copy_from_user);
+EXPORT_SYMBOL(_copy_to_user);
+
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
+
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(csum_partial);
+
+EXPORT_SYMBOL(__sw_hweight32);
+EXPORT_SYMBOL(__sw_hweight64);
+
+/*
+ * Export string functions. We normally rely on gcc builtin for most of these,
+ * but gcc sometimes decides not to inline them.
+ */
+#undef memcpy
+#undef memset
+#undef memmove
+
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+EXPORT_SYMBOL(phys_base);
+#endif
+EXPORT_SYMBOL(empty_zero_page);
+#ifndef CONFIG_PARAVIRT
+EXPORT_SYMBOL(native_load_gs_index);
+#endif
+
+#ifdef CONFIG_PREEMPT
+EXPORT_SYMBOL(___preempt_schedule);
+EXPORT_SYMBOL(___preempt_schedule_notrace);
+#endif
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb548b41..c1e623209853 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -28,7 +28,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
-#include <asm/export.h>

 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -252,7 +251,6 @@ ENTRY(csum_partial)
 ENDPROC(csum_partial)

 #endif
-EXPORT_SYMBOL(csum_partial)

 /*
 unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -492,4 +490,3 @@ ENDPROC(csum_partial_copy_generic)
 #undef ROUND1

 #endif
-EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 5e2af3a88cf5..65be7cfaf947 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,7 +1,6 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
-#include <asm/export.h>

 /*
  * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -24,7 +23,6 @@ ENTRY(clear_page)
 	rep stosq
 	ret
 ENDPROC(clear_page)
-EXPORT_SYMBOL(clear_page)

 ENTRY(clear_page_orig)

diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 03a186fc06ea..ad5349778490 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,6 @@
  */

 #include <linux/linkage.h>
-#include <asm/export.h>

 .text

@@ -49,4 +48,3 @@ ENTRY(cmpxchg8b_emu)
 	ret

 ENDPROC(cmpxchg8b_emu)
-EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index e8508156c99d..24ef1c2104d4 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,7 +3,6 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
-#include <asm/export.h>

 /*
  * Some CPUs run faster using the string copy instructions (sane microcode).
@@ -18,7 +17,6 @@ ENTRY(copy_page)
 	rep movsq
 	ret
 ENDPROC(copy_page)
-EXPORT_SYMBOL(copy_page)

 ENTRY(copy_page_regs)
 	subq $2*8, %rsp
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b48f88..bf603ebbfd8e 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -14,7 +14,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>

 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
@@ -30,7 +29,6 @@ ENTRY(_copy_to_user)
 		      "jmp copy_user_enhanced_fast_string", \
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_to_user)
-EXPORT_SYMBOL(_copy_to_user)

 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
@@ -46,8 +44,6 @@ ENTRY(_copy_from_user)
 		      "jmp copy_user_enhanced_fast_string", \
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_from_user)
-EXPORT_SYMBOL(_copy_from_user)
-

 	.section .fixup,"ax"
 	/* must zero dest */
@@ -159,7 +155,6 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
 ENDPROC(copy_user_generic_unrolled)
-EXPORT_SYMBOL(copy_user_generic_unrolled)

 /* Some CPUs run faster using the string copy instructions.
  * This is also a lot simpler. Use them when possible.
@@ -205,7 +200,6 @@ ENTRY(copy_user_generic_string)
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
 ENDPROC(copy_user_generic_string)
-EXPORT_SYMBOL(copy_user_generic_string)

 /*
  * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -235,7 +229,6 @@ ENTRY(copy_user_enhanced_fast_string)

 	_ASM_EXTABLE(1b,12b)
 ENDPROC(copy_user_enhanced_fast_string)
-EXPORT_SYMBOL(copy_user_enhanced_fast_string)

 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -386,4 +379,3 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
 	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
-EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 378e5d5bf9b1..9a7fe6a70491 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -135,7 +135,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 	return (__force __wsum)add32_with_carry(do_csum(buff, len),
 						(__force u32)sum);
 }
-EXPORT_SYMBOL(csum_partial);

 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 37b62d412148..0ef5128c2de8 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -32,7 +32,6 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>

 	.text
 ENTRY(__get_user_1)
@@ -45,7 +44,6 @@ ENTRY(__get_user_1)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_1)
-EXPORT_SYMBOL(__get_user_1)

 ENTRY(__get_user_2)
 	add $1,%_ASM_AX
@@ -59,7 +57,6 @@ ENTRY(__get_user_2)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_2)
-EXPORT_SYMBOL(__get_user_2)

 ENTRY(__get_user_4)
 	add $3,%_ASM_AX
@@ -73,7 +70,6 @@ ENTRY(__get_user_4)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_4)
-EXPORT_SYMBOL(__get_user_4)

 ENTRY(__get_user_8)
 #ifdef CONFIG_X86_64
@@ -101,7 +97,6 @@ ENTRY(__get_user_8)
 	ret
 #endif
 ENDPROC(__get_user_8)
-EXPORT_SYMBOL(__get_user_8)


 bad_get_user:
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 23d893cbc200..8a602a1e404a 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
-#include <asm/export.h>

 #include <asm/asm.h>

@@ -33,7 +32,6 @@ ENTRY(__sw_hweight32)
 	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	ret
 ENDPROC(__sw_hweight32)
-EXPORT_SYMBOL(__sw_hweight32)

 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
@@ -79,4 +77,3 @@ ENTRY(__sw_hweight64)
 	ret
 #endif
 ENDPROC(__sw_hweight64)
-EXPORT_SYMBOL(__sw_hweight64)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..49e6ebac7e73 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,7 +4,6 @@
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
-#include <asm/export.h>

 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -41,8 +40,6 @@ ENTRY(memcpy)
 	ret
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
-EXPORT_SYMBOL(memcpy)
-EXPORT_SYMBOL(__memcpy)

 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -277,7 +274,6 @@ ENTRY(memcpy_mcsafe_unrolled)
 	xorq %rax, %rax
 	ret
 ENDPROC(memcpy_mcsafe_unrolled)
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)

 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd15b0..90ce01bee00c 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,7 +8,6 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
-#include <asm/export.h>

 #undef memmove

@@ -208,5 +207,3 @@ ENTRY(__memmove)
 	retq
 ENDPROC(__memmove)
 ENDPROC(memmove)
-EXPORT_SYMBOL(__memmove)
-EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30a61..e1229ecd2a82 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,7 +3,6 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
-#include <asm/export.h>

 .weak memset

@@ -44,8 +43,6 @@ ENTRY(__memset)
 	ret
 ENDPROC(memset)
 ENDPROC(__memset)
-EXPORT_SYMBOL(memset)
-EXPORT_SYMBOL(__memset)

 /*
  * ISO C memset - set a memory block to a byte value. This function uses
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index cd5d716d2897..c891ece81e5b 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -15,7 +15,6 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>


 /*
@@ -44,7 +43,6 @@ ENTRY(__put_user_1)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
-EXPORT_SYMBOL(__put_user_1)

 ENTRY(__put_user_2)
 	ENTER
@@ -57,7 +55,6 @@ ENTRY(__put_user_2)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
-EXPORT_SYMBOL(__put_user_2)

 ENTRY(__put_user_4)
 	ENTER
@@ -70,7 +67,6 @@ ENTRY(__put_user_4)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
-EXPORT_SYMBOL(__put_user_4)

 ENTRY(__put_user_8)
 	ENTER
@@ -86,7 +82,6 @@ ENTRY(__put_user_8)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)
-EXPORT_SYMBOL(__put_user_8)

 bad_put_user:
 	movl $-EFAULT,%eax
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index a03b1c750bfe..8e2d55f754bf 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,5 +1,4 @@
 #include <linux/string.h>
-#include <linux/export.h>

 char *strstr(const char *cs, const char *ct)
 {
@@ -29,4 +28,4 @@ __asm__ __volatile__(
 	: "dx", "di");
 	return __res;
 }
-EXPORT_SYMBOL(strstr);
+
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index e7e7055a8658..3ee2bb6b440b 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -8,7 +8,7 @@ else
 	BITS := 64
 endif

-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
index b9933eb9274a..fa4b8b9841ff 100644
--- a/arch/x86/um/checksum_32.S
+++ b/arch/x86/um/checksum_32.S
@@ -27,7 +27,6 @@

 #include <asm/errno.h>
 #include <asm/asm.h>
-#include <asm/export.h>

 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -215,4 +214,3 @@ csum_partial:
 	ret

 #endif
-	EXPORT_SYMBOL(csum_partial)
diff --git a/arch/x86/um/ksyms.c b/arch/x86/um/ksyms.c
new file mode 100644
index 000000000000..2e8f43ec6214
--- /dev/null
+++ b/arch/x86/um/ksyms.c
@@ -0,0 +1,13 @@
+#include <linux/module.h>
+#include <asm/string.h>
+#include <asm/checksum.h>
+
+#ifndef CONFIG_X86_32
+/*XXX: we need them because they would be exported by x86_64 */
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
+EXPORT_SYMBOL(memcpy);
+#else
+EXPORT_SYMBOL(__memcpy);
+#endif
+#endif
+EXPORT_SYMBOL(csum_partial);
-- 
2.9.3