On Thu, Nov 23, 2023 at 7:18 AM <deller@xxxxxxxxxx> wrote:
>
> From: Helge Deller <deller@xxxxxx>
>
> On 64-bit architectures without CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
> (e.g. ppc64, ppc64le, parisc, s390x,...) the __KSYM_REF() macro stores
> 64-bit pointers into the __ksymtab* sections.
> Make sure that the start of those sections is 64-bit aligned in the vmlinux
> executable, otherwise unaligned memory accesses may happen at runtime.

Are you solving a real problem?

1/4 already ensures the proper alignment of __ksymtab*, doesn't it?

I applied the following hack to attempt to break the alignment
intentionally.

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bae0fe4d499b..e2b5c9acee97 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -482,7 +482,7 @@
 	TRACEDATA							\
 									\
 	PRINTK_INDEX							\
-									\
+	. = . + 1;							\
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab	: AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
 		__start___ksymtab = .;					\

The __ksymtab section and __start___ksymtab symbol are still properly
aligned due to the '.balign' in <linux/export-internal.h>.

So, my understanding is this patch is unneeded.

Or, does the behaviour depend on toolchains?

> The __kcrctab* sections store 32-bit entities, so make those sections
> 32-bit aligned.
>
> The pci fixup routines want to be 64-bit aligned on 64-bit platforms
> which don't define CONFIG_HAVE_ARCH_PREL32_RELOCATIONS. An alignment
> of 8 bytes is sufficient to guarantee aligned accesses at runtime.
>
> Signed-off-by: Helge Deller <deller@xxxxxx>
> Cc: <stable@xxxxxxxxxxxxxxx> # v6.0+
> ---
>  include/asm-generic/vmlinux.lds.h | 5 +++++
>  1 file changed, 5 insertions(+)
>
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index bae0fe4d499b..fa4335346e7d 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -467,6 +467,7 @@
>  	}								\
>  									\
>  	/* PCI quirks */						\
> +	. = ALIGN(8);							\
>  	.pci_fixup	: AT(ADDR(.pci_fixup) - LOAD_OFFSET) {		\
>  		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early,  _pci_fixups_early,  __start, __end) \
>  		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \
> @@ -484,6 +485,7 @@
>  	PRINTK_INDEX							\
>  									\
>  	/* Kernel symbol table: Normal symbols */			\
> +	. = ALIGN(8);							\
>  	__ksymtab	: AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
>  		__start___ksymtab = .;					\
>  		KEEP(*(SORT(___ksymtab+*)))				\
> @@ -491,6 +493,7 @@
>  	}								\
>  									\
>  	/* Kernel symbol table: GPL-only symbols */			\
> +	. = ALIGN(8);							\
>  	__ksymtab_gpl	: AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
>  		__start___ksymtab_gpl = .;				\
>  		KEEP(*(SORT(___ksymtab_gpl+*)))				\
> @@ -498,6 +501,7 @@
>  	}								\
>  									\
>  	/* Kernel symbol table: Normal symbols */			\
> +	. = ALIGN(4);							\
>  	__kcrctab	: AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
>  		__start___kcrctab = .;					\
>  		KEEP(*(SORT(___kcrctab+*)))				\
> @@ -505,6 +509,7 @@
>  	}								\
>  									\
>  	/* Kernel symbol table: GPL-only symbols */			\
> +	. = ALIGN(4);							\
>  	__kcrctab_gpl	: AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
>  		__start___kcrctab_gpl = .;				\
>  		KEEP(*(SORT(___kcrctab_gpl+*)))				\
> --
> 2.41.0
>


--
Best Regards
Masahiro Yamada
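
For readers without the tree at hand, below is a minimal, compilable sketch
of the per-entry '.balign' pattern the reply relies on. It is an
approximation, not the kernel's actual <linux/export-internal.h>: the macro
name SKETCH_KSYMTAB_ENTRY and the symbol some_exported_symbol are made up
for illustration, and the '.quad' / '.balign 8' pair assumes the 64-bit,
!CONFIG_HAVE_ARCH_PREL32_RELOCATIONS case after an alignment fix along the
lines of patch 1/4 of this series.

/*
 * Illustrative sketch only; a simplified approximation of the export
 * machinery discussed above, not the kernel's actual
 * <linux/export-internal.h>.  Each entry is emitted into its own
 * "___ksymtab+<name>" input section behind a ".balign", so the entry
 * itself stays aligned regardless of how the output section start is
 * placed by the linker script.
 */
#define SKETCH_KSYMTAB_ENTRY(name)					\
	asm("	.section \"___ksymtab+" #name "\", \"a\"	\n"	\
	    "	.balign 8					\n"	\
	    "__ksymtab_" #name ":				\n"	\
	    "	.quad " #name "					\n"	\
	    "	.previous					\n")

int some_exported_symbol;	/* hypothetical symbol, for illustration */
SKETCH_KSYMTAB_ENTRY(some_exported_symbol);

int main(void)
{
	return 0;
}

Because every entry begins with its own alignment directive, the
___ksymtab+* input sections carry that alignment, and the linker in turn
aligns the __ksymtab output section (and hence __start___ksymtab) to at
least that value even when the location counter is deliberately nudged
beforehand, which appears to be what the experiment in the mail above
observes.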