[PATCH v2 3/3] Automatically detect kernel ASLR offset.

This patch improves support for kernel ASLR (kASLR) by automatically
determining the relocation offset, comparing the _stext symbol recorded
in the dump's VMCOREINFO with the _stext symbol in the vmlinux file.

Signed-off-by: Andrew Honig <ahonig@xxxxxxxxxx>
---
 netdump.c | 12 ++++++++++++
 symbols.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 59 insertions(+), 4 deletions(-)
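
For reference, the offset derivation added in symbols.c boils down to
subtracting the _stext value recorded in the dump's VMCOREINFO from the
_stext value in the vmlinux symbol table, and accepting the result only
when it is 0x1000-aligned.  The standalone sketch below is illustrative
only: compute_kaslr_offset() and the sample addresses are hypothetical
and not part of crash or of this patch; it just keeps the same
subtraction order as the patch (vmlinux value minus VMCOREINFO value).

    #include <stdio.h>

    /*
     * Hypothetical helper: report a kASLR relocation when the difference
     * between the vmlinux _stext and the runtime _stext recorded in
     * VMCOREINFO looks plausible.
     */
    static int
    compute_kaslr_offset(unsigned long vmlinux_stext,
                         unsigned long vmcoreinfo_stext,
                         unsigned long *relocate)
    {
            unsigned long diff = vmlinux_stext - vmcoreinfo_stext;

            /*
             * A genuine kASLR shift is always 0x1000-aligned; anything
             * else is more likely a mismatched vmlinux than a relocation.
             */
            if (diff & 0xFFF)
                    return 0;

            *relocate = diff;
            return 1;
    }

    int
    main(void)
    {
            unsigned long relocate;

            /* Sample addresses only. */
            if (compute_kaslr_offset(0xffffffff81000000UL,
                                     0xffffffffa9000000UL, &relocate))
                    printf("relocate: %#lx\n", relocate);
            return 0;
    }

With a relocated kernel the subtraction wraps modulo 2^64 (the sample
above prints 0xffffffffd8000000); only the low 12 bits matter for the
sanity check, and, if I read the existing relocation handling correctly,
kt->relocate is later subtracted from the vmlinux symbol values, so the
wrapped value still yields the running addresses.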

diff --git a/netdump.c b/netdump.c
index 7dc2fca..884dd73 100644
--- a/netdump.c
+++ b/netdump.c
@@ -411,6 +411,18 @@ is_netdump(char *file, ulong source_query)
 		get_log_from_vmcoreinfo(file, vmcoreinfo_read_string);
 	}
 
+	/*
+	 * We may need the _stext_SYMBOL from the vmcoreinfo data to adjust
+	 * for kASLR, and we may not have gotten it elsewhere.  Guard against
+	 * dumps whose vmcoreinfo has no SYMBOL(_stext) entry.
+	 */
+	if (source_query == KDUMP_LOCAL) {
+		char *tmpstring = vmcoreinfo_read_string("SYMBOL(_stext)");
+		if (tmpstring)
+			kt->vmcoreinfo._stext_SYMBOL =
+				htol(tmpstring, RETURN_ON_ERROR, NULL);
+		free(tmpstring);
+	}
+
 	return nd->header_size;
 
 bailout:
diff --git a/symbols.c b/symbols.c
index 28378ab..a2fc150 100644
--- a/symbols.c
+++ b/symbols.c
@@ -557,6 +557,43 @@ strip_symbol_end(const char *name, char *buf)
 }
 
 /*
+ * Derives the kernel ASLR (kASLR) offset by comparing the _stext symbol from
+ * the vmcoreinfo in the dump file to the _stext symbol in the vmlinux file.
+ */
+static void
+derive_kaslr_offset(bfd *abfd, int dynamic, bfd_byte *start, bfd_byte *end,
+		    unsigned int size, asymbol *store)
+{
+	symbol_info syminfo;
+	asymbol *sym;
+	char *name;
+	unsigned long relocate;
+	char buf[BUFSIZE];
+
+	for (; start < end; start += size) {
+		sym = bfd_minisymbol_to_symbol(abfd, dynamic, start, store);
+		if (sym == NULL)
+			error(FATAL, "bfd_minisymbol_to_symbol() failed\n");
+
+		bfd_get_symbol_info(abfd, sym, &syminfo);
+		name = strip_symbol_end(syminfo.name, buf);
+		if (strcmp("_stext", name) == 0) {
+			relocate = syminfo.value - kt->vmcoreinfo._stext_SYMBOL;
+			/*
+			 * To avoid mistaking a mismatched kernel version for
+			 * a kASLR offset, make sure that the offset is
+			 * aligned to 0x1000, as it always will be for
+			 * kASLR.
+			 */
+			if ((relocate & 0xFFF) == 0) {
+				kt->relocate = relocate;
+				kt->flags |= RELOC_SET;
+			}
+		}
+	}
+}
+
+/*
  *  Store the symbols gathered by symtab_init().  The symbols are stored
  *  in increasing numerical order.
  */
@@ -591,15 +628,21 @@ store_symbols(bfd *abfd, int dynamic, void *minisyms, long symcount,
 	st->symcnt = 0;
 	sp = st->symtable;
 
+	first = 0;
+	from = (bfd_byte *) minisyms;
+	fromend = from + symcount * size;
+
 	if (machine_type("X86") || machine_type("X86_64")) {
-		if (!(kt->flags & RELOC_SET))
+		if (!(kt->flags & RELOC_SET)) {
+			if (machine_type("X86_64")) {
+				derive_kaslr_offset(abfd, dynamic, from,
+						fromend, size, store);
+			}
 			kt->flags |= RELOC_FORCE;
+		}
 	} else
 		kt->flags &= ~RELOC_SET;
 
-	first = 0;
-  	from = (bfd_byte *) minisyms;
-  	fromend = from + symcount * size;
   	for (; from < fromend; from += size)
     	{
       		if ((sym = bfd_minisymbol_to_symbol(abfd, dynamic, from, store))
-- 
1.9.0.rc1.175.g0b1dcb5
