Hi, Aditya
Thank you for the update.

On Mon, Sep 25, 2023 at 5:28 PM Aditya Gupta <adityag@xxxxxxxxxxxxx> wrote:
Currently, the crash tool fails on a vmcore collected from an upstream
kernel on PowerPC64 with the error:

    crash: invalid kernel virtual address: 0  type: "first list entry"
Presently, address translation for vmemmap addresses is done using
vmemmap_list. But with the below commit in Linux, vmemmap_list can be
empty in the case of Radix MMU on PowerPC64:

    commit 368a0590d954 ("powerpc/book3s64/vmemmap: switch radix to use a
    different vmemmap handling function")

When vmemmap_list is empty, its head is NULL, which crash then tries to
dereference and fails.
Instead of depending on 'vmemmap_list' for address translation of
vmemmap addresses, do a kernel pagetable walk to get the physical
address associated with the given virtual address.
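
To illustrate the idea, below is a small standalone sketch (not the actual
crash code) of the fallback decision: use the cached vmemmap_list entries
when they exist, otherwise walk the kernel page table. All types and helpers
here are simplified stand-ins for the crash internals; the real fallback
calls ppc64_vtop_level4(), as in the diff further down.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long ulong;

/* simplified stand-in for crash's cached vmemmap_backing entries */
struct vmemmap_entry { ulong virt; ulong phys; ulong size; };

/* stand-in for the real kernel pagetable walk (ppc64_vtop_level4) */
static bool pagetable_walk(ulong vaddr, ulong *paddr)
{
	*paddr = vaddr & 0x0ffffffffffffffful;	/* placeholder translation */
	return true;
}

static bool vmemmap_to_phys(ulong vaddr, ulong *paddr,
			    struct vmemmap_entry *list, int cnt)
{
	int i;

	/* vmemmap_list absent or empty: walk the kernel page table instead */
	if (list == NULL || cnt == 0)
		return pagetable_walk(vaddr, paddr);

	for (i = 0; i < cnt; i++) {
		if (vaddr >= list[i].virt &&
		    vaddr < list[i].virt + list[i].size) {
			*paddr = list[i].phys + (vaddr - list[i].virt);
			return true;
		}
	}
	return false;
}

int main(void)
{
	ulong paddr;

	if (vmemmap_to_phys(0xc00c000000000000ul, &paddr, NULL, 0))
		printf("paddr: 0x%lx\n", paddr);
	return 0;
}

The actual patch keeps the vmemmap_list lookup as the primary method and
only falls back to the pagetable walk when the list is missing or empty.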
Tested-by: Sachin Sant <sachinp@xxxxxxxxxxxxx>
Reviewed-by: Hari Bathini <hbathini@xxxxxxxxxxxxx>
Signed-off-by: Aditya Gupta <adityag@xxxxxxxxxxxxx>
---
Testing
=======
Git tree with patch applied:
https://github.com/adi-g15-ibm/crash/tree/bugzilla-203296-list-v2
This can be tested with '/proc/vmcore' as the vmcore, since makedumpfile
also fails in the absence of 'vmemmap_list' in upstream Linux.
The fix for makedumpfile will also be sent upstream.
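For example (the vmlinux path is distribution-specific, so this is only
illustrative), one can boot into the kdump capture kernel and run something
like 'crash <path-to-debuginfo-vmlinux> /proc/vmcore' against the crashed
kernel's vmlinux; without this fix, it should show the failure described above.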
Changelog
=========
V2
+ handle the case of the 'vmemmap_list' symbol being missing, as suggested in reviews
The v2 looks good to me, so: Ack.
Thanks
Lianbo
---
ppc64.c | 51 +++++++++++++++++++++++++++++++++++----------------
1 file changed, 35 insertions(+), 16 deletions(-)
diff --git a/ppc64.c b/ppc64.c
index fc34006f4863..cd98a679c45e 100644
--- a/ppc64.c
+++ b/ppc64.c
@@ -1280,10 +1280,32 @@ void ppc64_vmemmap_init(void)
long backing_size, virt_addr_offset, phys_offset, list_offset;
ulong *vmemmap_list;
char *vmemmap_buf;
- struct machine_specific *ms;
-
- if (!(kernel_symbol_exists("vmemmap_list")) ||
- !(kernel_symbol_exists("mmu_psize_defs")) ||
+ struct machine_specific *ms = machdep->machspec;
+
+ ld = &list_data;
+ BZERO(ld, sizeof(struct list_data));
+
+ /*
+ * vmemmap_list missing or set to 0 in the kernel implies that the
+ * vmemmap region is mapped in the kernel pagetable. So, read vmemmap_list
+ * anyway and pick the translation method accordingly.
+ */
+ if (kernel_symbol_exists("vmemmap_list"))
+ readmem(symbol_value("vmemmap_list"), KVADDR, &ld->start,
+ sizeof(void *), "vmemmap_list", RETURN_ON_ERROR|QUIET);
+ if (!ld->start) {
+ /*
+ * vmemmap_list is set to 0 or missing. Do kernel pagetable walk
+ * for vmemmap address translation.
+ */
+ ms->vmemmap_list = NULL;
+ ms->vmemmap_cnt = 0;
+
+ machdep->flags |= VMEMMAP_AWARE;
+ return;
+ }
+
+ if (!(kernel_symbol_exists("mmu_psize_defs")) ||
!(kernel_symbol_exists("mmu_vmemmap_psize")) ||
!STRUCT_EXISTS("vmemmap_backing") ||
!STRUCT_EXISTS("mmu_psize_def") ||
@@ -1293,8 +1315,6 @@ void ppc64_vmemmap_init(void)
!MEMBER_EXISTS("vmemmap_backing", "list"))
return;
- ms = machdep->machspec;
-
backing_size = STRUCT_SIZE("vmemmap_backing");
virt_addr_offset = MEMBER_OFFSET("vmemmap_backing", "virt_addr");
phys_offset = MEMBER_OFFSET("vmemmap_backing", "phys");
@@ -1313,14 +1333,8 @@ void ppc64_vmemmap_init(void)
ms->vmemmap_psize = 1 << shift;
- ld = &list_data;
- BZERO(ld, sizeof(struct list_data));
- if (!readmem(symbol_value("vmemmap_list"),
- KVADDR, &ld->start, sizeof(void *), "vmemmap_list",
- RETURN_ON_ERROR))
- return;
- ld->end = symbol_value("vmemmap_list");
- ld->list_head_offset = list_offset;
+ ld->end = symbol_value("vmemmap_list");
+ ld->list_head_offset = list_offset;
hq_open();
cnt = do_list(ld);
@@ -1366,7 +1380,7 @@ ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose)
{
int i;
ulong offset;
- struct machine_specific *ms;
+ struct machine_specific *ms = machdep->machspec;
if (!(machdep->flags & VMEMMAP_AWARE)) {
/*
@@ -1386,7 +1400,12 @@ ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose)
return FALSE;
}
- ms = machdep->machspec;
+ /*
+ * When vmemmap_list is not populated, the kernel does the mapping in the
+ * init_mm page table, so do a pagetable walk of the kernel page table.
+ */
+ if (!ms->vmemmap_list)
+ return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
for (i = 0; i < ms->vmemmap_cnt; i++) {
if ((kvaddr >= ms->vmemmap_list[i].virt) &&
--
2.41.0
--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://listman.redhat.com/mailman/listinfo/crash-utility
Contribution Guidelines: https://github.com/crash-utility/crash/wiki