Hi Alexey and Lijiang,

On Fri, Jun 28, 2024 at 4:08 PM lijiang <lijiang@xxxxxxxxxx> wrote:
>
> On Fri, May 31, 2024 at 5:38 PM <devel-request@xxxxxxxxxxxxxxxxxxxxxxxxxxx> wrote:
>>
>> Date: Fri, 31 May 2024 17:19:38 +0800
>> From: Tao Liu <ltao@xxxxxxxxxx>
>> Subject: [PATCH v4 15/16] vmware_guestdump: Various
>> format versions support
>> To: devel@xxxxxxxxxxxxxxxxxxxxxxxxxxx
>> Cc: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>, Mahesh J
>> Salgaonkar <mahesh@xxxxxxxxxxxxx>, "Naveen N . Rao"
>> <naveen.n.rao@xxxxxxxxxxxxxxxxxx>, Lianbo Jiang <lijiang@xxxxxxxxxx>
>> Message-ID: <20240531091939.97828-16-ltao@xxxxxxxxxx>
>> Content-Type: text/plain; charset=UTF-8
>>
>> From: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>
>>
>> There are several versions of the debug.guest format. The current version
>> of the code is able to parse only version 4.
>>
>> Improve the parser to support other known versions. Split data structures
>> into sub-structures and introduce helper functions to calculate the gap
>> between them based on the version number. Implement an additional data
>> structure (struct mainmeminfo_old) and logic specifically for original
>> (version 1) format support.
>>
>> Cc: Sourabh Jain <sourabhjain@xxxxxxxxxxxxx>
>> Cc: Hari Bathini <hbathini@xxxxxxxxxxxxx>
>> Cc: Mahesh J Salgaonkar <mahesh@xxxxxxxxxxxxx>
>> Cc: Naveen N. Rao <naveen.n.rao@xxxxxxxxxxxxxxxxxx>
>> Cc: Lianbo Jiang <lijiang@xxxxxxxxxx>
>> Cc: HAGIO KAZUHITO(萩尾 一仁) <k-hagio-ab@xxxxxxx>
>> Cc: Tao Liu <ltao@xxxxxxxxxx>
>> Cc: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>
>> Signed-off-by: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>
>> ---
>> vmware_guestdump.c | 316 ++++++++++++++++++++++++++++++++-------------
>> 1 file changed, 229 insertions(+), 87 deletions(-)
>>
>
> The current patch is still not improved, commented on it there:
> https://www.spinics.net/linux/fedora/redhat-crash-utility/msg11627.html
>

Could @Alexey Makhalov make some modifications to it based on the comments?
Then I will merge it into v5.

Thanks,
Tao Liu

> Thanks
> Lianbo
>
>>
>> diff --git a/vmware_guestdump.c b/vmware_guestdump.c
>> index 5be26c8..5c7ee4d 100644
>> --- a/vmware_guestdump.c
>> +++ b/vmware_guestdump.c
>> @@ -2,6 +2,8 @@
>> * vmware_guestdump.c
>> *
>> * Copyright (c) 2020 VMware, Inc.
>> + * Copyright (c) 2024 Broadcom. All Rights Reserved. The term "Broadcom"
>> + * refers to Broadcom Inc. and/or its subsidiaries.
>> *
>> * This program is free software; you can redistribute it and/or modify
>> * it under the terms of the GNU General Public License as published by
>> @@ -13,7 +15,7 @@
>> * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> * GNU General Public License for more details.
>> *
>> - * Author: Alexey Makhalov <amakhalov@xxxxxxxxxx>
>> + * Author: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>
>> */
>>
>> #include "defs.h"
>> @@ -21,20 +23,31 @@
>>
>> #define LOGPRX "vmw: "
>>
>> -#define GUESTDUMP_VERSION 4
>> -#define GUESTDUMP_MAGIC1 1
>> -#define GUESTDUMP_MAGIC2 0
>> -
>> +/*
>> + * debug.guest file layout
>> + * 00000000: guest dump header, it includes:
>> + * 1. Version (4 bytes) \
>> + * 2. Number of Virtual CPUs (4 bytes) } - struct guestdumpheader
>> + * 3. Reserved gap
>> + * 4. Main Memory information - struct mainmeminfo{,_old}
>> + * (use get_vcpus_offset() to get total size of guestdumpheader)
>> + * vcpus_offset: ---------\
>> + * 1. struct vcpu_state1 \
>> + * 2. reserved gap } num_vcpus times
>> + * 3. struct vcpu_state2 /
>> + * 4. 4KB of reserved data /
>> + * --------/
>> + *
>> + */
>> struct guestdumpheader {
>> uint32_t version;
>> uint32_t num_vcpus;
>> - uint8_t magic1;
>> - uint8_t reserved1;
>> - uint32_t cpu_vendor;
>> - uint64_t magic2;
>> +} __attribute__((packed)) hdr;
>> +
>> +struct mainmeminfo {
>> uint64_t last_addr;
>> uint64_t memsize_in_pages;
>> - uint32_t reserved2;
>> + uint32_t reserved1;
>> uint32_t mem_holes;
>> struct memhole {
>> uint64_t ppn;
>> @@ -42,14 +55,36 @@
>> } holes[2];
>> } __attribute__((packed));
>>
>> -struct vcpu_state {
>> +/* Used by version 1 only */
>> +struct mainmeminfo_old {
>> + uint64_t last_addr;
>> + uint32_t memsize_in_pages;
>> + uint32_t reserved1;
>> + uint32_t mem_holes;
>> + struct memhole1 {
>> + uint32_t ppn;
>> + uint32_t pages;
>> + } holes[2];
>> + /* There are additional fields, see get_vcpus_offset() calculation. */
>> +} __attribute__((packed));
>> +
>> +/* First half of vcpu_state */
>> +struct vcpu_state1 {
>> uint32_t cr0;
>> uint64_t cr2;
>> uint64_t cr3;
>> uint64_t cr4;
>> uint64_t reserved1[10];
>> uint64_t idt_base;
>> - uint16_t reserved2[21];
>> +} __attribute__((packed));
>> +
>> +/*
>> + * Unused fields between vcpu_state1 and vcpu_state2 will be skipped.
>> + * See get_vcpu_gapsize() calculation.
>> + */
>> +
>> +/* Second half of vcpu_state */
>> +struct vcpu_state2 {
>> struct x86_64_pt_regs {
>> uint64_t r15;
>> uint64_t r14;
>> @@ -76,9 +111,41 @@
>> uint8_t reserved3[65];
>> } __attribute__((packed));
>>
>> +/*
>> + * Returns the size of the guest dump header.
>> + */
>> +static inline long
>> +get_vcpus_offset(uint32_t version, int mem_holes)
>> +{
>> + switch (version) {
>> + case 1: /* ESXi 6.7 and older */
>> + return sizeof(struct guestdumpheader) + 13 + sizeof(struct mainmeminfo_old) +
>> + (mem_holes == -1 ? 0 : 8 * mem_holes + 4);
>> + case 3: /* ESXi 6.8 */
>> + return sizeof(struct guestdumpheader) + 14 + sizeof(struct mainmeminfo);
>> + case 4: /* ESXi 7.0 */
>> + case 5: /* ESXi 8.0 */
>> + return sizeof(struct guestdumpheader) + 14 + sizeof(struct mainmeminfo);
>> + case 6: /* ESXi 8.0u2 */
>> + return sizeof(struct guestdumpheader) + 15 + sizeof(struct mainmeminfo);
>> +
>> + }
>> + return 0;
>> +}
>> +
>> +/*
>> + * Returns the size of reserved (unused) fields in the middle of vcpu_state structure.
>> + */
>> +static inline long
>> +get_vcpu_gapsize(uint32_t version)
>> +{
>> + if (version < 4)
>> + return 45;
>> + return 42;
>> +}
>>
>> /*
>> - * vmware_guestdump is extension to vmware_vmss with ability to debug
>> + * vmware_guestdump is an extension to the vmware_vmss with the ability to debug
>> * debug.guest and debug.vmem files.
>> *
>> * debug.guest.gz and debug.vmem.gz can be obtained using following
>> @@ -86,73 +153,136 @@
>> * monitor.mini-suspend_on_panic = TRUE
>> * monitor.suspend_on_triplefault = TRUE
>> *
>> - * guestdump (debug.guest) is simplified version of *.vmss which does
>> - * not contain full VM state, but minimal guest state, such as memory
>> + * guestdump (debug.guest) is a simplified version of the *.vmss which does
>> + * not contain a full VM state, but minimal guest state, such as a memory
>> * layout and CPUs state, needed for debugger. is_vmware_guestdump()
>> * and vmware_guestdump_init() functions parse guestdump header and
>> - * populate vmss data structure (from vmware_vmss.c). As result, all
>> + * populate vmss data structure (from vmware_vmss.c). As a result, all
>> * handlers (except mempry_dump) from vmware_vmss.c can be reused.
>> *
>> - * debug.guest does not have dedicated header magic or signature for
>> - * its format. To probe debug.guest we need to perform header fields
>> - * and file size validity. In addition, check for the filename
>> - * extension, which must be ".guest".
>> + * debug.guest does not have a dedicated header magic or file format signature.
>> + * To probe debug.guest we need to perform a series of validations. In addition,
>> + * we check for the filename extension, which must be ".guest".
>> */
>> -
>> int
>> is_vmware_guestdump(char *filename)
>> {
>> - struct guestdumpheader hdr;
>> + struct mainmeminfo mmi;
>> + long vcpus_offset;
>> FILE *fp;
>> - uint64_t filesize, holes_sum = 0;
>> + uint64_t filesize, expected_filesize, holes_sum = 0;
>> int i;
>>
>> if (strcmp(filename + strlen(filename) - 6, ".guest"))
>> return FALSE;
>>
>> - if ((fp = fopen(filename, "r")) == NULL) {
>> + if ((fp = fopen(filename, "r")) == NULL) {
>> error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n",
>> - filename, errno, strerror(errno));
>> + filename, errno, strerror(errno));
>> return FALSE;
>> - }
>> + }
>>
>> if (fread(&hdr, sizeof(struct guestdumpheader), 1, fp) != 1) {
>> error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> - "guestdumpheader", filename, errno, strerror(errno));
>> + "guestdumpheader", filename, errno, strerror(errno));
>> + fclose(fp);
>> + return FALSE;
>> + }
>> +
>> + vcpus_offset = get_vcpus_offset(hdr.version, -1 /* Unknown yet, adjust it later */);
>> +
>> + if (!vcpus_offset) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Not supported version %d\n", hdr.version);
>> fclose(fp);
>> return FALSE;
>> }
>>
>> + if (hdr.version == 1) {
>> + struct mainmeminfo_old tmp;
>> + if (fseek(fp, vcpus_offset - sizeof(struct mainmeminfo_old), SEEK_SET) == -1) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n",
>> + filename, errno, strerror(errno));
>> + fclose(fp);
>> + return FALSE;
>> + }
>> +
>> + if (fread(&tmp, sizeof(struct mainmeminfo_old), 1, fp) != 1) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> + "mainmeminfo_old", filename, errno, strerror(errno));
>> + fclose(fp);
>> + return FALSE;
>> + }
>> + mmi.last_addr = tmp.last_addr;
>> + mmi.memsize_in_pages = tmp.memsize_in_pages;
>> + mmi.mem_holes = tmp.mem_holes;
>> + mmi.holes[0].ppn = tmp.holes[0].ppn;
>> + mmi.holes[0].pages = tmp.holes[0].pages;
>> + mmi.holes[1].ppn = tmp.holes[1].ppn;
>> + mmi.holes[1].pages = tmp.holes[1].pages;
>> + /* vcpu_offset adjustment for mem_holes is required only for version 1. */
>> + vcpus_offset = get_vcpus_offset(hdr.version, mmi.mem_holes);
>> + } else {
>> + if (fseek(fp, vcpus_offset - sizeof(struct mainmeminfo), SEEK_SET) == -1) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n",
>> + filename, errno, strerror(errno));
>> + fclose(fp);
>> + return FALSE;
>> + }
>> +
>> + if (fread(&mmi, sizeof(struct mainmeminfo), 1, fp) != 1) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> + "mainmeminfo", filename, errno, strerror(errno));
>> + fclose(fp);
>> + return FALSE;
>> + }
>> + }
>> if (fseek(fp, 0L, SEEK_END) == -1) {
>> - error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n",
>> - filename, errno, strerror(errno));
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n",
>> + filename, errno, strerror(errno));
>> fclose(fp);
>> return FALSE;
>> }
>> filesize = ftell(fp);
>> fclose(fp);
>>
>> - if (hdr.mem_holes > 2)
>> - goto unrecognized;
>> + if (mmi.mem_holes > 2) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Unexpected mmi.mem_holes value %d\n",
>> + mmi.mem_holes);
>> + return FALSE;
>> + }
>>
>> - for (i = 0; i < hdr.mem_holes; i++) {
>> + for (i = 0; i < mmi.mem_holes; i++) {
>> /* hole start page */
>> - vmss.regions[i].startpagenum = hdr.holes[i].ppn;
>> + vmss.regions[i].startpagenum = mmi.holes[i].ppn;
>> /* hole end page */
>> - vmss.regions[i].startppn = hdr.holes[i].ppn + hdr.holes[i].pages;
>> - holes_sum += hdr.holes[i].pages;
>> + vmss.regions[i].startppn = mmi.holes[i].ppn + mmi.holes[i].pages;
>> + holes_sum += mmi.holes[i].pages;
>> + }
>> +
>> + if ((mmi.last_addr + 1) != ((mmi.memsize_in_pages + holes_sum) << VMW_PAGE_SHIFT)) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Memory size check failed\n");
>> + return FALSE;
>> }
>>
>> - if (hdr.version != GUESTDUMP_VERSION ||
>> - hdr.magic1 != GUESTDUMP_MAGIC1 ||
>> - hdr.magic2 != GUESTDUMP_MAGIC2 ||
>> - (hdr.last_addr + 1) != ((hdr.memsize_in_pages + holes_sum) << VMW_PAGE_SHIFT) ||
>> - filesize != sizeof(struct guestdumpheader) +
>> - hdr.num_vcpus * (sizeof (struct vcpu_state) + VMW_PAGE_SIZE))
>> - goto unrecognized;
>> + expected_filesize = vcpus_offset + hdr.num_vcpus * (sizeof(struct vcpu_state1) +
>> + get_vcpu_gapsize(hdr.version) + sizeof(struct vcpu_state2) + VMW_PAGE_SIZE);
>> + if (filesize != expected_filesize) {
>> + if (CRASHDEBUG(1))
>> + error(INFO, LOGPRX"Incorrect file size: %d != %d\n",
>> + filesize, expected_filesize);
>> + return FALSE;
>> + }
>>
>> - vmss.memsize = hdr.memsize_in_pages << VMW_PAGE_SHIFT;
>> - vmss.regionscount = hdr.mem_holes + 1;
>> + vmss.memsize = mmi.memsize_in_pages << VMW_PAGE_SHIFT;
>> + vmss.regionscount = mmi.mem_holes + 1;
>> vmss.memoffset = 0;
>> vmss.num_vcpus = hdr.num_vcpus;
>> return TRUE;
>> @@ -169,7 +299,8 @@ vmware_guestdump_init(char *filename, FILE *ofp)
>> FILE *fp = NULL;
>> int i, result = TRUE;
>> char *vmem_filename = NULL;
>> - struct vcpu_state vs;
>> + struct vcpu_state1 vs1;
>> + struct vcpu_state2 vs2;
>> char *p;
>>
>> if (!machine_type("X86") && !machine_type("X86_64")) {
>> @@ -180,14 +311,14 @@ vmware_guestdump_init(char *filename, FILE *ofp)
>> goto exit;
>> }
>>
>> - if ((fp = fopen(filename, "r")) == NULL) {
>> + if ((fp = fopen(filename, "r")) == NULL) {
>> error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n",
>> filename, errno, strerror(errno));
>> result = FALSE;
>> goto exit;
>> - }
>> + }
>>
>> - if (fseek(fp, sizeof(struct guestdumpheader), SEEK_SET) == -1) {
>> + if (fseek(fp, get_vcpus_offset(hdr.version, vmss.regionscount - 1), SEEK_SET) == -1) {
>> error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n",
>> filename, errno, strerror(errno));
>> result = FALSE;
>> @@ -203,7 +334,19 @@ vmware_guestdump_init(char *filename, FILE *ofp)
>> }
>>
>> for (i = 0; i < vmss.num_vcpus; i++) {
>> - if (fread(&vs, sizeof(struct vcpu_state), 1, fp) != 1) {
>> + if (fread(&vs1, sizeof(struct vcpu_state1), 1, fp) != 1) {
>> error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> "vcpu_state", filename, errno, strerror(errno));
>> result = FALSE;
>> goto exit;
>> }
>> + if (fseek(fp, get_vcpu_gapsize(hdr.version), SEEK_CUR) == -1) {
>> + error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> + "vcpu_state", filename, errno, strerror(errno));
>> + result = FALSE;
>> + goto exit;
>> + }
>> + if (fread(&vs2, sizeof(struct vcpu_state2), 1, fp) != 1) {
>> error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n",
>> "vcpu_state", filename, errno, strerror(errno));
>> result = FALSE;
>> @@ -217,29 +360,29 @@ vmware_guestdump_init(char *filename, FILE *ofp)
>> }
>> vmss.vcpu_regs[i] = 0;
>>
>> - vmss.regs64[i]->rax = vs.regs64.rax;
>> - vmss.regs64[i]->rcx = vs.regs64.rcx;
>> - vmss.regs64[i]->rdx = vs.regs64.rdx;
>> - vmss.regs64[i]->rbx = vs.regs64.rbx;
>> - vmss.regs64[i]->rbp = vs.regs64.rbp;
>> - vmss.regs64[i]->rsp = vs.regs64.rsp;
>> - vmss.regs64[i]->rsi = vs.regs64.rsi;
>> - vmss.regs64[i]->rdi = vs.regs64.rdi;
>> - vmss.regs64[i]->r8 = vs.regs64.r8;
>> - vmss.regs64[i]->r9 = vs.regs64.r9;
>> - vmss.regs64[i]->r10 = vs.regs64.r10;
>> - vmss.regs64[i]->r11 = vs.regs64.r11;
>> - vmss.regs64[i]->r12 = vs.regs64.r12;
>> - vmss.regs64[i]->r13 = vs.regs64.r13;
>> - vmss.regs64[i]->r14 = vs.regs64.r14;
>> - vmss.regs64[i]->r15 = vs.regs64.r15;
>> - vmss.regs64[i]->idtr = vs.idt_base;
>> - vmss.regs64[i]->cr[0] = vs.cr0;
>> - vmss.regs64[i]->cr[2] = vs.cr2;
>> - vmss.regs64[i]->cr[3] = vs.cr3;
>> - vmss.regs64[i]->cr[4] = vs.cr4;
>> - vmss.regs64[i]->rip = vs.regs64.rip;
>> - vmss.regs64[i]->rflags = vs.regs64.eflags;
>> + vmss.regs64[i]->rax = vs2.regs64.rax;
>> + vmss.regs64[i]->rcx = vs2.regs64.rcx;
>> + vmss.regs64[i]->rdx = vs2.regs64.rdx;
>> + vmss.regs64[i]->rbx = vs2.regs64.rbx;
>> + vmss.regs64[i]->rbp = vs2.regs64.rbp;
>> + vmss.regs64[i]->rsp = vs2.regs64.rsp;
>> + vmss.regs64[i]->rsi = vs2.regs64.rsi;
>> + vmss.regs64[i]->rdi = vs2.regs64.rdi;
>> + vmss.regs64[i]->r8 = vs2.regs64.r8;
>> + vmss.regs64[i]->r9 = vs2.regs64.r9;
>> + vmss.regs64[i]->r10 = vs2.regs64.r10;
>> + vmss.regs64[i]->r11 = vs2.regs64.r11;
>> + vmss.regs64[i]->r12 = vs2.regs64.r12;
>> + vmss.regs64[i]->r13 = vs2.regs64.r13;
>> + vmss.regs64[i]->r14 = vs2.regs64.r14;
>> + vmss.regs64[i]->r15 = vs2.regs64.r15;
>> + vmss.regs64[i]->idtr = vs1.idt_base;
>> + vmss.regs64[i]->cr[0] = vs1.cr0;
>> + vmss.regs64[i]->cr[2] = vs1.cr2;
>> + vmss.regs64[i]->cr[3] = vs1.cr3;
>> + vmss.regs64[i]->cr[4] = vs1.cr4;
>> + vmss.regs64[i]->rip = vs2.regs64.rip;
>> + vmss.regs64[i]->rflags = vs2.regs64.eflags;
>>
>> vmss.vcpu_regs[i] = REGS_PRESENT_ALL;
>> }
>> @@ -268,9 +411,9 @@ vmware_guestdump_init(char *filename, FILE *ofp)
>> fprintf(ofp, LOGPRX"vmem file: %s\n\n", vmem_filename);
>>
>> if (CRASHDEBUG(1)) {
>> - vmware_guestdump_memory_dump(ofp);
>> - dump_registers_for_vmss_dump();
>> - }
>> + vmware_guestdump_memory_dump(ofp);
>> + dump_registers_for_vmss_dump();
>> + }
>>
>> exit:
>> if (fp)
>> @@ -296,24 +439,23 @@ exit:
>> int
>> vmware_guestdump_memory_dump(FILE *ofp)
>> {
>> + uint64_t holes_sum = 0;
>> + unsigned i;
>> +
>> fprintf(ofp, "vmware_guestdump:\n");
>> fprintf(ofp, " Header: version=%d num_vcpus=%llu\n",
>> - GUESTDUMP_VERSION, (ulonglong)vmss.num_vcpus);
>> + hdr.version, (ulonglong)vmss.num_vcpus);
>> fprintf(ofp, "Total memory: %llu\n", (ulonglong)vmss.memsize);
>>
>> - if (vmss.regionscount > 1) {
>> - uint64_t holes_sum = 0;
>> - unsigned i;
>>
>> - fprintf(ofp, "Memory regions[%d]:\n", vmss.regionscount);
>> - fprintf(ofp, " [0x%016x-", 0);
>> - for (i = 0; i < vmss.regionscount - 1; i++) {
>> - fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.regions[i].startpagenum << VMW_PAGE_SHIFT);
>> - fprintf(ofp, " [0x%016llx-", (ulonglong)vmss.regions[i].startppn << VMW_PAGE_SHIFT);
>> - holes_sum += vmss.regions[i].startppn - vmss.regions[i].startpagenum;
>> - }
>> - fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.memsize + (holes_sum << VMW_PAGE_SHIFT));
>> + fprintf(ofp, "Memory regions[%d]:\n", vmss.regionscount);
>> + fprintf(ofp, " [0x%016x-", 0);
>> + for (i = 0; i < vmss.regionscount - 1; i++) {
>> + fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.regions[i].startpagenum << VMW_PAGE_SHIFT);
>> + fprintf(ofp, " [0x%016llx-", (ulonglong)vmss.regions[i].startppn << VMW_PAGE_SHIFT);
>> + holes_sum += vmss.regions[i].startppn - vmss.regions[i].startpagenum;
>> }
>> + fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.memsize + (holes_sum << VMW_PAGE_SHIFT));
>>
>> return TRUE;
>> }
>> --
>> 2.40.1
--
Crash-utility mailing list -- devel@xxxxxxxxxxxxxxxxxxxxxxxxxxx
To unsubscribe send an email to devel-leave@xxxxxxxxxxxxxxxxxxxxxxxxxxx
https://${domain_name}/admin/lists/devel.lists.crash-utility.osci.io/
Contribution Guidelines: https://github.com/crash-utility/crash/wiki