Re: [PATCH 3/3] xen: adjust to new scheduler structures

On 15.03.23 06:54, lijiang wrote:
On Mon, Mar 13, 2023 at 9:07 PM <crash-utility-request@xxxxxxxxxx> wrote:

    Date: Mon, 13 Mar 2023 14:01:12 +0100
    From: Juergen Gross <jgross@xxxxxxxx>
    To: crash-utility@xxxxxxxxxx
    Subject: [Crash-utility] [PATCH 3/3] xen: adjust to new scheduler
             structures
    Message-ID: <20230313130112.15353-4-jgross@xxxxxxxx>
    Content-Type: text/plain; charset="US-ASCII"; x-default=true

    Scheduler data in the Xen hypervisor has been modified significantly.
    Adapt to the new structures and to the fields that have been removed.

I would suggest adding the related hypervisor commit here.
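For context, the hypervisor-side change is the core-scheduling rework (merged around Xen 4.13), which replaced the per-CPU struct schedule_data with struct sched_resource. A rough sketch of the shape of that change, reconstructed only from the member names this patch probes (types are illustrative, not taken from the Xen sources):

    /* Pre-rework: one schedule_data per CPU (members as probed by crash) */
    struct schedule_data {
        spinlock_t   *schedule_lock;  /* "schedule_lock" */
        struct vcpu  *curr;           /* "curr" */
        struct vcpu  *idle;           /* "idle" (only in some versions) */
        void         *sched_priv;     /* "sched_priv" */
        struct timer  s_timer;        /* "s_timer" */
    };

    /* Post-rework: sched_resource carries the equivalent members
     * (minus "idle" and "tick"), which is why the patch below sizes
     * both structs and probes whichever one the debuginfo contains. */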

    Note that this is only the bare minimum needed to keep crash from
    erroring out when opening a vmcore in Xen mode with a recent Xen
    version.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    ---
      xen_hyper.c      | 67 +++++++++++++++++++++++++++++++++---------------
      xen_hyper_defs.h |  4 ++-
      2 files changed, 49 insertions(+), 22 deletions(-)

    diff --git a/xen_hyper.c b/xen_hyper.c
    index 72720e2..4c884dd 100644
    --- a/xen_hyper.c
    +++ b/xen_hyper.c
    @@ -417,13 +417,21 @@ void
      xen_hyper_misc_init(void)
      {
             XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data");
    -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
    -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
    -       if (MEMBER_EXISTS("schedule_data", "idle"))
    -               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
    -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
    -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
    -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
    +       XEN_HYPER_STRUCT_SIZE_INIT(sched_resource, "sched_resource");
    +       if (XEN_HYPER_VALID_SIZE(schedule_data)) {
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
    +               if (MEMBER_EXISTS("schedule_data", "idle"))
    +                       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
    +       } else if (XEN_HYPER_VALID_SIZE(sched_resource)) {
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "sched_resource", "schedule_lock");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "sched_resource", "curr");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "sched_resource", "sched_priv");
    +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "sched_resource", "s_timer");
    +       }

             XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler");
             XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name");
    @@ -467,6 +475,7 @@ xen_hyper_schedule_init(void)
             long *schedulers_buf;
             int nr_schedulers;
             struct xen_hyper_sched_context *schc;
    +       long buf_size;
             char *buf;
             char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE];
             int i, cpuid, flag;
    @@ -561,28 +570,43 @@ xen_hyper_schedule_init(void)
             }
             BZERO(xhscht->sched_context_array,
                     sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS());
    -       buf = GETBUF(XEN_HYPER_SIZE(schedule_data));
    -       if (symbol_exists("per_cpu__schedule_data")) {
    +       if (symbol_exists("per_cpu__sched_res")) {
    +               addr = symbol_value("per_cpu__sched_res");
    +               buf_size = XEN_HYPER_SIZE(sched_resource);
    +               flag = 0;
    +       } else if (symbol_exists("per_cpu__schedule_data")) {
                     addr = symbol_value("per_cpu__schedule_data");
    -               flag = TRUE;
    +               buf_size = XEN_HYPER_SIZE(schedule_data);
    +               flag = 1;
             } else {
                     addr = symbol_value("schedule_data");
    -               flag = FALSE;
    +               buf_size = XEN_HYPER_SIZE(schedule_data);
    +               flag = 2;
             }
    +       buf = GETBUF(buf_size);
             for_cpu_indexes(i, cpuid)
             {
                     schc = &xhscht->sched_context_array[cpuid];
                     if (flag) {
    -                       schc->schedule_data =
    -                               xen_hyper_per_cpu(addr, i);
    +                       if (flag == 1) {
    +                               schc->schedule_data =
    +                                       xen_hyper_per_cpu(addr, i);
    +                       } else {
    +                               schc->schedule_data = addr +
    +                                       XEN_HYPER_SIZE(schedule_data) * i;
    +                       }
    +                       if (!readmem(schc->schedule_data,
    +                               KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
    +                               "schedule_data", RETURN_ON_ERROR)) {
    +                               error(FATAL, "cannot read schedule_data.\n");
    +                       }


As we mentioned for patch 2/3, using readmem(..., FAULT_ON_ERROR) looks better for this case.

Okay.
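
For reference, the simplification would look roughly like this; with FAULT_ON_ERROR, crash's readmem() raises the fatal error itself, so the return-value check and the explicit error(FATAL, ...) call go away:

    readmem(schc->schedule_data, KVADDR, buf,
            XEN_HYPER_SIZE(schedule_data), "schedule_data",
            FAULT_ON_ERROR);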


Juergen
