Ira Weiny wrote:
> If the firmware has configured CXL event support to be firmware first
> the OS can process those events through CPER records. The CXL layer has
> unique DPA to HPA knowledge and standard event trace parsing in place.
> Matching memory devices to the CPER records can be done via Bus, Device,
> Function which is part of the CPER record header.
>
> Detect firmware first, register a notifier callback for each memdev, and
> trace events when they match the proper device.
>
> Signed-off-by: Ira Weiny <ira.weiny@xxxxxxxxx>
> ---
>  drivers/cxl/core/mbox.c | 31 +++++++++++++++++++++-----
>  drivers/cxl/cxlmem.h    |  6 +++++
>  drivers/cxl/pci.c       | 58 ++++++++++++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 89 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
> index 5ccc3843b736..8a0d4f67540d 100644
> --- a/drivers/cxl/core/mbox.c
> +++ b/drivers/cxl/core/mbox.c
> @@ -860,9 +860,30 @@ static const uuid_t mem_mod_event_uuid =
>  	UUID_INIT(0xfe927475, 0xdd59, 0x4339,
>  		  0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
>
> -static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
> -				   enum cxl_event_log_type type,
> -				   struct cxl_event_record_raw *record)
> +void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
> +			    enum cxl_event_log_type type,
> +			    enum cxl_event_type event_type,
> +			    union cxl_event *event)
> +{
> +	switch (event_type) {
> +	case CXL_CPER_EVENT_GEN_MEDIA:
> +		trace_cxl_general_media(cxlmd, type, &gen_media_event_uuid,
> +					&event->gen_media);
> +		break;
> +	case CXL_CPER_EVENT_DRAM:
> +		trace_cxl_dram(cxlmd, type, &dram_event_uuid, &event->dram);
> +		break;
> +	case CXL_CPER_EVENT_MEM_MODULE:
> +		trace_cxl_memory_module(cxlmd, type, &mem_mod_event_uuid,
> +					&event->mem_module);
> +		break;
> +	}
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
> +
> +static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
> +				     enum cxl_event_log_type type,
> +				     struct cxl_event_record_raw *record)
>  {
>  	union cxl_event *evt = &record->event;
>  	uuid_t *id = &record->id;
> @@ -985,8 +1006,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
>  			break;
>
>  		for (i = 0; i < nr_rec; i++)
> -			cxl_event_trace_record(cxlmd, type,
> -					       &payload->records[i]);
> +			__cxl_event_trace_record(cxlmd, type,
> +						 &payload->records[i]);
>
>  		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
>  			trace_cxl_overflow(cxlmd, type, payload);
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index f0e7ebb84f02..9cb0e3448780 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -481,6 +481,8 @@ struct cxl_memdev_state {
>  	struct cxl_security_state security;
>  	struct cxl_fw_state fw;
>
> +	struct notifier_block cxl_cper_nb;
> +
>  	struct rcuwait mbox_wait;
>  	int (*mbox_send)(struct cxl_memdev_state *mds,
>  			 struct cxl_mbox_cmd *cmd);
> @@ -778,6 +780,10 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
>  void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
>  				  unsigned long *cmds);
>  void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
> +void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
> +			    enum cxl_event_log_type type,
> +			    enum cxl_event_type event_type,
> +			    union cxl_event *event);
>  int cxl_set_timestamp(struct cxl_memdev_state *mds);
>  int cxl_poison_state_init(struct cxl_memdev_state *mds);
>  int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 0155fb66b580..ec65c11baf17 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -1,5 +1,6 @@
>  // SPDX-License-Identifier: GPL-2.0-only
>  /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
> +#include <asm-generic/unaligned.h>
>  #include <linux/io-64-nonatomic-lo-hi.h>
>  #include <linux/moduleparam.h>
>  #include <linux/module.h>
> @@ -741,6 +742,59 @@ static bool cxl_event_int_is_fw(u8 setting)
>  	return mode == CXL_INT_FW;
>  }
>
> +#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
> +static int cxl_cper_event_call(struct notifier_block *nb, unsigned long action,
> +			       void *data)
> +{
> +	struct cxl_cper_notifier_data *nd = data;
> +	struct cper_cxl_event_devid *device_id = &nd->rec->hdr.device_id;
> +	enum cxl_event_log_type log_type;
> +	struct cxl_memdev_state *mds;
> +	struct cxl_dev_state *cxlds;
> +	struct pci_dev *pdev;
> +	unsigned int devfn;
> +	u32 hdr_flags;
> +
> +	mds = container_of(nb, struct cxl_memdev_state, cxl_cper_nb);
> +
> +	devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
> +	pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
> +					   device_id->bus_num, devfn);
> +	cxlds = pci_get_drvdata(pdev);
> +	if (cxlds != &mds->cxlds) {

Checks of drvdata are only valid under the device lock, or with the
assumption that this callback will never be called while pci_get_drvdata
would return NULL. With that, the check of cxlds looks like another
artifact of using a blocking notifier chain for this callback. With an
explicit single callback it simply becomes safe to assume that it is
being called back before unregister_cxl_cper() has run. I.e. it is
impossible to even write this check in that case.
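I.e. something like the below (untested sketch; unregister_cxl_cper() is
the only name taken from the comment above, everything else is invented
for illustration):

#include <linux/cleanup.h>	/* guard() */
#include <linux/rwsem.h>

/* CPER/GHES side: a single callback slot instead of a notifier chain */
typedef void (*cxl_cper_callback)(enum cxl_event_type type,
				  struct cxl_cper_event_rec *rec);

static DECLARE_RWSEM(cxl_cper_rw_sem);
static cxl_cper_callback cper_callback;

int register_cxl_cper(cxl_cper_callback callback)
{
	/* single slot: a second registrant is a bug, not a chain */
	guard(rwsem_write)(&cxl_cper_rw_sem);
	if (cper_callback)
		return -EINVAL;
	cper_callback = callback;
	return 0;
}

void unregister_cxl_cper(void)
{
	/* the write lock waits for any in-flight callback to finish */
	guard(rwsem_write)(&cxl_cper_rw_sem);
	cper_callback = NULL;
}

/* CPER event delivery path */
static void cxl_cper_post_event(enum cxl_event_type type,
				struct cxl_cper_event_rec *rec)
{
	guard(rwsem_read)(&cxl_cper_rw_sem);
	if (cper_callback)
		cper_callback(type, rec);
}

With that shape, once unregister_cxl_cper() returns no callback can be
in flight, the per-memdev drvdata comparison is not even writable, and
the single callback just resolves the pci_dev from the CPER header and
goes from there.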
> +		pci_dev_put(pdev);
> +		return NOTIFY_DONE;
> +	}
> +
> +	/* Fabricate a log type */
> +	hdr_flags = get_unaligned_le24(nd->rec->event.generic.hdr.flags);
> +	log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
> +
> +	cxl_event_trace_record(mds->cxlds.cxlmd, log_type, nd->event_type,
> +			       &nd->rec->event);
> +	pci_dev_put(pdev);
> +	return NOTIFY_OK;
> +}
> +
> +static void cxl_unregister_cper_events(void *_mds)
> +{
> +	struct cxl_memdev_state *mds = _mds;
> +
> +	unregister_cxl_cper_notifier(&mds->cxl_cper_nb);
> +}
> +
> +static void register_cper_events(struct cxl_memdev_state *mds)
> +{
> +	mds->cxl_cper_nb.notifier_call = cxl_cper_event_call;
> +
> +	if (register_cxl_cper_notifier(&mds->cxl_cper_nb)) {
> +		dev_err(mds->cxlds.dev, "CPER registration failed\n");
> +		return;
> +	}
> +
> +	devm_add_action_or_reset(mds->cxlds.dev, cxl_unregister_cper_events, mds);

Longer term I am not sure cxl_pci should be doing this registration
directly to the CPER code vs some indirection in the core that the
generic type-3 and the type-2 cases can register for processing. That
can definitely wait until a Type-2 CXL.mem device driver arrives and
wants to get notified of CXL CPER events (rough sketch of what I mean
below the quote).

> +}
> +
>  static int cxl_event_config(struct pci_host_bridge *host_bridge,
>  			    struct cxl_memdev_state *mds)
>  {
> @@ -751,8 +805,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
>  	 * When BIOS maintains CXL error reporting control, it will process
>  	 * event records. Only one agent can do so.
>  	 */
> -	if (!host_bridge->native_cxl_error)
> +	if (!host_bridge->native_cxl_error) {
> +		register_cper_events(mds);
>  		return 0;
> +	}
>
>  	rc = cxl_mem_alloc_event_buf(mds);
>  	if (rc)
>
> --
> 2.42.0
>
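For the "indirection in the core" comment above, purely illustrative
and nothing to gate this patch on (all names here are invented):
cxl_core would own the one CPER registration and fan events out to
whichever endpoint driver claimed the memdev, type-3 or type-2:

struct cxl_cper_ops {
	void (*event)(struct cxl_memdev *cxlmd, enum cxl_event_type type,
		      union cxl_event *event);
};

/* cxl_pci (type-3) today, a type-2 CXL.mem driver tomorrow */
int cxl_cper_subscribe(struct cxl_memdev *cxlmd,
		       const struct cxl_cper_ops *ops);
void cxl_cper_unsubscribe(struct cxl_memdev *cxlmd);

/* registered once with the CPER code by cxl_core */
static void cxl_cper_dispatch(enum cxl_event_type type,
			      struct cxl_cper_event_rec *rec)
{
	/* resolve segment/bus/devfn from the CPER header once, centrally */
	struct cxl_memdev *cxlmd = cxl_find_memdev(&rec->hdr.device_id);

	if (cxlmd && cxlmd->cper_ops)
		cxlmd->cper_ops->event(cxlmd, type, &rec->event);
}

That keeps the PCI plumbing out of the endpoint drivers and gives a
future type-2 driver the same hook for free.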