Since a CXL device is a memory device, a CPU consuming a poison page of the CXL device always triggers an MCE (via interrupt #18) and calls memory_failure() to handle the POISON page, no matter which "-First" path is configured. The CXL device can also find and report the POISON proactively; in that case the kernel now not only traces the event but also calls memory_failure() to handle it, marked "NEW" in the figure below.

```
1. MCE (interrupt #18, while CPU consuming POISON)
    -> do_machine_check()
     -> mce_log()
      -> notify chain (x86_mce_decoder_chain)
       -> memory_failure()             <---------------------------- EXISTS

2.a FW-First (optional, CXL device proactively find&report)
    -> CXL device -> Firmware
     -> OS: ACPI->APEI->GHES->CPER -> CXL driver
      -> trace
      \-> memory_failure()             <---------------------------- NEW

2.b OS-First (optional, CXL device proactively find&report)
    -> CXL device -> MSI
     -> OS: CXL driver
      -> trace
      \-> memory_failure()             <---------------------------- NEW
```

But this way, memory_failure() could be called twice, or even concurrently, for the same POISON page before it is cleared: once via (1.) and once via (2.a or 2.b) in the figure above. memory_failure() holds its own mutex, so the two calls won't actually run at the same time, and the later call would normally bail out because the HWPoison bit is already set. However, consider this scenario: the CXL device reports a POISON error, which triggers the 1st call; the user sees it in the log and runs `cxl clear-poison` to clear the poison; at the same time, a process accesses the POISON page, which triggers an MCE, the 2nd call. Since there is no lock between the 2nd call and the clear-poison operation, they can race, which may leave the HWPoison bit of the page in an unknown state. Thus, the 2nd call has to be avoided.

This patch[2] introduces a new notifier_block on `x86_mce_decoder_chain` and a POISON record cache to stop the 2nd call of memory_failure(). The notifier checks whether the current poison page has already been reported; if so, it stops the notifier chain so that the subsequent memory_failure() notifier does not report the page again.
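For illustration only (not part of the patch below), the dedup idea reduces to a "record once" set keyed by HPA. The patch implements it with xas_load()/xas_store() under the xa_lock, but the same "first reporter wins" semantics can be sketched with plain xa_insert(), which fails with -EBUSY when the index is already occupied; `poison_seen` and `poison_already_reported` are hypothetical names used here for the sketch:

```c
/*
 * Minimal sketch of the dedup pattern, assuming 64-bit (HPA fits in
 * the xarray's unsigned long index).  Not the patch's actual code.
 */
#include <linux/xarray.h>

static DEFINE_XARRAY(poison_seen);

static bool poison_already_reported(u64 hpa)
{
	/*
	 * The first caller stores an entry and gets 0; any later caller
	 * for the same hpa gets -EBUSY.  (-ENOMEM would be treated as
	 * "not yet reported" here, which is fine for a sketch.)
	 */
	return xa_insert(&poison_seen, hpa, xa_mk_value(1), GFP_ATOMIC) == -EBUSY;
}
```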
Signed-off-by: Shiyang Ruan <ruansy.fnst@xxxxxxxxxxx>
---
 arch/x86/include/asm/mce.h |   1 +
 drivers/cxl/core/mbox.c    | 115 +++++++++++++++++++++++++++++++++++++
 drivers/cxl/core/memdev.c  |   6 +-
 drivers/cxl/cxlmem.h       |   3 +
 4 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3ad29b128943..5da45e870858 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -182,6 +182,7 @@ enum mce_notifier_prios {
 	MCE_PRIO_NFIT,
 	MCE_PRIO_EXTLOG,
 	MCE_PRIO_UC,
+	MCE_PRIO_CXL,
 	MCE_PRIO_EARLY,
 	MCE_PRIO_CEC,
 	MCE_PRIO_HIGHEST = MCE_PRIO_CEC
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 0cb6ef2e6600..b21700428c35 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -4,6 +4,8 @@
 #include <linux/debugfs.h>
 #include <linux/ktime.h>
 #include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <asm/mce.h>
 #include <asm/unaligned.h>
 #include <cxlpci.h>
 #include <cxlmem.h>
@@ -925,6 +927,9 @@ void cxl_event_handle_record(struct cxl_memdev *cxlmd,
 	if (cxlr)
 		hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
 
+	if (hpa != ULLONG_MAX && cxl_mce_recorded(hpa))
+		return;
+
 	if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
 		trace_cxl_general_media(cxlmd, type, cxlr, hpa,
 					&evt->gen_media);
@@ -1457,6 +1462,112 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
 
+DEFINE_XARRAY(cxl_mce_records);
+
+bool cxl_mce_recorded(u64 hpa)
+{
+	XA_STATE(xas, &cxl_mce_records, hpa);
+	void *entry;
+
+	xas_lock_irq(&xas);
+	entry = xas_load(&xas);
+	if (entry) {
+		xas_unlock_irq(&xas);
+		return true;
+	}
+	entry = xa_mk_value(hpa);
+	xas_store(&xas, entry);
+	xas_unlock_irq(&xas);
+
+	return false;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mce_recorded, CXL);
+
+void cxl_mce_clear(u64 hpa)
+{
+	XA_STATE(xas, &cxl_mce_records, hpa);
+	void *entry;
+
+	xas_lock_irq(&xas);
+	entry = xas_load(&xas);
+	if (entry) {
+		xas_store(&xas, NULL);
+	}
+	xas_unlock_irq(&xas);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mce_clear, CXL);
+
+struct cxl_contains_hpa_context {
+	bool contains;
+	u64 hpa;
+};
+
+static int __cxl_contains_hpa(struct device *dev, void *arg)
+{
+	struct cxl_contains_hpa_context *ctx = arg;
+	struct cxl_endpoint_decoder *cxled;
+	struct range *range;
+	u64 hpa = ctx->hpa;
+
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxled = to_cxl_endpoint_decoder(dev);
+	range = &cxled->cxld.hpa_range;
+
+	if (range->start <= hpa && hpa <= range->end) {
+		ctx->contains = true;
+		return 1;
+	}
+
+	return 0;
+}
+
+static bool cxl_contains_hpa(const struct cxl_memdev *cxlmd, u64 hpa)
+{
+	struct cxl_contains_hpa_context ctx = {
+		.contains = false,
+		.hpa = hpa,
+	};
+	struct cxl_port *port;
+
+	port = cxlmd->endpoint;
+	guard(rwsem_write)(&cxl_region_rwsem);
+	if (port && cxl_num_decoders_committed(port))
+		device_for_each_child(&port->dev, &ctx, __cxl_contains_hpa);
+
+	return ctx.contains;
+}
+
+static int cxl_handle_mce(struct notifier_block *nb, unsigned long val,
+			  void *data)
+{
+	struct mce *mce = (struct mce *)data;
+	struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state,
+						    mce_notifier);
+	u64 hpa;
+
+	if (!mce || !mce_usable_address(mce))
+		return NOTIFY_DONE;
+
+	hpa = mce->addr & MCI_ADDR_PHYSADDR;
+
+	/* Check if the PFN is located on this CXL device */
+	if (!pfn_valid(hpa >> PAGE_SHIFT) &&
+	    !cxl_contains_hpa(mds->cxlds.cxlmd, hpa))
+		return NOTIFY_DONE;
+
+	/*
+	 * Search PFN in the cxl_mce_records, if already exists, don't
+	 * continue to do memory_failure() to avoid a poison address being
+	 * reported more than once.
+	 */
+	if (cxl_mce_recorded(hpa))
+		return NOTIFY_STOP;
+	else
+		return NOTIFY_OK;
+}
+
 struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 {
 	struct cxl_memdev_state *mds;
@@ -1476,6 +1587,10 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
 	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
 
+	mds->mce_notifier.notifier_call = cxl_handle_mce;
+	mds->mce_notifier.priority = MCE_PRIO_CXL;
+	mce_register_decode_chain(&mds->mce_notifier);
+
 	return mds;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 0277726afd04..9d4ed4dc4d51 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -376,10 +376,14 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		goto out;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
-	if (cxlr)
+	if (cxlr) {
+		u64 hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+
+		cxl_mce_clear(hpa);
 		dev_warn_once(mds->cxlds.dev,
 			      "poison clear dpa:%#llx region: %s\n", dpa,
 			      dev_name(&cxlr->dev));
+	}
 
 	record = (struct cxl_poison_record) {
 		.address = cpu_to_le64(dpa),
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5c4810dcbdeb..d2d906c26755 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -502,6 +502,7 @@ struct cxl_memdev_state {
 	struct cxl_fw_state fw;
 
 	struct rcuwait mbox_wait;
+	struct notifier_block mce_notifier;
 	int (*mbox_send)(struct cxl_memdev_state *mds,
 			 struct cxl_mbox_cmd *cmd);
 };
@@ -837,6 +838,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+bool cxl_mce_recorded(u64 pfn);
+void cxl_mce_clear(u64 pfn);
 
 #ifdef CONFIG_CXL_SUSPEND
 void cxl_mem_active_inc(void);
-- 
2.34.1