> As you answered in another email, p->mce_vaddr is set only on KERNEL_COPYIN case, > then if p->mce_vaddr is available also for generic SRAR MCE (I'm assuming that we are > talking about issues on race between generic SRAR MCE not only for KERNEL_COPYIN case), > that might be helpful, although it might be hard to implement. > And I'm afraid that walking page table could find the wrong virtual address if a process > have multiple entries to the single page. We could send multiple SIGBUSs for such case, > but maybe that's not an optimal solution. Yes, I agree — I can't find a way to address this multiple-entries case that makes sure we get the exactly correct virtual address. But I will also post a v2 RFC patch for this issue, for discussion purposes only! Thanks! > ---- > From 147449a97e2ea3420ac3523f13523f5d30a13614 Mon Sep 17 00:00:00 2001 > From: Naoya Horiguchi <naoya.horiguchi@xxxxxxx> > Date: Tue, 16 Mar 2021 14:22:21 +0900 > Subject: [PATCH] pagemap: expose hwpoison entry > > not-signed-off-by-yet: Naoya Horiguchi <naoya.horiguchi@xxxxxxx> > --- > fs/proc/task_mmu.c | 6 ++++++ > include/linux/swapops.h | 12 ++++++++++++ > tools/vm/page-types.c | 5 ++++- > 3 files changed, 22 insertions(+), 1 deletion(-) > > diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c > index 602e3a52884d..08cea209bae7 100644 > --- a/fs/proc/task_mmu.c > +++ b/fs/proc/task_mmu.c > @@ -1300,6 +1300,7 @@ struct pagemapread { > #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) > #define PM_SOFT_DIRTY BIT_ULL(55) > #define PM_MMAP_EXCLUSIVE BIT_ULL(56) > +#define PM_HWPOISON BIT_ULL(60) > #define PM_FILE BIT_ULL(61) > #define PM_SWAP BIT_ULL(62) > #define PM_PRESENT BIT_ULL(63) > @@ -1385,6 +1386,11 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, > if (is_migration_entry(entry)) > page = migration_entry_to_page(entry); > > + if (is_hwpoison_entry(entry)) { > + page = hwpoison_entry_to_page(entry); > + flags |= PM_HWPOISON; > + } > + > if 
(is_device_private_entry(entry)) > page = device_private_entry_to_page(entry); > } > diff --git a/include/linux/swapops.h b/include/linux/swapops.h > index d9b7c9132c2f..1b9dedbd06ab 100644 > --- a/include/linux/swapops.h > +++ b/include/linux/swapops.h > @@ -323,6 +323,13 @@ static inline int is_hwpoison_entry(swp_entry_t entry) > return swp_type(entry) == SWP_HWPOISON; > } > > +static inline struct page *hwpoison_entry_to_page(swp_entry_t entry) > +{ > + struct page *p = pfn_to_page(swp_offset(entry)); > + WARN_ON(!PageHWPoison(p)); > + return p; > +} > + > static inline void num_poisoned_pages_inc(void) > { > atomic_long_inc(&num_poisoned_pages); > @@ -345,6 +352,11 @@ static inline int is_hwpoison_entry(swp_entry_t swp) > return 0; > } > > +static inline struct page *hwpoison_entry_to_page(swp_entry_t entry) > +{ > + return NULL; > +} > + > static inline void num_poisoned_pages_inc(void) > { > } > diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c > index 0517c744b04e..1160d5a14955 100644 > --- a/tools/vm/page-types.c > +++ b/tools/vm/page-types.c > @@ -53,6 +53,7 @@ > #define PM_SWAP_OFFSET(x) (((x) & PM_PFRAME_MASK) >> MAX_SWAPFILES_SHIFT) > #define PM_SOFT_DIRTY (1ULL << 55) > #define PM_MMAP_EXCLUSIVE (1ULL << 56) > +#define PM_HWPOISON (1ULL << 60) > #define PM_FILE (1ULL << 61) > #define PM_SWAP (1ULL << 62) > #define PM_PRESENT (1ULL << 63) > @@ -311,6 +312,8 @@ static unsigned long pagemap_pfn(uint64_t val) > > if (val & PM_PRESENT) > pfn = PM_PFRAME(val); > + else if (val & PM_HWPOISON) > + pfn = PM_SWAP_OFFSET(val); > else > pfn = 0; > > @@ -742,7 +745,7 @@ static void walk_vma(unsigned long index, unsigned long count) > pfn = pagemap_pfn(buf[i]); > if (pfn) > walk_pfn(index + i, pfn, 1, buf[i]); > - if (buf[i] & PM_SWAP) > + else if (buf[i] & PM_SWAP) > walk_swap(index + i, buf[i]); > } > -- Thanks! Aili Yao