Because we reuse the first tail vmemmap page frame and remap it
read-only, we cannot set PageHWPoison on some tail pages directly.
Instead, use head[4].private (there are at least 128 struct page
structures associated with the optimized HugeTLB page, so using
head[4].private is safe) to record the index of the raw error page and
set PageHWPoison on the raw error page later.

A single slot is sufficient because no further tail page can be
poisoned once the head page carries the flag:

  memory_failure()
    if (PageHuge(page))
      memory_failure_hugetlb()
        head = compound_head(page)
        if (TestSetPageHWPoison(head))
          return

Since we do not clear the HWPoison flag of the head page, a second
attempt to poison a tail page of the same HugeTLB page returns early,
so the recorded index cannot be overwritten. Note that some pages might
miss their poisoning because of this, but that is already the case
without this patch.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Reviewed-by: Oscar Salvador <osalvador@xxxxxxx>
Acked-by: David Rientjes <rientjes@xxxxxxxxxx>
Tested-by: Chen Huang <chenhuang5@xxxxxxxxxx>
Tested-by: Bodeddula Balasubramaniam <bodeddub@xxxxxxxxxx>
---
 include/linux/hugetlb.h |  3 ++
 mm/hugetlb.c            | 81 +++++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 75 insertions(+), 9 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 09421f5f35e2..7f7a0e3405ae 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -42,6 +42,9 @@ enum {
 	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
 	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
 #endif
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+	SUBPAGE_INDEX_HWPOISON,		/* reuse page->private */
+#endif
 	__NR_USED_SUBPAGE,
 };
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e42b19337a8f..53f239818293 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1304,6 +1304,74 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
+{
+	struct page *page;
+
+	if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
+		return;
+
+	page = head + page_private(head + SUBPAGE_INDEX_HWPOISON);
+
+	/*
+	 * Move PageHWPoison flag from head page to the raw error page,
+	 * which makes any subpages rather than the error page reusable.
+	 */
+	if (page != head) {
+		SetPageHWPoison(page);
+		ClearPageHWPoison(head);
+	}
+}
+
+static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
+					struct page *page)
+{
+	if (!PageHWPoison(head))
+		return;
+
+	if (free_vmemmap_pages_per_hpage(h)) {
+		set_page_private(head + SUBPAGE_INDEX_HWPOISON, page - head);
+	} else if (page != head) {
+		/*
+		 * Move PageHWPoison flag from head page to the raw error page,
+		 * which makes any subpages rather than the error page reusable.
+		 */
+		SetPageHWPoison(page);
+		ClearPageHWPoison(head);
+	}
+}
+
+static inline void hwpoison_subpage_clear(struct hstate *h, struct page *head)
+{
+	if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
+		return;
+
+	set_page_private(head + SUBPAGE_INDEX_HWPOISON, 0);
+}
+#else
+static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
+{
+}
+
+static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
+					struct page *page)
+{
+	if (PageHWPoison(head) && page != head) {
+		/*
+		 * Move PageHWPoison flag from head page to the raw error page,
+		 * which makes any subpages rather than the error page reusable.
+		 */
+		SetPageHWPoison(page);
+		ClearPageHWPoison(head);
+	}
+}
+
+static inline void hwpoison_subpage_clear(struct hstate *h, struct page *head)
+{
+}
+#endif
+
 static int update_and_free_page_surplus(struct hstate *h, struct page *page,
 					bool acct_surplus)
 	__releases(&hugetlb_lock) __acquires(&hugetlb_lock)
@@ -1807,22 +1875,17 @@ int dissolve_free_huge_page(struct page *page)
 			goto retry;
 		}
 
-		/*
-		 * Move PageHWPoison flag from head page to the raw error page,
-		 * which makes any subpages rather than the error page reusable.
-		 */
-		if (PageHWPoison(head) && page != head) {
-			SetPageHWPoison(page);
-			ClearPageHWPoison(head);
-		}
+		hwpoison_subpage_set(h, head, page);
 		list_del(&head->lru);
 		ClearHPageFreed(page);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		h->max_huge_pages--;
 		rc = update_and_free_page_surplus(h, head, false);
-		if (rc)
+		if (rc) {
 			h->max_huge_pages++;
+			hwpoison_subpage_clear(h, head);
+		}
 	}
 out:
 	spin_unlock(&hugetlb_lock);
-- 
2.11.0