The quilt patch titled
     Subject: mm/memory-failure: add macro GET_PAGE_MAX_RETRY_NUM
has been removed from the -mm tree.  Its filename was
     mm-memory-failure-add-macro-get_page_max_retry_num.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Subject: mm/memory-failure: add macro GET_PAGE_MAX_RETRY_NUM
Date: Wed, 12 Jun 2024 15:18:25 +0800

Add helper macro GET_PAGE_MAX_RETRY_NUM to replace magic number 3.  No
functional change intended.

Link: https://lkml.kernel.org/r/20240612071835.157004-4-linmiaohe@xxxxxxxxxx
Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: kernel test robot <lkp@xxxxxxxxx>
Cc: Naoya Horiguchi <nao.horiguchi@xxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/memory-failure.c |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

--- a/mm/memory-failure.c~mm-memory-failure-add-macro-get_page_max_retry_num
+++ a/mm/memory-failure.c
@@ -1417,6 +1417,8 @@ static int __get_hwpoison_page(struct pa
 	return 0;
 }
 
+#define GET_PAGE_MAX_RETRY_NUM 3
+
 static int get_any_page(struct page *p, unsigned long flags)
 {
 	int ret = 0, pass = 0;
@@ -1431,12 +1433,12 @@ try_again:
 		if (!ret) {
 			if (page_count(p)) {
 				/* We raced with an allocation, retry. */
-				if (pass++ < 3)
+				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
 					goto try_again;
 				ret = -EBUSY;
 			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
 				/* We raced with put_page, retry. */
-				if (pass++ < 3)
+				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
 					goto try_again;
 				ret = -EIO;
 			}
@@ -1462,7 +1464,7 @@ try_again:
 		 * A page we cannot handle. Check whether we can turn
 		 * it into something we can handle.
 		 */
-		if (pass++ < 3) {
+		if (pass++ < GET_PAGE_MAX_RETRY_NUM) {
 			put_page(p);
 			shake_page(p);
 			count_increased = false;
_

Patches currently in -mm which might be from linmiaohe@xxxxxxxxxx are
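
As a side note for readers skimming the diff above, the macro merely gives a
name to the bound of a retry loop.  A minimal, self-contained userspace C
sketch of the same bounded-retry idiom (hypothetical names and behaviour, not
the kernel code above) could look like this:

#include <stdio.h>

/* Named retry bound instead of a bare "3" (mirrors GET_PAGE_MAX_RETRY_NUM). */
#define GRAB_MAX_RETRY_NUM	3	/* hypothetical name, for illustration */

/* Stand-in for an operation that can transiently fail, e.g. due to a race. */
static int try_grab(int attempt)
{
	return attempt >= 2;	/* pretend it succeeds on the third attempt */
}

int main(void)
{
	int pass = 0;

try_again:
	if (!try_grab(pass)) {
		/* Lost the race; retry a bounded number of times. */
		if (pass++ < GRAB_MAX_RETRY_NUM)
			goto try_again;
		fprintf(stderr, "gave up after %d retries\n", GRAB_MAX_RETRY_NUM);
		return 1;
	}
	printf("succeeded on pass %d\n", pass);
	return 0;
}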