We should check as early as possible whether we need to bail out due to
writeback or truncation. This will allow us to add further sanity
checks earlier as well. This introduces no functional changes.

Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
---
 mm/huge_memory.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

While working on min order support for LBS (large block sizes), this
came up as an improvement: it lets us check for the min order earlier,
so it sets the stage for that work.

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86a8c7b3b8dc..32c701821e0d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3055,8 +3055,17 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	if (new_order >= folio_order(folio))
 		return -EINVAL;
 
-	/* Cannot split anonymous THP to order-1 */
-	if (new_order == 1 && folio_test_anon(folio)) {
+	if (folio_test_writeback(folio))
+		return -EBUSY;
+
+	if (!folio_test_anon(folio)) {
+		/* Truncated ? */
+		if (!folio->mapping) {
+			ret = -EBUSY;
+			goto out;
+		}
+	} else if (new_order == 1) {
+		/* Cannot split anonymous THP to order-1 */
 		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
 		return -EINVAL;
 	}
@@ -3079,15 +3088,12 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		}
 	}
 
 	is_hzp = is_huge_zero_page(&folio->page);
 	if (is_hzp) {
 		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
 		return -EBUSY;
 	}
 
-	if (folio_test_writeback(folio))
-		return -EBUSY;
-
 	if (folio_test_anon(folio)) {
 		/*
 		 * The caller does not necessarily hold an mmap_lock that would
@@ -3111,12 +3117,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 
 	mapping = folio->mapping;
 
-	/* Truncated ? */
-	if (!mapping) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	/*
 	 * Do not split if mapping has minimum folio order
 	 * requirement.
-- 
2.43.0
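
For reviewers who want the end state in one place, here is a minimal,
self-contained sketch of the check ordering this patch establishes at
the top of split_huge_page_to_list_to_order(). The helper name
split_prep_checks() is hypothetical, and the real function's
"ret = -EBUSY; goto out;" path for the truncated case (which keeps the
THP split counters accurate) is collapsed into a plain return here;
only the ordering of the early bailout checks mirrors the patch.

#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only; the real checks live
 * inline at the top of split_huge_page_to_list_to_order().
 */
static int split_prep_checks(struct folio *folio, unsigned int new_order)
{
	/* Splitting to the same or a larger order makes no sense. */
	if (new_order >= folio_order(folio))
		return -EINVAL;

	/* Bail out before anything else: no splitting under writeback. */
	if (folio_test_writeback(folio))
		return -EBUSY;

	if (!folio_test_anon(folio)) {
		/* A file-backed folio with a NULL mapping was truncated. */
		if (!folio->mapping)
			return -EBUSY;
	} else if (new_order == 1) {
		/* Cannot split anonymous THP to order-1 */
		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
		return -EINVAL;
	}

	/* Only now is it worth taking locks and freezing refcounts. */
	return 0;
}

Running these checks before any locks or references are taken means a
folio that cannot be split is rejected while there is still nothing to
unwind, which is also the natural point for a min order check for LBS
to slot in later.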