On 7/29/22 12:44, Jason Gunthorpe wrote:
> I came up with this ontop:
That cleans it up even more, looks nice. I'd go just a touch further, and
also (unless there is some odd reason not to?) stay with -EAGAIN rather than
-EBUSY, because otherwise both the function's comment header and the caller
would have to switch over to -EBUSY, just for consistency. And also because
of the way it's used: the caller is literally "trying again". So, on top of
the ontop:

diff --git a/mm/gup.c b/mm/gup.c
index 43c1fc532842..5f04033ee0ed 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1901,10 +1901,12 @@ struct page *get_dump_page(unsigned long addr)
 
 #ifdef CONFIG_MIGRATION
 /*
- * Check whether all pages are pinnable. If some pages are not pinnable migrate
- * them and unpin all the pages. Returns -EAGAIN if pages were unpinned or zero
- * if all pages are pinnable and in the right zone. Other errors indicate
- * migration failure.
+ * Check whether all pages are pinnable. If some pages are not pinnable, migrate
+ * them and unpin all the pages.
+ * Return values:
+ *	0:		all pages are already pinnable and in the right zone
+ *	-EAGAIN:	some pages were unpinned or were zero
+ *	-ENOMEM:	migration of some pages failed
  */
 static long check_and_migrate_movable_pages(unsigned long nr_pages,
 					    struct page **pages,
@@ -1914,11 +1916,11 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
 		.nid = NUMA_NO_NODE,
 		.gfp_mask = GFP_USER | __GFP_NOWARN,
 	};
-	unsigned long i;
+	unsigned long i, not_migrated;
 	struct folio *prev_folio = NULL;
 	LIST_HEAD(movable_page_list);
 	bool drain_allow = true, coherent_pages = false;
-	int ret = -EBUSY;
+	int ret = -EAGAIN;
 
 	for (i = 0; i < nr_pages; i++) {
 		struct folio *folio = page_folio(pages[i]);
@@ -1990,7 +1992,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
 
 	/*
 	 * Unpin all pages. If device coherent pages were found
-	 * migrate_deivce_coherent_page() will have dropped the pin and set
+	 * migrate_device_coherent_page() will have dropped the pin and set
	 * pages[i] == NULL.
	 */
 	for (i = 0; i < nr_pages; i++) {

thanks,
--
John Hubbard
NVIDIA
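
P.S. To make the "literally trying again" point concrete, here is a rough
sketch of the retry shape at the call site. This is not the actual mm/gup.c
caller; pin_pages_once() is a made-up stand-in for the real pinning step:

/*
 * Sketch only: pin_pages_once() is hypothetical, standing in for the real
 * GUP pinning step. The point is the loop shape: -EAGAIN from
 * check_and_migrate_movable_pages() means the pages were unpinned (and the
 * unpinnable ones queued for migration), so the caller simply pins and
 * checks again until everything comes back pinnable, or a real error
 * (e.g. -ENOMEM) ends the loop.
 */
static long longterm_pin(unsigned long nr_pages, struct page **pages,
			 unsigned int gup_flags)
{
	long rc;

	do {
		/* (re)pin; returns number of pages pinned, or an errno */
		rc = pin_pages_once(nr_pages, pages, gup_flags);
		if (rc <= 0)
			break;
		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
	} while (rc == -EAGAIN);

	return rc;
}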