Re: [PATCH 14/17] gup: Convert for_each_compound_head() to gup_for_each_folio()

On 1/2/22 13:57, Matthew Wilcox (Oracle) wrote:
This macro can be considerably simplified by returning the folio from
gup_folio_next() instead of void from compound_next().  Convert both
callers to work on folios instead of pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
  mm/gup.c | 47 ++++++++++++++++++++++++-----------------------
  1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 7bd1e4a2648a..eaffa6807609 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -239,31 +239,29 @@ static inline void compound_range_next(unsigned long i, unsigned long npages,
  	     __i < __npages; __i += __ntails, \
  	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))
-static inline void compound_next(unsigned long i, unsigned long npages,
-				 struct page **list, struct page **head,
-				 unsigned int *ntails)
+static inline struct folio *gup_folio_next(unsigned long i,
+		unsigned long npages, struct page **list, unsigned int *ntails)
  {
-	struct page *page;
+	struct folio *folio;
  	unsigned int nr;
  	if (i >= npages)
-		return;
+		return NULL;
-	page = compound_head(list[i]);
+	folio = page_folio(list[i]);
  	for (nr = i + 1; nr < npages; nr++) {
-		if (compound_head(list[nr]) != page)
+		if (page_folio(list[nr]) != folio)
  			break;
  	}
-	*head = page;
  	*ntails = nr - i;
+	return folio;
  }
-#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
-	for (__i = 0, \
-	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
-	     __i < __npages; __i += __ntails, \
-	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))
+#define gup_for_each_folio(__i, __list, __npages, __folio, __ntails) \
+	for (__i = 0; \
+	     (__folio = gup_folio_next(__i, __npages, __list, &(__ntails))) != NULL; \
+	     __i += __ntails)


This is nice. I find these pre-existing macros to be really quite
horrible, but I was unable to suggest anything better at the time, so
it's good to see the simplification. :)
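
Just to restate how this lands at the call sites (a sketch only, mirroring
the unpin_user_pages() hunk further down, nothing new):

	unsigned long i;
	unsigned int nr;
	struct folio *folio;

	/* Walk the pinned pages array one folio at a time; "nr" is the
	 * number of consecutive entries that belong to "folio". */
	gup_for_each_folio(i, pages, npages, folio, nr)
		gup_put_folio(folio, nr, FOLL_PIN);

That reads like a plain loop now, instead of chaining compound_next()
calls inside both clauses of the for(...) header.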

  /**
   * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
@@ -291,15 +289,15 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
  				 bool make_dirty)
  {
  	unsigned long index;
-	struct page *head;
-	unsigned int ntails;
+	struct folio *folio;
+	unsigned int nr;
  	if (!make_dirty) {
  		unpin_user_pages(pages, npages);
  		return;
  	}
-	for_each_compound_head(index, pages, npages, head, ntails) {
+	gup_for_each_folio(index, pages, npages, folio, nr) {
  		/*
  		 * Checking PageDirty at this point may race with
  		 * clear_page_dirty_for_io(), but that's OK. Two key
@@ -320,9 +318,12 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
  		 * written back, so it gets written back again in the
  		 * next writeback cycle. This is harmless.
  		 */
-		if (!PageDirty(head))
-			set_page_dirty_lock(head);
-		put_compound_head(head, ntails, FOLL_PIN);
+		if (!folio_test_dirty(folio)) {
+			folio_lock(folio);
+			folio_mark_dirty(folio);
+			folio_unlock(folio);

At some point, maybe even here, I suspect that creating the folio
version of set_page_dirty_lock() would help. I'm sure you have
a better feel for whether it helps, after doing all of this conversion
work, but it just sort of jumped out at me as surprising to see it
in this form.
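
If that helper does materialize, I'd expect it to be little more than a
wrapper around the three calls above. A rough sketch (the
folio_mark_dirty_lock() name and bool return are just my guesses, not
part of this series):

	/*
	 * Sketch only: a folio counterpart to set_page_dirty_lock(),
	 * wrapping the lock/mark/unlock sequence open-coded above.
	 */
	static bool folio_mark_dirty_lock(struct folio *folio)
	{
		bool ret;

		folio_lock(folio);
		ret = folio_mark_dirty(folio);
		folio_unlock(folio);

		return ret;
	}

With something like that, the hunk above collapses back to a single call
under the !folio_test_dirty() check, matching the shape of the original
set_page_dirty_lock() call.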

In any case, this all looks correct, so


Reviewed-by: John Hubbard <jhubbard@xxxxxxxxxx>

thanks,
--
John Hubbard
NVIDIA

+		}
+		gup_put_folio(folio, nr, FOLL_PIN);
  	}
  }
  EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
@@ -375,8 +376,8 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
  void unpin_user_pages(struct page **pages, unsigned long npages)
  {
  	unsigned long index;
-	struct page *head;
-	unsigned int ntails;
+	struct folio *folio;
+	unsigned int nr;
  	/*
  	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
@@ -386,8 +387,8 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
  	if (WARN_ON(IS_ERR_VALUE(npages)))
  		return;
-	for_each_compound_head(index, pages, npages, head, ntails)
-		put_compound_head(head, ntails, FOLL_PIN);
+	gup_for_each_folio(index, pages, npages, folio, nr)
+		gup_put_folio(folio, nr, FOLL_PIN);
  }
  EXPORT_SYMBOL(unpin_user_pages);
