Re: [RFC 2/4] filemap: use minimum order while allocating folios

On 6/21/23 10:38, Pankaj Raghav wrote:
Add support to filemap and readahead to use the folio order set by
mapping_min_folio_order().

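For context, mapping_min_folio_order() reports the minimum folio order configured for the mapping, which each allocation site below then passes to filemap_alloc_folio() instead of hard-coding order 0. A minimal sketch of the helper's shape, assuming the minimum order is packed into mapping->flags (the AS_* names here are placeholders, not taken from this series):

static inline unsigned int mapping_min_folio_order(struct address_space *mapping)
{
	/* placeholder encoding: a few bits of mapping->flags hold the minimum order */
	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN_SHIFT;
}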
Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
---
  mm/filemap.c   |  9 ++++++---
  mm/readahead.c | 34 ++++++++++++++++++++++++----------
  2 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 3b73101f9f86..9dc8568e9336 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1936,7 +1936,8 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
  			gfp |= GFP_NOWAIT | __GFP_NOWARN;
  		}
-		folio = filemap_alloc_folio(gfp, 0);
+		folio = filemap_alloc_folio(gfp,
+					    mapping_min_folio_order(mapping));
  		if (!folio)
  			return ERR_PTR(-ENOMEM);
@@ -2495,7 +2496,8 @@ static int filemap_create_folio(struct file *file,
  	struct folio *folio;
  	int error;
-	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+	folio = filemap_alloc_folio(mapping_gfp_mask(mapping),
+				    mapping_min_folio_order(mapping));
  	if (!folio)
  		return -ENOMEM;
@@ -3663,7 +3665,8 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
  repeat:
  	folio = filemap_get_folio(mapping, index);
  	if (IS_ERR(folio)) {
-		folio = filemap_alloc_folio(gfp, 0);
+		folio = filemap_alloc_folio(gfp,
+					    mapping_min_folio_order(mapping));
  		if (!folio)
  			return ERR_PTR(-ENOMEM);
  		err = filemap_add_folio(mapping, folio, index, gfp);
diff --git a/mm/readahead.c b/mm/readahead.c
index 47afbca1d122..090b810ddeed 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -245,7 +245,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
  			continue;
  		}
-		folio = filemap_alloc_folio(gfp_mask, 0);
+		folio = filemap_alloc_folio(gfp_mask,
+					    mapping_min_folio_order(mapping));
  		if (!folio)
  			break;
  		if (filemap_add_folio(mapping, folio, index + i,
@@ -259,7 +260,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
  		if (i == nr_to_read - lookahead_size)
  			folio_set_readahead(folio);
  		ractl->_workingset |= folio_test_workingset(folio);
-		ractl->_nr_pages++;
+		ractl->_nr_pages += folio_nr_pages(folio);
+		i += folio_nr_pages(folio) - 1;
  	}
This is incomplete, as the two continue paths in the loop above still blindly step by a single page.
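For illustration, after the hunk above both continue paths still read (these are exactly the lines the attached patch removes):

		read_pages(ractl);
		/* advances by one page even when the folio spans 2^min_order pages */
		ractl->_index++;
		/* the "- 1" relies on the loop's i++ stepping by exactly one */
		i = ractl->_index + ractl->_nr_pages - index - 1;
		continue;

so _index and i fall out of step as soon as a folio covers more than one page.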

I found it better to rework the 'for' into a 'do/while' loop; please check the attached patch.
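Applied, the preallocation loop then reads roughly as follows (the allocation in the middle of the body is elided; see the diff below for the full picture):

	unsigned long i = 0;

	do {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);

		if (folio && !xa_is_value(folio)) {
			/* already cached: submit the current batch, then skip the whole folio */
			read_pages(ractl);
			ractl->_index += folio_nr_pages(folio);
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		/* ... allocate the folio and filemap_add_folio() as before;
		 * a failed add advances _index and i the same way as above ... */

		if (i == nr_to_read - lookahead_size)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += folio_nr_pages(folio);
		i += folio_nr_pages(folio);
	} while (i < nr_to_read);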

Cheers,

Hannes
From bdab80c39d4da1d4d5c47706d85e8de7e3d2da10 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke <hare@xxxxxxx>
Date: Tue, 20 Jun 2023 08:49:31 +0200
Subject: [PATCH 04/18] mm/readahead: rework loop in page_cache_ra_unbounded()

Rework the loop in page_cache_ra_unbounded() to advance by the
number of pages in a folio instead of one page at a time.

Signed-off-by: Hannes Reinecke <hare@xxxxxxx>
---
 mm/readahead.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index 47afbca1d122..1700603685d0 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -209,7 +209,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	struct address_space *mapping = ractl->mapping;
 	unsigned long index = readahead_index(ractl);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
-	unsigned long i;
+	unsigned long i = 0;
 
 	/*
 	 * Partway through the readahead operation, we will have added
@@ -227,7 +227,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
-	for (i = 0; i < nr_to_read; i++) {
+	do {
 		struct folio *folio = xa_load(&mapping->i_pages, index + i);
 
 		if (folio && !xa_is_value(folio)) {
@@ -240,8 +240,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			 * not worth getting one just for that.
 			 */
 			read_pages(ractl);
-			ractl->_index++;
-			i = ractl->_index + ractl->_nr_pages - index - 1;
+			ractl->_index += folio_nr_pages(folio);
+			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
 
@@ -252,15 +252,16 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 					gfp_mask) < 0) {
 			folio_put(folio);
 			read_pages(ractl);
-			ractl->_index++;
-			i = ractl->_index + ractl->_nr_pages - index - 1;
+			ractl->_index += folio_nr_pages(folio);
+			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
 		if (i == nr_to_read - lookahead_size)
 			folio_set_readahead(folio);
 		ractl->_workingset |= folio_test_workingset(folio);
-		ractl->_nr_pages++;
-	}
+		ractl->_nr_pages += folio_nr_pages(folio);
+		i += folio_nr_pages(folio);
+	} while (i < nr_to_read);
 
 	/*
 	 * Now start the IO.  We ignore I/O errors - if the folio is not
-- 
2.35.3

