[RFC PATCH 09/25] mm/zsmalloc: convert create_page_chain() and its users to use zsdesc

Convert create_page_chain() to use zsdesc, rename it to
create_zsdesc_chain(), and update its comments accordingly. Also convert
its callers to use zsdesc.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 80 +++++++++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 40 deletions(-)
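
Note: the zsdesc layout and the helpers relied on below (alloc_zsdesc(),
free_zsdesc(), zsdesc_set_zspage(), zsdesc_set_first(), get_first_zsdesc(),
get_next_zsdesc(), zsdesc_set_movable(), page_zsdesc()/zsdesc_page()) are
introduced by earlier patches in this series and are not repeated here. For
reviewers, here is a rough, self-contained userspace sketch of the linking
scheme that create_zsdesc_chain() implements; the stub types and names
(zspage_stub, zsdesc_stub, create_chain) are illustrative stand-ins of my
own, not the kernel definitions:

/*
 * Userspace sketch of the chain built by create_zsdesc_chain().
 * The field names are simplified stand-ins: in the real code, zsdesc
 * overlays struct page, "first" corresponds to the PG_private marker,
 * and "next" replaces the old page->index chaining.
 */
#include <stdio.h>
#include <stdbool.h>

struct zspage_stub;

struct zsdesc_stub {
        struct zsdesc_stub *next;      /* link to the next sub-zsdesc      */
        struct zspage_stub *zspage;    /* back-pointer to the owning zspage */
        bool first;                    /* set only on the head descriptor   */
};

struct zspage_stub {
        struct zsdesc_stub *first_zsdesc;
};

/* Link nr descriptors into a singly linked chain owned by @zspage. */
static void create_chain(struct zspage_stub *zspage,
                         struct zsdesc_stub *descs[], int nr)
{
        struct zsdesc_stub *prev = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                struct zsdesc_stub *d = descs[i];

                d->zspage = zspage;    /* each sub-zsdesc points to its zspage */
                d->next = NULL;        /* terminate the chain at the tail      */
                if (i == 0) {
                        zspage->first_zsdesc = d;
                        d->first = true;   /* only the head is marked "first" */
                } else {
                        prev->next = d;    /* append after the previous one   */
                }
                prev = d;
        }
}

int main(void)
{
        struct zspage_stub zspage = { 0 };
        struct zsdesc_stub d[4] = { 0 };
        struct zsdesc_stub *descs[4] = { &d[0], &d[1], &d[2], &d[3] };
        struct zsdesc_stub *cur;

        create_chain(&zspage, descs, 4);

        /* Walk the chain the same way get_next_zsdesc() would. */
        for (cur = zspage.first_zsdesc; cur; cur = cur->next)
                printf("zsdesc %td: first=%d\n", cur - d, (int)cur->first);

        return 0;
}

The kernel code additionally marks the zspage as huge when it consists of a
single object in a single page, and replace_sub_zsdesc() reuses the same
chain-building helper when a page is swapped out during migration.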

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4386a24a246c..c65bdce987e9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1251,36 +1251,36 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 	set_freeobj(zspage, 0);
 }
 
-static void create_page_chain(struct size_class *class, struct zspage *zspage,
-				struct page *pages[])
+static void create_zsdesc_chain(struct size_class *class, struct zspage *zspage,
+				struct zsdesc *zsdescs[])
 {
 	int i;
-	struct page *page;
-	struct page *prev_page = NULL;
-	int nr_pages = class->pages_per_zspage;
+	struct zsdesc *zsdesc;
+	struct zsdesc *prev_zsdesc = NULL;
+	int nr_zsdescs = class->pages_per_zspage;
 
 	/*
-	 * Allocate individual pages and link them together as:
-	 * 1. all pages are linked together using page->index
-	 * 2. each sub-page point to zspage using page->private
+	 * Allocate individual zsdescs and link them together as:
+	 * 1. all zsdescs are linked together using zsdesc->next
+	 * 2. each sub-zsdesc points to zspage using zsdesc->zspage
 	 *
-	 * we set PG_private to identify the first page (i.e. no other sub-page
+	 * we set PG_private to identify the first zsdesc (i.e. no other sub-zsdesc
 	 * has this flag set).
 	 */
-	for (i = 0; i < nr_pages; i++) {
-		page = pages[i];
-		set_page_private(page, (unsigned long)zspage);
-		page->index = 0;
+	for (i = 0; i < nr_zsdescs; i++) {
+		zsdesc = zsdescs[i];
+		zsdesc_set_zspage(zsdesc, zspage);
+		zsdesc->next = NULL;
 		if (i == 0) {
-			zspage->first_zsdesc = page_zsdesc(page);
-			SetPagePrivate(page);
+			zspage->first_zsdesc = zsdesc;
+			zsdesc_set_first(zsdesc);
 			if (unlikely(class->objs_per_zspage == 1 &&
 					class->pages_per_zspage == 1))
 				SetZsHugePage(zspage);
 		} else {
-			prev_page->index = (unsigned long)page;
+			prev_zsdesc->next = zsdesc;
 		}
-		prev_page = page;
+		prev_zsdesc = zsdesc;
 	}
 }
 
@@ -1292,7 +1292,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 					gfp_t gfp)
 {
 	int i;
-	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+	struct zsdesc *zsdescs[ZS_MAX_PAGES_PER_ZSPAGE];
 	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
 
 	if (!zspage)
@@ -1302,23 +1302,21 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	migrate_lock_init(zspage);
 
 	for (i = 0; i < class->pages_per_zspage; i++) {
-		struct page *page;
+		struct zsdesc *zsdesc;
 
-		page = alloc_page(gfp);
-		if (!page) {
+		zsdesc = alloc_zsdesc(gfp);
+		if (!zsdesc) {
 			while (--i >= 0) {
-				dec_zone_page_state(pages[i], NR_ZSPAGES);
-				__free_page(pages[i]);
+				free_zsdesc(zsdescs[i]);
 			}
 			cache_free_zspage(pool, zspage);
 			return NULL;
 		}
 
-		inc_zone_page_state(page, NR_ZSPAGES);
-		pages[i] = page;
+		zsdescs[i] = zsdesc;
 	}
 
-	create_page_chain(class, zspage, pages);
+	create_zsdesc_chain(class, zspage, zsdescs);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
 
@@ -2153,27 +2151,29 @@ static void dec_zspage_isolation(struct zspage *zspage)
 	zspage->isolated--;
 }
 
-static void replace_sub_page(struct size_class *class, struct zspage *zspage,
-				struct page *newpage, struct page *oldpage)
+static void replace_sub_zsdesc(struct size_class *class, struct zspage *zspage,
+				struct zsdesc *new_zsdesc, struct zsdesc *old_zsdesc)
 {
-	struct page *page;
-	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+	struct zsdesc *zsdesc;
+	struct zsdesc *zsdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+	unsigned int first_obj_offset;
 	int idx = 0;
 
-	page = get_first_page(zspage);
+	zsdesc = get_first_zsdesc(zspage);
 	do {
-		if (page == oldpage)
-			pages[idx] = newpage;
+		if (zsdesc == old_zsdesc)
+			zsdescs[idx] = new_zsdesc;
 		else
-			pages[idx] = page;
+			zsdescs[idx] = zsdesc;
 		idx++;
-	} while ((page = get_next_page(page)) != NULL);
+	} while ((zsdesc = get_next_zsdesc(zsdesc)) != NULL);
 
-	create_page_chain(class, zspage, pages);
-	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+	create_zsdesc_chain(class, zspage, zsdescs);
+	first_obj_offset = get_first_obj_offset(zsdesc_page(old_zsdesc));
+	set_first_obj_offset(zsdesc_page(new_zsdesc), first_obj_offset);
 	if (unlikely(ZsHugePage(zspage)))
-		newpage->index = oldpage->index;
-	__SetPageMovable(newpage, &zsmalloc_mops);
+		new_zsdesc->handle = old_zsdesc->handle;
+	zsdesc_set_movable(new_zsdesc);
 }
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
@@ -2254,7 +2254,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	}
 	kunmap_atomic(s_addr);
 
-	replace_sub_page(class, zspage, newpage, page);
+	replace_sub_zsdesc(class, zspage, page_zsdesc(newpage), page_zsdesc(page));
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release the pool's lock.
-- 
2.25.1