[PATCH 49/62] mm/slub: Convert get_map() and __fill_map() to struct slab

Improve type safety and remove calls to slab_page().
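
For context, a minimal userspace sketch (not part of the patch) of why
the distinct type matters: once a helper takes struct slab rather than
struct page, passing the wrong type is caught at compile time instead
of becoming a latent bug. The struct bodies below are illustrative
stand-ins, not the kernel's real layouts.

#include <stdio.h>

struct page { unsigned long flags; };
struct slab { unsigned long flags; unsigned int objects; };

/* New-style helper: accepts only a slab. */
static void fill_map(struct slab *slab)
{
	printf("%u objects\n", slab->objects);
}

int main(void)
{
	struct slab slab = { .flags = 0, .objects = 32 };
	struct page page = { .flags = 0 };

	fill_map(&slab);	/* fine */
	/* fill_map(&page); */	/* rejected by the compiler */
	(void)page;
	return 0;
}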

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/slub.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 0d9299679ea2..86d06f6aa743 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -535,14 +535,14 @@ static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_RAW_SPINLOCK(object_map_lock);
 
 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
-		       struct page *page)
+		       struct slab *slab)
 {
-	void *addr = page_address(page);
+	void *addr = slab_address(slab);
 	void *p;
 
-	bitmap_zero(obj_map, page->objects);
+	bitmap_zero(obj_map, slab->objects);
 
-	for (p = page->freelist; p; p = get_freepointer(s, p))
+	for (p = slab->freelist; p; p = get_freepointer(s, p))
 		set_bit(__obj_to_index(s, addr, p), obj_map);
 }
 
@@ -567,19 +567,19 @@ static inline bool slab_add_kunit_errors(void) { return false; }
 #endif
 
 /*
- * Determine a map of object in use on a page.
+ * Determine a map of objects in use in a slab.
  *
- * Node listlock must be held to guarantee that the page does
+ * Node listlock must be held to guarantee that the slab does
  * not vanish from under us.
  */
-static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
 	__acquires(&object_map_lock)
 {
 	VM_BUG_ON(!irqs_disabled());
 
 	raw_spin_lock(&object_map_lock);
 
-	__fill_map(object_map, s, page);
+	__fill_map(object_map, s, slab);
 
 	return object_map;
 }
@@ -4216,7 +4216,7 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 	slab_err(s, slab, text, s->name);
 	slab_lock(slab_page(slab), &flags);
 
-	map = get_map(s, slab_page(slab));
+	map = get_map(s, slab);
 	for_each_object(p, s, addr, slab->objects) {
 
 		if (!test_bit(__obj_to_index(s, addr, p), map)) {
@@ -4964,7 +4964,7 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 		goto unlock;
 
 	/* Now we know that a valid freelist exists */
-	__fill_map(obj_map, s, slab_page(slab));
+	__fill_map(obj_map, s, slab);
 	for_each_object(p, s, addr, slab->objects) {
 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
@@ -5170,7 +5170,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 	void *addr = slab_address(slab);
 	void *p;
 
-	__fill_map(obj_map, s, slab_page(slab));
+	__fill_map(obj_map, s, slab);
 
 	for_each_object(p, s, addr, slab->objects)
 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
-- 
2.32.0
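
For reviewers following along, the call pattern after this conversion
looks roughly like the sketch below. walk_free_objects() is a
hypothetical illustration, not code from this series; it assumes the
node listlock is already held (per the comment above get_map()) and
that put_map() remains the existing counterpart that releases
object_map_lock.

/* Hypothetical caller; node listlock must be held by the caller. */
static void walk_free_objects(struct kmem_cache *s, struct slab *slab)
{
	void *addr = slab_address(slab);
	unsigned long *map;
	void *p;

	map = get_map(s, slab);		/* takes object_map_lock */
	for_each_object(p, s, addr, slab->objects) {
		/* a set bit means the object is on the freelist */
		if (test_bit(__obj_to_index(s, addr, p), map))
			pr_info("free object at %p\n", p);
	}
	put_map(map);			/* releases object_map_lock */
}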
