[PATCH 03/62] mm: Split slab into its own type

Make struct slab independent of struct page.  It still uses the
underlying memory in struct page for storing slab-specific data,
but slab and slub can now be weaned off using struct page directly.
Some of the wrapper functions (slab_address() and slab_order())
still need to cast to struct page, but this is a significant
disentanglement.
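
As an illustration of the overlay technique (a sketch, not part of the
patch itself): one type reuses the storage of another, with compile-time
assertions pinning the shared layout, just as SLAB_MATCH does below.
Note the kernel builds with -fno-strict-aliasing, which is what makes
this kind of type punning well-defined there.

	/* Standalone C11 model of the struct page / struct slab overlay. */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct host {			/* stand-in for struct page */
		unsigned long flags;
		void *private;
	};

	struct guest {			/* stand-in for struct slab */
		unsigned long flags;
		void *freelist;
	};

	/* Compile-time layout checks in the style of SLAB_MATCH. */
	static_assert(offsetof(struct host, flags) ==
		      offsetof(struct guest, flags), "flags must line up");
	static_assert(sizeof(struct guest) <= sizeof(struct host),
		      "guest must fit in host");

	int main(void)
	{
		struct host h = { .flags = 42 };
		struct guest *g = (struct guest *)&h;	/* same memory, new type */

		printf("flags seen through the guest type: %lu\n", g->flags);
		return 0;
	}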

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/mm_types.h   | 56 +++++++++++++++++++++++++++++
 include/linux/page-flags.h | 29 +++++++++++++++
 mm/slab.h                  | 73 ++++++++++++++++++++++++++++++++++++++
 mm/slub.c                  |  8 ++---
 4 files changed, 162 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f8ee09c711f..c2ea71aba84e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -239,6 +239,62 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+/* Reuses the bits in struct page */
+struct slab {
+	unsigned long flags;
+	union {
+		struct list_head slab_list;
+		struct {	/* Partial pages */
+			struct slab *next;
+#ifdef CONFIG_64BIT
+			int slabs;	/* Nr of slabs left */
+			int pobjects;	/* Approximate count */
+#else
+			short int slabs;
+			short int pobjects;
+#endif
+		};
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache; /* not slob */
+	/* Double-word boundary */
+	void *freelist;		/* first free object */
+	union {
+		void *s_mem;	/* slab: first object */
+		unsigned long counters;		/* SLUB */
+		struct {			/* SLUB */
+			unsigned inuse:16;
+			unsigned objects:15;
+			unsigned frozen:1;
+		};
+	};
+
+	union {
+		unsigned int active;		/* SLAB */
+		int units;			/* SLOB */
+	};
+	atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+	unsigned long memcg_data;
+#endif
+};
+
+#define SLAB_MATCH(pg, sl)						\
+	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
+SLAB_MATCH(flags, flags);
+SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
+SLAB_MATCH(slab_list, slab_list);
+SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(slab_cache, slab_cache);
+SLAB_MATCH(s_mem, s_mem);
+SLAB_MATCH(active, active);
+SLAB_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+SLAB_MATCH(memcg_data, memcg_data);
+#endif
+#undef SLAB_MATCH
+static_assert(sizeof(struct slab) <= sizeof(struct page));
+
 static inline atomic_t *compound_mapcount_ptr(struct page *page)
 {
 	return &page[1].compound_mapcount;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a558d67ee86f..57bdb1eb2f29 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -165,6 +165,8 @@ enum pageflags {
 	/* Remapped by swiotlb-xen. */
 	PG_xen_remapped = PG_owner_priv_1,
 
+	/* SLAB / SLUB / SLOB */
+	PG_pfmemalloc = PG_active,
 	/* SLOB */
 	PG_slob_free = PG_private,
 
@@ -193,6 +195,33 @@ static inline unsigned long _compound_head(const struct page *page)
 
 #define compound_head(page)	((typeof(page))_compound_head(page))
 
+/**
+ * page_slab - Converts from page to slab.
+ * @p: The page.
+ *
+ * This function cannot be called on a NULL pointer.  It can be called
+ * on a non-slab page; the caller should check slab_test_cache() to be
+ * sure that the returned slab really is a slab.
+ *
+ * Return: The slab which contains this page.
+ */
+#define page_slab(p)		(_Generic((p),				\
+	const struct page *:	(const struct slab *)_compound_head(p),	\
+	struct page *:		(struct slab *)_compound_head(p)))
+
+/**
+ * slab_page - The first struct page allocated for a slab
+ * @s: The slab.
+ *
+ * Slabs are allocated as one-or-more pages.  It is occasionally necessary
+ * to convert back to a struct page in order to communicate with the rest
+ * of the mm.  Please use this helper function instead of casting yourself,
+ * as the implementation may change in the future.
+ */
+#define slab_page(s)		(_Generic((s),				\
+	const struct slab *:	(const struct page *)s,			\
+	struct slab *:		(struct page *)s))
+
 static __always_inline int PageTail(struct page *page)
 {
 	return READ_ONCE(page->compound_head) & 1;
diff --git a/mm/slab.h b/mm/slab.h
index 58c01a34e5b8..54b05f4d9eb5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -5,6 +5,79 @@
  * Internal slab definitions
  */
 
+/*
+ * Does this memory belong to a slab cache?  SLUB can return page allocator
+ * memory for allocations above a certain size.
+ */
+static inline bool slab_test_cache(const struct slab *slab)
+{
+	return test_bit(PG_slab, &slab->flags);
+}
+
+static inline bool slab_test_multi_page(const struct slab *slab)
+{
+	return test_bit(PG_head, &slab->flags);
+}
+
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(PG_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(PG_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_clear_pfmemalloc(struct slab *slab)
+{
+	clear_bit(PG_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(PG_pfmemalloc, &slab->flags);
+}
+
+static inline void *slab_address(const struct slab *slab)
+{
+	return page_address(slab_page(slab));
+}
+
+static inline int slab_nid(const struct slab *slab)
+{
+	return pgflags_nid(slab->flags);
+}
+
+static inline pg_data_t *slab_pgdat(const struct slab *slab)
+{
+	return NODE_DATA(slab_nid(slab));
+}
+
+static inline struct slab *virt_to_slab(const void *addr)
+{
+	struct page *page = virt_to_page(addr);
+
+	return page_slab(page);
+}
+
+static inline int slab_order(const struct slab *slab)
+{
+	if (!slab_test_multi_page(slab))
+		return 0;
+	return ((struct page *)slab)[1].compound_order;
+}
+
+static inline size_t slab_size(const struct slab *slab)
+{
+	return PAGE_SIZE << slab_order(slab);
+}
+
+
 #ifdef CONFIG_SLOB
 /*
  * Common fields provided in kmem_cache by all slab allocators
diff --git a/mm/slub.c b/mm/slub.c
index 3d2025f7163b..7e429a31b326 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3755,7 +3755,7 @@ static unsigned int slub_min_objects;
  * requested a higher minimum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
-static inline unsigned int slab_order(unsigned int size,
+static inline unsigned int calc_slab_order(unsigned int size,
 		unsigned int min_objects, unsigned int max_order,
 		unsigned int fract_leftover)
 {
@@ -3819,7 +3819,7 @@ static inline int calculate_order(unsigned int size)
 
 		fraction = 16;
 		while (fraction >= 4) {
-			order = slab_order(size, min_objects,
+			order = calc_slab_order(size, min_objects,
 					slub_max_order, fraction);
 			if (order <= slub_max_order)
 				return order;
@@ -3832,14 +3832,14 @@ static inline int calculate_order(unsigned int size)
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
 	 */
-	order = slab_order(size, 1, slub_max_order, 1);
+	order = calc_slab_order(size, 1, slub_max_order, 1);
 	if (order <= slub_max_order)
 		return order;
 
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = slab_order(size, 1, MAX_ORDER, 1);
+	order = calc_slab_order(size, 1, MAX_ORDER, 1);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
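
The _Generic selection in page_slab()/slab_page() above exists to keep
a const pointer const across the conversion; a plain macro cast would
silently strip the qualifier.  A standalone sketch of the trick
(illustration only -- the real page_slab() additionally strips the
tail-page bit via _compound_head()):

	/* C11 model of a const-preserving conversion macro. */
	#include <stdio.h>

	struct page { unsigned long flags; };
	struct slab { unsigned long flags; };

	#define page_slab(p)	(_Generic((p),				\
		const struct page *:	(const struct slab *)(p),	\
		struct page *:		(struct slab *)(p)))

	int main(void)
	{
		struct page pg = { .flags = 1 };
		const struct page *cp = &pg;

		struct slab *s = page_slab(&pg);	/* non-const in, non-const out */
		const struct slab *cs = page_slab(cp);	/* const in, const out */
		/* struct slab *bad = page_slab(cp);	   would not compile */

		printf("%p %p\n", (void *)s, (const void *)cs);
		return 0;
	}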
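
slab_test_cache() above is needed because, per its comment, SLUB hands
back bare page allocator memory for larger kmalloc() allocations, so a
free path has to dispatch on PG_slab.  A toy userspace model of that
dispatch (all names invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	struct slab { unsigned long flags; };

	#define TOY_PG_SLAB	(1UL << 0)	/* stand-in for PG_slab */

	static bool slab_test_cache(const struct slab *slab)
	{
		return slab->flags & TOY_PG_SLAB;
	}

	static void toy_free(struct slab *slab)
	{
		if (!slab_test_cache(slab)) {
			/* Large kmalloc: give the pages straight back
			 * to the page allocator. */
			printf("freeing page-allocator memory\n");
			return;
		}
		/* Otherwise return the object to its kmem_cache. */
		printf("freeing slab-cache memory\n");
	}

	int main(void)
	{
		struct slab cached = { .flags = TOY_PG_SLAB };
		struct slab bare = { .flags = 0 };

		toy_free(&cached);
		toy_free(&bare);
		return 0;
	}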
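
slab_size() above reduces to PAGE_SIZE << slab_order(slab): with 4KiB
pages, an order-2 slab spans 16KiB and an order-3 slab 32KiB.  A quick
check (illustration only; assumes 4KiB pages):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL	/* assumption: 4KiB pages */

	int main(void)
	{
		for (int order = 0; order <= 3; order++)
			printf("order %d -> %lu KiB\n", order,
			       (PAGE_SIZE << order) / 1024);
		return 0;
	}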
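
The calc_slab_order() rename frees the slab_order name for the new
struct slab helper; the function itself searches for the smallest order
whose leftover space is at most 1/fract_leftover of the slab.  A
simplified userspace model of that search (illustration only; it omits
the min_objects and slub_min_order handling of the real function):

	#include <stdio.h>

	#define PAGE_SIZE	4096U	/* assumption: 4KiB pages */

	static unsigned int toy_calc_order(unsigned int size,
			unsigned int max_order, unsigned int fract_leftover)
	{
		for (unsigned int order = 0; order <= max_order; order++) {
			unsigned int slab_size = PAGE_SIZE << order;
			unsigned int rem = slab_size % size;

			if (rem <= slab_size / fract_leftover)
				return order;
		}
		return max_order + 1;	/* nothing acceptable */
	}

	int main(void)
	{
		/* 700-byte objects: order 0 wastes 596 > 4096/16 bytes,
		 * order 1 wastes 492 <= 8192/16, so order 1 is chosen. */
		printf("order for 700-byte objects: %u\n",
		       toy_calc_order(700, 3, 16));
		return 0;
	}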
-- 
2.32.0




