[PATCH] mm: Functions used internally should not be put into slab_def.h and slub_def.h


From: Rong Tao <rongtao@xxxxxxxx>

commit 40f3bf0cb04c ("mm: Convert struct page to struct slab in functions
used by other subsystems") introduced uses of 'slab_address()' and
'struct slab' in slab_def.h (CONFIG_SLAB) and slub_def.h (CONFIG_SLUB).
When a module or BPF program includes <linux/slub_def.h>, 'slab_address()'
and 'struct slab' are undeclared there, resulting in incomplete-type and
undefined-symbol errors (see the bcc slabratetop.py failure [0]).
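
For reference, a minimal reproducer sketch (hypothetical, not the
actual slabratetop.py source):

    /* With CONFIG_SLUB, merely including slub_def.h used to break the
     * build: the static inline helpers it carried dereference
     * 'struct slab', which is defined only in mm/slab.h, and call
     * slab_address(), which is declared only there as well.
     */
    #include <linux/slub_def.h>
    /* error: invalid use of undefined type 'struct slab' */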

Move the helpers that rely on 'struct slab' and slab_address(), namely
nearest_obj(), obj_to_index(), and objs_per_slab(), out of the public
headers and into the internal header mm/slab.h, which already provides
both definitions. This fixes the problem.

[0] https://github.com/iovisor/bcc/issues/4438
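
As background for the comment that moves along with the CONFIG_SLAB
obj_to_index(): the per-object divide by cache->size is replaced with a
reciprocal precomputed once per cache. A self-contained userspace
sketch of that multiply-and-shift idea follows (simplified; the
kernel's lib/reciprocal_div.c implements a more careful variant, and
all names below are illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* Precompute ceil(2^32 / d) once; 'd' stands in for cache->size. */
    static uint32_t reciprocal(uint32_t d)
    {
            return (uint32_t)(((1ULL << 32) + d - 1) / d);
    }

    /* 'a / d' becomes one multiply and one shift; exact for the
     * operand ranges exercised below.
     */
    static uint32_t recip_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);
    }

    int main(void)
    {
            uint32_t size = 192;            /* hypothetical object size */
            uint32_t r = reciprocal(size);  /* computed once per cache */
            uint32_t off;

            for (off = 0; off < 64 * size; off++)
                    assert(recip_divide(off, r) == off / size);
            return 0;
    }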

Signed-off-by: Rong Tao <rongtao@xxxxxxxx>
---
 include/linux/slab_def.h | 33 --------------------
 include/linux/slub_def.h | 32 -------------------
 mm/slab.h                | 66 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 65 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 5834bad8ad78..5658b5fddf9b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -88,37 +88,4 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
-				void *x)
-{
-	void *object = x - (x - slab->s_mem) % cache->size;
-	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
-	if (unlikely(object > last_object))
-		return last_object;
-	else
-		return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- *   Using the fact that size is a constant for a particular cache,
- *   we can replace (offset / cache->size) by
- *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
-{
-	u32 offset = (obj - slab->s_mem);
-	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
-				     const struct slab *slab)
-{
-	if (is_kfence_address(slab_address(slab)))
-		return 1;
-	return cache->num;
-}
-
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index aa0ee1678d29..660fd6b2a748 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -163,36 +163,4 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
 
 void *fixup_red_left(struct kmem_cache *s, void *p);
 
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
-				void *x) {
-	void *object = x - (x - slab_address(slab)) % cache->size;
-	void *last_object = slab_address(slab) +
-		(slab->objects - 1) * cache->size;
-	void *result = (unlikely(object > last_object)) ? last_object : object;
-
-	result = fixup_red_left(cache, result);
-	return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
-{
-	return reciprocal_divide(kasan_reset_tag(obj) - addr,
-				 cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
-{
-	if (is_kfence_address(obj))
-		return 0;
-	return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
-				     const struct slab *slab)
-{
-	return slab->objects;
-}
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slab.h b/mm/slab.h
index 7cc432969945..38350a0efa91 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -227,10 +227,76 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
+
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
+				void *x)
+{
+	void *object = x - (x - slab->s_mem) % cache->size;
+	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
+
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
+/*
+ * We want to avoid an expensive divide : (offset / cache->size)
+ *   Using the fact that size is a constant for a particular cache,
+ *   we can replace (offset / cache->size) by
+ *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
+{
+	u32 offset = (obj - slab->s_mem);
+	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				     const struct slab *slab)
+{
+	if (is_kfence_address(slab_address(slab)))
+		return 1;
+	return cache->num;
+}
 #endif
 
 #ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
+
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
+				void *x) {
+	void *object = x - (x - slab_address(slab)) % cache->size;
+	void *last_object = slab_address(slab) +
+		(slab->objects - 1) * cache->size;
+	void *result = (unlikely(object > last_object)) ? last_object : object;
+
+	result = fixup_red_left(cache, result);
+	return result;
+}
+
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
+{
+	if (is_kfence_address(obj))
+		return 0;
+	return __obj_to_index(cache, slab_address(slab), obj);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				     const struct slab *slab)
+{
+	return slab->objects;
+}
 #endif
 
 #include <linux/memcontrol.h>
-- 
2.39.0