[RFC PATCH bpf-next v2 04/11] mm: slab: introduce ksize_full()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



When an object is charged to kmemcg, extra memory is allocated to
store the kmemcg ownership information, so introduce a new helper
that includes this extra memory in the reported size.

The reason we introduce a new helper rather than changing the current
helper ksize() is that the allocation of the kmemcg ownership data is a
nested allocation, which is independent of the original allocation. Some
users may rely on ksize() to learn the layout of the slab object, so we
had better not change its behavior.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 mm/slab.h        |  2 +-
 mm/slab_common.c | 52 ++++++++++++++++++++++++++++++++++++----------------
 mm/slob.c        |  2 +-
 3 files changed, 38 insertions(+), 18 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 35e0b3b..e07ae90 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -681,7 +681,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 
 #endif /* CONFIG_SLOB */
 
-size_t __ksize(const void *objp);
+size_t ___ksize(const void *objp, bool full);
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1cba98a..4f1e2bc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1021,21 +1021,11 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);
 
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
+size_t ___ksize(const void *object, bool full)
 {
+	size_t kmemcg_size = 0;
 	struct folio *folio;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
@@ -1054,7 +1044,27 @@ size_t __ksize(const void *object)
 	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
 #endif
 
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	slab = folio_slab(folio);
+	if (memcg_kmem_enabled() && full && slab_objcgs(slab))
+		kmemcg_size = sizeof(struct obj_cgroup *);
+	return slab_ksize(slab->slab_cache) + kmemcg_size;
+}
+
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @object: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @object in bytes
+ */
+size_t __ksize(const void *object)
+{
+	return ___ksize(object, false);
 }
 
 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
@@ -1428,7 +1438,7 @@ void kfree_sensitive(const void *p)
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
-size_t ksize(const void *objp)
+size_t _ksize(const void *objp, bool full)
 {
 	/*
 	 * We need to first check that the pointer to the object is valid.
@@ -1448,10 +1458,20 @@ size_t ksize(const void *objp)
 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 		return 0;
 
-	return kfence_ksize(objp) ?: __ksize(objp);
+	return kfence_ksize(objp) ?: ___ksize(objp, full);
 }
 EXPORT_SYMBOL(ksize);
 
+size_t ksize(const void *objp)
+{
+	return _ksize(objp, false);
+}
+
+size_t ksize_full(const void *objp)
+{
+	return _ksize(objp, true);
+}
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/slob.c b/mm/slob.c
index fe567fcf..8c46bdc 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -579,7 +579,7 @@ size_t kmalloc_size_roundup(size_t size)
 EXPORT_SYMBOL(kmalloc_size_roundup);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t __ksize(const void *block)
+size_t ___ksize(const void *block, bool full)
 {
 	struct folio *folio;
 	unsigned int align;
-- 
1.8.3.1




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux