On 2/21/22 11:53, Hyeonggon Yoo wrote: > Only SLOB need to implement __ksize() separately because SLOB records > size in object header for kmalloc objects. Unify SLAB/SLUB's __ksize(). > > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx> > --- > mm/slab.c | 23 ----------------------- > mm/slab_common.c | 29 +++++++++++++++++++++++++++++ > mm/slub.c | 16 ---------------- > 3 files changed, 29 insertions(+), 39 deletions(-) > > diff --git a/mm/slab.c b/mm/slab.c > index ddf5737c63d9..eb73d2499480 100644 > --- a/mm/slab.c > +++ b/mm/slab.c > @@ -4199,27 +4199,4 @@ void __check_heap_object(const void *ptr, unsigned long n, > } > #endif /* CONFIG_HARDENED_USERCOPY */ > > -/** > - * __ksize -- Uninstrumented ksize. > - * @objp: pointer to the object > - * > - * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same > - * safety checks as ksize() with KASAN instrumentation enabled. > - * > - * Return: size of the actual memory used by @objp in bytes > - */ > -size_t __ksize(const void *objp) > -{ > - struct kmem_cache *c; > - size_t size; > > - BUG_ON(!objp); > - if (unlikely(objp == ZERO_SIZE_PTR)) > - return 0; > - > - c = virt_to_cache(objp); > - size = c ? c->object_size : 0; This comes from commit a64b53780ec3 ("mm/slab: sanity-check page type when looking up cache") by Kees and virt_to_cache() is an implicit check for folio slab flag ... > - > - return size; > -} > -EXPORT_SYMBOL(__ksize); > diff --git a/mm/slab_common.c b/mm/slab_common.c > index 23f2ab0713b7..488997db0d97 100644 > --- a/mm/slab_common.c > +++ b/mm/slab_common.c > @@ -1245,6 +1245,35 @@ void kfree_sensitive(const void *p) > } > EXPORT_SYMBOL(kfree_sensitive); > > +#ifndef CONFIG_SLOB > +/** > + * __ksize -- Uninstrumented ksize. > + * @objp: pointer to the object > + * > + * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same > + * safety checks as ksize() with KASAN instrumentation enabled. 
> + * > + * Return: size of the actual memory used by @objp in bytes > + */ > +size_t __ksize(const void *object) > +{ > + struct folio *folio; > + > + if (unlikely(object == ZERO_SIZE_PTR)) > + return 0; > + > + folio = virt_to_folio(object); > + > +#ifdef CONFIG_SLUB > + if (unlikely(!folio_test_slab(folio))) > + return folio_size(folio); > +#endif > + > + return slab_ksize(folio_slab(folio)->slab_cache); ... and here in the common version, for SLAB you now trust that the folio will be a slab folio, thus undoing the intention of that commit. Maybe that's not good and we should keep the folio_test_slab() for both cases? Although maybe it's also strange that prior to this patch, SLAB would return 0 if the test fails, and SLUB would return folio_size(). Probably because with SLUB this can be a large kmalloc here and with SLAB not. So we could keep doing that in the unified version, or KASAN devs (CC'd) could advise something better? > +} > +EXPORT_SYMBOL(__ksize); > +#endif > + > /** > * ksize - get the actual amount of memory allocated for a given object > * @objp: Pointer to the object > diff --git a/mm/slub.c b/mm/slub.c > index 261474092e43..3a4458976ab7 100644 > --- a/mm/slub.c > +++ b/mm/slub.c > @@ -4526,22 +4526,6 @@ void __check_heap_object(const void *ptr, unsigned long n, > } > #endif /* CONFIG_HARDENED_USERCOPY */ > > -size_t __ksize(const void *object) > -{ > - struct folio *folio; > - > - if (unlikely(object == ZERO_SIZE_PTR)) > - return 0; > - > - folio = virt_to_folio(object); > - > - if (unlikely(!folio_test_slab(folio))) > - return folio_size(folio); > - > - return slab_ksize(folio_slab(folio)->slab_cache); > -} > -EXPORT_SYMBOL(__ksize); > - > void kfree(const void *x) > { > struct folio *folio;