Re: [PATCH v5 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG

On Fri, Aug 2, 2024 at 11:09 AM Jann Horn <jannh@xxxxxxxxxx> wrote:
> I guess I could also change the API to pass something different - like
> a flag meaning "the object is guaranteed to no longer be in use".
> There is already code in slab_free_hook() that computes this
> expression, so we could easily pass that to KASAN and then avoid doing
> the same logic in KASAN again... I think that would be the most
> elegant approach?

Regarding this, I think I'll add something like the following on top of this patch in v6:

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b63f5351c5f3..50bad011352e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -201,16 +201,17 @@ bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
 /**
  * kasan_slab_free - Possibly handle slab object freeing.
  * @object: Object to free.
+ * @still_accessible: Whether the object contents are still accessible.
  *
  * This hook is called from the slab allocator to give KASAN a chance to take
  * ownership of the object and handle its freeing.
  * kasan_slab_pre_free() must have already been called on the same object.
  *
  * @Return true if KASAN took ownership of the object; false otherwise.
  */
 static __always_inline bool kasan_slab_free(struct kmem_cache *s,
                                                void *object, bool init,
-                                               bool after_rcu_delay)
+                                               bool still_accessible)
 {
        if (kasan_enabled())
                return __kasan_slab_free(s, object, init, after_rcu_delay);
@@ -410,7 +411,7 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
 }

 static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-                                  bool init, bool after_rcu_delay)
+                                  bool init, bool still_accessible)
 {
        return false;
 }
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 71a20818b122..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -230,14 +230,14 @@ static bool check_slab_allocation(struct kmem_cache *cache, void *object,
 }

 static inline void poison_slab_object(struct kmem_cache *cache, void *object,
-                                     bool init, bool after_rcu_delay)
+                                     bool init, bool still_accessible)
 {
        void *tagged_object = object;

        object = kasan_reset_tag(object);

        /* RCU slabs could be legally used after free within the RCU period. */
-       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay)
+       if (unlikely(still_accessible))
                return;

        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
@@ -256,12 +256,12 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
 }

 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
-                      bool after_rcu_delay)
+                      bool still_accessible)
 {
        if (!kasan_arch_is_ready() || is_kfence_address(object))
                return false;

-       poison_slab_object(cache, object, init, after_rcu_delay);
+       poison_slab_object(cache, object, init, still_accessible);

        /*
         * If the object is put into quarantine, do not let slab put the object
diff --git a/mm/slub.c b/mm/slub.c
index 49571d5ded75..a89f2006d46e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2221,31 +2221,34 @@ static __always_inline
 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
                    bool after_rcu_delay)
 {
+       /* Are the object contents still accessible? */
+       bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
+
        kmemleak_free_recursive(x, s->flags);
        kmsan_slab_free(s, x);

        debug_check_no_locks_freed(x, s->object_size);

        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(x, s->object_size);

        /* Use KCSAN to help debug racy use-after-free. */
-       if (!(s->flags & SLAB_TYPESAFE_BY_RCU) || after_rcu_delay)
+       if (!still_accessible)
                __kcsan_check_access(x, s->object_size,
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

        if (kfence_free(x))
                return false;

        /*
         * Give KASAN a chance to notice an invalid free operation before we
         * modify the object.
         */
        if (kasan_slab_pre_free(s, x))
                return false;

 #ifdef CONFIG_SLUB_RCU_DEBUG
-       if ((s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay) {
+       if (still_accessible) {
                struct rcu_delayed_free *delayed_free;

                delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
@@ -2289,7 +2292,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
                       s->size - inuse - rsize);
        }
        /* KASAN might put x into memory quarantine, delaying its reuse. */
-       return !kasan_slab_free(s, x, init, after_rcu_delay);
+       return !kasan_slab_free(s, x, init, still_accessible);
 }

 static __fastpath_inline

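For anyone following along, the reason a freed object can still be
"accessible" here is the standard SLAB_TYPESAFE_BY_RCU lookup pattern:
the slab only guarantees that the memory remains a valid object of the
same type until a grace period elapses, so concurrent readers must
revalidate after dereferencing. Below is a minimal sketch of such a
reader, not taken from the patch; my_obj, my_obj_cache, lookup_table,
my_obj_put() and reader_lookup() are made-up names for illustration:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object type kept in a SLAB_TYPESAFE_BY_RCU cache. */
struct my_obj {
        int key;
        refcount_t ref;
};

#define TABLE_SIZE 128
static struct my_obj __rcu *lookup_table[TABLE_SIZE];
static struct kmem_cache *my_obj_cache;

static void my_obj_put(struct my_obj *obj)
{
        if (refcount_dec_and_test(&obj->ref))
                kmem_cache_free(my_obj_cache, obj);
}

static struct my_obj *reader_lookup(int key)
{
        struct my_obj *obj;

        rcu_read_lock();
        /*
         * The object this pointer refers to may be freed concurrently,
         * but SLAB_TYPESAFE_BY_RCU keeps the memory readable as a
         * struct my_obj until a grace period has passed - which is
         * exactly why KASAN must not poison it yet.
         */
        obj = rcu_dereference(lookup_table[key % TABLE_SIZE]);
        if (obj && !refcount_inc_not_zero(&obj->ref))
                obj = NULL;             /* object is being freed */
        rcu_read_unlock();

        /* The slot may have been recycled for a different key. */
        if (obj && obj->key != key) {
                my_obj_put(obj);
                obj = NULL;
        }
        return obj;
}

That window between kmem_cache_free() and the end of the grace period
is what the new still_accessible flag describes to KASAN.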