Re: [PATCH RFC v1 19/26] kmsan: call KMSAN hooks where needed

On Fri, Oct 18, 2019 at 5:02 PM Qian Cai <cai@xxxxxx> wrote:
>
> On Fri, 2019-10-18 at 11:42 +0200, glider@xxxxxxxxxx wrote:
> > Insert KMSAN hooks that check for potential memory errors and/or make
> > necessary bookkeeping changes:
> >  - allocate/split/deallocate metadata pages in
> >    alloc_pages()/split_page()/free_page();
>
> This also seems unnecessary, as options like page_poison and debug_pagealloc
> should be able to detect uninitialized memory access in the page allocator as
> well. Even KASAN has some of this functionality.
I still believe there's some misunderstanding between us, because I didn't
CC you on the KMSAN runtime or documentation patches.
I'll do that and will be happy to answer any questions you still have.
Without these hooks KMSAN will be virtually unusable, because it won't know
about heap allocations.

When a buffer is allocated on the heap, KMSAN updates the metadata for that
region of memory, writing 0xFF (meaning "uninitialized") to every shadow byte
and an origin (a stack trace ID) for every 4 bytes of the region.
Note that shadow and origin bytes are stored separately from the data and are
generally incompatible with what debug_pagealloc/page_poison do.
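
To make this more concrete, here is a rough userspace sketch of the
bookkeeping model described above (this is not the actual runtime code;
the names shadow[], origin[], poison_region() etc. are made up purely for
illustration):

  #include <stdint.h>
  #include <string.h>
  #include <stdio.h>

  #define REGION_SIZE 32

  /* One shadow byte per data byte: 0xff = uninitialized, 0x00 = initialized. */
  static uint8_t shadow[REGION_SIZE];
  /* One 32-bit origin (stack trace ID) per 4 bytes of data. */
  static uint32_t origin[REGION_SIZE / 4];

  /* What the allocation hooks conceptually do for a freshly allocated region. */
  static void poison_region(uint32_t alloc_stack_id)
  {
          memset(shadow, 0xff, sizeof(shadow));
          for (size_t i = 0; i < REGION_SIZE / 4; i++)
                  origin[i] = alloc_stack_id;
  }

  /* What a store of @len initialized bytes at offset @off conceptually does. */
  static void unpoison_bytes(size_t off, size_t len)
  {
          memset(&shadow[off], 0x00, len);
  }

  int main(void)
  {
          poison_region(0xdeadbeef);  /* pretend this is a stack depot ID */
          unpoison_bytes(0, 8);       /* the first 8 bytes are now initialized */

          /* Reading byte 12 would be a use of uninitialized memory: */
          if (shadow[12] == 0xff)
                  printf("byte 12 uninitialized, origin 0x%x\n",
                         (unsigned int)origin[12 / 4]);
          return 0;
  }

The point is that this metadata lives in dedicated shadow/origin pages, which
is why the page allocator hooks above (and the copy/clear hooks below) are
needed to set it up, propagate it and tear it down.
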
> >  - clear page shadow and origins in clear_page(), copy_user_highpage();
> >  - copy page metadata in copy_highpage(), wp_page_copy();
> >  - handle vmap()/vunmap()/iounmap();
> >  - handle task creation and deletion;
> >  - handle vprintk();
> >  - call softirq entry/exit hooks in kernel/softirq.c;
> >  - check/initialize memory sent to/read from USB, I2C, and network
> >
> > Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
> > To: Alexander Potapenko <glider@xxxxxxxxxx>
> > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> > Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
> > Cc: Eric Dumazet <edumazet@xxxxxxxxxx>
> > Cc: Wolfram Sang <wsa@xxxxxxxxxxxxx>
> > Cc: Petr Mladek <pmladek@xxxxxxxx>
> > Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
> > Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> > Cc: linux-mm@xxxxxxxxx
> > ---
> >
> > Change-Id: I1250a928d9263bf71fdaa067a070bdee686ef47b
> > ---
> >  arch/x86/include/asm/page_64.h | 13 +++++++++++++
> >  arch/x86/mm/ioremap.c          |  3 +++
> >  drivers/i2c/i2c-core-base.c    |  2 ++
> >  drivers/usb/core/urb.c         |  2 ++
> >  include/linux/highmem.h        |  4 ++++
> >  kernel/exit.c                  |  2 ++
> >  kernel/fork.c                  |  2 ++
> >  kernel/kthread.c               |  2 ++
> >  kernel/printk/printk.c         |  8 +++++++-
> >  kernel/softirq.c               |  5 +++++
> >  lib/ioremap.c                  |  5 +++++
> >  mm/compaction.c                |  9 +++++++++
> >  mm/gup.c                       |  3 +++
> >  mm/memory.c                    |  2 ++
> >  mm/page_alloc.c                | 16 ++++++++++++++++
> >  mm/vmalloc.c                   | 23 +++++++++++++++++++++--
> >  net/sched/sch_generic.c        |  2 ++
> >  17 files changed, 100 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
> > index 939b1cff4a7b..0ba43d93414f 100644
> > --- a/arch/x86/include/asm/page_64.h
> > +++ b/arch/x86/include/asm/page_64.h
> > @@ -44,14 +44,27 @@ void clear_page_orig(void *page);
> >  void clear_page_rep(void *page);
> >  void clear_page_erms(void *page);
> >
> > +/* This is an assembly header, avoid including too much of kmsan.h */
> > +#ifdef CONFIG_KMSAN
> > +void kmsan_clear_page(void *page_addr);
> > +#endif
> > +__no_sanitize_memory
> >  static inline void clear_page(void *page)
> >  {
> > +#ifdef CONFIG_KMSAN
> > +     /* alternative_call_2() changes |page|. */
> > +     void *page_copy = page;
> > +#endif
> >       alternative_call_2(clear_page_orig,
> >                          clear_page_rep, X86_FEATURE_REP_GOOD,
> >                          clear_page_erms, X86_FEATURE_ERMS,
> >                          "=D" (page),
> >                          "0" (page)
> >                          : "cc", "memory", "rax", "rcx");
> > +#ifdef CONFIG_KMSAN
> > +     /* Clear KMSAN shadow for the pages that have it. */
> > +     kmsan_clear_page(page_copy);
> > +#endif
> >  }
> >
> >  void copy_page(void *to, void *from);
> > diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
> > index a39dcdb5ae34..fdb2abc11a82 100644
> > --- a/arch/x86/mm/ioremap.c
> > +++ b/arch/x86/mm/ioremap.c
> > @@ -7,6 +7,7 @@
> >   * (C) Copyright 1995 1996 Linus Torvalds
> >   */
> >
> > +#include <linux/kmsan.h>
> >  #include <linux/memblock.h>
> >  #include <linux/init.h>
> >  #include <linux/io.h>
> > @@ -451,6 +452,8 @@ void iounmap(volatile void __iomem *addr)
> >               return;
> >       }
> >
> > +     kmsan_iounmap_page_range((unsigned long)addr,
> > +             (unsigned long)addr + get_vm_area_size(p));
> >       free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
> >
> >       /* Finally remove it */
> > diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
> > index 5f6a4985f2bc..9685d3399c79 100644
> > --- a/drivers/i2c/i2c-core-base.c
> > +++ b/drivers/i2c/i2c-core-base.c
> > @@ -27,6 +27,7 @@
> >  #include <linux/irqflags.h>
> >  #include <linux/jump_label.h>
> >  #include <linux/kernel.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/module.h>
> >  #include <linux/mutex.h>
> >  #include <linux/of_device.h>
> > @@ -1975,6 +1976,7 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
> >                               trace_i2c_write(adap, &msgs[i], i);
> >       }
> >
> > +     kmsan_handle_i2c_transfer(msgs, num);
> >       /* Retry automatically on arbitration loss */
> >       orig_jiffies = jiffies;
> >       for (ret = 0, try = 0; try <= adap->retries; try++) {
> > diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
> > index 0eab79f82ce4..5bdb54d71c2e 100644
> > --- a/drivers/usb/core/urb.c
> > +++ b/drivers/usb/core/urb.c
> > @@ -8,6 +8,7 @@
> >  #include <linux/bitops.h>
> >  #include <linux/slab.h>
> >  #include <linux/log2.h>
> > +#include <linux/kmsan-checks.h>
> >  #include <linux/usb.h>
> >  #include <linux/wait.h>
> >  #include <linux/usb/hcd.h>
> > @@ -401,6 +402,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
> >                       URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
> >                       URB_DMA_SG_COMBINED);
> >       urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
> > +     kmsan_handle_urb(urb, is_out);
> >
> >       if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
> >                       dev->state < USB_STATE_CONFIGURED)
> > diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> > index ea5cdbd8c2c3..623b56f48685 100644
> > --- a/include/linux/highmem.h
> > +++ b/include/linux/highmem.h
> > @@ -5,6 +5,7 @@
> >  #include <linux/fs.h>
> >  #include <linux/kernel.h>
> >  #include <linux/bug.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/mm.h>
> >  #include <linux/uaccess.h>
> >  #include <linux/hardirq.h>
> > @@ -255,6 +256,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
> >       vfrom = kmap_atomic(from);
> >       vto = kmap_atomic(to);
> >       copy_user_page(vto, vfrom, vaddr, to);
> > +     /* User pages don't have shadow, just clear the destination. */
> > +     kmsan_clear_page(page_address(to));
> >       kunmap_atomic(vto);
> >       kunmap_atomic(vfrom);
> >  }
> > @@ -270,6 +273,7 @@ static inline void copy_highpage(struct page *to, struct page *from)
> >       vfrom = kmap_atomic(from);
> >       vto = kmap_atomic(to);
> >       copy_page(vto, vfrom);
> > +     kmsan_copy_page_meta(to, from);
> >       kunmap_atomic(vto);
> >       kunmap_atomic(vfrom);
> >  }
> > diff --git a/kernel/exit.c b/kernel/exit.c
> > index a46a50d67002..9e3ce929110b 100644
> > --- a/kernel/exit.c
> > +++ b/kernel/exit.c
> > @@ -60,6 +60,7 @@
> >  #include <linux/writeback.h>
> >  #include <linux/shm.h>
> >  #include <linux/kcov.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/random.h>
> >  #include <linux/rcuwait.h>
> >  #include <linux/compat.h>
> > @@ -719,6 +720,7 @@ void __noreturn do_exit(long code)
> >
> >       profile_task_exit(tsk);
> >       kcov_task_exit(tsk);
> > +     kmsan_task_exit(tsk);
> >
> >       WARN_ON(blk_needs_flush_plug(tsk));
> >
> > diff --git a/kernel/fork.c b/kernel/fork.c
> > index bcdf53125210..0f08952a42dc 100644
> > --- a/kernel/fork.c
> > +++ b/kernel/fork.c
> > @@ -37,6 +37,7 @@
> >  #include <linux/fdtable.h>
> >  #include <linux/iocontext.h>
> >  #include <linux/key.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/binfmts.h>
> >  #include <linux/mman.h>
> >  #include <linux/mmu_notifier.h>
> > @@ -931,6 +932,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
> >       account_kernel_stack(tsk, 1);
> >
> >       kcov_task_init(tsk);
> > +     kmsan_task_create(tsk);
> >
> >  #ifdef CONFIG_FAULT_INJECTION
> >       tsk->fail_nth = 0;
> > diff --git a/kernel/kthread.c b/kernel/kthread.c
> > index 621467c33fef..9b948559a34a 100644
> > --- a/kernel/kthread.c
> > +++ b/kernel/kthread.c
> > @@ -17,6 +17,7 @@
> >  #include <linux/unistd.h>
> >  #include <linux/file.h>
> >  #include <linux/export.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/mutex.h>
> >  #include <linux/slab.h>
> >  #include <linux/freezer.h>
> > @@ -350,6 +351,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
> >               set_cpus_allowed_ptr(task, cpu_all_mask);
> >       }
> >       kfree(create);
> > +     kmsan_task_create(task);
> >       return task;
> >  }
> >
> > diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
> > index ca65327a6de8..f77fdcb5f861 100644
> > --- a/kernel/printk/printk.c
> > +++ b/kernel/printk/printk.c
> > @@ -1914,7 +1914,12 @@ int vprintk_store(int facility, int level,
> >        * The printf needs to come first; we need the syslog
> >        * prefix which might be passed-in as a parameter.
> >        */
> > -     text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
> > +     /*
> > +      * We've checked the printk arguments in vprintk_emit() already.
> > +      * Initialize |text_len| to prevent the errors from spreading.
> > +      */
> > +     text_len = KMSAN_INIT_VALUE(vscnprintf(text, sizeof(textbuf), fmt,
> > +                                            args));
> >
> >       /* mark and strip a trailing newline */
> >       if (text_len && text[text_len-1] == '\n') {
> > @@ -1972,6 +1977,7 @@ asmlinkage int vprintk_emit(int facility, int level,
> >       boot_delay_msec(level);
> >       printk_delay();
> >
> > +     kmsan_handle_vprintk(&fmt, args);
> >       /* This stops the holder of console_sem just where we want him */
> >       logbuf_lock_irqsave(flags);
> >       curr_log_seq = log_next_seq;
> > diff --git a/kernel/softirq.c b/kernel/softirq.c
> > index 0427a86743a4..6d566dd68b35 100644
> > --- a/kernel/softirq.c
> > +++ b/kernel/softirq.c
> > @@ -11,6 +11,7 @@
> >
> >  #include <linux/export.h>
> >  #include <linux/kernel_stat.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/interrupt.h>
> >  #include <linux/init.h>
> >  #include <linux/mm.h>
> > @@ -370,7 +371,9 @@ static inline void invoke_softirq(void)
> >                * it is the irq stack, because it should be near empty
> >                * at this stage.
> >                */
> > +             kmsan_softirq_enter();
> >               __do_softirq();
> > +             kmsan_softirq_exit();
> >  #else
> >               /*
> >                * Otherwise, irq_exit() is called on the task stack that can
> > @@ -600,7 +603,9 @@ static void run_ksoftirqd(unsigned int cpu)
> >                * We can safely run softirq on inline stack, as we are not deep
> >                * in the task stack here.
> >                */
> > +             kmsan_softirq_enter();
> >               __do_softirq();
> > +             kmsan_softirq_exit();
> >               local_irq_enable();
> >               cond_resched();
> >               return;
> > diff --git a/lib/ioremap.c b/lib/ioremap.c
> > index 0a2ffadc6d71..5f830cee5bfc 100644
> > --- a/lib/ioremap.c
> > +++ b/lib/ioremap.c
> > @@ -6,6 +6,7 @@
> >   *
> >   * (C) Copyright 1995 1996 Linus Torvalds
> >   */
> > +#include <linux/kmsan.h>
> >  #include <linux/vmalloc.h>
> >  #include <linux/mm.h>
> >  #include <linux/sched.h>
> > @@ -214,6 +215,8 @@ int ioremap_page_range(unsigned long addr,
> >       unsigned long start;
> >       unsigned long next;
> >       int err;
> > +     unsigned long old_addr = addr;
> > +     phys_addr_t old_phys_addr = phys_addr;
> >
> >       might_sleep();
> >       BUG_ON(addr >= end);
> > @@ -228,6 +231,8 @@ int ioremap_page_range(unsigned long addr,
> >       } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
> >
> >       flush_cache_vmap(start, end);
> > +     if (!err)
> > +             kmsan_ioremap_page_range(old_addr, end, old_phys_addr, prot);
> >
> >       return err;
> >  }
> > diff --git a/mm/compaction.c b/mm/compaction.c
> > index ce08b39d85d4..e9a5898e5fe0 100644
> > --- a/mm/compaction.c
> > +++ b/mm/compaction.c
> > @@ -84,6 +84,15 @@ static void split_map_pages(struct list_head *list)
> >
> >               for (i = 0; i < nr_pages; i++) {
> >                       list_add(&page->lru, &tmp_list);
> > +#ifdef CONFIG_KMSAN
> > +                     /*
> > +                      * TODO(glider): we may lose the metadata when copying
> > +                      * something to these pages. Need to allocate shadow
> > +                      * and origin pages here instead.
> > +                      */
> > +                     page->shadow = NULL;
> > +                     page->origin = NULL;
> > +#endif
> >                       page++;
> >               }
> >       }
> > diff --git a/mm/gup.c b/mm/gup.c
> > index 23a9f9c9d377..c16e4288997d 100644
> > --- a/mm/gup.c
> > +++ b/mm/gup.c
> > @@ -4,6 +4,7 @@
> >  #include <linux/err.h>
> >  #include <linux/spinlock.h>
> >
> > +#include <linux/kmsan.h>
> >  #include <linux/mm.h>
> >  #include <linux/memremap.h>
> >  #include <linux/pagemap.h>
> > @@ -2347,6 +2348,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
> >           gup_fast_permitted(start, end)) {
> >               local_irq_save(flags);
> >               gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
> > +             kmsan_gup_pgd_range(pages, nr);
> >               local_irq_restore(flags);
> >       }
> >
> > @@ -2416,6 +2418,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
> >           gup_fast_permitted(start, end)) {
> >               local_irq_disable();
> >               gup_pgd_range(addr, end, gup_flags, pages, &nr);
> > +             kmsan_gup_pgd_range(pages, nr);
> >               local_irq_enable();
> >               ret = nr;
> >       }
> > diff --git a/mm/memory.c b/mm/memory.c
> > index b1ca51a079f2..48ceacc06e2d 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -51,6 +51,7 @@
> >  #include <linux/highmem.h>
> >  #include <linux/pagemap.h>
> >  #include <linux/memremap.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/ksm.h>
> >  #include <linux/rmap.h>
> >  #include <linux/export.h>
> > @@ -2328,6 +2329,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
> >               if (!new_page)
> >                       goto oom;
> >               cow_user_page(new_page, old_page, vmf->address, vma);
> > +             kmsan_copy_page_meta(new_page, old_page);
> >       }
> >
> >       if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index c0b2e0306720..f52494e0dcd9 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -26,6 +26,8 @@
> >  #include <linux/compiler.h>
> >  #include <linux/kernel.h>
> >  #include <linux/kasan.h>
> > +#include <linux/kmsan.h>
> > +#include <linux/kmsan-checks.h>
> >  #include <linux/module.h>
> >  #include <linux/suspend.h>
> >  #include <linux/pagevec.h>
> > @@ -1133,6 +1135,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
> >       VM_BUG_ON_PAGE(PageTail(page), page);
> >
> >       trace_mm_page_free(page, order);
> > +     kmsan_free_page(page, order);
> >
> >       /*
> >        * Check tail pages before head page information is cleared to
> > @@ -3121,6 +3124,7 @@ void split_page(struct page *page, unsigned int order)
> >       VM_BUG_ON_PAGE(PageCompound(page), page);
> >       VM_BUG_ON_PAGE(!page_count(page), page);
> >
> > +     kmsan_split_page(page, order);
> >       for (i = 1; i < (1 << order); i++)
> >               set_page_refcounted(page + i);
> >       split_page_owner(page, order);
> > @@ -3253,6 +3257,13 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
> >  /*
> >   * Allocate a page from the given zone. Use pcplists for order-0 allocations.
> >   */
> > +/*
> > + * TODO(glider): rmqueue() may call __msan_poison_alloca() through a call to
> > + * set_pfnblock_flags_mask(). If __msan_poison_alloca() attempts to allocate
> > + * pages for the stack depot, it may call rmqueue() again, which will result
> > + * in a deadlock.
> > + */
> > +__no_sanitize_memory
> >  static inline
> >  struct page *rmqueue(struct zone *preferred_zone,
> >                       struct zone *zone, unsigned int order,
> > @@ -4779,6 +4790,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> >
> >       trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
> >
> > +     if (page)
> > +             if (kmsan_alloc_page(page, order, gfp_mask)) {
> > +                     __free_pages(page, order);
> > +                     page = NULL;
> > +             }
> >       return page;
> >  }
> >  EXPORT_SYMBOL(__alloc_pages_nodemask);
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index a3c70e275f4e..bdf66ffcf02c 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -29,6 +29,7 @@
> >  #include <linux/rcupdate.h>
> >  #include <linux/pfn.h>
> >  #include <linux/kmemleak.h>
> > +#include <linux/kmsan.h>
> >  #include <linux/atomic.h>
> >  #include <linux/compiler.h>
> >  #include <linux/llist.h>
> > @@ -119,7 +120,8 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
> >       } while (p4d++, addr = next, addr != end);
> >  }
> >
> > -static void vunmap_page_range(unsigned long addr, unsigned long end)
> > +/* Exported for KMSAN, visible in mm/kmsan/kmsan.h only. */
> > +void __vunmap_page_range(unsigned long addr, unsigned long end)
> >  {
> >       pgd_t *pgd;
> >       unsigned long next;
> > @@ -133,6 +135,12 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
> >               vunmap_p4d_range(pgd, addr, next);
> >       } while (pgd++, addr = next, addr != end);
> >  }
> > +EXPORT_SYMBOL(__vunmap_page_range);
> > +static void vunmap_page_range(unsigned long addr, unsigned long end)
> > +{
> > +     kmsan_vunmap_page_range(addr, end);
> > +     __vunmap_page_range(addr, end);
> > +}
> >
> >  static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
> >               unsigned long end, pgprot_t prot, struct page **pages, int *nr)
> > @@ -216,8 +224,11 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
> >   * will have pfns corresponding to the "pages" array.
> >   *
> >   * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
> > + *
> > + * This function is exported for use in KMSAN, but is only declared in KMSAN
> > + * headers.
> >   */
> > -static int vmap_page_range_noflush(unsigned long start, unsigned long end,
> > +int __vmap_page_range_noflush(unsigned long start, unsigned long end,
> >                                  pgprot_t prot, struct page **pages)
> >  {
> >       pgd_t *pgd;
> > @@ -237,6 +248,14 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
> >
> >       return nr;
> >  }
> > +EXPORT_SYMBOL(__vmap_page_range_noflush);
> > +
> > +static int vmap_page_range_noflush(unsigned long start, unsigned long end,
> > +                                pgprot_t prot, struct page **pages)
> > +{
> > +     kmsan_vmap_page_range_noflush(start, end, prot, pages);
> > +     return __vmap_page_range_noflush(start, end, prot, pages);
> > +}
> >
> >  static int vmap_page_range(unsigned long start, unsigned long end,
> >                          pgprot_t prot, struct page **pages)
> > diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
> > index 17bd8f539bc7..fd22c4a4ba42 100644
> > --- a/net/sched/sch_generic.c
> > +++ b/net/sched/sch_generic.c
> > @@ -11,6 +11,7 @@
> >  #include <linux/module.h>
> >  #include <linux/types.h>
> >  #include <linux/kernel.h>
> > +#include <linux/kmsan-checks.h>
> >  #include <linux/sched.h>
> >  #include <linux/string.h>
> >  #include <linux/errno.h>
> > @@ -659,6 +660,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
> >       } else {
> >               qdisc->empty = true;
> >       }
> > +     kmsan_check_skb(skb);
> >
> >       return skb;
> >  }



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg