Re: [PATCH] LoongArch: Add kernel address sanitizer support

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi, Andrey
On 2023/3/30 上午3:02, Andrey Konovalov wrote:
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f7ef70661ce2..3b91b941873d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
  int kasan_populate_early_shadow(const void *shadow_start,
                                 const void *shadow_end);

+#ifndef __HAVE_ARCH_SHADOW_MAP
  static inline void *kasan_mem_to_shadow(const void *addr)
  {
         return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                 + KASAN_SHADOW_OFFSET;
  }
+#endif

  int kasan_add_zero_shadow(void *start, unsigned long size);
  void kasan_remove_zero_shadow(void *start, unsigned long size);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index e5eef670735e..f86194750df5 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -175,6 +175,11 @@ static __always_inline bool check_region_inline(unsigned long addr,
         if (unlikely(!addr_has_metadata((void *)addr)))
                 return !kasan_report(addr, size, write, ret_ip);

+#ifndef __HAVE_ARCH_SHADOW_MAP
+       if (unlikely(kasan_mem_to_shadow((unsigned long *)addr) == NULL))
+               return !kasan_report(addr, size, write, ret_ip);
+#endif

This should have been ifdef, right?

Sorry, that was a clerical error. Here is the corrected version:
#ifndef __HAVE_ARCH_SHADOW_MAP
if (unlikely(! addr_has_metadata((void *)addr)))
return ! kasan_report(addr, size, write, ret_ip);
#else
if (unlikely(kasan_mem_to_shadow((void *)addr) == NULL)) {
kasan_report(addr, size, write, ret_ip);
return;
}
#endif
But I don't think you need this check here at all: addr_has_metadata
already checks that shadow exists.

On LoongArch, there are many holes between different segments, so the kasan
shadow area is made up of several different regions of memory that we
concatenate. We can't use if (unlikely((void *)addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) to determine
validity, so in arch/loongarch/include/asm/kasan.h I return NULL to
indicate an invalid shadow address.
+
         if (likely(!memory_is_poisoned(addr, size)))
                 return true;

diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index cc64ed6858c6..860061a22ca9 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -166,8 +166,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
                                 if (!p)
                                         return -ENOMEM;
                         } else {
-                               pud_populate(&init_mm, pud,
-                                       early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                               p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                               pmd_init(p);
+                               pud_populate(&init_mm, pud, p);
                         }
                 }
                 zero_pmd_populate(pud, addr, next);
@@ -207,8 +208,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                                 if (!p)
                                         return -ENOMEM;
                         } else {
-                               p4d_populate(&init_mm, p4d,
-                                       early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                               p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                               pud_init(p);
+                               p4d_populate(&init_mm, p4d, p);

Please explain why these changes are needed in the patch description.

This is because in pagetable_init on loongarch/mips, we populate pmd/pud
with invalid_pmd_table/invalid_pud_table, so pmd_init/pud_init(p) is
required here. Perhaps we could define them as __weak in
mm/kasan/init.c, like in mm/sparse-vmemmap.c.

diff --git a/include/linux/mm.h  b/include/linux/mm.h
...
+void pmd_init(void *addr);
+void pud_init(void *addr);
...
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
...
+void __weak __meminit pmd_init(void *addr)
+ {
+}
+
@@-203,11 +207,16 @@pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (! p)
return NULL;
+               pmd_init(p);
pud_populate(&init_mm, pud, p);
}
return pud;
}
+void __weak __meminit pud_init(void *addr)
+ {
+}
+
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
p4d_t *p4d = p4d_offset(pgd, addr);
@@-215,6 +224,7 @@p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (! p)
return NULL;
+               pud_init(p);
p4d_populate(&init_mm, p4d, p);
}
return p4d;

Thanks,
- Qing

                         }
                 }
                 zero_pud_populate(p4d, addr, next);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index a61eeee3095a..033335c13b25 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,16 +291,22 @@ struct kasan_stack_ring {

  #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

+#ifndef __HAVE_ARCH_SHADOW_MAP
  static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
  {
         return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
                 << KASAN_SHADOW_SCALE_SHIFT);
  }
+#endif

  static __always_inline bool addr_has_metadata(const void *addr)
  {
+#ifdef __HAVE_ARCH_SHADOW_MAP
+       return (kasan_mem_to_shadow((void *)addr) != NULL);
+#else
         return (kasan_reset_tag(addr) >=
                 kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+#endif
  }

  /**
--
2.20.1






[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux