Treat the address stored in the object differently according to its type:

 - Only use kasan_reset_tag() for virtual addresses
 - Only update min_addr and max_addr for virtual addresses
 - Convert physical addresses to virtual addresses in scan_object()

Suggested-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Patrick Wang <patrick.wang.shcn@xxxxxxxxx>
---
 mm/kmemleak.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 218144392446..246a70b7218f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -297,7 +297,9 @@ static void hex_dump_object(struct seq_file *seq,
 	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
 	kasan_disable_current();
 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
+			     HEX_GROUP_SIZE, object->flags & OBJECT_PHYS ? ptr :
+			     kasan_reset_tag((void *)ptr),
+			     len, HEX_ASCII);
 	kasan_enable_current();
 }
 
@@ -389,14 +391,15 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias,
 {
 	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
 			     object_tree_root.rb_node;
-	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+	unsigned long untagged_ptr = is_phys ? ptr : (unsigned long)kasan_reset_tag((void *)ptr);
 
 	while (rb) {
 		struct kmemleak_object *object;
 		unsigned long untagged_objp;
 
 		object = rb_entry(rb, struct kmemleak_object, rb_node);
-		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+		untagged_objp = is_phys ? object->pointer :
+				(unsigned long)kasan_reset_tag((void *)object->pointer);
 
 		if (untagged_ptr < untagged_objp)
 			rb = object->rb_node.rb_left;
@@ -643,16 +646,19 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
-	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
-	min_addr = min(min_addr, untagged_ptr);
-	max_addr = max(max_addr, untagged_ptr + size);
+	untagged_ptr = is_phys ? ptr : (unsigned long)kasan_reset_tag((void *)ptr);
+	if (!is_phys) {
+		min_addr = min(min_addr, untagged_ptr);
+		max_addr = max(max_addr, untagged_ptr + size);
+	}
 	link = is_phys ? &object_phys_tree_root.rb_node :
 		&object_tree_root.rb_node;
 	rb_parent = NULL;
 	while (*link) {
 		rb_parent = *link;
 		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
-		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
+		untagged_objp = is_phys ? parent->pointer :
+			(unsigned long)kasan_reset_tag((void *)parent->pointer);
 		if (untagged_ptr + size <= untagged_objp)
 			link = &parent->rb_node.rb_left;
 		else if (untagged_objp + parent->size <= untagged_ptr)
@@ -1202,7 +1208,9 @@ static bool update_checksum(struct kmemleak_object *object)
 
 	kasan_disable_current();
 	kcsan_disable_current();
-	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+	object->checksum = crc32(0, object->flags & OBJECT_PHYS ? (void *)object->pointer :
+				 kasan_reset_tag((void *)object->pointer),
+				 object->size);
 	kasan_enable_current();
 	kcsan_enable_current();
 
@@ -1353,6 +1361,7 @@ static void scan_object(struct kmemleak_object *object)
 {
 	struct kmemleak_scan_area *area;
 	unsigned long flags;
+	void *obj_ptr;
 
 	/*
 	 * Once the object->lock is acquired, the corresponding memory block
@@ -1364,10 +1373,15 @@ static void scan_object(struct kmemleak_object *object)
 	if (!(object->flags & OBJECT_ALLOCATED))
 		/* already freed object */
 		goto out;
+
+	obj_ptr = object->flags & OBJECT_PHYS ?
+		  __va((void *)object->pointer) :
+		  (void *)object->pointer;
+
 	if (hlist_empty(&object->area_list) ||
 	    object->flags & OBJECT_FULL_SCAN) {
-		void *start = (void *)object->pointer;
-		void *end = (void *)(object->pointer + object->size);
+		void *start = obj_ptr;
+		void *end = obj_ptr + object->size;
 		void *next;
 
 		do {
-- 
2.25.1
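
Not part of the patch, only an illustration of the rule the hunks above apply in several
places: KASAN tags exist only on virtual addresses, and memory can only be dereferenced
through a virtual address. The kernel-style sketch below assumes kmemleak's internal
definitions (struct kmemleak_object, OBJECT_PHYS, kasan_reset_tag(), __va()); the helper
names untagged_ptr_of() and scan_addr_of() are made up here and do not exist in
mm/kmemleak.c.

	/*
	 * Sketch only: a physical address carries no KASAN tag, so it is
	 * used as-is for tree lookups and insertions, while a virtual
	 * address is untagged first.
	 */
	static unsigned long untagged_ptr_of(unsigned long ptr, bool is_phys)
	{
		return is_phys ? ptr : (unsigned long)kasan_reset_tag((void *)ptr);
	}

	/*
	 * Sketch only: scanning reads the object's memory, so an object
	 * storing a physical address is converted to its linear-map
	 * virtual address with __va() before being scanned.
	 */
	static void *scan_addr_of(const struct kmemleak_object *object)
	{
		return (object->flags & OBJECT_PHYS) ?
			__va(object->pointer) : (void *)object->pointer;
	}

The min_addr/max_addr change follows the same split: those bounds are compared against
the (virtual) pointer values found while scanning, so objects storing physical addresses
should not widen them.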