We modify the ported kernel source code to make it compatible with
crash. For maple tree iteration, struct ma_state is used to record the
iteration state; it needs to be kept in crash and is changed dynamically
at runtime. Other structures, such as maple_tree/maple_node/
maple_metadata..., reside in the vmcore, and their members are resolved
while iterating.

The formal crash way of resolving a vmcore struct member is:

    readmem(node, KVADDR, buf, SIZE(buf), "", flag);
    return buf + OFFSET(member);

which is a reimplementation of the kernel way of member resolving:

    return node->member;

The 1st way is arch independent: it uses gdb to resolve the OFFSET of
members, so crash doesn't need to know the internals of the struct, even
if the struct changes in a new kernel version. The 2nd way is arch
dependent: the struct has to be ported to crash, so crash knows the
member offsets at compile time. It cannot handle the case where a struct
member changes, or where the struct layout differs between crash and the
kernel due to padding/alignment or optimization.

So we divide the whole procedure into 2 steps:
1) Keep the kernel structures unchanged, i.e. let the 2nd way work.
2) Get rid of the kernel structure dependency, i.e. replace the 2nd way
   with the 1st.

This patch handles step 1, mainly making the following changes:
a. Remove/rewrite the rcu-related prefixes and functions.
b. Remove the likely()/unlikely() macros.
c. Change the signatures of the following functions:

       ma_dead_node()
       vma_find()
       mas_next_nentry()
       vma_next()
       -----
       mte_parent()
       mas_parent_enum()
       mte_parent_slot()
       mte_is_root()
       VMA_ITERATOR()

   The first 4 are changed for necessary crash compatibility; the last 5
   are changed to simplify argument passing.

Signed-off-by: Tao Liu <ltao@xxxxxxxxxx>
---
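Notes (illustrative only, not part of the commit):

As a rough sketch of where step 2 is heading, a single member access
converted to the 1st way could look like the helper below. The helper
name is invented for illustration, and it assumes "maple_node" can be
resolved from the vmlinux debuginfo via gdb, so that crash's existing
STRUCT_SIZE()/MEMBER_OFFSET() macros work:

    /* Hypothetical step 2-style helper, for illustration only. */
    static ulong maple_node_parent(ulong node)  /* node: kernel vaddr */
    {
            long size = STRUCT_SIZE("maple_node");
            long offset = MEMBER_OFFSET("maple_node", "parent");
            char *buf;
            ulong parent;

            if ((size < 0) || (offset < 0))
                    error(FATAL, "cannot resolve struct maple_node\n");

            buf = GETBUF(size);
            readmem(node, KVADDR, buf, size,
                    "maple_node struct", FAULT_ON_ERROR);
            parent = ULONG(buf + offset);   /* kernel: node->parent */
            FREEBUF(buf);

            return parent;
    }

With the changed VMA_ITERATOR() signature below, a caller inside crash
is expected to iterate VMAs roughly as follows, passing the kernel
virtual address of mm->mm_mt instead of the mm_struct itself (a sketch
under step 1 semantics, where the returned entries are kernel virtual
addresses of vm_area_struct; process_vma() is a hypothetical consumer):

    void *vma;                      /* kernel vaddr of a vm_area_struct */
    VMA_ITERATOR(vmi, mm_mt, 0);    /* mm_mt: kernel vaddr of mm->mm_mt */

    for_each_vma(vmi, vma)
            process_vma(vma);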
 Makefile         |  10 ++-
 maple_tree.c     | 184 ++++++++++++++++++++++++++++-------------------
 maple_tree.h     |  24 ++++---
 maple_tree_vma.h |  14 ++--
 xarray.h         |  12 ++--
 5 files changed, 147 insertions(+), 97 deletions(-)

diff --git a/Makefile b/Makefile
index 79aef17..d545bc0 100644
--- a/Makefile
+++ b/Makefile
@@ -59,6 +59,7 @@ IBM_HFILES=ibm_common.h
 SADUMP_HFILES=sadump.h
 UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h
 VMWARE_HFILES=vmware_vmss.h
+MAPLE_TREE_HFILES=maple_tree.h xarray.h

 CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \
 	kernel.c test.c gdb_interface.c configure.c net.c dev.c bpf.c \
@@ -73,12 +74,12 @@ CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \
 	xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \
 	xen_hyper_dump_tables.c kvmdump.c qemu.c qemu-load.c sadump.c ipcs.c \
 	ramdump.c vmware_vmss.c vmware_guestdump.c \
-	xen_dom0.c kaslr_helper.c sbitmap.c
+	xen_dom0.c kaslr_helper.c sbitmap.c maple_tree.c

 SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \
 	${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \
 	${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\
-	${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES}
+	${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES} ${MAPLE_TREE_HFILES}

 OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \
 	build_data.o kernel.o test.o gdb_interface.o net.o dev.o bpf.o \
@@ -93,7 +94,7 @@ OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \
 	xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \
 	xen_hyper_dump_tables.o kvmdump.o qemu.o qemu-load.o sadump.o ipcs.o \
 	ramdump.o vmware_vmss.o vmware_guestdump.o \
-	xen_dom0.o kaslr_helper.o sbitmap.o
+	xen_dom0.o kaslr_helper.o sbitmap.o maple_tree.o

 MEMORY_DRIVER_FILES=memory_driver/Makefile memory_driver/crash.c memory_driver/README

@@ -536,6 +537,9 @@ kaslr_helper.o: ${GENERIC_HFILES} kaslr_helper.c
 bpf.o: ${GENERIC_HFILES} bpf.c
 	${CC} -c ${CRASH_CFLAGS} bpf.c ${WARNING_OPTIONS} ${WARNING_ERROR}

+maple_tree.o: ${GENERIC_HFILES} ${MAPLE_TREE_HFILES} maple_tree.c
+	${CC} -c ${CRASH_CFLAGS} maple_tree.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
 ${PROGRAM}: force
 	@$(MAKE) all

diff --git a/maple_tree.c b/maple_tree.c
index a7db8fa..21a2226 100644
--- a/maple_tree.c
+++ b/maple_tree.c
@@ -6,6 +6,10 @@
  *	    Matthew Wilcox <willy@xxxxxxxxxxxxx>
  */

+#include "maple_tree.h"
+#include "defs.h"
+#include "xarray.h"
+
 /* Bit 1 indicates the root is a node */
 #define MAPLE_ROOT_NODE			0x02
 /* maple_type stored bit 3-6 */
@@ -74,12 +78,15 @@ static inline enum maple_type mte_node_type(const struct maple_enode *entry)

 static inline void *mas_root(struct ma_state *mas)
 {
-	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
+	struct maple_tree tree;
+	readmem(mas->tree, KVADDR, &tree, sizeof(tree),
+		"mas_root read maple_tree", FAULT_ON_ERROR);
+	return tree.ma_root;
 }

 static inline struct maple_enode *mas_start(struct ma_state *mas)
 {
-	if (likely(mas_is_start(mas))) {
+	if (mas_is_start(mas)) {
 		struct maple_enode *root;

 		mas->node = MAS_NONE;
@@ -90,13 +97,13 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
 		root = mas_root(mas);

 		/* Tree with nodes */
-		if (likely(xa_is_node(root))) {
+		if (xa_is_node(root)) {
 			mas->node = mte_safe_root(root);
 			return NULL;
 		}

 		/* empty tree */
-		if (unlikely(!root)) {
+		if (!root) {
 			mas->offset = MAPLE_NODE_SLOTS;
 			return NULL;
 		}
@@ -160,24 +167,25 @@ static inline unsigned char ma_data_end(struct maple_node *node,
 		return ma_meta_end(node, type);

 	offset = mt_pivots[type] - 1;
-	if (likely(!pivots[offset]))
+	if (!pivots[offset])
 		return ma_meta_end(node, type);

-	if (likely(pivots[offset] == max))
+	if (pivots[offset] == max)
 		return offset;

 	return mt_pivots[type];
 }

-static inline bool ma_dead_node(const struct maple_node *node)
+static inline bool ma_dead_node(const struct maple_node *node,
+				const struct maple_node *orig_node)
 {
 	struct maple_node *parent = (void *)((unsigned long)
 				node->parent & ~MAPLE_NODE_MASK);

-	return (parent == node);
+	return (parent == orig_node);
 }

-static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
+static inline void **ma_slots(struct maple_node *mn, enum maple_type mt)
 {
 	switch (mt) {
 	default:
@@ -192,9 +200,9 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
 }

 static inline void *mt_slot(const struct maple_tree *mt,
-		void __rcu **slots, unsigned char offset)
+		void **slots, unsigned char offset)
 {
-	return rcu_dereference_check(slots[offset], mt_locked(mt));
+	return slots[offset];
 }

 static inline bool ma_is_leaf(const enum maple_type type)
@@ -209,11 +217,13 @@ static inline void *mtree_range_walk(struct ma_state *mas)
 	struct maple_node *node;
 	struct maple_enode *next, *last;
 	enum maple_type type;
-	void __rcu **slots;
+	void **slots;
 	unsigned char end;
 	unsigned long max, min;
 	unsigned long prev_max, prev_min;

+	struct maple_node tmp_node;
+
 	last = next = mas->node;
 	prev_min = min = mas->min;
 	max = mas->max;
@@ -222,9 +232,11 @@ static inline void *mtree_range_walk(struct ma_state *mas)
 		last = next;
 		node = mte_to_node(next);
 		type = mte_node_type(next);
-		pivots = ma_pivots(node, type);
-		end = ma_data_end(node, type, pivots, max);
-		if (unlikely(ma_dead_node(node)))
+		readmem(node, KVADDR, &tmp_node, sizeof(tmp_node),
+			"mtree_range_walk read maple_node", FAULT_ON_ERROR);
+		pivots = ma_pivots(&tmp_node, type);
+		end = ma_data_end(&tmp_node, type, pivots, max);
+		if (ma_dead_node(&tmp_node, node))
 			goto dead_node;

 		if (pivots[offset] >= mas->index) {
@@ -241,13 +253,13 @@ static inline void *mtree_range_walk(struct ma_state *mas)
 			prev_min = min;
 			min = pivots[offset - 1] + 1;
 		prev_max = max;
-		if (likely(offset < end && pivots[offset]))
+		if (offset < end && pivots[offset])
 			max = pivots[offset];

 next:
-		slots = ma_slots(node, type);
+		slots = ma_slots(&tmp_node, type);
 		next = mt_slot(mas->tree, slots, offset);
-		if (unlikely(ma_dead_node(node)))
+		if (ma_dead_node(&tmp_node, node))
 			goto dead_node;
 	} while (!ma_is_leaf(type));

@@ -297,13 +309,13 @@ static inline struct maple_node *mas_mn(const struct ma_state *mas)
 static inline unsigned long
 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
 {
-	if (likely(offset))
+	if (offset)
 		return pivots[offset - 1] + 1;

 	return mas->min;
 }

-static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+static inline void *mas_slot(struct ma_state *mas, void **slots,
 		unsigned char offset)
 {
 	return mt_slot(mas->tree, slots, offset);
@@ -320,12 +332,13 @@ mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
 }

 static inline void *mas_next_nentry(struct ma_state *mas,
-	struct maple_node *node, unsigned long max, enum maple_type type)
+	struct maple_node *node, unsigned long max,
+	enum maple_type type, struct maple_node *orig_node)
 {
 	unsigned char count;
 	unsigned long pivot;
 	unsigned long *pivots;
-	void __rcu **slots;
+	void **slots;
 	void *entry;

 	if (mas->last == mas->max) {
@@ -336,7 +349,7 @@ static inline void *mas_next_nentry(struct ma_state *mas,
 	pivots = ma_pivots(node, type);
 	slots = ma_slots(node, type);
 	mas->index = mas_safe_min(mas, pivots, mas->offset);
-	if (ma_dead_node(node))
+	if (ma_dead_node(node, orig_node))
 		return NULL;

 	if (mas->index > max)
@@ -349,7 +362,7 @@ static inline void *mas_next_nentry(struct ma_state *mas,
 	while (mas->offset < count) {
 		pivot = pivots[mas->offset];
 		entry = mas_slot(mas, slots, mas->offset);
-		if (ma_dead_node(node))
+		if (ma_dead_node(node, orig_node))
 			return NULL;

 		if (entry)
@@ -369,7 +382,7 @@ static inline void *mas_next_nentry(struct ma_state *mas,

 	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
 	entry = mas_slot(mas, slots, mas->offset);
-	if (ma_dead_node(node))
+	if (ma_dead_node(node, orig_node))
 		return NULL;

 	if (!pivot)
@@ -393,7 +406,6 @@ retry:
 		goto retry;

 	return;
-
 }

 static inline bool ma_is_root(struct maple_node *node)
@@ -401,16 +413,16 @@ static inline bool ma_is_root(struct maple_node *node)
 	return ((unsigned long)node->parent & MA_ROOT_PARENT);
 }

-static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+static inline struct maple_node *mte_parent(const struct maple_node *node)
 {
 	return (void *)((unsigned long)
-			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
+			(node->parent) & ~MAPLE_NODE_MASK);
 }

 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
 {
 	/* Note bit 1 == 0 means 16B */
-	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+	if (parent & MAPLE_PARENT_NOT_RANGE16)
 		return MAPLE_PARENT_SLOT_MASK;

 	return MAPLE_PARENT_16B_SLOT_MASK;
@@ -426,6 +438,7 @@ enum maple_type mte_parent_enum(struct maple_enode *p_enode,
 			     struct maple_tree *mt)
 {
 	unsigned long p_type;
+	struct maple_tree tmp_tree;

 	p_type = (unsigned long)p_enode;
 	if (p_type & MAPLE_PARENT_ROOT)
@@ -436,7 +449,9 @@ enum maple_type mte_parent_enum(struct maple_enode *p_enode,
 	switch (p_type) {
 	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
-		if (mt_is_alloc(mt))
+		readmem(mt, KVADDR, &tmp_tree, sizeof(tmp_tree),
+			"mte_parent_enum read maple_tree", FAULT_ON_ERROR);
+		if (mt_is_alloc(&tmp_tree))
 			return maple_arange_64;
 		return maple_range_64;
 	}
@@ -445,23 +460,23 @@ enum maple_type mte_parent_enum(struct maple_enode *p_enode,
 }

 static inline
-enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
+enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_node *node)
 {
-	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
+	return mte_parent_enum(ma_enode_ptr(node->parent), mas->tree);
 }

 static inline unsigned long mte_parent_shift(unsigned long parent)
 {
 	/* Note bit 1 == 0 means 16B */
-	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+	if (parent & MAPLE_PARENT_NOT_RANGE16)
 		return MAPLE_PARENT_SLOT_SHIFT;

 	return MAPLE_PARENT_16B_SLOT_SHIFT;
 }

-static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+static inline unsigned int mte_parent_slot(const struct maple_node *node)
 {
-	unsigned long val = (unsigned long) mte_to_node(enode)->parent;
+	unsigned long val = (unsigned long) node->parent;

 	/* Root. */
 	if (val & 1)
@@ -481,9 +496,9 @@ static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
 			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
 }

-static inline bool mte_is_root(const struct maple_enode *node)
+static inline bool mte_is_root(struct maple_node *node)
 {
-	return ma_is_root(mte_to_node(node));
+	return ma_is_root(node);
 }

 static int mas_ascend(struct ma_state *mas)
@@ -498,28 +513,35 @@ static int mas_ascend(struct ma_state *mas)
 	unsigned long *pivots;
 	unsigned char offset;
 	bool set_max = false, set_min = false;
+	struct maple_node tmp_node;

 	a_node = mas_mn(mas);
-	if (ma_is_root(a_node)) {
+	readmem(a_node, KVADDR, &tmp_node, sizeof(tmp_node),
+		"mas_ascend read maple_node", FAULT_ON_ERROR);
+	if (ma_is_root(&tmp_node)) {
 		mas->offset = 0;
 		return 0;
 	}

-	p_node = mte_parent(mas->node);
-	if (unlikely(a_node == p_node))
+	readmem(mte_to_node(mas->node), KVADDR, &tmp_node, sizeof(tmp_node),
+		"mas_ascend read maple_node", FAULT_ON_ERROR);
+	p_node = mte_parent(&tmp_node);
+	if (a_node == p_node)
 		return 1;

-	a_type = mas_parent_enum(mas, mas->node);
-	offset = mte_parent_slot(mas->node);
+	a_type = mas_parent_enum(mas, &tmp_node);
+	offset = mte_parent_slot(&tmp_node);
 	a_enode = mt_mk_node(p_node, a_type);

 	/* Check to make sure all parent information is still accurate */
-	if (p_node != mte_parent(mas->node))
+	if (p_node != mte_parent(&tmp_node))
 		return 1;

 	mas->node = a_enode;
 	mas->offset = offset;

-	if (mte_is_root(a_enode)) {
+	readmem(mte_to_node(a_enode), KVADDR, &tmp_node, sizeof(tmp_node),
+		"mas_ascend read maple_node", FAULT_ON_ERROR);
+	if (mte_is_root(&tmp_node)) {
 		mas->max = ULONG_MAX;
 		mas->min = 0;
 		return 0;
@@ -529,11 +551,15 @@ static int mas_ascend(struct ma_state *mas)
 	max = ULONG_MAX;
 	do {
 		p_enode = a_enode;
-		a_type = mas_parent_enum(mas, p_enode);
-		a_node = mte_parent(p_enode);
-		a_slot = mte_parent_slot(p_enode);
-		pivots = ma_pivots(a_node, a_type);
-		a_enode = mt_mk_node(a_node, a_type);
+		readmem(mte_to_node(p_enode), KVADDR, &tmp_node, sizeof(tmp_node),
+			"mas_ascend read maple_node", FAULT_ON_ERROR);
+		a_type = mas_parent_enum(mas, &tmp_node);
+		a_node = mte_parent(&tmp_node);
+		a_slot = mte_parent_slot(&tmp_node);
+		readmem(a_node, KVADDR, &tmp_node, sizeof(tmp_node),
+			"mas_ascend read maple_node", FAULT_ON_ERROR);
+		pivots = ma_pivots(&tmp_node, a_type);
+		a_enode = mt_mk_node(&tmp_node, a_type);

 		if (!set_min && a_slot) {
 			set_min = true;
@@ -545,10 +571,10 @@ static int mas_ascend(struct ma_state *mas)
 			max = pivots[a_slot];
 		}

-		if (unlikely(ma_dead_node(a_node)))
+		if (ma_dead_node(&tmp_node, a_node))
 			return 1;

-		if (unlikely(ma_is_root(a_node)))
+		if (ma_is_root(&tmp_node))
 			break;

 	} while (!set_min || !set_max);
@@ -567,50 +593,58 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
 	int level = 0;
 	unsigned char offset;
 	enum maple_type mt;
-	void __rcu **slots;
+	void **slots;
+
+	struct maple_node tmp_node;

 	if (mas->max >= max)
 		goto no_entry;

 	level = 0;
+	readmem(node, KVADDR, &tmp_node, sizeof(tmp_node),
+		"mas_next_node read maple_node", FAULT_ON_ERROR);
 	do {
-		if (ma_is_root(node))
+		if (ma_is_root(&tmp_node))
 			goto no_entry;

 		min = mas->max + 1;
 		if (min > max)
 			goto no_entry;

-		if (unlikely(mas_ascend(mas)))
+		if (mas_ascend(mas))
 			return 1;

 		offset = mas->offset;
 		level++;
 		node = mas_mn(mas);
 		mt = mte_node_type(mas->node);
-		pivots = ma_pivots(node, mt);
-	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+		readmem(node, KVADDR, &tmp_node, sizeof(tmp_node),
+			"mas_next_node read maple_node", FAULT_ON_ERROR);
+		pivots = ma_pivots(&tmp_node, mt);
+	} while (offset == ma_data_end(&tmp_node, mt, pivots, mas->max));

-	slots = ma_slots(node, mt);
+	slots = ma_slots(&tmp_node, mt);
 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
-	while (unlikely(level > 1)) {
+	while (level > 1) {
 		/* Descend, if necessary */
 		enode = mas_slot(mas, slots, offset);
-		if (unlikely(ma_dead_node(node)))
+		if (ma_dead_node(&tmp_node, node))
 			return 1;

 		mas->node = enode;
 		level--;
 		node = mas_mn(mas);
 		mt = mte_node_type(mas->node);
-		slots = ma_slots(node, mt);
-		pivots = ma_pivots(node, mt);
+		readmem(node, KVADDR, &tmp_node, sizeof(tmp_node),
+			"mas_next_node read maple_node", FAULT_ON_ERROR);
+		slots = ma_slots(&tmp_node, mt);
+		pivots = ma_pivots(&tmp_node, mt);
 		offset = 0;
 		pivot = pivots[0];
 	}

 	enode = mas_slot(mas, slots, offset);
-	if (unlikely(ma_dead_node(node)))
+	if (ma_dead_node(&tmp_node, node))
 		return 1;

 	mas->node = enode;
@@ -619,7 +653,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
 	return 0;

 no_entry:
-	if (unlikely(ma_dead_node(node)))
+	if (ma_dead_node(&tmp_node, node))
 		return 1;

 	mas->node = MAS_NONE;
@@ -635,6 +669,8 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
 	unsigned long last;
 	enum maple_type mt;

+	struct maple_node tmp_node;
+
 	last = mas->last;
retry:
 	offset = mas->offset;
@@ -642,28 +678,30 @@ retry:
 	node = mas_mn(mas);
 	mt = mte_node_type(mas->node);
 	mas->offset++;
-	if (unlikely(mas->offset >= mt_slots[mt])) {
+	if (mas->offset >= mt_slots[mt]) {
 		mas->offset = mt_slots[mt] - 1;
 		goto next_node;
 	}

 	while (!mas_is_none(mas)) {
-		entry = mas_next_nentry(mas, node, limit, mt);
-		if (unlikely(ma_dead_node(node))) {
+		readmem(node, KVADDR, &tmp_node, sizeof(tmp_node),
+			"mas_next_entry read maple_node", FAULT_ON_ERROR);
+		entry = mas_next_nentry(mas, &tmp_node, limit, mt, node);
+		if (ma_dead_node(&tmp_node, node)) {
 			mas_rewalk(mas, last);
 			goto retry;
 		}

-		if (likely(entry))
+		if (entry)
 			return entry;

-		if (unlikely((mas->index > limit)))
+		if ((mas->index > limit))
 			break;

next_node:
 		prev_node = mas->node;
 		offset = mas->offset;
-		if (unlikely(mas_next_node(mas, node, limit))) {
+		if (mas_next_node(mas, node, limit)) {
 			mas_rewalk(mas, last);
 			goto retry;
 		}
@@ -707,8 +745,8 @@ retry:

 void *mas_find(struct ma_state *mas, unsigned long max)
 {
-	if (unlikely(mas_is_paused(mas))) {
-		if (unlikely(mas->last == ULONG_MAX)) {
+	if (mas_is_paused(mas)) {
+		if (mas->last == ULONG_MAX) {
 			mas->node = MAS_NONE;
 			return NULL;
 		}
@@ -716,7 +754,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 		mas->index = ++mas->last;
 	}

-	if (unlikely(mas_is_start(mas))) {
+	if (mas_is_start(mas)) {
 		/* First run or continue */
 		void *entry;
@@ -728,7 +766,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 		return entry;
 	}

-	if (unlikely(!mas_searchable(mas)))
+	if (!mas_searchable(mas))
 		return NULL;

 	/* Retries on dead nodes handled by mas_next_entry */

diff --git a/maple_tree.h b/maple_tree.h
index c4b2828..a5a131e 100644
--- a/maple_tree.h
+++ b/maple_tree.h
@@ -1,19 +1,24 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef _LINUX_MAPLE_TREE_H
-#define _LINUX_MAPLE_TREE_H
+#ifndef _MAPLE_TREE_H
+#define _MAPLE_TREE_H
 /*
  * Maple Tree - An RCU-safe adaptive tree for storing ranges
  * Copyright (c) 2018-2022 Oracle
  * Authors:	Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
  *		Matthew Wilcox <willy@xxxxxxxxxxxxx>
  */
+#include <stdint.h>
+#include <stdbool.h>
+
+typedef uint32_t spinlock_t;
+typedef uint32_t lockdep_map_p;

 struct maple_tree {
 	union {
 		spinlock_t	ma_lock;
 		lockdep_map_p	ma_external_lock;
 	};
-	void __rcu	*ma_root;
+	void		*ma_root;
 	unsigned int	ma_flags;
 };
@@ -96,13 +101,15 @@ enum maple_type {

 #define MAPLE_RESERVED_RANGE	4096

+typedef struct rcu_head { unsigned char x[16]; } rcu_head;
+
 struct maple_range_64 {
 	struct maple_pnode *parent;
 	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
 	union {
-		void __rcu *slot[MAPLE_RANGE64_SLOTS];
+		void *slot[MAPLE_RANGE64_SLOTS];
 		struct {
-			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+			void *pad[MAPLE_RANGE64_SLOTS - 1];
 			struct maple_metadata meta;
 		};
 	};
@@ -111,7 +118,7 @@ struct maple_range_64 {
 struct maple_arange_64 {
 	struct maple_pnode *parent;
 	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
-	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+	void *slot[MAPLE_ARANGE64_SLOTS];
 	unsigned long gap[MAPLE_ARANGE64_SLOTS];
 	struct maple_metadata meta;
 };
@@ -127,7 +134,7 @@ struct maple_node {
 	union {
 		struct {
 			struct maple_pnode *parent;
-			void __rcu *slot[MAPLE_NODE_SLOTS];
+			void *slot[MAPLE_NODE_SLOTS];
 		};
 		struct {
 			void *pad;
@@ -172,5 +179,4 @@ static inline bool mas_is_paused(struct ma_state *mas)
 {
 	return mas->node == MAS_PAUSE;
 }
-
-#endif /*_LINUX_MAPLE_TREE_H */
\ No newline at end of file
+#endif /* _MAPLE_TREE_H */
\ No newline at end of file

diff --git a/maple_tree_vma.h b/maple_tree_vma.h
index 5a9fc46..0e006dc 100644
--- a/maple_tree_vma.h
+++ b/maple_tree_vma.h
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include "maple_tree.h"

 #define for_each_vma(vmi, vma) while ((vma = vma_next(&(vmi))) != NULL)

@@ -6,22 +7,23 @@
 struct vma_iterator {
 	struct ma_state mas;
 };

-#define VMA_ITERATOR(name, mm, addr) \
+#define VMA_ITERATOR(name, mm_mt, addr) \
 	struct vma_iterator name = { \
 		.mas = { \
-			.tree = &mm->mm_mt, \
+			.tree = mm_mt, \
 			.index = addr, \
 			.node = MAS_START, \
 		}, \
 	}

-static inline
-struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+extern void *mas_find(struct ma_state *, unsigned long);
+
+void *vma_find(struct vma_iterator *vmi, unsigned long max)
 {
-	return mas_find(&vmi->mas, max);
+	return mas_find(&vmi->mas, max);
 }

-static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+void *vma_next(struct vma_iterator *vmi)
 {
 	/*
 	 * Uses vma_find() to get the first VMA when the iterator starts.

diff --git a/xarray.h b/xarray.h
index 2e14ec7..084382d 100644
--- a/xarray.h
+++ b/xarray.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef _LINUX_XARRAY_H
-#define _LINUX_XARRAY_H
+#ifndef _XARRAY_H
+#define _XARRAY_H
 /*
  * eXtensible Arrays
  * Copyright (c) 2017 Microsoft Corporation
@@ -25,8 +25,8 @@ static inline bool xa_is_internal(const void *entry)

 static inline bool xa_is_err(const void *entry)
 {
-	return unlikely(xa_is_internal(entry) &&
-			entry >= xa_mk_internal(-MAX_ERRNO));
+	return xa_is_internal(entry) &&
+		entry >= xa_mk_internal(-MAX_ERRNO);
 }

 static inline int xa_err(void *entry)
@@ -49,7 +49,7 @@ static inline bool xa_is_value(const void *entry)

 static inline bool xa_is_zero(const void *entry)
 {
-	return unlikely(entry == XA_ZERO_ENTRY);
+	return entry == XA_ZERO_ENTRY;
 }

 static inline unsigned long xa_to_internal(const void *entry)
@@ -67,4 +67,4 @@ static inline bool xa_is_advanced(const void *entry)
 	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
 }

-#endif /* _LINUX_XARRAY_H */
\ No newline at end of file
+#endif /* _XARRAY_H */
\ No newline at end of file
--
2.33.1

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://listman.redhat.com/mailman/listinfo/crash-utility
Contribution Guidelines: https://github.com/crash-utility/crash/wiki