[RFC] [PATCH 1/6] Port linux maple tree related files to crash

There are two ways to iterate over vm_area_structs: 1) by rbtree,
aka vma.vm_rb; 2) by linked list, aka vma.vm_prev/vm_next.
However, with the Linux maple tree patches [1][2], vm_rb and
vm_prev/vm_next are removed from vm_area_struct. crash's
memory.c:vm_area_dump() mainly uses the linked list for vma
iteration, which will not work in this case. So maple tree
iteration needs to be ported to crash.

Currently crash only reads the maple tree iteratively; no RCU
safety or maple tree modification features are needed. So we only
port a subset of the kernel maple tree features.

This patch mainly ports the for_each_vma() macro and all of its
dependencies from the kernel source [3] to crash, to enable maple
tree vma iteration in crash (see the usage sketch after the file
list):

    maple_tree_vma.h: The interface of maple tree vma iteration.
    maple_tree.c: Maple tree main implementation.
    maple_tree.h: Maple tree constants and structures.
    xarray.h: The xarray dependencies of the maple tree.
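
As an illustration, with these files in place, vma iteration in crash
code is expected to look roughly like the sketch below (a minimal
sketch, assuming "mm" points at a readable struct mm_struct; do_vma()
is a hypothetical visitor, not part of this patch):

    struct vm_area_struct *vma;
    VMA_ITERATOR(vmi, mm, 0);       /* start iterating at address 0 */

    for_each_vma(vmi, vma)          /* vma_next() until it returns NULL */
        do_vma(vma);                /* vmas visited in ascending order */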

[1]: https://github.com/oracle/linux-uek/commit/d19703645b80abe35dff1a88449d074b0b5b1bb1
[2]: https://github.com/oracle/linux-uek/commit/91dee01f1ebb6b6587463b6ee6f7bbc4965f91d5
[3]: https://github.com/oracle/linux-uek, maple/mainline branch

Signed-off-by: Tao Liu <ltao@xxxxxxxxxx>
---
 maple_tree.c     | 736 +++++++++++++++++++++++++++++++++++++++++++++++
 maple_tree.h     | 176 ++++++++++++
 maple_tree_vma.h |  31 ++
 xarray.h         |  70 +++++
 4 files changed, 1013 insertions(+)
 create mode 100644 maple_tree.c
 create mode 100644 maple_tree.h
 create mode 100644 maple_tree_vma.h
 create mode 100644 xarray.h

diff --git a/maple_tree.c b/maple_tree.c
new file mode 100644
index 0000000..a7db8fa
--- /dev/null
+++ b/maple_tree.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Maple Tree implementation
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Authors: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
+ * 	    Matthew Wilcox <willy@xxxxxxxxxxxxx>
+ */
+
+/* Bit 1 indicates the root is a node */
+#define MAPLE_ROOT_NODE			0x02
+/* maple_type stored bit 3-6 */
+#define MAPLE_ENODE_TYPE_SHIFT		0x03
+/* Bit 2 means a NULL somewhere below */
+#define MAPLE_ENODE_NULL		0x04
+
+#define MA_ROOT_PARENT 1
+
+#define MAPLE_PARENT_ROOT		0x01
+
+#define MAPLE_PARENT_SLOT_SHIFT		0x03
+#define MAPLE_PARENT_SLOT_MASK		0xF8
+
+#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
+#define MAPLE_PARENT_16B_SLOT_MASK	0xFC
+
+#define MAPLE_PARENT_RANGE64		0x06
+#define MAPLE_PARENT_RANGE32		0x04
+#define MAPLE_PARENT_NOT_RANGE16	0x02
+
+static const unsigned char mt_slots[] = {
+	[maple_dense]		= MAPLE_NODE_SLOTS,
+	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
+	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
+	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
+};
+#define mt_slot_count(x) mt_slots[mte_node_type(x)]
+
+static const unsigned char mt_pivots[] = {
+	[maple_dense]		= 0,
+	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
+	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
+	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
+};
+#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
+
+#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define ma_enode_ptr(x) ((struct maple_enode *)(x))
+
+static inline bool mas_is_ptr(struct ma_state *mas)
+{
+	return mas->node == MAS_ROOT;
+}
+
+static inline bool mas_is_start(struct ma_state *mas)
+{
+	return mas->node == MAS_START;
+}
+
+static inline void *mte_safe_root(const struct maple_enode *node)
+{
+	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
+}
+
+static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+{
+	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+{
+	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
+		MAPLE_NODE_TYPE_MASK;
+}
+
+static inline void *mas_root(struct ma_state *mas)
+{
+	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
+}
+
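+/*
+ * mas_start() - Set up the maple state to walk from the root of the tree.
+ * Return: the entry of a single-entry tree (when mas->index == 0),
+ * NULL otherwise.
+ */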
+static inline struct maple_enode *mas_start(struct ma_state *mas)
+{
+	if (likely(mas_is_start(mas))) {
+		struct maple_enode *root;
+
+		mas->node = MAS_NONE;
+		mas->min = 0;
+		mas->max = ULONG_MAX;
+		mas->depth = 0;
+		mas->offset = 0;
+
+		root = mas_root(mas);
+		/* Tree with nodes */
+		if (likely(xa_is_node(root))) {
+			mas->node = mte_safe_root(root);
+			return NULL;
+		}
+
+		/* empty tree */
+		if (unlikely(!root)) {
+			mas->offset = MAPLE_NODE_SLOTS;
+			return NULL;
+		}
+
+		/* Single entry tree */
+		mas->node = MAS_ROOT;
+		mas->offset = MAPLE_NODE_SLOTS;
+
+		/* The only entry lives at index 0 */
+		if (mas->index > 0)
+			return NULL;
+
+		return root;
+	}
+
+	return NULL;
+}
+
+static inline unsigned long *ma_pivots(struct maple_node *node,
+					   enum maple_type type)
+{
+	switch (type) {
+	case maple_arange_64:
+		return node->ma64.pivot;
+	case maple_range_64:
+	case maple_leaf_64:
+		return node->mr64.pivot;
+	case maple_dense:
+		return NULL;
+	}
+	return NULL;
+}
+
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+					     enum maple_type mt)
+{
+	switch (mt) {
+	case maple_arange_64:
+		return &mn->ma64.meta;
+	default:
+		return &mn->mr64.meta;
+	}
+}
+
+static inline unsigned char ma_meta_end(struct maple_node *mn,
+					enum maple_type mt)
+{
+	struct maple_metadata *meta = ma_meta(mn, mt);
+
+	return meta->end;
+}
+
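+/*
+ * ma_data_end() - Find the slot offset of the last entry in use in @node,
+ * using the metadata or the pivots, depending on the node type.
+ */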
+static inline unsigned char ma_data_end(struct maple_node *node,
+					enum maple_type type,
+					unsigned long *pivots,
+					unsigned long max)
+{
+	unsigned char offset;
+
+	if (type == maple_arange_64)
+		return ma_meta_end(node, type);
+
+	offset = mt_pivots[type] - 1;
+	if (likely(!pivots[offset]))
+		return ma_meta_end(node, type);
+
+	if (likely(pivots[offset] == max))
+		return offset;
+
+	return mt_pivots[type];
+}
+
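+/*
+ * ma_dead_node() - A node is marked dead by pointing its parent at itself.
+ */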
+static inline bool ma_dead_node(const struct maple_node *node)
+{
+	struct maple_node *parent = (void *)((unsigned long)
+					     node->parent & ~MAPLE_NODE_MASK);
+
+	return (parent == node);
+}
+
+static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
+{
+	switch (mt) {
+	default:
+	case maple_arange_64:
+		return mn->ma64.slot;
+	case maple_range_64:
+	case maple_leaf_64:
+		return mn->mr64.slot;
+	case maple_dense:
+		return mn->slot;
+	}
+}
+
+static inline void *mt_slot(const struct maple_tree *mt,
+		void __rcu **slots, unsigned char offset)
+{
+	return rcu_dereference_check(slots[offset], mt_locked(mt));
+}
+
+static inline bool ma_is_leaf(const enum maple_type type)
+{
+	return type < maple_range_64;
+}
+
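+/*
+ * mtree_range_walk() - Walk down from mas->node to the leaf containing
+ * mas->index, updating the range in @mas on the way down.
+ * Return: the entry at mas->index, or NULL if a dead node is met.
+ */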
+static inline void *mtree_range_walk(struct ma_state *mas)
+{
+	unsigned long *pivots;
+	unsigned char offset;
+	struct maple_node *node;
+	struct maple_enode *next, *last;
+	enum maple_type type;
+	void __rcu **slots;
+	unsigned char end;
+	unsigned long max, min;
+	unsigned long prev_max, prev_min;
+
+	last = next = mas->node;
+	prev_min = min = mas->min;
+	max = mas->max;
+	do {
+		offset = 0;
+		last = next;
+		node = mte_to_node(next);
+		type = mte_node_type(next);
+		pivots = ma_pivots(node, type);
+		end = ma_data_end(node, type, pivots, max);
+		if (unlikely(ma_dead_node(node)))
+			goto dead_node;
+
+		if (pivots[offset] >= mas->index) {
+			prev_max = max;
+			prev_min = min;
+			max = pivots[offset];
+			goto next;
+		}
+
+		do {
+			offset++;
+		} while ((offset < end) && (pivots[offset] < mas->index));
+
+		prev_min = min;
+		min = pivots[offset - 1] + 1;
+		prev_max = max;
+		if (likely(offset < end && pivots[offset]))
+			max = pivots[offset];
+
+next:
+		slots = ma_slots(node, type);
+		next = mt_slot(mas->tree, slots, offset);
+		if (unlikely(ma_dead_node(node)))
+			goto dead_node;
+	} while (!ma_is_leaf(type));
+
+	mas->offset = offset;
+	mas->index = min;
+	mas->last = max;
+	mas->min = prev_min;
+	mas->max = prev_max;
+	mas->node = last;
+	return (void *) next;
+
+dead_node:
+	mas_reset(mas);
+	return NULL;
+}
+
+static inline void *mas_state_walk(struct ma_state *mas)
+{
+	void *entry;
+
+	entry = mas_start(mas);
+	if (mas_is_none(mas))
+		return NULL;
+
+	if (mas_is_ptr(mas))
+		return entry;
+
+	return mtree_range_walk(mas);
+}
+
+static inline bool mas_searchable(struct ma_state *mas)
+{
+	if (mas_is_none(mas))
+		return false;
+
+	if (mas_is_ptr(mas))
+		return false;
+
+	return true;
+}
+
+static inline struct maple_node *mas_mn(const struct ma_state *mas)
+{
+	return mte_to_node(mas->node);
+}
+
+static inline unsigned long
+mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
+{
+	if (likely(offset))
+		return pivots[offset - 1] + 1;
+
+	return mas->min;
+}
+
+static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+			     unsigned char offset)
+{
+	return mt_slot(mas->tree, slots, offset);
+}
+
+static inline unsigned long
+mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
+	       unsigned char piv, enum maple_type type)
+{
+	if (piv >= mt_pivots[type])
+		return mas->max;
+
+	return pivots[piv];
+}
+
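+/*
+ * mas_next_nentry() - Get the next entry within @node, starting at
+ * mas->offset and stopping at @max.
+ * Return: the next entry, or NULL if none is found in this node.
+ */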
+static inline void *mas_next_nentry(struct ma_state *mas,
+	    struct maple_node *node, unsigned long max, enum maple_type type)
+{
+	unsigned char count;
+	unsigned long pivot;
+	unsigned long *pivots;
+	void __rcu **slots;
+	void *entry;
+
+	if (mas->last == mas->max) {
+		mas->index = mas->max;
+		return NULL;
+	}
+
+	pivots = ma_pivots(node, type);
+	slots = ma_slots(node, type);
+	mas->index = mas_safe_min(mas, pivots, mas->offset);
+	if (ma_dead_node(node))
+		return NULL;
+
+	if (mas->index > max)
+		return NULL;
+
+	count = ma_data_end(node, type, pivots, mas->max);
+	if (mas->offset > count)
+		return NULL;
+
+	while (mas->offset < count) {
+		pivot = pivots[mas->offset];
+		entry = mas_slot(mas, slots, mas->offset);
+		if (ma_dead_node(node))
+			return NULL;
+
+		if (entry)
+			goto found;
+
+		if (pivot >= max)
+			return NULL;
+
+		mas->index = pivot + 1;
+		mas->offset++;
+	}
+
+	if (mas->index > mas->max) {
+		mas->index = mas->last;
+		return NULL;
+	}
+
+	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
+	entry = mas_slot(mas, slots, mas->offset);
+	if (ma_dead_node(node))
+		return NULL;
+
+	if (!pivot)
+		return NULL;
+
+	if (!entry)
+		return NULL;
+
+found:
+	mas->last = pivot;
+	return entry;
+}
+
+static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+{
+retry:
+	mas_set(mas, index);
+	mas_state_walk(mas);
+	if (mas_is_start(mas))
+		goto retry;
+}
+
+static inline bool ma_is_root(struct maple_node *node)
+{
+	return ((unsigned long)node->parent & MA_ROOT_PARENT);
+}
+
+static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+{
+	return (void *)((unsigned long)
+			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
+}
+
+static inline unsigned long mte_parent_slot_mask(unsigned long parent)
+{
+	/* Note bit 1 == 0 means 16B */
+	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+		return MAPLE_PARENT_SLOT_MASK;
+
+	return MAPLE_PARENT_16B_SLOT_MASK;
+}
+
+static inline bool mt_is_alloc(struct maple_tree *mt)
+{
+	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
+}
+
+static inline
+enum maple_type mte_parent_enum(struct maple_enode *p_enode,
+				struct maple_tree *mt)
+{
+	unsigned long p_type;
+
+	p_type = (unsigned long)p_enode;
+	if (p_type & MAPLE_PARENT_ROOT)
+		return 0; /* Validated in the caller. */
+
+	p_type &= MAPLE_NODE_MASK;
+	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
+
+	switch (p_type) {
+	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
+		if (mt_is_alloc(mt))
+			return maple_arange_64;
+		return maple_range_64;
+	}
+
+	return 0;
+}
+
+static inline
+enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
+{
+	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
+}
+
+static inline unsigned long mte_parent_shift(unsigned long parent)
+{
+	/* Note bit 1 == 0 means 16B */
+	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+		return MAPLE_PARENT_SLOT_SHIFT;
+
+	return MAPLE_PARENT_16B_SLOT_SHIFT;
+}
+
+static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+{
+	unsigned long val = (unsigned long) mte_to_node(enode)->parent;
+
+	/* Root. */
+	if (val & 1)
+		return 0;
+
+	/*
+	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
+	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
+	 */
+	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
+}
+
+static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
+					     enum maple_type type)
+{
+	return (void *)((unsigned long)node |
+			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
+}
+
+static inline bool mte_is_root(const struct maple_enode *node)
+{
+	return ma_is_root(mte_to_node(node));
+}
+
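+/*
+ * mas_ascend() - Walk up one level of the tree, setting mas->min and
+ * mas->max to the range implied by the ancestors.
+ * Return: 1 on a dead node, 0 otherwise.
+ */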
+static int mas_ascend(struct ma_state *mas)
+{
+	struct maple_enode *p_enode; /* parent enode. */
+	struct maple_enode *a_enode; /* ancestor enode. */
+	struct maple_node *a_node; /* ancestor node. */
+	struct maple_node *p_node; /* parent node. */
+	unsigned char a_slot;
+	enum maple_type a_type;
+	unsigned long min, max;
+	unsigned long *pivots;
+	unsigned char offset;
+	bool set_max = false, set_min = false;
+
+	a_node = mas_mn(mas);
+	if (ma_is_root(a_node)) {
+		mas->offset = 0;
+		return 0;
+	}
+
+	p_node = mte_parent(mas->node);
+	if (unlikely(a_node == p_node))
+		return 1;
+	a_type = mas_parent_enum(mas, mas->node);
+	offset = mte_parent_slot(mas->node);
+	a_enode = mt_mk_node(p_node, a_type);
+
+	/* Check to make sure all parent information is still accurate */
+	if (p_node != mte_parent(mas->node))
+		return 1;
+
+	mas->node = a_enode;
+	mas->offset = offset;
+
+	if (mte_is_root(a_enode)) {
+		mas->max = ULONG_MAX;
+		mas->min = 0;
+		return 0;
+	}
+
+	min = 0;
+	max = ULONG_MAX;
+	do {
+		p_enode = a_enode;
+		a_type = mas_parent_enum(mas, p_enode);
+		a_node = mte_parent(p_enode);
+		a_slot = mte_parent_slot(p_enode);
+		pivots = ma_pivots(a_node, a_type);
+		a_enode = mt_mk_node(a_node, a_type);
+
+		if (!set_min && a_slot) {
+			set_min = true;
+			min = pivots[a_slot - 1] + 1;
+		}
+
+		if (!set_max && a_slot < mt_pivots[a_type]) {
+			set_max = true;
+			max = pivots[a_slot];
+		}
+
+		if (unlikely(ma_dead_node(a_node)))
+			return 1;
+
+		if (unlikely(ma_is_root(a_node)))
+			break;
+
+	} while (!set_min || !set_max);
+
+	mas->max = max;
+	mas->min = min;
+	return 0;
+}
+
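+/*
+ * mas_next_node() - Get the next node at the same level in the tree,
+ * ascending and descending as needed.  Sets mas->node to MAS_NONE when
+ * there is no next node.
+ * Return: 1 on a dead node, 0 otherwise.
+ */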
+static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+				unsigned long max)
+{
+	unsigned long min, pivot;
+	unsigned long *pivots;
+	struct maple_enode *enode;
+	int level = 0;
+	unsigned char offset;
+	enum maple_type mt;
+	void __rcu **slots;
+
+	if (mas->max >= max)
+		goto no_entry;
+
+	level = 0;
+	do {
+		if (ma_is_root(node))
+			goto no_entry;
+
+		min = mas->max + 1;
+		if (min > max)
+			goto no_entry;
+
+		if (unlikely(mas_ascend(mas)))
+			return 1;
+
+		offset = mas->offset;
+		level++;
+		node = mas_mn(mas);
+		mt = mte_node_type(mas->node);
+		pivots = ma_pivots(node, mt);
+	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+
+	slots = ma_slots(node, mt);
+	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
+	while (unlikely(level > 1)) {
+		/* Descend, if necessary */
+		enode = mas_slot(mas, slots, offset);
+		if (unlikely(ma_dead_node(node)))
+			return 1;
+
+		mas->node = enode;
+		level--;
+		node = mas_mn(mas);
+		mt = mte_node_type(mas->node);
+		slots = ma_slots(node, mt);
+		pivots = ma_pivots(node, mt);
+		offset = 0;
+		pivot = pivots[0];
+	}
+
+	enode = mas_slot(mas, slots, offset);
+	if (unlikely(ma_dead_node(node)))
+		return 1;
+
+	mas->node = enode;
+	mas->min = min;
+	mas->max = pivot;
+	return 0;
+
+no_entry:
+	if (unlikely(ma_dead_node(node)))
+		return 1;
+
+	mas->node = MAS_NONE;
+	return 0;
+}
+
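+/*
+ * mas_next_entry() - Get the next entry after mas->index, up to @limit.
+ * Rewalks and retries when a dead node is met.
+ * Return: the next entry, or NULL if none exists below @limit.
+ */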
+static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+{
+	void *entry = NULL;
+	struct maple_enode *prev_node;
+	struct maple_node *node;
+	unsigned char offset;
+	unsigned long last;
+	enum maple_type mt;
+
+	last = mas->last;
+retry:
+	offset = mas->offset;
+	prev_node = mas->node;
+	node = mas_mn(mas);
+	mt = mte_node_type(mas->node);
+	mas->offset++;
+	if (unlikely(mas->offset >= mt_slots[mt])) {
+		mas->offset = mt_slots[mt] - 1;
+		goto next_node;
+	}
+
+	while (!mas_is_none(mas)) {
+		entry = mas_next_nentry(mas, node, limit, mt);
+		if (unlikely(ma_dead_node(node))) {
+			mas_rewalk(mas, last);
+			goto retry;
+		}
+
+		if (likely(entry))
+			return entry;
+
+		if (unlikely((mas->index > limit)))
+			break;
+
+next_node:
+		prev_node = mas->node;
+		offset = mas->offset;
+		if (unlikely(mas_next_node(mas, node, limit))) {
+			mas_rewalk(mas, last);
+			goto retry;
+		}
+		mas->offset = 0;
+		node = mas_mn(mas);
+		mt = mte_node_type(mas->node);
+	}
+
+	mas->index = mas->last = limit;
+	mas->offset = offset;
+	mas->node = prev_node;
+	return NULL;
+}
+
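+/*
+ * mas_walk() - Walk the tree to find the entry at mas->index, setting
+ * mas->index and mas->last to the range of the entry.
+ * Return: the entry, or NULL if nothing is stored there.
+ */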
+void *mas_walk(struct ma_state *mas)
+{
+	void *entry;
+
+retry:
+	entry = mas_state_walk(mas);
+	if (mas_is_start(mas))
+		goto retry;
+
+	if (mas_is_ptr(mas)) {
+		if (!mas->index) {
+			mas->last = 0;
+		} else {
+			mas->index = 1;
+			mas->last = ULONG_MAX;
+		}
+		return entry;
+	}
+
+	if (mas_is_none(mas)) {
+		mas->index = 0;
+		mas->last = ULONG_MAX;
+	}
+
+	return entry;
+}
+
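+/*
+ * mas_find() - On the first call, find the entry at or after mas->index
+ * up to @max; on subsequent calls, find the entry after mas->index.
+ * Return: the entry, or NULL if none is found.
+ */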
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+	if (unlikely(mas_is_paused(mas))) {
+		if (unlikely(mas->last == ULONG_MAX)) {
+			mas->node = MAS_NONE;
+			return NULL;
+		}
+		mas->node = MAS_START;
+		mas->index = ++mas->last;
+	}
+
+	if (unlikely(mas_is_start(mas))) {
+		/* First run or continue */
+		void *entry;
+
+		if (mas->index > max)
+			return NULL;
+
+		entry = mas_walk(mas);
+		if (entry)
+			return entry;
+	}
+
+	if (unlikely(!mas_searchable(mas)))
+		return NULL;
+
+	/* Retries on dead nodes handled by mas_next_entry */
+	return mas_next_entry(mas, max);
+}
\ No newline at end of file
diff --git a/maple_tree.h b/maple_tree.h
new file mode 100644
index 0000000..c4b2828
--- /dev/null
+++ b/maple_tree.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors:     Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
+ *              Matthew Wilcox <willy@xxxxxxxxxxxxx>
+ */
+
+struct maple_tree {
+	union {
+		spinlock_t	ma_lock;
+		lockdep_map_p	ma_external_lock;
+	};
+	void __rcu	*ma_root;
+	unsigned int	ma_flags;
+};
+
+typedef struct maple_enode *maple_enode; /* encoded node */
+typedef struct maple_pnode *maple_pnode; /* parent node */
+
+struct maple_metadata {
+	unsigned char end;
+	unsigned char gap;
+};
+
+struct ma_state {
+	struct maple_tree *tree;	/* The tree we're operating in */
+	unsigned long index;		/* The index we're operating on - range start */
+	unsigned long last;		/* The last index we're operating on - range end */
+	struct maple_enode *node;	/* The node containing this entry */
+	unsigned long min;		/* The minimum index of this node - implied pivot min */
+	unsigned long max;		/* The maximum index of this node - implied pivot max */
+	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
+	unsigned char depth;		/* depth of tree descent during write */
+	unsigned char offset;
+	unsigned char mas_flags;
+};
+
+enum maple_type {
+	maple_dense,
+	maple_leaf_64,
+	maple_range_64,
+	maple_arange_64,
+};
+
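+/* Sentinel values stored in mas->node to track the iteration state */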
+#define MAS_START	((struct maple_enode *)1UL)
+#define MAS_ROOT	((struct maple_enode *)5UL)
+#define MAS_NONE	((struct maple_enode *)9UL)
+#define MAS_PAUSE	((struct maple_enode *)17UL)
+#define MA_ERROR(err) \
+		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end)					\
+	struct ma_state name = {					\
+		.tree = mt,						\
+		.index = first,						\
+		.last = end,						\
+		.node = MAS_START,					\
+		.min = 0,						\
+		.max = ULONG_MAX,					\
+	}
+
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX	15	/* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX	31	/* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK		255UL
+
+#define MT_FLAGS_ALLOC_RANGE	0x01
+#define MT_FLAGS_USE_RCU	0x02
+#define MT_FLAGS_HEIGHT_OFFSET	0x02
+#define MT_FLAGS_HEIGHT_MASK	0x7C
+#define MT_FLAGS_LOCK_MASK	0x300
+#define MT_FLAGS_LOCK_IRQ	0x100
+#define MT_FLAGS_LOCK_BH	0x200
+#define MT_FLAGS_LOCK_EXTERN	0x300
+
+#define MAPLE_HEIGHT_MAX	31
+
+#define MAPLE_NODE_TYPE_MASK	0x0F
+#define MAPLE_NODE_TYPE_SHIFT	0x03
+
+#define MAPLE_RESERVED_RANGE	4096
+
+struct maple_range_64 {
+	struct maple_pnode *parent;
+	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+	union {
+		void __rcu *slot[MAPLE_RANGE64_SLOTS];
+		struct {
+			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+			struct maple_metadata meta;
+		};
+	};
+};
+
+struct maple_arange_64 {
+	struct maple_pnode *parent;
+	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+	unsigned long gap[MAPLE_ARANGE64_SLOTS];
+	struct maple_metadata meta;
+};
+
+struct maple_alloc {
+	unsigned long total;
+	unsigned char node_count;
+	unsigned int request_count;
+	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_node {
+	union {
+		struct {
+			struct maple_pnode *parent;
+			void __rcu *slot[MAPLE_NODE_SLOTS];
+		};
+		struct {
+			void *pad;
+			struct rcu_head rcu;
+			struct maple_enode *piv_parent;
+			unsigned char parent_slot;
+			enum maple_type type;
+			unsigned char slot_len;
+			unsigned int ma_flags;
+		};
+		struct maple_range_64 mr64;
+		struct maple_arange_64 ma64;
+		struct maple_alloc alloc;
+	};
+};
+
+static inline void mas_reset(struct ma_state *mas)
+{
+	mas->node = MAS_START;
+}
+
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+	mas->index = start;
+	mas->last = last;
+	mas->node = MAS_START;
+}
+
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+	mas_set_range(mas, index, index);
+}
+
+static inline bool mas_is_none(struct ma_state *mas)
+{
+	return mas->node == MAS_NONE;
+}
+
+static inline bool mas_is_paused(struct ma_state *mas)
+{
+	return mas->node == MAS_PAUSE;
+}
+
+#endif /*_LINUX_MAPLE_TREE_H */
\ No newline at end of file
diff --git a/maple_tree_vma.h b/maple_tree_vma.h
new file mode 100644
index 0000000..5a9fc46
--- /dev/null
+++ b/maple_tree_vma.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define for_each_vma(vmi, vma)		while ((vma = vma_next(&(vmi))) != NULL)
+
+struct vma_iterator {
+	struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, mm, addr)					\
+	struct vma_iterator name = {					\
+		.mas = {						\
+			.tree = &mm->mm_mt,				\
+			.index = addr,					\
+			.node = MAS_START,				\
+		},							\
+	}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+	return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+	/*
+	 * Uses vma_find() to get the first VMA when the iterator starts.
+	 * Calling mas_next() could skip the first entry.
+	 */
+	return vma_find(vmi, ULONG_MAX);
+}
\ No newline at end of file
diff --git a/xarray.h b/xarray.h
new file mode 100644
index 0000000..2e14ec7
--- /dev/null
+++ b/xarray.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_XARRAY_H
+#define _LINUX_XARRAY_H
+/*
+ * eXtensible Arrays
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@xxxxxxxxxxxxx>
+ *
+ * See Documentation/core-api/xarray.rst for how to use the XArray.
+ */
+
+#define MAX_ERRNO	4095
+#define XA_ZERO_ENTRY		xa_mk_internal(257)
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+
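+/*
+ * The bottom two bits of an entry tag its type: 00 pointer, x1 value,
+ * 10 internal.  xa_mk_internal() builds internal entries.
+ */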
+static inline void *xa_mk_internal(unsigned long v)
+{
+	return (void *)((v << 2) | 2);
+}
+
+static inline bool xa_is_internal(const void *entry)
+{
+	return ((unsigned long)entry & 3) == 2;
+}
+
+static inline bool xa_is_err(const void *entry)
+{
+	return unlikely(xa_is_internal(entry) &&
+			entry >= xa_mk_internal(-MAX_ERRNO));
+}
+
+static inline int xa_err(void *entry)
+{
+	/* xa_to_internal() would not do sign extension. */
+	if (xa_is_err(entry))
+		return (long)entry >> 2;
+	return 0;
+}
+
+static inline bool xa_is_node(const void *entry)
+{
+	return xa_is_internal(entry) && (unsigned long)entry > 4096;
+}
+
+static inline bool xa_is_value(const void *entry)
+{
+	return (unsigned long)entry & 1;
+}
+
+static inline bool xa_is_zero(const void *entry)
+{
+	return unlikely(entry == XA_ZERO_ENTRY);
+}
+
+static inline unsigned long xa_to_internal(const void *entry)
+{
+	return (unsigned long)entry >> 2;
+}
+
+static inline unsigned long xa_to_value(const void *entry)
+{
+	return (unsigned long)entry >> 1;
+}
+
+static inline bool xa_is_advanced(const void *entry)
+{
+	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+#endif /* _LINUX_XARRAY_H */
\ No newline at end of file
-- 
2.33.1

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://listman.redhat.com/mailman/listinfo/crash-utility
Contribution Guidelines: https://github.com/crash-utility/crash/wiki