The quilt patch titled
     Subject: maple_tree: drop mas_{rev_}alloc() and mas_fill_gap()
has been removed from the -mm tree. Its filename was
     maple_tree-drop-mas_rev_alloc-and-mas_fill_gap.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Subject: maple_tree: drop mas_{rev_}alloc() and mas_fill_gap()
Date: Wed, 24 May 2023 11:12:39 +0800

mas_{rev_}alloc() and mas_fill_gap() are no longer used, delete them.

Link: https://lkml.kernel.org/r/20230524031247.65949-3-zhangpeng.00@xxxxxxxxxxxxx
Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Reviewed-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 lib/maple_tree.c |  108 ---------------------------------------------
 1 file changed, 108 deletions(-)

--- a/lib/maple_tree.c~maple_tree-drop-mas_rev_alloc-and-mas_fill_gap
+++ a/lib/maple_tree.c
@@ -5121,46 +5121,6 @@ static inline void mas_awalk(struct ma_s
 }
 
 /*
- * mas_fill_gap() - Fill a located gap with @entry.
- * @mas: The maple state
- * @entry: The value to store
- * @slot: The offset into the node to store the @entry
- * @size: The size of the entry
- * @index: The start location
- */
-static inline void mas_fill_gap(struct ma_state *mas, void *entry,
-		unsigned char slot, unsigned long size, unsigned long *index)
-{
-	MA_WR_STATE(wr_mas, mas, entry);
-	unsigned char pslot = mte_parent_slot(mas->node);
-	struct maple_enode *mn = mas->node;
-	unsigned long *pivots;
-	enum maple_type ptype;
-	/*
-	 * mas->index is the start address for the search
-	 * which may no longer be needed.
-	 * mas->last is the end address for the search
-	 */
-
-	*index = mas->index;
-	mas->last = mas->index + size - 1;
-
-	/*
-	 * It is possible that using mas->max and mas->min to correctly
-	 * calculate the index and last will cause an issue in the gap
-	 * calculation, so fix the ma_state here
-	 */
-	mas_ascend(mas);
-	ptype = mte_node_type(mas->node);
-	pivots = ma_pivots(mas_mn(mas), ptype);
-	mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
-	mas->min = mas_safe_min(mas, pivots, pslot);
-	mas->node = mn;
-	mas->offset = slot;
-	mas_wr_store_entry(&wr_mas);
-}
-
-/*
  * mas_sparse_area() - Internal function. Return upper or lower limit when
  * searching for a gap in an empty tree.
  * @mas: The maple state
@@ -5307,74 +5267,6 @@ int mas_empty_area_rev(struct ma_state *
 }
 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
 
-static inline int mas_alloc(struct ma_state *mas, void *entry,
-		unsigned long size, unsigned long *index)
-{
-	unsigned long min;
-
-	mas_start(mas);
-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
-		mas_root_expand(mas, entry);
-		if (mas_is_err(mas))
-			return xa_err(mas->node);
-
-		if (!mas->index)
-			return mas_pivot(mas, 0);
-		return mas_pivot(mas, 1);
-	}
-
-	/* Must be walking a tree. */
-	mas_awalk(mas, size);
-	if (mas_is_err(mas))
-		return xa_err(mas->node);
-
-	if (mas->offset == MAPLE_NODE_SLOTS)
-		goto no_gap;
-
-	/*
-	 * At this point, mas->node points to the right node and we have an
-	 * offset that has a sufficient gap.
-	 */
-	min = mas->min;
-	if (mas->offset)
-		min = mas_pivot(mas, mas->offset - 1) + 1;
-
-	if (mas_is_err(mas))
-		return xa_err(mas->node);
-
-	if (mas->index < min)
-		mas->index = min;
-
-	mas_fill_gap(mas, entry, mas->offset, size, index);
-	return 0;
-
-no_gap:
-	return -EBUSY;
-}
-
-static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
-		unsigned long max, void *entry,
-		unsigned long size, unsigned long *index)
-{
-	int ret = 0;
-
-	ret = mas_empty_area_rev(mas, min, max, size);
-	if (ret)
-		return ret;
-
-	if (mas_is_err(mas))
-		return xa_err(mas->node);
-
-	if (mas->offset == MAPLE_NODE_SLOTS)
-		goto no_gap;
-
-	mas_fill_gap(mas, entry, mas->offset, size, index);
-	return 0;
-
-no_gap:
-	return -EBUSY;
-}
-
 /*
  * mte_dead_leaves() - Mark all leaves of a node as dead.
  * @mas: The maple state
_

Patches currently in -mm which might be from zhangpeng.00@xxxxxxxxxxxxx are
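
For reference, the behaviour the dropped helpers implemented (find a
gap of a given size, then store an entry into it) remains reachable
through the tree's public interface. A minimal sketch of that path,
assuming a tree created with MT_FLAGS_ALLOC_RANGE so gaps are tracked;
the demo_* names and the [0, 1000] search window are illustrative, not
from the patch:

	#include <linux/maple_tree.h>
	#include <linux/gfp.h>

	/* Gap tracking is only done for allocation-range trees. */
	static struct maple_tree demo_mt =
		MTREE_INIT(demo_mt, MT_FLAGS_ALLOC_RANGE);

	static int demo_store_in_gap(void *entry, unsigned long size)
	{
		unsigned long start;
		int ret;

		/*
		 * Find the lowest empty gap of @size slots within
		 * [0, 1000] and store @entry over it. Fails with
		 * -EBUSY when no gap of that size exists.
		 */
		ret = mtree_alloc_range(&demo_mt, &start, entry, size,
					0, 1000, GFP_KERNEL);
		if (ret)
			return ret;

		/* On success, @start holds the base of the new range. */
		return 0;
	}

mtree_alloc_rrange() is the top-down counterpart, covering the search
direction that mas_rev_alloc() used to handle internally via
mas_empty_area_rev().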