The quilt patch titled
     Subject: maple_tree: add mas_prev_range() and mas_find_range_rev interface
has been removed from the -mm tree.  Its filename was
     maple_tree-add-mas_prev_range-and-mas_find_range_rev-interface.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Subject: maple_tree: add mas_prev_range() and mas_find_range_rev interface
Date: Thu, 18 May 2023 10:55:40 -0400

Some users of the maple tree may want to move to the previous range
regardless of the value stored there.  Add this interface as well as the
'find' variant to support walking to the first value, then iterating over
the previous ranges.

Link: https://lkml.kernel.org/r/20230518145544.1722059-32-Liam.Howlett@xxxxxxxxxx
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Cc: Vernon Yang <vernon2gm@xxxxxxxxx>
Cc: David Binderman <dcb314@xxxxxxxxxxx>
Cc: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Cc: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Cc: Wei Yang <richard.weiyang@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/maple_tree.h |    2 
 lib/maple_tree.c           |  161 ++++++++++++++++++++++++++---------
 2 files changed, 124 insertions(+), 39 deletions(-)

--- a/include/linux/maple_tree.h~maple_tree-add-mas_prev_range-and-mas_find_range_rev-interface
+++ a/include/linux/maple_tree.h
@@ -457,6 +457,7 @@ void mas_store_prealloc(struct ma_state
 void *mas_find(struct ma_state *mas, unsigned long max);
 void *mas_find_range(struct ma_state *mas, unsigned long max);
 void *mas_find_rev(struct ma_state *mas, unsigned long min);
+void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
 int mas_preallocate(struct ma_state *mas, gfp_t gfp);
 bool mas_is_err(struct ma_state *mas);
 
@@ -467,6 +468,7 @@ void mas_destroy(struct ma_state *mas);
 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
 
 void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long max);
 void *mas_next(struct ma_state *mas, unsigned long max);
 void *mas_next_range(struct ma_state *mas, unsigned long max);
 
--- a/lib/maple_tree.c~maple_tree-add-mas_prev_range-and-mas_find_range_rev-interface
+++ a/lib/maple_tree.c
@@ -5919,18 +5919,8 @@ void *mt_next(struct maple_tree *mt, uns
 }
 EXPORT_SYMBOL_GPL(mt_next);
 
-/**
- * mas_prev() - Get the previous entry
- * @mas: The maple state
- * @min: The minimum value to check.
- *
- * Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
- * searchable nodes.
- *
- * Return: the previous value or %NULL.
- */
-void *mas_prev(struct ma_state *mas, unsigned long min)
+static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
+		void **entry)
 {
 	if (mas->index <= min)
 		goto none;
@@ -5948,7 +5938,8 @@ void *mas_prev(struct ma_state *mas, uns
 		if (!mas->index)
 			goto none;
 		mas->index = mas->last = 0;
-		return mas_root(mas);
+		*entry = mas_root(mas);
+		return true;
 	}
 
 	if (mas_is_none(mas)) {
@@ -5956,19 +5947,65 @@ void *mas_prev(struct ma_state *mas, uns
 			/* Walked to out-of-range pointer? */
 			mas->index = mas->last = 0;
 			mas->node = MAS_ROOT;
-			return mas_root(mas);
+			*entry = mas_root(mas);
+			return true;
 		}
-		return NULL;
+		return true;
 	}
-	return mas_prev_slot(mas, min, false);
+
+	return false;
 
 none:
 	mas->node = MAS_NONE;
-	return NULL;
+	return true;
+}
+
+/**
+ * mas_prev() - Get the previous entry
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
+ * searchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev(struct ma_state *mas, unsigned long min)
+{
+	void *entry = NULL;
+
+	if (mas_prev_setup(mas, min, &entry))
+		return entry;
+
+	return mas_prev_slot(mas, min, false);
 }
 EXPORT_SYMBOL_GPL(mas_prev);
 
 /**
+ * mas_prev_range() - Advance to the previous range
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
+ * searchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev_range(struct ma_state *mas, unsigned long min)
+{
+	void *entry = NULL;
+
+	if (mas_prev_setup(mas, min, &entry))
+		return entry;
+
+	return mas_prev_slot(mas, min, true);
+}
+EXPORT_SYMBOL_GPL(mas_prev_range);
+
+/**
  * mt_prev() - get the previous value in the maple tree
  * @mt: The maple tree
  * @index: The start index
@@ -6114,20 +6151,18 @@ void *mas_find_range(struct ma_state *ma
 EXPORT_SYMBOL_GPL(mas_find_range);
 
 /**
- * mas_find_rev: On the first call, find the first non-null entry at or below
- * mas->index down to %min.  Otherwise find the first non-null entry below
- * mas->index down to %min.
+ * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
  * @mas: The maple state
- * @min: The minimum value to check.
- *
- * Must hold rcu_read_lock or the write lock.
- * If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * @min: The minimum index
+ * @entry: Pointer to the entry
  *
- * Return: The entry or %NULL.
+ * Returns: True if entry is the answer, false otherwise.
  */
-void *mas_find_rev(struct ma_state *mas, unsigned long min)
+static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+		void **entry)
 {
+	*entry = NULL;
+
 	if (unlikely(mas_is_none(mas))) {
 		if (mas->index <= min)
 			goto none;
@@ -6139,7 +6174,7 @@ void *mas_find_rev(struct ma_state *mas,
 	if (unlikely(mas_is_paused(mas))) {
 		if (unlikely(mas->index <= min)) {
 			mas->node = MAS_NONE;
-			return NULL;
+			return true;
 		}
 		mas->node = MAS_START;
 		mas->last = --mas->index;
@@ -6147,14 +6182,12 @@ void *mas_find_rev(struct ma_state *mas,
 
 	if (unlikely(mas_is_start(mas))) {
 		/* First run or continue */
-		void *entry;
-
 		if (mas->index < min)
-			return NULL;
+			return true;
 
-		entry = mas_walk(mas);
-		if (entry)
-			return entry;
+		*entry = mas_walk(mas);
+		if (*entry)
+			return true;
 	}
 
 	if (unlikely(!mas_searchable(mas))) {
@@ -6168,23 +6201,73 @@ void *mas_find_rev(struct ma_state *mas,
 			 */
 			mas->last = mas->index = 0;
 			mas->node = MAS_ROOT;
-			return mas_root(mas);
+			*entry = mas_root(mas);
+			return true;
 		}
 	}
 
 	if (mas->index < min)
-		return NULL;
+		return true;
 
-	/* Retries on dead nodes handled by mas_prev_slot */
-	return mas_prev_slot(mas, min, false);
+	return false;
 
 none:
 	mas->node = MAS_NONE;
-	return NULL;
+	return true;
+}
+
+/**
+ * mas_find_rev: On the first call, find the first non-null entry at or below
+ * mas->index down to %min.  Otherwise find the first non-null entry below
+ * mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_rev(struct ma_state *mas, unsigned long min)
+{
+	void *entry;
+
+	if (mas_find_rev_setup(mas, min, &entry))
+		return entry;
+
+	/* Retries on dead nodes handled by mas_prev_slot */
+	return mas_prev_slot(mas, min, false);
+
 }
 EXPORT_SYMBOL_GPL(mas_find_rev);
 
 /**
+ * mas_find_range_rev: On the first call, find the first non-null entry at or
+ * below mas->index down to %min.  Otherwise advance to the previous slot after
+ * mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
+{
+	void *entry;
+
+	if (mas_find_rev_setup(mas, min, &entry))
+		return entry;
+
+	/* Retries on dead nodes handled by mas_prev_slot */
+	return mas_prev_slot(mas, min, true);
+}
+EXPORT_SYMBOL_GPL(mas_find_range_rev);
+
+/**
  * mas_erase() - Find the range in which index resides and erase the entire
  * range.
 * @mas: The maple state
_

Patches currently in -mm which might be from Liam.Howlett@xxxxxxxxxx are

mm-mprotect-fix-do_mprotect_pkey-limit-check.patch
maple_tree-add-benchmarking-for-mas_for_each.patch
maple_tree-add-benchmarking-for-mas_prev.patch
mm-move-unmap_vmas-declaration-to-internal-header.patch
mm-change-do_vmi_align_munmap-side-tree-index.patch
mm-remove-prev-check-from-do_vmi_align_munmap.patch
maple_tree-introduce-__mas_set_range.patch
mm-remove-re-walk-from-mmap_region.patch
maple_tree-re-introduce-entry-to-mas_preallocate-arguments.patch
mm-use-vma_iter_clear_gfp-in-nommu.patch
mm-set-up-vma-iterator-for-vma_iter_prealloc-calls.patch
maple_tree-move-mas_wr_end_piv-below-mas_wr_extend_null.patch
maple_tree-update-mas_preallocate-testing.patch
maple_tree-refine-mas_preallocate-node-calculations.patch
mm-mmap-change-vma-iteration-order-in-do_vmi_align_munmap.patch
userfaultfd-fix-regression-in-userfaultfd_unmap_prep.patch
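
As a quick illustration of how the two new interfaces fit together, here is a
minimal usage sketch (not part of the patch): it walks to the highest non-NULL
entry with mas_find_range_rev(), then steps downward with mas_prev_range(),
which visits every range, occupied or empty.  The helper name
dump_ranges_in_reverse(), the pr_info() reporting, and the choice of 0 as the
lower bound are illustrative assumptions only.

#include <linux/maple_tree.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

/* Illustrative only: report every range of @mt from the top down. */
static void dump_ranges_in_reverse(struct maple_tree *mt)
{
        MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
        void *entry;

        rcu_read_lock();
        /* First call: find the highest non-NULL entry at or below ULONG_MAX. */
        entry = mas_find_range_rev(&mas, 0);
        if (!entry) {
                rcu_read_unlock();
                return;         /* nothing stored in [0, ULONG_MAX] */
        }

        for (;;) {
                /* mas.index/mas.last describe the range just walked. */
                pr_info("range [%lu, %lu] -> %p\n", mas.index, mas.last, entry);
                if (!mas.index)
                        break;  /* the lowest range starts at 0; done */
                /* Move to the previous range regardless of what it holds. */
                entry = mas_prev_range(&mas, 0);
        }
        rcu_read_unlock();
}

Because mas_prev_range() returns whatever the range holds (possibly NULL), the
loop above is bounded by mas.index reaching 0 rather than by the return value;
the existing mas_find_rev()/mas_prev() pair remains the right choice when only
occupied entries are of interest.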