The patch titled
     Subject: maple_tree: rework mas_wr_slot_store() to be cleaner and more efficient.
has been added to the -mm mm-unstable branch.  Its filename is
     maple_tree-rework-mas_wr_slot_store-to-be-cleaner-and-more-efficient.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/maple_tree-rework-mas_wr_slot_store-to-be-cleaner-and-more-efficient.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Subject: maple_tree: rework mas_wr_slot_store() to be cleaner and more efficient.
Date: Wed, 24 May 2023 11:12:45 +0800

Check whether either of the two slots being overwritten holds an empty
entry, so that mas_update_gap() is only called when a gap may actually be
affected instead of on every write.  Also clean up the code and add
comments.

Link: https://lkml.kernel.org/r/20230524031247.65949-9-zhangpeng.00@xxxxxxxxxxxxx
Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Reviewed-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 lib/maple_tree.c |   53 +++++++++++++++++++++----------------------------------
 1 file changed, 19 insertions(+), 34 deletions(-)

--- a/lib/maple_tree.c~maple_tree-rework-mas_wr_slot_store-to-be-cleaner-and-more-efficient
+++ a/lib/maple_tree.c
@@ -4202,49 +4202,34 @@ done:
 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
-	unsigned long lmax; /* Logical max. */
 	unsigned char offset = mas->offset;
+	bool gap = false;
 
-	if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
-				  (offset != wr_mas->node_end)))
+	if (wr_mas->offset_end - offset != 1)
 		return false;
 
-	if (offset == wr_mas->node_end - 1)
-		lmax = mas->max;
-	else
-		lmax = wr_mas->pivots[offset + 1];
-
-	/* going to overwrite too many slots. */
-	if (lmax < mas->last)
-		return false;
+	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
+	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
 
-	if (wr_mas->r_min == mas->index) {
-		/* overwriting two or more ranges with one. */
-		if (lmax == mas->last)
-			return false;
-
-		/* Overwriting all of offset and a portion of offset + 1. */
+	if (mas->index == wr_mas->r_min) {
+		/* Overwriting the range and over a part of the next range. */
 		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
 		wr_mas->pivots[offset] = mas->last;
-		goto done;
+	} else {
+		/* Overwriting a part of the range and over the next range */
+		rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
+		wr_mas->pivots[offset] = mas->index - 1;
+		mas->offset++; /* Keep mas accurate. */
 	}
 
-	/* Doesn't end on the next range end. */
-	if (lmax != mas->last)
-		return false;
-
-	/* Overwriting a portion of offset and all of offset + 1 */
-	if ((offset + 1 < mt_pivots[wr_mas->type]) &&
-	    (wr_mas->entry || wr_mas->pivots[offset + 1]))
-		wr_mas->pivots[offset + 1] = mas->last;
-
-	rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
-	wr_mas->pivots[offset] = mas->index - 1;
-	mas->offset++; /* Keep mas accurate. */
-
-done:
 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
-	mas_update_gap(mas);
+	/*
+	 * Only update gap when the new entry is empty or there is an empty
+	 * entry in the original two ranges.
+	 */
+	if (!wr_mas->entry || gap)
+		mas_update_gap(mas);
+
 	return true;
 }
 
@@ -4391,7 +4376,7 @@ static inline void mas_wr_modify(struct
 	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
 		return;
 
-	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
+	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
 		return;
 	else if (mas_wr_node_store(wr_mas))
 		return;
_

Patches currently in -mm which might be from zhangpeng.00@xxxxxxxxxxxxx are

radix-tree-move-declarations-to-header-fix.patch
maple_tree-fix-potential-out-of-bounds-access-in-mas_wr_end_piv.patch
maple_tree-rework-mtree_alloc_rangerrange.patch
maple_tree-drop-mas_rev_alloc-and-mas_fill_gap.patch
maple_tree-fix-the-arguments-to-__must_hold.patch
maple_tree-simplify-mas_is_span_wr.patch
maple_tree-make-the-code-symmetrical-in-mas_wr_extend_null.patch
maple_tree-add-mas_wr_new_end-to-calculate-new_end-accurately.patch
maple_tree-add-comments-and-some-minor-cleanups-to-mas_wr_append.patch
maple_tree-rework-mas_wr_slot_store-to-be-cleaner-and-more-efficient.patch
maple_tree-simplify-and-clean-up-mas_wr_node_store.patch
maple_tree-relocate-the-declaration-of-mas_empty_area_rev.patch
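
As a rough illustration of the conditional gap-update pattern the patch
introduces, the stand-alone C sketch below counts how often the expensive
update path runs.  It is only a simplified analogy, not maple tree code:
slot_store_demo(), update_gap() and update_gap_calls are made-up names, and
the actual slot store is reduced to a single assignment.

#include <stdbool.h>
#include <stdio.h>

static int update_gap_calls;	/* counts how often the expensive path runs */

/* Stand-in for the expensive metadata update (mas_update_gap() in the patch). */
static void update_gap(void)
{
	update_gap_calls++;
}

/*
 * Overwrite slots[offset] (and, conceptually, part of slots[offset + 1])
 * with 'entry'.  A NULL slot represents an empty range (a gap).
 */
static void slot_store_demo(void **slots, int offset, void *entry)
{
	bool gap = false;

	gap |= !slots[offset];		/* was the first range empty? */
	gap |= !slots[offset + 1];	/* was the second range empty? */

	slots[offset] = entry;		/* greatly simplified store */

	/* Gap metadata can only change if an empty entry is involved. */
	if (!entry || gap)
		update_gap();
}

int main(void)
{
	int a = 1, b = 2;
	void *slots[2] = { &a, &b };

	slot_store_demo(slots, 0, &a);	/* non-NULL over non-NULL: skipped */
	slot_store_demo(slots, 0, NULL);	/* storing NULL: update runs */

	printf("update_gap() ran %d time(s)\n", update_gap_calls);
	return 0;
}

Built with any C compiler, this reports one gap update for two stores,
mirroring the patch's intent: the update only runs when the new entry is
empty or one of the overwritten entries was already empty.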