Not all users of mas_store() enter with nodes already preallocated. Check
for the MA_STATE_PREALLOC flag to decide whether to preallocate nodes
within mas_store() rather than relying on future write helper functions to
perform the allocations. This allows the write helper functions to be
simplified, as they no longer need to check that enough nodes have been
allocated to perform the write.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
---
 lib/maple_tree.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 98c64aaedb55..46bdc4ce6662 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5536,13 +5536,12 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
  * @entry: The entry to store.
  *
  * The @mas->index and @mas->last is used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
  *
  * Return: the first entry between mas->index and mas->last or %NULL.
  */
 void *mas_store(struct ma_state *mas, void *entry)
 {
+	int request;
 	MA_WR_STATE(wr_mas, mas, entry);
 
 	trace_ma_write(__func__, mas, 0, entry);
@@ -5565,7 +5564,24 @@ void *mas_store(struct ma_state *mas, void *entry)
 	 */
 	mas_wr_prealloc_setup(&wr_mas);
 	mas_wr_store_type(&wr_mas);
+	WARN_ON_ONCE(mas->store_type == wr_invalid);
+	if (mas->mas_flags & MA_STATE_PREALLOC) {
+		mas_wr_store_entry(&wr_mas);
+		MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+		return wr_mas.content;
+	}
+
+	request = mas_prealloc_calc(mas, entry);
+	if (!request)
+		goto store;
+
+	mas_node_count(mas, request);
+	if (mas_is_err(mas))
+		return NULL;
+
+store:
 	mas_wr_store_entry(&wr_mas);
+	mas_destroy(mas);
 	return wr_mas.content;
 }
 EXPORT_SYMBOL_GPL(mas_store);
-- 
2.45.2
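
A minimal caller sketch of the new behaviour (not part of the patch; example_store(), example_tree, and the blanket -ENOMEM mapping below are hypothetical and only illustrate the calling convention): with this change a caller holding the tree lock no longer needs to preallocate via mas_expected_entries() or set MA_STATE_PREALLOC itself, since mas_store() requests the nodes it needs and cleans up any leftovers through mas_destroy().

#include <linux/maple_tree.h>
#include <linux/errno.h>

static DEFINE_MTREE(example_tree);	/* hypothetical tree for illustration */

/* Store @entry over [index, last] without any external preallocation. */
static int example_store(unsigned long index, unsigned long last, void *entry)
{
	MA_STATE(mas, &example_tree, index, last);
	int ret = 0;

	mtree_lock(&example_tree);
	mas_store(&mas, entry);		/* allocates nodes internally as needed */
	if (mas_is_err(&mas))
		ret = -ENOMEM;		/* assumed mapping: allocation failure */
	mtree_unlock(&example_tree);

	return ret;
}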