Since the maple tree is inclusive in range, ensure that a range of 1
(min = max) works for searching for a gap in either direction, and make
sure the size is at least 1 and fits within the inclusive span between
min and max (max - min + 1).

This commit also updates the testing.  Unfortunately there is no way to
update the tests and the code separately without introducing a test
failure, so both changes land together in this commit.

Suggested-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
 lib/maple_tree.c      | 20 +++++++++++++-------
 lib/test_maple_tree.c | 28 +++++++++++++++++++++-------
 2 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 425ad922bb2d6..580310741d892 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5282,7 +5282,10 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
 	unsigned long *pivots;
 	enum maple_type mt;
 
-	if (min >= max)
+	if (min > max)
+		return -EINVAL;
+
+	if (size == 0 || max - min < size - 1)
 		return -EINVAL;
 
 	if (mas_is_start(mas))
@@ -5337,7 +5340,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 {
 	struct maple_enode *last = mas->node;
 
-	if (min >= max)
+	if (min > max)
+		return -EINVAL;
+
+	if (size == 0 || max - min < size - 1)
 		return -EINVAL;
 
 	if (mas_is_start(mas)) {
@@ -5373,7 +5379,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 		return -EBUSY;
 
 	/* Trim the upper limit to the max. */
-	if (max <= mas->last)
+	if (max < mas->last)
 		mas->last = max;
 
 	mas->index = mas->last - size + 1;
@@ -6409,7 +6415,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
 {
 	int ret = 0;
 
-	MA_STATE(mas, mt, min, max - size);
+	MA_STATE(mas, mt, min, min);
 	if (!mt_is_alloc(mt))
 		return -EINVAL;
 
@@ -6429,7 +6435,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
 retry:
 	mas.offset = 0;
 	mas.index = min;
-	mas.last = max - size;
+	mas.last = max - size + 1;
 	ret = mas_alloc(&mas, entry, size, startp);
 	if (mas_nomem(&mas, gfp))
 		goto retry;
@@ -6445,14 +6451,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
 {
 	int ret = 0;
 
-	MA_STATE(mas, mt, min, max - size);
+	MA_STATE(mas, mt, min, max - size + 1);
 	if (!mt_is_alloc(mt))
 		return -EINVAL;
 
 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
 		return -EINVAL;
 
-	if (min >= max)
+	if (min > max)
 		return -EINVAL;
 
 	if (max < size - 1)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 19b130c9dddea..f167d6ef81591 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -123,7 +123,7 @@ static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
 	unsigned long result = expected + 1;
 	int ret;
 
-	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1,
+	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
 			GFP_KERNEL);
 	MT_BUG_ON(mt, ret != eret);
 	if (ret)
@@ -701,7 +701,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
 			0,		/* Return value success. */
 
 			0x0,		/* Min */
-			0x565234AF1 << 12,	/* Max */
+			0x565234AF0 << 12,	/* Max */
 			0x3000,		/* Size */
 			0x565234AEE << 12,  /* max - 3. */
 			0,		/* Return value success. */
@@ -713,14 +713,14 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
 			0,		/* Return value success. */
 
 			0x0,		/* Min */
-			0x7F36D510A << 12,	/* Max */
+			0x7F36D5109 << 12,	/* Max */
 			0x4000,		/* Size */
 			0x7F36D5106 << 12,	/* First rev hole of size 0x4000 */
 			0,		/* Return value success. */
 
 			/* Ascend test. */
 			0x0,
-			34148798629 << 12,
+			34148798628 << 12,
 			19 << 12,
 			34148797418 << 12,
 			0x0,
@@ -732,6 +732,12 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
 			0x0,
 			-EBUSY,
 
+			/* Single space test. */
+			34148798725 << 12,
+			34148798725 << 12,
+			1 << 12,
+			34148798725 << 12,
+			0,
 	};
 
 	int i, range_count = ARRAY_SIZE(range);
@@ -780,9 +786,9 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
 	mas_unlock(&mas);
 	for (i = 0; i < req_range_count; i += 5) {
 #if DEBUG_REV_RANGE
-		pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n",
-				req_range[i] >> 12,
-				(req_range[i + 1] >> 12) - 1,
+		pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
+				i, req_range[i] >> 12,
+				(req_range[i + 1] >> 12),
 				req_range[i+2] >> 12,
 				req_range[i+3] >> 12);
 #endif
@@ -798,6 +804,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
 
 	mt_set_non_kernel(1);
 	mtree_erase(mt, 34148798727); /* create a deleted range. */
+	mtree_erase(mt, 34148798725);
 	check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
 			34148798725, 0, mt);
 
@@ -901,6 +908,13 @@ static noinline void __init check_alloc_range(struct maple_tree *mt)
 			4503599618982063UL << 12,  /* Size */
 			34359052178 << 12,  /* Expected location */
 			-EBUSY,  /* Return failure. */
+
+			/* Test a single entry */
+			34148798648 << 12,	/* Min */
+			34148798648 << 12,	/* Max */
+			4096,			/* Size of 1 */
+			34148798648 << 12,	/* Location is the same as min/max */
+			0,			/* Success */
 	};
 	int i, range_count = ARRAY_SIZE(range);
 	int req_range_count = ARRAY_SIZE(req_range);
-- 
2.39.2
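
As an illustrative aside, and not part of the patch itself: the bounds check
added above to mas_empty_area() and mas_empty_area_rev() can be restated as a
small standalone helper.  The helper name empty_area_request_valid() is
hypothetical and not a maple tree API; the sketch only shows why, with
inclusive [min, max] bounds, a request is valid exactly when size >= 1 and
size - 1 <= max - min (the range holds max - min + 1 slots).

#include <stdbool.h>

/*
 * Hypothetical helper mirroring the argument validation added by this
 * patch.  [min, max] is an inclusive range, so it spans max - min + 1
 * slots.
 */
static bool empty_area_request_valid(unsigned long min, unsigned long max,
				     unsigned long size)
{
	if (min > max)		/* inverted range */
		return false;

	/* size must be at least 1 and must fit in the inclusive span. */
	if (size == 0 || max - min < size - 1)
		return false;

	return true;		/* e.g. min == max with size == 1 is accepted */
}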