* Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx> [230425 07:05]:
> From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
>
> The previous changes to the gap searching made this testing fail.
> Unfortunately, there was not a safe update order, so fix the testing
> now.

The testing will need to be updated to use the mas_ family of functions
instead when the mtree_alloc_*() is dropped.

>
> Fixes: e15e06a83923 ("lib/test_maple_tree: add testing for maple tree")
> Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
> Co-developed-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
> Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
> ---
>  lib/test_maple_tree.c | 30 ++++++++++++++++++++++--------
>  1 file changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
> index f1db333270e9f..30f2ebff95d75 100644
> --- a/lib/test_maple_tree.c
> +++ b/lib/test_maple_tree.c
> @@ -102,7 +102,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
>  	unsigned long result = expected + 1;
>  	int ret;
>
> -	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1,
> +	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
>  				GFP_KERNEL);
>  	MT_BUG_ON(mt, ret != eret);
>  	if (ret)
> @@ -680,7 +680,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
>  		0,			/* Return value success. */
>
>  		0x0,			/* Min */
> -		0x565234AF1 << 12,	/* Max */
> +		0x565234AF0 << 12,	/* Max */
>  		0x3000,			/* Size */
>  		0x565234AEE << 12,	/* max - 3. */
>  		0,			/* Return value success. */
> @@ -692,14 +692,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
>  		0,			/* Return value success. */
>
>  		0x0,			/* Min */
> -		0x7F36D510A << 12,	/* Max */
> +		0x7F36D5109 << 12,	/* Max */
>  		0x4000,			/* Size */
>  		0x7F36D5106 << 12,	/* First rev hole of size 0x4000 */
>  		0,			/* Return value success. */
>
>  		/* Ascend test. */
>  		0x0,
> -		34148798629 << 12,
> +		34148798628 << 12,
>  		19 << 12,
>  		34148797418 << 12,
>  		0x0,
> @@ -711,6 +711,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
>  		0x0,
>  		-EBUSY,
>
> +		/* Single space test. */
> +		34148798725 << 12,
> +		34148798725 << 12,
> +		1 << 12,
> +		34148798725 << 12,
> +		0,
>  	};
>
>  	int i, range_count = ARRAY_SIZE(range);
> @@ -759,9 +765,9 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
>  	mas_unlock(&mas);
>  	for (i = 0; i < req_range_count; i += 5) {
>  #if DEBUG_REV_RANGE
> -		pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n",
> -				req_range[i] >> 12,
> -				(req_range[i + 1] >> 12) - 1,
> +		pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
> +				i, req_range[i] >> 12,
> +				(req_range[i + 1] >> 12),
>  				req_range[i+2] >> 12,
>  				req_range[i+3] >> 12);
>  #endif
> @@ -777,6 +783,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
>
>  	mt_set_non_kernel(1);
>  	mtree_erase(mt, 34148798727); /* create a deleted range. */
> +	mtree_erase(mt, 34148798725);
>  	check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
>  			34148798725, 0, mt);
>
> @@ -880,6 +887,13 @@ static noinline void check_alloc_range(struct maple_tree *mt)
>  		4503599618982063UL << 12,	/* Size */
>  		34359052178 << 12,		/* Expected location */
>  		-EBUSY,				/* Return failure. */
> +
> +		/* Test a single entry */
> +		34148798648 << 12,		/* Min */
> +		34148798648 << 12,		/* Max */
> +		4096,				/* Size of 1 */
> +		34148798648 << 12,		/* Location is the same as min/max */
> +		0,				/* Success */
>  	};
>  	int i, range_count = ARRAY_SIZE(range);
>  	int req_range_count = ARRAY_SIZE(req_range);
> @@ -2660,7 +2674,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
>  	MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
>
>  	mas_reset(&mas);
> -	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
> +	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL);
>
>  	mas_reset(&mas);
>  	mas_empty_area(&mas, 100, 165, 3);
> --
> 2.20.1
>
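Since the mas_ conversion is coming anyway, here is a rough sketch of what
a mas_-based reverse-allocation check might look like.  This is only an
illustration, not part of the patch: the helper name
check_mas_alloc_rrange() is made up, and it assumes mas_empty_area_rev()
leaves mas.last at the end of the located span, so the chosen start is
mas.last + 1 - size, the same way the mm/mmap.c callers use it.

/*
 * Hypothetical sketch, not part of this patch: a mas_-based stand-in
 * for check_mtree_alloc_rrange().  The helper name and the assumption
 * that mas_empty_area_rev() leaves mas.last at the end of the found
 * span are mine, not confirmed against the final conversion.
 */
static noinline void check_mas_alloc_rrange(struct maple_tree *mt,
		unsigned long start, unsigned long end, unsigned long size,
		unsigned long expected, int eret, void *ptr)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area_rev(&mas, start, end, size);
	MT_BUG_ON(mt, ret != eret);
	if (!ret) {
		/* Highest start that fits a span of @size in [start, end]. */
		MT_BUG_ON(mt, mas.last + 1 - size != expected);
		/* Store the entry over the span that was just located. */
		mas.index = mas.last + 1 - size;
		MT_BUG_ON(mt, mas_store_gfp(&mas, ptr, GFP_KERNEL) != 0);
	}
	mas_unlock(&mas);
}

If the min/max semantics line up with what the req_range table expects
after this fix, the existing table entries could drive such a helper
directly, but that would need to be re-checked when the conversion
actually lands.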