Signed-off-by: Yajun Deng <yajun.deng@xxxxxxxxx>
---
v3: reserve the current range immediately if slab is unavailable.
v2: remove the changes to memblock_double_array.
v1: https://lore.kernel.org/all/20230927013752.2515238-1-yajun.deng@xxxxxxxxx/
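
For reviewers, a minimal user-space sketch of the re-entrancy case the
v3 change is about (illustrative only, not kernel code; the names
region_type, add_range, double_array and slab_up are stand-ins, not
memblock symbols): before slab is available, growing the reserved array
consumes memblock memory that itself has to be reserved, which re-enters
the add path, so the outer call cannot keep iterating its stale state
and instead restarts via memblock_reserve().

  #include <stdio.h>
  #include <stdbool.h>

  struct region_type { int cnt, max; };

  static bool slab_up;                        /* false in early boot */
  static struct region_type reserved = { .cnt = 0, .max = 4 };

  static int add_range(struct region_type *type, const char *what);

  /* Grow the array; pre-slab, the new array itself must be reserved. */
  static int double_array(struct region_type *type)
  {
          type->max *= 2;
          printf("double_array: max is now %d\n", type->max);
          if (!slab_up)
                  return add_range(&reserved, "new array"); /* nested call */
          return 0;
  }

  static int add_range(struct region_type *type, const char *what)
  {
          if (type->cnt == type->max) {
                  if (double_array(type))
                          return -1;
                  /*
                   * The nested call above may have changed the array the
                   * caller was iterating; the real patch bails out here
                   * and redoes the reservation of the whole range.
                   */
          }
          type->cnt++;
          printf("add_range: %-12s cnt=%d/%d\n", what, type->cnt, type->max);
          return 0;
  }

  int main(void)
  {
          for (int i = 0; i < 6; i++)
                  add_range(&reserved, "caller range");
          return 0;
  }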
---
mm/memblock.c | 93 +++++++++++++++++++++++----------------------------
1 file changed, 41 insertions(+), 52 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a88d6d24d79..71449c0b8bc8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -588,11 +588,12 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
int nid, enum memblock_flags flags)
{
- bool insert = false;
phys_addr_t obase = base;
phys_addr_t end = base + memblock_cap_size(base, &size);
- int idx, nr_new, start_rgn = -1, end_rgn;
+ int idx, start_rgn = -1, end_rgn;
struct memblock_region *rgn;
+ int use_slab = slab_is_available();
+ unsigned long ocnt = type->cnt;
if (!size)
return 0;
@@ -608,25 +609,6 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
return 0;
}
- /*
- * The worst case is when new range overlaps all existing regions,
- * then we'll need type->cnt + 1 empty regions in @type. So if
- * type->cnt * 2 + 1 is less than or equal to type->max, we know
- * that there is enough empty regions in @type, and we can insert
- * regions directly.
- */
- if (type->cnt * 2 + 1 <= type->max)
- insert = true;
-
-repeat:
- /*
- * The following is executed twice. Once with %false @insert and
- * then with %true. The first counts the number of regions needed
- * to accommodate the new area. The second actually inserts them.
- */
- base = obase;
- nr_new = 0;
-
for_each_memblock_type(idx, type, rgn) {
phys_addr_t rbase = rgn->base;
phys_addr_t rend = rbase + rgn->size;
@@ -644,15 +626,30 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
WARN_ON(nid != memblock_get_region_node(rgn));
#endif
WARN_ON(flags != rgn->flags);
- nr_new++;
- if (insert) {
- if (start_rgn == -1)
- start_rgn = idx;
- end_rgn = idx + 1;
- memblock_insert_region(type, idx++, base,
- rbase - base, nid,
- flags);
+
+ /*
+ * If type->cnt is equal to type->max, there are no empty
+ * regions left and the array needs to be resized.
+ * Otherwise, insert the region directly.
+ *
+ * If slab is unavailable, a new array was reserved in
+ * memblock_double_array(). That is a nested call into this
+ * path, so reserve the current range now if the type is
+ * memblock.reserved.
+ */
+ if (type->cnt == type->max) {
+ if (memblock_double_array(type, obase, size))
+ return -ENOMEM;
+ else if (!use_slab && type == &memblock.reserved)
+ return memblock_reserve(obase, size);
}
+
+ if (start_rgn == -1)
+ start_rgn = idx;
+ end_rgn = idx + 1;
+ memblock_insert_region(type, idx++, base,
+ rbase - base, nid,
+ flags);
}
/* area below @rend is dealt with, forget about it */
base = min(rend, end);
@@ -660,33 +657,25 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
/* insert the remaining portion */
if (base < end) {
- nr_new++;
- if (insert) {
- if (start_rgn == -1)
- start_rgn = idx;
- end_rgn = idx + 1;
- memblock_insert_region(type, idx, base, end - base,
- nid, flags);
+
+ if (type->cnt == type->max) {
+ if (memblock_double_array(type, obase, size))
+ return -ENOMEM;
+ else if (!use_slab && type == &memblock.reserved)
+ return memblock_reserve(obase, size);
}
- }
- if (!nr_new)
- return 0;
+ if (start_rgn == -1)
+ start_rgn = idx;
+ end_rgn = idx + 1;
+ memblock_insert_region(type, idx, base, end - base,
+ nid, flags);
+ }
- /*
- * If this was the first round, resize array and repeat for actual
- * insertions; otherwise, merge and return.
- */
- if (!insert) {
- while (type->cnt + nr_new > type->max)
- if (memblock_double_array(type, obase, size) < 0)
- return -ENOMEM;
- insert = true;
- goto repeat;
- } else {
+ if (ocnt != type->cnt)
memblock_merge_regions(type, start_rgn, end_rgn);
- return 0;
- }
+
+ return 0;
}
/**
--
2.25.1