<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 634162271c00..35b28be27cf4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -896,7 +896,7 @@ static struct vmap_node {
* is fully disabled. Later on, after vmap is initialized these
* parameters are updated based on a system capacity.
*/
-static struct vmap_node *vmap_nodes = &single;
+static struct vmap_node *single_node[1] = { &single };
+static struct vmap_node **vmap_nodes = single_node;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;
@@ -909,13 +909,13 @@ addr_to_node_id(unsigned long addr)
static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
- return &vmap_nodes[addr_to_node_id(addr)];
+ return vmap_nodes[addr_to_node_id(addr)];
}
static inline struct vmap_node *
id_to_node(unsigned int id)
{
- return &vmap_nodes[id % nr_vmap_nodes];
+ return vmap_nodes[id % nr_vmap_nodes];
}
/*
@@ -1060,7 +1060,7 @@ find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
repeat:
for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
spin_lock(&vn->busy.lock);
*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
@@ -2240,7 +2240,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
purge_nodes = CPU_MASK_NONE;
for (i = 0; i < nr_vmap_nodes; i++) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
INIT_LIST_HEAD(&vn->purge_list);
vn->skip_populate = full_pool_decay;
@@ -2272,7 +2272,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
for_each_cpu(i, &purge_nodes) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
if (nr_purge_helpers > 0) {
INIT_WORK(&vn->purge_work, purge_vmap_node);
@@ -2291,7 +2291,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
}
for_each_cpu(i, &purge_nodes) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
if (vn->purge_work.func) {
flush_work(&vn->purge_work);
@@ -2397,7 +2397,7 @@ struct vmap_area *find_vmap_area(unsigned long addr)
*/
i = j = addr_to_node_id(addr);
do {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
spin_lock(&vn->busy.lock);
va = __find_vmap_area(addr, &vn->busy.root);
@@ -2421,7 +2421,7 @@ static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
*/
i = j = addr_to_node_id(addr);
do {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
spin_lock(&vn->busy.lock);
va = __find_vmap_area(addr, &vn->busy.root);
@@ -4928,7 +4928,7 @@ static void show_purge_info(struct seq_file *m)
int i;
for (i = 0; i < nr_vmap_nodes; i++) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
spin_lock(&vn->lazy.lock);
list_for_each_entry(va, &vn->lazy.head, list) {
@@ -4948,7 +4948,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
int i;
for (i = 0; i < nr_vmap_nodes; i++) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
spin_lock(&vn->busy.lock);
list_for_each_entry(va, &vn->busy.head, list) {
@@ -5069,6 +5069,7 @@ static void __init vmap_init_free_space(void)
static void vmap_init_nodes(void)
{
+ struct vmap_node **nodes;
struct vmap_node *vn;
int i, n;
@@ -5087,23 +5088,34 @@ static void vmap_init_nodes(void)
* set of cores. Therefore a per-domain purging is supposed to
* be added as well as a per-domain balancing.
*/
- n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
+ n = 1024;
if (n > 1) {
- vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
- if (vn) {
+ nodes = kmalloc_array(n, sizeof(struct vmap_node *),
+ GFP_NOWAIT | __GFP_NOWARN | __GFP_ZERO);
+
+ if (nodes) {
+ for (i = 0; i < n; i++) {
+ nodes[i] = kmalloc(sizeof(struct vmap_node), GFP_NOWAIT | __GFP_ZERO);
+
+ if (!nodes[i])
+ break;
+ }
+
/* Node partition is 16 pages. */
vmap_zone_size = (1 << 4) * PAGE_SIZE;
- nr_vmap_nodes = n;
- vmap_nodes = vn;
+ nr_vmap_nodes = i;
+ vmap_nodes = nodes;
} else {
pr_err("Failed to allocate an array. Disable a node layer\n");
+ /* vmap_nodes already points at the single-node fallback. */
}
}
#endif
for (n = 0; n < nr_vmap_nodes; n++) {
- vn = &vmap_nodes[n];
+ vn = vmap_nodes[n];
vn->busy.root = RB_ROOT;
INIT_LIST_HEAD(&vn->busy.head);
spin_lock_init(&vn->busy.lock);
@@ -5129,7 +5141,7 @@ vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
int i, j;
for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
- vn = &vmap_nodes[i];
+ vn = vmap_nodes[i];
for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
count += READ_ONCE(vn->pool[j].len);
@@ -5144,7 +5156,7 @@ vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
int i;
for (i = 0; i < nr_vmap_nodes; i++)
- decay_va_pool_node(&vmap_nodes[i], true);
+ decay_va_pool_node(vmap_nodes[i], true);
return SHRINK_STOP;
}
<snip>
it sets the number of nodes to 1024. It would be really appreciated to see
the perf-delta with this patch, i.e. whether it improves things or not.
Thank you in advance.
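
As a side note for anyone reading along, below is a small userspace sketch
(plain C, not kernel code; "struct node" and its fields are made up for
illustration) of the layout difference the patch introduces. The old flat
array keeps all nodes in one contiguous allocation, while the new
double-pointer layout allocates every node separately, which is why the
lookups change from &vmap_nodes[i] to vmap_nodes[i] and why the cache
locality, and therefore the perf-delta, is worth measuring:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct vmap_node; the real structure is much larger. */
struct node {
	long busy;
	long lazy;
};

int main(void)
{
	int n = 1024, i;

	/* Old layout: one contiguous block; node i is addressed as &flat[i]. */
	struct node *flat = calloc(n, sizeof(*flat));

	/* New layout: an array of pointers; each node is its own allocation. */
	struct node **ptrs = calloc(n, sizeof(*ptrs));

	if (!flat || !ptrs)
		return 1;

	for (i = 0; i < n; i++) {
		ptrs[i] = calloc(1, sizeof(**ptrs));
		if (!ptrs[i])
			return 1;
	}

	printf("flat: node0=%p node1=%p (adjacent)\n",
	       (void *)&flat[0], (void *)&flat[1]);
	printf("ptrs: node0=%p node1=%p (independent allocations)\n",
	       (void *)ptrs[0], (void *)ptrs[1]);

	for (i = 0; i < n; i++)
		free(ptrs[i]);
	free(ptrs);
	free(flat);
	return 0;
}

With 1024 separately allocated nodes the per-node data is no longer
guaranteed to be adjacent in memory, so whatever contention win the extra
nodes buy has to be weighed against the extra pointer dereference and the
scattered allocations.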
--
Uladzislau Rezki