sparse_index_init() is designed to be safe if two copies of it race.  It
uses "index_init_lock" to ensure that, even in the case of a race, only
one CPU will manage to do:

	mem_section[root] = section;

However, in the case where two copies of sparse_index_init() _do_ race,
the one that loses the race will leak the "section" that
sparse_index_alloc() allocated for it.

This patch fixes that leak.

Signed-off-by: Gavin Shan <shangw@xxxxxxxxxxxxxxxxxx>
---
 mm/sparse.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/mm/sparse.c b/mm/sparse.c
index 781fa04..a6984d9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -75,6 +75,20 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 	return section;
 }
 
+static inline void __meminit sparse_index_free(struct mem_section *section)
+{
+	unsigned long size = SECTIONS_PER_ROOT *
+			     sizeof(struct mem_section);
+
+	if (!section)
+		return;
+
+	if (slab_is_available())
+		kfree(section);
+	else
+		free_bootmem(virt_to_phys(section), size);
+}
+
 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
@@ -102,6 +116,9 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	mem_section[root] = section;
 out:
 	spin_unlock(&index_init_lock);
+	if (ret)
+		sparse_index_free(section);
+
 	return ret;
 }
 #else /* !SPARSEMEM_EXTREME */
-- 
1.7.9.5

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@xxxxxxxxx.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>