Currently lock_memory_hotplug()/unlock_memory_hotplug() are used to
protect totalram_pages and zone->managed_pages. But besides the memory
hotplug code, totalram_pages and zone->managed_pages may also be
modified at runtime by other drivers, such as the Xen balloon and
virtio_balloon drivers. For those cases the memory hotplug lock is too
heavy, so introduce a dedicated lock to protect totalram_pages and
zone->managed_pages.

Now we have simplified locking rules for totalram_pages and
zone->managed_pages:
1) no locking for read accesses, because both counters are unsigned
   long and word-sized reads do not tear;
2) no locking for write accesses at boot time, in single-threaded
   context;
3) serialize write accesses at runtime by acquiring the dedicated
   managed_page_count_lock.

Also adjust zone->managed_pages when freeing reserved pages into the
buddy system, to keep totalram_pages and zone->managed_pages
consistent.
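As an illustration (a sketch only, not part of this patch), a driver
returning a previously reserved page to the buddy allocator at runtime
would follow the rules above roughly as below; release_reserved_page()
is a made-up name for such a driver function:

/* Hypothetical example, not part of this patch. */
static void release_reserved_page(struct page *page)
{
	/* Clear PG_reserved and free the page into the buddy system. */
	__free_reserved_page(page);

	/*
	 * Keep zone->managed_pages and totalram_pages consistent.
	 * adjust_managed_page_count() takes managed_page_count_lock
	 * internally, so the caller needs no extra locking (rule 3).
	 */
	adjust_managed_page_count(page, 1);
}

Boot-time code running in single-threaded context may still update the
counters without the lock, per rule 2.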
Signed-off-by: Jiang Liu <jiang.liu@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Michel Lespinasse <walken@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx (open list:MEMORY MANAGEMENT)
Cc: linux-kernel@xxxxxxxxxxxxxxx (open list)
---
 include/linux/mm.h     |  6 ++----
 include/linux/mmzone.h | 14 ++++++++++----
 mm/page_alloc.c        | 12 ++++++++++++
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a56bcaa..bfe3686 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1309,6 +1309,7 @@ extern void free_initmem(void);
  */
 extern unsigned long free_reserved_area(void *start, void *end,
 					int poison, char *s);
+
 #ifdef CONFIG_HIGHMEM
 /*
  * Free a highmem page into the buddy system, adjusting totalhigh_pages
@@ -1317,10 +1318,7 @@ extern unsigned long free_reserved_area(void *start, void *end,
 extern void free_highmem_page(struct page *page);
 #endif
 
-static inline void adjust_managed_page_count(struct page *page, long count)
-{
-	totalram_pages += count;
-}
+extern void adjust_managed_page_count(struct page *page, long count);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void __free_reserved_page(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 131989a..178f166 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,10 +474,16 @@ struct zone {
 	 * frequently read in proximity to zone->lock.  It's good to
 	 * give them a chance of being in the same cacheline.
 	 *
-	 * Write access to present_pages and managed_pages at runtime should
-	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
-	 * Any reader who can't tolerant drift of present_pages and
-	 * managed_pages should hold memory hotplug lock to get a stable value.
+	 * Write access to present_pages at runtime should be protected by
+	 * lock_memory_hotplug()/unlock_memory_hotplug().  Any reader who can't
+	 * tolerate drift of present_pages should hold the memory hotplug lock
+	 * to get a stable value.
+	 *
+	 * Read access to managed_pages is safe because it is an unsigned
+	 * long.  Write accesses to zone->managed_pages and totalram_pages are
+	 * protected by managed_page_count_lock at runtime.  Ideally only
+	 * adjust_managed_page_count() should be used instead of directly
+	 * touching zone->managed_pages and totalram_pages.
 	 */
 	unsigned long spanned_pages;
 	unsigned long present_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9b50e6a..924c099 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,6 +103,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 };
 EXPORT_SYMBOL(node_states);
 
+/* Protect totalram_pages and zone->managed_pages */
+static DEFINE_SPINLOCK(managed_page_count_lock);
+
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 /*
@@ -5231,6 +5234,15 @@ early_param("movablecore", cmdline_parse_movablecore);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+void adjust_managed_page_count(struct page *page, long count)
+{
+	spin_lock(&managed_page_count_lock);
+	page_zone(page)->managed_pages += count;
+	totalram_pages += count;
+	spin_unlock(&managed_page_count_lock);
+}
+EXPORT_SYMBOL(adjust_managed_page_count);
+
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {
 	void *pos;
-- 
1.8.1.2