Reimplement si_mem_available() by reusing si_mem_available_node().

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/page_alloc.c | 45 +++++++--------------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31f5e3e335cf..0982372c8e49 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5782,46 +5782,15 @@ long si_mem_available_node(struct sysinfo *val, int nid)
 
 long si_mem_available(void)
 {
-	long available;
-	unsigned long pagecache;
-	unsigned long wmark_low = 0;
-	unsigned long pages[NR_LRU_LISTS];
-	unsigned long reclaimable;
-	struct zone *zone;
-	int lru;
-
-	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
-
-	for_each_zone(zone)
-		wmark_low += low_wmark_pages(zone);
-
-	/*
-	 * Estimate the amount of memory available for userspace allocations,
-	 * without causing swapping.
-	 */
-	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
-
-	/*
-	 * Not all the page cache can be freed, otherwise the system will
-	 * start swapping. Assume at least half of the page cache, or the
-	 * low watermark worth of cache, needs to stay.
-	 */
-	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
-	pagecache -= min(pagecache / 2, wmark_low);
-	available += pagecache;
+	long available = 0;
+	struct sysinfo i;
+	int nid;
 
-	/*
-	 * Part of the reclaimable slab and other kernel memory consists of
-	 * items that are in use, and cannot be freed. Cap this estimate at the
-	 * low watermark.
-	 */
-	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
-		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
-	available += reclaimable - min(reclaimable / 2, wmark_low);
+	for_each_online_node(nid) {
+		si_meminfo_node(&i, nid);
+		available += si_mem_available_node(&i, nid);
+	}
 
-	if (available < 0)
-		available = 0;
 	return available;
 }
 EXPORT_SYMBOL_GPL(si_mem_available);
-- 
2.11.0
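
[Editor's note for context: after this patch, si_mem_available() just
walks the online nodes and sums the per-node estimates. The actual
si_mem_available_node() is introduced by an earlier patch in this
series; what follows is only a minimal sketch of what such a helper
might look like, mirroring the removed global logic with per-node
counters. The body below is an assumption for illustration, not the
real code; in particular, the per-node reserve handling is omitted.]

/*
 * Sketch only: per-node analogue of the removed global estimate.
 * Assumes the same heuristic as the old si_mem_available(), applied
 * to one node's counters.
 */
long si_mem_available_node(struct sysinfo *val, int nid)
{
	long available;
	unsigned long pagecache, reclaimable;
	unsigned long wmark_low = 0;
	unsigned long pages[NR_LRU_LISTS];
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid, lru;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = node_page_state(pgdat, NR_LRU_BASE + lru);

	/* Sum the low watermarks of this node's populated zones. */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (populated_zone(zone))
			wmark_low += low_wmark_pages(zone);
	}

	/*
	 * Free pages on this node, as filled in by si_meminfo_node().
	 * A complete implementation would also subtract this node's
	 * share of the reserved pages here (omitted in this sketch).
	 */
	available = val->freeram;

	/*
	 * As in the removed global code: assume at least half of the
	 * page cache, or the low watermark worth of it, must stay.
	 */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/* Reclaimable slab and misc kernel memory, capped the same way. */
	reclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) +
		node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE);
	available += reclaimable - min(reclaimable / 2, wmark_low);

	if (available < 0)
		available = 0;
	return available;
}

One behavioral point worth noting in review: summing per-node estimates
is not bit-for-bit identical to the old global computation, because the
min(..., wmark_low) caps are now applied per node rather than once
against the global watermark total, so the reported value can differ
slightly on multi-node systems.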