Function current_gfp_context() is called after the fast path. However,
soon we will add more constraints which will also limit zones based on
context. Move this call into the fast path, and apply the correct
constraints for all allocations.

Also update .reclaim_idx based on the value returned by
current_gfp_context(), because it will soon modify the allowed zones.

Note:
With this patch we do one extra current->flags load during the fast
path, but we already load current->flags in the fast path:

__alloc_pages_nodemask()
 prepare_alloc_pages()
  current_alloc_flags(gfp_mask, *alloc_flags);

Later, when we add the zone constraint logic to current_gfp_context(),
we will be able to remove the current->flags load from
current_alloc_flags(), and therefore return the fast path to its
current performance level.

Suggested-by: Michal Hocko <mhocko@xxxxxxxxxx>
Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
---
 mm/page_alloc.c | 15 ++++++++-------
 mm/vmscan.c     | 10 ++++++----
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2dea5600f308..24c99b3b12af 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4932,6 +4932,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	}
 
 	gfp_mask &= gfp_allowed_mask;
+	/*
+	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
+	 * resp. GFP_NOIO which has to be inherited for all allocation requests
+	 * from a particular context which has been marked by
+	 * memalloc_no{fs,io}_{save,restore}.
+	 */
+	gfp_mask = current_gfp_context(gfp_mask);
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
@@ -4947,13 +4954,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	if (likely(page))
 		goto out;
 
-	/*
-	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
-	 * resp. GFP_NOIO which has to be inherited for all allocation requests
-	 * from a particular context which has been marked by
-	 * memalloc_no{fs,io}_{save,restore}.
-	 */
-	alloc_mask = current_gfp_context(gfp_mask);
+	alloc_mask = gfp_mask;
 	ac.spread_dirty_pages = false;
 
 	/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b4e31eac2cf..f51581e33fe6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3233,11 +3233,12 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				gfp_t gfp_mask, nodemask_t *nodemask)
 {
+	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
-		.gfp_mask = current_gfp_context(gfp_mask),
-		.reclaim_idx = gfp_zone(gfp_mask),
+		.gfp_mask = current_gfp_mask,
+		.reclaim_idx = gfp_zone(current_gfp_mask),
 		.order = order,
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
@@ -4157,17 +4158,18 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 {
 	/* Minimum pages needed in order to stay on node */
 	const unsigned long nr_pages = 1 << order;
+	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
 	struct task_struct *p = current;
 	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-		.gfp_mask = current_gfp_context(gfp_mask),
+		.gfp_mask = current_gfp_mask,
 		.order = order,
 		.priority = NODE_RECLAIM_PRIORITY,
 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
-		.reclaim_idx = gfp_zone(gfp_mask),
+		.reclaim_idx = gfp_zone(current_gfp_mask),
 	};
 
 	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
--
2.25.1
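
For reference, the scoped-constraint machinery involved here works roughly as
follows. This is a simplified sketch modeled on include/linux/sched/mm.h from
this era, not a verbatim copy of it; the NOIO variant follows the same pattern:

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;

	/* Mark the task: allocations in this scope must not recurse into FS */
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	/* Put the flag back exactly as it was, allowing nested scopes */
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline gfp_t current_gfp_context(gfp_t flags)
{
	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
		/*
		 * NOIO is the more restrictive scope (it implies NOFS too),
		 * so it takes precedence.
		 */
		if (current->flags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (current->flags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
	}
	return flags;
}

A typical caller, e.g. a filesystem allocating while holding a lock that
reclaim could take, looks like this (fs_alloc_buffer() is a made-up name
purely for illustration):

static void *fs_alloc_buffer(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	struct page *page;

	/* Effectively GFP_NOFS inside this scope, GFP_KERNEL outside it */
	page = alloc_page(GFP_KERNEL);

	memalloc_nofs_restore(nofs_flags);
	return page ? page_address(page) : NULL;
}

With this patch, __alloc_pages_nodemask() applies the filtered mask before the
first (fast path) allocation attempt rather than only before the slow path, so
the fast path, the slow path, and reclaim (.gfp_mask and .reclaim_idx) all
observe the same constrained value; that consistency is what the upcoming
zone-limiting logic in current_gfp_context() relies on.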