On Wed 02-12-20 00:23:29, Pavel Tatashin wrote:
[...]
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 611799c72da5..7a6d86d0bc5f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3766,20 +3766,25 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
>  	return alloc_flags;
>  }
> 
> -static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
> -					unsigned int alloc_flags)
> +static inline unsigned int cma_alloc_flags(gfp_t gfp_mask,
> +					unsigned int alloc_flags)
>  {
>  #ifdef CONFIG_CMA
> -	unsigned int pflags = current->flags;
> -
> -	if (!(pflags & PF_MEMALLOC_NOMOVABLE) &&
> -	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
> +	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
>  		alloc_flags |= ALLOC_CMA;
> -
>  #endif
>  	return alloc_flags;
>  }
> 
> +static inline gfp_t current_gfp_checkmovable(gfp_t gfp_mask)
> +{
> +	unsigned int pflags = current->flags;
> +
> +	if ((pflags & PF_MEMALLOC_NOMOVABLE))
> +		return gfp_mask & ~__GFP_MOVABLE;
> +	return gfp_mask;
> +}
> +

It sucks that we have to control both ALLOC and gfp flags. But wouldn't
it be simpler and more straightforward to keep current_alloc_flags as is
(modulo the PF rename) and hook the gfp mask evaluation into
current_gfp_context, moving it up before the first allocation attempt?

All scope flags should be applicable to the hot path as well. It would
add a few cycles there, but the question is whether that would be
noticeable over just handling PF_MEMALLOC_NOMOVABLE on its own. The
cache line would be pulled in anyway.
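
Something along these lines is what I have in mind. Completely untested
sketch only, reusing the PF_MEMALLOC_NOMOVABLE name from your patch:

static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS |
			       PF_MEMALLOC_NOMOVABLE))) {
		/* NOIO implies NOFS as well, so it takes precedence */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
		/* drop __GFP_MOVABLE inside the movable-less scope */
		if (pflags & PF_MEMALLOC_NOMOVABLE)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}

The idea would be that __alloc_pages_nodemask applies
current_gfp_context once before the first get_page_from_freelist
attempt, so both the fast and the slow path see the filtered mask and
the ALLOC_CMA decision simply follows gfp_migratetype of that mask.

-- 
Michal Hocko
SUSE Labs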