Re: [PATCH v2] mm/page_alloc: detect allocation forbidden by cpuset and bail out early

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi Feng,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on hnaz-linux-mm/master]

url:    https://github.com/0day-ci/linux/commits/Feng-Tang/mm-page_alloc-detect-allocation-forbidden-by-cpuset-and-bail-out-early/20210913-154016
base:   https://github.com/hnaz/linux-mm master
config: arc-randconfig-r043-20210913 (attached as .config)
compiler: arc-elf-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/276fb2292fa199777b3e9a394c8737e4c618cd23
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Feng-Tang/mm-page_alloc-detect-allocation-forbidden-by-cpuset-and-bail-out-early/20210913-154016
        git checkout 276fb2292fa199777b3e9a394c8737e4c618cd23
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross ARCH=arc 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

   mm/page_alloc.c:3810:15: warning: no previous prototype for 'should_fail_alloc_page' [-Wmissing-prototypes]
    3810 | noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
         |               ^~~~~~~~~~~~~~~~~~~~~~
   mm/page_alloc.c: In function '__alloc_pages_slowpath':
>> mm/page_alloc.c:4922:13: error: implicit declaration of function 'cpusets_insane_config' [-Werror=implicit-function-declaration]
    4922 |         if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
         |             ^~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/cpusets_insane_config +4922 mm/page_alloc.c

  4868	
  4869	static inline struct page *
  4870	__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  4871							struct alloc_context *ac)
  4872	{
  4873		bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
  4874		const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
  4875		struct page *page = NULL;
  4876		unsigned int alloc_flags;
  4877		unsigned long did_some_progress;
  4878		enum compact_priority compact_priority;
  4879		enum compact_result compact_result;
  4880		int compaction_retries;
  4881		int no_progress_loops;
  4882		unsigned int cpuset_mems_cookie;
  4883		int reserve_flags;
  4884	
  4885		/*
  4886		 * We also sanity check to catch abuse of atomic reserves being used by
  4887		 * callers that are not in atomic context.
  4888		 */
  4889		if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
  4890					(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
  4891			gfp_mask &= ~__GFP_ATOMIC;
  4892	
  4893	retry_cpuset:
  4894		compaction_retries = 0;
  4895		no_progress_loops = 0;
  4896		compact_priority = DEF_COMPACT_PRIORITY;
  4897		cpuset_mems_cookie = read_mems_allowed_begin();
  4898	
  4899		/*
  4900		 * The fast path uses conservative alloc_flags to succeed only until
  4901		 * kswapd needs to be woken up, and to avoid the cost of setting up
  4902		 * alloc_flags precisely. So we do that now.
  4903		 */
  4904		alloc_flags = gfp_to_alloc_flags(gfp_mask);
  4905	
  4906		/*
  4907		 * We need to recalculate the starting point for the zonelist iterator
  4908		 * because we might have used different nodemask in the fast path, or
  4909		 * there was a cpuset modification and we are retrying - otherwise we
  4910		 * could end up iterating over non-eligible zones endlessly.
  4911		 */
  4912		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  4913						ac->highest_zoneidx, ac->nodemask);
  4914		if (!ac->preferred_zoneref->zone)
  4915			goto nopage;
  4916	
  4917		/*
  4918		 * Check for insane configurations where the cpuset doesn't contain
  4919		 * any suitable zone to satisfy the request - e.g. non-movable
  4920		 * GFP_HIGHUSER allocations from MOVABLE nodes only.
  4921		 */
> 4922		if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
  4923			struct zoneref *z = first_zones_zonelist(ac->zonelist,
  4924						ac->highest_zoneidx,
  4925						&cpuset_current_mems_allowed);
  4926			if (!z->zone)
  4927				goto nopage;
  4928		}
  4929	
  4930		if (alloc_flags & ALLOC_KSWAPD)
  4931			wake_all_kswapds(order, gfp_mask, ac);
  4932	
  4933		/*
  4934		 * The adjusted alloc_flags might result in immediate success, so try
  4935		 * that first
  4936		 */
  4937		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  4938		if (page)
  4939			goto got_pg;
  4940	
  4941		/*
  4942		 * For costly allocations, try direct compaction first, as it's likely
  4943		 * that we have enough base pages and don't need to reclaim. For non-
  4944		 * movable high-order allocations, do that as well, as compaction will
  4945		 * try prevent permanent fragmentation by migrating from blocks of the
  4946		 * same migratetype.
  4947		 * Don't try this for allocations that are allowed to ignore
  4948		 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
  4949		 */
  4950		if (can_direct_reclaim &&
  4951				(costly_order ||
  4952				   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
  4953				&& !gfp_pfmemalloc_allowed(gfp_mask)) {
  4954			page = __alloc_pages_direct_compact(gfp_mask, order,
  4955							alloc_flags, ac,
  4956							INIT_COMPACT_PRIORITY,
  4957							&compact_result);
  4958			if (page)
  4959				goto got_pg;
  4960	
  4961			/*
  4962			 * Checks for costly allocations with __GFP_NORETRY, which
  4963			 * includes some THP page fault allocations
  4964			 */
  4965			if (costly_order && (gfp_mask & __GFP_NORETRY)) {
  4966				/*
  4967				 * If allocating entire pageblock(s) and compaction
  4968				 * failed because all zones are below low watermarks
  4969				 * or is prohibited because it recently failed at this
  4970				 * order, fail immediately unless the allocator has
  4971				 * requested compaction and reclaim retry.
  4972				 *
  4973				 * Reclaim is
  4974				 *  - potentially very expensive because zones are far
  4975				 *    below their low watermarks or this is part of very
  4976				 *    bursty high order allocations,
  4977				 *  - not guaranteed to help because isolate_freepages()
  4978				 *    may not iterate over freed pages as part of its
  4979				 *    linear scan, and
  4980				 *  - unlikely to make entire pageblocks free on its
  4981				 *    own.
  4982				 */
  4983				if (compact_result == COMPACT_SKIPPED ||
  4984				    compact_result == COMPACT_DEFERRED)
  4985					goto nopage;
  4986	
  4987				/*
  4988				 * Looks like reclaim/compaction is worth trying, but
  4989				 * sync compaction could be very expensive, so keep
  4990				 * using async compaction.
  4991				 */
  4992				compact_priority = INIT_COMPACT_PRIORITY;
  4993			}
  4994		}
  4995	
  4996	retry:
  4997		/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
  4998		if (alloc_flags & ALLOC_KSWAPD)
  4999			wake_all_kswapds(order, gfp_mask, ac);
  5000	
  5001		reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
  5002		if (reserve_flags)
  5003			alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
  5004	
  5005		/*
  5006		 * Reset the nodemask and zonelist iterators if memory policies can be
  5007		 * ignored. These allocations are high priority and system rather than
  5008		 * user oriented.
  5009		 */
  5010		if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
  5011			ac->nodemask = NULL;
  5012			ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  5013						ac->highest_zoneidx, ac->nodemask);
  5014		}
  5015	
  5016		/* Attempt with potentially adjusted zonelist and alloc_flags */
  5017		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  5018		if (page)
  5019			goto got_pg;
  5020	
  5021		/* Caller is not willing to reclaim, we can't balance anything */
  5022		if (!can_direct_reclaim)
  5023			goto nopage;
  5024	
  5025		/* Avoid recursion of direct reclaim */
  5026		if (current->flags & PF_MEMALLOC)
  5027			goto nopage;
  5028	
  5029		/* Try direct reclaim and then allocating */
  5030		page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
  5031								&did_some_progress);
  5032		if (page)
  5033			goto got_pg;
  5034	
  5035		/* Try direct compaction and then allocating */
  5036		page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
  5037						compact_priority, &compact_result);
  5038		if (page)
  5039			goto got_pg;
  5040	
  5041		/* Do not loop if specifically requested */
  5042		if (gfp_mask & __GFP_NORETRY)
  5043			goto nopage;
  5044	
  5045		/*
  5046		 * Do not retry costly high order allocations unless they are
  5047		 * __GFP_RETRY_MAYFAIL
  5048		 */
  5049		if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
  5050			goto nopage;
  5051	
  5052		if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
  5053					 did_some_progress > 0, &no_progress_loops))
  5054			goto retry;
  5055	
  5056		/*
  5057		 * It doesn't make any sense to retry for the compaction if the order-0
  5058		 * reclaim is not able to make any progress because the current
  5059		 * implementation of the compaction depends on the sufficient amount
  5060		 * of free memory (see __compaction_suitable)
  5061		 */
  5062		if (did_some_progress > 0 &&
  5063				should_compact_retry(ac, order, alloc_flags,
  5064					compact_result, &compact_priority,
  5065					&compaction_retries))
  5066			goto retry;
  5067	
  5068	
  5069		/* Deal with possible cpuset update races before we start OOM killing */
  5070		if (check_retry_cpuset(cpuset_mems_cookie, ac))
  5071			goto retry_cpuset;
  5072	
  5073		/* Reclaim has failed us, start killing things */
  5074		page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
  5075		if (page)
  5076			goto got_pg;
  5077	
  5078		/* Avoid allocations with no watermarks from looping endlessly */
  5079		if (tsk_is_oom_victim(current) &&
  5080		    (alloc_flags & ALLOC_OOM ||
  5081		     (gfp_mask & __GFP_NOMEMALLOC)))
  5082			goto nopage;
  5083	
  5084		/* Retry as long as the OOM killer is making progress */
  5085		if (did_some_progress) {
  5086			no_progress_loops = 0;
  5087			goto retry;
  5088		}
  5089	
  5090	nopage:
  5091		/* Deal with possible cpuset update races before we fail */
  5092		if (check_retry_cpuset(cpuset_mems_cookie, ac))
  5093			goto retry_cpuset;
  5094	
  5095		/*
  5096		 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
  5097		 * we always retry
  5098		 */
  5099		if (gfp_mask & __GFP_NOFAIL) {
  5100			/*
  5101			 * All existing users of the __GFP_NOFAIL are blockable, so warn
  5102			 * of any new users that actually require GFP_NOWAIT
  5103			 */
  5104			if (WARN_ON_ONCE(!can_direct_reclaim))
  5105				goto fail;
  5106	
  5107			/*
  5108			 * PF_MEMALLOC request from this context is rather bizarre
  5109			 * because we cannot reclaim anything and only can loop waiting
  5110			 * for somebody to do a work for us
  5111			 */
  5112			WARN_ON_ONCE(current->flags & PF_MEMALLOC);
  5113	
  5114			/*
  5115			 * non failing costly orders are a hard requirement which we
  5116			 * are not prepared for much so let's warn about these users
  5117			 * so that we can identify them and convert them to something
  5118			 * else.
  5119			 */
  5120			WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
  5121	
  5122			/*
  5123			 * Help non-failing allocations by giving them access to memory
  5124			 * reserves but do not use ALLOC_NO_WATERMARKS because this
  5125			 * could deplete whole memory reserves which would just make
  5126			 * the situation worse
  5127			 */
  5128			page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
  5129			if (page)
  5130				goto got_pg;
  5131	
  5132			cond_resched();
  5133			goto retry;
  5134		}
  5135	fail:
  5136		warn_alloc(gfp_mask, ac->nodemask,
  5137				"page allocation failure: order:%u", order);
  5138	got_pg:
  5139		return page;
  5140	}
  5141	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip


[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux