Hi Dave,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[cannot apply to v5.3-rc3 next-20190807]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Dave-Chinner/mm-xfs-non-blocking-inode-reclaim/20190804-042311
config: i386-randconfig-a003-201931 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.2-10+deb8u1) 4.9.2
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

   mm/vmscan.c: In function 'do_shrink_slab':
>> mm/vmscan.c:539:34: warning: passing argument 1 of 'atomic64_xchg' from incompatible pointer type
     deferred_count = atomic64_xchg(&shrinker->nr_deferred[nid], 0);
                                    ^
   In file included from arch/x86/include/asm/atomic.h:265:0,
                    from include/linux/atomic.h:7,
                    from include/linux/jump_label.h:249,
                    from include/linux/static_key.h:1,
                    from arch/x86/include/asm/nospec-branch.h:6,
                    from arch/x86/include/asm/paravirt_types.h:46,
                    from arch/x86/include/asm/ptrace.h:94,
                    from arch/x86/include/asm/math_emu.h:5,
                    from arch/x86/include/asm/processor.h:12,
                    from arch/x86/include/asm/cpufeature.h:5,
                    from arch/x86/include/asm/thread_info.h:53,
                    from include/linux/thread_info.h:38,
                    from arch/x86/include/asm/preempt.h:7,
                    from include/linux/preempt.h:78,
                    from include/linux/spinlock.h:51,
                    from include/linux/mmzone.h:8,
                    from include/linux/gfp.h:6,
                    from include/linux/mm.h:10,
                    from mm/vmscan.c:17:
   include/asm-generic/atomic-instrumented.h:1421:1: note: expected 'struct atomic64_t *' but argument is of type 'struct atomic_long_t *'
    atomic64_xchg(atomic64_t *v, s64 i)
    ^

vim +/atomic64_xchg +539 mm/vmscan.c

   498
   499  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
   500                                      struct shrinker *shrinker, int priority)
   501  {
   502          unsigned long freed = 0;
   503          int64_t freeable_objects = 0;
   504          int64_t scan_count;
   505          int64_t scanned_objects = 0;
   506          int64_t next_deferred = 0;
   507          int64_t deferred_count = 0;
   508          long new_nr;
   509          int nid = shrinkctl->nid;
   510          long batch_size = shrinker->batch ? shrinker->batch
   511                                            : SHRINK_BATCH;
   512
   513          if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
   514                  nid = 0;
   515
   516          scan_count = shrink_scan_count(shrinkctl, shrinker, priority,
   517                                         &freeable_objects);
   518          if (scan_count == 0 || scan_count == SHRINK_EMPTY)
   519                  return scan_count;
   520
   521          /*
   522           * If kswapd, we take all the deferred work and do it here. We don't let
   523           * direct reclaim do this, because then it means some poor sod is going
   524           * to have to do somebody else's GFP_NOFS reclaim, and it hides the real
   525           * amount of reclaim work from concurrent kswapd operations. Hence we do
   526           * the work in the wrong place, at the wrong time, and it's largely
   527           * unpredictable.
   528           *
   529           * By doing the deferred work only in kswapd, we can schedule the work
   530           * according the the reclaim priority - low priority reclaim will do
   531           * less deferred work, hence we'll do more of the deferred work the more
   532           * desperate we become for free memory. This avoids the need for needing
   533           * to specifically avoid deferred work windup as low amount os memory
   534           * pressure won't excessive trim caches anymore.
   535           */
   536          if (current_is_kswapd()) {
   537                  int64_t deferred_scan;
   538
 > 539                  deferred_count = atomic64_xchg(&shrinker->nr_deferred[nid], 0);
   540
   541                  /* we want to scan 5-10% of the deferred work here at minimum */
   542                  deferred_scan = deferred_count;
   543                  if (priority)
   544                          do_div(deferred_scan, priority);
   545                  scan_count += deferred_scan;
   546
   547                  /*
   548                   * If there is more deferred work than the number of freeable
   549                   * items in the cache, limit the amount of work we will carry
   550                   * over to the next kswapd run on this cache. This prevents
   551                   * deferred work windup.
   552                   */
   553                  if (deferred_count > freeable_objects * 2)
   554                          deferred_count = freeable_objects * 2;
   555
   556          }
   557
   558          /*
   559           * Avoid risking looping forever due to too large nr value:
   560           * never try to free more than twice the estimate number of
   561           * freeable entries.
   562           */
   563          if (scan_count > freeable_objects * 2)
   564                  scan_count = freeable_objects * 2;
   565
   566          trace_mm_shrink_slab_start(shrinker, shrinkctl, deferred_count,
   567                                     freeable_objects, scan_count,
   568                                     scan_count, priority);
   569
   570          /*
   571           * If the shrinker can't run (e.g. due to gfp_mask constraints), then
   572           * defer the work to a context that can scan the cache.
   573           */
   574          if (shrinkctl->will_defer)
   575                  goto done;
   576
   577          /*
   578           * Normally, we should not scan less than batch_size objects in one
   579           * pass to avoid too frequent shrinker calls, but if the slab has less
   580           * than batch_size objects in total and we are really tight on memory,
   581           * we will try to reclaim all available objects, otherwise we can end
   582           * up failing allocations although there are plenty of reclaimable
   583           * objects spread over several slabs with usage less than the
   584           * batch_size.
   585           *
   586           * We detect the "tight on memory" situations by looking at the total
   587           * number of objects we want to scan (total_scan). If it is greater
   588           * than the total number of objects on slab (freeable), we must be
   589           * scanning at high prio and therefore should try to reclaim as much as
   590           * possible.
   591           */
   592          while (scan_count >= batch_size ||
   593                 scan_count >= freeable_objects) {
   594                  unsigned long ret;
   595                  unsigned long nr_to_scan = min_t(long, batch_size, scan_count);
   596
   597                  shrinkctl->nr_to_scan = nr_to_scan;
   598                  shrinkctl->nr_scanned = nr_to_scan;
   599                  ret = shrinker->scan_objects(shrinker, shrinkctl);
   600                  if (ret == SHRINK_STOP)
   601                          break;
   602                  freed += ret;
   603
   604                  count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
   605                  scan_count -= shrinkctl->nr_scanned;
   606                  scanned_objects += shrinkctl->nr_scanned;
   607
   608                  cond_resched();
   609          }
   610
   611  done:
   612          if (deferred_count)
   613                  next_deferred = deferred_count - scanned_objects;
   614          else if (scan_count > 0)
   615                  next_deferred = scan_count;
   616          /*
   617           * move the unused scan count back into the shrinker in a
   618           * manner that handles concurrent updates. If we exhausted the
   619           * scan, there is no need to do an update.
   620           */
   621          if (next_deferred > 0)
   622                  new_nr = atomic_long_add_return(next_deferred,
   623                                          &shrinker->nr_deferred[nid]);
   624          else
   625                  new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
   626
   627          trace_mm_shrink_slab_end(shrinker, nid, freed, deferred_count, new_nr,
   628                                   scan_count);
   629          return freed;
   630  }
   631
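A short note on the root cause, plus a sketch of one possible way to address it (not compile-tested, purely illustrative): the GCC note says the argument is a 'struct atomic_long_t *', i.e. shrinker->nr_deferred[] is an array of atomic_long_t. On x86_64 that type is 64 bits wide, but on i386 it wraps a 32-bit atomic_t, so &shrinker->nr_deferred[nid] can never be passed to atomic64_xchg(), which wants an atomic64_t *. Keeping the accessor in the atomic_long_* family, as the atomic_long_add_return()/atomic_long_read() calls near the end of the function already do, would keep the types consistent on both word sizes:

        /*
         * Sketch only (not compile-tested): atomic_long_xchg() takes an
         * atomic_long_t * and returns a long, so it matches the element
         * type of nr_deferred[] on both 32-bit and 64-bit builds.
         */
        deferred_count = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

The alternative of widening nr_deferred[] itself to atomic64_t would presumably also build, but it touches more of struct shrinker and its users, so it is not sketched here.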
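As an aside on the kswapd branch itself (unrelated to the warning): the "scan 5-10% of the deferred work here at minimum" comment follows from the do_div(deferred_scan, priority) at line 544. Reclaim priority counts down from DEF_PRIORITY (12) towards 0 as memory pressure grows, so light pressure works off roughly 1/12 (about 8%) of the backlog per pass while desperate reclaim takes nearly all of it. A small stand-alone illustration with made-up numbers (plain userspace C, not kernel code):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                const int64_t deferred_count = 1200;       /* hypothetical backlog */
                const int priorities[] = { 12, 6, 2, 1 };  /* DEF_PRIORITY down to near-OOM */

                for (size_t i = 0; i < sizeof(priorities) / sizeof(priorities[0]); i++) {
                        int priority = priorities[i];
                        int64_t deferred_scan = deferred_count;

                        if (priority)
                                deferred_scan /= priority; /* stands in for do_div() */

                        printf("priority %2d: scan %4lld of %lld deferred (%.0f%%)\n",
                               priority, (long long)deferred_scan,
                               (long long)deferred_count,
                               100.0 * (double)deferred_scan / (double)deferred_count);
                }
                return 0;
        }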
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz (application/gzip)