tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   86ed57fd8c93fdfaabb4f58e78455180fa7d8a84
commit: 10c9a9b135c1dfc6a110d503b68c4e0ba8f4ca26 [9149/9522] mm, slub: prevent VM_BUG_ON in PageSlabPfmemalloc from ___slab_alloc
config: x86_64-randconfig-a002-20210817 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project a83d99c55ebb14532c414066a5aa3bdb65389965)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=10c9a9b135c1dfc6a110d503b68c4e0ba8f4ca26
        git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
        git fetch --no-tags linux-next master
        git checkout 10c9a9b135c1dfc6a110d503b68c4e0ba8f4ca26
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

Note: the linux-next/master HEAD 86ed57fd8c93fdfaabb4f58e78455180fa7d8a84 builds fine.
      It may have been fixed somewhere.

All errors (new ones prefixed by >>):

>> mm/slub.c:2723:16: error: implicit declaration of function 'try_pfmemalloc_match' [-Werror,-Wimplicit-function-declaration]
           if (unlikely(!try_pfmemalloc_match(page, gfpflags)))
                         ^
   mm/slub.c:2723:16: note: did you mean 'pfmemalloc_match'?
   mm/slub.c:2601:20: note: 'pfmemalloc_match' declared here
   static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
                      ^
   1 error generated.


vim +/try_pfmemalloc_match +2723 mm/slub.c

  2655	
  2656	/*
  2657	 * Slow path. The lockless freelist is empty or we need to perform
  2658	 * debugging duties.
  2659	 *
  2660	 * Processing is still very fast if new objects have been freed to the
  2661	 * regular freelist. In that case we simply take over the regular freelist
  2662	 * as the lockless freelist and zap the regular freelist.
  2663	 *
  2664	 * If that is not working then we fall back to the partial lists. We take the
  2665	 * first element of the freelist as the object to allocate now and move the
  2666	 * rest of the freelist to the lockless freelist.
  2667	 *
  2668	 * And if we were unable to get a new slab from the partial slab lists then
  2669	 * we need to allocate a new slab. This is the slowest path since it involves
  2670	 * a call to the page allocator and the setup of a new slab.
  2671	 *
  2672	 * Version of __slab_alloc to use when we know that preemption is
  2673	 * already disabled (which is the case for bulk allocation).
  2674	 */
  2675	static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  2676				   unsigned long addr, struct kmem_cache_cpu *c)
  2677	{
  2678		void *freelist;
  2679		struct page *page;
  2680		unsigned long flags;
  2681	
  2682		stat(s, ALLOC_SLOWPATH);
  2683	
  2684	reread_page:
  2685	
  2686		page = READ_ONCE(c->page);
  2687		if (!page) {
  2688			/*
  2689			 * if the node is not online or has no normal memory, just
  2690			 * ignore the node constraint
  2691			 */
  2692			if (unlikely(node != NUMA_NO_NODE &&
  2693				     !node_isset(node, slab_nodes)))
  2694				node = NUMA_NO_NODE;
  2695			local_irq_save(flags);
  2696			if (unlikely(c->page)) {
  2697				local_irq_restore(flags);
  2698				goto reread_page;
  2699			}
  2700			goto new_slab;
  2701		}
  2702	redo:
  2703	
  2704		if (unlikely(!node_match(page, node))) {
  2705			/*
  2706			 * same as above but node_match() being false already
  2707			 * implies node != NUMA_NO_NODE
  2708			 */
  2709			if (!node_isset(node, slab_nodes)) {
  2710				node = NUMA_NO_NODE;
  2711				goto redo;
  2712			} else {
  2713				stat(s, ALLOC_NODE_MISMATCH);
  2714				goto deactivate_slab;
  2715			}
  2716		}
  2717	
  2718		/*
  2719		 * By rights, we should be searching for a slab page that was
  2720		 * PFMEMALLOC but right now, we are losing the pfmemalloc
  2721		 * information when the page leaves the per-cpu allocator
  2722		 */
> 2723		if (unlikely(!try_pfmemalloc_match(page, gfpflags)))
  2724			goto deactivate_slab;
  2725	
  2726		/* must check again c->page in case IRQ handler changed it */
  2727		local_irq_save(flags);
  2728		if (unlikely(page != c->page)) {
  2729			local_irq_restore(flags);
  2730			goto reread_page;
  2731		}
  2732		freelist = c->freelist;
  2733		if (freelist)
  2734			goto load_freelist;
  2735	
  2736		freelist = get_freelist(s, page);
  2737	
  2738		if (!freelist) {
  2739			c->page = NULL;
  2740			stat(s, DEACTIVATE_BYPASS);
  2741			goto new_slab;
  2742		}
  2743	
  2744		stat(s, ALLOC_REFILL);
  2745	
  2746	load_freelist:
  2747	
  2748		lockdep_assert_irqs_disabled();
  2749	
  2750		/*
  2751		 * freelist is pointing to the list of objects to be used.
  2752		 * page is pointing to the page from which the objects are obtained.
  2753		 * That page must be frozen for per cpu allocations to work.
  2754		 */
  2755		VM_BUG_ON(!c->page->frozen);
  2756		c->freelist = get_freepointer(s, freelist);
  2757		c->tid = next_tid(c->tid);
  2758		local_irq_restore(flags);
  2759		return freelist;
  2760	
  2761	deactivate_slab:
  2762	
  2763		local_irq_save(flags);
  2764		if (page != c->page) {
  2765			local_irq_restore(flags);
  2766			goto reread_page;
  2767		}
  2768		deactivate_slab(s, page, c->freelist, c);
  2769	
  2770	new_slab:
  2771	
  2772		lockdep_assert_irqs_disabled();
  2773	
  2774		if (slub_percpu_partial(c)) {
  2775			page = c->page = slub_percpu_partial(c);
  2776			slub_set_percpu_partial(c, page);
  2777			local_irq_restore(flags);
  2778			stat(s, CPU_PARTIAL_ALLOC);
  2779			goto redo;
  2780		}
  2781	
  2782		freelist = get_partial(s, gfpflags, node, &page);
  2783		if (freelist) {
  2784			c->page = page;
  2785			goto check_new_page;
  2786		}
  2787	
  2788		put_cpu_ptr(s->cpu_slab);
  2789		page = new_slab(s, gfpflags, node);
  2790		c = get_cpu_ptr(s->cpu_slab);
  2791	
  2792		if (unlikely(!page)) {
  2793			local_irq_restore(flags);
  2794			slab_out_of_memory(s, gfpflags, node);
  2795			return NULL;
  2796		}
  2797	
  2798		if (c->page)
  2799			flush_slab(s, c);
  2800	
  2801		/*
  2802		 * No other reference to the page yet so we can
  2803		 * muck around with it freely without cmpxchg
  2804		 */
  2805		freelist = page->freelist;
  2806		page->freelist = NULL;
  2807	
  2808		stat(s, ALLOC_SLAB);
  2809		c->page = page;
  2810	
  2811	check_new_page:
  2812	
  2813		if (kmem_cache_debug(s)) {
  2814			if (!alloc_debug_processing(s, page, freelist, addr))
  2815				/* Slab failed checks. Next slab needed */
  2816				goto new_slab;
  2817			else
  2818				/*
  2819				 * For debug case, we don't load freelist so that all
  2820				 * allocations go through alloc_debug_processing()
  2821				 */
  2822				goto return_single;
  2823		}
  2824	
  2825		if (unlikely(!pfmemalloc_match(page, gfpflags)))
  2826			/*
  2827			 * For !pfmemalloc_match() case we don't load freelist so that
  2828			 * we don't make further mismatched allocations easier.
  2829			 */
  2830			goto return_single;
  2831	
  2832		goto load_freelist;
  2833	
  2834	return_single:
  2835	
  2836		deactivate_slab(s, page, get_freepointer(s, freelist), c);
  2837		local_irq_restore(flags);
  2838		return freelist;
  2839	}
  2840	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz
Description: application/gzip
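
For context only: the error above says that try_pfmemalloc_match() has no declaration at
the point of use in ___slab_alloc(), and clang suggests the existing pfmemalloc_match()
declared at mm/slub.c:2601. The sketch below is one hypothetical shape such a helper could
take if the intent (per the commit subject) is a pfmemalloc check that must not trip the
VM_BUG_ON(!PageSlab(page)) inside PageSlabPfmemalloc() when c->page races with an IRQ
handler. The helper name, its placement next to pfmemalloc_match(), and the direct use of
PageActive() are illustrative assumptions, not the actual patch:

/*
 * Hypothetical sketch only, not the real fix: a pfmemalloc check that can be
 * called while c->page may still be changed or freed by an IRQ handler.
 * PageSlabPfmemalloc() asserts PageSlab(), so this reads the underlying
 * PG_active bit directly; the result is only a hint and the page is
 * re-checked against c->page after local_irq_save().
 */
static inline bool try_pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
	if (unlikely(PageActive(page)))
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}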