On Mon, Jan 6, 2025 at 11:08 PM kernel test robot <lkp@xxxxxxxxx> wrote: > > Hi Barry, > > kernel test robot noticed the following build errors: > > [auto build test ERROR on akpm-mm/mm-everything] > > url: https://github.com/intel-lab-lkp/linux/commits/Barry-Song/mm-set-folio-swapbacked-iff-folios-are-dirty-in-try_to_unmap_one/20250106-112638 > base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything > patch link: https://lore.kernel.org/r/20250106031711.82855-3-21cnbao%40gmail.com > patch subject: [PATCH 2/3] mm: Support tlbbatch flush for a range of PTEs > config: riscv-randconfig-001-20250106 (https://download.01.org/0day-ci/archive/20250106/202501061736.FoHcInHJ-lkp@xxxxxxxxx/config) > compiler: riscv64-linux-gcc (GCC) 14.2.0 > reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250106/202501061736.FoHcInHJ-lkp@xxxxxxxxx/reproduce) > > If you fix the issue in a separate patch/commit (i.e. not just a new version of > the same patch/commit), kindly add following tags > | Reported-by: kernel test robot <lkp@xxxxxxxxx> > | Closes: https://lore.kernel.org/oe-kbuild-all/202501061736.FoHcInHJ-lkp@xxxxxxxxx/ > Sorry. My bad, does the below diff fix the build? 
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 72e559934952..7f3ea687ce33 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -61,7 +61,8 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, bool arch_tlbbatch_should_defer(struct mm_struct *mm); void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, - unsigned long uaddr); + unsigned long uaddr, + unsigned long size); void arch_flush_tlb_batched_pending(struct mm_struct *mm); void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 9b6e86ce3867..aeda64a36d50 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -187,7 +187,8 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm) void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, - unsigned long uaddr) + unsigned long uaddr, + unsigned long size) { cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); } > All errors (new ones prefixed by >>): > > mm/rmap.c: In function 'set_tlb_ubc_flush_pending': > >> mm/rmap.c:685:9: error: too many arguments to function 'arch_tlbbatch_add_pending' > 685 | arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size); > | ^~~~~~~~~~~~~~~~~~~~~~~~~ > In file included from arch/riscv/include/asm/pgtable.h:113, > from include/linux/pgtable.h:6, > from include/linux/mm.h:30, > from mm/rmap.c:55: > arch/riscv/include/asm/tlbflush.h:62:6: note: declared here > 62 | void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, > | ^~~~~~~~~~~~~~~~~~~~~~~~~ > > > vim +/arch_tlbbatch_add_pending +685 mm/rmap.c > > 663 > 664 /* > 665 * Bits 0-14 of mm->tlb_flush_batched record pending generations. > 666 * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. 
> 667 */ > 668 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 > 669 #define TLB_FLUSH_BATCH_PENDING_MASK \ > 670 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) > 671 #define TLB_FLUSH_BATCH_PENDING_LARGE \ > 672 (TLB_FLUSH_BATCH_PENDING_MASK / 2) > 673 > 674 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, > 675 unsigned long uaddr, > 676 unsigned long size) > 677 { > 678 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; > 679 int batch; > 680 bool writable = pte_dirty(pteval); > 681 > 682 if (!pte_accessible(mm, pteval)) > 683 return; > 684 > > 685 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size); > 686 tlb_ubc->flush_required = true; > 687 > 688 /* > 689 * Ensure compiler does not re-order the setting of tlb_flush_batched > 690 * before the PTE is cleared. > 691 */ > 692 barrier(); > 693 batch = atomic_read(&mm->tlb_flush_batched); > 694 retry: > 695 if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { > 696 /* > 697 * Prevent `pending' from catching up with `flushed' because of > 698 * overflow. Reset `pending' and `flushed' to be 1 and 0 if > 699 * `pending' becomes large. > 700 */ > 701 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) > 702 goto retry; > 703 } else { > 704 atomic_inc(&mm->tlb_flush_batched); > 705 } > 706 > 707 /* > 708 * If the PTE was dirty then it's best to assume it's writable. The > 709 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() > 710 * before the page is queued for IO. > 711 */ > 712 if (writable) > 713 tlb_ubc->writable = true; > 714 } > 715 > > -- > 0-DAY CI Kernel Test Service > https://github.com/intel/lkp-tests/wiki