Hi Michel, Thank you for the patch! Perhaps something to improve: [auto build test WARNING on linus/master] [also build test WARNING on v5.17-rc1 next-20220128] [cannot apply to tip/x86/mm arm64/for-next/core powerpc/next hnaz-mm/master] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting a patch, we suggest using '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Michel-Lespinasse/Speculative-page-faults/20220128-212122 base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 145d9b498fc827b79c1260b4caa29a8e59d4c2b9 config: arm-vt8500_v6_v7_defconfig (https://download.01.org/0day-ci/archive/20220129/202201290445.uKuWeLmf-lkp@xxxxxxxxx/config) compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 33b45ee44b1f32ffdbc995e6fec806271b4b3ba4) reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # install arm cross compiling tool for clang build # apt-get install binutils-arm-linux-gnueabi # https://github.com/0day-ci/linux/commit/fa5331bae2e49ce86eff959390b451b7401f9156 git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Michel-Lespinasse/Speculative-page-faults/20220128-212122 git checkout fa5331bae2e49ce86eff959390b451b7401f9156 # save the config file to linux build tree mkdir build_dir COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm SHELL=/bin/bash If you fix the issue, kindly add the following tag as appropriate Reported-by: kernel test robot <lkp@xxxxxxxxx> All warnings (new ones prefixed by >>): >> mm/memory.c:3876:20: warning: variable 'vmf' is uninitialized when used within its own initialization [-Wuninitialized] if (!pte_map_lock(vmf)) { ~~~~~~~~~~~~~^~~~ include/linux/mm.h:3418:25: note: expanded from macro 'pte_map_lock' struct vm_fault *vmf = __vmf; \ 
~~~ ^~~~~ 1 warning generated. vim +/vmf +3876 mm/memory.c 3808 3809 /* 3810 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3811 * but allow concurrent faults), and pte mapped but not yet locked. 3812 * We return with mmap_lock still held, but pte unmapped and unlocked. 3813 */ 3814 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 3815 { 3816 struct vm_area_struct *vma = vmf->vma; 3817 struct page *page = NULL; 3818 vm_fault_t ret = 0; 3819 pte_t entry; 3820 3821 /* File mapping without ->vm_ops ? */ 3822 if (vma->vm_flags & VM_SHARED) 3823 return VM_FAULT_SIGBUS; 3824 3825 /* 3826 * Use pte_alloc() instead of pte_alloc_map(). We can't run 3827 * pte_offset_map() on pmds where a huge pmd might be created 3828 * from a different thread. 3829 * 3830 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 3831 * parallel threads are excluded by other means. 3832 * 3833 * Here we only have mmap_read_lock(mm). 3834 */ 3835 if (pte_alloc(vma->vm_mm, vmf->pmd)) 3836 return VM_FAULT_OOM; 3837 3838 /* See comment in __handle_mm_fault() */ 3839 if (unlikely(pmd_trans_unstable(vmf->pmd))) 3840 return 0; 3841 3842 /* Use the zero-page for reads */ 3843 if (!(vmf->flags & FAULT_FLAG_WRITE) && 3844 !mm_forbids_zeropage(vma->vm_mm)) { 3845 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 3846 vma->vm_page_prot)); 3847 } else { 3848 /* Allocate our own private page. 
*/ 3849 if (unlikely(!vma->anon_vma)) { 3850 if (vmf->flags & FAULT_FLAG_SPECULATIVE) 3851 return VM_FAULT_RETRY; 3852 if (__anon_vma_prepare(vma)) 3853 goto oom; 3854 } 3855 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); 3856 if (!page) 3857 goto oom; 3858 3859 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) 3860 goto oom_free_page; 3861 cgroup_throttle_swaprate(page, GFP_KERNEL); 3862 3863 /* 3864 * The memory barrier inside __SetPageUptodate makes sure that 3865 * preceding stores to the page contents become visible before 3866 * the set_pte_at() write. 3867 */ 3868 __SetPageUptodate(page); 3869 3870 entry = mk_pte(page, vma->vm_page_prot); 3871 entry = pte_sw_mkyoung(entry); 3872 if (vma->vm_flags & VM_WRITE) 3873 entry = pte_mkwrite(pte_mkdirty(entry)); 3874 } 3875 > 3876 if (!pte_map_lock(vmf)) { 3877 ret = VM_FAULT_RETRY; 3878 goto release; 3879 } 3880 if (!pte_none(*vmf->pte)) { 3881 update_mmu_tlb(vma, vmf->address, vmf->pte); 3882 goto unlock; 3883 } 3884 3885 ret = check_stable_address_space(vma->vm_mm); 3886 if (ret) 3887 goto unlock; 3888 3889 /* Deliver the page fault to userland, check inside PT lock */ 3890 if (userfaultfd_missing(vma)) { 3891 pte_unmap_unlock(vmf->pte, vmf->ptl); 3892 if (page) 3893 put_page(page); 3894 if (vmf->flags & FAULT_FLAG_SPECULATIVE) 3895 return VM_FAULT_RETRY; 3896 return handle_userfault(vmf, VM_UFFD_MISSING); 3897 } 3898 3899 if (page) { 3900 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 3901 page_add_new_anon_rmap(page, vma, vmf->address, false); 3902 lru_cache_add_inactive_or_unevictable(page, vma); 3903 } 3904 3905 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 3906 3907 /* No need to invalidate - it was non-present before */ 3908 update_mmu_cache(vma, vmf->address, vmf->pte); 3909 pte_unmap_unlock(vmf->pte, vmf->ptl); 3910 return 0; 3911 unlock: 3912 pte_unmap_unlock(vmf->pte, vmf->ptl); 3913 release: 3914 if (page) 3915 put_page(page); 3916 return ret; 3917 oom_free_page: 
3918 put_page(page); 3919 oom: 3920 return VM_FAULT_OOM; 3921 } 3922 --- 0-DAY CI Kernel Test Service, Intel Corporation https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx