On Fri, Sep 08, 2023 at 03:26:47PM +0900, Daisuke Matsuda wrote:
> +static inline bool rxe_odp_check_pages(struct rxe_mr *mr, u64 iova,
> +				       int length, u32 flags)
> +{
> +	unsigned long lower, upper, idx;
> +	unsigned long hmm_flags = HMM_PFN_VALID;
> +	struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
> +	struct page *page;
> +	bool need_fault = false;
> +
> +	lower = rxe_mr_iova_to_index(mr, iova);
> +	upper = rxe_mr_iova_to_index(mr, iova + length - 1);
> +
> +	if (!(flags & RXE_PAGEFAULT_RDONLY))
> +		hmm_flags |= HMM_PFN_WRITE;
> +
> +	/* xarray is protected by umem_mutex */
> +	for (idx = lower; idx <= upper; idx++) {
> +		page = xa_load(&mr->page_list, idx);
> +
> +		if (!page || !(umem_odp->pfn_list[idx] & hmm_flags)) {
> +			need_fault = true;

Again, you don't need the pfn_list, and rxe should ideally find some
way to disable it, since we store struct pages in the xarray.

This could also be an xas loop.

Jason
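
As an illustration of the suggested change, here is a minimal sketch of
what such an xas loop could look like. This is not code from the patch
or from rxe: the function name rxe_odp_pages_need_fault is made up for
the example, and it assumes, hypothetically, that write permission is
tracked by setting XA_MARK_0 on writable entries, so that the HMM
pfn_list is not consulted at all.

#include <linux/xarray.h>
#include <linux/rcupdate.h>

/*
 * Sketch only: return true if any page in [lower, upper] needs a fault.
 * Assumes struct pages are stored directly in mr->page_list and that
 * writable mappings are tagged with XA_MARK_0 (an assumption, not
 * existing rxe behavior).
 */
static bool rxe_odp_pages_need_fault(struct rxe_mr *mr, unsigned long lower,
				     unsigned long upper, bool for_write)
{
	XA_STATE(xas, &mr->page_list, lower);
	unsigned long expect = lower;
	struct page *page;
	bool need_fault = false;

	/* Updates are serialized by umem_mutex; RCU covers the walk */
	rcu_read_lock();
	xas_for_each(&xas, page, upper) {
		if (xas_retry(&xas, page))
			continue;
		/* The iterator skips empty slots, so a skipped index
		 * is a hole: that page is not resident.
		 */
		if (xas.xa_index != expect) {
			need_fault = true;
			break;
		}
		/* Hypothetical: XA_MARK_0 set when the page is writable */
		if (for_write && !xas_get_mark(&xas, XA_MARK_0)) {
			need_fault = true;
			break;
		}
		expect++;
	}
	rcu_read_unlock();

	/* The loop also ends early if the tail of the range is empty */
	return need_fault || expect != upper + 1;
}

Compared with calling xa_load() once per index, which restarts the tree
descent from the root every iteration, the xa_state walk descends once
and advances in place; and because the iterator naturally skips holes, a
missing page shows up as a gap in xas.xa_index rather than as a NULL
return that must be tested separately.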