On Tue, Sep 27, 2022 at 01:53:31PM +0800, Li Zhijian wrote:

> @@ -122,6 +129,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>  	int num_buf;
>  	void *vaddr;
>  	int err;
> +	bool is_pmem = false;
>  	int i;
>
>  	umem = ib_umem_get(&rxe->ib_dev, start, length, access);

> @@ -149,6 +157,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>  	num_buf = 0;
>  	map = mr->map;
>  	if (length > 0) {
> +		is_pmem = true;
>  		buf = map[0]->buf;
>
>  		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {

> @@ -166,6 +175,10 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>  				goto err_cleanup_map;
>  			}
>
> +			/* True only if the *whole* MR is pmem */
> +			if (is_pmem)
> +				is_pmem = vaddr_in_pmem(vaddr);
> +

I'm not so keen on this use of resources, but this should be written
more like

  phys = page_to_phys(sg_page_iter_page(&sg_iter))
  region_intersects(phys + sg_iter->offset, sg_iter->length,.. )

And you understand this will make memory registration of every RXE
user a bit slower? And actual pmem will be painfully slow.

It seems like we are doing something wrong here..

> @@ -174,6 +187,12 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>  		}
>  	}
>
> +	if (!is_pmem && access & IB_ACCESS_FLUSH_PERSISTENT) {
> +		pr_warn("Cannot register IB_ACCESS_FLUSH_PERSISTENT for non-pmem memory\n");
> +		err = -EINVAL;
> +		goto err_release_umem;
> +	}

Do not pr_warn on syscall paths

Jason
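
For reference, a minimal sketch of the kind of per-page check being
suggested above, assuming IORES_DESC_PERSISTENT_MEMORY is the resource
descriptor to match and that rxe keeps iterating the umem page by page;
the helper name is illustrative, not from the patch (region_intersects()
and IORES_DESC_PERSISTENT_MEMORY come from <linux/ioport.h>,
page_to_phys() from <asm/io.h>):

  /* Illustrative helper: true only if this whole page is backed by pmem */
  static bool rxe_page_is_pmem(struct page *pg)
  {
  	phys_addr_t paddr = page_to_phys(pg);

  	return region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
  				 IORES_DESC_PERSISTENT_MEMORY) ==
  	       REGION_INTERSECTS;
  }

The MR-wide is_pmem flag would then be the AND of this result over every
page in the umem, which is where the extra per-registration cost Jason
mentions comes from.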