On Tue, Sep 22, 2020 at 11:21:01AM +0300, Leon Romanovsky wrote:
> @@ -287,87 +289,48 @@ EXPORT_SYMBOL(ib_umem_odp_release);
>   * Map for DMA and insert a single page into the on-demand paging page tables.
>   *
>   * @umem: the umem to insert the page to.
> - * @page_index: index in the umem to add the page to.
> + * @dma_index: index in the umem to add the dma to.
>   * @page: the page struct to map and add.
>   * @access_mask: access permissions needed for this page.
>   * @current_seq: sequence number for synchronization with invalidations.
>   *               the sequence number is taken from
>   *               umem_odp->notifiers_seq.
>   *
> - * The function returns -EFAULT if the DMA mapping operation fails. It returns
> - * -EAGAIN if a concurrent invalidation prevents us from updating the page.
> + * The function returns -EFAULT if the DMA mapping operation fails.
>   *
> - * The page is released via put_page even if the operation failed. For on-demand
> - * pinning, the page is released whenever it isn't stored in the umem.
>   */
>  static int ib_umem_odp_map_dma_single_page(
>  		struct ib_umem_odp *umem_odp,
> -		unsigned int page_index,
> +		unsigned int dma_index,
>  		struct page *page,
> -		u64 access_mask,
> -		unsigned long current_seq)
> +		u64 access_mask)
>  {
>  	struct ib_device *dev = umem_odp->umem.ibdev;
> -	dma_addr_t dma_addr;
> -	int ret = 0;
> -
> -	if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
> -		ret = -EAGAIN;
> -		goto out;
> -	}
> -	if (!(umem_odp->dma_list[page_index])) {
> -		dma_addr =
> -			ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
> -					DMA_BIDIRECTIONAL);
> -		if (ib_dma_mapping_error(dev, dma_addr)) {
> -			ret = -EFAULT;
> -			goto out;
> -		}
> -		umem_odp->dma_list[page_index] = dma_addr | access_mask;
> -		umem_odp->page_list[page_index] = page;
> +	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
> +
> +	if (!*dma_addr) {
> +		*dma_addr = ib_dma_map_page(dev, page, 0,
> +					    1 << umem_odp->page_shift,
> +					    DMA_BIDIRECTIONAL);
> +		if (ib_dma_mapping_error(dev, *dma_addr))
> +			return -EFAULT;

This leaves *dma_addr set to the error value returned by ib_dma_map_page(),
which means the next try to map it will fail the if (!*dma_addr) check and
produce a corrupted dma address.

*dma_addr should be set to 0 here

Jason
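
[Editor's note: for illustration only, a minimal sketch of the error path
with the reset Jason suggests, reusing just the names from the quoted hunk;
the rest of the function is omitted and not part of the patch.]

	if (!*dma_addr) {
		*dma_addr = ib_dma_map_page(dev, page, 0,
					    1 << umem_odp->page_shift,
					    DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, *dma_addr)) {
			/* Clear the slot so a later fault retries the
			 * mapping instead of reusing the error value as
			 * a dma address.
			 */
			*dma_addr = 0;
			return -EFAULT;
		}
		/* success path continues as in the quoted patch */
	}

Zero works as the "not mapped" sentinel here because the if (!*dma_addr)
test treats any non-zero entry in dma_list[] as an existing mapping.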