+ return -ENODEV;
+
+ if (!dma_chanptr->device || !dmadev->dev) {
+ dmadev->device_free_chan_resources(dma_chanptr);
+ return -ENODEV;
+ }
+
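+ /* Build a scatterlist with nents entries to hold the source data */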
+ ret = sg_alloc_table(&sg_table, nents, GFP_KERNEL);
+ if (ret) {
+ err = ret;
+ goto sg_table_alloc_failed;
+ }
+
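+ /*
+ * Give each scatterlist entry an equal share of the transfer size,
+ * rounded up so that it divides evenly across nents entries.
+ */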
+ for_each_sg(sg_table.sgl, sg, nents, i) {
+ u64 alloc_sz;
+ void *cpu_addr;
+
+ alloc_sz = round_up(size, nents);
+ do_div(alloc_sz, nents);
+ cpu_addr = kmalloc(alloc_sz, GFP_KERNEL);
+
+ if (!cpu_addr) {
+ err = -ENOMEM;
+ goto sg_buf_alloc_failed;
+ }
+
+ dev_dbg(dmadev->dev, "set sg buf[%d] :%p\n", i, cpu_addr);
+ sg_set_buf(sg, cpu_addr, alloc_sz);
+ }
+
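+ /* One contiguous destination buffer receives all source segments */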
+ dest_buf = kmalloc(round_up(size, nents), GFP_KERNEL);
+ if (!dest_buf) {
+ err = -ENOMEM;
+ goto dst_alloc_failed;
+ }
+ dev_dbg(dmadev->dev, "dest:%p\n", dest_buf);
+
+ /* Fill in src buffer */
+ count = 0;
+ for_each_sg(sg_table.sgl, sg, nents, i) {
+ src_buf = sg_virt(sg);
+ dev_dbg(dmadev->dev,
+ "set src[%d, %p] = %d\n", i, src_buf, count);
+
+ for (j = 0; j < sg_dma_len(sg); j++)
+ src_buf[j] = count++;
+ }
+
+ /*
+ * dma_map_sg cleans and invalidates the cache on arm64 when
+ * DMA_TO_DEVICE is selected for the source. That's why the
+ * mapping has to be done after the data is copied.
+ */
+ map_count = dma_map_sg(dmadev->dev, sg_table.sgl, nents,
+ DMA_TO_DEVICE);
+ if (!map_count) {
+ err = -EINVAL;
+ goto src_map_failed;
+ }
+
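+ /* Map the destination buffer for device writes */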
+ dest_dma = dma_map_single(dmadev->dev, dest_buf,
+ size, DMA_FROM_DEVICE);
+
+ err = dma_mapping_error(dmadev->dev, dest_dma);
+ if (err)
+ goto dest_map_failed;
+
+ /* check scatter gather list contents */
+ for_each_sg(sg_table.sgl, sg, map_count, i)
+ dev_dbg(dmadev->dev,
+ "[%d/%d] src va=%p, iova = %pa len:%d\n",
+ i, map_count, sg_virt(sg), &sg_dma_address(sg),
+ sg_dma_len(sg));
+
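+ /*
+ * Queue one memcpy descriptor per mapped segment, packing the
+ * segments back to back into the destination buffer.
+ */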
+ dest_dma_it = dest_dma;
+ for_each_sg(sg_table.sgl, sg, map_count, i) {
+ src_buf = sg_virt(sg);
+ src_dma = sg_dma_address(sg);
+ dev_dbg(dmadev->dev, "src_dma: %pad dest_dma:%pad\n",
+ &src_dma, &dest_dma_it);
+
+ tx = dmadev->device_prep_dma_memcpy(dma_chanptr, dest_dma_it,
+ src_dma, sg_dma_len(sg), flags);
+ if (!tx) {
+ dev_err(dmadev->dev,
+ "Self-test sg failed, disabling\n");
+ err = -ENODEV;
+ goto prep_memcpy_failed;
+ }
+
+ tx->callback_param = &result;
+ tx->callback = dma_selftest_complete;
+ cookie = tx->tx_submit(tx);
+ dest_dma_it += sg_dma_len(sg);
+ }
+
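+ /* Push the submitted descriptors to the hardware */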
+ dmadev->device_issue_pending(dma_chanptr);
+
+ /*
+ * It is assumed that the hardware can move the data and signal
+ * the OS of the completion well within the 10 second timeout.
+ */
+ ret = wait_event_timeout(result.wq,
+ atomic_read(&result.counter) == map_count,
+ msecs_to_jiffies(10000));
+
+ if (ret <= 0) {
+ dev_err(dmadev->dev,
+ "Self-test sg copy timed out, disabling\n");
+ err = -ENODEV;
+ goto tx_status;
+ }
+ dev_dbg(dmadev->dev,
+ "Self-test complete signal received\n");
+
+ if (dmadev->device_tx_status(dma_chanptr, cookie, NULL) !=
+ DMA_COMPLETE) {
+ dev_err(dmadev->dev,
+ "Self-test sg status not complete, disabling\n");
+ err = -ENODEV;
+ goto tx_status;
+ }
+
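+ /* Hand ownership of the destination buffer back to the CPU before reading it */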
+ dma_sync_single_for_cpu(dmadev->dev, dest_dma, size,
+ DMA_FROM_DEVICE);
+
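+ /* Compare each source segment against its slice of the destination buffer */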
+ count = 0;
+ for_each_sg(sg_table.sgl, sg, map_count, i) {
+ src_buf = sg_virt(sg);
+ if (memcmp(src_buf, &dest_buf[count], sg_dma_len(sg)) == 0) {
+ count += sg_dma_len(sg);
+ continue;
+ }
+
+ for (j = 0; j < sg_dma_len(sg); j++) {
+ if (src_buf[j] != dest_buf[count]) {
+ dev_dbg(dmadev->dev,
+ "[%d, %d] (%p) src :%x dest (%p):%x cnt:%d\n",
+ i, j, &src_buf[j], src_buf[j],
+ &dest_buf[count], dest_buf[count],
+ count);
+ dev_err(dmadev->dev,
+ "Self-test copy failed compare, disabling\n");
+ err = -EFAULT;
+ goto compare_failed;