Re: [PATCH v6 16/36] nds32: DMA mapping API

Hi, Arnd:

2018-01-23 16:23 GMT+08:00 Greentime Hu <green.hu@xxxxxxxxx>:
> Hi, Arnd:
>
> 2018-01-18 18:26 GMT+08:00 Arnd Bergmann <arnd@xxxxxxxx>:
>> On Mon, Jan 15, 2018 at 6:53 AM, Greentime Hu <green.hu@xxxxxxxxx> wrote:
>>> From: Greentime Hu <greentime@xxxxxxxxxxxxx>
>>>
>>> This patch adds support for the DMA mapping API. It uses dma_map_ops for
>>> flexibility.
>>>
>>> Signed-off-by: Vincent Chen <vincentc@xxxxxxxxxxxxx>
>>> Signed-off-by: Greentime Hu <greentime@xxxxxxxxxxxxx>
>>
>> I'm still unhappy about the way the cache flushes are done here as discussed
>> before. It's not a show-stopper, but no Ack from me.
>
> How about this implementation?
>
> static void
> nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
>                               size_t size, enum dma_data_direction dir)
> {
>         unsigned long start = (unsigned long)phys_to_virt(handle);
>         unsigned long end = start + size;
>
>         switch (dir) {
>         case DMA_TO_DEVICE:     /* writeback only */
>                 break;
>         case DMA_FROM_DEVICE:   /* invalidate only */
>         case DMA_BIDIRECTIONAL: /* writeback and invalidate */
>                 cpu_dma_inval_range(start, end);
>                 break;
>         default:
>                 BUG();
>         }
> }
>
> static void
> nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
>                                  size_t size, enum dma_data_direction dir)
> {
>         unsigned long start = (unsigned long)phys_to_virt(handle);
>         unsigned long end = start + size;
>
>         switch (dir) {
>         case DMA_FROM_DEVICE:   /* invalidate only */
>                 break;
>         case DMA_TO_DEVICE:     /* writeback only */
>         case DMA_BIDIRECTIONAL: /* writeback and invalidate */
>                 cpu_dma_wb_range(start, end);
>                 break;
>         default:
>                 BUG();
>         }
> }

I am not sure if I understand it correctly, so I have listed all the
combinations below.

RAM to DEVICE
    before DMA => writeback cache
    after DMA => nop

DEVICE to RAM
    before DMA => nop
    after DMA => invalidate cache
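
The consistent_sync() helper below takes a "master" argument to say which
side needs to see the data next. As a minimal sketch, it assumes two
arch-local constants along these lines (the names FOR_CPU/FOR_DEVICE are
only illustrative, not taken from the patch):

enum dma_sync_target {
        FOR_CPU = 0,    /* make DMA'd data visible to the CPU */
        FOR_DEVICE = 1, /* make CPU writes visible to the device */
};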

static void consistent_sync(void *vaddr, size_t size, int direction, int master)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if (master == FOR_CPU) {
                switch (direction) {
                case DMA_TO_DEVICE:
                        break;
                case DMA_FROM_DEVICE:
                case DMA_BIDIRECTIONAL:
                        cpu_dma_inval_range(start, end);
                        break;
                default:
                        BUG();
                }
        } else {
                /* FOR_DEVICE */
                switch (direction) {
                case DMA_FROM_DEVICE:
                        break;
                case DMA_TO_DEVICE:
                case DMA_BIDIRECTIONAL:
                        cpu_dma_wb_range(start, end);
                        break;
                default:
                        BUG();
                }
        }
}

static void
nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
                              size_t size, enum dma_data_direction dir)
{
        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
}

static void
nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
                                 size_t size, enum dma_data_direction dir)
{
        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
}
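
If the scatterlist hooks take the same approach, they could simply walk the
list and reuse the helper. This is only a sketch of how that might look,
not code from the patch:

static void
nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                          int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        /* sync each segment towards the CPU */
        for_each_sg(sgl, sg, nents, i)
                consistent_sync(sg_virt(sg), sg->length, dir, FOR_CPU);
}

static void
nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        /* sync each segment towards the device */
        for_each_sg(sgl, sg, nents, i)
                consistent_sync(sg_virt(sg), sg->length, dir, FOR_DEVICE);
}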

static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                consistent_sync((void *)(page_address(page) + offset),
                                size, dir, FOR_DEVICE);
        return page_to_phys(page) + offset;
}

static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
}
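
For completeness, these callbacks would then be wired into a struct
dma_map_ops instance roughly like this (field names as in
include/linux/dma-mapping.h; the alloc/free and sg entries are left out
here, and the exact layout in the patch may differ):

struct dma_map_ops nds32_dma_ops = {
        .map_page               = nds32_dma_map_page,
        .unmap_page             = nds32_dma_unmap_page,
        .sync_single_for_cpu    = nds32_dma_sync_single_for_cpu,
        .sync_single_for_device = nds32_dma_sync_single_for_device,
};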