Re: [PATCH v4 05/10] drm/ttm: Add vmap/vunmap to TTM and TTM GEM helpers

On Mon, Oct 19, 2020 at 11:45:05AM +0200, Christian König wrote:
> Hi Thomas,
> 
> [SNIP]
> > > >    +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
> > > > +{
> > > > +    struct ttm_resource *mem = &bo->mem;
> > > > +    int ret;
> > > > +
> > > > +    ret = ttm_mem_io_reserve(bo->bdev, mem);
> > > > +    if (ret)
> > > > +        return ret;
> > > > +
> > > > +    if (mem->bus.is_iomem) {
> > > > +        void __iomem *vaddr_iomem;
> > > > +        unsigned long size = bo->num_pages << PAGE_SHIFT;
> > > Please use uint64_t here and make sure to cast bo->num_pages before
> > > shifting.
> > I thought the rule of thumb is to use u64 in source code. Yet TTM only
> > uses uint*_t types. Is there anything special about TTM?
> 
> The last I heard, you can use both, and my personal preference is the
> uint*_t types because they are part of a higher-level standard.

Yeah the only hard rule is that in uapi headers you need to use the __u64
and similar typedefs, to avoid cluttering the namespace for unrelated
stuff in userspace.

In the kernel, C99 types are perfectly fine, and I think they are slowly on
the rise.
-Daniel

> 
> > > We have a unit test that allocates an 8GB BO, and that should work on a
> > > 32-bit machine as well :)
> > > 
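
A minimal sketch of the overflow-safe computation being asked for here,
assuming bo->num_pages is a 32-bit quantity on such machines, so the widening
has to happen before the shift:

    /* widen before the shift so e.g. an 8GB BO doesn't overflow on 32-bit */
    uint64_t size = (uint64_t)bo->num_pages << PAGE_SHIFT;
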
> > > > +
> > > > +        if (mem->bus.addr)
> > > > +            vaddr_iomem = (void *)(((u8 *)mem->bus.addr));
> > After reading the patch again, I realized that this is the
> > 'ttm_bo_map_premapped' case and it's missing from _vunmap(). I see two
> > options here: ignore this case in _vunmap(), or do an ioremap()
> > unconditionally. Which one is preferable?
> 
> ioremap would be very very bad, so we should just do nothing.
> 
> Thanks,
> Christian.
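
One possible shape for the _vunmap side along those lines, as a sketch only;
it assumes mem->bus.addr still identifies the premapped case at unmap time:

    if (!map->is_iomem)
        vunmap(map->vaddr);
    else if (!bo->mem.bus.addr)
        iounmap(map->vaddr_iomem); /* only undo what _vmap ioremap()ed */
    dma_buf_map_clear(map);
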
> 
> > 
> > Best regards
> > Thomas
> > 
> > > > +        else if (mem->placement & TTM_PL_FLAG_WC)
> > > I've just nuked the TTM_PL_FLAG_WC flag in drm-misc-next. There is a new
> > > mem->bus.caching enum as a replacement.
> > > 
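
If it helps, a sketch of how that branch might look on top of drm-misc-next,
assuming the new ttm_write_combined value of the bus caching enum is what
replaces the flag:

    if (mem->bus.caching == ttm_write_combined)
        vaddr_iomem = ioremap_wc(mem->bus.offset, size);
    else
        vaddr_iomem = ioremap(mem->bus.offset, size);
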
> > > > +            vaddr_iomem = ioremap_wc(mem->bus.offset, size);
> > > > +        else
> > > > +            vaddr_iomem = ioremap(mem->bus.offset, size);
> > > > +
> > > > +        if (!vaddr_iomem)
> > > > +            return -ENOMEM;
> > > > +
> > > > +        dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
> > > > +
> > > > +    } else {
> > > > +        struct ttm_operation_ctx ctx = {
> > > > +            .interruptible = false,
> > > > +            .no_wait_gpu = false
> > > > +        };
> > > > +        struct ttm_tt *ttm = bo->ttm;
> > > > +        pgprot_t prot;
> > > > +        void *vaddr;
> > > > +
> > > > +        BUG_ON(!ttm);
> > > I think we can drop this; populate will just crash badly anyway.
> > > 
> > > > +
> > > > +        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
> > > > +        if (ret)
> > > > +            return ret;
> > > > +
> > > > +        /*
> > > > +         * We need to use vmap to get the desired page protection
> > > > +         * or to make the buffer object look contiguous.
> > > > +         */
> > > > +        prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
> > > The calling convention has changed in drm-misc-next as well, but it should
> > > be trivial to adapt.
> > > 
> > > Regards,
> > > Christian.
> > > 
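
Assuming the reworked helper now takes the BO and the resource rather than the
placement flags, the adapted call would presumably be along the lines of:

    prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
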
> > > > +        vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
> > > > +        if (!vaddr)
> > > > +            return -ENOMEM;
> > > > +
> > > > +        dma_buf_map_set_vaddr(map, vaddr);
> > > > +    }
> > > > +
> > > > +    return 0;
> > > > +}
> > > > +EXPORT_SYMBOL(ttm_bo_vmap);
> > > > +
> > > > +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map
> > > > *map)
> > > > +{
> > > > +    if (dma_buf_map_is_null(map))
> > > > +        return;
> > > > +
> > > > +    if (map->is_iomem)
> > > > +        iounmap(map->vaddr_iomem);
> > > > +    else
> > > > +        vunmap(map->vaddr);
> > > > +    dma_buf_map_clear(map);
> > > > +
> > > > +    ttm_mem_io_free(bo->bdev, &bo->mem);
> > > > +}
> > > > +EXPORT_SYMBOL(ttm_bo_vunmap);
> > > > +
> > > >    static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
> > > >                     bool dst_use_tt)
> > > >    {
> > > > diff --git a/include/drm/drm_gem_ttm_helper.h
> > > > b/include/drm/drm_gem_ttm_helper.h
> > > > index 118cef76f84f..7c6d874910b8 100644
> > > > --- a/include/drm/drm_gem_ttm_helper.h
> > > > +++ b/include/drm/drm_gem_ttm_helper.h
> > > > @@ -10,11 +10,17 @@
> > > >    #include <drm/ttm/ttm_bo_api.h>
> > > >    #include <drm/ttm/ttm_bo_driver.h>
> > > >    +struct dma_buf_map;
> > > > +
> > > >    #define drm_gem_ttm_of_gem(gem_obj) \
> > > >        container_of(gem_obj, struct ttm_buffer_object, base)
> > > >      void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int
> > > > indent,
> > > >                    const struct drm_gem_object *gem);
> > > > +int drm_gem_ttm_vmap(struct drm_gem_object *gem,
> > > > +             struct dma_buf_map *map);
> > > > +void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
> > > > +            struct dma_buf_map *map);
> > > >    int drm_gem_ttm_mmap(struct drm_gem_object *gem,
> > > >                 struct vm_area_struct *vma);
> > > >    diff --git a/include/drm/ttm/ttm_bo_api.h
> > > > b/include/drm/ttm/ttm_bo_api.h
> > > > index 37102e45e496..2c59a785374c 100644
> > > > --- a/include/drm/ttm/ttm_bo_api.h
> > > > +++ b/include/drm/ttm/ttm_bo_api.h
> > > > @@ -48,6 +48,8 @@ struct ttm_bo_global;
> > > >      struct ttm_bo_device;
> > > >    +struct dma_buf_map;
> > > > +
> > > >    struct drm_mm_node;
> > > >      struct ttm_placement;
> > > > @@ -494,6 +496,32 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
> > > > unsigned long start_page,
> > > >     */
> > > >    void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
> > > >    +/**
> > > > + * ttm_bo_vmap
> > > > + *
> > > > + * @bo: The buffer object.
> > > > + * @map: pointer to a struct dma_buf_map representing the map.
> > > > + *
> > > > + * Sets up a kernel virtual mapping, using ioremap or vmap to the
> > > > + * data in the buffer object. The parameter @map returns the virtual
> > > > + * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap().
> > > > + *
> > > > + * Returns
> > > > + * -ENOMEM: Out of memory.
> > > > + * -EINVAL: Invalid range.
> > > > + */
> > > > +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map);
> > > > +
> > > > +/**
> > > > + * ttm_bo_vunmap
> > > > + *
> > > > + * @bo: The buffer object.
> > > > + * @map: Object describing the map to unmap.
> > > > + *
> > > > + * Unmaps a kernel map set up by ttm_bo_vmap().
> > > > + */
> > > > +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map
> > > > *map);
> > > > +
> > > >    /**
> > > >     * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
> > > >     *
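
For reference, a minimal caller sketch of the new pair; hypothetical
driver-side code under the assumption that the mapping is only held briefly:

    struct dma_buf_map map;
    int ret;

    ret = ttm_bo_vmap(bo, &map);
    if (ret)
        return ret;
    /* use map.vaddr or map.vaddr_iomem depending on map.is_iomem */
    ttm_bo_vunmap(bo, &map);
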
> > > > diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
> > > > index fd1aba545fdf..2e8bbecb5091 100644
> > > > --- a/include/linux/dma-buf-map.h
> > > > +++ b/include/linux/dma-buf-map.h
> > > > @@ -45,6 +45,12 @@
> > > >     *
> > > >     *    dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
> > > >     *
> > > > + * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
> > > > + *
> > > > + * .. code-block:: c
> > > > + *
> > > > + *    dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
> > > > + *
> > > >     * Test if a mapping is valid with either dma_buf_map_is_set() or
> > > >     * dma_buf_map_is_null().
> > > >     *
> > > > @@ -118,6 +124,20 @@ static inline void dma_buf_map_set_vaddr(struct
> > > > dma_buf_map *map, void *vaddr)
> > > >        map->is_iomem = false;
> > > >    }
> > > >    +/**
> > > > + * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to
> > > > an address in I/O memory
> > > > + * @map:        The dma-buf mapping structure
> > > > + * @vaddr_iomem:    An I/O-memory address
> > > > + *
> > > > + * Sets the address and the I/O-memory flag.
> > > > + */
> > > > +static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
> > > > +                           void __iomem *vaddr_iomem)
> > > > +{
> > > > +    map->vaddr_iomem = vaddr_iomem;
> > > > +    map->is_iomem = true;
> > > > +}
> > > > +
> > > >    /**
> > > >     * dma_buf_map_is_equal - Compares two dma-buf mapping structures
> > > > for equality
> > > >     * @lhs:    The dma-buf mapping structure
> > > _______________________________________________
> > > dri-devel mailing list
> > > dri-devel@xxxxxxxxxxxxxxxxxxxxx
> > > https://lists.freedesktop.org/mailman/listinfo/dri-devel
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization



