Re: [RFC PATCH 02/16] drm/ttm/pool: Fix ttm_pool_alloc error path

On 15.02.23 at 19:02, Thomas Hellström wrote:
On Wed, 2023-02-15 at 18:31 +0100, Christian König wrote:
On 15.02.23 at 17:13, Thomas Hellström wrote:
When hitting an error, the error path forgot to unmap dma mappings and
I don't see where this happens?
From what I can tell, ttm_pool_page_allocated() maps the page for DMA.
If we later hit an error, ttm_pool_free_page() will leak the mapping.

Ah, I see. Good point.
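
For reference, the error path being discussed is roughly the pre-patch code
removed further down in the diff; the DMA mapping set up by
ttm_pool_page_allocated() is never undone before the page is freed
(paraphrased, comments added):

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		/* If ttm_pool_page_allocated() already mapped this page,
		 * nothing calls ttm_pool_unmap() here, so the DMA mapping
		 * is leaked when the page is freed.
		 */
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}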


could call set_pages_wb() on already uncached pages.
Yeah, but what's the problem?
Umm, at least if you try to set WC on an already WC'd page, the
set_pages_ code will spam dmesg with warnings.
Not sure whether set_pages_wb() on WB pages does the same, whether it
issues unnecessary global cache / TLB flushes, or whether that will
change in the future.
The point of avoiding set_pages_wb() on pages that are already WB is
that you don't have to check, and you don't have to care.
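
For reference, the diff below handles this by remembering which caching
state the current page actually has, so the error path frees it with the
matching attribute rather than always passing tt->caching. Condensed from
the hunks quoted below, with comments added:

	page_caching = tt->caching;	/* pages taken from the pool already carry tt->caching */
	pt = ttm_pool_select_type(pool, tt->caching, order);
	...
	page_caching = ttm_cached;	/* freshly allocated pages are still write-back */
	while (num_pages >= (1 << order) &&
	       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
	...
error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);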

Please just open code the error handling then. That helper function looks horribly complicated to me.

Alternatively, we could have a free function for a range of pages.

Regards,
Christian.



That said, __ttm_pool_free() is also used in upcoming patches.

/Thomas


Regards,
Christian.

Fix this by introducing a common __ttm_pool_free() function that
does the right thing.

Fixes: d099fc8f540a ("drm/ttm: new TT backend allocation pool v3")
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Dave Airlie <airlied@xxxxxxxxxx>
Cc: Madhav Chauhan <madhav.chauhan@xxxxxxx>
Cc: Christian Koenig <christian.koenig@xxxxxxx>
Cc: Huang Rui <ray.huang@xxxxxxx>
Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
---
   drivers/gpu/drm/ttm/ttm_pool.c | 74 +++++++++++++++++++++-------------
   1 file changed, 45 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index aa116a7bbae3..1cc7591a9542 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -367,6 +367,39 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
         return 0;
   }
+static void __ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt,
+                           struct page **caching_divide,
+                           enum ttm_caching initial_caching,
+                           enum ttm_caching subseq_caching,
+                           pgoff_t num_pages)
+{
+       enum ttm_caching caching = subseq_caching;
+       struct page **pages = tt->pages;
+       unsigned int order;
+       pgoff_t i, nr;
+
+       if (pool && caching_divide)
+               caching = initial_caching;
+
+       for (i = 0; i < num_pages; i += nr, pages += nr) {
+               struct ttm_pool_type *pt = NULL;
+
+               if (unlikely(caching_divide == pages))
+                       caching = subseq_caching;
+
+               order = ttm_pool_page_order(pool, *pages);
+               nr = (1UL << order);
+               if (tt->dma_address)
+                       ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+               pt = ttm_pool_select_type(pool, caching, order);
+               if (pt)
+                       ttm_pool_type_give(pt, *pages);
+               else
+                       ttm_pool_free_page(pool, caching, order, *pages);
+       }
+}
+
   /**
    * ttm_pool_alloc - Fill a ttm_tt object
    *
@@ -386,8 +419,9 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
         dma_addr_t *dma_addr = tt->dma_address;
         struct page **caching = tt->pages;
         struct page **pages = tt->pages;
+       enum ttm_caching page_caching;
         gfp_t gfp_flags = GFP_USER;
-       unsigned int i, order;
+       unsigned int order;
         struct page *p;
         int r;
@@ -410,6 +444,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
             order = min_t(unsigned int, order, __fls(num_pages))) {
                 struct ttm_pool_type *pt;
+               page_caching = tt->caching;
                pt = ttm_pool_select_type(pool, tt->caching, order);
                 p = pt ? ttm_pool_type_take(pt) : NULL;
                 if (p) {
@@ -418,6 +453,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                         if (r)
                                 goto error_free_page;
+                       caching = pages;
                         do {
                                r = ttm_pool_page_allocated(pool, order, p,
                                                            &dma_addr,
@@ -426,14 +462,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                                 if (r)
                                         goto error_free_page;
+                               caching = pages;
                                 if (num_pages < (1 << order))
                                         break;
                                p = ttm_pool_type_take(pt);
                         } while (p);
-                       caching = pages;
                 }
+               page_caching = ttm_cached;
                 while (num_pages >= (1 << order) &&
                       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
@@ -442,6 +479,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                                                           tt->caching);
                                 if (r)
                                         goto error_free_page;
+                               caching = pages;
                         }
                        r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
                                                    &num_pages, &pages);
@@ -468,15 +506,12 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
         return 0;
  error_free_page:
-       ttm_pool_free_page(pool, tt->caching, order, p);
+       ttm_pool_free_page(pool, page_caching, order, p);
  error_free_all:
         num_pages = tt->num_pages - num_pages;
-       for (i = 0; i < num_pages; ) {
-               order = ttm_pool_page_order(pool, tt->pages[i]);
-               ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
-               i += 1 << order;
-       }
+       __ttm_pool_free(pool, tt, caching, tt->caching, ttm_cached,
+                       num_pages);
        return r;
   }
@@ -492,27 +527,8 @@ EXPORT_SYMBOL(ttm_pool_alloc);
    */
   void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
   {
-       unsigned int i;
-
-       for (i = 0; i < tt->num_pages; ) {
-               struct page *p = tt->pages[i];
-               unsigned int order, num_pages;
-               struct ttm_pool_type *pt;
-
-               order = ttm_pool_page_order(pool, p);
-               num_pages = 1ULL << order;
-               if (tt->dma_address)
-                       ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
-               pt = ttm_pool_select_type(pool, tt->caching, order);
-               if (pt)
-                       ttm_pool_type_give(pt, tt->pages[i]);
-               else
-                       ttm_pool_free_page(pool, tt->caching, order,
-                                          tt->pages[i]);
-
-               i += num_pages;
-       }
+       __ttm_pool_free(pool, tt, NULL, tt->caching, tt->caching,
+                       tt->num_pages);
        while (atomic_long_read(&allocated_pages) > page_pool_size)
                 ttm_pool_shrink();
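
For reference, the two call sites above exercise the new helper like this;
as I read the diff, the caching_divide argument marks the point up to which
pages were already transitioned to the initial caching (condensed, comments
added):

	/* Alloc error path: pages before the "caching" divide were already
	 * switched to tt->caching; everything from the divide on is still
	 * write-back (ttm_cached).
	 */
	__ttm_pool_free(pool, tt, caching, tt->caching, ttm_cached, num_pages);

	/* ttm_pool_free(): every page carries tt->caching, so no divide. */
	__ttm_pool_free(pool, tt, NULL, tt->caching, tt->caching, tt->num_pages);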
