On Mon, Jan 24, 2022 at 01:25:13PM +0100, Christian König wrote:
> Not just TT and VRAM.
>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> ---
>  drivers/gpu/drm/ttm/ttm_resource.c | 49 +++++++++---------------------
>  include/drm/ttm/ttm_device.h       |  2 --
>  include/drm/ttm/ttm_resource.h     |  4 +--
>  3 files changed, 16 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 9e68d36a1546..2ea8fb83377e 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -51,36 +51,23 @@ EXPORT_SYMBOL(ttm_lru_bulk_move_init);
>   */
>  void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
>  {
> -	unsigned i;
> -
> -	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
> -		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
> -		struct ttm_resource_manager *man;
> +	unsigned i, j;
>
> -		if (!pos->first)
> -			continue;
> +	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
> +		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
> +			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
> +			struct ttm_resource_manager *man;
>
> -		dma_resv_assert_held(pos->first->bo->base.resv);
> -		dma_resv_assert_held(pos->last->bo->base.resv);
> +			if (!pos->first)
> +				continue;
>
> -		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_TT);
> -		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
> -				    &pos->last->lru);
> -	}
> -
> -	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
> -		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
> -		struct ttm_resource_manager *man;
> +			dma_resv_assert_held(pos->first->bo->base.resv);
> +			dma_resv_assert_held(pos->last->bo->base.resv);
>
> -		if (!pos->first)
> -			continue;
> -
> -		dma_resv_assert_held(pos->first->bo->base.resv);
> -		dma_resv_assert_held(pos->last->bo->base.resv);
> -
> -		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_VRAM);
> -		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
> -				    &pos->last->lru);
> +			man = ttm_manager_type(pos->first->bo->bdev, i);
> +			list_bulk_move_tail(&man->lru[j], &pos->first->lru,
> +					    &pos->last->lru);
> +		}
>  	}
>  }
>  EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
> @@ -118,15 +105,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
>  	if (!bulk)
>  		return;
>
> -	switch (res->mem_type) {
> -	case TTM_PL_TT:
> -		ttm_lru_bulk_move_set_pos(&bulk->tt[bo->priority], res);
> -		break;
> -
> -	case TTM_PL_VRAM:
> -		ttm_lru_bulk_move_set_pos(&bulk->vram[bo->priority], res);
> -		break;
> -	}
> +	ttm_lru_bulk_move_set_pos(&bulk->pos[res->mem_type][bo->priority], res);
>  }
>
>  void ttm_resource_init(struct ttm_buffer_object *bo,
> diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
> index 0a4ddec78d8f..425150f35fbe 100644
> --- a/include/drm/ttm/ttm_device.h
> +++ b/include/drm/ttm/ttm_device.h
> @@ -30,8 +30,6 @@
>  #include <drm/ttm/ttm_resource.h>
>  #include <drm/ttm/ttm_pool.h>
>
> -#define TTM_NUM_MEM_TYPES 8
> -
>  struct ttm_device;
>  struct ttm_placement;
>  struct ttm_buffer_object;
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 13da5e337350..1556d1f62251 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -37,6 +37,7 @@
>  #include <drm/ttm/ttm_kmap_iter.h>
>
>  #define TTM_MAX_BO_PRIORITY 4U
> +#define TTM_NUM_MEM_TYPES 8
>
>  struct ttm_device;
>  struct ttm_resource_manager;
> @@ -216,8 +217,7 @@ struct ttm_lru_bulk_move_pos {
>   * Helper structure for bulk moves on the LRU list.
>   */
>  struct ttm_lru_bulk_move {
> -	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
> -	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
> +	struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];

256 bytes per client vm for embedding this seems acceptable. And we can
make this smarter if there's ever a need.

I didn't fully check your code movement (dinner is calling!) but lgtm.

Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>
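To make the embedding cost concrete, here is a rough driver-side sketch of
how a per-client VM would use this. example_vm and the helper names are
made up for illustration and are not part of the patch; it also assumes the
driver flushes the bulk move under bdev->lru_lock like the existing TTM
callers do. The whole structure is just TTM_NUM_MEM_TYPES x
TTM_MAX_BO_PRIORITY {first, last} pairs, so embedding one per VM stays
cheap:

#include <linux/spinlock.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical per-client VM embedding one bulk-move tracker. */
struct example_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
	/* ... driver specific state ... */
};

static void example_vm_init(struct example_vm *vm)
{
	/* Clears all pos[mem_type][priority] buckets. */
	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

/*
 * Resources were recorded into the buckets via
 * ttm_resource_move_to_lru_tail() while their reservations were held;
 * this replays the accumulated ranges onto the managers' LRU tails,
 * one list_bulk_move_tail() per non-empty bucket.
 */
static void example_vm_move_to_lru_tail(struct ttm_device *bdev,
					struct example_vm *vm)
{
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);
}

And since the bucket is now picked by res->mem_type directly, any manager a
driver registers gets bulk moves for free, not just TT and VRAM.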
>  };
>
>  /**
> --
> 2.25.1
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch