From: Jerome Glisse <jglisse@xxxxxxxxxx>

The sa allocator is supposed to be a ring allocator, i.e. allocations
happen first at the end and, when there is no more room there, we
start again at the beginning. This patch makes the code match this
design.

sa_manager keeps track of the start & end holes. It first tries to
allocate in the end hole; if that fails it allocates in the start
hole; if that also fails it returns (the caller is expected to retry).

When freeing, we need to make sure that we properly grow the end hole
and the start hole. We take advantage of the fact that the sa_bo list
is ordered by offset: when we free an sa_bo, the previous entry in the
list is also the sa_bo located just before the one we are freeing, and
conversely for the next entry.

Signed-off-by: Jerome Glisse <jglisse@xxxxxxxxxx>
---
 drivers/gpu/drm/radeon/radeon.h    |    3 +
 drivers/gpu/drm/radeon/radeon_sa.c |  101 ++++++++++++++++++++++++------------
 2 files changed, 71 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 34c1041..ad12ef8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -385,6 +385,9 @@ struct radeon_sa_manager {
 	struct radeon_bo	*bo;
 	struct list_head	sa_bo;
 	unsigned		size;
+	unsigned		ehole_size;
+	unsigned		shole_size;
+	unsigned		ehole_offset;
 	uint64_t		gpu_addr;
 	void			*cpu_ptr;
 	uint32_t		domain;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 88906c9..8c0b3e6 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -40,6 +40,9 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 	mutex_init(&sa_manager->mutex);
 	sa_manager->bo = NULL;
 	sa_manager->size = size;
+	sa_manager->shole_size = 0;
+	sa_manager->ehole_size = size;
+	sa_manager->ehole_offset = 0;
 	sa_manager->domain = domain;
 	INIT_LIST_HEAD(&sa_manager->sa_bo);
@@ -134,61 +137,93 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 		     struct radeon_sa_bo *sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct radeon_sa_bo *tmp;
-	struct list_head *head;
 	unsigned offset = 0, wasted = 0;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
+
 	mutex_lock(&sa_manager->mutex);
 
-	/* no one ? */
-	if (list_empty(&sa_manager->sa_bo)) {
-		head = &sa_manager->sa_bo;
+	wasted = (align - (sa_manager->ehole_offset % align)) % align;
+	if ((size + wasted) < sa_manager->ehole_size) {
+		/* keep the sa_bo list ordered by offset */
+		list_add_tail(&sa_bo->list, &sa_manager->sa_bo);
+		offset = sa_manager->ehole_offset + wasted;
+		sa_manager->ehole_offset += wasted + size;
+		sa_manager->ehole_size -= wasted + size;
 		goto out;
 	}
-	/* look for a hole big enough */
-	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
-		/* room before this object ? */
-		if (offset < tmp->offset && (tmp->offset - offset) >= size) {
-			head = tmp->list.prev;
-			goto out;
-		}
-		offset = tmp->offset + tmp->size;
-		wasted = offset % align;
-		if (wasted) {
-			offset += align - wasted;
-		}
-	}
-	/* room at the end ? */
-	head = sa_manager->sa_bo.prev;
-	tmp = list_entry(head, struct radeon_sa_bo, list);
-	offset = tmp->offset + tmp->size;
-	wasted = offset % align;
-	if (wasted) {
-		offset += wasted = align - wasted;
-	}
-	if ((sa_manager->size - offset) < size) {
-		/* failed to find somethings big enough */
-		mutex_unlock(&sa_manager->mutex);
-		return -ENOMEM;
+	if (size < sa_manager->shole_size) {
+		/* keep the sa_bo list ordered by offset */
+		list_add(&sa_bo->list, &sa_manager->sa_bo);
+		sa_manager->shole_size = 0;
+		offset = 0;
+		goto out;
 	}
+
+	mutex_unlock(&sa_manager->mutex);
+	return -ENOMEM;
+
 out:
 	sa_bo->manager = sa_manager;
 	sa_bo->offset = offset;
 	sa_bo->size = size;
-	list_add(&sa_bo->list, head);
 	mutex_unlock(&sa_manager->mutex);
 	return 0;
 }
 
 void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
 {
-	mutex_lock(&sa_bo->manager->mutex);
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
+	struct radeon_sa_bo *tmp;
+
+	mutex_lock(&sa_manager->mutex);
+
+	/* try first to grow the end hole, as we want to allocate from the
+	 * end first
+	 */
+	if ((sa_bo->offset + sa_bo->size) == sa_manager->ehole_offset) {
+		if (sa_bo->list.prev != &sa_manager->sa_bo) {
+			/* all sa_bo are in the list in offset order, so if
+			 * there is an sa_bo before the one we are freeing,
+			 * grow the end hole down to the end of that previous
+			 * sa_bo
+			 */
+			tmp = list_entry(sa_bo->list.prev, struct radeon_sa_bo, list);
+			sa_manager->ehole_offset = tmp->offset + tmp->size;
+			sa_manager->ehole_size = sa_manager->size - sa_manager->ehole_offset;
+		} else {
+			/* this bo was alone in the list */
+			sa_manager->ehole_offset = 0;
+			sa_manager->ehole_size = sa_manager->size;
+			sa_manager->shole_size = 0;
+		}
+	} else if (sa_bo->offset == sa_manager->shole_size) {
+		if (sa_bo->list.next != &sa_manager->sa_bo) {
+			/* all sa_bo are in the list in offset order, so if
+			 * there is an sa_bo after the one we are freeing,
+			 * grow the start hole up to the beginning of that
+			 * next sa_bo
+			 */
+			tmp = list_entry(sa_bo->list.next, struct radeon_sa_bo, list);
+			sa_manager->shole_size = tmp->offset;
+		} else {
+			/* this case should never happen: it means this sa_bo
+			 * was alone in the list, so
+			 * (sa_bo->offset + sa_bo->size) == sa_manager->ehole_offset
+			 * should hold and we should have taken the previous
+			 * branch; restore a sane state and print a warning
+			 */
+			sa_manager->ehole_offset = 0;
+			sa_manager->ehole_size = sa_manager->size;
+			sa_manager->shole_size = 0;
+			dev_warn(rdev->dev, "sa allocator structure corrupted\n");
+		}
+	}
 	list_del_init(&sa_bo->list);
-	mutex_unlock(&sa_bo->manager->mutex);
+
+	mutex_unlock(&sa_manager->mutex);
 }
 
 #if defined(CONFIG_DEBUG_FS)
-- 
1.7.7.6

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel
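For illustration only, and not part of the patch: below is a minimal,
self-contained userspace sketch of the two-hole ring allocation scheme the
commit message describes. The names (struct hole_state, hole_alloc) are
invented for this sketch, and the free/coalescing path, which relies on the
offset-ordered sa_bo list, is omitted.

    /* Illustrative sketch only -- not radeon code. */
    #include <stdio.h>

    struct hole_state {
    	unsigned size;          /* total size of the managed buffer     */
    	unsigned ehole_offset;  /* end hole covers [ehole_offset, size) */
    	unsigned ehole_size;    /* always size - ehole_offset           */
    	unsigned shole_size;    /* start hole covers [0, shole_size)    */
    };

    /* Return 0 and store the offset on success, -1 when the caller must retry. */
    static int hole_alloc(struct hole_state *s, unsigned size, unsigned align,
    		      unsigned *offset)
    {
    	unsigned wasted = (align - (s->ehole_offset % align)) % align;

    	/* try the end hole first so allocations grow towards the end */
    	if (size + wasted <= s->ehole_size) {
    		*offset = s->ehole_offset + wasted;
    		s->ehole_offset += wasted + size;
    		s->ehole_size -= wasted + size;
    		return 0;
    	}
    	/* then fall back to the start hole, placing the block at offset 0 */
    	if (size <= s->shole_size) {
    		*offset = 0;
    		s->shole_size = 0;
    		return 0;
    	}
    	return -1;
    }

    int main(void)
    {
    	struct hole_state s = { .size = 1024, .ehole_offset = 0,
    				.ehole_size = 1024, .shole_size = 0 };
    	unsigned off;

    	if (!hole_alloc(&s, 100, 16, &off))
    		printf("first allocation at offset %u\n", off);   /* 0   */
    	if (!hole_alloc(&s, 200, 16, &off))
    		printf("second allocation at offset %u\n", off);  /* 112 */
    	return 0;
    }

A real caller would treat the -1 return the way the commit message describes:
wait for older allocations to be freed, then retry.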