diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8721fc..f0aa3d4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -174,7 +174,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_puts(m, ")");
}
if (obj->stolen)
- seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+ seq_printf(m, " (stolen: %08llx)", obj->stolen->base.start);
if (obj->pin_display || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_display)
@@ -253,9 +253,9 @@ static int obj_rank_by_stolen(void *priv,
struct drm_i915_gem_object *b =
container_of(B, struct drm_i915_gem_object, obj_exec_link);
- if (a->stolen->start < b->stolen->start)
+ if (a->stolen->base.start < b->stolen->base.start)
return -1;
- if (a->stolen->start > b->stolen->start)
+ if (a->stolen->base.start > b->stolen->base.start)
return 1;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dcdfb97..479703b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -841,6 +841,18 @@ struct i915_ctx_hang_stats {
 	bool banned;
 };
+/**
+ * struct i915_stolen_node - bookkeeping for an object backed by stolen memory
+ * @base: allocated range inside the stolen drm_mm address space
+ * @mm_link: entry in dev_priv->mm.stolen_list while marked purgeable
+ * @obj: back-pointer to the owning GEM object, used during eviction
+ */
+struct i915_stolen_node {
+	struct drm_mm_node base;
+	struct list_head mm_link;
+	struct drm_i915_gem_object *obj;
+};
+
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
@@ -1251,6 +1257,13 @@ struct i915_gem_mm {
*/
struct list_head unbound_list;
+ /**
+ * List of stolen objects that have been marked as purgeable and
+ * thus available for reaping if we need more space for a new
+ * allocation. Ordered by time of marking purgeable.
+ */
+ struct list_head stolen_list;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -2031,7 +2044,7 @@ struct drm_i915_gem_object {
struct list_head vma_list;
/** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
+ struct i915_stolen_node *stolen;
struct list_head global_list;
struct list_head ring_list[I915_NUM_RINGS];
@@ -2039,6 +2052,8 @@ struct drm_i915_gem_object {
struct list_head obj_exec_link;
struct list_head batch_pool_link;
+ /** Used during stolen memory allocations to temporarily hold a ref */
+ struct list_head stolen_link;
/**
* This is set if the object is on the active lists (has pending
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 05505de..8a508cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4411,6 +4411,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
+ if (obj->stolen) {
+ switch (obj->madv) {
+ case I915_MADV_WILLNEED:
+ list_del_init(&obj->stolen->mm_link);
+ break;
+ case I915_MADV_DONTNEED:
+ list_move(&obj->stolen->mm_link,
+ &dev_priv->mm.stolen_list);
+ break;
+ default:
+ break;
+ }
+ }
+
args->retained = obj->madv != __I915_MADV_PURGED;
out:
@@ -4431,6 +4445,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
+ INIT_LIST_HEAD(&obj->stolen_link);
obj->ops = ops;
@@ -5046,6 +5061,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.stolen_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 366080b9..014d478 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -542,7 +542,8 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
+ list_del(&obj->stolen->mm_link);
+ i915_gem_stolen_remove_node(dev_priv, &obj->stolen->base);
kfree(obj->stolen);
obj->stolen = NULL;
}
@@ -555,7 +556,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
- struct drm_mm_node *stolen)
+ struct i915_stolen_node *stolen)
{
struct drm_i915_gem_object *obj;
@@ -563,11 +564,12 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOMEM);
- drm_gem_private_object_init(dev, &obj->base, stolen->size);
+ drm_gem_private_object_init(dev, &obj->base, stolen->base.size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
- stolen->start, stolen->size);
+ stolen->base.start,
+ stolen->base.size);
if (IS_ERR(obj->pages)) {
i915_gem_object_free(obj);
return (void*) obj->pages;
@@ -579,24 +581,123 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
+	stolen->obj = obj;
+	INIT_LIST_HEAD(&stolen->mm_link);
+
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	return obj;
 }
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u64 size)
+/* Mark a purgeable stolen object as a candidate for eviction and add its
+ * range to the current drm_mm eviction scan.  Returns true once the scan
+ * has collected enough space to satisfy the allocation.
+ */
+static bool
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+{
+	if (WARN_ON(obj->stolen == NULL))
+		return false;
+
+	if (obj->madv != I915_MADV_DONTNEED)
+		return false;
+
+	if (obj->pin_display)
+		return false;
+
+	list_add(&obj->stolen_link, unwind);
+	return drm_mm_scan_add_block(&obj->stolen->base);
+}
+
+/* Evict purgeable (I915_MADV_DONTNEED) stolen objects until a hole of
+ * @size bytes is available in the stolen drm_mm, preferring inactive
+ * objects over active ones.  Caller holds struct_mutex.
+ */
+static int
+stolen_evict(struct drm_i915_private *dev_priv, u64 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	struct drm_mm_node *stolen;
-	int ret;
+	struct list_head unwind, evict;
+	struct i915_stolen_node *iter;
+	int ret, active;
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return ERR_PTR(-ENODEV);
+	/* NOTE(review): node insert/remove elsewhere take mm.stolen_lock;
+	 * this scan relies on struct_mutex alone -- confirm that suffices.
+	 */
+	drm_mm_init_scan(&dev_priv->mm.stolen, size, 0, 0);
+	INIT_LIST_HEAD(&unwind);
+
+	/* Retire all requests before creating the evict list */
+	i915_gem_retire_requests(dev_priv->dev);
+
+	for (active = 0; active <= 1; active++) {
+		list_for_each_entry(iter, &dev_priv->mm.stolen_list, mm_link) {
+			if (iter->obj->active != active)
+				continue;
+
+			if (mark_free(iter->obj, &unwind))
+				goto found;
+		}
+	}
+
+found:
+	INIT_LIST_HEAD(&evict);
+	while (!list_empty(&unwind)) {
+		obj = list_first_entry(&unwind,
+				       struct drm_i915_gem_object,
+				       stolen_link);
+		list_del(&obj->stolen_link);
+
+		if (drm_mm_scan_remove_block(&obj->stolen->base)) {
+			list_add(&obj->stolen_link, &evict);
+			drm_gem_object_reference(&obj->base);
+		}
+	}
+
+	ret = 0;
+	while (!list_empty(&evict)) {
+		obj = list_first_entry(&evict,
+				       struct drm_i915_gem_object,
+				       stolen_link);
+		list_del(&obj->stolen_link);
+
+		if (ret == 0) {
+			struct i915_vma *vma, *vma_next;
+
+			list_for_each_entry_safe(vma, vma_next,
+						 &obj->vma_list,
+						 vma_link)
+				if (i915_vma_unbind(vma))
+					break;
+
+			/* Stolen pins its pages to prevent the
+			 * normal shrinker from processing stolen
+			 * objects.
+			 */
+			i915_gem_object_unpin_pages(obj);
+
+			ret = i915_gem_object_put_pages(obj);
+			if (ret == 0) {
+				i915_gem_object_release_stolen(obj);
+				obj->madv = __I915_MADV_PURGED;
+			} else {
+				i915_gem_object_pin_pages(obj);
+			}
+		}
+
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	return ret;
+}
+
+static struct i915_stolen_node *
+stolen_alloc(struct drm_i915_private *dev_priv, u64 size)
+{
+	struct i915_stolen_node *stolen;
+	int ret;
-	DRM_DEBUG_KMS("creating stolen object: size=%llx\n", size);
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
@@ -604,17 +693,60 @@ i915_gem_object_create_stolen(struct drm_device *dev, u64 size)
 	if (!stolen)
 		return ERR_PTR(-ENOMEM);
-	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+	ret = i915_gem_stolen_insert_node(dev_priv, &stolen->base, size, 4096);
+	if (ret == 0)
+		goto out;
+
+	/* No more stolen memory available, or too fragmented.
+	 * Try evicting purgeable objects and search again.
+	 */
+	ret = stolen_evict(dev_priv, size);
+	if (ret == 0)
+		ret = i915_gem_stolen_insert_node(dev_priv, &stolen->base,
+						  size, 4096);
+out:
 	if (ret) {
 		kfree(stolen);
 		return ERR_PTR(ret);
 	}
+	return stolen;
+}
+
+/**
+ * i915_gem_object_create_stolen() - creates object using the stolen memory
+ * @dev: drm device
+ * @size: size of the object requested
+ *
+ * i915_gem_object_create_stolen() tries to allocate memory for the object
+ * from the stolen memory region. If not enough memory is found, it tries
+ * evicting purgeable objects and searching again.
+ *
+ * Returns: Object pointer - success and error pointer - failure
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u64 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct i915_stolen_node *stolen;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return ERR_PTR(-ENODEV);
+
+	DRM_DEBUG_KMS("creating stolen object: size=%llx\n", size);
+
+	stolen = stolen_alloc(dev_priv, size);
+	if (IS_ERR(stolen))
+		return ERR_CAST(stolen);
+
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (!IS_ERR(obj))
 		return obj;
-	i915_gem_stolen_remove_node(dev_priv, stolen);
+	i915_gem_stolen_remove_node(dev_priv, &stolen->base);
 	kfree(stolen);
 	return obj;
 }
@@ -628,7 +760,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
- struct drm_mm_node *stolen;
+ struct i915_stolen_node *stolen;
struct i915_vma *vma;
int ret;
@@ -647,10 +779,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (!stolen)
return ERR_PTR(-ENOMEM);
- stolen->start = stolen_offset;
- stolen->size = size;
+ stolen->base.start = stolen_offset;
+ stolen->base.size = size;
mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ ret = drm_mm_reserve_node(&dev_priv->mm.stolen, &stolen->base);
mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
@@ -661,7 +793,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj = _i915_gem_object_create_stolen(dev, stolen);
if (IS_ERR(obj)) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(dev_priv, &stolen->base);
kfree(stolen);
return obj;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0afb819..c94b39b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5119,7 +5119,7 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
- dev_priv->vlv_pctx->stolen->start);
+ dev_priv->vlv_pctx->stolen->base.start);
}
@@ -5194,7 +5194,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
return;
}
- pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->base.start;
I915_WRITE(VLV_PCBR, pctx_paddr);
out: