Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
 drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
 drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
 drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
 10 files changed, 46 insertions(+), 51 deletions(-)
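
For anyone unfamiliar with the __builtin_types_compatible_p trick, below is a
minimal standalone sketch (not the driver's code) of the type-dispatch idiom
that the __I915__() hunk in i915_drv.h extends to cover GEM objects. The
struct and helper names here are placeholders chosen for illustration, not the
i915 types; like the original macro it relies on the GCC/Clang typeof and
statement-expression extensions.

/*
 * Illustrative sketch only: a self-contained model of dispatching on the
 * static type of a pointer, so that one "to_priv()" spelling accepts the
 * private struct, the device, or an object hanging off the device.
 */
#include <stdio.h>

struct priv { int id; };
struct dev  { struct priv *dev_private; };
struct obj  { struct dev *dev; };

static inline struct priv *dev_to_priv(struct dev *d) { return d->dev_private; }
static inline struct priv *obj_to_priv(struct obj *o) { return dev_to_priv(o->dev); }

/* Dispatch on the static type of the pointer, as __I915__() does. */
#define to_priv(p) ({ \
	struct priv *__p; \
	if (__builtin_types_compatible_p(typeof(*p), struct priv)) \
		__p = (struct priv *)(p); \
	else if (__builtin_types_compatible_p(typeof(*p), struct dev)) \
		__p = dev_to_priv((struct dev *)(p)); \
	else if (__builtin_types_compatible_p(typeof(*p), struct obj)) \
		__p = obj_to_priv((struct obj *)(p)); \
	else \
		__p = (struct priv *)0; /* the real macro hits BUILD_BUG() here */ \
	__p; \
})

int main(void)
{
	struct priv pr = { .id = 42 };
	struct dev  d  = { .dev_private = &pr };
	struct obj  o  = { .dev = &d };

	/* All three spellings resolve to the same private pointer. */
	printf("%d %d %d\n", to_priv(&pr)->id, to_priv(&d)->id, to_priv(&o)->id);
	return 0;
}
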
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e38000f..33ddcdf6d046 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
@@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_engine(engine, dev_priv, i)
+ for_each_engine(engine, obj, i)
seq_printf(m, "%x ",
i915_gem_request_get_seqno(obj->last_read_req[i]));
seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
- i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+ i915_cache_level_str(to_i915(obj), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
- if (USES_FULL_PPGTT(obj->base.dev)) {
+ if (USES_FULL_PPGTT(obj)) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
struct i915_hw_ppgtt *ppgtt;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0c9fe00d3e83..92365f047e53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+static inline struct drm_i915_private *
+__obj_to_i915(const struct drm_i915_gem_object *obj)
+{
+ return __to_i915(obj->base.dev);
+}
+
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
@@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
__p = (struct drm_i915_private *)p; \
else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
__p = __to_i915((struct drm_device *)p); \
+ else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
+ __p = __obj_to_i915((struct drm_i915_gem_object *)p); \
else \
BUILD_BUG(); \
__p; \
@@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
- (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
+#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
@@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8588c83abb35..710a6bbc985e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -361,14 +361,12 @@ out:
void *i915_gem_object_alloc(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
+ return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- kmem_cache_free(dev_priv->objects, obj);
+ kmem_cache_free(to_i915(obj)->objects, obj);
}
static int
@@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
int ret;
if (drm_vma_node_has_offset(&obj->base.vma_node))
@@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
int page_count, i;
struct address_space *mapping;
struct sg_table *st;
@@ -2372,7 +2370,7 @@ err_pages:
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
@@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
- list_move_tail(&obj->global_list,
- &to_i915(obj->base.dev)->mm.bound_list);
+ list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!list_empty(&vma->vm_link))
@@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (!i915_semaphore_is_enabled(obj->base.dev)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj);
ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
@@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
int ret;
if (list_empty(&vma->obj_link))
@@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+ &to_i915(obj)->ggtt.base.inactive_list);
return 0;
}
@@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
goto err_unpin_display;
@@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->ggtt.mappable_end);
+ to_i915(obj)->ggtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
@@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
struct i915_vma *vma;
unsigned bound;
int ret;
@@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
- i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+ i915_gem_info_add_obj(to_i915(obj), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
struct i915_vma *vma, *next;
intel_runtime_pm_get(dev_priv);
@@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
list_del(&vma->obj_link);
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+ kmem_cache_free(to_i915(vma->obj)->vmas, vma);
}
static void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 374a0cb7a092..39ed403b9de3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(obj) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj);
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
@@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_INFO(obj)->gen >= 8) {
offset += sizeof(uint32_t);
if (offset_in_page(offset) == 0) {
@@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
- struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = relocation_target(reloc, target_offset);
char *vaddr;
@@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
reloc->offset >> PAGE_SHIFT));
clflush_write32(vaddr + page_offset, lower_32_bits(delta));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_INFO(obj)->gen >= 8) {
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
if (page_offset == 0) {
@@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
- struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
+ if (unlikely(IS_GEN6(obj) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+ obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
@@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(vma->obj))