From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

There is a lot of code in i915_gem_gtt.c which repeatedly dereferences
either ggtt or ppgtt in order to get to the vm. Cache those accesses in
local variables for better readability.
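To illustrate, this is the shape of the conversion, excerpted from the
ppgtt_init() hunk below:

  Before:

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = &i915->drm.pdev->dev;

  After:

	struct i915_address_space *vm = &ppgtt->vm;

	vm->gt = gt;
	vm->i915 = i915;
	vm->dma = &i915->drm.pdev->dev;

The same mechanical substitution is applied throughout the file; no
functional change is intended.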
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 254 +++++++++++++++-------------
 1 file changed, 134 insertions(+), 120 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 516ffc4a521a..d09a4d9b71da 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1004,10 +1004,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
 {
 	struct i915_page_directory *pd;
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	struct i915_address_space *vm = &ppgtt->vm;
 	gen8_pte_t *vaddr;
 	bool ret;
 
-	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
+	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(vm));
 	pd = pdp->page_directory[idx->pdpe];
 	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
 	do {
@@ -1038,7 +1039,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
 				break;
 			}
 
-			GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
+			GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(vm));
 			pd = pdp->page_directory[idx->pdpe];
 		}
 
@@ -1352,16 +1353,17 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
 
 static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
 {
+	struct i915_address_space *vm = &ppgtt->vm;
 	int i;
 
 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
-		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
+		if (ppgtt->pml4.pdps[i] == vm->scratch_pdp)
 			continue;
 
-		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
+		gen8_ppgtt_cleanup_3lvl(vm, ppgtt->pml4.pdps[i]);
 	}
 
-	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
+	cleanup_px(vm, &ppgtt->pml4);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1590,18 +1592,19 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
+	struct i915_address_space *vm = &ppgtt->vm;
 
-	ppgtt->vm.gt = gt;
-	ppgtt->vm.i915 = i915;
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
-	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+	vm->gt = gt;
+	vm->i915 = i915;
+	vm->dma = &i915->drm.pdev->dev;
+	vm->total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
 
-	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+	i915_address_space_init(vm, VM_CLASS_PPGTT);
 
-	ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
-	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
-	ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
-	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+	vm->vma_ops.bind_vma = ppgtt_bind_vma;
+	vm->vma_ops.unbind_vma = ppgtt_unbind_vma;
+	vm->vma_ops.set_pages = ppgtt_set_pages;
+	vm->vma_ops.clear_pages = clear_pages;
 }
 
 /*
@@ -1613,6 +1616,7 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
  */
 static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 {
+	struct i915_address_space *vm;
 	struct i915_ppgtt *ppgtt;
 	int err;
 
@@ -1620,6 +1624,8 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
+	vm = &ppgtt->vm;
+
 	ppgtt_init(ppgtt, &i915->gt);
 
 	/*
@@ -1628,30 +1634,30 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
 	 * for now.
 	 */
-	ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
+	vm->has_read_only = INTEL_GEN(i915) != 11;
 
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
 	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
-		ppgtt->vm.pt_kmap_wc = true;
+		vm->pt_kmap_wc = true;
 
-	err = gen8_init_scratch(&ppgtt->vm);
+	err = gen8_init_scratch(vm);
 	if (err)
 		goto err_free;
 
-	if (i915_vm_is_4lvl(&ppgtt->vm)) {
-		err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+	if (i915_vm_is_4lvl(vm)) {
+		err = setup_px(vm, &ppgtt->pml4);
 		if (err)
 			goto err_scratch;
 
-		gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
+		gen8_initialize_pml4(vm, &ppgtt->pml4);
 
-		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
-		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
-		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
+		vm->allocate_va_range = gen8_ppgtt_alloc_4lvl;
+		vm->insert_entries = gen8_ppgtt_insert_4lvl;
+		vm->clear_range = gen8_ppgtt_clear_4lvl;
 	} else {
-		err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+		err = __pdp_init(vm, &ppgtt->pdp);
 		if (err)
 			goto err_scratch;
 
@@ -1663,20 +1669,20 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 			}
 		}
 
-		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
-		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
-		ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
+		vm->allocate_va_range = gen8_ppgtt_alloc_3lvl;
+		vm->insert_entries = gen8_ppgtt_insert_3lvl;
+		vm->clear_range = gen8_ppgtt_clear_3lvl;
 	}
 
 	if (intel_vgpu_active(i915))
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
-	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+	vm->cleanup = gen8_ppgtt_cleanup;
 
 	return ppgtt;
 
 err_scratch:
-	gen8_free_scratch(&ppgtt->vm);
+	gen8_free_scratch(vm);
 err_free:
 	kfree(ppgtt);
 	return ERR_PTR(err);
@@ -2325,7 +2331,8 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
 
 static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *i915 = vm->i915;
 
 	/* Don't bother messing with faults pre GEN6 as we have little
 	 * documentation supporting that it's a good idea.
@@ -2333,9 +2340,9 @@ static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
 	if (INTEL_GEN(i915) < 6)
 		return;
 
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+	intel_gt_check_and_clear_faults(vm->gt);
 
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+	vm->clear_range(vm, 0, vm->total);
 
 	ggtt->invalidate(ggtt);
 }
@@ -2837,16 +2844,17 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
 
 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
 {
+	struct i915_address_space *vm = &ggtt->vm;
 	u64 size;
 	int ret;
 
-	if (!USES_GUC(ggtt->vm.i915))
+	if (!USES_GUC(vm->i915))
 		return 0;
 
-	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
-	size = ggtt->vm.total - GUC_GGTT_TOP;
+	GEM_BUG_ON(vm->total <= GUC_GGTT_TOP);
+	size = vm->total - GUC_GGTT_TOP;
 
-	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+	ret = i915_gem_gtt_reserve(vm, &ggtt->uc_fw, size,
 				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
 				   PIN_NOEVICT);
 	if (ret)
@@ -2873,6 +2881,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 * of the aperture.
 	 */
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_address_space *vm = &ggtt->vm;
 	unsigned long hole_start, hole_end;
 	struct drm_mm_node *entry;
 	int ret;
@@ -2891,7 +2900,7 @@
 		return ret;
 
 	/* Reserve a mappable slot for our lockless error capture */
-	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+	ret = drm_mm_insert_node_in_range(&vm->mm, &ggtt->error_capture,
 					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
 					  DRM_MM_INSERT_LOW);
@@ -2908,15 +2917,14 @@
 		goto err_reserve;
 
 	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+	drm_mm_for_each_hole(entry, &vm->mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt->vm.clear_range(&ggtt->vm, hole_start,
-				     hole_end - hole_start);
+		vm->clear_range(vm, hole_start, hole_end - hole_start);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+	vm->clear_range(vm, vm->total - PAGE_SIZE, PAGE_SIZE);
 
 	if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
 		ret = init_aliasing_ppgtt(dev_priv);
@@ -2940,15 +2948,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_address_space *vm = &ggtt->vm;
 	struct i915_vma *vma, *vn;
 	struct pagevec *pvec;
 
-	ggtt->vm.closed = true;
+	vm->closed = true;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	fini_aliasing_ppgtt(dev_priv);
 
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
 
 	if (drm_mm_node_allocated(&ggtt->error_capture))
@@ -2956,12 +2965,12 @@
 
 	ggtt_release_guc_top(ggtt);
 
-	if (drm_mm_initialized(&ggtt->vm.mm)) {
+	if (drm_mm_initialized(&vm->mm)) {
 		intel_vgt_deballoon(ggtt);
-		i915_address_space_fini(&ggtt->vm);
+		i915_address_space_fini(vm);
 	}
 
-	ggtt->vm.cleanup(&ggtt->vm);
+	vm->cleanup(vm);
 
 	pvec = &dev_priv->mm.wc_stash.pvec;
 	if (pvec->nr) {
@@ -3013,7 +3022,8 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *dev_priv = vm->i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	phys_addr_t phys_addr;
 	int ret;
@@ -3037,7 +3047,7 @@
 		return -ENOMEM;
 	}
 
-	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+	ret = setup_scratch_page(vm, GFP_DMA32);
 	if (ret) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
@@ -3045,9 +3055,8 @@
 		return ret;
 	}
 
-	ggtt->vm.scratch_pte =
-		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
-				    I915_CACHE_NONE, 0);
+	vm->scratch_pte =
+		vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_NONE, 0);
 
 	return 0;
 }
@@ -3347,7 +3356,8 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *dev_priv = vm->i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	unsigned int size;
 	u16 snb_gmch_ctl;
@@ -3371,22 +3381,22 @@
 	else
 		size = gen8_get_total_gtt_size(snb_gmch_ctl);
 
-	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
-	ggtt->vm.cleanup = gen6_gmch_remove;
-	ggtt->vm.insert_page = gen8_ggtt_insert_page;
-	ggtt->vm.clear_range = nop_clear_range;
+	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+	vm->cleanup = gen6_gmch_remove;
+	vm->insert_page = gen8_ggtt_insert_page;
+	vm->clear_range = nop_clear_range;
 	if (intel_scanout_needs_vtd_wa(dev_priv))
-		ggtt->vm.clear_range = gen8_ggtt_clear_range;
+		vm->clear_range = gen8_ggtt_clear_range;
 
-	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+	vm->insert_entries = gen8_ggtt_insert_entries;
 
 	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
 	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
 	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
-		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
-		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
-		if (ggtt->vm.clear_range != nop_clear_range)
-			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+		vm->insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+		vm->insert_page = bxt_vtd_ggtt_insert_page__BKL;
+		if (vm->clear_range != nop_clear_range)
+			vm->clear_range = bxt_vtd_ggtt_clear_range__BKL;
 
 		/* Prevent recursively calling stop_machine() and deadlocks. */
 		dev_info(dev_priv->drm.dev,
@@ -3396,12 +3406,12 @@
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
-	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
+	vm->vma_ops.bind_vma = ggtt_bind_vma;
+	vm->vma_ops.unbind_vma = ggtt_unbind_vma;
+	vm->vma_ops.set_pages = ggtt_set_pages;
+	vm->vma_ops.clear_pages = clear_pages;
 
-	ggtt->vm.pte_encode = gen8_pte_encode;
+	vm->pte_encode = gen8_pte_encode;
 
 	setup_private_pat(dev_priv);
 
@@ -3410,7 +3420,8 @@
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *dev_priv = vm->i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	unsigned int size;
 	u16 snb_gmch_ctl;
@@ -3437,32 +3448,32 @@
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+	vm->total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
 
-	ggtt->vm.clear_range = nop_clear_range;
+	vm->clear_range = nop_clear_range;
 	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
-		ggtt->vm.clear_range = gen6_ggtt_clear_range;
-	ggtt->vm.insert_page = gen6_ggtt_insert_page;
-	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
-	ggtt->vm.cleanup = gen6_gmch_remove;
+		vm->clear_range = gen6_ggtt_clear_range;
+	vm->insert_page = gen6_ggtt_insert_page;
+	vm->insert_entries = gen6_ggtt_insert_entries;
+	vm->cleanup = gen6_gmch_remove;
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
 	if (HAS_EDRAM(dev_priv))
-		ggtt->vm.pte_encode = iris_pte_encode;
+		vm->pte_encode = iris_pte_encode;
 	else if (IS_HASWELL(dev_priv))
-		ggtt->vm.pte_encode = hsw_pte_encode;
+		vm->pte_encode = hsw_pte_encode;
 	else if (IS_VALLEYVIEW(dev_priv))
-		ggtt->vm.pte_encode = byt_pte_encode;
+		vm->pte_encode = byt_pte_encode;
 	else if (INTEL_GEN(dev_priv) >= 7)
-		ggtt->vm.pte_encode = ivb_pte_encode;
+		vm->pte_encode = ivb_pte_encode;
 	else
-		ggtt->vm.pte_encode = snb_pte_encode;
+		vm->pte_encode = snb_pte_encode;
 
-	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
+	vm->vma_ops.bind_vma = ggtt_bind_vma;
+	vm->vma_ops.unbind_vma = ggtt_unbind_vma;
+	vm->vma_ops.set_pages = ggtt_set_pages;
+	vm->vma_ops.clear_pages = clear_pages;
 
 	return ggtt_probe_common(ggtt, size);
 }
@@ -3474,7 +3485,8 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *dev_priv = vm->i915;
 	phys_addr_t gmadr_base;
 	int ret;
 
@@ -3484,24 +3496,24 @@
 		return -EIO;
 	}
 
-	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+	intel_gtt_get(&vm->total, &gmadr_base, &ggtt->mappable_end);
 
 	ggtt->gmadr =
 		(struct resource) DEFINE_RES_MEM(gmadr_base,
 						 ggtt->mappable_end);
 
 	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
-	ggtt->vm.insert_page = i915_ggtt_insert_page;
-	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
-	ggtt->vm.clear_range = i915_ggtt_clear_range;
-	ggtt->vm.cleanup = i915_gmch_remove;
+	vm->insert_page = i915_ggtt_insert_page;
+	vm->insert_entries = i915_ggtt_insert_entries;
+	vm->clear_range = i915_ggtt_clear_range;
+	vm->cleanup = i915_gmch_remove;
 
 	ggtt->invalidate = gmch_ggtt_invalidate;
 
-	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
+	vm->vma_ops.bind_vma = ggtt_bind_vma;
+	vm->vma_ops.unbind_vma = ggtt_unbind_vma;
+	vm->vma_ops.set_pages = ggtt_set_pages;
+	vm->vma_ops.clear_pages = clear_pages;
 
 	if (unlikely(ggtt->do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3512,11 +3524,12 @@
 static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
+	struct i915_address_space *vm = &ggtt->vm;
 	int ret;
 
-	ggtt->vm.gt = gt;
-	ggtt->vm.i915 = i915;
-	ggtt->vm.dma = &i915->drm.pdev->dev;
+	vm->gt = gt;
+	vm->i915 = i915;
+	vm->dma = &i915->drm.pdev->dev;
 
 	if (INTEL_GEN(i915) <= 5)
 		ret = i915_gmch_probe(ggtt);
@@ -3527,24 +3540,23 @@
 	if (ret)
 		return ret;
 
-	if ((ggtt->vm.total - 1) >> 32) {
+	if ((vm->total - 1) >> 32) {
 		DRM_ERROR("We never expected a Global GTT with more than 32bits"
 			  " of address space! Found %lldM!\n",
-			  ggtt->vm.total >> 20);
-		ggtt->vm.total = 1ULL << 32;
-		ggtt->mappable_end =
-			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+			  vm->total >> 20);
+		vm->total = 1ULL << 32;
+		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, vm->total);
 	}
 
-	if (ggtt->mappable_end > ggtt->vm.total) {
+	if (ggtt->mappable_end > vm->total) {
 		DRM_ERROR("mappable aperture extends past end of GGTT,"
 			  " aperture=%pa, total=%llx\n",
-			  &ggtt->mappable_end, ggtt->vm.total);
-		ggtt->mappable_end = ggtt->vm.total;
+			  &ggtt->mappable_end, vm->total);
+		ggtt->mappable_end = vm->total;
 	}
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
+	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", vm->total >> 20);
 	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
 			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3577,20 +3589,21 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 
 static int ggtt_init_hw(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *i915 = vm->i915;
 	int ret = 0;
 
 	mutex_lock(&i915->drm.struct_mutex);
 
-	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+	i915_address_space_init(vm, VM_CLASS_GGTT);
 
-	ggtt->vm.is_ggtt = true;
+	vm->is_ggtt = true;
 
 	/* Only VLV supports read-only GGTT mappings */
-	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+	vm->has_read_only = IS_VALLEYVIEW(i915);
 
 	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
-		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
+		vm->mm.color_adjust = i915_gtt_color_adjust;
 
 	if (!io_mapping_init_wc(&ggtt->iomap,
 				ggtt->gmadr.start,
@@ -3681,24 +3694,25 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 
 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
+	struct i915_address_space *vm = &ggtt->vm;
 	struct i915_vma *vma, *vn;
 
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+	intel_gt_check_and_clear_faults(vm->gt);
 
-	mutex_lock(&ggtt->vm.mutex);
+	mutex_lock(&vm->mutex);
 
 	/* First fill our portion of the GTT with scratch pages */
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
+	vm->clear_range(vm, 0, vm->total);
+	vm->closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
 			continue;
 
-		mutex_unlock(&ggtt->vm.mutex);
+		mutex_unlock(&vm->mutex);
 		if (!i915_vma_unbind(vma))
 			goto lock;
 
@@ -3713,13 +3727,13 @@
 		}
 
 lock:
-		mutex_lock(&ggtt->vm.mutex);
+		mutex_lock(&vm->mutex);
 	}
 
-	ggtt->vm.closed = false;
+	vm->closed = false;
 	ggtt->invalidate(ggtt);
 
-	mutex_unlock(&ggtt->vm.mutex);
+	mutex_unlock(&vm->mutex);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -3730,7 +3744,7 @@
 		struct intel_ppat *ppat = &dev_priv->ppat;
 
 		bitmap_set(ppat->dirty, 0, ppat->max_entries);
-		dev_priv->ppat.update_hw(dev_priv);
+		ppat->update_hw(dev_priv);
 		return;
 	}
 }
-- 
2.20.1