From: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>

Add support for DPT (display page table). DPT is a slightly peculiar
two-level page table scheme used for tiled scanout buffers (linear
still uses a direct ggtt mapping).

The plane surface address will point at a page in the DPT which holds
the PTEs for 512 actual pages. Thus we require 1/512 of the ggtt
address space compared to a direct ggtt mapping.

We create a new DPT address space for each framebuffer and track two
vmas (one for the DPT, another for the ggtt).

TODO:
- Is the i915_address_space approach sane?
- Maybe don't map the whole DPT to write the PTEs?
- Deal with remapping/rotation? Need to create a separate DPT for each
  remapped/rotated plane I guess. Or else we'd need to make the per-fb
  DPT large enough to support potentially several remapped/rotated
  vmas. How large should that be?

Signed-off-by: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>
Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@xxxxxxxxx>
Cc: Wilson Chris P <Chris.P.Wilson@xxxxxxxxx>
Cc: Tang CQ <cq.tang@xxxxxxxxx>
Cc: Auld Matthew <matthew.auld@xxxxxxxxx>
Reviewed-by: Uma Shankar <uma.shankar@xxxxxxxxx>
Reviewed-by: Wilson Chris P <Chris.P.Wilson@xxxxxxxxx>
Signed-off-by: Imre Deak <imre.deak@xxxxxxxxx>
---
 .../gpu/drm/i915/display/intel_atomic_plane.c |   7 +-
 drivers/gpu/drm/i915/display/intel_display.c  | 352 +++++++++++++++++-
 drivers/gpu/drm/i915/display/intel_display.h  |   1 +
 .../drm/i915/display/intel_display_types.h    |  15 +-
 drivers/gpu/drm/i915/display/intel_fbc.c      |   6 +-
 .../drm/i915/display/skl_universal_plane.c    |  19 +-
 drivers/gpu/drm/i915/gt/gen8_ppgtt.h          |   7 +
 drivers/gpu/drm/i915/gt/intel_ggtt.c          |   7 +-
 drivers/gpu/drm/i915/gt/intel_gtt.h           |   5 +
 9 files changed, 392 insertions(+), 27 deletions(-)
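For reference (not part of the patch): a standalone userspace sketch of the
1/512 sizing described above, assuming 8 byte PTEs and 4 KiB GTT pages as
elsewhere in i915. The 32 MiB framebuffer size and the macro names here are
just illustrative; the in-kernel sizing lives in intel_dpt_create() below.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_4K 4096ULL
#define PTE_SIZE     8ULL	/* one gen8-style PTE */

int main(void)
{
	uint64_t fb_size = 32ULL << 20;	/* example: 32 MiB tiled scanout buffer */
	uint64_t fb_pages = (fb_size + PAGE_SIZE_4K - 1) / PAGE_SIZE_4K;

	/* One PTE per scanout page; one 4 KiB DPT page holds 4096/8 = 512 PTEs. */
	uint64_t dpt_bytes = fb_pages * PTE_SIZE;
	dpt_bytes = (dpt_bytes + PAGE_SIZE_4K - 1) & ~(PAGE_SIZE_4K - 1);

	/* skl_surf_address() below correspondingly programs offset >> 9 for DPT fbs. */
	printf("fb: %llu pages -> dpt: %llu bytes (1/512 of %llu)\n",
	       (unsigned long long)fb_pages,
	       (unsigned long long)dpt_bytes,
	       (unsigned long long)fb_size);
	return 0;
}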
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7bfb26ca0bd07..36f52a1d7552f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -102,7 +102,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 	__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
-	intel_state->vma = NULL;
+	intel_state->ggtt_vma = NULL;
+	intel_state->dpt_vma = NULL;
 	intel_state->flags = 0;
 	/* add reference to fb */
@@ -125,7 +126,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
 				  struct drm_plane_state *state)
 {
 	struct intel_plane_state *plane_state = to_intel_plane_state(state);
-	drm_WARN_ON(plane->dev, plane_state->vma);
+
+	drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
+	drm_WARN_ON(plane->dev, plane_state->dpt_vma);
 	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
 	if (plane_state->hw.fb)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 5d53ee4c58f5b..99a921ea2e81b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -67,6 +67,7 @@
 #include "gem/i915_gem_object.h"
 #include "gt/intel_rps.h"
+#include "gt/gen8_ppgtt.h"
 #include "g4x_dp.h"
 #include "g4x_hdmi.h"
@@ -124,6 +125,176 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 struct drm_modeset_acquire_ctx *ctx);
+struct i915_dpt {
+	struct i915_address_space vm;
+
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct i915_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
+	GEM_BUG_ON(!i915_is_dpt(vm));
+	return container_of(vm, struct i915_dpt, vm);
+}
+
+#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+	writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+			    dma_addr_t addr,
+			    u64 offset,
+			    enum i915_cache_level level,
+			    u32 flags)
+{
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+	gen8_pte_t __iomem *base = dpt->iomem;
+
+	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+		     vm->pte_encode(addr, level, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+			       struct i915_vma *vma,
+			       enum i915_cache_level level,
+			       u32 flags)
+{
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+	gen8_pte_t __iomem *base = dpt->iomem;
+	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+	struct sgt_iter sgt_iter;
+	dma_addr_t addr;
+	int i;
+
+	/*
+	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read only page.
+	 */
+
+	i = vma->node.start / I915_GTT_PAGE_SIZE;
+	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+		gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
+static void dpt_clear_range(struct i915_address_space *vm,
+			    u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+			 struct i915_vm_pt_stash *stash,
+			 struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	u32 pte_flags;
+
+	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+	pte_flags = 0;
+	if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+		pte_flags |= PTE_READ_ONLY;
+	if (i915_gem_object_is_lmem(obj))
+		pte_flags |= PTE_LM;
+
+	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+	/*
+	 * Without aliasing PPGTT there's no difference between
+	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+	 * upgrade to both bound if we bind either to avoid double-binding.
+	 */
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+{
+	vm->clear_range(vm, vma->node.start, vma->size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+	i915_gem_object_put(dpt->obj);
+}
+
+static struct i915_address_space *
+intel_dpt_create(struct drm_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->dev);
+	size_t size = DIV_ROUND_UP_ULL(obj->size, 512);
+	struct drm_i915_gem_object *dpt_obj;
+	struct i915_address_space *vm;
+	struct i915_dpt *dpt;
+	int ret;
+
+	size = round_up(size, 4096);
+
+	if (HAS_LMEM(i915))
+		dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
+	else
+		dpt_obj = i915_gem_object_create_stolen(i915, size);
+	if (IS_ERR(dpt_obj))
+		return ERR_CAST(dpt_obj);
+
+	ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+	if (ret) {
+		i915_gem_object_put(dpt_obj);
+		return ERR_PTR(ret);
+	}
+
+	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
+	if (!dpt) {
+		i915_gem_object_put(dpt_obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	vm = &dpt->vm;
+
+	vm->gt = &i915->gt;
+	vm->i915 = i915;
+	vm->dma = i915->drm.dev;
+	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+	vm->is_dpt = true;
+
+	i915_address_space_init(vm, VM_CLASS_DPT);
+
+	vm->insert_page = dpt_insert_page;
+	vm->clear_range = dpt_clear_range;
+	vm->insert_entries = dpt_insert_entries;
+	vm->cleanup = dpt_cleanup;
+
+	vm->vma_ops.bind_vma = dpt_bind_vma;
+	vm->vma_ops.unbind_vma = dpt_unbind_vma;
+	vm->vma_ops.set_pages = ggtt_set_pages;
+	vm->vma_ops.clear_pages = clear_pages;
+
+	vm->pte_encode = gen8_ggtt_pte_encode;
+
+	dpt->obj = dpt_obj;
+
+	return &dpt->vm;
+}
+
+static void intel_dpt_destroy(struct i915_address_space *vm)
+{
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+	i915_vm_close(&dpt->vm);
+}
+
 /* returns HPLL frequency in kHz */
 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
 {
@@ -974,6 +1145,9 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	if (intel_fb_uses_dpt(fb))
+		return 512 * 4096;
+
 	/* AUX_DIST needs only 4K alignment */
 	if (is_ccs_plane(fb, color_plane))
 		return 4096;
@@ -1027,6 +1201,62 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
 		plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
 }
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+		     const struct i915_ggtt_view *view,
+		     bool uses_fence,
+		     unsigned long *out_flags,
+		     struct i915_address_space *vm)
+{
+	struct drm_device *dev = fb->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_vma *vma;
+	u32 alignment;
+	int ret;
+
+	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+		return ERR_PTR(-EINVAL);
+
+	alignment = 4096 * 512;
+
+	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret) {
+		vma = ERR_PTR(ret);
+		goto err;
+	}
+
+	vma = i915_vma_instance(obj, vm, view);
+	if (IS_ERR(vma))
+		goto err;
+
+	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+		ret = i915_vma_unbind(vma);
+		if (ret) {
+			vma = ERR_PTR(ret);
+			goto err;
+		}
+	}
+
+	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+	if (ret) {
+		vma = ERR_PTR(ret);
+		goto err;
+	}
+
+	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+	i915_gem_object_flush_if_display(obj);
+
+	i915_vma_get(vma);
+err:
+	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+	return vma;
+}
+
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 			   bool phys_cursor,
@@ -1630,6 +1860,49 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 		intel_wait_for_vblank(dev_priv, crtc->pipe);
 }
+static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+{
+	struct drm_i915_private *i915 = vm->i915;
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+	intel_wakeref_t wakeref;
+	struct i915_vma *vma;
+	void __iomem *iomem;
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	atomic_inc(&i915->gpu_error.pending_fb_pin);
+
+	vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
+				       HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		goto err;
+
+	iomem = i915_vma_pin_iomap(vma);
+	i915_vma_unpin(vma);
+	if (IS_ERR(iomem)) {
+		vma = iomem;
+		goto err;
+	}
+
+	dpt->vma = vma;
+	dpt->iomem = iomem;
+
+	i915_vma_get(vma);
+
+err:
+	atomic_dec(&i915->gpu_error.pending_fb_pin);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+	return vma;
+}
+
+static void intel_dpt_unpin(struct i915_address_space *vm)
+{
+	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+	i915_vma_unpin_iomap(dpt->vma);
+	i915_vma_put(dpt->vma);
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			     struct intel_initial_plane_config *plane_config)
@@ -1675,12 +1948,12 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 		state = to_intel_plane_state(c->primary->state);
-		if (!state->vma)
+		if (!state->ggtt_vma)
 			continue;
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
 			fb = state->hw.fb;
-			vma = state->vma;
+			vma = state->ggtt_vma;
 			goto valid_fb;
 		}
 	}
@@ -1707,7 +1980,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			   &intel_state->view);
 	__i915_vma_pin(vma);
-	intel_state->vma = i915_vma_get(vma);
+	intel_state->ggtt_vma = i915_vma_get(vma);
 	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
 		if (vma->fence)
 			intel_state->flags |= PLANE_HAS_FENCE;
@@ -10551,25 +10824,60 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 		plane->id == PLANE_CURSOR &&
 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
-	vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
-					 &plane_state->view.gtt,
-					 intel_plane_uses_fence(plane_state),
-					 &plane_state->flags);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	plane_state->vma = vma;
+	if (!intel_fb_uses_dpt(fb)) {
+		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+						 &plane_state->view.gtt,
+						 intel_plane_uses_fence(plane_state),
+						 &plane_state->flags);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		plane_state->ggtt_vma = vma;
+	} else {
+		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+		vma = intel_dpt_pin(intel_fb->dpt_vm);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		plane_state->ggtt_vma = vma;
+
+		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+					   &plane_state->flags, intel_fb->dpt_vm);
+		if (IS_ERR(vma)) {
+			intel_dpt_unpin(intel_fb->dpt_vm);
+			plane_state->ggtt_vma = NULL;
+			return PTR_ERR(vma);
+		}
+
+		plane_state->dpt_vma = vma;
+
+		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+	}
 	return 0;
 }
 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 {
+	struct drm_framebuffer *fb = old_plane_state->hw.fb;
 	struct i915_vma *vma;
-	vma = fetch_and_zero(&old_plane_state->vma);
-	if (vma)
-		intel_unpin_fb_vma(vma, old_plane_state->flags);
+	if (!intel_fb_uses_dpt(fb)) {
+		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+		if (vma)
+			intel_unpin_fb_vma(vma, old_plane_state->flags);
+	} else {
+		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+		vma = fetch_and_zero(&old_plane_state->dpt_vma);
+		if (vma)
+			intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+		if (vma)
+			intel_dpt_unpin(intel_fb->dpt_vm);
+	}
 }
 /**
@@ -11092,6 +11400,10 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	drm_framebuffer_cleanup(fb);
+
+	if (intel_fb_uses_dpt(fb))
+		intel_dpt_destroy(intel_fb->dpt_vm);
+
 	intel_frontbuffer_put(intel_fb->frontbuffer);
 	kfree(intel_fb);
@@ -11262,6 +11574,18 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	if (ret)
 		goto err;
+	if (intel_fb_uses_dpt(fb)) {
+		struct i915_address_space *vm;
+
+		vm = intel_dpt_create(&obj->base);
+		if (IS_ERR(vm)) {
+			ret = PTR_ERR(vm);
+			goto err;
+		}
+
+		intel_fb->dpt_vm = vm;
+	}
+
 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
 	if (ret) {
 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index fc0df4c63e8de..bf12e77bcd175 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -44,6 +44,7 @@ struct drm_mode_fb_cmd2;
 struct drm_modeset_acquire_ctx;
 struct drm_plane;
 struct drm_plane_state;
+struct i915_address_space;
 struct i915_ggtt_view;
 struct intel_atomic_state;
 struct intel_crtc;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 6d8cdaa367485..7fe96777dc671 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -128,6 +128,8 @@ struct intel_framebuffer {
 	struct intel_fb_view normal_view;
 	struct intel_fb_view rotated_view;
 	struct intel_fb_view remapped_view;
+
+	struct i915_address_space *dpt_vm;
 };
 struct intel_fbdev {
@@ -610,7 +612,8 @@ struct intel_plane_state {
 		enum drm_scaling_filter scaling_filter;
 	} hw;
-	struct i915_vma *vma;
+	struct i915_vma *ggtt_vma;
+	struct i915_vma *dpt_vma;
 	unsigned long flags;
 #define PLANE_HAS_FENCE BIT(0)
@@ -1972,9 +1975,15 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pip
 		intel_wait_for_vblank(dev_priv, pipe);
 }
-static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
 {
-	return i915_ggtt_offset(state->vma);
+	return fb && DISPLAY_VER(to_i915(fb->dev)) >= 13 &&
+		fb->modifier != DRM_FORMAT_MOD_LINEAR;
+}
+
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
+{
+	return i915_ggtt_offset(plane_state->ggtt_vma);
 }
 static inline struct intel_frontbuffer *
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b2f3ac846f5b6..1847a161cb374 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -737,11 +737,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
 	drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
-		    !plane_state->vma->fence);
+		    !plane_state->ggtt_vma->fence);
 	if (plane_state->flags & PLANE_HAS_FENCE &&
-	    plane_state->vma->fence)
-		cache->fence_id = plane_state->vma->fence->id;
+	    plane_state->ggtt_vma->fence)
+		cache->fence_id = plane_state->ggtt_vma->fence->id;
 	else
 		cache->fence_id = -1;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 0d34a5ad4e2b9..6df3e745f830d 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -934,6 +934,21 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 	return plane_color_ctl;
 }
+static u32 skl_surf_address(const struct intel_plane_state *plane_state,
+			    int color_plane)
+{
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	u32 offset = plane_state->view.color_plane[color_plane].offset;
+
+	if (intel_fb_uses_dpt(fb)) {
+		WARN_ON(offset & 0x1fffff);
+		return offset >> 9;
+	} else {
+		WARN_ON(offset & 0xfff);
+		return offset;
+	}
+}
+
 static void
 skl_program_plane(struct intel_plane *plane,
 		  const struct intel_crtc_state *crtc_state,
@@ -944,7 +959,7 @@ skl_program_plane(struct intel_plane *plane,
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	u32 surf_addr = plane_state->view.color_plane[color_plane].offset;
+	u32 surf_addr = skl_surf_address(plane_state, color_plane);
 	u32 stride = skl_plane_stride(plane_state, color_plane);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int aux_plane = skl_main_to_aux_plane(fb, color_plane);
@@ -983,7 +998,7 @@ skl_program_plane(struct intel_plane *plane,
 	}
 	if (aux_plane) {
-		aux_dist = plane_state->view.color_plane[aux_plane].offset - surf_addr;
+		aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr;
 		if (DISPLAY_VER(dev_priv) < 12)
 			aux_dist |= skl_plane_stride(plane_state, aux_plane);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index 76a08b9c1f5c8..b9028c2ad3c7d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -6,8 +6,15 @@
 #ifndef __GEN8_PPGTT_H__
 #define __GEN8_PPGTT_H__
+#include <linux/kernel.h>
+
+struct i915_address_space;
 struct intel_gt;
+enum i915_cache_level;
 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+			 enum i915_cache_level level,
+			 u32 flags);
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index dcb3b299cf4aa..35069ca5d7deb 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -18,6 +18,7 @@
 #include "i915_vgpu.h"
 #include "intel_gtt.h"
+#include "gen8_ppgtt.h"
 static int i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -187,9 +188,9 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 	intel_gtt_chipset_flush();
 }
-static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
-				enum i915_cache_level level,
-				u32 flags)
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+			 enum i915_cache_level level,
+			 u32 flags)
 {
 	gen8_pte_t pte = addr | _PAGE_PRESENT;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 44ce27c516319..ca00b45827b74 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -248,6 +248,7 @@ struct i915_address_space {
 	struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
 #define VM_CLASS_GGTT 0
 #define VM_CLASS_PPGTT 1
+#define VM_CLASS_DPT 2
 	struct drm_i915_gem_object *scratch[4];
 	/**
@@ -258,6 +259,9 @@ struct i915_address_space {
 	/* Global GTT */
 	bool is_ggtt:1;
+	/* Display page table */
+	bool is_dpt:1;
+
 	/* Some systems support read-only mappings for GGTT and/or PPGTT */
 	bool has_read_only:1;
@@ -354,6 +358,7 @@ struct i915_ppgtt {
 };
 #define i915_is_ggtt(vm) ((vm)->is_ggtt)
+#define i915_is_dpt(vm) ((vm)->is_dpt)
 int __must_check
 i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
--
2.27.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx