In XenGT, the global graphics memory space is partitioned among multiple
vGPU instances in different VMs. The ballooning code is added in
i915_gem_setup_global_gtt(), using the drm mm allocator APIs to mark the
graphics address ranges that are partitioned out to other vGPUs as
reserved.

v2: address Chris and Daniel's comments:
  - no guard page between different VMs
  - use drm_mm_reserve_node() to do the reservation for ballooning,
    instead of the previous drm_mm_insert_node_in_range_generic()

Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Signed-off-by: Jike Song <jike.song@xxxxxxxxx>
Signed-off-by: Zhi Wang <zhi.a.wang@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 144 +++++++++++++++++++++++++++++++++++-
 1 file changed, 141 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 39c2d13..90757ab 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,134 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+struct _balloon_info_ {
+        /*
+         * There are up to 2 regions per low/high GM that
+         * might be ballooned. Here, index 0/1 is for low
+         * GM, 2/3 for high GM.
+         */
+        struct drm_mm_node space[4];
+} bl_info;
+
+void intel_vgt_deballoon(void)
+{
+        int i;
+
+        DRM_INFO("VGT deballoon.\n");
+
+        for (i = 0; i < 4; i++) {
+                if (bl_info.space[i].allocated)
+                        drm_mm_remove_node(&bl_info.space[i]);
+        }
+
+        memset(&bl_info, 0, sizeof(bl_info));
+}
+
+static int vgt_balloon_space(struct drm_mm *mm,
+                             struct drm_mm_node *node,
+                             unsigned long start, unsigned long end)
+{
+        unsigned long size = end - start;
+
+        if (start == end)
+                return -EINVAL;
+
+        DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KB.\n",
+                 start, end, size / 1024);
+
+        node->start = start;
+        node->size = size;
+
+        return drm_mm_reserve_node(mm, node);
+}
+
+static int intel_vgt_balloon(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = to_i915(dev);
+        struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+        unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+
+        unsigned long low_gm_base, low_gm_size, low_gm_end;
+        unsigned long high_gm_base, high_gm_size, high_gm_end;
+        int ret;
+
+        low_gm_base = I915_READ(vgtif_reg(avail_rs.low_gmadr.my_base));
+        low_gm_size = I915_READ(vgtif_reg(avail_rs.low_gmadr.my_size));
+        high_gm_base = I915_READ(vgtif_reg(avail_rs.high_gmadr.my_base));
+        high_gm_size = I915_READ(vgtif_reg(avail_rs.high_gmadr.my_size));
+
+        low_gm_end = low_gm_base + low_gm_size;
+        high_gm_end = high_gm_base + high_gm_size;
+
+        DRM_INFO("VGT ballooning configuration:\n");
+        DRM_INFO("Low GM: base 0x%lx size %ldKB\n",
+                 low_gm_base, low_gm_size / 1024);
+        DRM_INFO("High GM: base 0x%lx size %ldKB\n",
+                 high_gm_base, high_gm_size / 1024);
+
+        if (low_gm_base < ggtt_vm->start
+            || low_gm_end > dev_priv->gtt.mappable_end
+            || high_gm_base < dev_priv->gtt.mappable_end
+            || high_gm_end > ggtt_vm_end) {
+                DRM_ERROR("Invalid ballooning configuration!\n");
+                return -EINVAL;
+        }
+
+        memset(&bl_info, 0, sizeof(bl_info));
+
+        /* High GM ballooning */
+        if (high_gm_base > dev_priv->gtt.mappable_end) {
+                ret = vgt_balloon_space(&ggtt_vm->mm,
+                                        &bl_info.space[2],
+                                        dev_priv->gtt.mappable_end,
+                                        high_gm_base);
+
+                if (ret)
+                        goto err;
+        }
+
+        /*
+         * No need to partition out the last physical page,
+         * because it is reserved to the guard page.
+         */
+        if (high_gm_end < ggtt_vm_end - PAGE_SIZE) {
+                ret = vgt_balloon_space(&ggtt_vm->mm,
+                                        &bl_info.space[3],
+                                        high_gm_end,
+                                        ggtt_vm_end - PAGE_SIZE);
+                if (ret)
+                        goto err;
+        }
+
+        /* Low GM ballooning */
+        if (low_gm_base > ggtt_vm->start) {
+                ret = vgt_balloon_space(&ggtt_vm->mm,
+                                        &bl_info.space[0],
+                                        ggtt_vm->start, low_gm_base);
+
+                if (ret)
+                        goto err;
+        }
+
+        if (low_gm_end < dev_priv->gtt.mappable_end) {
+                ret = vgt_balloon_space(&ggtt_vm->mm,
+                                        &bl_info.space[1],
+                                        low_gm_end,
+                                        dev_priv->gtt.mappable_end);
+
+                if (ret)
+                        goto err;
+        }
+
+        DRM_INFO("VGT balloon successfully\n");
+        return 0;
+
+err:
+        DRM_ERROR("VGT balloon fail\n");
+        intel_vgt_deballoon();
+        return ret;
+}
+
 void i915_check_vgpu(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1709,6 +1837,16 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
 
         /* Subtract the guard page ... */
         drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+        dev_priv->gtt.base.start = start;
+        dev_priv->gtt.base.total = end - start;
+
+        if (intel_vgpu_active(dev)) {
+                ret = intel_vgt_balloon(dev);
+                if (ret)
+                        return ret;
+        }
+
         if (!HAS_LLC(dev))
                 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
@@ -1728,9 +1866,6 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
                 obj->has_global_gtt_mapping = 1;
         }
 
-        dev_priv->gtt.base.start = start;
-        dev_priv->gtt.base.total = end - start;
-
         /* Clear any non-preallocated blocks */
         drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1782,6 +1917,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
         }
 
         if (drm_mm_initialized(&vm->mm)) {
+                if (intel_vgpu_active(dev))
+                        intel_vgt_deballoon();
+
                 drm_mm_takedown(&vm->mm);
                 list_del(&vm->global_link);
         }
-- 
1.9.1
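
For context on the mechanism: the reservation above is the standard drm_mm
pre-allocation pattern, i.e. fill in node->start and node->size and hand the
node to drm_mm_reserve_node(), after which normal GGTT allocations flow
around the reserved range until drm_mm_remove_node() gives it back. Below is
a minimal standalone sketch of that pattern; the names reserve_range(),
release_range() and example_node are illustrative only and not part of this
patch.

#include <drm/drm_mm.h>

static struct drm_mm_node example_node;

/* Mark [start, end) as occupied in @mm so later allocations skip it. */
static int reserve_range(struct drm_mm *mm,
                         unsigned long start, unsigned long end)
{
        if (start >= end)
                return -EINVAL;

        /* drm_mm_reserve_node() takes the range from the node itself. */
        example_node.start = start;
        example_node.size = end - start;

        return drm_mm_reserve_node(mm, &example_node);
}

/* Undo the reservation, mirroring what intel_vgt_deballoon() does. */
static void release_range(void)
{
        if (example_node.allocated)
                drm_mm_remove_node(&example_node);
}

vgt_balloon_space() in the patch is this pattern applied to the up to four
gaps around the vGPU's own low/high GM ranges, and intel_vgt_deballoon() is
the corresponding cleanup.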