Quoting Abdiel Janulgue (2019-06-04 11:37:23)
> Allow reuse of the fault-handling code in preparation for having
> multiple fault handlers depending on the mmapping type and backing
> storage.
>
> Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
>
> Signed-off-by: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 208 ++++++++++++++---------
>  1 file changed, 132 insertions(+), 76 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index c7b9b34de01b..ed20fab66d2f 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -197,6 +197,133 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
>  	return view;
>  }
>
> +struct gem_object_fault {
> +	struct drm_i915_gem_object *obj;
> +	pgoff_t page_offset;
> +	intel_wakeref_t wakeref;
> +	int srcu;
> +	bool pin_vma;
> +	vm_fault_t error_ret;
> +};
> +
> +static vm_fault_t __vm_fault_from_error(struct drm_i915_private *i915,
> +					int fault_ret)
> +{
> +	switch (fault_ret) {
> +	case -EIO:
> +		/*
> +		 * We eat errors when the gpu is terminally wedged to avoid
> +		 * userspace unduly crashing (gl has no provisions for mmaps to
> +		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
> +		 * and so needs to be reported.
> +		 */
> +		if (!i915_terminally_wedged(i915))
> +			return VM_FAULT_SIGBUS;
> +		/* else: fall through */
> +	case -EAGAIN:
> +		/*
> +		 * EAGAIN means the gpu is hung and we'll wait for the error
> +		 * handler to reset everything when re-faulting in
> +		 * i915_mutex_lock_interruptible.
> +		 */
> +	case 0:
> +	case -ERESTARTSYS:
> +	case -EINTR:
> +	case -EBUSY:
> +		/*
> +		 * EBUSY is ok: this just means that another thread
> +		 * already did the job.
> +		 */
> +		return VM_FAULT_NOPAGE;
> +	case -ENOMEM:
> +		return VM_FAULT_OOM;
> +	case -ENOSPC:
> +	case -EFAULT:
> +		return VM_FAULT_SIGBUS;
> +	default:
> +		WARN_ONCE(fault_ret, "unhandled error in %s: %i\n", __func__,
> +			  fault_ret);
> +		return VM_FAULT_SIGBUS;
> +	}
> +}
> +
> +static int __prepare_object_fault(struct vm_fault *vmf,
> +				  bool pin_vma,
> +				  struct gem_object_fault *f)
> +{
> +	struct vm_area_struct *area = vmf->vma;
> +	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
> +	struct drm_device *dev = obj->base.dev;
> +	struct drm_i915_private *dev_priv = to_i915(dev);
> +	bool write = area->vm_flags & VM_WRITE;
> +	int ret;
> +
> +	/* Sanity check that we allow writing into this object */
> +	if (i915_gem_object_is_readonly(obj) && write) {
> +		f->error_ret = VM_FAULT_SIGBUS;
> +		return -EACCES;
> +	}
> +
> +	f->obj = obj;
> +	/* We don't use vmf->pgoff since that has the fake offset */
> +	f->page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
> +
> +	trace_i915_gem_object_fault(obj, f->page_offset, true, write);
> +
> +	ret = i915_gem_object_pin_pages(obj);
> +	if (ret)
> +		goto err;
> +
> +	f->wakeref = intel_runtime_pm_get(dev_priv);

Not every path requires the device to be awake.

> +
> +	f->srcu = i915_reset_trylock(dev_priv);

Not every path requires interaction with reset handling.

> +	if (f->srcu < 0) {
> +		ret = f->srcu;
> +		goto err_rpm;
> +	}
> +
> +	f->pin_vma = pin_vma;
> +	if (f->pin_vma) {
> +		ret = i915_mutex_lock_interruptible(dev);

Bah.

> +		if (ret)
> +			goto err_reset;
> +	}
> +
> +	/* Access to snoopable pages through the GTT is incoherent.
> +	 */
> +	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {

And that is very, very specific to one path.

From the sampling here, this is not generic preamble.
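Something along these lines would be closer to what I expect -- a rough,
untested sketch (the __gem_fault_common()/gtt_fault_prologue() names and
the exact split are invented for illustration): the shared helper keeps
only the checks every path needs, and each fault handler layers its own
path-specific setup on top.

static int __gem_fault_common(struct vm_fault *vmf,
			      struct gem_object_fault *f)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	bool write = area->vm_flags & VM_WRITE;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write)
		return -EFAULT; /* maps to VM_FAULT_SIGBUS */

	f->obj = obj;
	/* We don't use vmf->pgoff since that has the fake offset */
	f->page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, f->page_offset, true, write);

	return i915_gem_object_pin_pages(obj);
}

static vm_fault_t gtt_fault_prologue(struct vm_fault *vmf,
				     struct gem_object_fault *f)
{
	struct drm_i915_gem_object *obj =
		to_intel_bo(vmf->vma->vm_private_data);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int ret;

	/* error unwind (unpin pages, rpm/srcu release) elided for brevity */

	ret = __gem_fault_common(vmf, f);
	if (ret)
		return __vm_fault_from_error(i915, ret);

	/* GTT only: the device must be awake to service the fault */
	f->wakeref = intel_runtime_pm_get(i915);

	/* GTT only: serialise against GPU reset */
	f->srcu = i915_reset_trylock(i915);
	if (f->srcu < 0)
		return __vm_fault_from_error(i915, f->srcu);

	/* GTT only: binding the vma requires struct_mutex */
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return __vm_fault_from_error(i915, ret);

	/* GTT only: access to snoopable pages through the GTT is incoherent */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915))
		return __vm_fault_from_error(i915, -EFAULT);

	return 0;
}

Returning plain errnos from the common part and funnelling everything
through __vm_fault_from_error() in the caller would also let you drop the
error_ret side-channel from struct gem_object_fault.
-Chris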