> On 30.11.2016 at 08:02, Xiangliang Yu wrote:
> > Schedule a workqueue for VM fault handler on GMC V8.0 for SRIOV
> > support.
> >
> > Signed-off-by: shaoyunl <Shaoyun.Liu at amd.com>
> > Signed-off-by: Xiangliang Yu <Xiangliang.Yu at amd.com>
>
> NAK, it is vital to print the information from the registers and reset them
> ASAP even under SRIOV.
>
> My last status was that the registers in question shouldn't be subject to KIQ
> and accessed directly.

Ok, please ignore the patch.

> > ---
> >  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 40 ++++++++++++++++++++++++++++++++++-
> >  1 file changed, 39 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> > index 0daac3a..e4a628c 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> > @@ -1219,7 +1219,7 @@ static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
> >  	return 0;
> >  }
> >
> > -static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
> > +static int gmc_v8_0_process_vm_fault(struct amdgpu_device *adev,
> >  				      struct amdgpu_irq_src *source,
> >  				      struct amdgpu_iv_entry *entry)
> >  {
> > @@ -1250,6 +1250,44 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
> >  	return 0;
> >  }
> >
> > +struct gmc_vm_fault_work {
> > +	struct work_struct base;
> > +	struct amdgpu_device *adev;
> > +	struct amdgpu_irq_src *source;
> > +	struct amdgpu_iv_entry *entry;
> > +};
> > +
> > +static void gmc_v8_0_vm_fault_sched(struct work_struct *work)
> > +{
> > +	struct gmc_vm_fault_work *vm_work =
> > +		container_of(work, struct gmc_vm_fault_work, base);
> > +	struct amdgpu_device *adev = vm_work->adev;
> > +	struct amdgpu_irq_src *source = vm_work->source;
> > +	struct amdgpu_iv_entry *entry = vm_work->entry;
> > +
> > +	gmc_v8_0_process_vm_fault(adev, source, entry);
> > +	kfree(vm_work);
> > +}
> > +
> > +static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
> > +				      struct amdgpu_irq_src *source,
> > +				      struct amdgpu_iv_entry *entry)
> > +{
> > +	struct gmc_vm_fault_work *work = NULL;
> > +
> > +	if (amdgpu_sriov_vf(adev)) {
> > +		work = kmalloc(sizeof(struct gmc_vm_fault_work), GFP_ATOMIC);
> > +		if (!work)
> > +			return -ENOMEM;
> > +		INIT_WORK(&work->base, gmc_v8_0_vm_fault_sched);
> > +		work->adev = adev;
> > +		work->source = source;
> > +		work->entry = entry;
> > +		return schedule_work(&work->base);
> > +	}
> > +	return gmc_v8_0_process_vm_fault(adev, source, entry);
> > +}
> > +
> >  static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
> >  						      bool enable)
> >  {
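
For reference, below is a minimal sketch of the direct handling the NAK asks for: read the
fault registers, print them, and reset them right away in the interrupt handler itself, with
no workqueue (and so no KIQ round trip) in between. It only reuses register names that
gmc_v8_0.c already accesses (mmVM_CONTEXT1_PROTECTION_FAULT_ADDR/STATUS); the reset via bit 0
of VM_CONTEXT1_CNTL2, the entry->src_data layout, and the function name are assumptions based
on the 2016-era driver, not part of this patch.

/* Sketch only: handle the VM fault directly in the interrupt path.
 * The fault registers are read, printed and cleared immediately, so
 * the next fault is not lost and nothing is deferred to a workqueue.
 */
static int gmc_v8_0_process_vm_fault_direct(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	/* Same fault registers gmc_v8_0.c already reads for reporting. */
	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

	/* Reset the fault status as soon as it has been captured
	 * (assumed: bit 0 of VM_CONTEXT1_CNTL2 clears the fault). */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
	}

	return 0;
}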