On Tue, Oct 29, 2019 at 07:51:30AM +0000, Koenig, Christian wrote:

> > +static bool amdgpu_mn_invalidate_gfx(struct mmu_range_notifier *mrn,
> > +                                     const struct mmu_notifier_range *range)
> >  {
> > -        struct amdgpu_bo *bo;
> > +        struct amdgpu_bo *bo = container_of(mrn, struct amdgpu_bo, notifier);
> > +        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> >          long r;
> >  
> > -        list_for_each_entry(bo, &node->bos, mn_list) {
> > -
> > -                if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
> > -                        continue;
> > -
> > -                r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> > -                        true, false, MAX_SCHEDULE_TIMEOUT);
> > -                if (r <= 0)
> > -                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> > -        }
> > +        /* FIXME: Is this necessary? */
> 
> Most likely not.
> 
> Christian.
> 
> > +        if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, range->start,
> > +                                          range->end))
> > +                return true;

So is the bo->tbo.mem.num_pages == bo->tbo.ttm.num_pages always?

And userptr can't be zero here, or at least it doesn't matter if it is?

> > +static bool amdgpu_mn_invalidate_hsa(struct mmu_range_notifier *mrn,
> > +                                     const struct mmu_notifier_range *range)
> >  {
> > -        struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
> > -        unsigned long start = update->start;
> > -        unsigned long end = update->end;
> > -        bool blockable = mmu_notifier_range_blockable(update);
> > -        struct interval_tree_node *it;
> > -
> > -        /* notification is exclusive, but interval is inclusive */
> > -        end -= 1;
> > -
> > -        /* TODO we should be able to split locking for interval tree and
> > -         * amdgpu_mn_invalidate_node
> > -         */
> > -        if (amdgpu_mn_read_lock(amn, blockable))
> > -                return -EAGAIN;
> > -
> > -        it = interval_tree_iter_first(&amn->objects, start, end);
> > -        while (it) {
> > -                struct amdgpu_mn_node *node;
> > -
> > -                if (!blockable) {
> > -                        amdgpu_mn_read_unlock(amn);
> > -                        return -EAGAIN;
> > -                }
> > +        struct amdgpu_bo *bo = container_of(mrn, struct amdgpu_bo, notifier);
> > +        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> >  
> > -                node = container_of(it, struct amdgpu_mn_node, it);
> > -                it = interval_tree_iter_next(it, start, end);
> > +        /* FIXME: Is this necessary? */
> > +        if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, range->start,
> > +                                          range->end))
> > +                return true;
> >  
> > -                amdgpu_mn_invalidate_node(node, start, end);
> > -        }

This one too right?

Jason
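
For readers following the FIXME discussion above: the check in question is a
plain range-overlap test against the BO's user mapping. Below is a minimal
stand-in sketch of the test amdgpu_ttm_tt_affect_userptr() is expected to
perform; the struct layout and the names userptr_tt / affects_userptr are
illustrative, not the upstream definitions:

/*
 * Illustrative stand-in for the overlap test done by
 * amdgpu_ttm_tt_affect_userptr(); a simplification, not the real
 * amdgpu ttm_tt layout.
 */
#include <stdbool.h>

#define PAGE_SIZE 4096UL

struct userptr_tt {
        unsigned long userptr;   /* start of the user mapping; 0 when unset */
        unsigned long num_pages; /* pages backing the BO */
};

static bool affects_userptr(const struct userptr_tt *gtt,
                            unsigned long start, unsigned long end)
{
        unsigned long size;

        /* A userptr of 0 makes the helper bail out entirely, which is
         * what the "userptr can't be zero" question is probing. */
        if (!gtt || !gtt->userptr)
                return false;

        /* True iff [start, end) overlaps [userptr, userptr + size);
         * mmu_notifier_range uses an exclusive end. */
        size = gtt->num_pages * PAGE_SIZE;
        return gtt->userptr < end && start < gtt->userptr + size;
}

Since the mmu_range_notifier core only invokes the callback for ranges that
already intersect the interval registered for the BO, this per-callback test
looks redundant (hence Christian's "Most likely not") as long as the length
of the registered interval and the ttm's num_pages agree, which is what the
num_pages question above is getting at.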