if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bcfd4a8d0288..da716aa38085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2022,13 +2022,13 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned i, shared_count;
int r;
- r = dma_resv_get_fences_rcu(resv, &excl,
+ r = dma_resv_get_fences(resv, &excl,
&shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
@@ -2640,7 +2640,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
return false;
/* Try to block ongoing updates */
@@ -2820,7 +2820,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+ timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
true, true, timeout);
if (timeout <= 0)
return timeout;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3267eb2e35dd..1633afd3c03b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8400,7 +8400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
+ r = dma_resv_wait_timeout(abo->tbo.base.resv, true,
false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 263b4fb03303..11770da97dc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,7 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
+ ret = dma_resv_wait_timeout(obj->resv, wait_all,
true, timeout);
if (ret == 0)
ret = -ETIME;
@@ -1380,7 +1380,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 4d43b8630f0e..e3c209628688 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,13 +390,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled_rcu(obj->resv,
+ if (!dma_resv_test_signaled(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout_rcu(obj->resv,
+ ret = dma_resv_wait_timeout(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index c942d2a8c252..9cc36bbc2502 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,7 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences_rcu(robj, &bo->excl,
+ ret = dma_resv_get_fences(robj, &bo->excl,
&bo->nr_shared,
&bo->shared);
if (ret)
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
void dma_resv_prune(struct dma_resv *resv)
{
if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3f94becac541..0083a850f839 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
if (DBG_FORCE_RELOC)
return false;
- return !dma_resv_test_signaled_rcu(vma->resv, true);
+ return !dma_resv_test_signaled(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..e78738aec7b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
+ r = dma_resv_wait_timeout(obj->base.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index c13aeddf5aa7..e7aebb8fb468 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -158,7 +158,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c85494f411f4..4a70a1881d79 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1594,7 +1594,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7aaf74552d06..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43af91df552e..ecd35986ddb5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -915,7 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout_rcu(obj->resv, write,
+ ret = dma_resv_wait_timeout(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d863e5ed954a..c59072f254f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -964,7 +964,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ca07098a6141..0e6e893eb81d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -311,7 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
+ ret = dma_resv_wait_timeout(gem_obj->resv, true,
true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3272c33af8fe..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
@@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..adb084e6ddbe 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c41ef0caa492..32004cf37549 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
ret = 0;
else
ret = -EBUSY;
@@ -308,7 +308,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+ lret = dma_resv_wait_timeout(resv, true, interruptible,
30 * HZ);
if (lret < 0)
@@ -411,7 +411,7 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+ dma_resv_wait_timeout(bo->base.resv, true, false,
30 * HZ);
}
@@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, true) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -1121,13 +1121,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
+ timeout = dma_resv_wait_timeout(bo->base.resv, true,
interruptible, timeout);
if (timeout < 0)
return timeout;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..7f3125cf5358 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled_rcu(resv,
+ if (!dma_resv_test_signaled(resv,
arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..190d9495dc0e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,9 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, true);
} else {
- ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ ret = dma_resv_wait_timeout(obj->resv, true, true,
timeout);
}
if (ret == 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 176b6201ef2b..8faf1df027f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -743,7 +743,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout_rcu
+ lret = dma_resv_wait_timeout
(bo->base.resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index f6b71712c029..22325dfa7744 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -268,19 +268,12 @@ void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+ unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
#endif /* _LINUX_RESERVATION_H */
--
2.25.1
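
The hunks above are a mechanical rename of the dma-resv helpers (dropping the _rcu suffix); callers keep the same arguments. Below is a minimal sketch, not part of the patch, of how a caller might use the renamed helpers after this series, following the pattern visible in the amdgpu_vm_prt_fini() hunk: snapshot the fences, and if that fails for lack of memory, fall back to blocking until everything signals. example_wait_for_bo() is a hypothetical function, not a kernel symbol.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <linux/sched.h>
#include <linux/slab.h>

static void example_wait_for_bo(struct dma_resv *resv)
{
	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned shared_count, i;
	int r;

	/* Fast path: nothing pending, no need to collect fences. */
	if (dma_resv_test_signaled(resv, true))
		return;

	/* Snapshot the exclusive and shared fences. */
	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
	if (r) {
		/* No memory for the fence list, block for everything. */
		dma_resv_wait_timeout(resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
		return;
	}

	for (i = 0; i < shared_count; ++i) {
		dma_fence_wait(shared[i], false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl) {
		dma_fence_wait(excl, false);
		dma_fence_put(excl);
	}
}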