On 10.07.24 at 02:31, jiadong.zhu@xxxxxxx wrote:
From: Jiadong Zhu <Jiadong.Zhu@xxxxxxx>
The job's embedded fence is a dma_fence which must not be converted
to amdgpu_fence.
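(Illustration only, a minimal sketch with made-up struct names rather than
the real amdgpu definitions: the hw_fence is embedded in struct amdgpu_job,
not in struct amdgpu_fence, so a container_of() cast to amdgpu_fence yields
a pointer into unrelated job memory.)

struct example_fence {
	struct dma_fence base;
	ktime_t start_timestamp;	/* only exists in this layout */
};

struct example_job {
	struct dma_fence hw_fence;	/* the job's embedded fence */
	/* no start_timestamp here before this patch */
};

static void illustrate_bug(struct dma_fence *f)
{
	/* If f is really &job->hw_fence, this cast points into the middle
	 * of struct example_job, and the write below lands on whatever
	 * field happens to live at that offset.
	 */
	struct example_fence *ef = container_of(f, struct example_fence, base);

	ef->start_timestamp = ktime_get();
}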
Good catch.
The start timestamp shall instead be saved on the job for the
hw_fence.
But NAK to that approach. Why do we need the start time here in the
first place?
Regards,
Christian.
v2: optimize get_fence_start_time.
Signed-off-by: Jiadong Zhu <Jiadong.Zhu@xxxxxxx>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 31 ++++++++++++++++++++---
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3 +++
2 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2f24a6aa13bf..72bb007e48c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -88,6 +88,31 @@ static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
return NULL;
}
+static inline void set_fence_start_time(struct dma_fence *f, ktime_t start_timestamp)
+{
+ if (f->ops == &amdgpu_fence_ops) {
+ struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+ __f->start_timestamp = start_timestamp;
+ } else if (f->ops == &amdgpu_job_fence_ops) {
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+ job->start_timestamp = start_timestamp;
+ }
+}
+
+static inline ktime_t get_fence_start_time(struct dma_fence *f)
+{
+ if (unlikely(f->ops == &amdgpu_fence_ops)) {
+ struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+ return __f->start_timestamp;
+ }
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+ return job->start_timestamp;
+}
+
/**
* amdgpu_fence_write - write a fence value
*
@@ -197,7 +222,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
}
}
- to_amdgpu_fence(fence)->start_timestamp = ktime_get();
+ set_fence_start_time(fence, ktime_get());
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
@@ -428,7 +453,7 @@ u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
return 0;
return ktime_us_delta(ktime_get(),
- to_amdgpu_fence(fence)->start_timestamp);
+ get_fence_start_time(fence));
}
/**
@@ -451,7 +476,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
if (!fence)
return;
- to_amdgpu_fence(fence)->start_timestamp = timestamp;
+ set_fence_start_time(fence, timestamp);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a963a25ddd62..3a73fe11a1ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -73,6 +73,9 @@ struct amdgpu_job {
uint64_t gds_va;
bool init_shadow;
+ /* start timestamp for hw_fence */
+ ktime_t start_timestamp;
+
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
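
(For context, a rough sketch of how both fence types can reach these
helpers; simplified from amdgpu_fence_emit(), and the allocation details
here are assumptions rather than the exact upstream code:)

	struct dma_fence *fence;

	if (job == NULL) {
		/* standalone fence, backed by struct amdgpu_fence */
		struct amdgpu_fence *af = kmem_cache_alloc(amdgpu_fence_slab,
							   GFP_ATOMIC);
		if (af == NULL)
			return -ENOMEM;
		fence = &af->base;	/* uses amdgpu_fence_ops */
	} else {
		/* job submission, fence embedded in struct amdgpu_job */
		fence = &job->hw_fence;	/* uses amdgpu_job_fence_ops */
	}

	/* either way, the helper dispatches on fence->ops */
	set_fence_start_time(fence, ktime_get());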