Re: [PATCH 3/3] drm/amdgpu: drop need_vm_flush/need_pipe_sync

On 03.11.18 at 06:33, Monk Liu wrote:
use a flag to hold need_flush and need_pipe_sync

NAK, if you want to save space then use "bool variable:1" instead.

Flags in structures are not as self-explanatory as a member variable.

Christian.
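
[For reference, a minimal sketch of the bitfield alternative suggested above. The field names mirror amdgpu_job, but this is illustrative code, not part of the patch or this thread:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only: single-bit bitfields keep the descriptive member names
 * while the compiler packs both bits into the same storage unit, so
 * space is saved without introducing opaque flag masks.
 */
struct amdgpu_job_sketch {
	uint64_t	vm_pd_addr;
	unsigned	vmid;
	bool		vm_needs_flush:1;	/* require a VM flush for this job */
	bool		need_pipe_sync:1;	/* require a pipeline sync for this job */
};

Call sites then keep plain assignments like job->vm_needs_flush = true;
instead of the |= / & masking that a flags word requires.]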


Signed-off-by: Monk Liu <Monk.Liu@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c   | 14 +++++++++-----
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h   |  6 ++++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
  7 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ac7d2da..3231a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -170,7 +170,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  		if ((need_ctx_switch && amdgpu_sriov_vf(adev)) ||
  			amdgpu_vm_need_pipeline_sync(ring, job))
  			need_pipe_sync = true;
-		else if (job->need_pipe_sync)
+		else if (job->flags & JOB_NEED_PIPE_SYNC)
  			need_pipe_sync = true;
  	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index df9b173..ed9b6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -311,7 +311,9 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  		dma_fence_put((*id)->flushed_updates);
  		(*id)->flushed_updates = dma_fence_get(updates);
  	}
-	job->vm_needs_flush = needs_flush;
+
+	if (needs_flush)
+		job->flags |= JOB_NEED_VM_FLUSH;
  	return 0;
  }
@@ -341,7 +343,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  	struct dma_fence *updates = sync->last_vm_update;
  	int r;
-	job->vm_needs_flush = vm->use_cpu_for_update;
+	if (vm->use_cpu_for_update)
+		job->flags |= JOB_NEED_VM_FLUSH;
  	/* Check if we can use a VMID already assigned to this VM */
  	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
@@ -380,7 +383,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  			(*id)->flushed_updates = dma_fence_get(updates);
  		}
-		job->vm_needs_flush |= needs_flush;
+		if (needs_flush)
+			job->flags |= JOB_NEED_VM_FLUSH;
  		return 0;
  	}
@@ -438,7 +442,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  		dma_fence_put(id->flushed_updates);
  			id->flushed_updates = dma_fence_get(updates);
-			job->vm_needs_flush = true;
+			job->flags |= JOB_NEED_VM_FLUSH;
  		}
  	list_move_tail(&id->list, &id_mgr->ids_lru);
@@ -447,7 +451,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  	id->pd_gpu_addr = job->vm_pd_addr;
  	id->owner = vm->entity.fence_context;
-	if (job->vm_needs_flush) {
+	if (job->flags & JOB_NEED_VM_FLUSH) {
  		dma_fence_put(id->last_flush);
  		id->last_flush = NULL;
  	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dae997d..82e190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -181,7 +181,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
  	if (fence && need_pipe_sync && drm_sched_dependency_optimized(fence, s_entity)) {
  		trace_amdgpu_ib_pipe_sync(job, fence);
-		job->need_pipe_sync = true;
+		job->flags |= JOB_NEED_PIPE_SYNC;
  	}
  	while (fence == NULL && vm && !job->vmid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index c1d00f0..f9e8ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -37,6 +37,9 @@
  struct amdgpu_fence;

+#define JOB_NEED_VM_FLUSH       1   /* require a vm flush for this job */
+#define JOB_NEED_PIPE_SYNC      2   /* require a pipeline sync for this job */
+
  struct amdgpu_job {
  	struct drm_sched_job    base;
  	struct amdgpu_vm	*vm;
@@ -46,7 +49,6 @@ struct amdgpu_job {
  	uint32_t		preamble_status;
  	uint32_t		num_ibs;
  	void			*owner;
-	bool                    vm_needs_flush;
  	uint64_t		vm_pd_addr;
  	unsigned		vmid;
  	unsigned		pasid;
@@ -58,7 +60,7 @@ struct amdgpu_job {
  	/* user fence handling */
  	uint64_t		uf_addr;
  	uint64_t		uf_sequence;
-	bool            need_pipe_sync; /* require a pipeline sync for this job */
+	uint64_t		flags;
  };
  int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 626abca..4318d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -232,7 +232,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
  			   __entry->vmid = job->vmid;
  			   __entry->vm_hub = ring->funcs->vmhub,
  			   __entry->pd_addr = job->vm_pd_addr;
-			   __entry->needs_flush = job->vm_needs_flush;
+			   __entry->needs_flush = job->flags & JOB_NEED_VM_FLUSH;
  			   ),
  	    TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
  		      __entry->pasid, __get_str(ring), __entry->vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c91ec31..9ea2861 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1989,7 +1989,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
  	if (vm_needs_flush) {
  		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
-		job->vm_needs_flush = true;
+		job->flags |= JOB_NEED_VM_FLUSH;
  	}
  	if (resv) {
  		r = amdgpu_sync_resv(adev, &job->sync, resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b304..dcdf9d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1038,7 +1038,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
  	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
  	struct amdgpu_vmid *id;
  	bool gds_switch_needed;
-	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
+	bool vm_flush_needed = (job->flags & JOB_NEED_VM_FLUSH) || ring->has_compute_vm_bug;
  	if (job->vmid == 0)
  		return false;
@@ -1082,7 +1082,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
  		id->gws_size != job->gws_size ||
  		id->oa_base != job->oa_base ||
  		id->oa_size != job->oa_size);
-	bool vm_flush_needed = job->vm_needs_flush;
+	bool vm_flush_needed = job->flags & JOB_NEED_VM_FLUSH;
  	bool pasid_mapping_needed = id->pasid != job->pasid ||
  		!id->pasid_mapping ||
  		!dma_fence_is_signaled(id->pasid_mapping);

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
