Re: [PATCH v4] drm/amdgpu: Ensure the DMA engine is deactivated during set ups

On 04/28, ricetons@xxxxxxxxx wrote:
> From: Haohui Mai <ricetons@xxxxxxxxx>
> 
> The patch fully deactivates the DMA engine before setting up the ring
> buffer to avoid potential data races and crashes.
> 
> The v4 patch addresses the comments from Lang Yu <Lang.Yu@xxxxxxx>.
> 
> Signed-off-by: Haohui Mai <ricetons@xxxxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 113 ++++++++++++++-----------
>  1 file changed, 62 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> index 013d2dec81d0..b000117b77d0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> @@ -459,7 +459,6 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
>  	}
>  }
>  
> -
>  /**
>   * sdma_v5_2_gfx_stop - stop the gfx async dma engines
>   *
> @@ -505,17 +504,21 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
>  }
>  
>  /**
> - * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
> + * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
> + * context switch for an instance
>   *
>   * @adev: amdgpu_device pointer
> - * @enable: enable/disable the DMA MEs context switch.
> + * @instance_idx: the index of the SDMA instance
>   *
> - * Halt or unhalt the async dma engines context switch.
> + * Unhalt the async dma engines context switch.
>   */
> -static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
> +static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
>  {
>  	u32 f32_cntl, phase_quantum = 0;
> -	int i;
> +
> +	if (WARN_ON(instance_idx >= adev->sdma.num_instances))
> +		return;
>  
>  	if (amdgpu_sdma_phase_quantum) {
>  		unsigned value = amdgpu_sdma_phase_quantum;
> @@ -539,61 +542,71 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
>  		phase_quantum =
>  			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
>  			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
> -	}
> -
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		if (enable && amdgpu_sdma_phase_quantum) {
> -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
> -			       phase_quantum);
> -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
> -			       phase_quantum);
> -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
> -			       phase_quantum);
> -		}
>  
> -		if (!amdgpu_sriov_vf(adev)) {
> -			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> -			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> -					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
> -			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
> -		}
> +		WREG32_SOC15_IP(GC,
> +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
> +			phase_quantum);
> +		WREG32_SOC15_IP(GC,
> +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
> +			phase_quantum);
> +		WREG32_SOC15_IP(GC,
> +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
> +			phase_quantum);
>  	}
>  
> +	if (!amdgpu_sriov_vf(adev)) {
> +		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
> +		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> +				AUTO_CTXSW_ENABLE, 1);
> +		WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
> +	}
>  }
>  
>  /**
> - * sdma_v5_2_enable - stop the async dma engines
> + * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
>   *
>   * @adev: amdgpu_device pointer
> - * @enable: enable/disable the DMA MEs.
>   *
> - * Halt or unhalt the async dma engines.
> + * Halt the async dma engines context switch.
>   */
> -static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
> +static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
>  {
>  	u32 f32_cntl;
>  	int i;
>  
> -	if (!enable) {
> -		sdma_v5_2_gfx_stop(adev);
> -		sdma_v5_2_rlc_stop(adev);
> -	}
> +	if (amdgpu_sriov_vf(adev))
> +		return;
>  
> -	if (!amdgpu_sriov_vf(adev)) {
> -		for (i = 0; i < adev->sdma.num_instances; i++) {
> -			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
> -			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
> -			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
> -		}
> +	for (i = 0; i < adev->sdma.num_instances; i++) {
> +		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> +		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> +				AUTO_CTXSW_ENABLE, 0);
> +		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
>  	}
>  }
>  
> +/**
> + * sdma_v5_2_halt - stop the async dma engines
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Halt the async dma engines.
> + */
> +static void sdma_v5_2_halt(struct amdgpu_device *adev)
> +{
> +	sdma_v5_2_gfx_stop(adev);
> +	sdma_v5_2_rlc_stop(adev);
> +}
> +
>  /**
>   * sdma_v5_2_gfx_resume - setup and start the async dma engines
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * Set up the gfx DMA ring buffers and enable them.
> + * It assumes that the dma engine is stopped for each instance.
> + * The function then enables the engine and preemption for each instance in turn.
> + *
>   * Returns 0 for success, error for failure.
>   */
>  static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
> @@ -737,10 +750,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
>  
>  		ring->sched.ready = true;
>  
> -		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
> -			sdma_v5_2_ctx_switch_enable(adev, true);
> -			sdma_v5_2_enable(adev, true);
> -		}
> +		sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
>  
>  		r = amdgpu_ring_test_ring(ring);
>  		if (r) {
> @@ -784,7 +794,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
>  	int i, j;
>  
>  	/* halt the MEs */
> -	sdma_v5_2_enable(adev, false);
> +	sdma_v5_2_halt(adev);
>  
>  	for (i = 0; i < adev->sdma.num_instances; i++) {
>  		if (!adev->sdma.instance[i].fw)
> @@ -856,8 +866,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
>  	int r = 0;
>  
>  	if (amdgpu_sriov_vf(adev)) {
> -		sdma_v5_2_ctx_switch_enable(adev, false);
> -		sdma_v5_2_enable(adev, false);
> +		sdma_v5_2_ctx_switch_disable_all(adev);
> +		sdma_v5_2_halt(adev);
>  
>  		/* set RB registers */
>  		r = sdma_v5_2_gfx_resume(adev);
> @@ -865,6 +875,9 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
>  	}
>  
>  	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
> +		sdma_v5_2_ctx_switch_disable_all(adev);
> +		sdma_v5_2_halt(adev);


You don't need to call these functions here.

In the original code, you can see that sdma_v5_2_load_microcode() calls
sdma_v5_2_enable(adev, false) to halt the engine before loading the ucode.

What I mean is that sdma_v5_2_halt(adev) should have no functional change
from the original sdma_v5_2_enable(adev, false), like this:

static void sdma_v5_2_halt(struct amdgpu_device *adev)
{
	u32 f32_cntl;
	int i;

	/* Stop the gfx and rlc queues first. */
	sdma_v5_2_gfx_stop(adev);
	sdma_v5_2_rlc_stop(adev);

	/* Then actually halt the engines by setting the HALT bit,
	 * exactly as sdma_v5_2_enable(adev, false) used to do.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
		}
	}
}

But you removed the code that sets the HALT bit of the mmSDMA0_F32_CNTL register.
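
With sdma_v5_2_halt() fixed like that, your extra calls before the
AMDGPU_FW_LOAD_DIRECT branch are also redundant, because
sdma_v5_2_load_microcode() already halts the MEs itself. Roughly what I
mean (an untested sketch, not a full hunk):

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* sdma_v5_2_load_microcode() starts by calling
		 * sdma_v5_2_halt(), so the engines are already quiesced
		 * before the ucode load; no disable/halt calls are
		 * needed here.
		 */
		r = sdma_v5_2_load_microcode(adev);
		if (r)
			return r;
	}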

Regards,
Lang

>  		r = sdma_v5_2_load_microcode(adev);
>  		if (r)
>  			return r;
> @@ -881,12 +894,10 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
>  		amdgpu_gfx_off_ctrl(adev, false);
>  
>  	sdma_v5_2_soft_reset(adev);
> -	/* unhalt the MEs */
> -	sdma_v5_2_enable(adev, true);
> -	/* enable sdma ring preemption */
> -	sdma_v5_2_ctx_switch_enable(adev, true);
>  
> -	/* start the gfx rings and rlc compute queues */
> +	/* Soft reset is supposed to disable the dma engine and preemption.
> +	 * Now start the gfx rings and rlc compute queues.
> +	 */
>  	r = sdma_v5_2_gfx_resume(adev);
>  	if (adev->in_s0ix)
>  		amdgpu_gfx_off_ctrl(adev, true);
> @@ -1340,8 +1351,8 @@ static int sdma_v5_2_hw_fini(void *handle)
>  	if (amdgpu_sriov_vf(adev))
>  		return 0;
>  
> -	sdma_v5_2_ctx_switch_enable(adev, false);
> -	sdma_v5_2_enable(adev, false);
> +	sdma_v5_2_ctx_switch_disable_all(adev);
> +	sdma_v5_2_halt(adev);
>  
>  	return 0;
>  }
> -- 
> 2.25.1
> 


