On Mon, Jun 17, 2019 at 5:42 PM Ernst Sjöstrand <ernstp@xxxxxxxxx> wrote:
>
> Done automatically with unexpand.
>
> Signed-off-by: Ernst Sjöstrand <ernstp@xxxxxxxxx>

Thanks for the patches.  I've gone ahead and squashed them into the
original patches for upstream.

Alex

> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 158 ++++++++++++-------------
>  1 file changed, 79 insertions(+), 79 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> index 0090cba2d24d..d04f95ec4471 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> @@ -114,7 +114,7 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
>  static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
>  static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
>  static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
> -                                 struct amdgpu_cu_info *cu_info);
> +				 struct amdgpu_cu_info *cu_info);
>  static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
>  static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
>  				   u32 sh_num, u32 instance);
> @@ -345,63 +345,63 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
>
>  static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>  {
> -        struct amdgpu_device *adev = ring->adev;
> -        struct amdgpu_ib ib;
> -        struct dma_fence *f = NULL;
> -        uint32_t scratch;
> -        uint32_t tmp = 0;
> -        long r;
> -
> -        r = amdgpu_gfx_scratch_get(adev, &scratch);
> -        if (r) {
> -                DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
> -                return r;
> -        }
> -
> -        WREG32(scratch, 0xCAFEDEAD);
> -
> -        memset(&ib, 0, sizeof(ib));
> -        r = amdgpu_ib_get(adev, NULL, 256, &ib);
> -        if (r) {
> -                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
> -                goto err1;
> -        }
> -
> -        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
> -        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
> -        ib.ptr[2] = 0xDEADBEEF;
> -        ib.length_dw = 3;
> -
> -        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
> -        if (r)
> -                goto err2;
> -
> -        r = dma_fence_wait_timeout(f, false, timeout);
> -        if (r == 0) {
> -                DRM_ERROR("amdgpu: IB test timed out.\n");
> -                r = -ETIMEDOUT;
> -                goto err2;
> -        } else if (r < 0) {
> -                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
> -                goto err2;
> -        }
> -
> -        tmp = RREG32(scratch);
> -        if (tmp == 0xDEADBEEF) {
> -                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
> -                r = 0;
> -        } else {
> -                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
> -                          scratch, tmp);
> -                r = -EINVAL;
> -        }
> +	struct amdgpu_device *adev = ring->adev;
> +	struct amdgpu_ib ib;
> +	struct dma_fence *f = NULL;
> +	uint32_t scratch;
> +	uint32_t tmp = 0;
> +	long r;
> +
> +	r = amdgpu_gfx_scratch_get(adev, &scratch);
> +	if (r) {
> +		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
> +		return r;
> +	}
> +
> +	WREG32(scratch, 0xCAFEDEAD);
> +
> +	memset(&ib, 0, sizeof(ib));
> +	r = amdgpu_ib_get(adev, NULL, 256, &ib);
> +	if (r) {
> +		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
> +		goto err1;
> +	}
> +
> +	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
> +	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
> +	ib.ptr[2] = 0xDEADBEEF;
> +	ib.length_dw = 3;
> +
> +	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
> +	if (r)
> +		goto err2;
> +
> +	r = dma_fence_wait_timeout(f, false, timeout);
> +	if (r == 0) {
> +		DRM_ERROR("amdgpu: IB test timed out.\n");
> +		r = -ETIMEDOUT;
> +		goto err2;
> +	} else if (r < 0) {
> +		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
> +		goto err2;
> +	}
> +
> +	tmp = RREG32(scratch);
> +	if (tmp == 0xDEADBEEF) {
> +		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
> +		r = 0;
> +	} else {
> +		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
> +			  scratch, tmp);
> +		r = -EINVAL;
> +	}
>  err2:
> -        amdgpu_ib_free(adev, &ib, NULL);
> -        dma_fence_put(f);
> +	amdgpu_ib_free(adev, &ib, NULL);
> +	dma_fence_put(f);
>  err1:
> -        amdgpu_gfx_scratch_free(adev, scratch);
> +	amdgpu_gfx_scratch_free(adev, scratch);
>
> -        return r;
> +	return r;
>  }
>
>  static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
> @@ -1153,10 +1153,10 @@ static int gfx_v10_0_sw_init(void *handle)
>  		return r;
>
>  	/* Privileged reg */
> -        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
> +	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
>  			      &adev->gfx.priv_reg_irq);
> -        if (r)
> -                return r;
> +	if (r)
> +		return r;
>
>  	/* Privileged inst */
>  	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
> @@ -1554,13 +1554,13 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
>  	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>
>  	tmp = REG_SET_FIELD(0, PA_SC_FIFO_SIZE, SC_FRONTEND_PRIM_FIFO_SIZE,
> -                            adev->gfx.config.sc_prim_fifo_size_frontend);
> +			    adev->gfx.config.sc_prim_fifo_size_frontend);
>  	tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_BACKEND_PRIM_FIFO_SIZE,
> -                            adev->gfx.config.sc_prim_fifo_size_backend);
> +			    adev->gfx.config.sc_prim_fifo_size_backend);
>  	tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_HIZ_TILE_FIFO_SIZE,
> -                            adev->gfx.config.sc_hiz_tile_fifo_size);
> +			    adev->gfx.config.sc_hiz_tile_fifo_size);
>  	tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_EARLYZ_TILE_FIFO_SIZE,
> -                            adev->gfx.config.sc_earlyz_tile_fifo_size);
> +			    adev->gfx.config.sc_earlyz_tile_fifo_size);
>  	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, tmp);
>
>  	mutex_unlock(&adev->grbm_idx_mutex);
> @@ -2709,23 +2709,23 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
>  		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
>
>  	/* Trigger an invalidation of the L1 instruction caches */
> -        tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
> -        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
> -        WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
> -
> -        /* Wait for invalidation complete */
> -        for (i = 0; i < usec_timeout; i++) {
> -                tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
> -                if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
> -                                       INVALIDATE_CACHE_COMPLETE))
> -                        break;
> -                udelay(1);
> -        }
> -
> -        if (i >= usec_timeout) {
> -                dev_err(adev->dev, "failed to invalidate instruction cache\n");
> -                return -EINVAL;
> -        }
> +	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
> +	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
> +	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
> +
> +	/* Wait for invalidation complete */
> +	for (i = 0; i < usec_timeout; i++) {
> +		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
> +		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
> +				       INVALIDATE_CACHE_COMPLETE))
> +			break;
> +		udelay(1);
> +	}
> +
> +	if (i >= usec_timeout) {
> +		dev_err(adev->dev, "failed to invalidate instruction cache\n");
> +		return -EINVAL;
> +	}
>
>  	if (amdgpu_emu_mode == 1)
>  		adev->nbio_funcs->hdp_flush(adev, NULL);
> --
> 2.20.1
>
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
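
The "Done automatically with unexpand" note in the quoted commit message refers to the
GNU coreutils unexpand tool; the exact invocation is not included in the mail. A minimal
sketch of how such a spaces-to-tabs pass is commonly run (assuming the kernel's default
8-column tab stops; the file path is taken from the patch, the flags are an illustrative
guess, not necessarily the command Ernst used):

    # Convert only leading runs of blanks to tabs (8-column tab stops are the default).
    # unexpand writes to stdout, so round-trip through a temporary file.
    unexpand --first-only drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c > gfx_v10_0.c.tabs
    mv gfx_v10_0.c.tabs drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c

Restricting the conversion to leading blanks leaves whitespace inside string literals and
comments untouched, which is consistent with the diff above changing only indentation.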