[PATCH 21/26] drm/amdgpu: convert srbm lock to a spinlock v2

On 12.04.2017 at 00:19, Alex Deucher wrote:
> On Thu, Apr 6, 2017 at 2:21 AM, Andres Rodriguez <andresx7 at gmail.com> wrote:
>> Replace adev->srbm_mutex with a spinlock adev->srbm_lock
>>
>> v2: rebased on 4.12 and included gfx9
>> Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
> Maybe move this one up to the front of the series so it can be applied now?

Actually, I'm not sure that patch is still a good idea.

It turned out that we need to sleep while holding this lock far more often when the KIQ is in use.
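
To illustrate the concern, here is a rough, hypothetical sketch (not the actual amdgpu call chain; amdgpu_kiq_rreg() below just stands in for whatever KIQ-based register access ends up running under this lock):

static u32 srbm_banked_read_sketch(struct amdgpu_device *adev, u32 reg,
				   u32 vmid)
{
	u32 val;

	spin_lock(&adev->srbm_lock);            /* the lock this patch introduces */
	cik_srbm_select(adev, 0, 0, 0, vmid);   /* bank the SRBM-indexed registers */

	/*
	 * With SR-IOV/KIQ in the picture, a register access may have to go
	 * through the KIQ: submit a packet and wait for its fence, which can
	 * sleep. Sleeping is fine under a mutex, but not while holding a
	 * spinlock.
	 */
	val = amdgpu_kiq_rreg(adev, reg);       /* hypothetical helper: may sleep */

	cik_srbm_select(adev, 0, 0, 0, 0);
	spin_unlock(&adev->srbm_lock);
	return val;
}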

Christian.

>
> Alex
>
>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h               |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |  4 +--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c |  4 +--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c        |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/cik_sdma.c             |  4 +--
>>   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c             | 20 ++++++-------
>>   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c             | 34 +++++++++++------------
>>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c             | 24 ++++++++--------
>>   drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c            |  4 +--
>>   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c            |  4 +--
>>   10 files changed, 51 insertions(+), 51 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 4f54846..b9a4161 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1455,21 +1455,21 @@ struct amdgpu_device {
>>          struct work_struct              reset_work;
>>          struct notifier_block           acpi_nb;
>>          struct amdgpu_i2c_chan          *i2c_bus[AMDGPU_MAX_I2C_BUS];
>>          struct amdgpu_debugfs           debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
>>          unsigned                        debugfs_count;
>>   #if defined(CONFIG_DEBUG_FS)
>>          struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
>>   #endif
>>          struct amdgpu_atif              atif;
>>          struct amdgpu_atcs              atcs;
>> -       struct mutex                    srbm_mutex;
>> +       spinlock_t                      srbm_lock;
>>          /* GRBM index mutex. Protects concurrent access to GRBM index */
>>          struct mutex                    grbm_idx_mutex;
>>          struct dev_pm_domain            vga_pm_domain;
>>          bool                            have_disp_power_ref;
>>
>>          /* BIOS */
>>          bool                            is_atom_fw;
>>          uint8_t                         *bios;
>>          uint32_t                        bios_size;
>>          struct amdgpu_bo                *stollen_vga_memory;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>> index 5254562..a009990 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>> @@ -162,30 +162,30 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
>>   {
>>          return (struct amdgpu_device *)kgd;
>>   }
>>
>>   static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
>>                          uint32_t queue, uint32_t vmid)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>          uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          WREG32(mmSRBM_GFX_CNTL, value);
>>   }
>>
>>   static void unlock_srbm(struct kgd_dev *kgd)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>
>>          WREG32(mmSRBM_GFX_CNTL, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
>>                                  uint32_t queue_id)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>
>>          uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
>>          uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
>> index db7410a..6b93a5c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
>> @@ -123,30 +123,30 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
>>   {
>>          return (struct amdgpu_device *)kgd;
>>   }
>>
>>   static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
>>                          uint32_t queue, uint32_t vmid)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>          uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          WREG32(mmSRBM_GFX_CNTL, value);
>>   }
>>
>>   static void unlock_srbm(struct kgd_dev *kgd)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>
>>          WREG32(mmSRBM_GFX_CNTL, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
>>                                  uint32_t queue_id)
>>   {
>>          struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>
>>          uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
>>          uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 0bfc6c6..07f16b4 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -1857,21 +1857,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>                   amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
>>                   pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
>>
>>          /* mutex initialization are all done here so we
>>           * can recall function without having locking issues */
>>          mutex_init(&adev->vm_manager.lock);
>>          atomic_set(&adev->irq.ih.lock, 0);
>>          mutex_init(&adev->firmware.mutex);
>>          mutex_init(&adev->pm.mutex);
>>          mutex_init(&adev->gfx.gpu_clock_mutex);
>> -       mutex_init(&adev->srbm_mutex);
>> +       spin_lock_init(&adev->srbm_lock);
>>          mutex_init(&adev->grbm_idx_mutex);
>>          mutex_init(&adev->mn_lock);
>>          hash_init(adev->mn_hash);
>>
>>          amdgpu_check_arguments(adev);
>>
>>          /* Registers mapping */
>>          /* TODO: block userspace mapping of io register */
>>          spin_lock_init(&adev->mmio_idx_lock);
>>          spin_lock_init(&adev->smc_idx_lock);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
>> index c216e16..fe462ec 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
>> @@ -382,30 +382,30 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
>>          struct amdgpu_ring *ring;
>>          u32 rb_cntl, ib_cntl;
>>          u32 rb_bufsz;
>>          u32 wb_offset;
>>          int i, j, r;
>>
>>          for (i = 0; i < adev->sdma.num_instances; i++) {
>>                  ring = &adev->sdma.instance[i].ring;
>>                  wb_offset = (ring->rptr_offs * 4);
>>
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  for (j = 0; j < 16; j++) {
>>                          cik_srbm_select(adev, 0, 0, 0, j);
>>                          /* SDMA GFX */
>>                          WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
>>                          WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
>>                          /* XXX SDMA RLC - todo */
>>                  }
>>                  cik_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>                  WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
>>                         adev->gfx.config.gb_addr_config & 0x70);
>>
>>                  WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
>>                  WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
>>
>>                  /* Set ring buffer size in dwords */
>>                  rb_bufsz = order_base_2(ring->ring_size / 4);
>>                  rb_cntl = rb_bufsz << 1;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
>> index c0cfcb9..83391d5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
>> @@ -1856,31 +1856,31 @@ static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
>>          /*
>>           * Configure apertures:
>>           * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
>>           * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
>>           * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
>>          */
>>          sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
>>          sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
>>                          SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
>>          sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
>>                  cik_srbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  WREG32(mmSH_MEM_CONFIG, sh_mem_config);
>>                  WREG32(mmSH_MEM_APE1_BASE, 1);
>>                  WREG32(mmSH_MEM_APE1_LIMIT, 0);
>>                  WREG32(mmSH_MEM_BASES, sh_mem_bases);
>>          }
>>          cik_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static void gfx_v7_0_config_init(struct amdgpu_device *adev)
>>   {
>>          adev->gfx.config.double_offchip_lds_buf = 1;
>>   }
>>
>>   /**
>>    * gfx_v7_0_gpu_init - setup the 3D engine
>>    *
>> @@ -1929,36 +1929,36 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
>>                                     MTYPE_UC);
>>          sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
>>
>>          sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
>>                                     SWIZZLE_ENABLE, 1);
>>          sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
>>                                     ELEMENT_SIZE, 1);
>>          sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
>>                                     INDEX_STRIDE, 3);
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = 0; i < adev->vm_manager.num_ids; i++) {
>>                  if (i == 0)
>>                          sh_mem_base = 0;
>>                  else
>>                          sh_mem_base = adev->mc.shared_aperture_start >> 48;
>>                  cik_srbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
>>                  WREG32(mmSH_MEM_APE1_BASE, 1);
>>                  WREG32(mmSH_MEM_APE1_LIMIT, 0);
>>                  WREG32(mmSH_MEM_BASES, sh_mem_base);
>>                  WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
>>          }
>>          cik_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>
>>          gmc_v7_0_init_compute_vmid(adev);
>>
>>          WREG32(mmSX_DEBUG_1, 0x20);
>>
>>          WREG32(mmTA_CNTL_AUX, 0x00010000);
>>
>>          tmp = RREG32(mmSPI_CONFIG_CNTL);
>>          tmp |= 0x03000000;
>>          WREG32(mmSPI_CONFIG_CNTL, tmp);
>> @@ -2962,40 +2962,40 @@ struct hqd_registers
>>          u32 cp_hqd_hq_scheduler1;
>>          u32 cp_mqd_control;
>>   };
>>
>>   static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev, int me, int pipe)
>>   {
>>          u64 eop_gpu_addr;
>>          u32 tmp;
>>          size_t eop_offset = me * pipe * GFX7_MEC_HPD_SIZE * 2;
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
>>
>>          cik_srbm_select(adev, me, pipe, 0, 0);
>>
>>          /* write the EOP addr */
>>          WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
>>          WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
>>
>>          /* set the VMID assigned */
>>          WREG32(mmCP_HPD_EOP_VMID, 0);
>>
>>          /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
>>          tmp = RREG32(mmCP_HPD_EOP_CONTROL);
>>          tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
>>          tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
>>          WREG32(mmCP_HPD_EOP_CONTROL, tmp);
>>
>>          cik_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
>>   {
>>          int i;
>>
>>          /* disable the queue if it's active */
>>          if (RREG32(mmCP_HQD_ACTIVE) & 1) {
>>                  WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
>>                  for (i = 0; i < adev->usec_timeout; i++) {
>> @@ -3210,29 +3210,29 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
>>          if (r) {
>>                  dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
>>                  goto out_unreserve;
>>          }
>>          r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
>>          if (r) {
>>                  dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
>>                  goto out_unreserve;
>>          }
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>
>>          gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
>>          gfx_v7_0_mqd_deactivate(adev);
>>          gfx_v7_0_mqd_commit(adev, mqd);
>>
>>          cik_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>
>>          amdgpu_bo_kunmap(ring->mqd_obj);
>>   out_unreserve:
>>          amdgpu_bo_unreserve(ring->mqd_obj);
>>   out:
>>          return 0;
>>   }
>>
>>   /**
>>    * gfx_v7_0_cp_compute_resume - setup the compute queue registers
>> @@ -5088,28 +5088,28 @@ static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid me:%d\n", me);
>>                  return;
>>          }
>>
>>          if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid "
>>                                  "me:%d pipe:%d\n", pipe, me);
>>                  return;
>>          }
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          cik_srbm_select(adev, me, pipe, 0, 0);
>>
>>          WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
>>                          state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
>>
>>          cik_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
>>                                               struct amdgpu_irq_src *src,
>>                                               unsigned type,
>>                                               enum amdgpu_interrupt_state state)
>>   {
>>          u32 cp_int_cntl;
>>
>>          switch (state) {
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
>> index f0c1a3f..3cfe3c0 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
>> @@ -3936,31 +3936,31 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
>>           */
>>          sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
>>
>>          sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
>>                          SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
>>                          SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
>>                          SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
>>                          MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
>>                          SH_MEM_CONFIG__PRIVATE_ATC_MASK;
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
>>                  vi_srbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  WREG32(mmSH_MEM_CONFIG, sh_mem_config);
>>                  WREG32(mmSH_MEM_APE1_BASE, 1);
>>                  WREG32(mmSH_MEM_APE1_LIMIT, 0);
>>                  WREG32(mmSH_MEM_BASES, sh_mem_bases);
>>          }
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static void gfx_v8_0_config_init(struct amdgpu_device *adev)
>>   {
>>          switch (adev->asic_type) {
>>          default:
>>                  adev->gfx.config.double_offchip_lds_buf = 1;
>>                  break;
>>          case CHIP_CARRIZO:
>>          case CHIP_STONEY:
>> @@ -3985,21 +3985,21 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
>>          gfx_v8_0_config_init(adev);
>>
>>          /* XXX SH_MEM regs */
>>          /* where to put LDS, scratch, GPUVM in FSA64 space */
>>          sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
>>                                     SWIZZLE_ENABLE, 1);
>>          sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
>>                                     ELEMENT_SIZE, 1);
>>          sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
>>                                     INDEX_STRIDE, 3);
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = 0; i < adev->vm_manager.num_ids; i++) {
>>                  vi_srbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  if (i == 0) {
>>                          tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
>>                          tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
>>                          tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
>>                                              SH_MEM_ALIGNMENT_MODE_UNALIGNED);
>>                          WREG32(mmSH_MEM_CONFIG, tmp);
>>                          WREG32(mmSH_MEM_BASES, 0);
>> @@ -4011,21 +4011,21 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
>>                          WREG32(mmSH_MEM_CONFIG, tmp);
>>                          tmp = adev->mc.shared_aperture_start >> 48;
>>                          WREG32(mmSH_MEM_BASES, tmp);
>>                  }
>>
>>                  WREG32(mmSH_MEM_APE1_BASE, 1);
>>                  WREG32(mmSH_MEM_APE1_LIMIT, 0);
>>                  WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
>>          }
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>
>>          gfx_v8_0_init_compute_vmid(adev);
>>
>>          mutex_lock(&adev->grbm_idx_mutex);
>>          /*
>>           * making sure that the following register writes will be broadcasted
>>           * to all the shaders
>>           */
>>          gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>>
>> @@ -5142,70 +5142,70 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
>>
>>          if (adev->gfx.in_reset) { /* for GPU_RESET case */
>>                  /* reset MQD to a clean status */
>>                  if (adev->gfx.mec.mqd_backup[mqd_idx])
>>                          memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
>>
>>                  /* reset ring buffer */
>>                  ring->wptr = 0;
>>                  amdgpu_ring_clear_ring(ring);
>>
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>                  r = gfx_v8_0_mqd_deactivate(adev);
>>                  if (r) {
>>                          dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
>>                          goto out_unlock;
>>                  }
>>                  gfx_v8_0_enable_doorbell(adev, ring->use_doorbell);
>>                  gfx_v8_0_mqd_commit(adev, mqd);
>>                  vi_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>          } else {
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>                  gfx_v8_0_mqd_init(ring);
>>                  r = gfx_v8_0_mqd_deactivate(adev);
>>                  if (r) {
>>                          dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
>>                          goto out_unlock;
>>                  }
>>                  gfx_v8_0_enable_doorbell(adev, ring->use_doorbell);
>>                  gfx_v8_0_mqd_commit(adev, mqd);
>>                  vi_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>                  if (adev->gfx.mec.mqd_backup[mqd_idx])
>>                          memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
>>          }
>>
>>          return r;
>>
>>   out_unlock:
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>          return r;
>>   }
>>
>>   static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
>>   {
>>          struct amdgpu_device *adev = ring->adev;
>>          struct vi_mqd *mqd = ring->mqd_ptr;
>>          int mqd_idx = ring - &adev->gfx.compute_ring[0];
>>
>>          if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
>>                  memset((void *)mqd, 0, sizeof(*mqd));
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>                  gfx_v8_0_mqd_init(ring);
>>                  vi_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>                  if (adev->gfx.mec.mqd_backup[mqd_idx])
>>                          memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
>>          } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
>>                  /* reset MQD to a clean status */
>>                  if (adev->gfx.mec.mqd_backup[mqd_idx])
>>                          memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
>>
>>                  /* reset ring buffer */
>>                  ring->wptr = 0;
>> @@ -5486,32 +5486,32 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
>>                  adev->gfx.srbm_soft_reset = 0;
>>                  return false;
>>          }
>>   }
>>
>>   static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
>>                                    struct amdgpu_ring *ring)
>>   {
>>          int i;
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>          if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
>>                  WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
>>                  for (i = 0; i < adev->usec_timeout; i++) {
>>                          if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
>>                                  break;
>>                          udelay(1);
>>                  }
>>          }
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v8_0_pre_soft_reset(void *handle)
>>   {
>>          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>          u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
>>
>>          if ((!adev->gfx.grbm_soft_reset) &&
>>              (!adev->gfx.srbm_soft_reset))
>>                  return 0;
>> @@ -5603,27 +5603,27 @@ static int gfx_v8_0_soft_reset(void *handle)
>>
>>          /* Wait a little for things to settle down */
>>          udelay(50);
>>
>>          return 0;
>>   }
>>
>>   static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
>>                                struct amdgpu_ring *ring)
>>   {
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>          WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
>>          WREG32(mmCP_HQD_PQ_RPTR, 0);
>>          WREG32(mmCP_HQD_PQ_WPTR, 0);
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v8_0_post_soft_reset(void *handle)
>>   {
>>          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>          u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
>>
>>          if ((!adev->gfx.grbm_soft_reset) &&
>>              (!adev->gfx.srbm_soft_reset))
>>                  return 0;
>> @@ -6877,28 +6877,28 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid me:%d\n", me);
>>                  return;
>>          }
>>
>>          if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid "
>>                                  "me:%d pipe:%d\n", pipe, me);
>>                  return;
>>          }
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          vi_srbm_select(adev, me, pipe, 0, 0);
>>
>>          WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
>>                          state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
>>
>>          vi_srbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
>>                                               struct amdgpu_irq_src *source,
>>                                               unsigned type,
>>                                               enum amdgpu_interrupt_state state)
>>   {
>>          WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
>>                       state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> index 388a6bb..a16cee7 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> @@ -1373,58 +1373,58 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
>>           * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
>>           * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
>>           * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
>>           */
>>          sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
>>
>>          sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
>>                          SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
>>                          SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
>>                  soc15_grbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
>>                  WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
>>          }
>>          soc15_grbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
>>   {
>>          u32 tmp;
>>          int i;
>>
>>          WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
>>
>>          gfx_v9_0_tiling_mode_table_init(adev);
>>
>>          gfx_v9_0_setup_rb(adev);
>>          gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
>>
>>          /* XXX SH_MEM regs */
>>          /* where to put LDS, scratch, GPUVM in FSA64 space */
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          for (i = 0; i < 16; i++) {
>>                  soc15_grbm_select(adev, 0, 0, 0, i);
>>                  /* CP and shaders */
>>                  tmp = 0;
>>                  tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
>>                                      SH_MEM_ALIGNMENT_MODE_UNALIGNED);
>>                  WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), tmp);
>>                  WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), 0);
>>          }
>>          soc15_grbm_select(adev, 0, 0, 0, 0);
>>
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>
>>          gfx_v9_0_init_compute_vmid(adev);
>>
>>          mutex_lock(&adev->grbm_idx_mutex);
>>          /*
>>           * making sure that the following register writes will be broadcasted
>>           * to all the shaders
>>           */
>>          gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>>
>> @@ -2245,40 +2245,40 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
>>          int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
>>
>>          if (is_kiq) {
>>                  gfx_v9_0_kiq_setting(&kiq->ring);
>>          } else {
>>                  mqd_idx = ring - &adev->gfx.compute_ring[0];
>>          }
>>
>>          if (!adev->gfx.in_reset) {
>>                  memset((void *)mqd, 0, sizeof(*mqd));
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>                  gfx_v9_0_mqd_init(ring);
>>                  if (is_kiq)
>>                          gfx_v9_0_kiq_init_register(ring);
>>                  soc15_grbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>          } else { /* for GPU_RESET case */
>>                  /* reset MQD to a clean status */
>>
>>                  /* reset ring buffer */
>>                  ring->wptr = 0;
>>
>>                  if (is_kiq) {
>> -                   mutex_lock(&adev->srbm_mutex);
>> +                   spin_lock(&adev->srbm_lock);
>>                      soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
>>                      gfx_v9_0_kiq_init_register(ring);
>>                      soc15_grbm_select(adev, 0, 0, 0, 0);
>> -                   mutex_unlock(&adev->srbm_mutex);
>> +                   spin_unlock(&adev->srbm_lock);
>>                  }
>>          }
>>
>>          if (is_kiq)
>>                  gfx_v9_0_kiq_enable(ring);
>>          else
>>                  gfx_v9_0_map_queue_enable(&kiq->ring, ring);
>>
>>          return 0;
>>   }
>> @@ -3333,28 +3333,28 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid me:%d\n", me);
>>                  return;
>>          }
>>
>>          if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
>>                  DRM_ERROR("Ignoring request to enable interrupts for invalid "
>>                                  "me:%d pipe:%d\n", pipe, me);
>>                  return;
>>          }
>>
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          soc15_grbm_select(adev, me, pipe, 0, 0);
>>
>>          WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
>>                          state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
>>
>>          soc15_grbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>   }
>>
>>   static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
>>                                               struct amdgpu_irq_src *source,
>>                                               unsigned type,
>>                                               enum amdgpu_interrupt_state state)
>>   {
>>          switch (state) {
>>          case AMDGPU_IRQ_STATE_DISABLE:
>>          case AMDGPU_IRQ_STATE_ENABLE:
>> @@ -3853,21 +3853,21 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
>>          memset(buf, 0, sizeof(struct v9_mqd));
>>
>>          mqd = (struct v9_mqd *)buf;
>>          mqd->header = 0xC0310800;
>>          mqd->compute_pipelinestat_enable = 0x00000001;
>>          mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
>>          mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
>>          mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
>>          mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
>>          mqd->compute_misc_reserved = 0x00000003;
>> -       mutex_lock(&adev->srbm_mutex);
>> +       spin_lock(&adev->srbm_lock);
>>          soc15_grbm_select(adev, ring->me,
>>                                 ring->pipe,
>>                                 ring->queue, 0);
>>          /* disable wptr polling */
>>          WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
>>
>>          /* write the EOP addr */
>>          BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
>>          eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring->queue * MEC_HPD_SIZE);
>>          eop_gpu_addr >>= 8;
>> @@ -3997,21 +3997,21 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
>>          tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PERSISTENT_STATE));
>>          tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
>>          WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PERSISTENT_STATE), tmp);
>>          mqd->cp_hqd_persistent_state = tmp;
>>
>>          /* activate the queue */
>>          mqd->cp_hqd_active = 1;
>>          WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), mqd->cp_hqd_active);
>>
>>          soc15_grbm_select(adev, 0, 0, 0, 0);
>> -       mutex_unlock(&adev->srbm_mutex);
>> +       spin_unlock(&adev->srbm_lock);
>>
>>          amdgpu_bo_kunmap(ring->mqd_obj);
>>          amdgpu_bo_unreserve(ring->mqd_obj);
>>
>>          if (use_doorbell)
>>                  WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
>>
>>          return 0;
>>   }
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
>> index f2d0710..0e0e344 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
>> @@ -415,29 +415,29 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
>>          struct amdgpu_ring *ring;
>>          u32 rb_cntl, ib_cntl;
>>          u32 rb_bufsz;
>>          u32 wb_offset;
>>          int i, j, r;
>>
>>          for (i = 0; i < adev->sdma.num_instances; i++) {
>>                  ring = &adev->sdma.instance[i].ring;
>>                  wb_offset = (ring->rptr_offs * 4);
>>
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  for (j = 0; j < 16; j++) {
>>                          vi_srbm_select(adev, 0, 0, 0, j);
>>                          /* SDMA GFX */
>>                          WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
>>                          WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
>>                  }
>>                  vi_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>                  WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
>>                         adev->gfx.config.gb_addr_config & 0x70);
>>
>>                  WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
>>
>>                  /* Set ring buffer size in dwords */
>>                  rb_bufsz = order_base_2(ring->ring_size / 4);
>>                  rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
>>                  rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
>> index a69e5d4..f8a5da3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
>> @@ -609,29 +609,29 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
>>          u32 rb_bufsz;
>>          u32 wb_offset;
>>          u32 doorbell;
>>          int i, j, r;
>>
>>          for (i = 0; i < adev->sdma.num_instances; i++) {
>>                  ring = &adev->sdma.instance[i].ring;
>>                  amdgpu_ring_clear_ring(ring);
>>                  wb_offset = (ring->rptr_offs * 4);
>>
>> -               mutex_lock(&adev->srbm_mutex);
>> +               spin_lock(&adev->srbm_lock);
>>                  for (j = 0; j < 16; j++) {
>>                          vi_srbm_select(adev, 0, 0, 0, j);
>>                          /* SDMA GFX */
>>                          WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
>>                          WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
>>                  }
>>                  vi_srbm_select(adev, 0, 0, 0, 0);
>> -               mutex_unlock(&adev->srbm_mutex);
>> +               spin_unlock(&adev->srbm_lock);
>>
>>                  WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
>>                         adev->gfx.config.gb_addr_config & 0x70);
>>
>>                  WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
>>
>>                  /* Set ring buffer size in dwords */
>>                  rb_bufsz = order_base_2(ring->ring_size / 4);
>>                  rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
>>                  rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
>> --
>> 2.9.3
>>



