Some registers are pf & vf copies, and we can safely use the mmio method to access them. And sometimes we are forbidden to use kiq to access registers, e.g. in INTR context. Change-Id: Ie6dc323dc86829a4a6ceb7073c269b106b534c4a Signed-off-by: Monk Liu <Monk.Liu at amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 36 ++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 402a895..5dd0615 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1509,6 +1509,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, void amdgpu_device_fini(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +uint32_t amdgpu_mm_rreg_nokiq(struct amdgpu_device *adev, uint32_t reg, + bool always_indirect); +void amdgpu_mm_wreg_nokiq(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + bool always_indirect); + uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, bool always_indirect); void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, @@ -1523,6 +1528,11 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); /* * Registers read & write functions.
*/ +#define RREG32_nokiq(reg) amdgpu_mm_rreg_nokiq(adev, (reg), false) +#define RREG32_IDX_nokiq(reg) amdgpu_mm_rreg_nokiq(adev, (reg), true) +#define WREG32_nokiq(reg, v) amdgpu_mm_wreg_nokiq(adev, (reg), (v), false) +#define WREG32_IDX_nokiq(reg, v) amdgpu_mm_wreg_nokiq(adev, (reg), (v), true) + #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false) #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true) #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2b404ca..d5870d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -136,6 +136,42 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, } } +uint32_t amdgpu_mm_rreg_nokiq(struct amdgpu_device *adev, uint32_t reg, + bool always_indirect) +{ + uint32_t ret; + + if ((reg * 4) < adev->rmmio_size && !always_indirect) + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); + else { + unsigned long flags; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); + ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } + trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + return ret; +} + +void amdgpu_mm_wreg_nokiq(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + bool always_indirect) +{ + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + + if ((reg * 4) < adev->rmmio_size && !always_indirect) + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + else { + unsigned long flags; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); + writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } +} + u32
amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) { if ((reg * 4) < adev->rio_mem_size) -- 2.7.4