[PATCH] drm/amdgpu: implement harvesting support for UVD 7.2 (v2)

On 2018-07-23 05:53 PM, Alex Deucher wrote:
> Properly handle cases where one or more instance of the IP
> block may be harvested.
>
> v2: make sure ip_num_rings is initialized amdgpu_queue_mgr.c
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       | 10 ++++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 13 +++++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       | 11 +++++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h       |  5 +++
>   drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c         | 56 +++++++++++++++++++++++++--
>   5 files changed, 86 insertions(+), 9 deletions(-)
>
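If I read it right, the approach is: read the harvest fuses once in early_init(),
record them as bits in adev->uvd.harvest_config, and then have every per-instance
loop skip the fused-off instances, i.e. roughly this pattern (my own paraphrase of
what the patch does, just to check I follow it):

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;	/* instance i is fused off, leave it alone */
		/* only touch adev->uvd.inst[i] for live instances */
	}

One comment on the new struct member further down.
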
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 258b6f73cbdf..f4d379cd4e47 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -348,8 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>   			break;
>   		case AMDGPU_HW_IP_UVD:
>   			type = AMD_IP_BLOCK_TYPE_UVD;
> -			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
> +			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +				if (adev->uvd.harvest_config & (1 << i))
> +					continue;
>   				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
> +			}
>   			ib_start_alignment = 64;
>   			ib_size_alignment = 64;
>   			break;
> @@ -362,11 +365,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>   			break;
>   		case AMDGPU_HW_IP_UVD_ENC:
>   			type = AMD_IP_BLOCK_TYPE_UVD;
> -			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
> +			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +				if (adev->uvd.harvest_config & (1 << i))
> +					continue;
>   				for (j = 0; j < adev->uvd.num_enc_rings; j++)
>   					ring_mask |=
>   					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
>   					(j + i * adev->uvd.num_enc_rings));
> +			}
>   			ib_start_alignment = 64;
>   			ib_size_alignment = 64;
>   			break;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
> index ea9850c9224d..c59d3a2af388 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
> @@ -219,7 +219,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
>   			 u32 hw_ip, u32 instance, u32 ring,
>   			 struct amdgpu_ring **out_ring)
>   {
> -	int r, ip_num_rings;
> +	int i, r, ip_num_rings = 0;
>   	struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
>   
>   	if (!adev || !mgr || !out_ring)
> @@ -248,14 +248,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
>   		ip_num_rings = adev->sdma.num_instances;
>   		break;
>   	case AMDGPU_HW_IP_UVD:
> -		ip_num_rings = adev->uvd.num_uvd_inst;
> +		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +			if (!(adev->uvd.harvest_config & (1 << i)))
> +				ip_num_rings++;
> +		}
>   		break;
>   	case AMDGPU_HW_IP_VCE:
>   		ip_num_rings = adev->vce.num_rings;
>   		break;
>   	case AMDGPU_HW_IP_UVD_ENC:
> +		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +			if (!(adev->uvd.harvest_config & (1 << i)))
> +				ip_num_rings++;
> +		}
>   		ip_num_rings =
> -			adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
> +			adev->uvd.num_enc_rings * ip_num_rings;
>   		break;
>   	case AMDGPU_HW_IP_VCN_DEC:
>   		ip_num_rings = 1;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 80b5c453f8c1..a07548c99ab8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
>   		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
> -
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
>   					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
>   					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
> @@ -309,6 +310,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
>   				 &adev->uvd.entity);
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		kfree(adev->uvd.inst[j].saved_bo);
>   
>   		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
> @@ -344,6 +347,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
>   	}
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		if (adev->uvd.inst[j].vcpu_bo == NULL)
>   			continue;
>   
> @@ -366,6 +371,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
>   	int i;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		if (adev->uvd.inst[i].vcpu_bo == NULL)
>   			return -EINVAL;
>   
> @@ -1160,6 +1167,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>   	unsigned fences = 0, i, j;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
>   		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
>   			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> index 66872286ab12..9cf42454ba81 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> @@ -46,8 +46,12 @@ struct amdgpu_uvd_inst {
>   	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
>   	struct amdgpu_irq_src	irq;
>   	uint32_t                srbm_soft_reset;
> +	uint32_t                instance;
>   };

I don't see this patch actually using the new member "instance" anywhere.
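If it is meant to be hooked up later, I would have guessed something roughly like
this (my own sketch, not something from the patch):

	for (i = 0; i < adev->uvd.num_uvd_inst; i++)
		adev->uvd.inst[i].instance = i;	/* remember the physical instance id */

Otherwise it can probably just be dropped.

James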
>   
> +#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
> +#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
> +
>   struct amdgpu_uvd {
>   	const struct firmware	*fw;	/* UVD firmware */
>   	unsigned		fw_version;
> @@ -61,6 +65,7 @@ struct amdgpu_uvd {
>   	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
>   	struct drm_sched_entity entity;
>   	struct delayed_work	idle_work;
> +	unsigned		harvest_config;
>   };
>   
>   int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> index db5f3d78ab12..8179317be750 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> @@ -41,6 +41,12 @@
>   #include "mmhub/mmhub_1_0_sh_mask.h"
>   #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
>   
> +#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
> +#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
> +//UVD_PG0_CC_UVD_HARVESTING
> +#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
> +#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
> +
>   #define UVD7_MAX_HW_INSTANCES_VEGA20			2
>   
>   static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
> @@ -370,10 +376,25 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>   static int uvd_v7_0_early_init(void *handle)
>   {
>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> -	if (adev->asic_type == CHIP_VEGA20)
> +
> +	if (adev->asic_type == CHIP_VEGA20) {
> +		u32 harvest;
> +		int i;
> +
>   		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
> -	else
> +		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
> +			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
> +				adev->uvd.harvest_config |= 1 << i;
> +			}
> +		}
> +		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
> +						 AMDGPU_UVD_HARVEST_UVD1))
> +			/* both instances are harvested, disable the block */
> +			return -ENOENT;
> +	} else {
>   		adev->uvd.num_uvd_inst = 1;
> +	}
>   
>   	if (amdgpu_sriov_vf(adev))
>   		adev->uvd.num_enc_rings = 1;
> @@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		/* UVD TRAP */
>   		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
>   		if (r)
> @@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
>   		return r;
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		if (!amdgpu_sriov_vf(adev)) {
>   			ring = &adev->uvd.inst[j].ring;
>   			sprintf(ring->name, "uvd<%d>", j);
> @@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
>   		return r;
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
>   			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
>   	}
> @@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
>   		goto done;
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		ring = &adev->uvd.inst[j].ring;
>   
>   		if (!amdgpu_sriov_vf(adev)) {
> @@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
>   		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
>   	}
>   
> -	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
> +	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		adev->uvd.inst[i].ring.ready = false;
> +	}
>   
>   	return 0;
>   }
> @@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
>   	int i;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
>   			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
>   				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
> @@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
>   	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
>   		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
>   		adev->uvd.inst[i].ring_enc[0].wptr = 0;
> @@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
>   		init_table += header->uvd_table_offset;
>   
>   		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +			if (adev->uvd.harvest_config & (1 << i))
> +				continue;
>   			ring = &adev->uvd.inst[i].ring;
>   			ring->wptr = 0;
>   			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
> @@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
>   	int i, j, k, r;
>   
>   	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
> +		if (adev->uvd.harvest_config & (1 << k))
> +			continue;
>   		/* disable DPG */
>   		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
>   				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
> @@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
>   	uvd_v7_0_mc_resume(adev);
>   
>   	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
> +		if (adev->uvd.harvest_config & (1 << k))
> +			continue;
>   		ring = &adev->uvd.inst[k].ring;
>   		/* disable clock gating */
>   		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
> @@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
>   	uint8_t i = 0;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		/* force RBC into idle state */
>   		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
>   
> @@ -1756,6 +1800,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
>   	int i;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
>   		adev->uvd.inst[i].ring.me = i;
>   		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
> @@ -1767,6 +1813,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
>   	int i, j;
>   
>   	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
> +		if (adev->uvd.harvest_config & (1 << j))
> +			continue;
>   		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
>   			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
>   			adev->uvd.inst[j].ring_enc[i].me = j;
> @@ -1786,6 +1834,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
>   	int i;
>   
>   	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
> +		if (adev->uvd.harvest_config & (1 << i))
> +			continue;
>   		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
>   		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
>   	}


