Modify sdma block ras functions to fit the unified ras function pointers.

Signed-off-by: yipechai <YiPeng.Chai@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  | 11 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 11 +++----
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c   | 42 ++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c   | 25 +++++++++++---
 4 files changed, 56 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 65306e0079af..e6d82e6e702c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -912,11 +912,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 			adev->umc.ras_funcs->ops.query_ras_error_address(adev, &err_data);
 		break;
 	case AMDGPU_RAS_BLOCK__SDMA:
-		if (adev->sdma.funcs->query_ras_error_count) {
-			for (i = 0; i < adev->sdma.num_instances; i++)
-				adev->sdma.funcs->query_ras_error_count(adev, i,
-							&err_data);
-		}
+		if (adev->sdma.ras_funcs->ops.query_ras_error_count)
+			adev->sdma.ras_funcs->ops.query_ras_error_count(adev, &err_data);
 		break;
 	case AMDGPU_RAS_BLOCK__GFX:
 		if (adev->gfx.ras_funcs &&
@@ -1035,8 +1032,8 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
 			adev->mmhub.ras_funcs->ops.reset_ras_error_status(adev);
 		break;
 	case AMDGPU_RAS_BLOCK__SDMA:
-		if (adev->sdma.funcs->reset_ras_error_count)
-			adev->sdma.funcs->reset_ras_error_count(adev);
+		if (adev->sdma.ras_funcs->ops.reset_ras_error_count)
+			adev->sdma.ras_funcs->ops.reset_ras_error_count(adev);
 		break;
 	case AMDGPU_RAS_BLOCK__HDP:
 		if (adev->hdp.ras_funcs &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index f8fb755e3aa6..a76c63520ca0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -23,6 +23,7 @@
 #ifndef __AMDGPU_SDMA_H__
 #define __AMDGPU_SDMA_H__
+#include "amdgpu_ras.h"
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		8
 
@@ -51,12 +52,8 @@ struct amdgpu_sdma_instance {
 };
 
 struct amdgpu_sdma_ras_funcs {
-	int (*ras_late_init)(struct amdgpu_device *adev,
-			void *ras_ih_info);
-	void (*ras_fini)(struct amdgpu_device *adev);
-	int (*query_ras_error_count)(struct amdgpu_device *adev,
-			uint32_t instance, void *ras_error_status);
-	void (*reset_ras_error_count)(struct amdgpu_device *adev);
+	struct amdgpu_ras_block_ops ops;
+	int (*sdma_ras_late_init)(struct amdgpu_device *adev, void *ras_ih_info);
 };
 
 struct amdgpu_sdma {
@@ -73,7 +70,7 @@ struct amdgpu_sdma {
 	uint32_t		srbm_soft_reset;
 	bool			has_page_queue;
 	struct ras_common_if	*ras_if;
-	const struct amdgpu_sdma_ras_funcs	*funcs;
+	const struct amdgpu_sdma_ras_funcs	*ras_funcs;
 };
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 69c9e460c1eb..d5bd23b57f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1898,13 +1898,13 @@ static int sdma_v4_0_late_init(void *handle)
 	sdma_v4_0_setup_ulv(adev);
 
 	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-		if (adev->sdma.funcs &&
-		    adev->sdma.funcs->reset_ras_error_count)
-			adev->sdma.funcs->reset_ras_error_count(adev);
+		if (adev->sdma.ras_funcs &&
+		    adev->sdma.ras_funcs->ops.reset_ras_error_count)
+			adev->sdma.ras_funcs->ops.reset_ras_error_count(adev);
 	}
 
-	if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
-		return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+	if (adev->sdma.ras_funcs && adev->sdma.ras_funcs->sdma_ras_late_init)
+		return adev->sdma.ras_funcs->sdma_ras_late_init(adev, &ih_info);
 	else
 		return 0;
 }
@@ -2007,8 +2007,8 @@ static int sdma_v4_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
-		adev->sdma.funcs->ras_fini(adev);
+	if (adev->sdma.ras_funcs && adev->sdma.ras_funcs->ops.ras_fini)
+		adev->sdma.ras_funcs->ops.ras_fini(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -2745,7 +2745,7 @@ static void sdma_v4_0_get_ras_error_count(uint32_t value,
 	}
 }
 
-static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
 					   uint32_t instance, void *ras_error_status)
 {
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -2778,11 +2778,25 @@ static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
 	}
 }
 
+static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+	int i = 0;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status))
+		{
+			dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i);
+			return;
+		}
+	}
+}
+
 static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
-	.ras_late_init = amdgpu_sdma_ras_late_init,
-	.ras_fini = amdgpu_sdma_ras_fini,
-	.query_ras_error_count = sdma_v4_0_query_ras_error_count,
-	.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
+	.ops = {
+		.ras_fini = amdgpu_sdma_ras_fini,
+		.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+		.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
+	},
+	.sdma_ras_late_init = amdgpu_sdma_ras_late_init,
 };
@@ -2790,10 +2804,10 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
 	case CHIP_ARCTURUS:
-		adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+		adev->sdma.ras_funcs = &sdma_v4_0_ras_funcs;
 		break;
 	case CHIP_ALDEBARAN:
-		adev->sdma.funcs = &sdma_v4_4_ras_funcs;
+		adev->sdma.ras_funcs = &sdma_v4_4_ras_funcs;
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index bf95007f0843..a4b05dbb88ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -188,7 +188,7 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
 	}
 }
 
-static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_4_query_ras_error_count_by_instance(struct amdgpu_device *adev,
 					   uint32_t instance,
 					   void *ras_error_status)
 {
@@ -245,9 +245,24 @@ static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
 	}
 }
 
+static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+	int i = 0;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status))
+		{
+			dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i);
+			return;
+		}
+	}
+
+}
+
 const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = {
-	.ras_late_init = amdgpu_sdma_ras_late_init,
-	.ras_fini = amdgpu_sdma_ras_fini,
-	.query_ras_error_count = sdma_v4_4_query_ras_error_count,
-	.reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
+	.ops = {
+		.ras_fini = amdgpu_sdma_ras_fini,
+		.query_ras_error_count = sdma_v4_4_query_ras_error_count,
+		.reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
+	},
+	.sdma_ras_late_init = amdgpu_sdma_ras_late_init,
 };
-- 
2.25.1
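
For readers following the series, below is a minimal, self-contained sketch of the dispatch model this patch moves SDMA onto. The struct shapes mirror the patch (amdgpu_ras_block_ops embedded in amdgpu_sdma_ras_funcs, a block-level query that loops over instances internally); the cut-down amdgpu_device and ras_err_data definitions and the sdma_* helper bodies are illustrative stand-ins, not driver code. The point it shows: the generic caller no longer needs to know num_instances, it just calls through ops.

/*
 * Illustrative sketch only -- simplified stand-ins for the driver types,
 * modelling the unified RAS function-pointer dispatch used after this patch.
 */
#include <stdio.h>
#include <stdint.h>

struct ras_err_data {
	unsigned long ue_count;	/* uncorrectable error count */
	unsigned long ce_count;	/* correctable error count */
};

struct amdgpu_device;

/* Unified, block-agnostic RAS ops, as consumed by the generic RAS code. */
struct amdgpu_ras_block_ops {
	void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status);
	void (*reset_ras_error_count)(struct amdgpu_device *adev);
};

/* SDMA keeps its block-specific late_init hook next to the shared ops. */
struct amdgpu_sdma_ras_funcs {
	struct amdgpu_ras_block_ops ops;
	int (*sdma_ras_late_init)(struct amdgpu_device *adev, void *ras_ih_info);
};

struct amdgpu_device {
	struct {
		int num_instances;
		const struct amdgpu_sdma_ras_funcs *ras_funcs;
	} sdma;
};

/* Per-instance helper: the old per-instance query, now internal to the block. */
static int sdma_query_ras_error_count_by_instance(struct amdgpu_device *adev,
						  uint32_t instance,
						  void *ras_error_status)
{
	struct ras_err_data *err_data = ras_error_status;

	/* A real implementation would read this instance's EDC counters. */
	err_data->ce_count += 1;
	return 0;
}

/* Block-level entry point: iterates the instances itself. */
static void sdma_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (sdma_query_ras_error_count_by_instance(adev, i, ras_error_status))
			return;
}

static void sdma_reset_ras_error_count(struct amdgpu_device *adev)
{
	/* A real implementation would clear the EDC counters here. */
}

static const struct amdgpu_sdma_ras_funcs sdma_ras_funcs = {
	.ops = {
		.query_ras_error_count = sdma_query_ras_error_count,
		.reset_ras_error_count = sdma_reset_ras_error_count,
	},
};

int main(void)
{
	struct ras_err_data err_data = { 0 };
	struct amdgpu_device adev = {
		.sdma = { .num_instances = 2, .ras_funcs = &sdma_ras_funcs },
	};

	/* Generic caller: no per-instance loop, just the unified op. */
	if (adev.sdma.ras_funcs->ops.query_ras_error_count)
		adev.sdma.ras_funcs->ops.query_ras_error_count(&adev, &err_data);

	printf("ce=%lu ue=%lu\n", err_data.ce_count, err_data.ue_count);
	return 0;
}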