On 10 November 2017 at 04:30, Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx> wrote: > Switch from disabling tests during run to using the new disable > API. > > Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx> > --- > tests/amdgpu/amdgpu_test.c | 14 ++++++-- > tests/amdgpu/amdgpu_test.h | 15 ++++++++ > tests/amdgpu/deadlock_tests.c | 8 +---- > tests/amdgpu/uvd_enc_tests.c | 81 +++++++++++++++++-------------------------- > tests/amdgpu/vce_tests.c | 65 +++++++++++++++++----------------- > tests/amdgpu/vcn_tests.c | 74 +++++++++++++++++---------------------- > 6 files changed, 123 insertions(+), 134 deletions(-) > > diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c > index 68ec5d3..91010dc 100644 > --- a/tests/amdgpu/amdgpu_test.c > +++ b/tests/amdgpu/amdgpu_test.c > @@ -150,15 +150,15 @@ static Suites_Active_Status suites_active_stat[] = { > }, > { > .pName = VCE_TESTS_STR, > - .pActive = always_active, > + .pActive = suite_vce_tests_enable, > }, > { > .pName = VCN_TESTS_STR, > - .pActive = always_active, > + .pActive = suite_vcn_tests_enable, > }, > { > .pName = UVD_ENC_TESTS_STR, > - .pActive = always_active, > + .pActive = suite_uvd_enc_tests_enable, > }, > { > .pName = DEADLOCK_TESTS_STR, > @@ -409,6 +409,14 @@ static void amdgpu_disable_suits() > if (amdgpu_set_suite_active(suites_active_stat[i].pName, > suites_active_stat[i].pActive())) > fprintf(stderr, "suit deactivation failed - %s\n", CU_get_error_msg()); > + > + /* Explicitly disable specific tests due to known bugs or preferences */ > + /* > + * BUG: Compute ring stalls and never recovers when the address is > + * written after the command already submitted > + */ > + if (amdgpu_set_test_active(DEADLOCK_TESTS_STR, "compute ring block test", CU_FALSE)) > + fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg()); > } > > /* The main() function for setting up and running the tests. 
> diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h > index 9ccc1ff..dd236ed 100644 > --- a/tests/amdgpu/amdgpu_test.h > +++ b/tests/amdgpu/amdgpu_test.h > @@ -100,6 +100,11 @@ int suite_vce_tests_init(); > int suite_vce_tests_clean(); > > /** > + * Decide if the suite is enabled by default or not. > + */ > +CU_BOOL suite_vce_tests_enable(void); > + > +/** > * Tests in vce test suite > */ > extern CU_TestInfo vce_tests[]; > @@ -115,6 +120,11 @@ int suite_vcn_tests_init(); > int suite_vcn_tests_clean(); > > /** > + * Decide if the suite is enabled by default or not. > + */ > +CU_BOOL suite_vcn_tests_enable(void); > + > +/** > + * Tests in vcn test suite > + */ > extern CU_TestInfo vcn_tests[]; > @@ -130,6 +140,11 @@ int suite_uvd_enc_tests_init(); > int suite_uvd_enc_tests_clean(); > > /** > + * Decide if the suite is enabled by default or not. > + */ > +CU_BOOL suite_uvd_enc_tests_enable(void); > + > +/** > * Tests in uvd enc test suite > */ > extern CU_TestInfo uvd_enc_tests[]; > diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c > index e23d903..f5c4552 100644 > --- a/tests/amdgpu/deadlock_tests.c > +++ b/tests/amdgpu/deadlock_tests.c > @@ -119,13 +119,7 @@ int suite_deadlock_tests_clean(void) > > CU_TestInfo deadlock_tests[] = { > { "gfx ring block test", amdgpu_deadlock_gfx }, > - > - /* > - * BUG: Compute ring stalls and never recovers when the address is > - * written after the command already submitted > - */ > - /* { "compute ring block test", amdgpu_deadlock_compute }, */ > - > + { "compute ring block test", amdgpu_deadlock_compute }, > CU_TEST_INFO_NULL, > }; > > diff --git a/tests/amdgpu/uvd_enc_tests.c b/tests/amdgpu/uvd_enc_tests.c > index bbda131..bed8494 100644 > --- a/tests/amdgpu/uvd_enc_tests.c > +++ b/tests/amdgpu/uvd_enc_tests.c > @@ -79,7 +79,6 @@ static void amdgpu_cs_uvd_enc_session_init(void); > static void amdgpu_cs_uvd_enc_encode(void); > static void amdgpu_cs_uvd_enc_destroy(void); > > -static 
bool uvd_enc_support(void); > > CU_TestInfo uvd_enc_tests[] = { > { "UVD ENC create", amdgpu_cs_uvd_enc_create }, > @@ -89,6 +88,27 @@ CU_TestInfo uvd_enc_tests[] = { > CU_TEST_INFO_NULL, > }; > > +CU_BOOL suite_uvd_enc_tests_enable(void) > +{ > + int r; > + struct drm_amdgpu_info_hw_ip info; > + > + if (amdgpu_device_initialize(drm_amdgpu[0], &major_version, > + &minor_version, &device_handle)) > + return CU_FALSE; > + > + r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_UVD_ENC, 0, &info); > + > + if (amdgpu_device_deinitialize(device_handle)) > + return CU_FALSE; > + > + if (!info.available_rings) > + printf("\n\nThe ASIC NOT support UVD ENC, suite disabled.\n"); > + > + return (r == 0 && (info.available_rings ? CU_TRUE : CU_FALSE)); > +} > + > + > int suite_uvd_enc_tests_init(void) > { > int r; > @@ -100,11 +120,6 @@ int suite_uvd_enc_tests_init(void) > > family_id = device_handle->info.family_id; > > - if (!uvd_enc_support()) { > - printf("\n\nThe ASIC NOT support UVD ENC, all sub-tests will pass\n"); > - return CUE_SUCCESS; > - } > - > r = amdgpu_cs_ctx_create(device_handle, &context_handle); > if (r) > return CUE_SINIT_FAILED; > @@ -123,28 +138,18 @@ int suite_uvd_enc_tests_clean(void) > { > int r; > > - if (!uvd_enc_support()) { > - > - r = amdgpu_device_deinitialize(device_handle); > - if (r) > - return CUE_SCLEAN_FAILED; > - > - return CUE_SUCCESS; > - } else { > - > - r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle, > - ib_mc_address, IB_SIZE); > - if (r) > - return CUE_SCLEAN_FAILED; > + r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle, > + ib_mc_address, IB_SIZE); > + if (r) > + return CUE_SCLEAN_FAILED; > > - r = amdgpu_cs_ctx_free(context_handle); > - if (r) > - return CUE_SCLEAN_FAILED; > + r = amdgpu_cs_ctx_free(context_handle); > + if (r) > + return CUE_SCLEAN_FAILED; > > - r = amdgpu_device_deinitialize(device_handle); > - if (r) > - return CUE_SCLEAN_FAILED; > - } > + r = amdgpu_device_deinitialize(device_handle); > + if (r) > + 
return CUE_SCLEAN_FAILED; > > return CUE_SUCCESS; It seems this return statement disappeared somewhere between writing the patch and pushing it. See https://bugs.freedesktop.org/show_bug.cgi?id=104280 -Emil _______________________________________________ dri-devel mailing list dri-devel@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/dri-devel