Hey Ma, attached is a solution - it's compile-tested only, as I still can't
get my XGMI setup to work (with the bridge connected, only one device is
visible to the system). Please try it on your system if you have a chance.

Andrey

On 12/4/19 10:14 PM, Ma, Le wrote:
>From 1c89d4d835b9dbb6e02bdbdce903adbd12b1c115 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
Date: Fri, 6 Dec 2019 12:26:33 -0500
Subject: drm: Add reusable task barrier.

It is used to synchronize N threads at a rendezvous point before
execution of critical code that has to be started by all the
threads at approximately the same time.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
---
 include/drm/task_barrier.h | 97 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 include/drm/task_barrier.h

diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 0000000..858cd7f
--- /dev/null
+++ b/include/drm/task_barrier.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable two-phase task barrier (rendezvous point) implementation
+ * for N tasks. Based on The Little Book of Semaphores -
+ * https://greenteapress.com/wp/semaphores/
+ */
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+	unsigned int n;
+	atomic_t count;
+	struct semaphore enter_turnstile;
+	struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+						 unsigned int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+	tb->n = 0;
+	atomic_set(&tb->count, 0);
+	sema_init(&tb->enter_turnstile, 0);
+	sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+	tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+	tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * When all threads have passed this point the entry barrier is
+ * back in the locked state.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+	if (atomic_inc_return(&tb->count) == tb->n)
+		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+	down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to avoid any one thread running ahead of the rest
+ * if the barrier is used in a loop (repeatedly).
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+	if (atomic_dec_return(&tb->count) == 0)
+		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+	down(&tb->exit_turnstile);
+}
+
+#endif
-- 
2.7.4
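A quick illustration of the intended usage for reviewers (a sketch only, not
part of the patch - reset_worker(), run_synchronized() and
do_critical_reset_step() are made-up names): each of the N registered tasks
calls task_barrier_enter() before the critical code and task_barrier_exit()
after it, so no task can start a new round before every task has finished
the current one.

#include <linux/kthread.h>
#include <drm/task_barrier.h>

static struct task_barrier tb;

/* Hypothetical critical work that must start on all threads together. */
static void do_critical_reset_step(void)
{
}

static int reset_worker(void *data)
{
	/* Phase 1: every thread blocks here until the Nth one arrives. */
	task_barrier_enter(&tb);

	do_critical_reset_step();

	/*
	 * Phase 2: block again until all threads are done, so a fast
	 * thread cannot loop around and re-enter the barrier early.
	 */
	task_barrier_exit(&tb);

	return 0;
}

static void run_synchronized(unsigned int n)
{
	unsigned int i;

	task_barrier_init(&tb);

	for (i = 0; i < n; i++)
		task_barrier_add_task(&tb);

	for (i = 0; i < n; i++)
		kthread_run(reset_worker, NULL, "reset-worker/%u", i);
}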
>From 77d54ef864cb0c667b9396f0cdf5c66cb672f6c2 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
Date: Fri, 6 Dec 2019 12:43:30 -0500
Subject: drm/amdgpu: Add task barrier to XGMI hive.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 4 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8..5cf920d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 	INIT_LIST_HEAD(&tmp->device_list);
 	mutex_init(&tmp->hive_lock);
 	mutex_init(&tmp->reset_lock);
+	task_barrier_init(&tmp->tb);
 
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
@@ -408,6 +409,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	top_info->num_nodes = count;
 	hive->number_devices = count;
 
+	task_barrier_add_task(&hive->tb);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			/* update node list for other device in the hive */
@@ -470,6 +473,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_destroy(&hive->hive_lock);
 		mutex_destroy(&hive->reset_lock);
 	} else {
+		task_barrier_rem_task(&hive->tb);
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504f..74011fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
 #ifndef __AMDGPU_XGMI_H__
 #define __AMDGPU_XGMI_H__
 
+#include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
 struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
 	int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+	struct task_barrier tb;
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
-- 
2.7.4
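To summarize the wiring above: the barrier's task count tracks hive
membership, so by the time resets are scheduled, tb.n equals the number of
devices that will rendezvous. Condensed (calls taken verbatim from the
patch):

	task_barrier_init(&tmp->tb);       /* hive created in amdgpu_get_xgmi_hive()            */
	task_barrier_add_task(&hive->tb);  /* each device joining in amdgpu_xgmi_add_device()   */
	task_barrier_rem_task(&hive->tb);  /* device leaving in amdgpu_xgmi_remove_device()     */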
>From 016717e5caeaf1355935eb4302e43396c8a494ce Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
Date: Fri, 6 Dec 2019 13:19:15 -0500
Subject: drm/amdgpu: Redo concurrent support of BACO reset for XGMI.

Use the task barrier in the XGMI hive to synchronize BACO enter/exit
across all devices in the hive.

This also reverts commit b01245ff54db66073b104ac9d9fbefb7b264b36d.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |   2 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 113 +++++++++++------------------
 2 files changed, 44 insertions(+), 71 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a78a363..50bab33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1001,8 +1001,6 @@ struct amdgpu_device {
 
 	bool                            pm_sysfs_en;
 	bool                            ucode_sysfs_en;
-
-	bool				in_baco;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7324a5f..bf04e81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -67,6 +67,7 @@
 #include "amdgpu_tmz.h"
 
 #include <linux/suspend.h>
+#include <drm/task_barrier.h>
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -2664,13 +2665,39 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 	struct amdgpu_device *adev =
 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
 
-	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
-		adev->asic_reset_res = (adev->in_baco == false) ?
-				amdgpu_device_baco_enter(adev->ddev) :
-				amdgpu_device_baco_exit(adev->ddev);
-	else
-		adev->asic_reset_res = amdgpu_asic_reset(adev);
+	/*
+	 * Use the task barrier to synchronize all xgmi reset works across the
+	 * hive. task_barrier_enter and task_barrier_exit will block until all
+	 * the threads running the xgmi reset works reach those points. I assume
+	 * a guarantee of progress here for all the threads, as the workqueue
+	 * code creates new worker threads as needed by the amount of work items
+	 * in the queue (see worker_thread), and each thread sleeps in the
+	 * barrier, thus yielding the CPU for other work threads to make
+	 * progress.
+	 */
+	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+		if (hive)
+			task_barrier_enter(&hive->tb);
+
+		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+		if (adev->asic_reset_res)
+			goto fail;
+
+		if (hive)
+			task_barrier_exit(&hive->tb);
+
+		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+		if (adev->asic_reset_res)
+			goto fail;
+	} else {
+		adev->asic_reset_res = amdgpu_asic_reset(adev);
+	}
+
+fail:
 	if (adev->asic_reset_res)
 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 			 adev->asic_reset_res, adev->ddev->unique);
@@ -3796,18 +3823,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	return r;
 }
 
-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
-				struct amdgpu_hive_info *hive,
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				struct list_head *device_list_handle,
 				bool *need_full_reset_arg)
 {
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
 	int r = 0;
-	int cpu = smp_processor_id();
-	bool use_baco =
-		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
-		true : false;
 
 	/*
 	 * ASIC reset has to be done on all HGMI hive nodes ASAP
@@ -3815,62 +3837,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 	 */
 	if (need_full_reset) {
 		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-			/*
-			 * For XGMI run all resets in parallel to speed up the
-			 * process by scheduling the highpri wq on different
-			 * cpus. For XGMI with baco reset, all nodes must enter
-			 * baco within close proximity before anyone exit.
-			 */
+			/* For XGMI run all resets in parallel to speed up the process */
 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-				if (!queue_work_on(cpu, system_highpri_wq,
-						   &tmp_adev->xgmi_reset_work))
+				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
 					r = -EALREADY;
-				cpu = cpumask_next(cpu, cpu_online_mask);
 			} else
 				r = amdgpu_asic_reset(tmp_adev);
-			if (r)
-				break;
-		}
-
-		/* For XGMI wait for all work to complete before proceed */
-		if (!r) {
-			list_for_each_entry(tmp_adev, device_list_handle,
-					    gmc.xgmi.head) {
-				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-					flush_work(&tmp_adev->xgmi_reset_work);
-					r = tmp_adev->asic_reset_res;
-					if (r)
-						break;
-					if (use_baco)
-						tmp_adev->in_baco = true;
-				}
-			}
-		}
-
-		/*
-		 * For XGMI with baco reset, need exit baco phase by scheduling
-		 * xgmi_reset_work one more time. PSP reset and sGPU skips this
-		 * phase. Not assume the situation that PSP reset and baco reset
-		 * coexist within an XGMI hive.
-		 */
-
-		if (!r && use_baco) {
-			cpu = smp_processor_id();
-			list_for_each_entry(tmp_adev, device_list_handle,
-					    gmc.xgmi.head) {
-				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-					if (!queue_work_on(cpu,
-							   system_highpri_wq,
-							   &tmp_adev->xgmi_reset_work))
-						r = -EALREADY;
-					if (r)
-						break;
-					cpu = cpumask_next(cpu, cpu_online_mask);
-				}
+			if (r) {
+				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+					  r, tmp_adev->ddev->unique);
+				break;
 			}
 		}
 
-		if (!r && use_baco) {
+		/* For XGMI wait for all PSP resets to complete before proceed */
+		if (!r) {
 			list_for_each_entry(tmp_adev, device_list_handle,
 					    gmc.xgmi.head) {
 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -3878,21 +3860,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 					flush_work(&tmp_adev->xgmi_reset_work);
 					r = tmp_adev->asic_reset_res;
 					if (r)
 						break;
-					tmp_adev->in_baco = false;
 				}
 			}
 		}
-
-		if (r) {
-			DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
-				  r, tmp_adev->ddev->unique);
-			goto end;
-		}
 	}
 
 	if (!r && amdgpu_ras_intr_triggered())
 		amdgpu_ras_intr_cleared();
 
+
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
 			/* post card */
@@ -4181,8 +4157,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			if (r)
 				adev->asic_reset_res = r;
 		} else {
-			r = amdgpu_do_asic_reset(adev, hive, device_list_handle,
-						 &need_full_reset);
+			r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
 			if (r && r == -EAGAIN)
 				goto retry;
 		}
-- 
2.7.4
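To spell out what the barrier buys us in amdgpu_device_xgmi_reset_func: the
first rendezvous makes all hive nodes start BACO entry at approximately the
same time, and the second guarantees every node has finished entering BACO
before any node begins to exit. Condensed flow of each per-device work item
(error handling omitted, otherwise as in the patch):

	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);

	task_barrier_enter(&hive->tb);        /* rendezvous #1: wait for all workers     */
	amdgpu_device_baco_enter(adev->ddev); /* all nodes enter BACO in close proximity */
	task_barrier_exit(&hive->tb);         /* rendezvous #2: all nodes now in BACO    */
	amdgpu_device_baco_exit(adev->ddev);  /* only then may any node leave BACO       */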
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx