On Fri, May 11, 2018 at 9:15 PM, Felix Kuehling <felix.kuehling at amd.com> wrote:
> This patch series was meant to be applied after the userptr changes. I
> haven't tested this without the userptr changes.
>
> I think your main concern about userptr is the use of GFP_NOIO. I
> remember considering memalloc_noio_save/restore when I worked on this
> over a year ago. I found an old email thread I had with Christian about
> this (subject: MMU notifier deadlock on kernel 4.9):
>
>> memalloc_noio_save doesn't affect kmalloc directly. It sets
>> current->flags, which is used deep inside the page allocator, after
>> lockdep_trace_alloc(gfp) flags a lock as being used with IO enabled. The
>> slob allocator looks at gfp_allowed_mask, but I'm not sure how safe it
>> is to change that in a driver and it doesn't seem to be an exported
>> symbol anyway.
> Maybe this has changed in the meantime, but a year ago, using
> memalloc_noio_save/restore may have prevented real deadlocks, but it
> would not shut up the lockdep warnings. FWIW, I don't find
> lockdep_trace_alloc in the current kernel.
>
> Regards,
> Felix

I'm not familiar with this API, but from reading about it, it seems a more
robust solution than changing the GFP flags directly in each kmalloc, for
the reasons I mentioned in the original email I sent.
Having said that, if no one else objects and we agree to look at moving to
that API in the future, I don't object to taking your patch-set as-is now.

Oded

>
>
> On 2018-05-11 05:10 AM, Oded Gabbay wrote:
>> On Wed, Apr 11, 2018 at 12:33 AM, Felix Kuehling <Felix.Kuehling at amd.com> wrote:
>>> Signed-off-by: John Bridgman <john.bridgman at amd.com>
>>> Signed-off-by: Jay Cornwall <Jay.Cornwall at amd.com>
>>> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdkfd/Makefile | 1 +
>>> drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +-
>>> drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 3 +
>>> drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 443 ++++++++++++++++++++++++
>>> drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 +
>>> 5 files changed, 451 insertions(+), 1 deletion(-)
>>> create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
>>> index 52b3c1b..094b591 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/Makefile
>>> +++ b/drivers/gpu/drm/amd/amdkfd/Makefile
>>> @@ -30,6 +30,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
>>> kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
>>> kfd_process.o kfd_queue.o kfd_mqd_manager.o \
>>> kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
>>> + kfd_mqd_manager_v9.o \
>>> kfd_kernel_queue.o kfd_kernel_queue_cik.o \
>>> kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \
>>> kfd_packet_manager.o kfd_process_queue_manager.o \
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
>>> index f563acb..c368ce3 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
>>> @@ -700,7 +700,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
>>> if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
>>> return -ENOMEM;
>>>
>>> - *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
>>> + *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
>> This assumes the patch in the userptr patch-set is applied. I changed
>> it to GFP_KERNEL for now.
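For reference, since the scope API is discussed above without being shown,
here is a minimal sketch of how memalloc_noio_save()/memalloc_noio_restore()
behave. The function noio_scope_example() is made up for illustration and is
not part of this series; the point is that the caller marks the task as NOIO
once (via current->flags), and the allocator then strips __GFP_IO from nested
allocations, so individual kmalloc/kzalloc call sites can keep plain
GFP_KERNEL:

    #include <linux/sched/mm.h> /* memalloc_noio_save/restore; linux/sched.h on older kernels */
    #include <linux/slab.h>

    static int noio_scope_example(void)
    {
        unsigned int noio_flags;
        void *buf;

        /* Enter a reclaim-sensitive section, e.g. an MMU notifier or
         * eviction path: this sets PF_MEMALLOC_NOIO in current->flags.
         */
        noio_flags = memalloc_noio_save();

        /* GFP_KERNEL is effectively treated as GFP_NOIO here, because
         * the page allocator masks __GFP_IO for this task while the
         * scope is active (current_gfp_context() on recent kernels).
         */
        buf = kzalloc(64, GFP_KERNEL);

        memalloc_noio_restore(noio_flags); /* leave the NOIO scope */

        if (!buf)
            return -ENOMEM;
        kfree(buf);
        return 0;
    }
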
>>
>>> if ((*mem_obj) == NULL)
>>> return -ENOMEM;
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
>>> index ee7061e..4b8eb50 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
>>> @@ -38,6 +38,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
>>> case CHIP_POLARIS10:
>>> case CHIP_POLARIS11:
>>> return mqd_manager_init_vi_tonga(type, dev);
>>> + case CHIP_VEGA10:
>>> + case CHIP_RAVEN:
>>> + return mqd_manager_init_v9(type, dev);
>>> default:
>>> WARN(1, "Unexpected ASIC family %u",
>>> dev->device_info->asic_family);
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
>>> new file mode 100644
>>> index 0000000..684054f
>>> --- /dev/null
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
>>> @@ -0,0 +1,443 @@
>>> +/*
>>> + * Copyright 2016-2018 Advanced Micro Devices, Inc.
>>> + *
>>> + * Permission is hereby granted, free of charge, to any person obtaining a
>>> + * copy of this software and associated documentation files (the "Software"),
>>> + * to deal in the Software without restriction, including without limitation
>>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>>> + * and/or sell copies of the Software, and to permit persons to whom the
>>> + * Software is furnished to do so, subject to the following conditions:
>>> + *
>>> + * The above copyright notice and this permission notice shall be included in
>>> + * all copies or substantial portions of the Software.
>>> + *
>>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
>>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>>> + * OTHER DEALINGS IN THE SOFTWARE.
>>> + *
>>> + */
>>> +
>>> +#include <linux/printk.h>
>>> +#include <linux/slab.h>
>>> +#include <linux/uaccess.h>
>>> +#include "kfd_priv.h"
>>> +#include "kfd_mqd_manager.h"
>>> +#include "v9_structs.h"
>>> +#include "gc/gc_9_0_offset.h"
>>> +#include "gc/gc_9_0_sh_mask.h"
>>> +#include "sdma0/sdma0_4_0_sh_mask.h"
>>> +
>>> +static inline struct v9_mqd *get_mqd(void *mqd)
>>> +{
>>> + return (struct v9_mqd *)mqd;
>>> +}
>>> +
>>> +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
>>> +{
>>> + return (struct v9_sdma_mqd *)mqd;
>>> +}
>>> +
>>> +static int init_mqd(struct mqd_manager *mm, void **mqd,
>>> + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
>>> + struct queue_properties *q)
>>> +{
>>> + int retval;
>>> + uint64_t addr;
>>> + struct v9_mqd *m;
>>> + struct kfd_dev *kfd = mm->dev;
>>> +
>>> + /* From V9, for CWSR, the control stack is located on the next page
>>> + * boundary after the mqd, we will use the gtt allocation function
>>> + * instead of sub-allocation function.
>>> + */
>>> + if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
>>> + *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
>> Using GFP_NOIO directly is not recommended. Can we use the scope
>> functions instead ?
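To make the suggestion concrete, here is a rough sketch of what a scoped
variant of this allocation path might look like. This is illustrative only,
not code from the thread; it reuses the kfd, q, mqd_mem_obj and retval
variables already declared in init_mqd() above and shortens the error
handling:

    unsigned int noio_flags;

    /* Scope the NOIO constraint once instead of passing GFP_NOIO to the
     * individual allocations below.
     */
    noio_flags = memalloc_noio_save();

    *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
    if (*mqd_mem_obj)
        retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
                ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                &((*mqd_mem_obj)->gtt_mem),
                &((*mqd_mem_obj)->gpu_addr),
                (void *)&((*mqd_mem_obj)->cpu_ptr));

    memalloc_noio_restore(noio_flags);

    if (!*mqd_mem_obj)
        return -ENOMEM;
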
>>
>>> + if (!*mqd_mem_obj)
>>> + return -ENOMEM;
>>> + retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
>>> + ALIGN(q->ctl_stack_size, PAGE_SIZE) +
>>> + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
>>> + &((*mqd_mem_obj)->gtt_mem),
>>> + &((*mqd_mem_obj)->gpu_addr),
>>> + (void *)&((*mqd_mem_obj)->cpu_ptr));
>>> + } else
>>> + retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
>>> + mqd_mem_obj);
>>> + if (retval != 0)
>>> + return -ENOMEM;
>>> +
>>> + m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
>>> + addr = (*mqd_mem_obj)->gpu_addr;
>>> +
>>> + memset(m, 0, sizeof(struct v9_mqd));
>>> +
>>> + m->header = 0xC0310800;
>>> + m->compute_pipelinestat_enable = 1;
>>> + m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
>>> + m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
>>> + m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
>>> + m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
>>> +
>>> + m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
>>> + 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
>>> +
>>> + m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
>>> +
>>> + m->cp_mqd_base_addr_lo = lower_32_bits(addr);
>>> + m->cp_mqd_base_addr_hi = upper_32_bits(addr);
>>> +
>>> + m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
>>> + 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
>>> + 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
>>> +
>>> + m->cp_hqd_pipe_priority = 1;
>>> + m->cp_hqd_queue_priority = 15;
>>> +
>>> + if (q->format == KFD_QUEUE_FORMAT_AQL) {
>>> + m->cp_hqd_aql_control =
>>> + 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
>>> + }
>>> +
>>> + if (q->tba_addr) {
>>> + m->compute_pgm_rsrc2 |=
>>> + (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
>>> + }
>>> +
>>> + if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
>>> + m->cp_hqd_persistent_state |=
>>> + (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
>>> + m->cp_hqd_ctx_save_base_addr_lo =
>>> + lower_32_bits(q->ctx_save_restore_area_address);
>>> + m->cp_hqd_ctx_save_base_addr_hi =
>>> + upper_32_bits(q->ctx_save_restore_area_address);
>>> + m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
>>> + m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
>>> + m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
>>> + m->cp_hqd_wg_state_offset = q->ctl_stack_size;
>>> + }
>>> +
>>> + *mqd = m;
>>> + if (gart_addr)
>>> + *gart_addr = addr;
>>> + retval = mm->update_mqd(mm, m, q);
>>> +
>>> + return retval;
>>> +}
>>> +
>>> +static int load_mqd(struct mqd_manager *mm, void *mqd,
>>> + uint32_t pipe_id, uint32_t queue_id,
>>> + struct queue_properties *p, struct mm_struct *mms)
>>> +{
>>> + /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
>>> + uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
>>> +
>>> + return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
>>> + (uint32_t __user *)p->write_ptr,
>>> + wptr_shift, 0, mms);
>>> +}
>>> +
>>> +static int update_mqd(struct mqd_manager *mm, void *mqd,
>>> + struct queue_properties *q)
>>> +{
>>> + struct v9_mqd *m;
>>> +
>>> + m = get_mqd(mqd);
>>> +
>>> + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
>>> + m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
>>> + pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
>>> +
>>> + m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
>>> + m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
>>> +
>>> + m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
>>> + m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
>>> + m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
>>> + m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
>>> +
>>> + m->cp_hqd_pq_doorbell_control =
>>> + q->doorbell_off <<
>>> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
>>> + pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
>>> + m->cp_hqd_pq_doorbell_control);
>>> +
>>> + m->cp_hqd_ib_control =
>>> + 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
>>> + 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
>>> +
>>> + /*
>>> + * HW does not clamp this field correctly. Maximum EOP queue size
>>> + * is constrained by per-SE EOP done signal count, which is 8-bit.
>>> + * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
>>> + * more than (EOP entry count - 1) so a queue size of 0x800 dwords
>>> + * is safe, giving a maximum field value of 0xA.
>>> + */
>>> + m->cp_hqd_eop_control = min(0xA,
>>> + order_base_2(q->eop_ring_buffer_size / 4) - 1);
>>> + m->cp_hqd_eop_base_addr_lo =
>>> + lower_32_bits(q->eop_ring_buffer_address >> 8);
>>> + m->cp_hqd_eop_base_addr_hi =
>>> + upper_32_bits(q->eop_ring_buffer_address >> 8);
>>> +
>>> + m->cp_hqd_iq_timer = 0;
>>> +
>>> + m->cp_hqd_vmid = q->vmid;
>>> +
>>> + if (q->format == KFD_QUEUE_FORMAT_AQL) {
>>> + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
>>> + 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
>>> + 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
>>> + 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
>>> + m->cp_hqd_pq_doorbell_control |= 1 <<
>>> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
>>> + }
>>> + if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
>>> + m->cp_hqd_ctx_save_control = 0;
>>> +
>>> + q->is_active = (q->queue_size > 0 &&
>>> + q->queue_address != 0 &&
>>> + q->queue_percent > 0 &&
>>> + !q->is_evicted);
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +
>>> +static int destroy_mqd(struct mqd_manager *mm, void *mqd,
>>> + enum kfd_preempt_type type,
>>> + unsigned int timeout, uint32_t pipe_id,
>>> + uint32_t queue_id)
>>> +{
>>> + return mm->dev->kfd2kgd->hqd_destroy
>>> + (mm->dev->kgd, mqd, type, timeout,
>>> + pipe_id, queue_id);
>>> +}
>>> +
>>> +static void uninit_mqd(struct mqd_manager *mm, void *mqd,
>>> + struct kfd_mem_obj *mqd_mem_obj)
>>> +{
>>> + struct kfd_dev *kfd = mm->dev;
>>> +
>>> + if (mqd_mem_obj->gtt_mem) {
>>> + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
>>> + kfree(mqd_mem_obj);
>>> + } else {
>>> + kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
>>> + }
>>> +}
>>> +
>>> +static bool is_occupied(struct mqd_manager *mm, void *mqd,
>>> + uint64_t queue_address, uint32_t pipe_id,
>>> + uint32_t queue_id)
>>> +{
>>> + return mm->dev->kfd2kgd->hqd_is_occupied(
>>> + mm->dev->kgd, queue_address,
>>> + pipe_id, queue_id);
>>> +}
>>> +
>>> +static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
>>> + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
>>> + struct queue_properties *q)
>>> +{
>>> + struct v9_mqd *m;
>>> + int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
>>> +
>>> + if (retval != 0)
>>> + return retval;
>>> +
>>> + m = get_mqd(*mqd);
>>> +
>>> + m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
>>> + 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
>>> +
>>> + return retval;
>>> +}
>>> +
>>> +static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
>>> + struct queue_properties *q)
>>> +{
>>> + struct v9_mqd *m;
>>> + int retval = update_mqd(mm, mqd, q);
>>> +
>>> + if (retval != 0)
>>> + return retval;
>>> +
>>> + /* TODO: what's the point? update_mqd already does this. */
>>> + m = get_mqd(mqd);
>>> + m->cp_hqd_vmid = q->vmid;
>>> + return retval;
>>> +}
>>> +
>>> +static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
>>> + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
>>> + struct queue_properties *q)
>>> +{
>>> + int retval;
>>> + struct v9_sdma_mqd *m;
>>> +
>>> +
>>> + retval = kfd_gtt_sa_allocate(mm->dev,
>>> + sizeof(struct v9_sdma_mqd),
>>> + mqd_mem_obj);
>>> +
>>> + if (retval != 0)
>>> + return -ENOMEM;
>>> +
>>> + m = (struct v9_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
>>> +
>>> + memset(m, 0, sizeof(struct v9_sdma_mqd));
>>> +
>>> + *mqd = m;
>>> + if (gart_addr)
>>> + *gart_addr = (*mqd_mem_obj)->gpu_addr;
>>> +
>>> + retval = mm->update_mqd(mm, m, q);
>>> +
>>> + return retval;
>>> +}
>>> +
>>> +static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
>>> + struct kfd_mem_obj *mqd_mem_obj)
>>> +{
>>> + kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
>>> +}
>>> +
>>> +static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
>>> + uint32_t pipe_id, uint32_t queue_id,
>>> + struct queue_properties *p, struct mm_struct *mms)
>>> +{
>>> + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
>>> + (uint32_t __user *)p->write_ptr,
>>> + mms);
>>> +}
>>> +
>>> +#define SDMA_RLC_DUMMY_DEFAULT 0xf
>>> +
>>> +static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
>>> + struct queue_properties *q)
>>> +{
>>> + struct v9_sdma_mqd *m;
>>> +
>>> + m = get_sdma_mqd(mqd);
>>> + m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
>>> + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
>>> + q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
>>> + 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
>>> + 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
>>> +
>>> + m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
>>> + m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
>>> + m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
>>> + m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
>>> + m->sdmax_rlcx_doorbell_offset =
>>> + q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
>>> +
>>> + m->sdma_engine_id = q->sdma_engine_id;
>>> + m->sdma_queue_id = q->sdma_queue_id;
>>> + m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
>>> +
>>> + q->is_active = (q->queue_size > 0 &&
>>> + q->queue_address != 0 &&
>>> + q->queue_percent > 0 &&
>>> + !q->is_evicted);
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +/*
>>> + * * preempt type here is ignored because there is only one way
>>> + * * to preempt sdma queue
>>> + */
>>> +static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
>>> + enum kfd_preempt_type type,
>>> + unsigned int timeout, uint32_t pipe_id,
>>> + uint32_t queue_id)
>>> +{
>>> + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
>>> +}
>>> +
>>> +static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
>>> + uint64_t queue_address, uint32_t pipe_id,
>>> + uint32_t queue_id)
>>> +{
>>> + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
>>> +}
>>> +
>>> +#if defined(CONFIG_DEBUG_FS)
>>> +
>>> +static int debugfs_show_mqd(struct seq_file *m, void *data)
>>> +{
>>> + seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
>>> + data, sizeof(struct v9_mqd), false);
>>> + return 0;
>>> +}
>>> +
>>> +static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
>>> +{
>>> + seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
>>> + data, sizeof(struct v9_sdma_mqd), false);
>>> + return 0;
>>> +}
>>> +
>>> +#endif
>>> +
>>> +struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
>>> + struct kfd_dev *dev)
>>> +{
>>> + struct mqd_manager *mqd;
>>> +
>>> + if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
>>> + return NULL;
>>> +
>>> + mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
>> Using GFP_NOIO directly is not recommended. Can we use the scope
>> functions instead ?
>>
>>> + if (!mqd)
>>> + return NULL;
>>> +
>>> + mqd->dev = dev;
>>> +
>>> + switch (type) {
>>> + case KFD_MQD_TYPE_CP:
>>> + case KFD_MQD_TYPE_COMPUTE:
>>> + mqd->init_mqd = init_mqd;
>>> + mqd->uninit_mqd = uninit_mqd;
>>> + mqd->load_mqd = load_mqd;
>>> + mqd->update_mqd = update_mqd;
>>> + mqd->destroy_mqd = destroy_mqd;
>>> + mqd->is_occupied = is_occupied;
>>> +#if defined(CONFIG_DEBUG_FS)
>>> + mqd->debugfs_show_mqd = debugfs_show_mqd;
>>> +#endif
>>> + break;
>>> + case KFD_MQD_TYPE_HIQ:
>>> + mqd->init_mqd = init_mqd_hiq;
>>> + mqd->uninit_mqd = uninit_mqd;
>>> + mqd->load_mqd = load_mqd;
>>> + mqd->update_mqd = update_mqd_hiq;
>>> + mqd->destroy_mqd = destroy_mqd;
>>> + mqd->is_occupied = is_occupied;
>>> +#if defined(CONFIG_DEBUG_FS)
>>> + mqd->debugfs_show_mqd = debugfs_show_mqd;
>>> +#endif
>>> + break;
>>> + case KFD_MQD_TYPE_SDMA:
>>> + mqd->init_mqd = init_mqd_sdma;
>>> + mqd->uninit_mqd = uninit_mqd_sdma;
>>> + mqd->load_mqd = load_mqd_sdma;
>>> + mqd->update_mqd = update_mqd_sdma;
>>> + mqd->destroy_mqd = destroy_mqd_sdma;
>>> + mqd->is_occupied = is_occupied_sdma;
>>> +#if defined(CONFIG_DEBUG_FS)
>>> + mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
>>> +#endif
>>> + break;
>>> + default:
>>> + kfree(mqd);
>>> + return NULL;
>>> + }
>>> +
>>> + return mqd;
>>> +}
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
>>> index b68299a..fac2882 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
>>> @@ -197,6 +197,7 @@ struct kfd_mem_obj {
>>> uint32_t range_end;
>>> uint64_t gpu_addr;
>>> uint32_t *cpu_ptr;
>>> + void *gtt_mem;
>>> };
>>>
>>> struct kfd_vmid_info {
>>> @@ -822,6 +823,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
>>> struct kfd_dev *dev);
>>> struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
>>> struct kfd_dev *dev);
>>> +struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
>>> + struct kfd_dev *dev);
>>> struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
>>> void device_queue_manager_uninit(struct device_queue_manager *dqm);
>>> struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
>>> --
>>> 2.7.4
>>>
>