> +static int ufshcd_mcq_config_resource(struct ufs_hba *hba) > +{ > + struct platform_device *pdev = to_platform_device(hba->dev); > + struct ufshcd_res_info_t *res; > + struct resource *res_mem, *res_mcq; > + int i, ret = 0; > + > + memcpy(hba->res, ufshcd_res_info, sizeof(ufshcd_res_info)); > + > + for (i = 0; i < RES_MAX; i++) { > + res = &hba->res[i]; > + > + res->resource = platform_get_resource_byname(pdev, > + IORESOURCE_MEM, > + res->name); > + if (!res->resource) { > + dev_info(hba->dev, "Resource %s not provided\n", res- > >name); > + if (i == RES_MEM) > + return -ENOMEM; > + continue; > + } else if (i == RES_MEM) { > + res_mem = res->resource; > + res->base = hba->mmio_base; > + continue; > + } > + > + res->base = devm_ioremap_resource(hba->dev, res->resource); > + if (IS_ERR(res->base)) { > + dev_err(hba->dev, "Failed to map res %s, err = %d\n", > + res->name, (int)PTR_ERR(res->base)); > + res->base = NULL; > + ret = PTR_ERR(res->base); > + goto out_err; > + } > + } > + > + res = &hba->res[RES_MCQ]; > + /* MCQ resource provided */ > + if (res->base) > + goto out; > + > + /* Manually allocate MCQ resource */ Did you consider forcing the MCQ configuration to be provided, instead of falling back to manual allocation? > + res_mcq = res->resource; > + res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL); > + if (!res_mcq) { > + dev_err(hba->dev, "Failed to alloate MCQ resource\n"); > + goto out_err; > + } > + res->is_alloc = true; > + > + res_mcq->start = res_mem->start + > + mcq_sqattr_offset(hba->mcq_capabilities); > + res_mcq->end = res_mcq->start + 32 * MCQ_QCFG_SIZE - 1; Shouldn't this be MCQCap.MAXQ queues rather than a hard-coded 32 — i.e. there can be up to MCQCap.MAXQ queues, and no more than 32? 
> +int ufshcd_mcq_init(struct ufs_hba *hba) > +{ > + struct Scsi_Host *host = hba->host; > + struct ufs_hw_queue *hwq; > + int i, ret = 0; > + > + if (!is_mcq_supported(hba)) > + return 0; > + > + ret = ufshcd_mcq_config_resource(hba); > + if (ret) { > + dev_err(hba->dev, "Failed to config MCQ resource\n"); > + return ret; > + } > + > + ret = ufshcd_vops_config_mcq_rop(hba); > + if (ret) { > + dev_err(hba->dev, "MCQ Runtime Operation Pointers not > configured\n"); > + goto out_err; > + } > + > + hba->nr_queues[HCTX_TYPE_DEFAULT] = num_possible_cpus(); > + hba->nr_queues[HCTX_TYPE_READ] = 0; > + hba->nr_queues[HCTX_TYPE_POLL] = 1; > + > + for (i = 0; i < HCTX_MAX_TYPES; i++) > + host->nr_hw_queues += hba->nr_queues[i]; > + > + host->can_queue = hba->nutrs; > + host->cmd_per_lun = hba->nutrs; > + > + /* One more reserved for dev_cmd_queue */ > + hba->nr_hw_queues = host->nr_hw_queues + 1; Is it possible, since the MCQ memory space is *added* to the UTR & UTMR lists, that we keep using the legacy doorbell for query commands? Wouldn't that simplify the hw_queue bookkeeping? > -#define ufshcd_hex_dump(prefix_str, buf, len) do { \ > - size_t __len = (len); \ > - print_hex_dump(KERN_ERR, prefix_str, \ > - __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ > - 16, 4, buf, __len, false); \ > +#define ufshcd_hex_dump(prefix_str, buf, len) do { \ > + size_t __len = (len); \ > + \ > + print_hex_dump(KERN_ERR, prefix_str, \ > + __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, \ > + 16, 4, buf, __len, false); \ > + \ > } while (0) Should this be part of this patch? > +#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1 Maybe add a comment above, e.g. "queue 0 is reserved for query commands" or something similar. That is, if the query commands don't use the legacy doorbell. > +static inline bool ufshcd_is_hwq_full(struct ufs_hw_queue *q) > +{ > + return (q->sq_hp_slot == ((q->sq_tp_slot + 1) % > + q->max_entries)); > +} Isn't sq_tp_slot already % q->max_entries? Thanks, Avri