SCSI's reply queue is very similar to blk-mq's hw queue: both are assigned
by IRQ vector. So map the private reply queue to blk-mq's hw queue via
.host_tagset; the private reply mapping can then be removed.

Another benefit is that the request/irq lost issue can be solved in a
generic way, because managed IRQs may be shut down during CPU hotplug.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 drivers/scsi/hpsa.c | 49 ++++++++++++++++++---------------------------
 1 file changed, 19 insertions(+), 30 deletions(-)

diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1bef1da273c2..c7136f9f0ce1 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -51,6 +51,7 @@
 #include <linux/jiffies.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <linux/blk-mq-pci.h>
 #include <asm/unaligned.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
@@ -902,6 +903,18 @@ static ssize_t host_show_legacy_board(struct device *dev,
 	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
 }
 
+static int hpsa_map_queues(struct Scsi_Host *shost)
+{
+	struct ctlr_info *h = shost_to_hba(shost);
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+	/* Switch to CPU mapping in case managed IRQ isn't used */
+	if (shost->nr_hw_queues > 1)
+		return blk_mq_pci_map_queues(qmap, h->pdev, 0);
+	else
+		return blk_mq_map_queues(qmap);
+}
+
 static DEVICE_ATTR_RO(raid_level);
 static DEVICE_ATTR_RO(lunid);
 static DEVICE_ATTR_RO(unique_id);
@@ -971,6 +984,7 @@ static struct scsi_host_template hpsa_driver_template = {
 	.slave_alloc		= hpsa_slave_alloc,
 	.slave_configure	= hpsa_slave_configure,
 	.slave_destroy		= hpsa_slave_destroy,
+	.map_queues		= hpsa_map_queues,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= hpsa_compat_ioctl,
 #endif
@@ -978,6 +992,7 @@ static struct scsi_host_template hpsa_driver_template = {
 	.shost_attrs = hpsa_shost_attrs,
 	.max_sectors = 2048,
 	.no_write_same = 1,
+	.host_tagset = 1,
 };
 
 static inline u32 next_command(struct ctlr_info *h, u8 q)
@@ -1145,7 +1160,7 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	atomic_inc(&h->commands_outstanding);
 
-	reply_queue = h->reply_map[raw_smp_processor_id()];
+	reply_queue = scsi_cmnd_hctx_index(h->scsi_host, c->scsi_cmd);
 	switch (c->cmd_type) {
 	case CMD_IOACCEL1:
 		set_ioaccel1_performant_mode(h, c, reply_queue);
@@ -5785,6 +5800,9 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
 {
 	int rv;
 
+	/* map reply queue to blk_mq hw queue */
+	h->scsi_host->nr_hw_queues = h->nreply_queues;
+
 	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
 	if (rv) {
 		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
@@ -7386,26 +7404,6 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
 	h->msix_vectors = 0;
 }
 
-static void hpsa_setup_reply_map(struct ctlr_info *h)
-{
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
-
-	for (queue = 0; queue < h->msix_vectors; queue++) {
-		mask = pci_irq_get_affinity(h->pdev, queue);
-		if (!mask)
-			goto fallback;
-
-		for_each_cpu(cpu, mask)
-			h->reply_map[cpu] = queue;
-	}
-	return;
-
-fallback:
-	for_each_possible_cpu(cpu)
-		h->reply_map[cpu] = 0;
-}
-
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  * controllers that are capable. If not, we use legacy INTx mode.
  */
@@ -7802,9 +7800,6 @@ static int hpsa_pci_init(struct ctlr_info *h)
 	if (err)
 		goto clean1;
 
-	/* setup mapping between CPU and reply queue */
-	hpsa_setup_reply_map(h);
-
 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
 	if (err)
 		goto clean2;	/* intmode+region, pci */
@@ -8516,7 +8511,6 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
 
 static void hpda_free_ctlr_info(struct ctlr_info *h)
 {
-	kfree(h->reply_map);
 	kfree(h);
 }
 
@@ -8528,11 +8522,6 @@ static struct ctlr_info *hpda_alloc_ctlr_info(void)
 	if (!h)
 		return NULL;
 
-	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
-	if (!h->reply_map) {
-		kfree(h);
-		return NULL;
-	}
 	return h;
 }
 
--
2.20.1
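
For context, here is a minimal illustrative sketch (not part of the patch) of
roughly what the generic blk_mq_pci_map_queues() does with the qmap set up in
hpsa_map_queues() above; the helper name example_pci_map_queues() is
hypothetical. It builds the same CPU -> queue table that the removed
hpsa_setup_reply_map() built by hand, only stored in qmap->mq_map so blk-mq
picks the hw queue (and therefore the reply queue) per request.

/*
 * Sketch only: each hw queue takes the CPUs that its managed IRQ vector
 * is affine to; if no affinity info is available, fall back to the plain
 * CPU-number spread done by blk_mq_map_queues().
 */
static int example_pci_map_queues(struct blk_mq_queue_map *qmap,
				  struct pci_dev *pdev, int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		/* affinity mask of the managed IRQ backing this queue */
		mask = pci_irq_get_affinity(pdev, queue + offset);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	return 0;

fallback:
	return blk_mq_map_queues(qmap);
}

With .host_tagset set, blk-mq dispatches each request on the hw queue chosen
from this map, which is why the driver-private reply_map table and its
allocation can be deleted above.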