Some SCSI devices support a single hw queue (tags), but allow multiple
private completion queues for handling request delivery & completion.
The mapping between CPUs and the private completion queues is set up via
pci_alloc_irq_vectors_affinity(PCI_IRQ_AFFINITY), just like normal
blk-mq queue mapping.

Introduce a .complete_queue_affinity callback for retrieving a
completion queue's affinity, so that we can drain in-flight requests
delivered from a completion queue once the last CPU of that completion
queue becomes offline.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 include/linux/blk-mq.h | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 15d1aa53d96c..56f2e2ed62a7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,7 +140,8 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 typedef bool (busy_fn)(struct request_queue *);
 typedef void (complete_fn)(struct request *);
-
+typedef const struct cpumask *(hctx_complete_queue_affinity_fn)(
+		struct blk_mq_hw_ctx *, int);
 
 struct blk_mq_ops {
 	/*
@@ -207,6 +208,15 @@ struct blk_mq_ops {
 
 	map_queues_fn		*map_queues;
 
+	/*
+	 * Some SCSI devices support private completion queues. Returns
+	 * the affinity of the completion queue, and the passed 'cpu'
+	 * parameter has to be included in the completion queue's affinity
+	 * cpumask; it is used to figure out the mapped reply queue. If
+	 * NULL is returned, this hctx has no private completion queues.
+	 */
+	hctx_complete_queue_affinity_fn *complete_queue_affinity;
+
 #ifdef CONFIG_BLK_DEBUG_FS
 	/*
 	 * Used by the debugfs implementation to show driver-specific
-- 
2.20.1
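
For reference, a rough driver-side sketch of how the new callback might
be wired up. This is not part of the patch: 'example_hba', 'reply_map'
and the ops table are made-up names, and it assumes the driver builds a
cpu -> reply-queue map at probe time from the vectors allocated with
pci_alloc_irq_vectors_affinity() (with reply queues mapping 1:1 to irq
vectors), so the affinity can be fetched via pci_irq_get_affinity():

/* Hypothetical HBA private data; names are illustrative only */
struct example_hba {
	struct pci_dev *pdev;
	unsigned int *reply_map;	/* cpu -> reply queue index */
};

static const struct cpumask *
example_complete_queue_affinity(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct example_hba *hba = hctx->queue->queuedata;
	unsigned int qidx = hba->reply_map[cpu];

	/* affinity was set up by pci_alloc_irq_vectors_affinity() */
	return pci_irq_get_affinity(hba->pdev, qidx);
}

static const struct blk_mq_ops example_mq_ops = {
	/* ... existing callbacks (.queue_rq, .map_queues, ...) ... */
	.complete_queue_affinity = example_complete_queue_affinity,
};

With such an implementation, blk-mq can find the CPUs sharing a given
reply queue and drain the in-flight requests delivered from it before
the last of those CPUs goes offline.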