Unlike the RWF_HIPRI userspace ABI which is intentionally kept vague,
the bio flag is specific to the polling implementation, so rename and
document it properly.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@xxxxxxx>
Tested-by: Mark Wunderlich <mark.wunderlich@xxxxxxxxx>
---
 block/blk-core.c          |  2 +-
 block/blk-merge.c         |  3 +--
 block/blk-mq-debugfs.c    |  2 +-
 block/blk-mq.c            |  4 ++--
 block/blk-mq.h            |  4 ++--
 block/blk.h               |  4 ++--
 drivers/nvme/host/core.c  |  2 +-
 drivers/scsi/scsi_debug.c | 10 +++++-----
 include/linux/bio.h       |  2 +-
 include/linux/blk_types.h |  4 ++--
 mm/page_io.c              |  2 +-
 11 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 9b8c706701900..14559b23fbe2e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -824,7 +824,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 	}
 
 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-		bio_clear_hipri(bio);
+		bio_clear_polled(bio);
 
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bf7aedaad8f8e..762da71f9fde5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -287,8 +287,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	 * iopoll in direct IO routine. Given performance gain of iopoll for
 	 * big IO can be trival, disable iopoll when split needed.
 	 */
-	bio_clear_hipri(bio);
-
+	bio_clear_polled(bio);
 	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4000376330c90..c345aef903358 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -286,7 +286,7 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(BACKGROUND),
 	CMD_FLAG_NAME(NOWAIT),
 	CMD_FLAG_NAME(NOUNMAP),
-	CMD_FLAG_NAME(HIPRI),
+	CMD_FLAG_NAME(POLLED),
 };
 #undef CMD_FLAG_NAME
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b23dc0be01889..4dfa185f9f5b7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -702,7 +702,7 @@ bool blk_mq_complete_request_remote(struct request *rq)
 	 * For a polled request, always complete locallly, it's pointless
 	 * to redirect the completion.
 	 */
-	if (rq->cmd_flags & REQ_HIPRI)
+	if (rq->cmd_flags & REQ_POLLED)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
@@ -2248,7 +2248,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
 	rq_qos_throttle(q, bio);
 
-	hipri = bio->bi_opf & REQ_HIPRI;
+	hipri = bio->bi_opf & REQ_POLLED;
 
 	plug = blk_mq_plug(q, bio);
 	if (plug && plug->cached_rq) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5da970bb88659..a9fe01e149516 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -106,9 +106,9 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 	enum hctx_type type = HCTX_TYPE_DEFAULT;
 
 	/*
-	 * The caller ensure that if REQ_HIPRI, poll must be enabled.
+	 * The caller ensure that if REQ_POLLED, poll must be enabled.
 	 */
-	if (flags & REQ_HIPRI)
+	if (flags & REQ_POLLED)
 		type = HCTX_TYPE_POLL;
 	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
 		type = HCTX_TYPE_READ;
diff --git a/block/blk.h b/block/blk.h
index 35ca73355f90c..afe0717b5e8e4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -396,11 +396,11 @@ extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;
 
-static inline void bio_clear_hipri(struct bio *bio)
+static inline void bio_clear_polled(struct bio *bio)
 {
 	/* can't support alloc cache if we turn off polling */
 	bio_clear_flag(bio, BIO_PERCPU_CACHE);
-	bio->bi_opf &= ~REQ_HIPRI;
+	bio->bi_opf &= ~REQ_POLLED;
 }
 
 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7fa75433c0361..56fca2cd42f03 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -632,7 +632,7 @@ static inline void nvme_init_request(struct request *req,
 		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
-		req->cmd_flags |= REQ_HIPRI;
+		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 66f507469a31a..40b473eea357f 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 {
 	bool new_sd_dp;
 	bool inject = false;
-	bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
+	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
 	int k, num_in_q, qdepth;
 	unsigned long iflags;
 	u64 ns_from_boot = 0;
@@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	if (sdebug_host_max_queue)
 		sd_dp->hc_idx = get_tag(cmnd);
 
-	if (hipri)
+	if (polled)
 		ns_from_boot = ktime_get_boottime_ns();
 
 	/* one of the resp_*() response functions is called here */
@@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 				kt -= d;
 			}
 		}
-		if (hipri) {
+		if (polled) {
 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
 			spin_lock_irqsave(&sqp->qc_lock, iflags);
 			if (!sd_dp->init_poll) {
@@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
 			     atomic_read(&sdeb_inject_pending)))
 			sd_dp->aborted = true;
-		if (hipri) {
+		if (polled) {
 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
 			spin_lock_irqsave(&sqp->qc_lock, iflags);
 			if (!sd_dp->init_poll) {
@@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
 			if (kt_from_boot < sd_dp->cmpl_ts)
 				continue;
 
-		} else		/* ignoring non REQ_HIPRI requests */
+		} else		/* ignoring non REQ_POLLED requests */
 			continue;
 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
 		if (likely(devip))
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 00952e92eae1b..0226a8c8fe810 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -784,7 +784,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
  */
 static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 {
-	bio->bi_opf |= REQ_HIPRI;
+	bio->bi_opf |= REQ_POLLED;
 	if (!is_sync_kiocb(kiocb))
 		bio->bi_opf |= REQ_NOWAIT;
 }
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5017ba8fc5392..f8b9fce688346 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -384,7 +384,7 @@ enum req_flag_bits {
 	/* command specific flags for REQ_OP_WRITE_ZEROES: */
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
-	__REQ_HIPRI,
+	__REQ_POLLED,		/* caller polls for completion using blk_poll */
 
 	/* for driver use */
 	__REQ_DRV,
@@ -409,7 +409,7 @@ enum req_flag_bits {
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI		(1ULL << __REQ_HIPRI)
+#define REQ_POLLED		(1ULL << __REQ_POLLED)
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
 #define REQ_SWAP		(1ULL << __REQ_SWAP)
diff --git a/mm/page_io.c b/mm/page_io.c
index 5d5543fcefa4e..ed2eded74f3ad 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -416,7 +416,7 @@ int swap_readpage(struct page *page, bool synchronous)
 	 * attempt to access it in the page fault retry time check.
 	 */
 	if (synchronous) {
-		bio->bi_opf |= REQ_HIPRI;
+		bio->bi_opf |= REQ_POLLED;
 		get_task_struct(current);
 		bio->bi_private = current;
 	}
-- 
2.30.2
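
As context for the RWF_HIPRI ABI referenced in the commit message, here is a
minimal userspace sketch (not part of this patch) of how an application asks
for polled I/O, which the kernel then expresses internally as the renamed
REQ_POLLED flag. The device path, the 4k alignment, and the assumption that
the underlying queue has polling enabled (QUEUE_FLAG_POLL) are illustrative
only.

/*
 * Illustrative only: read 4k from a block device with RWF_HIPRI.
 * Whether the kernel actually polls depends on the device and its
 * queue configuration; the path below is an assumption.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	/* Polled completions are only taken on the O_DIRECT path. */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT needs an aligned buffer; assume 4k alignment here. */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* RWF_HIPRI requests a polled submission for this read. */
	ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes\n", ret);

	free(buf);
	close(fd);
	return 0;
}

If the queue does not support polling, submit_bio_checks() in the hunk above
simply clears the flag via bio_clear_polled() and the request completes
through the normal interrupt path.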