blk_rq_err_bytes is only used by the scsi midlayer, so move it there.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 block/blk-core.c        | 41 ----------------------------------------
 drivers/scsi/scsi_lib.c | 42 ++++++++++++++++++++++++++++++++++++++++-
 include/linux/blk-mq.h  |  3 ---
 3 files changed, 41 insertions(+), 45 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 5ffe05b1d17ce..3edc5c5178dd6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1221,47 +1221,6 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- *     A request could be merge of IOs which require different failure
- *     handling.  This function determines the number of bytes which
- *     can be failed from the beginning of the request without
- *     crossing into area which need to be retried further.
- *
- * Return:
- *     The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
-	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
-	unsigned int bytes = 0;
-	struct bio *bio;
-
-	if (!(rq->rq_flags & RQF_MIXED_MERGE))
-		return blk_rq_bytes(rq);
-
-	/*
-	 * Currently the only 'mixing' which can happen is between
-	 * different fastfail types.  We can safely fail portions
-	 * which have all the failfast bits that the first one has -
-	 * the ones which are at least as eager to fail as the first
-	 * one.
-	 */
-	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_opf & ff) != ff)
-			break;
-		bytes += bio->bi_iter.bi_size;
-	}
-
-	/* this could lead to infinite loop */
-	BUG_ON(blk_rq_bytes(rq) && !bytes);
-	return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
 static void update_io_ticks(struct block_device *part, unsigned long now,
 		bool end)
 {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9823b65d15368..1f7e68699941f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -617,6 +617,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
 	}
 }
 
+/**
+ * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into area which need to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	unsigned int bytes = 0;
+	struct bio *bio;
+
+	if (!(rq->rq_flags & RQF_MIXED_MERGE))
+		return blk_rq_bytes(rq);
+
+	/*
+	 * Currently the only 'mixing' which can happen is between
+	 * different fastfail types.  We can safely fail portions
+	 * which have all the failfast bits that the first one has -
+	 * the ones which are at least as eager to fail as the first
+	 * one.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if ((bio->bi_opf & ff) != ff)
+			break;
+		bytes += bio->bi_iter.bi_size;
+	}
+
+	/* this could lead to infinite loop */
+	BUG_ON(blk_rq_bytes(rq) && !bytes);
+	return bytes;
+}
+
 /* Helper for scsi_io_completion() when "reprep" action required. */
 static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 				      struct request_queue *q)
@@ -794,7 +834,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 				scsi_print_command(cmd);
 			}
 		}
-		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
 			return;
 		fallthrough;
 	case ACTION_REPREP:
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ebc45cf0450b3..9f9634cd4cfd2 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -949,7 +949,6 @@ struct req_iterator {
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
- * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
@@ -973,8 +972,6 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return bio_iovec(rq->bio).bv_len;
 }
 
-unsigned int blk_rq_err_bytes(const struct request *rq);
-
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
-- 
2.30.2
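
Not part of the patch, but for readers unfamiliar with mixed merges: the loop in
(blk|scsi)_rq_err_bytes() sums the leading bios whose failfast bits cover those of
the first bio, i.e. the portion that is at least as eager to fail. Below is a minimal
standalone userspace sketch of that walk; struct fake_bio, err_bytes() and the flag
value are illustrative stand-ins, not kernel API.

#include <stdio.h>

#define REQ_FAILFAST_DEV   (1u << 0)           /* illustrative flag value */
#define REQ_FAILFAST_MASK  (REQ_FAILFAST_DEV)

struct fake_bio {
	unsigned int opf;        /* stands in for bio->bi_opf */
	unsigned int size;       /* stands in for bio->bi_iter.bi_size */
	struct fake_bio *next;   /* stands in for bio->bi_next */
};

/* Sum bytes of leading bios that carry all the failfast bits of the first one. */
static unsigned int err_bytes(const struct fake_bio *first)
{
	unsigned int ff = first->opf & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	const struct fake_bio *bio;

	for (bio = first; bio; bio = bio->next) {
		if ((bio->opf & ff) != ff)
			break;
		bytes += bio->size;
	}
	return bytes;
}

int main(void)
{
	struct fake_bio b2 = { .opf = 0,                .size = 4096, .next = NULL };
	struct fake_bio b1 = { .opf = REQ_FAILFAST_DEV, .size = 8192, .next = &b2 };

	/* Only the first bio shares the failfast bits, so 8192 bytes can be failed. */
	printf("failable bytes: %u\n", err_bytes(&b1));
	return 0;
}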