On 6/24/22 08:26, Bart Van Assche wrote:
> Introduce a local variable for the expression bd->rq since that expression
> occurs multiple times. This patch does not change any functionality.
>
> Cc: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
> Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>

Looks good. This is completely independent of the series goal though, so it
can be applied on its own, I think.

Reviewed-by: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>

> ---
>  drivers/block/null_blk/main.c | 21 +++++++++++----------
>  1 file changed, 11 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
> index 6b67088f4ea7..fd68e6f4637f 100644
> --- a/drivers/block/null_blk/main.c
> +++ b/drivers/block/null_blk/main.c
> @@ -1609,10 +1609,11 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
>  static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
>  				  const struct blk_mq_queue_data *bd)
>  {
> -	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
> +	struct request *rq = bd->rq;
> +	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
>  	struct nullb_queue *nq = hctx->driver_data;
> -	sector_t nr_sectors = blk_rq_sectors(bd->rq);
> -	sector_t sector = blk_rq_pos(bd->rq);
> +	sector_t nr_sectors = blk_rq_sectors(rq);
> +	sector_t sector = blk_rq_pos(rq);
>  	const bool is_poll = hctx->type == HCTX_TYPE_POLL;
>
>  	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
> @@ -1621,14 +1622,14 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
>  		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>  		cmd->timer.function = null_cmd_timer_expired;
>  	}
> -	cmd->rq = bd->rq;
> +	cmd->rq = rq;
>  	cmd->error = BLK_STS_OK;
>  	cmd->nq = nq;
> -	cmd->fake_timeout = should_timeout_request(bd->rq);
> +	cmd->fake_timeout = should_timeout_request(rq);
>
> -	blk_mq_start_request(bd->rq);
> +	blk_mq_start_request(rq);
>
> -	if (should_requeue_request(bd->rq)) {
> +	if (should_requeue_request(rq)) {
>  		/*
>  		 * Alternate between hitting the core BUSY path, and the
>  		 * driver driven requeue path
> @@ -1637,21 +1638,21 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
>  		if (nq->requeue_selection & 1)
>  			return BLK_STS_RESOURCE;
>  		else {
> -			blk_mq_requeue_request(bd->rq, true);
> +			blk_mq_requeue_request(rq, true);
>  			return BLK_STS_OK;
>  		}
>  	}
>
>  	if (is_poll) {
>  		spin_lock(&nq->poll_lock);
> -		list_add_tail(&bd->rq->queuelist, &nq->poll_list);
> +		list_add_tail(&rq->queuelist, &nq->poll_list);
>  		spin_unlock(&nq->poll_lock);
>  		return BLK_STS_OK;
>  	}
>  	if (cmd->fake_timeout)
>  		return BLK_STS_OK;
>
> -	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
> +	return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
>  }
>
>  static void cleanup_queue(struct nullb_queue *nq)

--
Damien Le Moal
Western Digital Research