On Tue, 2017-12-12 at 11:01 -0800, Tejun Heo wrote:
> +/*
> + * Bits for request->gstate. The lower two bits carry MQ_RQ_* state value
> + * and the upper bits the generation number.
> + */
> +enum mq_rq_state {
> +	MQ_RQ_IDLE		= 0,
> +	MQ_RQ_IN_FLIGHT		= 1,
> +
> +	MQ_RQ_STATE_BITS	= 2,
> +	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
> +	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
> +};
> +
> @@ -85,6 +98,38 @@ extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
> +/**
> + * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
> + * @rq: target request.
> + */
> +static inline int blk_mq_rq_state(struct request *rq)
> +{
> +	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
> +}
> +
> +/**
> + * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
> + * @rq: target request.
> + * @state: new state to set.
> + *
> + * Set @rq's state to @state. The caller is responsible for ensuring that
> + * there are no other updaters. A request can transition into IN_FLIGHT
> + * only from IDLE and doing so increments the generation number.
> + */
> +static inline void blk_mq_rq_update_state(struct request *rq,
> +					  enum mq_rq_state state)
> +{
> +	u64 new_val = (READ_ONCE(rq->gstate) & ~MQ_RQ_STATE_MASK) | state;
> +
> +	if (state == MQ_RQ_IN_FLIGHT) {
> +		WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
> +		new_val += MQ_RQ_GEN_INC;
> +	}
> +
> +	/* avoid exposing interim values */
> +	WRITE_ONCE(rq->gstate, new_val);
> +}

Hello Tejun,

Have you considered the following instead of introducing MQ_RQ_IDLE and
MQ_RQ_IN_FLIGHT? I think this could help to limit the number of new atomic
operations introduced in the hot path by this patch series.

static inline bool blk_mq_rq_in_flight(struct request *rq)
{
	return list_empty(&rq->queuelist);
}

Thanks,

Bart.
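
[Editor's note: a minimal sketch, for illustration only, of where such a
helper could slot into a timeout scan. The blk_mq_check_expired() callback
shape and the rq->deadline comparison are assumed here; this is not code
from the patch series or from the thread.]

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
				 struct request *rq, void *priv,
				 bool reserved)
{
	/*
	 * Sketch: decide whether the request is owned by the driver from
	 * list_empty(&rq->queuelist) (via the proposed helper) rather
	 * than from a separate MQ_RQ_* state in rq->gstate.
	 */
	if (blk_mq_rq_in_flight(rq) && time_after_eq(jiffies, rq->deadline))
		blk_mq_rq_timed_out(rq, reserved);
}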