We need these helpers to support using a hashtable to improve bio merging from the sw queue in the following patches. No functional change. Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx> --- block/blk.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ block/elevator.c | 36 +++++++----------------------------- 2 files changed, 59 insertions(+), 29 deletions(-) diff --git a/block/blk.h b/block/blk.h index 3a3d715bd725..313002c2f666 100644 --- a/block/blk.h +++ b/block/blk.h @@ -147,6 +147,58 @@ static inline void blk_clear_rq_complete(struct request *rq) */ #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED) +/* + * Merge hash stuff. + */ +#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) + +#define bucket(head, key) &((head)[hash_min((key), ELV_HASH_BITS)]) + +static inline void __rqhash_del(struct request *rq) +{ + hash_del(&rq->hash); + rq->rq_flags &= ~RQF_HASHED; +} + +static inline void rqhash_del(struct request *rq) +{ + if (ELV_ON_HASH(rq)) + __rqhash_del(rq); +} + +static inline void rqhash_add(struct hlist_head *hash, struct request *rq) +{ + BUG_ON(ELV_ON_HASH(rq)); + hlist_add_head(&rq->hash, bucket(hash, rq_hash_key(rq))); + rq->rq_flags |= RQF_HASHED; +} + +static inline void rqhash_reposition(struct hlist_head *hash, struct request *rq) +{ + __rqhash_del(rq); + rqhash_add(hash, rq); +} + +static inline struct request *rqhash_find(struct hlist_head *hash, sector_t offset) +{ + struct hlist_node *next; + struct request *rq = NULL; + + hlist_for_each_entry_safe(rq, next, bucket(hash, offset), hash) { + BUG_ON(!ELV_ON_HASH(rq)); + + if (unlikely(!rq_mergeable(rq))) { + __rqhash_del(rq); + continue; + } + + if (rq_hash_key(rq) == offset) + return rq; + } + + return NULL; +} + void blk_insert_flush(struct request *rq); static inline struct request *__elv_next_request(struct request_queue *q) diff --git a/block/elevator.c b/block/elevator.c index 4bb2f0c93fa6..7423e9c9cb27 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -47,11 +47,6 
@@ static DEFINE_SPINLOCK(elv_list_lock); static LIST_HEAD(elv_list); /* - * Merge hash stuff. - */ -#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) - -/* * Query io scheduler to see if the current process issuing bio may be * merged with rq. */ @@ -268,14 +263,12 @@ EXPORT_SYMBOL(elevator_exit); static inline void __elv_rqhash_del(struct request *rq) { - hash_del(&rq->hash); - rq->rq_flags &= ~RQF_HASHED; + __rqhash_del(rq); } void elv_rqhash_del(struct request_queue *q, struct request *rq) { - if (ELV_ON_HASH(rq)) - __elv_rqhash_del(rq); + rqhash_del(rq); } EXPORT_SYMBOL_GPL(elv_rqhash_del); @@ -283,37 +276,22 @@ void elv_rqhash_add(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; - BUG_ON(ELV_ON_HASH(rq)); - hash_add(e->hash, &rq->hash, rq_hash_key(rq)); - rq->rq_flags |= RQF_HASHED; + rqhash_add(e->hash, rq); } EXPORT_SYMBOL_GPL(elv_rqhash_add); void elv_rqhash_reposition(struct request_queue *q, struct request *rq) { - __elv_rqhash_del(rq); - elv_rqhash_add(q, rq); + struct elevator_queue *e = q->elevator; + + rqhash_reposition(e->hash, rq); } struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) { struct elevator_queue *e = q->elevator; - struct hlist_node *next; - struct request *rq; - - hash_for_each_possible_safe(e->hash, rq, next, hash, offset) { - BUG_ON(!ELV_ON_HASH(rq)); - if (unlikely(!rq_mergeable(rq))) { - __elv_rqhash_del(rq); - continue; - } - - if (rq_hash_key(rq) == offset) - return rq; - } - - return NULL; + return rqhash_find(e->hash, offset); } /* -- 2.9.5