[PATCH v2 1/2] blk-mq: add a new interface to get request by tag

Ming Lei fixed a request use-after-free (UAF) during tag iteration in
commit bd63141d585b ("blk-mq: clear stale request in tags->rq[] before
freeing one request pool"). However, some drivers still call
blk_mq_tag_to_rq(), which accesses the request by tag directly without
holding tags->lock and can therefore still hit the UAF.

Thus, add a new interface for such drivers to look up a request by tag
safely and avoid the request UAF.
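
As an illustration only (not part of this patch), a driver that
currently does "rq = blk_mq_tag_to_rq(hctx->tags, tag)" in its
completion or error handling path could be converted roughly as
follows; foo_queue and its hctx member are made up for the example:

	/* Hypothetical driver code, only showing the intended calling
	 * convention of the new helpers. */
	static void foo_complete_cmd(struct foo_queue *fq, unsigned int tag)
	{
		struct request *rq;

		/*
		 * blk_mq_get_rq_by_tag() takes tags->lock and a reference,
		 * so the request cannot be freed under us while we use it.
		 */
		rq = blk_mq_get_rq_by_tag(fq->hctx->tags, tag);
		if (!rq)
			return;

		blk_mq_complete_request(rq);

		/* drop the reference taken by blk_mq_get_rq_by_tag() */
		blk_mq_put_rq_ref(rq);
	}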

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/blk-mq-tag.c     | 33 +++++++++++++++++++++++++++++++++
 block/blk-mq.c         |  1 +
 block/blk-mq.h         |  1 -
 include/linux/blk-mq.h |  3 +++
 4 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 86f87346232a..7921bd2019df 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -652,3 +652,36 @@ u32 blk_mq_unique_tag(struct request *rq)
 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
 }
 EXPORT_SYMBOL(blk_mq_unique_tag);
+
+/**
+ * blk_mq_get_rq_by_tag - if the request that is represented by the tag is
+ * not idle, increment its reference and then return it. Otherwise return
+ * NULL.
+ */
+struct request *blk_mq_get_rq_by_tag(struct blk_mq_tags *tags,
+				     unsigned int tag)
+{
+	unsigned long flags;
+	struct request *rq;
+
+	/* hold lock to prevent accessing freed request by tag */
+	spin_lock_irqsave(&tags->lock, flags);
+	rq = blk_mq_tag_to_rq(tags, tag);
+	if (!rq)
+		goto out_unlock;
+
+	if (!refcount_inc_not_zero(&rq->ref)) {
+		rq = NULL;
+		goto out_unlock;
+	}
+
+	if (!blk_mq_request_started(rq)) {
+		blk_mq_put_rq_ref(rq);
+		rq = NULL;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&tags->lock, flags);
+	return rq;
+}
+EXPORT_SYMBOL(blk_mq_get_rq_by_tag);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 495f508c6300..7d0bd3809336 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -916,6 +916,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 	else if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
 }
+EXPORT_SYMBOL_GPL(blk_mq_put_rq_ref);
 
 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d08779f77a26..20ef743a3ff6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -47,7 +47,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1d18447ebebc..d460e5e2c07a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -635,4 +635,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio);
 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
 		struct lock_class_key *key);
 
+struct request *blk_mq_get_rq_by_tag(struct blk_mq_tags *tags,
+		unsigned int tag);
+void blk_mq_put_rq_ref(struct request *rq);
 #endif
-- 
2.31.1



