[PATCH v2 1/8] block: Introduce CONFIG_BLK_SUB_PAGE_SEGMENTS and QUEUE_FLAG_SUB_PAGE_SEGMENTS

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Prepare for introducing support for segments smaller than the page size
by introducing the request queue flag QUEUE_FLAG_SUB_PAGE_SEGMENTS.
Introduce CONFIG_BLK_SUB_PAGE_SEGMENTS to avoid affecting the performance
of block drivers that support segments >= PAGE_SIZE.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Keith Busch <kbusch@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/Kconfig          | 9 +++++++++
 include/linux/blkdev.h | 7 +++++++
 2 files changed, 16 insertions(+)

diff --git a/block/Kconfig b/block/Kconfig
index 444c5ab3b67e..c3857795fc0d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -36,6 +36,15 @@ config BLOCK_LEGACY_AUTOLOAD
 	  created on demand, but scripts that manually create device nodes and
 	  then call losetup might rely on this behavior.
 
+config BLK_SUB_PAGE_SEGMENTS
+	bool "Support segments smaller than the page size"
+	default n
+	help
+	  Most storage controllers support DMA segments larger than the typical
+	  size of a virtual memory page. Some embedded controllers only support
+	  DMA segments smaller than the page size. Enable this option to support
+	  such controllers.
+
 config BLK_RQ_ALLOC_TIME
 	bool
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3dbd45725b9f..a2362cf07366 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -548,6 +548,7 @@ struct request_queue {
 /* Keep blk_queue_flag_name[] in sync with the definitions below */
 #define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
 #define QUEUE_FLAG_DYING	1	/* queue being torn down */
+#define QUEUE_FLAG_SUB_PAGE_SEGMENTS 2	/* segments smaller than one page */
 #define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
@@ -614,6 +615,12 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 #define blk_queue_skip_tagset_quiesce(q) \
 	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+#define blk_queue_sub_page_segments(q)				\
+	test_bit(QUEUE_FLAG_SUB_PAGE_SEGMENTS, &(q)->queue_flags)
+#else
+#define blk_queue_sub_page_segments(q) false
+#endif
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);



[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Index of Archives]     [SCSI Target Devel]     [Linux SCSI Target Infrastructure]     [Kernel Newbies]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Linux IIO]     [Samba]     [Device Mapper]

  Powered by Linux