Prepare for introducing support for segments smaller than the page size
one driver at a time by introducing the request queue flag
QUEUE_FLAG_SUB_PAGE_SEGMENTS. Although I am not aware of any storage
controller that restricts the segment size to 512 bytes, supporting 512
bytes as the minimum segment size makes it easy to test small segment
support on systems with PAGE_SIZE = 4096.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Keith Busch <kbusch@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-settings.c   | 10 ++++++----
 include/linux/blkdev.h |  3 +++
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1cba5c2a2796..1b7687d0ece2 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -277,10 +277,12 @@ EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
 **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-	if (max_size < PAGE_SIZE) {
-		max_size = PAGE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_size);
+	unsigned int min_segment_size = blk_queue_sub_page_segments(q) ?
+		SECTOR_SIZE : PAGE_SIZE;
+
+	if (max_size < min_segment_size) {
+		max_size = min_segment_size;
+		printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_size);
 	}
 
 	/* see blk_queue_virt_boundary() for the explanation */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50e358a19d98..6757f836fd57 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -579,6 +579,7 @@ struct request_queue {
 #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
 #define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
 #define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
+#define QUEUE_FLAG_SUB_PAGE_SEGMENTS 31	/* segments smaller than one page */
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
 				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
@@ -619,6 +620,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_sub_page_segments(q)				\
+	test_bit(QUEUE_FLAG_SUB_PAGE_SEGMENTS, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
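
A minimal sketch of how a driver would opt in (hypothetical driver
probe code, not part of this patch): the flag has to be set before
lowering the segment size limit, because blk_queue_max_segment_size()
otherwise rounds anything below PAGE_SIZE back up.

	/* Hypothetical probe snippet: allow 512-byte segments. */
	blk_queue_flag_set(QUEUE_FLAG_SUB_PAGE_SEGMENTS, q);
	blk_queue_max_segment_size(q, SECTOR_SIZE);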