On 3/23/20 7:23 PM, Tokunori Ikegami wrote:
Currently a data length of up to UINT_MAX can be specified, but such
requests fail. This is caused by the max segments parameter being capped
at USHRT_MAX. Resolve this by widening max_segments from unsigned short
to unsigned int.
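
As a back-of-the-envelope illustration (plain userspace C, not kernel
code, assuming 4 KiB pages and the worst case of one page per segment):

/*
 * Sketch: with 4 KiB pages and one page per segment, a request clamped
 * to USHRT_MAX segments tops out around 256 MiB, far below a
 * UINT_MAX-byte (~4 GiB) transfer.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096;	/* assumed page size */
	unsigned long long max_bytes =
		(unsigned long long)USHRT_MAX * page_size;

	printf("USHRT_MAX segments cap: %llu bytes (~256 MiB)\n", max_bytes);
	printf("requested data length:  %llu bytes (~4 GiB)\n",
	       (unsigned long long)UINT_MAX);
	return 0;
}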
Signed-off-by: Tokunori Ikegami <ikegami.t@xxxxxxxxx>
Cc: linux-block@xxxxxxxxxxxxxxx
Cc: linux-nvme@xxxxxxxxxxxxxxxxxxx
---
 block/blk-settings.c     | 2 +-
 drivers/nvme/host/core.c | 2 +-
 include/linux/blkdev.h   | 7 ++++---
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8eda2e7b91e..ed40bda573c2 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
  *    Enables a low level driver to set an upper limit on the number of
  *    hw data segments in a request.
  **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned int max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a4d8c90ee7cc..2b48aab0969e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2193,7 +2193,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 
 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
-		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+		blk_queue_max_segments(q, min_t(u32, max_segments, UINT_MAX));
 	}
 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
 	    is_power_of_2(ctrl->max_hw_sectors))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f629d40c645c..4f4224e20c28 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -338,8 +338,8 @@ struct queue_limits {
 	unsigned int		max_write_zeroes_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
+	unsigned int		max_segments;
 
-	unsigned short		max_segments;
 	unsigned short		max_integrity_segments;
 	unsigned short		max_discard_segments;
 
@@ -1067,7 +1067,8 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segments(struct request_queue *q,
+				   unsigned int max_segments);
 extern void blk_queue_max_discard_segments(struct request_queue *,
 		unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -1276,7 +1277,7 @@ static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_segments(const struct request_queue *q)
+static inline unsigned int queue_max_segments(const struct request_queue *q)
 {
 	return q->limits.max_segments;
 }
One would assume that the same reasoning applies to max_integrity_segments, no?
Otherwise looks good.
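
To make the concern concrete, a minimal sketch (plain userspace C with a
hypothetical set_limit() helper, not the kernel API) of the silent
truncation any limit still declared unsigned short is exposed to:

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for a setter whose parameter stayed short. */
static void set_limit(unsigned short max_segments)
{
	printf("stored limit: %hu\n", max_segments);	/* prints 0 */
}

int main(void)
{
	set_limit(USHRT_MAX + 1);	/* 65536 truncates to 0 */
	return 0;
}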
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
Cheers,
Hannes
--
Dr. Hannes Reinecke Teamlead Storage & Networking
hare@xxxxxxx +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer