Only pure-iomap configurations, i.e. systems where CONFIG_BUFFER_HEAD is
disabled, can enable NVMe devices with LBA formats whose block size is
larger than the PAGE_SIZE. Systems with buffer-heads enabled cannot
currently make use of these devices, but this will eventually get fixed.

We cap the maximum supported LBA format at a shift of 19 (512 KiB), as
support for a 1 MiB LBA format still needs some work.

Also add a debug module parameter, nvme_core.debug_large_lbas, so that
folks who want to test and extend support beyond what is currently
supported can shoot themselves in the foot; it is only to be used on
pure-iomap configurations.

Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
---
 drivers/nvme/host/core.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f3a01b79148c..0365f260c514 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -88,6 +88,10 @@ module_param(apst_secondary_latency_tol_us, ulong, 0644);
 MODULE_PARM_DESC(apst_secondary_latency_tol_us,
 	"secondary APST latency tolerance in us");
 
+static bool debug_large_lbas;
+module_param(debug_large_lbas, bool, 0644);
+MODULE_PARM_DESC(debug_large_lbas, "allow LBAs > PAGE_SIZE");
+
 /*
  * nvme_wq - hosts nvme related works that are not reset or delete
  * nvme_reset_wq - hosts nvme reset works
@@ -1878,6 +1882,29 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
+/* XXX: shift 20 (1 MiB LBA) crashes on pure-iomap */
+#define NVME_MAX_SHIFT_SUPPORTED 19
+
+static bool nvme_lba_shift_supported(struct nvme_ns *ns)
+{
+	if (ns->lba_shift <= PAGE_SHIFT)
+		return true;
+
+	if (IS_ENABLED(CONFIG_BUFFER_HEAD))
+		return false;
+
+	if (ns->lba_shift <= NVME_MAX_SHIFT_SUPPORTED)
+		return true;
+
+	if (debug_large_lbas) {
+		dev_warn(ns->ctrl->device,
+			"forcibly allowing LBAs > 1 MiB due to nvme_core.debug_large_lbas -- use at your own risk\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
@@ -1885,13 +1912,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	u32 bs = 1U << ns->lba_shift;
 	u32 atomic_bs, phys_bs, io_opt = 0;
 
-	/*
-	 * The block layer can't support LBA sizes larger than the page size
-	 * yet, so catch this early and don't allow block I/O.
-	 */
-	if (ns->lba_shift > PAGE_SHIFT) {
+	if (!nvme_lba_shift_supported(ns)) {
 		capacity = 0;
 		bs = (1 << 9);
+		dev_warn(ns->ctrl->device, "I'm sorry dave, I'm afraid I can't do that\n");
 	}
 
 	blk_integrity_unregister(disk);
-- 
2.39.2
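
For reviewers, below is a minimal userspace sketch of the acceptance policy
this patch implements, which may help when reasoning about the cap and the
debug override. It is not part of the patch: buffer_heads_enabled,
debug_large_lbas and the 4 KiB PAGE_SHIFT are stand-in assumptions for
CONFIG_BUFFER_HEAD, nvme_core.debug_large_lbas and the real page size.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT			12	/* assumes 4 KiB pages */
#define NVME_MAX_SHIFT_SUPPORTED	19	/* 512 KiB cap from the patch */

static bool buffer_heads_enabled;		/* stand-in for CONFIG_BUFFER_HEAD */
static bool debug_large_lbas;			/* stand-in for nvme_core.debug_large_lbas */

/* Mirrors the checks in nvme_lba_shift_supported() from the patch. */
static bool lba_shift_supported(unsigned int lba_shift)
{
	if (lba_shift <= PAGE_SHIFT)
		return true;			/* always supported */
	if (buffer_heads_enabled)
		return false;			/* buffer-heads cannot cope yet */
	if (lba_shift <= NVME_MAX_SHIFT_SUPPORTED)
		return true;			/* pure-iomap, up to 512 KiB */
	return debug_large_lbas;		/* beyond the cap: explicit opt-in only */
}

int main(void)
{
	for (unsigned int shift = 9; shift <= 20; shift++)
		printf("lba_shift %2u (%7u bytes): %s\n", shift, 1u << shift,
		       lba_shift_supported(shift) ? "allowed" : "rejected");
	return 0;
}

With both flags left false (pure-iomap, no debug override), shifts 9
through 19 print "allowed" and shift 20 prints "rejected", matching the
512 KiB cap described in the commit message.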