Re: Intermittent storage (dm-crypt?) freeze - regression 6.4->6.5

On Thu, 2 Nov 2023, Marek Marczykowski-Górecki wrote:

> On Thu, Nov 02, 2023 at 10:28:57AM +0100, Mikulas Patocka wrote:
> 
> > Try lowering /sys/block/nvme0n1/queue/max_sectors_kb to some small value 
> > (for example 64) and test if it helps.
> 
> Yes, this helps too.
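
(For reference, that test is just writing the value into the sysfs file as
root, e.g.

  echo 64 > /sys/block/nvme0n1/queue/max_sectors_kb

with the path and value as above; the setting is not persistent across
reboots.)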

On a plain upstream kernel with no other modifications (and with the default 
max_sectors_kb), set /sys/module/nvme/parameters/sgl_threshold to "0" and 
test whether it still deadlocks. Then set the value to "1" and test again.
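
Something along these lines should do it, as root, using the parameter path
quoted above (if the parameter happens to be read-only on your build, booting
with nvme.sgl_threshold=0 on the kernel command line is the equivalent):

  echo 0 > /sys/module/nvme/parameters/sgl_threshold
  (run the workload, check whether it deadlocks)
  echo 1 > /sys/module/nvme/parameters/sgl_threshold
  (run the workload again)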

Revert sgl_threshold back to the default (32768). Boot the kernel with the 
option "iommu=panic". Reproduce the deadlock, and if you get a kernel 
panic, send us the panic log.
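
Roughly:

  echo 32768 > /sys/module/nvme/parameters/sgl_threshold

and add "iommu=panic" to the kernel command line - for example via
GRUB_CMDLINE_LINUX in /etc/default/grub, followed by regenerating the grub
configuration and rebooting; adjust for whatever boot loader you use.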

Then try this patch (without "iommu=panic"), reproduce the deadlock, and 
tell us which of the printk statements is triggered when it happens.
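
All the messages added by the patch have the form "allocation failure at
<line>", so after reproducing the hang, something like

  dmesg | grep "allocation failure at"

should show which call site was hit. (The two branches in the first hunk
print different __LINE__ values, presumably to tell the small-pool and the
page-pool PRP allocations apart.)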

Mikulas

---
 drivers/nvme/host/core.c |    8 ++++++--
 drivers/nvme/host/pci.c  |   27 ++++++++++++++++++++++-----
 2 files changed, 28 insertions(+), 7 deletions(-)

Index: linux-stable/drivers/nvme/host/pci.c
===================================================================
--- linux-stable.orig/drivers/nvme/host/pci.c	2023-10-31 15:35:38.000000000 +0100
+++ linux-stable/drivers/nvme/host/pci.c	2023-11-02 17:38:20.000000000 +0100
@@ -622,6 +622,10 @@ static blk_status_t nvme_pci_setup_prps(
 	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 	if (!prp_list) {
 		iod->nr_allocations = -1;
+		if (nprps <= (256 / 8))
+			printk("allocation failure at %d\n", __LINE__);
+		else
+			printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
 	}
 	iod->list[0].prp_list = prp_list;
@@ -631,8 +635,10 @@ static blk_status_t nvme_pci_setup_prps(
 		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
-			if (!prp_list)
+			if (!prp_list) {
+				printk("allocation failure at %d\n", __LINE__);
 				goto free_prps;
+			}
 			iod->list[iod->nr_allocations++].prp_list = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -712,6 +718,7 @@ static blk_status_t nvme_pci_setup_sgls(
 	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
 	if (!sg_list) {
 		iod->nr_allocations = -1;
+		printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
 	}
 
@@ -736,8 +743,10 @@ static blk_status_t nvme_setup_prp_simpl
 	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
 
 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
-	if (dma_mapping_error(dev->dev, iod->first_dma))
+	if (dma_mapping_error(dev->dev, iod->first_dma)) {
+		printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
+	}
 	iod->dma_len = bv->bv_len;
 
 	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
@@ -755,8 +764,10 @@ static blk_status_t nvme_setup_sgl_simpl
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
-	if (dma_mapping_error(dev->dev, iod->first_dma))
+	if (dma_mapping_error(dev->dev, iod->first_dma)) {
+		printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
+	}
 	iod->dma_len = bv->bv_len;
 
 	cmnd->flags = NVME_CMD_SGL_METABUF;
@@ -791,8 +802,10 @@ static blk_status_t nvme_map_data(struct
 
 	iod->dma_len = 0;
 	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-	if (!iod->sgt.sgl)
+	if (!iod->sgt.sgl) {
+		printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
+	}
 	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
 	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
 	if (!iod->sgt.orig_nents)
@@ -801,8 +814,12 @@ static blk_status_t nvme_map_data(struct
 	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
 			     DMA_ATTR_NO_WARN);
 	if (rc) {
-		if (rc == -EREMOTEIO)
+		if (rc == -EREMOTEIO) {
+			printk("allocation failure at %d\n", __LINE__);
 			ret = BLK_STS_TARGET;
+		} else {
+			printk("allocation failure at %d\n", __LINE__);
+		}
 		goto out_free_sg;
 	}
 
Index: linux-stable/drivers/nvme/host/core.c
===================================================================
--- linux-stable.orig/drivers/nvme/host/core.c	2023-10-31 15:35:38.000000000 +0100
+++ linux-stable/drivers/nvme/host/core.c	2023-11-02 17:12:39.000000000 +0100
@@ -708,8 +708,10 @@ blk_status_t nvme_fail_nonready_command(
 	    ctrl->state != NVME_CTRL_DELETING &&
 	    ctrl->state != NVME_CTRL_DEAD &&
 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
-	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) {
+		printk("allocation failure at %d\n", __LINE__);
 		return BLK_STS_RESOURCE;
+	}
 	return nvme_host_path_error(rq);
 }
 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
@@ -784,8 +786,10 @@ static blk_status_t nvme_setup_discard(s
 		 * discard page. If that's also busy, it's safe to return
 		 * busy, as we know we can make progress once that's freed.
 		 */
-		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
+		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) {
+			printk("allocation failure at %d\n", __LINE__);
 			return BLK_STS_RESOURCE;
+		}
 
 		range = page_address(ns->ctrl->discard_page);
 	}
