Use the scatterlist memory iterator instead of just dereferencing
virtual memory using sg_virt(). This makes highmem references work
properly.

Suggested-by: Christoph Hellwig <hch@xxxxxx>
Link: https://lore.kernel.org/linux-mmc/20240122073423.GA25859@xxxxxx/
Signed-off-by: Linus Walleij <linus.walleij@xxxxxxxxxx>
---
 drivers/mmc/host/davinci_mmc.c | 61 +++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 33 deletions(-)

diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ee3b1a4e0848..c46577305138 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -180,12 +180,6 @@ struct mmc_davinci_host {
 #define DAVINCI_MMC_DATADIR_WRITE	2
 	unsigned char data_dir;
 
-	/* buffer is used during PIO of one scatterlist segment, and
-	 * is updated along with buffer_bytes_left. bytes_left applies
-	 * to all N blocks of the PIO transfer.
-	 */
-	u8 *buffer;
-	u32 buffer_bytes_left;
 	u32 bytes_left;
 
 	struct dma_chan *dma_tx;
@@ -196,8 +190,8 @@ struct mmc_davinci_host {
 	bool active_request;
 
 	/* For PIO we walk scatterlists one segment at a time. */
+	struct sg_mapping_iter sg_miter;
 	unsigned int sg_len;
-	struct scatterlist *sg;
 
 	/* Version of the MMC/SD controller */
 	u8 version;
@@ -213,30 +207,24 @@ struct mmc_davinci_host {
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
 
 /* PIO only */
-static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
-{
-	host->buffer_bytes_left = sg_dma_len(host->sg);
-	host->buffer = sg_virt(host->sg);
-	if (host->buffer_bytes_left > host->bytes_left)
-		host->buffer_bytes_left = host->bytes_left;
-}
-
 static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
 					unsigned int n)
 {
+	struct sg_mapping_iter *sgm = &host->sg_miter;
+	size_t sglen;
 	u8 *p;
 	unsigned int i;
 
-	if (host->buffer_bytes_left == 0) {
-		host->sg = sg_next(host->data->sg);
-		mmc_davinci_sg_to_buf(host);
+	/*
+	 * By adjusting sgm->consumed this will give a pointer to the
+	 * current index into the sgm.
+	 */
+	if (!sg_miter_next(sgm)) {
+		dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n");
+		return;
 	}
-
-	p = host->buffer;
-	if (n > host->buffer_bytes_left)
-		n = host->buffer_bytes_left;
-	host->buffer_bytes_left -= n;
-	host->bytes_left -= n;
+	p = sgm->addr;
+	sglen = sgm->length;
 
 	/* NOTE: we never transfer more than rw_threshold bytes
 	 * to/from the fifo here; there's no I/O overlap.
@@ -261,7 +249,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
 			p = p + (n & 3);
 		}
 	}
-	host->buffer = p;
+
+	sgm->consumed = n;
+	host->bytes_left -= n;
 }
 
 static void mmc_davinci_start_command(struct mmc_davinci_host *host,
@@ -517,6 +507,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
 	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
 	int timeout;
 	struct mmc_data *data = req->data;
+	unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
 
 	if (host->version == MMC_CTLR_VERSION_2)
 		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
@@ -545,12 +536,14 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
 
 	/* Configure the FIFO */
 	if (data->flags & MMC_DATA_WRITE) {
+		flags |= SG_MITER_FROM_SG;
 		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
 		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
 			host->base + DAVINCI_MMCFIFOCTL);
 		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
 			host->base + DAVINCI_MMCFIFOCTL);
 	} else {
+		flags |= SG_MITER_TO_SG;
 		host->data_dir = DAVINCI_MMC_DATADIR_READ;
 		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
 			host->base + DAVINCI_MMCFIFOCTL);
@@ -558,7 +551,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
 			host->base + DAVINCI_MMCFIFOCTL);
 	}
 
-	host->buffer = NULL;
 	host->bytes_left = data->blocks * data->blksz;
 
 	/* For now we try to use DMA whenever we won't need partial FIFO
@@ -576,8 +568,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
 	} else {
 		/* Revert to CPU Copy */
 		host->sg_len = data->sg_len;
-		host->sg = host->data->sg;
-		mmc_davinci_sg_to_buf(host);
+		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 	}
 }
 
@@ -843,6 +834,8 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
 {
 	mmc_davinci_reset_ctrl(host, 1);
 	mmc_davinci_reset_ctrl(host, 0);
+	if (!host->do_dma)
+		sg_miter_stop(&host->sg_miter);
 }
 
 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
@@ -919,11 +912,13 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
 	if (qstatus & MMCST0_DATDNE) {
 		/* All blocks sent/received, and CRC checks passed */
 		if (data != NULL) {
-			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
-				/* if datasize < rw_threshold
-				 * no RX ints are generated
-				 */
-				davinci_fifo_data_trans(host, host->bytes_left);
+			if (!host->do_dma) {
+				if (host->bytes_left > 0)
+					/* if datasize < rw_threshold
					 * no RX ints are generated
					 */
+					davinci_fifo_data_trans(host, host->bytes_left);
+				sg_miter_stop(&host->sg_miter);
 			}
 			end_transfer = 1;
 			data->bytes_xfered = data->blocks * data->blksz;
-- 
2.34.1
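
For readers who have not used the scatterlist iterator before, the pattern the
driver adopts above boils down to roughly the following minimal sketch. It is
illustrative only and not part of the patch: pio_write_sglist() and
copy_chunk_to_fifo() are hypothetical stand-ins for davinci_fifo_data_trans()
and its FIFO register writes.

#include <linux/scatterlist.h>

/* Hypothetical helper standing in for the driver's FIFO register writes */
extern void copy_chunk_to_fifo(const void *buf, size_t len);

static void pio_write_sglist(struct scatterlist *sgl, unsigned int nents,
			     size_t bytes_left)
{
	struct sg_mapping_iter miter;

	/* SG_MITER_ATOMIC: the mapping may be used from interrupt context */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (bytes_left && sg_miter_next(&miter)) {
		size_t n = miter.length;

		if (n > bytes_left)
			n = bytes_left;

		/* miter.addr maps the current segment, highmem pages included */
		copy_chunk_to_fifo(miter.addr, n);

		/* tell the iterator how much of this segment was actually used */
		miter.consumed = n;
		bytes_left -= n;
	}

	/* drop the temporary mapping; must also run on the error/abort path */
	sg_miter_stop(&miter);
}

sg_miter_stop() has to run once iteration finishes or is abandoned so that the
temporary mapping (and, for SG_MITER_TO_SG, the writeback/flush of the mapped
page) is released, which is why the patch calls it both on the MMCST0_DATDNE
completion path and in davinci_abort_data().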