This is a note to let you know that I've just added the patch titled

    mmc: core: Use mrq.sbc in close-ended ffu

to the 5.15-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mmc-core-use-mrq.sbc-in-close-ended-ffu.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 4d0c8d0aef6355660b6775d57ccd5d4ea2e15802 Mon Sep 17 00:00:00 2001
From: Avri Altman <avri.altman@xxxxxxx>
Date: Wed, 29 Nov 2023 11:25:35 +0200
Subject: mmc: core: Use mrq.sbc in close-ended ffu

From: Avri Altman <avri.altman@xxxxxxx>

commit 4d0c8d0aef6355660b6775d57ccd5d4ea2e15802 upstream.

Field Firmware Update (ffu) may use close-ended or open ended sequence.
Each such sequence is comprised of a write commands enclosed between 2
switch commands - to and from ffu mode. So for the close-ended case,
it will be: cmd6->cmd23-cmd25-cmd6.

Some host controllers however, get confused when multi-block rw is sent
without sbc, and may generate auto-cmd12 which breaks the ffu sequence.
I encountered this issue while testing fwupd (github.com/fwupd/fwupd) on
HP Chromebook x2, a qualcomm based QC-7c, code name - strongbad.

Instead of a quirk, or hooking the request function of the msm ops,
it would be better to fix the ioctl handling and make it use mrq.sbc
instead of issuing SET_BLOCK_COUNT separately.

Signed-off-by: Avri Altman <avri.altman@xxxxxxx>
Acked-by: Adrian Hunter <adrian.hunter@xxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
Link: https://lore.kernel.org/r/20231129092535.3278-1-avri.altman@xxxxxxx
Signed-off-by: Ulf Hansson <ulf.hansson@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/mmc/core/block.c |   46 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)

--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -399,6 +399,10 @@ struct mmc_blk_ioc_data {
 	struct mmc_ioc_cmd ic;
 	unsigned char *buf;
 	u64 buf_bytes;
+	unsigned int flags;
+#define MMC_BLK_IOC_DROP	BIT(0)	/* drop this mrq */
+#define MMC_BLK_IOC_SBC	BIT(1)	/* use mrq.sbc */
+
 	struct mmc_rpmb_data *rpmb;
 };
 
@@ -464,7 +468,7 @@ static int mmc_blk_ioctl_copy_to_user(st
 }
 
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
-			       struct mmc_blk_ioc_data *idata)
+			       struct mmc_blk_ioc_data **idatas, int i)
 {
 	struct mmc_command cmd = {}, sbc = {};
 	struct mmc_data data = {};
@@ -472,10 +476,18 @@ static int __mmc_blk_ioctl_cmd(struct mm
 	struct scatterlist sg;
 	int err;
 	unsigned int target_part;
+	struct mmc_blk_ioc_data *idata = idatas[i];
+	struct mmc_blk_ioc_data *prev_idata = NULL;
 
 	if (!card || !md || !idata)
 		return -EINVAL;
 
+	if (idata->flags & MMC_BLK_IOC_DROP)
+		return 0;
+
+	if (idata->flags & MMC_BLK_IOC_SBC)
+		prev_idata = idatas[i - 1];
+
 	/*
 	 * The RPMB accesses comes in from the character device, so we
 	 * need to target these explicitly. Else we just target the
@@ -542,7 +554,7 @@ static int __mmc_blk_ioctl_cmd(struct mm
 		return err;
 	}
 
-	if (idata->rpmb) {
+	if (idata->rpmb || prev_idata) {
 		sbc.opcode = MMC_SET_BLOCK_COUNT;
 		/*
 		 * We don't do any blockcount validation because the max size
@@ -550,6 +562,8 @@ static int __mmc_blk_ioctl_cmd(struct mm
 		 * 'Reliable Write' bit here.
 		 */
 		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+		if (prev_idata)
+			sbc.arg = prev_idata->ic.arg;
 		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
 		mrq.sbc = &sbc;
 	}
@@ -561,6 +575,15 @@ static int __mmc_blk_ioctl_cmd(struct mm
 	mmc_wait_for_req(card->host, &mrq);
 
 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+	if (prev_idata) {
+		memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
+		if (sbc.error) {
+			dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
+							__func__, sbc.error);
+			return sbc.error;
+		}
+	}
+
 	if (cmd.error) {
 		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
 						__func__, cmd.error);
@@ -1030,6 +1053,20 @@ static inline void mmc_blk_reset_success
 	md->reset_done &= ~type;
 }
 
+static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
+{
+	struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
+	int i;
+
+	for (i = 1; i < mq_rq->ioc_count; i++) {
+		if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
+		    mmc_op_multi(idata[i]->ic.opcode)) {
+			idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
+			idata[i]->flags |= MMC_BLK_IOC_SBC;
+		}
+	}
+}
+
 /*
  * The non-block commands come back from the block layer after it queued it and
  * processed it with all other requests and then they get issued in this
@@ -1057,11 +1094,14 @@ static void mmc_blk_issue_drv_op(struct
 			if (ret)
 				break;
 		}
+
+		mmc_blk_check_sbc(mq_rq);
+
 		fallthrough;
 	case MMC_DRV_OP_IOCTL_RPMB:
 		idata = mq_rq->drv_op_data;
 		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
-			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+			ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
 			if (ret)
 				break;
 		}


Patches currently in stable-queue which might be from avri.altman@xxxxxxx are

queue-5.15/mmc-core-use-mrq.sbc-in-close-ended-ffu.patch
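
For context only (not part of the patch): below is a minimal userspace sketch
of the close-ended ffu sequence this change targets, issued through the
MMC_IOC_MULTI_CMD ioctl on the block device. The function name, the
placeholder CMD6 arguments and the 512-byte block size are illustrative
assumptions, not taken from fwupd or from this patch; the opcode and
response-flag constants are not in the uapi header and mirror the values
used by mmc-utils.

/*
 * Illustrative sketch, not part of the patch: a close-ended ffu sequence
 * (CMD6 -> CMD23 -> CMD25 -> CMD6) sent as one MMC_IOC_MULTI_CMD request.
 * With the fix applied, the kernel drops the standalone CMD23 and issues it
 * as mrq.sbc of the following CMD25, so the host cannot inject auto-CMD12.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* Opcodes and response flags as defined by the eMMC spec / mmc-utils. */
#define MMC_SWITCH			 6
#define MMC_SET_BLOCK_COUNT		23
#define MMC_WRITE_MULTIPLE_BLOCK	25

#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_BUSY	(1 << 3)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_CMD_AC	(0 << 5)
#define MMC_CMD_ADTC	(1 << 5)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R1B	(MMC_RSP_R1 | MMC_RSP_BUSY)

/*
 * fd is an open /dev/mmcblkN; fw points to 'blocks' 512-byte sectors of
 * firmware.  enter_ffu_arg/exit_ffu_arg are placeholder CMD6 arguments.
 */
static int ffu_close_ended(int fd, void *fw, unsigned int blocks,
			   uint32_t enter_ffu_arg, uint32_t exit_ffu_arg)
{
	struct mmc_ioc_multi_cmd *mc;
	struct mmc_ioc_cmd *ic;
	int ret;

	mc = calloc(1, sizeof(*mc) + 4 * sizeof(struct mmc_ioc_cmd));
	if (!mc)
		return -1;
	mc->num_of_cmds = 4;
	ic = mc->cmds;

	/* CMD6: switch into ffu mode. */
	ic[0].opcode = MMC_SWITCH;
	ic[0].arg = enter_ffu_arg;
	ic[0].flags = MMC_RSP_R1B | MMC_CMD_AC;
	ic[0].write_flag = 1;

	/* CMD23: block count for the multi-block write that follows. */
	ic[1].opcode = MMC_SET_BLOCK_COUNT;
	ic[1].arg = blocks;
	ic[1].flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* CMD25: the firmware payload itself. */
	ic[2].opcode = MMC_WRITE_MULTIPLE_BLOCK;
	ic[2].write_flag = 1;
	ic[2].blksz = 512;
	ic[2].blocks = blocks;
	ic[2].flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	mmc_ioc_cmd_set_data(ic[2], fw);

	/* CMD6: switch back out of ffu mode. */
	ic[3].opcode = MMC_SWITCH;
	ic[3].arg = exit_ffu_arg;
	ic[3].flags = MMC_RSP_R1B | MMC_CMD_AC;
	ic[3].write_flag = 1;

	ret = ioctl(fd, MMC_IOC_MULTI_CMD, mc);
	free(mc);
	return ret;
}

Because the whole batch arrives via MMC_IOC_MULTI_CMD, the kernel handles it
in mmc_blk_issue_drv_op(), which is where the new mmc_blk_check_sbc() pairs
the CMD23 with the CMD25 before the per-command loop runs.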