From: Patrice Chotard <patrice.chotard@xxxxxxxxxxx>

After power up, all SPI NAND blocks are locked. Only read operations
are allowed; write and erase operations are forbidden. The SPI NAND
framework unlocks all the blocks during its initialization.

During standby low-power mode, the memory is powered down and loses
its configuration. On resume, the QSPI driver state is restored, but
the SPI NAND framework does not reconfigure the memory.

This patch adds an SPI-NAND MTD PM handler for the resume op. The
resume op re-initializes the SPI NAND flash to its probed state.

It also adds a new helper, spinand_block_unlock(), which is called in
spinand_init() and in spinand_mtd_resume().

Signed-off-by: Christophe Kerello <christophe.kerello@xxxxxxxxxxx>
Signed-off-by: Patrice Chotard <patrice.chotard@xxxxxxxxxxx>
---
Changes in v2:
  - Add helper spinand_block_unlock().
  - Add spinand_ecc_enable() call.
  - Remove some dev_err().
  - Fix commit title and message.

 drivers/mtd/nand/spi/core.c | 62 +++++++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 9 deletions(-)

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 17f63f95f4a2..f77aeff11f43 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -1074,6 +1074,55 @@ static int spinand_detect(struct spinand_device *spinand)
 	return 0;
 }
 
+static int spinand_block_unlock(struct spinand_device *spinand)
+{
+	struct device *dev = &spinand->spimem->spi->dev;
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int ret = 0, i;
+
+	for (i = 0; i < nand->memorg.ntargets; i++) {
+		ret = spinand_select_target(spinand, i);
+		if (ret)
+			return ret;
+
+		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct device *dev = &spinand->spimem->spi->dev;
+	int ret;
+
+	ret = spinand_reset_op(spinand);
+	if (ret)
+		return;
+
+	ret = spinand_init_quad_enable(spinand);
+	if (ret)
+		return;
+
+	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+	if (ret)
+		return;
+
+	ret = spinand_manufacturer_init(spinand);
+	if (ret)
+		return;
+
+	ret = spinand_block_unlock(spinand);
+	if (ret)
+		return;
+
+	spinand_ecc_enable(spinand, false);
+}
+
 static int spinand_init(struct spinand_device *spinand)
 {
 	struct device *dev = &spinand->spimem->spi->dev;
@@ -1137,15 +1186,9 @@ static int spinand_init(struct spinand_device *spinand)
 	}
 
 	/* After power up, all blocks are locked, so unlock them here. */
-	for (i = 0; i < nand->memorg.ntargets; i++) {
-		ret = spinand_select_target(spinand, i);
-		if (ret)
-			goto err_manuf_cleanup;
-
-		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
-		if (ret)
-			goto err_manuf_cleanup;
-	}
+	ret = spinand_block_unlock(spinand);
+	if (ret)
+		goto err_manuf_cleanup;
 
 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
 	if (ret)
@@ -1167,6 +1210,7 @@ static int spinand_init(struct spinand_device *spinand)
 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
 	mtd->_erase = spinand_mtd_erase;
 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+	mtd->_resume = spinand_mtd_resume;
 
 	if (nand->ecc.engine) {
 		ret = mtd_ooblayout_count_freebytes(mtd);
-- 
2.17.1
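
For context (not part of the patch): the new callback is reached through
the MTD core's PM path, which dispatches system resume to the per-device
_resume hook. Below is a simplified sketch modelled on the mtd_resume()
helper in include/linux/mtd/mtd.h; the in-tree helper has gained extra
handling over time, so treat this as an illustration only:

/*
 * Illustration only: roughly how the MTD core hands system resume to the
 * driver-provided hook. With this patch applied, _resume points to
 * spinand_mtd_resume(), which resets, reconfigures and unlocks the flash.
 */
static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}

So once spinand_init() populates mtd->_resume, the re-initialization runs
automatically whenever the MTD device resumes from standby; the SPI
controller driver needs no extra wiring.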