Hi Jeff,

I just noticed I hit the wrong button and my previous reply was only sent
to the MTD list, so I'm resending with fixed recipients...

On 10.05.19 14:17, lede@xxxxxxxxxxxx wrote:
> From: Jeff Kletsky <git-commits@xxxxxxxxxxxx>
>
> The GigaDevice GD5F1GQ4UFxxG SPI NAND is in current production devices
> and, while it has the same logical layout as the E-series devices,
> it differs in the SPI interfacing in significant ways.
>
> To accommodate these changes, this patch also:
>
> * Adds support for two-byte manufacturer IDs
> * Adds #define-s for three-byte addressing for read ops
>
> http://www.gigadevice.com/datasheet/gd5f1gq4xfxxg/
>
> Signed-off-by: Jeff Kletsky <git-commits@xxxxxxxxxxxx>

Maybe it would be better to split this patch into three parts:

* Add support for two-byte device IDs
* Add #define-s for three-byte addressing for read ops
* Add support for GD5F1GQ4UFxxG

Anyway the content looks good to me, so:

Reviewed-by: Frieder Schrempf <frieder.schrempf@xxxxxxxxxx>

> ---
>  drivers/mtd/nand/spi/core.c       |  2 +-
>  drivers/mtd/nand/spi/gigadevice.c | 79 +++++++++++++++++++++++++++++++--------
>  include/linux/mtd/spinand.h       | 34 ++++++++++++++++-
>  3 files changed, 97 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
> index fa87ae28cdfe..a13154785dad 100644
> --- a/drivers/mtd/nand/spi/core.c
> +++ b/drivers/mtd/nand/spi/core.c
> @@ -853,7 +853,7 @@ spinand_select_op_variant(struct spinand_device *spinand,
>   */
>  int spinand_match_and_init(struct spinand_device *spinand,
>                             const struct spinand_info *table,
> -                           unsigned int table_size, u8 devid)
> +                           unsigned int table_size, u16 devid)
>  {
>          struct nand_device *nand = spinand_to_nand(spinand);
>          unsigned int i;
> diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
> index 0b49d8264bef..d6497ac4c5d8 100644
> --- a/drivers/mtd/nand/spi/gigadevice.c
> +++ b/drivers/mtd/nand/spi/gigadevice.c
> @@ -9,11 +9,17 @@
>  #include <linux/mtd/spinand.h>
>
>  #define SPINAND_MFR_GIGADEVICE                  0xC8
> +
>  #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS      (1 << 4)
>  #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS        (3 << 4)
>
>  #define GD5FXGQ4UEXXG_REG_STATUS2               0xf0
>
> +#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK          (7 << 4)
> +#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS   (0 << 4)
> +#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS  (1 << 4)
> +#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR   (7 << 4)
> +
>  static SPINAND_OP_VARIANTS(read_cache_variants,
>                  SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
>                  SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
> @@ -22,6 +28,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
>                  SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
>                  SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
>
> +static SPINAND_OP_VARIANTS(read_cache_variants_f,
> +                SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
> +                SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
> +                SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
> +                SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
> +                SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
> +                SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
> +
>  static SPINAND_OP_VARIANTS(write_cache_variants,
>                  SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
>                  SPINAND_PROG_LOAD(true, 0, NULL, 0));
> @@ -59,6 +73,11 @@ static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
>          return 0;
>  }
>
> +static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
> +        .ecc = gd5fxgq4xa_ooblayout_ecc,
> +        .free = gd5fxgq4xa_ooblayout_free,
> +};
> +
>  static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
>                                       u8 status)
>  {
> @@ -83,7 +102,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
>          return -EINVAL;
>  }
>
> -static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
> +static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
>                                         struct mtd_oob_region *region)
>  {
>          if (section)
> @@ -95,7 +114,7 @@ static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
>          return 0;
>  }
>
> -static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
> +static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
>                                          struct mtd_oob_region *region)
>  {
>          if (section)
> @@ -108,6 +127,11 @@ static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
>          return 0;
>  }
>
> +static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
> +        .ecc = gd5fxgq4_variant2_ooblayout_ecc,
> +        .free = gd5fxgq4_variant2_ooblayout_free,
> +};
> +
>  static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
>                                          u8 status)
>  {
> @@ -150,15 +174,25 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
>          return -EINVAL;
>  }
>
> -static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
> -        .ecc = gd5fxgq4xa_ooblayout_ecc,
> -        .free = gd5fxgq4xa_ooblayout_free,
> -};
> +static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
> +                                        u8 status)
> +{
> +        switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
> +        case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
> +                return 0;
>
> -static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
> -        .ecc = gd5fxgq4uexxg_ooblayout_ecc,
> -        .free = gd5fxgq4uexxg_ooblayout_free,
> -};
> +        case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
> +                return 3;
> +
> +        case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
> +                return -EBADMSG;
> +
> +        default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
> +                return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
> +        }
> +
> +        return -EINVAL;
> +}
>
>  static const struct spinand_info gigadevice_spinand_table[] = {
>          SPINAND_INFO("GD5F1GQ4xA", 0xF1,
> @@ -195,25 +229,40 @@ static const struct spinand_info gigadevice_spinand_table[] = {
>                       &write_cache_variants,
>                       &update_cache_variants),
>                       0,
> -                     SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout,
> +                     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
>                                       gd5fxgq4uexxg_ecc_get_status)),
> +        SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
> +                     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
> +                     NAND_ECCREQ(8, 512),
> +                     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
> +                                              &write_cache_variants,
> +                                              &update_cache_variants),
> +                     0,
> +                     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
> +                                     gd5fxgq4ufxxg_ecc_get_status)),
>  };
>
>  static int gigadevice_spinand_detect(struct spinand_device *spinand)
>  {
>          u8 *id = spinand->id.data;
> +        u16 did;
>          int ret;
>
>          /*
> -         * For GD NANDs, There is an address byte needed to shift in before IDs
> -         * are read out, so the first byte in raw_id is dummy.
> +         * Earlier GDF5-series devices (A,E) return [0][MID][DID]
> +         * Later (F) devices return [MID][DID1][DID2]
>           */
> -        if (id[1] != SPINAND_MFR_GIGADEVICE)
> +
> +        if (id[0] == SPINAND_MFR_GIGADEVICE)
> +                did = (id[1] << 8) + id[2];
> +        else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
> +                did = id[2];
> +        else
>                  return 0;
>
>          ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
>                                       ARRAY_SIZE(gigadevice_spinand_table),
> -                                     id[2]);
> +                                     did);
>          if (ret)
>                  return ret;
>
> diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
> index b92e2aa955b6..8901ba272538 100644
> --- a/include/linux/mtd/spinand.h
> +++ b/include/linux/mtd/spinand.h
> @@ -68,30 +68,60 @@
>                     SPI_MEM_OP_DUMMY(ndummy, 1), \
>                     SPI_MEM_OP_DATA_IN(len, buf, 1))
>
> +#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
> +        SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
> +                   SPI_MEM_OP_ADDR(3, addr, 1), \
> +                   SPI_MEM_OP_DUMMY(ndummy, 1), \
> +                   SPI_MEM_OP_DATA_IN(len, buf, 1))
> +
>  #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
>          SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
>                     SPI_MEM_OP_ADDR(2, addr, 1), \
>                     SPI_MEM_OP_DUMMY(ndummy, 1), \
>                     SPI_MEM_OP_DATA_IN(len, buf, 2))
>
> +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
> +        SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
> +                   SPI_MEM_OP_ADDR(3, addr, 1), \
> +                   SPI_MEM_OP_DUMMY(ndummy, 1), \
> +                   SPI_MEM_OP_DATA_IN(len, buf, 2))
> +
>  #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
>          SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
>                     SPI_MEM_OP_ADDR(2, addr, 1), \
>                     SPI_MEM_OP_DUMMY(ndummy, 1), \
>                     SPI_MEM_OP_DATA_IN(len, buf, 4))
>
> +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
> +        SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
> +                   SPI_MEM_OP_ADDR(3, addr, 1), \
> +                   SPI_MEM_OP_DUMMY(ndummy, 1), \
> +                   SPI_MEM_OP_DATA_IN(len, buf, 4))
> +
>  #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
>          SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
>                     SPI_MEM_OP_ADDR(2, addr, 2), \
>                     SPI_MEM_OP_DUMMY(ndummy, 2), \
>                     SPI_MEM_OP_DATA_IN(len, buf, 2))
>
> +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
> +        SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
> +                   SPI_MEM_OP_ADDR(3, addr, 2), \
> +                   SPI_MEM_OP_DUMMY(ndummy, 2), \
> +                   SPI_MEM_OP_DATA_IN(len, buf, 2))
> +
>  #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
>          SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
>                     SPI_MEM_OP_ADDR(2, addr, 4), \
>                     SPI_MEM_OP_DUMMY(ndummy, 4), \
>                     SPI_MEM_OP_DATA_IN(len, buf, 4))
>
> +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
> +        SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
> +                   SPI_MEM_OP_ADDR(3, addr, 4), \
> +                   SPI_MEM_OP_DUMMY(ndummy, 4), \
> +                   SPI_MEM_OP_DATA_IN(len, buf, 4))
> +
>  #define SPINAND_PROG_EXEC_OP(addr) \
>          SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
>                     SPI_MEM_OP_ADDR(3, addr, 1), \
> @@ -260,7 +290,7 @@ struct spinand_ecc_info {
>   */
>  struct spinand_info {
>          const char *model;
> -        u8 devid;
> +        u16 devid;
>          u32 flags;
>          struct nand_memory_organization memorg;
>          struct nand_ecc_req eccreq;
> @@ -415,7 +445,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
>
>  int spinand_match_and_init(struct spinand_device *dev,
>                             const struct spinand_info *table,
> -                           unsigned int table_size, u8 devid);
> +                           unsigned int table_size, u16 devid);
>
>  int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
>  int spinand_select_target(struct spinand_device *spinand, unsigned int target);
>
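One note for readers without the datasheet at hand: the ID handling in
gigadevice_spinand_detect() above boils down to the small decode below.
This is only an illustrative sketch of mine, not part of the patch; the
standalone function name and the hosted build with <stdint.h> are made up
for illustration.

/* Sketch only: mirrors the ID handling in gigadevice_spinand_detect().
 * Returns the (possibly two-byte) device ID, or 0 if the raw ID bytes
 * do not look like a GigaDevice part.
 */
#include <stdint.h>

#define SPINAND_MFR_GIGADEVICE 0xC8

static uint16_t gd_decode_devid(const uint8_t id[3])
{
        /* F-series parts answer [MID][DID1][DID2] -> two-byte device ID */
        if (id[0] == SPINAND_MFR_GIGADEVICE)
                return (id[1] << 8) + id[2];

        /* A/E-series parts answer [dummy 0x00][MID][DID] -> one-byte ID */
        if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
                return id[2];

        return 0;
}

So a GD5F1GQ4UFxxG answering c8 b1 48 decodes to 0xb148, matching the new
table entry, while a GD5F1GQ4xA answering 00 c8 f1 still decodes to 0xf1.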
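Similarly, the new gd5fxgq4ufxxg_ecc_get_status() reads the 3-bit ECC
status field in bits [6:4]. A quick self-check of that mapping, again just
a sketch of mine and not patch code (the helper name and the assert-based
main() are invented for illustration):

#include <assert.h>

#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)

/* Same mapping as gd5fxgq4ufxxg_ecc_get_status(): field 0 -> 0 bitflips,
 * field 1 -> up to 3, fields 2..6 -> 4..8, field 7 -> uncorrectable
 * (reported as -EBADMSG by the driver, -1 here).
 */
static int gd_f_decode_bitflips(unsigned char status)
{
        unsigned int field = (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4;

        if (field == 0)
                return 0;
        if (field == 1)
                return 3;
        if (field == 7)
                return -1;
        return field + 2;
}

int main(void)
{
        assert(gd_f_decode_bitflips(0 << 4) == 0);
        assert(gd_f_decode_bitflips(1 << 4) == 3);
        assert(gd_f_decode_bitflips(2 << 4) == 4);
        assert(gd_f_decode_bitflips(6 << 4) == 8);
        assert(gd_f_decode_bitflips(7 << 4) == -1);
        return 0;
}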