Diffstat (limited to 'include/linux/mtd')
-rw-r--r--  include/linux/mtd/bbm.h | 39
-rw-r--r--  include/linux/mtd/blktrans.h | 34
-rw-r--r--  include/linux/mtd/cfi.h | 273
-rw-r--r--  include/linux/mtd/cfi_endian.h | 16
-rw-r--r--  include/linux/mtd/concat.h | 16
-rw-r--r--  include/linux/mtd/doc2000.h | 16
-rw-r--r--  include/linux/mtd/flashchip.h | 20
-rw-r--r--  include/linux/mtd/fsmc.h | 173
-rw-r--r--  include/linux/mtd/gen_probe.h | 16
-rw-r--r--  include/linux/mtd/hyperbus.h | 95
-rw-r--r--  include/linux/mtd/inftl.h | 2
-rw-r--r--  include/linux/mtd/jedec.h | 94
-rw-r--r--  include/linux/mtd/latch-addr-flash.h | 29
-rw-r--r--  include/linux/mtd/lpc32xx_mlc.h | 7
-rw-r--r--  include/linux/mtd/lpc32xx_slc.h | 7
-rw-r--r--  include/linux/mtd/map.h | 206
-rw-r--r--  include/linux/mtd/mtd.h | 496
-rw-r--r--  include/linux/mtd/mtdram.h | 3
-rw-r--r--  include/linux/mtd/nand-ecc-mtk.h | 47
-rw-r--r--  include/linux/mtd/nand-ecc-mxic.h | 49
-rw-r--r--  include/linux/mtd/nand-ecc-sw-bch.h | 71
-rw-r--r--  include/linux/mtd/nand-ecc-sw-hamming.h | 89
-rw-r--r--  include/linux/mtd/nand-gpio.h | 8
-rw-r--r--  include/linux/mtd/nand-qpic-common.h | 483
-rw-r--r--  include/linux/mtd/nand.h | 1727
-rw-r--r--  include/linux/mtd/nand_bch.h | 72
-rw-r--r--  include/linux/mtd/nand_ecc.h | 42
-rw-r--r--  include/linux/mtd/ndfc.h | 8
-rw-r--r--  include/linux/mtd/nftl.h | 17
-rw-r--r--  include/linux/mtd/onenand.h | 10
-rw-r--r--  include/linux/mtd/onenand_regs.h | 6
-rw-r--r--  include/linux/mtd/onfi.h | 190
-rw-r--r--  include/linux/mtd/partitions.h | 45
-rw-r--r--  include/linux/mtd/pfow.h | 39
-rw-r--r--  include/linux/mtd/physmap.h | 7
-rw-r--r--  include/linux/mtd/pismo.h | 5
-rw-r--r--  include/linux/mtd/plat-ram.h | 6
-rw-r--r--  include/linux/mtd/platnand.h | 74
-rw-r--r--  include/linux/mtd/qinfo.h | 5
-rw-r--r--  include/linux/mtd/rawnand.h | 1640
-rw-r--r--  include/linux/mtd/sh_flctl.h | 22
-rw-r--r--  include/linux/mtd/sharpsl.h | 16
-rw-r--r--  include/linux/mtd/spear_smi.h | 21
-rw-r--r--  include/linux/mtd/spi-nor.h | 453
-rw-r--r--  include/linux/mtd/spinand.h | 771
-rw-r--r--  include/linux/mtd/super.h | 12
-rw-r--r--  include/linux/mtd/ubi.h | 72
-rw-r--r--  include/linux/mtd/xip.h | 15
48 files changed, 5864 insertions(+), 1700 deletions(-)
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 211ff67e8b0d..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/include/linux/mtd/bbm.h
- *
* NAND family Bad Block Management (BBM) header file
* - Bad Block Table (BBT) implementation
*
@@ -9,21 +8,6 @@
*
* Copyright © 2000-2005
* Thomas Gleixner <tglx@linuxtronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_MTD_BBM_H
#define __LINUX_MTD_BBM_H
@@ -91,18 +75,11 @@ struct nand_bbt_descr {
* with NAND_BBT_CREATE.
*/
#define NAND_BBT_CREATE_EMPTY 0x00000400
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000800
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00001000
/* Write bbt if neccecary */
#define NAND_BBT_WRITE 0x00002000
/* Read and write back block contents when writing bbt */
#define NAND_BBT_SAVECONTENT 0x00004000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00008000
-/* Search good / bad pattern on the last page of the eraseblock */
-#define NAND_BBT_SCANLASTPAGE 0x00010000
+
/*
* Use a flash based bad block table. By default, OOB identifier is saved in
* OOB area. This option is passed to the default bad block table function.
@@ -121,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
@@ -130,13 +107,6 @@ struct nand_bbt_descr {
#define NAND_BBT_SCAN_MAXBLOCKS 4
/*
- * Constants for oob configuration
- */
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-#define ONENAND_BADBLOCK_POS 0
-
-/*
* Bad block scanning errors
*/
#define ONENAND_BBT_READ_ERROR 1
@@ -146,7 +116,6 @@ struct nand_bbt_descr {
/**
* struct bbm_info - [GENERIC] Bad Block Table data structure
* @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @badblockpos: [INTERN] position of the bad block marker in the oob area
* @options: options for this descriptor
* @bbt: [INTERN] bad block table pointer
* @isbad_bbt: function to determine if a block is bad
@@ -156,7 +125,6 @@ struct nand_bbt_descr {
*/
struct bbm_info {
int bbt_erase_shift;
- int badblockpos;
int options;
uint8_t *bbt;
@@ -170,7 +138,6 @@ struct bbm_info {
};
/* OneNAND BBT interface */
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int onenand_default_bbt(struct mtd_info *mtd);
#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..6e471436bba5 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_TRANS_H__
@@ -23,7 +9,6 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
-#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -44,12 +29,12 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct workqueue_struct *wq;
- struct work_struct work;
struct request_queue *rq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
- fmode_t file_mode;
+ bool writable;
};
struct mtd_blktrans_ops {
@@ -92,5 +77,16 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+/**
+ * module_mtd_blktrans() - Helper macro for registering a mtd blktrans driver
+ * @__mtd_blktrans: mtd_blktrans_ops struct
+ *
+ * Helper macro for mtd blktrans drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_mtd_blktrans(__mtd_blktrans) \
+ module_driver(__mtd_blktrans, register_mtd_blktrans, \
+ deregister_mtd_blktrans)
#endif /* __MTD_TRANS_H__ */
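
The new module_mtd_blktrans() helper mirrors module_platform_driver(): a block-translation driver that does nothing special at module init/exit can drop its module_init()/module_exit() boilerplate entirely. Below is a minimal sketch of a hypothetical read-only translation driver; the names (ro_tr, ro_readsect, "exro") are illustrative, and the mtd_blktrans_ops/mtd_blktrans_dev fields used are the conventional ones declared in this header, so check them against your kernel version.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

static int ro_readsect(struct mtd_blktrans_dev *dev, unsigned long block,
		       char *buf)
{
	size_t retlen;

	/* One 512-byte sector per block-layer request */
	return mtd_read(dev->mtd, (loff_t)block << 9, 512, &retlen,
			(u_char *)buf) ? 1 : 0;
}

static void ro_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = mtd->index;
	dev->size = mtd->size >> 9;
	dev->tr = tr;
	dev->readonly = 1;

	if (add_mtd_blktrans_dev(dev))
		kfree(dev);
}

static void ro_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops ro_tr = {
	.name		= "exro",	/* hypothetical device name prefix */
	.major		= 0,		/* dynamic major */
	.part_bits	= 0,
	.blksize	= 512,
	.blkshift	= 9,
	.readsect	= ro_readsect,
	.add_mtd	= ro_add_mtd,
	.remove_dev	= ro_remove_dev,
	.owner		= THIS_MODULE,
};

/* Replaces the old module_init()/module_exit() pair */
module_mtd_blktrans(ro_tr);
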
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 37ef6b194089..35ca19ae21ae 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_CFI_H__
@@ -152,8 +138,8 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
-} __attribute__((packed));
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
+} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -161,7 +147,7 @@ struct cfi_extquery {
uint8_t pri[3];
uint8_t MajorVersion;
uint8_t MinorVersion;
-} __attribute__((packed));
+} __packed;
/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
@@ -179,8 +165,8 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
-} __attribute__((packed));
+ uint8_t extra[];
+} __packed;
struct cfi_intelext_otpinfo {
uint32_t ProtRegAddr;
@@ -188,7 +174,7 @@ struct cfi_intelext_otpinfo {
uint8_t FactProtRegSize;
uint16_t UserGroups;
uint8_t UserProtRegSize;
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_blockinfo {
uint16_t NumIdentBlocks;
@@ -196,7 +182,7 @@ struct cfi_intelext_blockinfo {
uint16_t MinBlockEraseCycles;
uint8_t BitsPerCell;
uint8_t BlockCap;
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_regioninfo {
uint16_t NumIdentPartitions;
@@ -205,7 +191,7 @@ struct cfi_intelext_regioninfo {
uint8_t NumOpAllowedSimEraMode;
uint8_t NumBlockTypes;
struct cfi_intelext_blockinfo BlockTypes[1];
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_programming_regioninfo {
uint8_t ProgRegShift;
@@ -214,7 +200,7 @@ struct cfi_intelext_programming_regioninfo {
uint8_t Reserved2;
uint8_t ControlInvalid;
uint8_t Reserved3;
-} __attribute__((packed));
+} __packed;
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
@@ -233,7 +219,14 @@ struct cfi_pri_amdstd {
uint8_t VppMin;
uint8_t VppMax;
uint8_t TopBottom;
-} __attribute__((packed));
+ /* Below field are added from version 1.5 */
+ uint8_t ProgramSuspend;
+ uint8_t UnlockBypass;
+ uint8_t SecureSiliconSector;
+ uint8_t SoftwareFeatures;
+#define CFI_POLL_STATUS_REG BIT(0)
+#define CFI_POLL_DQ BIT(1)
+} __packed;
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
@@ -245,18 +238,18 @@ struct cfi_pri_atmel {
uint8_t BottomBoot;
uint8_t BurstMode;
uint8_t PageMode;
-} __attribute__((packed));
+} __packed;
struct cfi_pri_query {
uint8_t NumFields;
uint32_t ProtField[1]; /* Not host ordered */
-} __attribute__((packed));
+} __packed;
struct cfi_bri_query {
uint8_t PageModeReadCap;
uint8_t NumFields;
uint32_t ConfField[1]; /* Not host ordered */
-} __attribute__((packed));
+} __packed;
#define P_ID_NONE 0x0000
#define P_ID_INTEL_EXT 0x0001
@@ -293,228 +286,57 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ unsigned long quirks;
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
-/*
- * Returns the command address according to the given geometry.
- */
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
- struct map_info *map, struct cfi_private *cfi)
-{
- unsigned bankwidth = map_bankwidth(map);
- unsigned interleave = cfi_interleave(cfi);
- unsigned type = cfi->device_type;
- uint32_t addr;
-
- addr = (cmd_ofs * type) * interleave;
-
- /* Modify the unlock address if we are in compatibility mode.
- * For 16bit devices on 8 bit busses
- * and 32bit devices on 16 bit busses
- * set the low bit of the alternating bit sequence of the address.
- */
- if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
- addr |= (type >> 1)*interleave;
-
- return addr;
-}
-
-/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
- */
-static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
-{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi);
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(map, cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(map, cmd);
- break;
- }
-
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
-
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
- }
-
- return val;
-}
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
- struct cfi_private *cfi)
-{
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onestat, res = 0;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- onestat = val.x[0];
- /* Or all status words together */
- for (i=1; i < words_per_bus; i++) {
- onestat |= val.x[i];
- }
-
- res = onestat;
- switch(chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- res |= (onestat >> (chip_mode * 32));
-#endif
- case 4:
- res |= (onestat >> (chip_mode * 16));
- case 2:
- res |= (onestat >> (chip_mode * 8));
- case 1:
- ;
- }
-
- /* Last, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- case 1:
- break;
- case 2:
- res = cfi16_to_cpu(map, res);
- break;
- case 4:
- res = cfi32_to_cpu(map, res);
- break;
- default: BUG();
- }
- return res;
-}
-
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi);
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
-
-/*
- * Sends a CFI command to a bank of flash for the given geometry.
- *
- * Returns the offset in flash where the command was written.
- * If prev_val is non-null, it will be set to the value at the command address,
- * before the command was written.
- */
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
-{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
- val = cfi_build_cmd(cmd, map, cfi);
-
- if (prev_val)
- *prev_val = map_read(map, addr);
-
- map_write(map, val, addr);
-
- return addr - base;
-}
+ int type, map_word *prev_val);
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0];
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0] & 0xff;
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
-static inline void cfi_udelay(int us)
-{
- if (us >= 1000) {
- msleep((us+999)/1000);
- } else {
- udelay(us);
- cond_resched();
- }
-}
+void cfi_udelay(int us);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi);
@@ -549,6 +371,7 @@ struct cfi_fixup {
#define CFI_MFR_SHARP 0x00B0
#define CFI_MFR_SST 0x00BF
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON 0x002C /* Micron */
#define CFI_MFR_TOSHIBA 0x0098
#define CFI_MFR_WINBOND 0x00DA
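
The command helpers above (cfi_build_cmd_addr(), cfi_build_cmd(), cfi_send_gen_cmd(), cfi_merge_status(), cfi_udelay()) keep their calling conventions even though they are now out of line. As a reminder of how the command-set drivers use them, here is a sketch of the classic AMD/Fujitsu (command set 0x0002) word-program sequence; the function name is hypothetical, and the DQ/status-register polling that a real driver performs afterwards (see CFI_POLL_STATUS_REG / CFI_POLL_DQ) is omitted.

#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

static void example_cfi_word_program(struct map_info *map,
				     struct cfi_private *cfi,
				     struct flchip *chip,
				     unsigned long adr, map_word datum)
{
	/* 0xAA / 0x55 unlock cycles, then the 0xA0 program command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* cfi_build_cmd()/cfi_build_cmd_addr() have already replicated each
	 * command across the bus width and interleave for this geometry. */
	map_write(map, datum, adr);
}
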
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
index b97a625071f8..5275118aa449 100644
--- a/include/linux/mtd/cfi_endian.h
+++ b/include/linux/mtd/cfi_endian.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <asm/byteorder.h>
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
index ccdbe93a909c..d6f653e07426 100644
--- a/include/linux/mtd/concat.h
+++ b/include/linux/mtd/concat.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MTD device concatenation layer definitions
*
* Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef MTD_CONCAT_H
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 407d1e556c39..1b7b0ee070ca 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux driver for Disk-On-Chip devices
*
@@ -5,21 +6,6 @@
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com>
* Copyright © 2002-2003 SnapGear Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_DOC2000_H__
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..9798c1a1d3b6 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000 Red Hat UK Limited
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_FLASHCHIP_H__
@@ -27,6 +13,7 @@
*/
#include <linux/sched.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
typedef enum {
FL_READY,
@@ -54,7 +41,7 @@ typedef enum {
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
- FL_RESETING,
+ FL_RESETTING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
@@ -85,6 +72,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
deleted file mode 100644
index d6ed61ef451d..000000000000
--- a/include/linux/mtd/fsmc.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * incude/mtd/fsmc.h
- *
- * ST Microelectronics
- * Flexible Static Memory Controller (FSMC)
- * platform data interface and header file
- *
- * Copyright © 2010 ST Microelectronics
- * Vipin Kumar <vipin.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MTD_FSMC_H
-#define __MTD_FSMC_H
-
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/types.h>
-#include <linux/mtd/partitions.h>
-#include <asm/param.h>
-
-#define FSMC_NAND_BW8 1
-#define FSMC_NAND_BW16 2
-
-#define FSMC_MAX_NOR_BANKS 4
-#define FSMC_MAX_NAND_BANKS 4
-
-#define FSMC_FLASH_WIDTH8 1
-#define FSMC_FLASH_WIDTH16 2
-
-/* fsmc controller registers for NOR flash */
-#define CTRL 0x0
- /* ctrl register definitions */
- #define BANK_ENABLE (1 << 0)
- #define MUXED (1 << 1)
- #define NOR_DEV (2 << 2)
- #define WIDTH_8 (0 << 4)
- #define WIDTH_16 (1 << 4)
- #define RSTPWRDWN (1 << 6)
- #define WPROT (1 << 7)
- #define WRT_ENABLE (1 << 12)
- #define WAIT_ENB (1 << 13)
-
-#define CTRL_TIM 0x4
- /* ctrl_tim register definitions */
-
-#define FSMC_NOR_BANK_SZ 0x8
-#define FSMC_NOR_REG_SIZE 0x40
-
-#define FSMC_NOR_REG(base, bank, reg) (base + \
- FSMC_NOR_BANK_SZ * (bank) + \
- reg)
-
-/* fsmc controller registers for NAND flash */
-#define PC 0x00
- /* pc register definitions */
- #define FSMC_RESET (1 << 0)
- #define FSMC_WAITON (1 << 1)
- #define FSMC_ENABLE (1 << 2)
- #define FSMC_DEVTYPE_NAND (1 << 3)
- #define FSMC_DEVWID_8 (0 << 4)
- #define FSMC_DEVWID_16 (1 << 4)
- #define FSMC_ECCEN (1 << 6)
- #define FSMC_ECCPLEN_512 (0 << 7)
- #define FSMC_ECCPLEN_256 (1 << 7)
- #define FSMC_TCLR_1 (1)
- #define FSMC_TCLR_SHIFT (9)
- #define FSMC_TCLR_MASK (0xF)
- #define FSMC_TAR_1 (1)
- #define FSMC_TAR_SHIFT (13)
- #define FSMC_TAR_MASK (0xF)
-#define STS 0x04
- /* sts register definitions */
- #define FSMC_CODE_RDY (1 << 15)
-#define COMM 0x08
- /* comm register definitions */
- #define FSMC_TSET_0 0
- #define FSMC_TSET_SHIFT 0
- #define FSMC_TSET_MASK 0xFF
- #define FSMC_TWAIT_6 6
- #define FSMC_TWAIT_SHIFT 8
- #define FSMC_TWAIT_MASK 0xFF
- #define FSMC_THOLD_4 4
- #define FSMC_THOLD_SHIFT 16
- #define FSMC_THOLD_MASK 0xFF
- #define FSMC_THIZ_1 1
- #define FSMC_THIZ_SHIFT 24
- #define FSMC_THIZ_MASK 0xFF
-#define ATTRIB 0x0C
-#define IOATA 0x10
-#define ECC1 0x14
-#define ECC2 0x18
-#define ECC3 0x1C
-#define FSMC_NAND_BANK_SZ 0x20
-
-#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
- (FSMC_NAND_BANK_SZ * (bank)) + \
- reg)
-
-#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
-
-/*
- * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
- * and it has to be read consecutively and immediately after the 512
- * byte data block for hardware to generate the error bit offsets
- * Managing the ecc bytes in the following way is easier. This way is
- * similar to oobfree structure maintained already in u-boot nand driver
- */
-#define MAX_ECCPLACE_ENTRIES 32
-
-struct fsmc_nand_eccplace {
- uint8_t offset;
- uint8_t length;
-};
-
-struct fsmc_eccplace {
- struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
-};
-
-struct fsmc_nand_timings {
- uint8_t tclr;
- uint8_t tar;
- uint8_t thiz;
- uint8_t thold;
- uint8_t twait;
- uint8_t tset;
-};
-
-enum access_mode {
- USE_DMA_ACCESS = 1,
- USE_WORD_ACCESS,
-};
-
-/**
- * fsmc_nand_platform_data - platform specific NAND controller config
- * @partitions: partition table for the platform, use a default fallback
- * if this is NULL
- * @nr_partitions: the number of partitions in the previous entry
- * @options: different options for the driver
- * @width: bus width
- * @bank: default bank
- * @select_bank: callback to select a certain bank, this is
- * platform-specific. If the controller only supports one bank
- * this may be set to NULL
- */
-struct fsmc_nand_platform_data {
- struct fsmc_nand_timings *nand_timings;
- struct mtd_partition *partitions;
- unsigned int nr_partitions;
- unsigned int options;
- unsigned int width;
- unsigned int bank;
-
- enum access_mode mode;
-
- void (*select_bank)(uint32_t bank, uint32_t busw);
-
- /* priv structures for dma accesses */
- void *read_dma_priv;
- void *write_dma_priv;
-};
-
-extern int __init fsmc_nor_init(struct platform_device *pdev,
- unsigned long base, uint32_t bank, uint32_t width);
-extern void __init fsmc_init_board_info(struct platform_device *pdev,
- struct mtd_partition *partitions, unsigned int nr_partitions,
- unsigned int width);
-
-#endif /* __MTD_FSMC_H */
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h
index 2c456054fded..6bd0b30d5935 100644
--- a/include/linux/mtd/gen_probe.h
+++ b/include/linux/mtd/gen_probe.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2001 Red Hat UK Limited
* Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
new file mode 100644
index 000000000000..bb6b7121a542
--- /dev/null
+++ b/include/linux/mtd/hyperbus.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef __LINUX_MTD_HYPERBUS_H__
+#define __LINUX_MTD_HYPERBUS_H__
+
+#include <linux/mtd/map.h>
+
+/* HyperBus command bits */
+#define HYPERBUS_RW 0x80 /* R/W# */
+#define HYPERBUS_RW_WRITE 0
+#define HYPERBUS_RW_READ 0x80
+#define HYPERBUS_AS 0x40 /* Address Space */
+#define HYPERBUS_AS_MEM 0
+#define HYPERBUS_AS_REG 0x40
+#define HYPERBUS_BT 0x20 /* Burst Type */
+#define HYPERBUS_BT_WRAPPED 0
+#define HYPERBUS_BT_LINEAR 0x20
+
+enum hyperbus_memtype {
+ HYPERFLASH,
+ HYPERRAM,
+};
+
+/**
+ * struct hyperbus_device - struct representing HyperBus slave device
+ * @map: map_info struct for accessing MMIO HyperBus flash memory
+ * @np: pointer to HyperBus slave device node
+ * @mtd: pointer to MTD struct
+ * @ctlr: pointer to HyperBus controller struct
+ * @memtype: type of memory device: HyperFlash or HyperRAM
+ * @priv: pointer to controller specific per device private data
+ */
+
+struct hyperbus_device {
+ struct map_info map;
+ struct device_node *np;
+ struct mtd_info *mtd;
+ struct hyperbus_ctlr *ctlr;
+ enum hyperbus_memtype memtype;
+ void *priv;
+};
+
+/**
+ * struct hyperbus_ops - struct representing custom HyperBus operations
+ * @read16: read 16 bit of data from flash in a single burst. Used to read
+ * from non default address space, such as ID/CFI space
+ * @write16: write 16 bit of data to flash in a single burst. Used to
+ * send cmd to flash or write single 16 bit word at a time.
+ * @copy_from: copy data from flash memory
+ * @copy_to: copy data to flash memory
+ * @calibrate: calibrate HyperBus controller
+ */
+
+struct hyperbus_ops {
+ u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
+ void (*write16)(struct hyperbus_device *hbdev,
+ unsigned long addr, u16 val);
+ void (*copy_from)(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len);
+ void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
+ const void *from, ssize_t len);
+ int (*calibrate)(struct hyperbus_device *dev);
+};
+
+/**
+ * struct hyperbus_ctlr - struct representing HyperBus controller
+ * @dev: pointer to HyperBus controller device
+ * @calibrated: flag to indicate ctlr calibration sequence is complete
+ * @ops: HyperBus controller ops
+ */
+struct hyperbus_ctlr {
+ struct device *dev;
+ bool calibrated;
+
+ const struct hyperbus_ops *ops;
+};
+
+/**
+ * hyperbus_register_device - probe and register a HyperBus slave memory device
+ * @hbdev: hyperbus_device struct with dev, np and ctlr field populated
+ *
+ * Return: 0 for success, others for failure.
+ */
+int hyperbus_register_device(struct hyperbus_device *hbdev);
+
+/**
+ * hyperbus_unregister_device - deregister HyperBus slave memory device
+ * @hbdev: hyperbus_device to be unregistered
+ */
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
+
+#endif /* __LINUX_MTD_HYPERBUS_H__ */
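
A controller driver fills in hyperbus_ops and hyperbus_ctlr, populates a hyperbus_device, and hands it to hyperbus_register_device(), which maps the device node's register region and probes the flash through the map/CFI layer. The sketch below assumes a simple memory-mapped controller whose 16-bit accesses go straight through readw()/writew(); every name prefixed example_ is hypothetical, and error handling plus platform_driver registration are trimmed.

#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mtd/hyperbus.h>

struct example_hbmc {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
};

static u16 example_read16(struct hyperbus_device *hbdev, unsigned long addr)
{
	return readw(hbdev->map.virt + addr);
}

static void example_write16(struct hyperbus_device *hbdev, unsigned long addr,
			    u16 val)
{
	writew(val, hbdev->map.virt + addr);
}

static const struct hyperbus_ops example_hb_ops = {
	.read16		= example_read16,
	.write16	= example_write16,
	/* copy_from/copy_to/calibrate are left NULL in this sketch; the core
	 * falls back to its default MMIO copy helpers and CFI calibration. */
};

static int example_hbmc_probe(struct platform_device *pdev)
{
	struct example_hbmc *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ctlr.dev = &pdev->dev;
	priv->ctlr.ops = &example_hb_ops;

	priv->hbdev.ctlr = &priv->ctlr;
	priv->hbdev.memtype = HYPERFLASH;
	priv->hbdev.np = of_get_next_child(pdev->dev.of_node, NULL);

	return hyperbus_register_device(&priv->hbdev);
}
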
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 02cd5f9b79b8..fdfff87066a9 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* inftl.h -- defines to support the Inverse NAND Flash Translation Layer
*
@@ -44,7 +45,6 @@ struct INFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 000000000000..56047a4e54c9
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all JEDEC related definitions
+ */
+
+#ifndef __LINUX_MTD_JEDEC_H
+#define __LINUX_MTD_JEDEC_H
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_adl;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+#endif /* __LINUX_MTD_JEDEC_H */
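
The parameter page described by nand_jedec_params is read straight from the device, so multi-byte fields stay little-endian and the page begins with the 'J' 'E' 'S' 'D' signature noted above. A minimal sanity check might look like the sketch below (hypothetical helper name; a complete check would also validate the trailing crc field over the page).

#include <linux/printk.h>
#include <linux/string.h>
#include <linux/mtd/jedec.h>

static bool example_jedec_param_valid(const struct nand_jedec_params *p)
{
	/* Signature bytes must read 'J' 'E' 'S' 'D' */
	if (memcmp(p->sig, "JESD", 4))
		return false;

	/* Multi-byte fields are little-endian as stored on the chip */
	pr_info("JEDEC NAND: %u bytes/page, %u pages/block, %u blocks/LUN\n",
		le32_to_cpu(p->byte_per_page),
		le32_to_cpu(p->pages_per_block),
		le32_to_cpu(p->blocks_per_lun));

	return true;
}
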
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
deleted file mode 100644
index e94b8e128074..000000000000
--- a/include/linux/mtd/latch-addr-flash.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __LATCH_ADDR_FLASH__
-#define __LATCH_ADDR_FLASH__
-
-struct map_info;
-struct mtd_partition;
-
-struct latch_addr_flash_data {
- unsigned int width;
- unsigned int size;
-
- int (*init)(void *data, int cs);
- void (*done)(void *data);
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- unsigned int nr_parts;
- struct mtd_partition *parts;
-};
-
-#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
index d91b1e35631e..35e971be0950 100644
--- a/include/linux/mtd/lpc32xx_mlc.h
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for LPC32xx SoC MLC NAND controller
*
* Copyright © 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_LPC32XX_MLC_H
@@ -14,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_mlc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_MLC_H */
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
index 1169548a1535..a044b806566b 100644
--- a/include/linux/mtd/lpc32xx_slc.h
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for LPC32xx SoC SLC NAND controller
*
* Copyright © 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_LPC32XX_SLC_H
@@ -14,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_slc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_SLC_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 4b02512e421c..75b0b2abc880 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
/* Overhauled routines for dealing with different mmap regions of flash */
@@ -22,16 +8,17 @@
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
-
-#include <asm/unaligned.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#include <asm/barrier.h>
+struct device_node;
+struct module;
+
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
@@ -77,7 +64,7 @@
/* ensure we never evaluate anything shorted than an unsigned long
* to zero, and ensure we'll never miss the end of an comparison (bjd) */
-#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long))
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
@@ -122,18 +109,13 @@
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) map_calc_words(map)
-# else
-# define map_bankwidth(map) 32
-# define map_bankwidth_is_large(map) (1)
-# define map_words(map) map_calc_words(map)
-# endif
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
@@ -142,7 +124,9 @@
#endif
#ifndef map_bankwidth
+#ifdef CONFIG_MTD
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
+#endif
static inline int map_bankwidth(void *map)
{
BUG();
@@ -181,7 +165,7 @@ static inline int map_bankwidth_supported(int w)
}
}
-#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
typedef union {
unsigned long x[MAX_MAP_LONGS];
@@ -205,6 +189,7 @@ typedef union {
of living.
*/
+struct mtd_chip_driver;
struct map_info {
const char *name;
unsigned long size;
@@ -238,8 +223,11 @@ struct map_info {
If there is no cache to care about this can be set to NULL. */
void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
- /* set_vpp() must handle being reentered -- enable, enable, disable
- must leave it enabled. */
+ /* This will be called with 1 as parameter when the first map user
+ * needs VPP, and called with 0 when the last user exits. The map
+ * core maintains a reference counter, and assumes that VPP is a
+ * global resource applying to all mapped flash chips on the system.
+ */
void (*set_vpp)(struct map_info *, int);
unsigned long pfow_base;
@@ -264,68 +252,73 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *);
struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);
-#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
-#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
#define INVALIDATE_CACHED_RANGE(map, from, size) \
- do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
-
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
- for (i=0; i<map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = val1.x[i] & val2.x[i];
- }
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = val1.x[i] & ~val2.x[i];
- }
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = val1.x[i] | val2.x[i];
- }
- return r;
-}
-
-#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i=0; i<map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
- return 0;
-}
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
+
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
@@ -355,17 +348,19 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
if (map_bankwidth_is_large(map)) {
char *dest = (char *)&orig;
+
memcpy(dest+start, buf, len);
} else {
- for (i=start; i < start+len; i++) {
+ for (i = start; i < start+len; i++) {
int bitpos;
+
#ifdef __LITTLE_ENDIAN
- bitpos = i*8;
+ bitpos = i * 8;
#else /* __BIG_ENDIAN */
- bitpos = (map_bankwidth(map)-1-i)*8;
+ bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
orig.x[0] &= ~(0xff << bitpos);
- orig.x[0] |= buf[i-start] << bitpos;
+ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
}
}
return orig;
@@ -384,9 +379,10 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
- r.x[0] = (1 << bw) - 1;
+
+ r.x[0] = (1UL << bw) - 1;
} else {
- for (i=0; i<map_words(map); i++)
+ for (i = 0; i < map_words(map); i++)
r.x[i] = ~0UL;
}
return r;
@@ -407,7 +403,7 @@ static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
r.x[0] = __raw_readq(map->virt + ofs);
#endif
else if (map_bankwidth_is_large(map))
- memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+ memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
else
BUG();
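
The map_word helpers that used to be inline functions are now statement-expression macros, but their usage is unchanged. For instance, a blank check over a mapped NOR region can still be written with map_read(), map_word_ff() and map_word_equal(); the sketch below uses a hypothetical function name and skips alignment and bounds checking.

#include <linux/mtd/map.h>

/* Return 1 if every bus-width word in [ofs, ofs + len) reads back all 0xFF */
static int example_range_is_erased(struct map_info *map, unsigned long ofs,
				   size_t len)
{
	map_word ff = map_word_ff(map);

	while (len >= map_bankwidth(map)) {
		map_word val = map_read(map, ofs);

		if (!map_word_equal(map, val, ff))
			return 0;

		ofs += map_bankwidth(map);
		len -= map_bankwidth(map);
	}

	return 1;
}
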
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a5cf4e8d6818..8d10d9d2e830 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_MTD_H__
@@ -22,42 +8,29 @@
#include <linux/types.h>
#include <linux/uio.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/nvmem-provider.h>
#include <mtd/mtd-abi.h>
#include <asm/div64.h>
-#define MTD_CHAR_MAJOR 90
-#define MTD_BLOCK_MAJOR 31
-
-#define MTD_ERASE_PENDING 0x01
-#define MTD_ERASING 0x02
-#define MTD_ERASE_SUSPEND 0x04
-#define MTD_ERASE_DONE 0x08
-#define MTD_ERASE_FAILED 0x10
-
#define MTD_FAIL_ADDR_UNKNOWN -1LL
+struct mtd_info;
+
/*
* If the erase fails, fail_addr might indicate exactly which block failed. If
* fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
* or was not specific to any particular block.
*/
struct erase_info {
- struct mtd_info *mtd;
uint64_t addr;
uint64_t len;
uint64_t fail_addr;
- u_long time;
- u_long retries;
- unsigned dev;
- unsigned cell;
- void (*callback) (struct erase_info *self);
- u_long priv;
- u_char state;
- struct erase_info *next;
};
struct mtd_erase_region_info {
@@ -67,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -82,9 +61,11 @@ struct mtd_erase_region_info {
* @datbuf: data buffer - if NULL only oob data are read/written
* @oobbuf: oob data buffer
*
- * Note, it is allowed to read more than one OOB area at one go, but not write.
- * The interface assumes that the OOB write requests program only one page's
- * OOB area.
+ * Note, some MTD drivers do not allow you to write more than one OOB area at
+ * one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. If you want to make your implementation portable on all kind of MTD
+ * devices you should split the write request into several sub-requests when the
+ * request crosses a page boundary.
*/
struct mtd_oob_ops {
unsigned int mode;
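
As the kernel-doc note just above says, portable code should never let a single OOB write cross a page boundary. A driver-agnostic way to honour that is to chop the request into one mtd_write_oob() call per page, as in this sketch (hypothetical helper; error paths trimmed to the essentials).

#include <linux/minmax.h>
#include <linux/mtd/mtd.h>

static int example_write_oob_split(struct mtd_info *mtd, loff_t to,
				   u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = { .mode = MTD_OPS_AUTO_OOB };
	int ret;

	while (ooblen) {
		ops.datbuf = NULL;	/* OOB-only write */
		ops.oobbuf = oob;
		ops.ooblen = min_t(size_t, ooblen, mtd->oobavail);

		ret = mtd_write_oob(mtd, to, &ops);
		if (ret)
			return ret;

		oob += ops.oobretlen;
		ooblen -= ops.oobretlen;
		to += mtd->writesize;	/* advance to the next page */
	}

	return 0;
}
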
@@ -95,25 +76,165 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
+};
+
+/**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
+ */
+struct mtd_oob_region {
+ u32 offset;
+ u32 length;
};
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/*
- * Internal ECC layout control structure. For historical reasons, there is a
- * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
- * for export to user-space via the ECCGETLAYOUT ioctl.
- * nand_ecclayout should be expandable in the future simply by the above macros.
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * ECC sections.
+ * @free: function returning a free region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * free sections.
*/
-struct nand_ecclayout {
- __u32 eccbytes;
- __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
- __u32 oobavail;
- struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
+struct mtd_ooblayout_ops {
+ int (*ecc)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+ int (*free)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
+};
+
+/**
+ * struct mtd_pairing_info - page pairing information
+ *
+ * @pair: pair id
+ * @group: group id
+ *
+ * The term "pair" is used here, even though TLC NANDs might group pages by 3
+ * (3 bits in a single cell). A pair should regroup all pages that are sharing
+ * the same cell. Pairs are then indexed in ascending order.
+ *
+ * @group is defining the position of a page in a given pair. It can also be
+ * seen as the bit position in the cell: page attached to bit 0 belongs to
+ * group 0, page attached to bit 1 belongs to group 1, etc.
+ *
+ * Example:
+ * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
+ *
+ * group-0 group-1
+ *
+ * pair-0 page-0 page-4
+ * pair-1 page-1 page-5
+ * pair-2 page-2 page-8
+ * ...
+ * pair-127 page-251 page-255
+ *
+ *
+ * Note that the "group" and "pair" terms were extracted from Samsung and
+ * Hynix datasheets, and might be referenced under other names in other
+ * datasheets (Micron is describing this concept as "shared pages").
+ */
+struct mtd_pairing_info {
+ int pair;
+ int group;
+};
+
+/**
+ * struct mtd_pairing_scheme - page pairing scheme description
+ *
+ * @ngroups: number of groups. Should be related to the number of bits
+ * per cell.
+ * @get_info: converts a write-unit (page number within an erase block) into
+ * mtd_pairing information (pair + group). This function should
+ * fill the info parameter based on the wunit index or return
+ * -EINVAL if the wunit parameter is invalid.
+ * @get_wunit: converts pairing information into a write-unit (page) number.
+ * This function should return the wunit index pointed by the
+ * pairing information described in the info argument. It should
+ * return -EINVAL, if there's no wunit corresponding to the
+ * passed pairing information.
+ *
+ * See mtd_pairing_info documentation for a detailed explanation of the
+ * pair and group concepts.
+ *
+ * The mtd_pairing_scheme structure provides a generic solution to represent
+ * NAND page pairing scheme. Instead of exposing two big tables to do the
+ * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
+ * implement the ->get_info() and ->get_wunit() functions.
+ *
+ * MTD users will then be able to query these information by using the
+ * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
+ *
+ * @ngroups is here to help MTD users iterating over all the pages in a
+ * given pair. This value can be retrieved by MTD users using the
+ * mtd_pairing_groups() helper.
+ *
+ * Examples are given in the mtd_pairing_info_to_wunit() and
+ * mtd_wunit_to_pairing_info() documentation.
+ */
+struct mtd_pairing_scheme {
+ int ngroups;
+ int (*get_info)(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info);
+ int (*get_wunit)(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info);
};
struct module; /* only needed for owner field in mtd_info */
+/**
+ * struct mtd_debug_info - debugging information for an MTD device.
+ *
+ * @dfs_dir: direntry object of the MTD device debugfs directory
+ */
+struct mtd_debug_info {
+ struct dentry *dfs_dir;
+};
+
+/**
+ * struct mtd_part - MTD partition specific fields
+ *
+ * @node: list node used to add an MTD partition to the parent partition list
+ * @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
+ * @flags: original flags (before the mtdpart logic decided to tweak them based
+ * on flash constraints, like eraseblock/pagesize alignment)
+ *
+ * This struct is embedded in mtd_info and contains partition-specific
+ * properties/fields.
+ */
+struct mtd_part {
+ struct list_head node;
+ u64 offset;
+ u64 size;
+ u32 flags;
+};
+
+/**
+ * struct mtd_master - MTD master specific fields
+ *
+ * @partitions_lock: lock protecting accesses to the partition list. Protects
+ * not only the master partition list, but also all
+ * sub-partitions.
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
+ *
+ * This struct is embedded in mtd_info and contains master-specific
+ * properties/fields. The master is the root MTD device from the MTD partition
+ * point of view.
+ */
+struct mtd_master {
+ struct mutex partitions_lock;
+ struct mutex chrdev_lock;
+ unsigned int suspended : 1;
+};
+
struct mtd_info {
u_char type;
uint32_t flags;
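
The mtd_ooblayout_ops callbacks replace the old static nand_ecclayout tables: instead of exporting fixed eccpos arrays, a driver answers per-section queries and returns -ERANGE once the section index runs past its last ECC or free region. Here is a sketch with a single ECC region at the end of the OOB area; the 8-byte ECC size and the 2-byte bad-block-marker reservation are illustrative. A driver would typically attach such an ops struct with mtd_set_ooblayout().

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

#define EXAMPLE_ECC_BYTES	8	/* illustrative ECC footprint per page */

static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;	/* only one ECC section in this layout */

	region->offset = mtd->oobsize - EXAMPLE_ECC_BYTES;
	region->length = EXAMPLE_ECC_BYTES;
	return 0;
}

static int example_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;	/* only one free section in this layout */

	/* Skip the first two bytes, conventionally the bad-block marker */
	region->offset = 2;
	region->length = mtd->oobsize - EXAMPLE_ECC_BYTES - 2;
	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
	.ecc	= example_ooblayout_ecc,
	.free	= example_ooblayout_free,
};
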
@@ -166,12 +287,18 @@ struct mtd_info {
*/
unsigned int bitflip_threshold;
- // Kernel-only stuff starts here.
+ /* Kernel-only stuff starts here. */
const char *name;
int index;
- /* ECC layout structure pointer - read only! */
- struct nand_ecclayout *ecclayout;
+ /* OOB layout description */
+ const struct mtd_ooblayout_ops *ooblayout;
+
+ /* NAND pairing scheme, only provided for MLC/TLC NANDs */
+ const struct mtd_pairing_scheme *pairing;
+
+ /* the ecc step size. */
+ unsigned int ecc_step_size;
/* max number of correctible bit errors per ecc step */
unsigned int ecc_strength;
@@ -190,10 +317,6 @@ struct mtd_info {
int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
- unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags);
int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
@@ -204,28 +327,34 @@ struct mtd_info {
struct mtd_oob_ops *ops);
int (*_write_oob) (struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
- int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
+ int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf);
int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf);
- int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
+ int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf);
int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf);
int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen, u_char *buf);
+ size_t len, size_t *retlen,
+ const u_char *buf);
int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len);
+ int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
void (*_sync) (struct mtd_info *mtd);
int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
int (*_suspend) (struct mtd_info *mtd);
void (*_resume) (struct mtd_info *mtd);
+ void (*_reboot) (struct mtd_info *mtd);
/*
* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
@@ -233,10 +362,11 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
- /* Backing device capabilities for this device
- * - provides mmap capabilities
+ /*
+ * Flag indicating a panic write; low-level drivers can take appropriate
+ * action if required to ensure writes go through.
*/
- struct backing_dev_info *backing_dev_info;
+ bool oops_panic_write;
struct notifier_block reboot_notifier; /* default mode before reboot */
@@ -249,9 +379,123 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
+ struct mtd_debug_info dbg;
+ struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
+
+ /*
+ * Parent device from the MTD partition point of view.
+ *
+ * MTD masters do not have any parent, MTD partitions do. The parent
+ * MTD device can itself be a partition.
+ */
+ struct mtd_info *parent;
+
+ /* List of partitions attached to this MTD device */
+ struct list_head partitions;
+
+ struct mtd_part part;
+ struct mtd_master master;
};
+static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
+{
+ while (mtd->parent)
+ mtd = mtd->parent;
+
+ return mtd;
+}
+
+static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
+{
+ while (mtd->parent) {
+ ofs += mtd->part.offset;
+ mtd = mtd->parent;
+ }
+
+ return ofs;
+}
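
A quick sketch of how these two helpers compose for nested partitions (the offsets below are made-up values):

/*
 * Hypothetical layout: master -> partition A at offset 0x400000
 *                              -> partition B at offset 0x100000 inside A.
 *
 * For the mtd_info describing B:
 *   mtd_get_master(b)             == master
 *   mtd_get_master_ofs(b, 0x1000) == 0x100000 + 0x400000 + 0x1000 = 0x501000
 */
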
+
+static inline bool mtd_is_partition(const struct mtd_info *mtd)
+{
+ return mtd->parent;
+}
+
+static inline bool mtd_has_partitions(const struct mtd_info *mtd)
+{
+ return !list_empty(&mtd->partitions);
+}
+
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion);
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
+
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
+ const struct mtd_ooblayout_ops *ooblayout)
+{
+ mtd->ooblayout = ooblayout;
+}
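
For illustration, a driver providing a trivial single-section OOB layout (two free bytes followed by ECC) might define ops like the following sketch. It assumes the struct mtd_ooblayout_ops and struct mtd_oob_region definitions from earlier in this header; the example_* names are invented:

static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 2;
	oobregion->length = mtd->oobsize - 2;
	return 0;
}

static int example_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = 2;
	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
	.ecc = example_ooblayout_ecc,
	.free = example_ooblayout_free,
};

/* ...then, at probe time: mtd_set_ooblayout(mtd, &example_ooblayout_ops); */
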
+
+static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
+ const struct mtd_pairing_scheme *pairing)
+{
+ mtd->pairing = pairing;
+}
+
+static inline void mtd_set_of_node(struct mtd_info *mtd,
+ struct device_node *np)
+{
+ mtd->dev.of_node = np;
+ if (!mtd->name)
+ of_property_read_string(np, "label", &mtd->name);
+}
+
+static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
+{
+ return dev_of_node(&mtd->dev);
+}
+
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+{
+ return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
+}
+
+static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
+ loff_t ofs, size_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_max_bad_blocks)
+ return -ENOTSUPP;
+
+ if (mtd->size < (len + ofs) || ofs < 0)
+ return -EINVAL;
+
+ return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
+}
+
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info);
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info);
+int mtd_pairing_groups(struct mtd_info *mtd);
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys);
@@ -266,54 +510,66 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf);
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
-static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return mtd->_write_oob(mtd, to, ops);
-}
-
-int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
-int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, u_char *buf);
+ size_t *retlen, const u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->_sync)
- mtd->_sync(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (master->_sync)
+ master->_sync(master);
}
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (master->master.suspended)
+ return 0;
+
+ ret = master->_suspend ? master->_suspend(master) : 0;
+ if (ret)
+ return ret;
+
+ master->master.suspended = 1;
+ return 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->_resume)
- mtd->_resume(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->master.suspended)
+ return;
+
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -331,6 +587,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
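
A worked example with made-up numbers, to show how the two adjustments interact:

/*
 * With erasesize = 0x20000 and an incoming request of addr = 0x31000,
 * len = 0x2000:
 *   1st pass: mod = 0x11000 -> addr = 0x20000, len = 0x13000
 *   2nd pass: (addr + len) % erasesize = 0x13000
 *             -> len += 0x20000 - 0x13000 -> len = 0x20000
 * i.e. the request now covers exactly one full eraseblock.
 */
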
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
@@ -346,14 +630,42 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->writesize);
}
+static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
+}
+
+static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
+{
+ return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
+}
+
+static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
+ int wunit)
+{
+ return base + (wunit * mtd->writesize);
+}
+
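A quick numeric sketch of the write-unit helpers (the geometry is illustrative):

/*
 * With erasesize = 0x20000 (128 KiB) and writesize = 0x800 (2 KiB):
 *   mtd_wunit_per_eb(mtd)                = 64
 *   mtd_offset_to_wunit(mtd, 0x21000)    = (0x21000 % 0x20000) / 0x800 = 2
 *   mtd_wunit_to_offset(mtd, 0x20000, 2) = 0x20000 + 2 * 0x800 = 0x21000
 */
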
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->_read_oob && mtd->_write_oob;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->_read_oob && master->_write_oob;
+}
+
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+ return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->_block_isbad;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return !!master->_block_isbad;
}
/* Kernel-side ioctl definitions */
@@ -372,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
@@ -387,8 +700,6 @@ extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
-void mtd_erase_callback(struct erase_info *instr);
-
static inline int mtd_is_bitflip(int err) {
return err == -EUCLEAN;
}
@@ -401,4 +712,13 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void);
+#else
+static inline bool mtd_check_expert_analysis_mode(void) { return false; }
+#endif
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
index 68891313875d..ee8f95643f9b 100644
--- a/include/linux/mtd/mtdram.h
+++ b/include/linux/mtd/mtdram.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MTD_MTDRAM_H__
#define __MTD_MTDRAM_H__
#include <linux/mtd/mtd.h>
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
- unsigned long size, char *name);
+ unsigned long size, const char *name);
#endif /* __MTD_MTDRAM_H__ */
diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h
new file mode 100644
index 000000000000..0e48c36e6ca0
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mtk.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
new file mode 100644
index 000000000000..0da4b2999576
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Macronix
+ * Author: Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Header for the Macronix external ECC engine.
+ */
+
+#ifndef __MTD_NAND_ECC_MXIC_H__
+#define __MTD_NAND_ECC_MXIC_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+struct mxic_ecc_engine;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+
+const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap);
+
+#else /* !CONFIG_MTD_NAND_ECC_MXIC */
+
+static inline const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return NULL;
+}
+
+static inline struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {}
+
+static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction,
+ dma_addr_t dirmap)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_MXIC */
+
+#endif /* __MTD_NAND_ECC_MXIC_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-bch.h b/include/linux/mtd/nand-ecc-sw-bch.h
new file mode 100644
index 000000000000..9da9969505a8
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-bch.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_BCH_H__
+#define __MTD_NAND_ECC_SW_BCH_H__
+
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_ecc_sw_bch_conf - private software BCH ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @bch: BCH control structure
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_ecc_sw_bch_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ struct bch_control *bch;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code);
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
+
+static inline int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+#endif /* __MTD_NAND_ECC_SW_BCH_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h
new file mode 100644
index 000000000000..c6c71894c575
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is the header for the NAND Hamming ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_HAMMING_H__
+#define __MTD_NAND_ECC_SW_HAMMING_H__
+
+#include <linux/mtd/nand.h>
+
+/**
+ * struct nand_ecc_sw_hamming_conf - private software Hamming ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @sm_order: Smart Media special ordering
+ */
+struct nand_ecc_sw_hamming_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ unsigned int sm_order;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand);
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order);
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code);
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order);
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+static inline int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) {}
+
+static inline int ecc_sw_hamming_calculate(const unsigned char *buf,
+ unsigned int step_size,
+ unsigned char *code, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ecc_sw_hamming_correct(unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc,
+ unsigned int step_size, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#endif /* __MTD_NAND_ECC_SW_HAMMING_H__ */
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
index 51534e50f7fc..7ab51bc4a380 100644
--- a/include/linux/mtd/nand-gpio.h
+++ b/include/linux/mtd/nand-gpio.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MTD_NAND_GPIO_H
#define __LINUX_MTD_NAND_GPIO_H
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
struct gpio_nand_platdata {
- int gpio_nce;
- int gpio_nwp;
- int gpio_cle;
- int gpio_ale;
- int gpio_rdy;
void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
struct mtd_partition *parts;
unsigned int num_parts;
diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
new file mode 100644
index 000000000000..e8201d1b7cf9
--- /dev/null
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * QCOM QPIC common APIs header file
+ *
+ * Copyright (c) 2023 Qualcomm Inc.
+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com>
+ *
+ */
+#ifndef __MTD_NAND_QPIC_COMMON_H__
+#define __MTD_NAND_QPIC_COMMON_H__
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_AUTO_STATUS_EN 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+/* dummy register offsets, used by qcom_write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
+#define CW_PER_PAGE_MASK GENMASK(8, 6)
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
+#define STATUS_BFR_READ BIT(30)
+#define SET_RD_MODE_AFTER_STATUS BIT(31)
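
These GENMASK()-based field masks are meant for the <linux/bitfield.h> helpers; a hypothetical packing of a CFG0 value could look like this (the field values are illustrative only):

u32 cfg0 = FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
	   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 4) |
	   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5);
unsigned int ud_size = FIELD_GET(UD_SIZE_BYTES_MASK, cfg0);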
+
+/* NAND_DEVn_CFG1 bits */
+#define DEV0_CFG1_ECC_DISABLE BIT(0)
+#define WIDE_FLASH BIT(1)
+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
+#define CS_ACTIVE_BSY BIT(5)
+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
+#define ENABLE_BCH_ECC BIT(27)
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE BIT(0)
+#define ECC_SW_RESET BIT(1)
+#define ECC_MODE_MASK GENMASK(5, 4)
+#define ECC_MODE_4BIT 0
+#define ECC_MODE_8BIT 1
+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+#define ECC_FORCE_CLK_OPEN BIT(30)
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR_MASK GENMASK(7, 0)
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS_MASK GENMASK(9, 0)
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET_MASK GENMASK(9, 0)
+#define READ_LOCATION_SIZE_MASK GENMASK(25, 16)
+#define READ_LOCATION_LAST_MASK BIT(31)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
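
Decoding the version register with these masks is a plain mask-and-shift; a sketch, assuming a mapped controller register base in `base`:

u32 ver = readl(base + NAND_VERSION);
unsigned int major = (ver & NAND_VERSION_MAJOR_MASK) >> NAND_VERSION_MAJOR_SHIFT;
unsigned int minor = (ver & NAND_VERSION_MINOR_MASK) >> NAND_VERSION_MINOR_SHIFT;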
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * The NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * The driver calls these chunks 'steps' or 'codewords' interchangeably.
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * The largest page size we support is 8K; such a page has 16 steps/codewords
+ * of 512 bytes each.
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * The erased codeword status is used twice in a single transfer, so this
+ * flag determines the current value of the erased codeword status register.
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+#define MAX_ADDRESS_CYCLE 5
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
+ * @bam_ce_nitems - the number of elements in the @bam_ce array
+ * @cmd_sgl_nitems - the number of elements in the @cmd_sgl array
+ * @data_sgl_nitems - the number of elements in the @data_sgl array
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce marking the first command element of
+ * the current sgl; used for size calculation for the
+ * current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+
+ unsigned int bam_ce_nitems;
+ unsigned int cmd_sgl_nitems;
+ unsigned int data_sgl_nitems;
+
+ struct_group(bam_positions,
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+
+ );
+};
+
+/*
+ * This data type corresponds to the NAND DMA descriptor
+ * @dma_desc - low level DMA engine descriptor
+ * @node - list node linking this desc_info into the controller's desc_list
+ *
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dir - DMA transfer direction
+ */
+struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct list_head node;
+
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ enum dma_data_direction dir;
+};
+
+/*
+ * Holds the current register values that we want to write. Acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
+ __le32 spi_cfg;
+ __le32 num_addr_cycle;
+ __le32 busy_wait_cnt;
+ __le32 flash_feature;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @dev: parent device
+ *
+ * @base: MMIO base
+ *
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ * @iomacro_clk: io macro clock
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes; contains the register values to be
+ * written to the controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @qspi: qpic spi structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ *
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @reg_read_dma: contains dma address for register read buffer
+ *
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @max_cwperpage: maximum number of QPIC codewords required,
+ * calculated from the page size of all connected
+ * NAND devices
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select the correct number of codewords
+ * while reading status
+ */
+struct qcom_nand_controller {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller *controller;
+ struct qpic_spi_nand *qspi;
+ struct list_head host_list;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ int reg_read_pos;
+
+ u32 cmd1, vld;
+ bool exec_opwrite;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties which varies
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @supports_bam - whether NAND controller is using BAM
+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+ * @qpic_version2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+ u32 bam_offset;
+ bool supports_bam;
+ bool nandc_part_of_qpic;
+ bool qpic_version2;
+ bool use_codeword_fixup;
+};
+
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_qpic_bam_dma_done(void *data);
+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags);
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size, unsigned int flags);
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags);
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
+ const void *vaddr, int size, bool flow_control);
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
+ unsigned int flags);
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
+ int num_regs, unsigned int flags);
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_submit_descs(struct qcom_nand_controller *nandc);
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
+#endif
+
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ab6363443ce8..09c8c93e4dba 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1,727 +1,1144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/mtd/nand.h
+ * Copyright 2017 - Free Electrons
*
- * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
- * Steven J. Hill <sjhill@realitydiluted.com>
- * Thomas Gleixner <tglx@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Info:
- * Contains standard defines and IDs for NAND flash devices
- *
- * Changelog:
- * See git changelog.
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
*/
+
#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H
-#include <linux/wait.h>
-#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/flashchip.h>
-#include <linux/mtd/bbm.h>
-struct mtd_info;
-struct nand_flash_dev;
-/* Scan and identify a NAND device */
-extern int nand_scan(struct mtd_info *mtd, int max_chips);
-/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
- */
-extern int nand_scan_ident(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *table);
-extern int nand_scan_tail(struct mtd_info *mtd);
+struct nand_device;
-/* Free resources held by the NAND device */
-extern void nand_release(struct mtd_info *mtd);
-
-/* Internal helper for board drivers which need to override command function */
-extern void nand_wait_ready(struct mtd_info *mtd);
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+ unsigned int bits_per_cell;
+ unsigned int pagesize;
+ unsigned int oobsize;
+ unsigned int pages_per_eraseblock;
+ unsigned int eraseblocks_per_lun;
+ unsigned int max_bad_eraseblocks_per_lun;
+ unsigned int planes_per_lun;
+ unsigned int luns_per_target;
+ unsigned int ntargets;
+};
-/* locks all blocks present in the device */
-extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
+ { \
+ .bits_per_cell = (bpc), \
+ .pagesize = (ps), \
+ .oobsize = (os), \
+ .pages_per_eraseblock = (ppe), \
+ .eraseblocks_per_lun = (epl), \
+ .max_bad_eraseblocks_per_lun = (mbb), \
+ .planes_per_lun = (ppl), \
+ .luns_per_target = (lpt), \
+ .ntargets = (nt), \
+ }
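
For illustration, a hypothetical 1 Gbit SLC chip with 2 KiB pages, 64 B OOB, 64 pages per eraseblock, 1024 eraseblocks per LUN, a single plane/LUN/target and a (made-up) maximum of 20 bad blocks per LUN would be described as:

/*
 * NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1)
 *   -> 2048 * 64 * 1024 bytes = 128 MiB of main storage
 */
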
-/* unlocks specified locked blocks */
-extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ * into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ * address
+ */
+struct nand_row_converter {
+ unsigned int lun_addr_shift;
+ unsigned int eraseblock_addr_shift;
+};
-/* The maximum number of NAND chips in an array */
-#define NAND_MAX_CHIPS 8
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and generate a row address to pass to the device.
+ */
+struct nand_pos {
+ unsigned int target;
+ unsigned int lun;
+ unsigned int plane;
+ unsigned int eraseblock;
+ unsigned int page;
+};
-/*
- * This constant declares the max. oobsize / page, which
- * is supported now. If you add a chip with bigger oobsize/page
- * adjust this accordingly.
+/**
+ * enum nand_page_io_req_type - Direction of an I/O request
+ * @NAND_PAGE_READ: from the chip, to the controller
+ * @NAND_PAGE_WRITE: from the controller, to the chip
*/
-#define NAND_MAX_OOBSIZE 640
-#define NAND_MAX_PAGESIZE 8192
+enum nand_page_io_req_type {
+ NAND_PAGE_READ = 0,
+ NAND_PAGE_WRITE,
+};
-/*
- * Constants for hardware specific CLE/ALE/NCE function
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @type: the type of page I/O: read or write
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX modes
+ * @continuous: no need to start over the operation at the end of each page;
+ * the NAND device will automatically prepare the next one
*
- * These are bits which can be or'ed to set/clear multiple
- * bits in one go.
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way all the relevant information is already formatted and specific NAND
+ * layers can focus on translating it into specific commands/operations.
*/
-/* Select the chip by setting nCE to low */
-#define NAND_NCE 0x01
-/* Select the command latch by setting CLE to high */
-#define NAND_CLE 0x02
-/* Select the address latch by setting ALE to high */
-#define NAND_ALE 0x04
+struct nand_page_io_req {
+ enum nand_page_io_req_type type;
+ struct nand_pos pos;
+ unsigned int dataoffs;
+ unsigned int datalen;
+ union {
+ const void *out;
+ void *in;
+ } databuf;
+ unsigned int ooboffs;
+ unsigned int ooblen;
+ union {
+ const void *out;
+ void *in;
+ } oobbuf;
+ int mode;
+ bool continuous;
+};
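
A sketch of how a sub-layer might fill such a request for a plain single-page read; `databuf`/`oobbuf` are assumed pre-allocated buffers and all values are illustrative:

struct nand_page_io_req req = {
	.type = NAND_PAGE_READ,
	.pos = { .target = 0, .lun = 0, .plane = 0, .eraseblock = 3, .page = 7 },
	.datalen = 2048,
	.databuf.in = databuf,
	.ooblen = 64,
	.oobbuf.in = oobbuf,
	.mode = MTD_OPS_PLACE_OOB,
	.continuous = false,
};
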
-#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
-#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
-#define NAND_CTRL_CHANGE 0x80
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
-/*
- * Standard NAND flash commands
- */
-#define NAND_CMD_READ0 0
-#define NAND_CMD_READ1 1
-#define NAND_CMD_RNDOUT 5
-#define NAND_CMD_PAGEPROG 0x10
-#define NAND_CMD_READOOB 0x50
-#define NAND_CMD_ERASE1 0x60
-#define NAND_CMD_STATUS 0x70
-#define NAND_CMD_SEQIN 0x80
-#define NAND_CMD_RNDIN 0x85
-#define NAND_CMD_READID 0x90
-#define NAND_CMD_ERASE2 0xd0
-#define NAND_CMD_PARAM 0xec
-#define NAND_CMD_GET_FEATURES 0xee
-#define NAND_CMD_SET_FEATURES 0xef
-#define NAND_CMD_RESET 0xff
-
-#define NAND_CMD_LOCK 0x2a
-#define NAND_CMD_UNLOCK1 0x23
-#define NAND_CMD_UNLOCK2 0x24
-
-/* Extended commands for large page devices */
-#define NAND_CMD_READSTART 0x30
-#define NAND_CMD_RNDOUTSTART 0xE0
-#define NAND_CMD_CACHEDPROG 0x15
-
-#define NAND_CMD_NONE -1
-
-/* Status bits */
-#define NAND_STATUS_FAIL 0x01
-#define NAND_STATUS_FAIL_N1 0x02
-#define NAND_STATUS_TRUE_READY 0x20
-#define NAND_STATUS_READY 0x40
-#define NAND_STATUS_WP 0x80
+/**
+ * enum nand_ecc_engine_type - NAND ECC engine type
+ * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
+ * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
+ */
+enum nand_ecc_engine_type {
+ NAND_ECC_ENGINE_TYPE_INVALID,
+ NAND_ECC_ENGINE_TYPE_NONE,
+ NAND_ECC_ENGINE_TYPE_SOFT,
+ NAND_ECC_ENGINE_TYPE_ON_HOST,
+ NAND_ECC_ENGINE_TYPE_ON_DIE,
+};
-/*
- * Constants for ECC_MODES
+/**
+ * enum nand_ecc_placement - NAND ECC bytes placement
+ * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
+ * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
+ * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
+ * interleaved with regular data in the main
+ * area
*/
-typedef enum {
- NAND_ECC_NONE,
- NAND_ECC_SOFT,
- NAND_ECC_HW,
- NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
- NAND_ECC_SOFT_BCH,
-} nand_ecc_modes_t;
+enum nand_ecc_placement {
+ NAND_ECC_PLACEMENT_UNKNOWN,
+ NAND_ECC_PLACEMENT_OOB,
+ NAND_ECC_PLACEMENT_INTERLEAVED,
+};
-/*
- * Constants for Hardware ECC
+/**
+ * enum nand_ecc_algo - NAND ECC algorithm
+ * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
+ * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
+ * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
+ * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
+ */
+enum nand_ecc_algo {
+ NAND_ECC_ALGO_UNKNOWN,
+ NAND_ECC_ALGO_HAMMING,
+ NAND_ECC_ALGO_BCH,
+ NAND_ECC_ALGO_RS,
+};
+
+/**
+ * struct nand_ecc_props - NAND ECC properties
+ * @engine_type: ECC engine type
+ * @placement: OOB placement (if relevant)
+ * @algo: ECC algorithm (if relevant)
+ * @strength: ECC strength
+ * @step_size: Number of bytes per step
+ * @flags: Misc properties
*/
-/* Reset Hardware ECC for read */
-#define NAND_ECC_READ 0
-/* Reset Hardware ECC for write */
-#define NAND_ECC_WRITE 1
-/* Enable Hardware ECC before syndrome is read back from flash */
-#define NAND_ECC_READSYN 2
+struct nand_ecc_props {
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
+ enum nand_ecc_algo algo;
+ unsigned int strength;
+ unsigned int step_size;
+ unsigned int flags;
+};
-/* Bit mask for flags passed to do_nand_read_ecc */
-#define NAND_GET_DEVICE 0x80
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+/* NAND ECC misc flags */
+#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
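
Typical use of the helper macro, e.g. to express a requirement of 8 bits of correction per 512-byte step (numbers illustrative):

struct nand_ecc_props requirements = NAND_ECCREQ(8, 512);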
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
*/
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
-/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
-/*
- * Chip requires ready check on read (for auto-incremented sequential read).
- * True only for small page devices; large page devices do not support
- * autoincrement.
+struct nand_bbt {
+ unsigned long *cache;
+};
+
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. No need to check if the block is bad before
+ * erasing, this has been taken care of by the generic NAND layer
+ * @markbad: mark a specific block bad. No need to check if the block is
+ * already marked bad, this has been taken care of by the generic
+ * NAND layer. This method should just write the BBM (Bad Block
+ * Marker) so that future calls to struct nand_ops->isbad() return
+ * true
+ * @isbad: check whether a block is bad or not. This method should just read
+ * the BBM and return whether the block is bad or not based on what it
+ * reads
+ *
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
*/
-#define NAND_NEED_READRDY 0x00000100
+struct nand_ops {
+ int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+ int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+ bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
-/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+/**
+ * struct nand_ecc_context - Context for the ECC engine
+ * @conf: basic ECC engine parameters
+ * @nsteps: number of ECC steps
+ * @total: total number of bytes used for storing ECC codes, this is used by
+ * generic OOB layouts
+ * @priv: ECC engine driver private data
+ */
+struct nand_ecc_context {
+ struct nand_ecc_props conf;
+ unsigned int nsteps;
+ unsigned int total;
+ void *priv;
+};
-/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+/**
+ * struct nand_ecc_engine_ops - ECC engine operations
+ * @init_ctx: given a desired user configuration for the given NAND device,
+ * requests the ECC engine driver to set up a configuration with
+ * values it supports.
+ * @cleanup_ctx: clean the context initialized by @init_ctx.
+ * @prepare_io_req: is called before reading/writing a page to prepare the I/O
+ * request to be performed with ECC correction.
+ * @finish_io_req: is called after reading/writing a page to terminate the I/O
+ * request and ensure proper ECC correction.
+ */
+struct nand_ecc_engine_ops {
+ int (*init_ctx)(struct nand_device *nand);
+ void (*cleanup_ctx)(struct nand_device *nand);
+ int (*prepare_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+ int (*finish_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+};
-/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+/**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ * correction, does not need to copy
+ * data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ * data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+ NAND_ECC_ENGINE_INTEGRATION_INVALID,
+ NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+ NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
-/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+/**
+ * struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
+ * @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
+ */
+struct nand_ecc_engine {
+ struct device *dev;
+ struct list_head node;
+ const struct nand_ecc_engine_ops *ops;
+ enum nand_ecc_engine_integration integration;
+ void *priv;
+};
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+void of_get_nand_ecc_user_config(struct nand_device *nand);
+int nand_ecc_init_ctx(struct nand_device *nand);
+void nand_ecc_cleanup_ctx(struct nand_device *nand);
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+bool nand_ecc_is_strong_enough(struct nand_device *nand);
+
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
-/* Macros to identify the above */
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
-/* Non chip related options */
-/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
+/**
+ * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
+ * @orig_req: Pointer to the original IO request
+ * @nand: Related NAND device, to have access to its memory organization
+ * @page_buffer_size: Real size of the page buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @spare_databuf: Data bounce buffer
+ * @spare_oobbuf: OOB bounce buffer
+ * @bounce_data: Flag indicating a data bounce buffer is used
+ * @bounce_oob: Flag indicating an OOB bounce buffer is used
*/
-#define NAND_OWN_BUFFERS 0x00020000
-/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
-/*
- * Autodetect nand buswidth with readid/onfi.
- * This suppose the driver will configure the hardware in 8 bits mode
- * when calling nand_scan_ident, and update its configuration
- * before calling nand_scan_tail.
- */
-#define NAND_BUSWIDTH_AUTO 0x00080000
-
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
-
-/* Cell info constants */
-#define NAND_CI_CHIPNR_MSK 0x03
-#define NAND_CI_CELLTYPE_MSK 0x0C
-
-/* Keep gcc happy */
-struct nand_chip;
-
-/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-
-/* ONFI feature address */
-#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
-
-/* ONFI subfeature parameters length */
-#define ONFI_SUBFEATURE_PARAM_LEN 4
-
-struct nand_onfi_params {
- /* rev info and features block */
- /* 'O' 'N' 'F' 'I' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- __le16 opt_cmd;
- u8 reserved[22];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id;
- __le16 date_code;
- u8 reserved2[13];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- __le32 data_bytes_per_ppage;
- __le16 spare_bytes_per_ppage;
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- u8 programs_per_page;
- u8 ppage_attr;
- u8 ecc_bits;
- u8 interleaved_bits;
- u8 interleaved_ops;
- u8 reserved3[13];
-
- /* electrical parameter block */
- u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
- __le16 program_cache_timing_mode;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_ccs;
- __le16 src_sync_timing_mode;
- __le16 src_ssync_features;
- __le16 clk_pin_capacitance_typ;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- u8 input_pin_capacitance_max;
- u8 driver_strenght_support;
- __le16 t_int_r;
- __le16 t_ald;
- u8 reserved4[7];
-
- /* vendor */
- u8 reserved5[90];
-
- __le16 crc;
-} __attribute__((packed));
-
-#define ONFI_CRC_BASE 0x4F4E
-
-/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- */
-struct nand_hw_control {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
+struct nand_ecc_req_tweak_ctx {
+ struct nand_page_io_req orig_req;
+ struct nand_device *nand;
+ unsigned int page_buffer_size;
+ unsigned int oob_buffer_size;
+ void *spare_databuf;
+ void *spare_oobbuf;
+ bool bounce_data;
+ bool bounce_oob;
};
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand);
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+
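As a rough illustration of how these helpers are meant to be combined (this sketch is not part of the patch; the engine structure and function names are assumptions), an external ECC engine could bounce partial-page I/O requests through a tweaking context like this:

/* Hypothetical sketch: an ECC engine using the request-tweaking helpers. */
struct my_ecc_engine_ctx {
	struct nand_ecc_req_tweak_ctx req_ctx;
	/* engine-specific state would go here */
};

static int my_ecc_init_ctx(struct nand_device *nand,
			   struct my_ecc_engine_ctx *ctx)
{
	/* Allocate bounce buffers matching the NAND page/OOB layout */
	return nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
}

static void my_ecc_prepare_io_req(struct my_ecc_engine_ctx *ctx,
				  struct nand_page_io_req *req)
{
	/* Turn a partial-page request into a full-page one before the transfer */
	nand_ecc_tweak_req(&ctx->req_ctx, req);
}

static void my_ecc_finish_io_req(struct my_ecc_engine_ctx *ctx,
				 struct nand_page_io_req *req)
{
	/* Copy the relevant bytes back and restore the caller's original request */
	nand_ecc_restore_req(&ctx->req_ctx, req);
}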
/**
- * struct nand_ecc_ctrl - Control structure for ECC
- * @mode: ECC mode
- * @steps: number of ECC steps per page
- * @size: data bytes per ECC step
- * @bytes: ECC bytes per step
- * @strength: max number of correctible bits per ECC step
- * @total: total number of ECC bytes per page
- * @prepad: padding information for syndrome based ECC generators
- * @postpad: padding information for syndrome based ECC generators
- * @layout: ECC layout control struct pointer
- * @priv: pointer to private ECC control data
- * @hwctl: function to control hardware ECC generator. Must only
- * be provided if an hardware ECC is available
- * @calculate: function for ECC calculation or readback from ECC hardware
- * @correct: function for ECC correction, matching to ECC generator (sw/hw)
- * @read_page_raw: function to read a raw page without ECC
- * @write_page_raw: function to write a raw page without ECC
- * @read_page: function to read a page according to the ECC generator
- * requirements; returns maximum number of bitflips corrected in
- * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
- * @read_subpage: function to read parts of the page covered by ECC;
- * returns same as read_page()
- * @write_subpage: function to write parts of the page covered by ECC.
- * @write_page: function to write a page according to the ECC generator
- * requirements.
- * @write_oob_raw: function to write chip OOB data without ECC
- * @read_oob_raw: function to read chip OOB data without ECC
- * @read_oob: function to read chip OOB data
- * @write_oob: function to write chip OOB data
- */
-struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
- int steps;
- int size;
- int bytes;
- int total;
- int strength;
- int prepad;
- int postpad;
- struct nand_ecclayout *layout;
- void *priv;
- void (*hwctl)(struct mtd_info *mtd, int mode);
- int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code);
- int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
- uint8_t *calc_ecc);
- int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
- int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf);
- int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
- int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
- int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+ * struct nand_ecc - Information relative to the ECC
+ * @defaults: Default values, depend on the underlying subsystem
+ * @requirements: ECC requirements from the NAND chip perspective
+ * @user_conf: User desires in terms of ECC parameters
+ * @ctx: ECC context for the ECC engine, derived from the device @requirements,
+ * the @user_conf and the @defaults
+ * @ondie_engine: On-die ECC engine reference, if any
+ * @engine: ECC engine actually bound
+ */
+struct nand_ecc {
+ struct nand_ecc_props defaults;
+ struct nand_ecc_props requirements;
+ struct nand_ecc_props user_conf;
+ struct nand_ecc_context ctx;
+ struct nand_ecc_engine *ondie_engine;
+ struct nand_ecc_engine *engine;
};
/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer for calculated ECC
- * @ecccode: buffer for ECC read from flash
- * @databuf: buffer for data - dynamically sized
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @ecc: NAND ECC object attached to the NAND device
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
*
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct nand_device->memorg and struct nand_device->ecc.requirements should
+ * be filled in at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done, nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD information, which
+ * means the specialized NAND layers should never manually tweak
+ * struct nand_device->mtd except for the ->_read/write() hooks.
*/
-struct nand_buffers {
- uint8_t ecccalc[NAND_MAX_OOBSIZE];
- uint8_t ecccode[NAND_MAX_OOBSIZE];
- uint8_t databuf[NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE];
+struct nand_device {
+ struct mtd_info mtd;
+ struct nand_memory_organization memorg;
+ struct nand_ecc ecc;
+ struct nand_row_converter rowconv;
+ struct nand_bbt bbt;
+ const struct nand_ops *ops;
};
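A minimal sketch of the inheritance pattern described above (the names and geometry values are purely illustrative and not part of this patch; a real driver fills memorg from the detected chip and provides its own struct nand_ops):

struct my_nand_chip {
	struct nand_device base;	/* generic NAND object, embedded */
	void *priv;
};

static int my_nand_chip_init(struct my_nand_chip *chip)
{
	struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);

	/* Example geometry, normally read from the chip at detection time */
	memorg->bits_per_cell = 1;
	memorg->pagesize = 2048;
	memorg->oobsize = 64;
	memorg->pages_per_eraseblock = 64;
	memorg->eraseblocks_per_lun = 1024;
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	/* my_nand_ops is a hypothetical struct nand_ops implementation */
	return nanddev_init(&chip->base, &my_nand_ops, THIS_MODULE);
}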
/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
- * flash device
- * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
- * flash device.
- * @read_byte: [REPLACEABLE] read one byte from the chip
- * @read_word: [REPLACEABLE] read one word from the chip
- * @write_buf: [REPLACEABLE] write data from the buffer to the chip
- * @read_buf: [REPLACEABLE] read data from the chip into the buffer
- * @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check, if the block is bad
- * @block_markbad: [REPLACEABLE] mark the block bad
- * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting
- * mtd->oobsize, mtd->writesize and so on.
- * @id_data contains the 8 bytes values of NAND_CMD_READID.
- * Return with the bus width.
- * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
- * device ready/busy line. If set to NULL no access to
- * ready/busy is available and the ready/busy information
- * is read from the chip status register.
- * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
- * commands to the chip.
- * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
- * ready.
- * @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
- * @hwcontrol: platform-specific hardware control structure
- * @erase_cmd: [INTERN] erase command write function, selectable due
- * to AND support.
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
- * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
- * data from array to read regs (tR).
- * @state: [INTERN] the current state of the NAND device
- * @oob_poi: "poison value buffer," used for laying out OOB data
- * before writing
- * @page_shift: [INTERN] number of address bits in a page (column
- * address bits).
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @options: [BOARDSPECIFIC] various chip options. They can partly
- * be set to inform nand_scan about special functionality.
- * See the defines for further explanation.
- * @bbt_options: [INTERN] bad block specific options. All options used
- * here must come from bbm.h. By default, these options
- * will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos: [INTERN] position of the bad block marker in the oob
- * area.
- * @badblockbits: [INTERN] minimum number of set bits in a good block's
- * bad block marker position; i.e., BBM == 11110111b is
- * not bad when badblockbits == 7
- * @cellinfo: [INTERN] MLC/multichip data from chip ident
- * @numchips: [INTERN] number of physical chips
- * @chipsize: [INTERN] the size of one chip for multichip arrays
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @pagebuf: [INTERN] holds the pagenumber which is currently in
- * data_buf.
- * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
- * currently in data_buf.
- * @subpagesize: [INTERN] holds the subpagesize
- * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
- * non 0 if ONFI supported.
- * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
- * supported, 0 otherwise.
- * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @ecclayout: [REPLACEABLE] the default ECC placement scheme
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
- * lookup.
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
- * bad block scan.
- * @controller: [REPLACEABLE] a pointer to a hardware controller
- * structure which is shared among multiple independent
- * devices.
- * @priv: [OPTIONAL] pointer to private chip data
- * @errstat: [OPTIONAL] hardware specific function to perform
- * additional error status checks (determine if errors are
- * correctable).
- * @write_page: [REPLACEABLE] High-level page write function
- */
-
-struct nand_chip {
- void __iomem *IO_ADDR_R;
- void __iomem *IO_ADDR_W;
-
- uint8_t (*read_byte)(struct mtd_info *mtd);
- u16 (*read_word)(struct mtd_info *mtd);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
- int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- int (*init_size)(struct mtd_info *mtd, struct nand_chip *this,
- u8 *id_data);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
- int page_addr);
- int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
- void (*erase_cmd)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
- int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
- int status, int page);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw);
- int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
-
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
-
- int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
- int numchips;
- uint64_t chipsize;
- int pagemask;
- int pagebuf;
- unsigned int pagebuf_bitflips;
- int subpagesize;
- uint8_t cellinfo;
- int badblockpos;
- int badblockbits;
-
- int onfi_version;
- struct nand_onfi_params onfi_params;
-
- flstate_t state;
-
- uint8_t *oob_poi;
- struct nand_hw_control *controller;
- struct nand_ecclayout *ecclayout;
-
- struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
- struct nand_hw_control hwcontrol;
-
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- struct nand_bbt_descr *badblock_pattern;
-
- void *priv;
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boilerplate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+ struct nand_page_io_req req;
+ unsigned int oobbytes_per_page;
+ unsigned int dataleft;
+ unsigned int oobleft;
};
-/*
- * NAND Flash Manufacturer ID Codes
- */
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-#define NAND_MFR_AMD 0x01
-#define NAND_MFR_MACRONIX 0xc2
-#define NAND_MFR_EON 0x92
-
-/* The maximum expected count of bytes in the NAND ID sequence */
-#define NAND_MAX_ID_LEN 8
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
+ */
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_device, mtd);
+}
-/*
- * A helper for defining older NAND chips where the second ID byte fully
- * defined the chip, including the geometry (chip size, eraseblock size, page
- * size). All these chips have 512 bytes NAND page size.
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
*/
-#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
- { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
- .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+ return &nand->mtd;
+}
/*
- * A helper for defining newer chips which report their page size and
- * eraseblock size via the extended ID bytes.
- *
- * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
- * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
- * device ID now only represented a particular total chip size (and voltage,
- * buswidth), and the page size, eraseblock size, and OOB size could vary while
- * using the same device ID.
- */
-#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
- { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
- .options = (opts) }
-
-/**
- * struct nand_flash_dev - NAND Flash Device ID Structure
- * @name: a human-readable name of the NAND chip
- * @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
- * memory address as @id[0])
- * @dev_id: device ID part of the full chip ID array (refers the same memory
- * address as @id[1])
- * @id: full device ID array
- * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
- * well as the eraseblock size) is determined from the extended NAND
- * chip ID array)
- * @chipsize: total chip size in MiB
- * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
- * @options: stores various chip bit options
- * @id_len: The valid length of the @id.
- * @oobsize: OOB size
- */
-struct nand_flash_dev {
- char *name;
- union {
- struct {
- uint8_t mfr_id;
- uint8_t dev_id;
- };
- uint8_t id[NAND_MAX_ID_LEN];
- };
- unsigned int pagesize;
- unsigned int chipsize;
- unsigned int erasesize;
- unsigned int options;
- uint16_t id_len;
- uint16_t oobsize;
-};
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+ return nand->memorg.bits_per_cell;
+}
/**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
-*/
-struct nand_manufacturers {
- int id;
- char *name;
-};
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize;
+}
-extern struct nand_flash_dev nand_flash_ids[];
-extern struct nand_manufacturers nand_manuf_ids[];
-
-extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
-extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
-extern int nand_default_bbt(struct mtd_info *mtd);
-extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt);
-extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
-
-/**
- * struct platform_nand_chip - chip level device structure
- * @nr_chips: max. number of chips to scan for
- * @chip_offset: chip number offset
- * @nr_partitions: number of partitions pointed to by partitions (or zero)
- * @partitions: mtd partition list
- * @chip_delay: R/B delay value in us
- * @options: Option flags, e.g. 16bit buswidth
- * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout: ECC layout info structure
- * @part_probe_types: NULL-terminated array of probe types
- */
-struct platform_nand_chip {
- int nr_chips;
- int chip_offset;
- int nr_partitions;
- struct mtd_partition *partitions;
- struct nand_ecclayout *ecclayout;
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
- const char **part_probe_types;
-};
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
+ */
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+ return nand->memorg.oobsize;
+}
-/* Keep gcc happy */
-struct platform_device;
-
-/**
- * struct platform_nand_ctrl - controller level device structure
- * @probe: platform specific function to probe/setup hardware
- * @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
- * @dev_ready: platform specific function to read ready/busy pin
- * @select_chip: platform specific chip select function
- * @cmd_ctrl: platform specific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @write_buf: platform specific function for write buffer
- * @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
- * @priv: private data to transport driver specific settings
- *
- * All fields are optional and depend on the hardware driver requirements
- */
-struct platform_nand_ctrl {
- int (*probe)(struct platform_device *pdev);
- void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
- void *priv;
-};
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
+ */
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock;
+}
/**
- * struct platform_nand_data - container structure for platform-specific data
- * @chip: chip level chip structure
- * @ctrl: controller level device structure
+ * nanddev_pages_per_target() - Get the number of pages per target
+ * @nand: NAND device
+ *
+ * Return: the number of pages per target.
*/
-struct platform_nand_data {
- struct platform_nand_chip chip;
- struct platform_nand_ctrl ctrl;
-};
+static inline unsigned int
+nanddev_pages_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.luns_per_target;
+}
+
+/**
+ * nanddev_eraseblock_size() - Get the NAND erase block size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per target.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
+}
+
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock *
+ nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets;
+}
+
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
+ */
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+ return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
+
+/**
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
+ *
+ * Return: the memorg object embedded in the NAND device.
+ */
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+ return &nand->memorg;
+}
-/* Some helpers to access the data structures */
-static inline
-struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
+/**
+ * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_conf(struct nand_device *nand)
{
- struct nand_chip *chip = mtd->priv;
+ return &nand->ecc.ctx.conf;
+}
- return chip->priv;
+/**
+ * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_nsteps(struct nand_device *nand)
+{
+ return nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
+{
+ return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
}
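As a small illustration (an assumption, not from the patch), once the ECC engine is bound these accessors can be combined to compute the amount of OOB consumed by ECC on each page:

/* Hypothetical sketch: total OOB bytes used by ECC on one page */
static unsigned int my_ecc_oob_used(struct nand_device *nand)
{
	return nanddev_get_ecc_nsteps(nand) *
	       nanddev_get_ecc_bytes_per_step(nand);
}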
-/* return the supported asynchronous timing mode. */
-static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+/**
+ * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
+ * device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_requirements(struct nand_device *nand)
{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.async_timing_mode);
+ return &nand->ecc.requirements;
}
-/* return the supported synchronous timing mode. */
-static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+/**
+ * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
+ * device
+ * @nand: NAND device
+ * @reqs: Requirements
+ */
+static inline void
+nanddev_set_ecc_requirements(struct nand_device *nand,
+ const struct nand_ecc_props *reqs)
{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
+ nand->ecc.requirements = *reqs;
}
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
+
+/**
+ * nanddev_register() - Register a NAND device
+ * @nand: NAND device
+ *
+ * Register a NAND device.
+ * This function is just a wrapper around mtd_device_register()
+ * registering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_register(struct nand_device *nand)
+{
+ return mtd_device_register(&nand->mtd, NULL, 0);
+}
+
+/**
+ * nanddev_unregister() - Unregister a NAND device
+ * @nand: NAND device
+ *
+ * Unregister a NAND device.
+ * This function is just a wrapper around mtd_device_unregister()
+ * unregistering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_unregister(struct nand_device *nand)
+{
+ return mtd_device_unregister(&nand->mtd);
+}
+
+/**
+ * nanddev_set_of_node() - Attach a DT node to a NAND device
+ * @nand: NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a NAND device.
+ */
+static inline void nanddev_set_of_node(struct nand_device *nand,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nand->mtd, np);
+}
+
+/**
+ * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the DT node attached to @nand.
+ */
+static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
+{
+ return mtd_get_of_node(&nand->mtd);
+}
+
+/**
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed to by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+ loff_t offs,
+ struct nand_pos *pos)
+{
+ unsigned int pageoffs;
+ u64 tmp = offs;
+
+ pageoffs = do_div(tmp, nand->memorg.pagesize);
+ pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+ pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+ pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+ pos->target = tmp;
+
+ return pageoffs;
+}
+
+/**
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+ const struct nand_pos *b)
+{
+ if (a->target != b->target)
+ return a->target < b->target ? -1 : 1;
+
+ if (a->lun != b->lun)
+ return a->lun < b->lun ? -1 : 1;
+
+ if (a->eraseblock != b->eraseblock)
+ return a->eraseblock < b->eraseblock ? -1 : 1;
+
+ if (a->page != b->page)
+ return a->page < b->page ? -1 : 1;
+
+ return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ * page, if one wants to point to a specific offset within this page
+ * the returned offset has to be adjusted manually.
+ */
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ unsigned int npages;
+
+ npages = pos->page +
+ ((pos->eraseblock +
+ (pos->lun +
+ (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun) *
+ nand->memorg.pages_per_eraseblock);
+
+ return (loff_t)npages * nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return (pos->lun << nand->rowconv.lun_addr_shift) |
+ (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+ pos->page;
+}
+
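For instance (purely illustrative, not part of the patch), a controller driver could chain the two converters above to derive the row address matching an absolute MTD offset:

/* Hypothetical sketch: MTD offset -> NAND position -> row address */
static unsigned int my_offs_to_row(struct nand_device *nand, loff_t offs)
{
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);

	return nanddev_pos_to_row(nand, &pos);
}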
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+ pos->lun = 0;
+ pos->target++;
+}
+
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->lun >= nand->memorg.luns_per_target - 1)
+ return nanddev_pos_next_target(nand, pos);
+
+ pos->lun++;
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+}
+
+/**
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
+ */
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+ return nanddev_pos_next_lun(nand, pos);
+
+ pos->eraseblock++;
+ pos->page = 0;
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
+
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+ return nanddev_pos_next_eraseblock(nand, pos);
+
+ pos->page++;
+}
+
+/**
+ * nanddev_io_page_iter_init - Initialize a NAND I/O iterator for page accesses
+ * @nand: NAND device
+ * @reqtype: type of I/O request (read or write)
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for page jumps.
+ */
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = req->ooboffs;
+ iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+ iter->dataleft = req->len;
+ iter->oobleft = req->ooblen;
+ iter->req.databuf.in = req->datbuf;
+ iter->req.datalen = min_t(unsigned int,
+ nand->memorg.pagesize - iter->req.dataoffs,
+ iter->dataleft);
+ iter->req.oobbuf.in = req->oobbuf;
+ iter->req.ooblen = min_t(unsigned int,
+ iter->oobbytes_per_page - iter->req.ooboffs,
+ iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nanddev_io_block_iter_init - Initialize a NAND I/O iterator for block accesses
+ * @nand: NAND device
+ * @reqtype: type of I/O request (read or write)
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB).
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
+}
+
+/**
+ * nanddev_io_iter_next_page - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_page(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->oobleft -= iter->req.ooblen;
+ iter->req.oobbuf.in += iter->req.ooblen;
+ iter->req.dataoffs = 0;
+ iter->req.ooboffs = 0;
+ iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+ iter->dataleft);
+ iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
+ * nanddev_io_iter_end - Check whether the iteration is over or not
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate on or not.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ * otherwise.
+ */
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+ const struct nand_io_iter *iter)
+{
+ if (iter->dataleft || iter->oobleft)
+ return false;
+
+ return true;
+}
+
+/**
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
+ * I/O request
+ * @nand: NAND device
+ * @type: type of I/O request (read or write)
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over pages that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_page(nand, type, start, req, iter) \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_page(nand, iter))
+
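To illustrate the intended usage of the page iterator (a sketch only; my_read_one_page() and the exact request type name are assumptions based on the surrounding API, not part of this patch), an mtd->_read_oob() implementation could look like:

static int my_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			   struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	int ret;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		/* my_read_one_page() is a made-up per-page I/O helper */
		ret = my_read_one_page(nand, &iter.req);
		if (ret)
			return ret;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	return 0;
}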
+/**
+ * nanddev_io_for_each_block - Iterate over all NAND eraseblocks contained in
+ * an MTD I/O request, one eraseblock at a time
+ * @nand: NAND device
+ * @type: type of I/O request (read or write)
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* ECC related functions */
+int nanddev_ecc_engine_init(struct nand_device *nand);
+void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+ NAND_BBT_BLOCK_STATUS_UNKNOWN,
+ NAND_BBT_BLOCK_GOOD,
+ NAND_BBT_BLOCK_WORN,
+ NAND_BBT_BLOCK_RESERVED,
+ NAND_BBT_BLOCK_FACTORY_BAD,
+ NAND_BBT_BLOCK_NUM_STATUS,
+};
+
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
+
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get BBT entry for
+ *
+ * Return the BBT entry used to store information about the eraseblock pointed
+ * to by @pos.
+ *
+ * Return: the BBT entry storing information about the eraseblock at @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return pos->eraseblock +
+ ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun);
+}
+
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
+{
+ return !!nand->bbt.cache;
+}
+
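A short sketch of how these BBT helpers combine (illustrative only, not part of the patch):

/* Hypothetical sketch: check whether the eraseblock at @pos is marked worn */
static bool my_block_is_worn(struct nand_device *nand,
			     const struct nand_pos *pos)
{
	unsigned int entry;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	entry = nanddev_bbt_pos_to_entry(nand, pos);

	return nanddev_bbt_get_block_status(nand, entry) == NAND_BBT_BLOCK_WORN;
}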
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
+
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int threshold);
+
#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
deleted file mode 100644
index 74acf5367556..000000000000
--- a/include/linux/mtd/nand_bch.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file is the header for the NAND BCH ECC implementation.
- */
-
-#ifndef __MTD_NAND_BCH_H__
-#define __MTD_NAND_BCH_H__
-
-struct mtd_info;
-struct nand_bch_control;
-
-#if defined(CONFIG_MTD_NAND_ECC_BCH)
-
-static inline int mtd_nand_has_bch(void) { return 1; }
-
-/*
- * Calculate BCH ecc code
- */
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct bit errors
- */
-int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
-/*
- * Initialize BCH encoder/decoder
- */
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout);
-/*
- * Release BCH encoder/decoder resources
- */
-void nand_bch_free(struct nand_bch_control *nbc);
-
-#else /* !CONFIG_MTD_NAND_ECC_BCH */
-
-static inline int mtd_nand_has_bch(void) { return 0; }
-
-static inline int
-nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
-{
- return -1;
-}
-
-static inline int
-nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
-{
- return -1;
-}
-
-static inline struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout)
-{
- return NULL;
-}
-
-static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
-
-#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
deleted file mode 100644
index 4d8406c81652..000000000000
--- a/include/linux/mtd/nand_ecc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * drivers/mtd/nand_ecc.h
- *
- * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
- * David Woodhouse <dwmw2@infradead.org>
- * Thomas Gleixner <tglx@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file is the header for the ECC algorithm.
- */
-
-#ifndef __MTD_NAND_ECC_H__
-#define __MTD_NAND_ECC_H__
-
-struct mtd_info;
-
-/*
- * Calculate 3 byte ECC code for eccsize byte block
- */
-void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code);
-
-/*
- * Calculate 3 byte ECC code for 256/512 byte block
- */
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
-
-/*
- * Detect and correct a 1 bit error for eccsize byte block
- */
-int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize);
-
-/*
- * Detect and correct a 1 bit error for 256/512 byte block
- */
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
-
-#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
index d0558a982628..98f075b86931 100644
--- a/include/linux/mtd/ndfc.h
+++ b/include/linux/mtd/ndfc.h
@@ -1,15 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/include/linux/mtd/ndfc.h
- *
* Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Info:
* Contains defines, datastructures for ndfc nand controller
- *
*/
#ifndef __LINUX_MTD_NDFC_H
#define __LINUX_MTD_NDFC_H
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index b059629e22bc..4423d3b385b1 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_NFTL_H__
@@ -50,7 +36,6 @@ struct NFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4596503c9da9..1e517961d0ba 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mtd/onenand.h
*
* Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_ONENAND_H
@@ -80,7 +77,6 @@ struct onenand_bufferram {
* @page_buf: [INTERN] page main data buffer
* @oob_buf: [INTERN] page oob data buffer
* @subpagesize: [INTERN] holds the subpagesize
- * @ecclayout: [REPLACEABLE] the default ecc placement scheme
* @bbm: [REPLACEABLE] pointer to Bad Block Management
 * @priv:		[OPTIONAL] pointer to private chip data
*/
@@ -95,6 +91,7 @@ struct onenand_chip {
unsigned int technology;
unsigned int density_mask;
unsigned int options;
+ unsigned int badblockpos;
unsigned int erase_shift;
unsigned int page_shift;
@@ -134,7 +131,6 @@ struct onenand_chip {
#endif
int subpagesize;
- struct nand_ecclayout *ecclayout;
void *bbm;
@@ -190,6 +186,8 @@ struct onenand_chip {
/* Check byte access in OneNAND */
#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
+#define ONENAND_BADBLOCK_POS 0
+
/*
* Options bits
*/
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d60130f88eed..5f728407a579 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mtd/onenand_regs.h
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ONENAND_REG_H
@@ -80,6 +77,7 @@
#define ONENAND_DEVICE_DENSITY_1Gb (0x003)
#define ONENAND_DEVICE_DENSITY_2Gb (0x004)
#define ONENAND_DEVICE_DENSITY_4Gb (0x005)
+#define ONENAND_DEVICE_DENSITY_8Gb (0x006)
/*
* Version ID Register F002h (R)
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 000000000000..55ab2e4d62f9
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all ONFI related definitions
+ */
+
+#ifndef __LINUX_MTD_ONFI_H
+#define __LINUX_MTD_ONFI_H
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 sdr_timing_modes;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_adl;
+ u8 reserved4[8];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+#endif /* __LINUX_MTD_ONFI_H */
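As an illustration of how the timing-mode bitfields above are typically consumed (a sketch under the assumption that the caller already holds a filled struct onfi_params), a driver could pick the best advertised SDR mode like this:

/* Hypothetical sketch: pick the highest SDR timing mode advertised by the chip */
static int my_best_sdr_timing_mode(const struct onfi_params *onfi)
{
	int mode;

	for (mode = 5; mode >= 0; mode--) {
		if (onfi->sdr_timing_modes & BIT(mode))
			return mode;
	}

	return -1;	/* no ONFI SDR timing mode advertised */
}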
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 1f8d24bdafda..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -20,6 +20,12 @@
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
+ * types: some partitions can be containers using a specific format to describe
+ *	embedded subpartitions / volumes. E.g. many home routers use a "firmware"
+ *	partition that contains at least a kernel and a rootfs. In such a case an
+ *	extra parser is needed that will detect these dynamic partitions and
+ *	report them to the MTD subsystem. If set, this property stores an array
+ *	of parser names to use when looking for subpartitions (a sketch follows
+ *	the structure definition below).
* size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
* will extend to the end of the master MTD device.
* offset: absolute starting position within the master MTD device; if
@@ -31,17 +37,20 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
struct mtd_partition {
- char *name; /* identifier string */
+ const char *name; /* identifier string */
+ const char *const *types; /* names of parsers to use if any */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
- struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
+ uint32_t add_flags; /* flags to add to the partition */
+ struct device_node *of_node;
};
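The sketch below (illustrative only; all names and sizes are made up) shows a static partition table declaring a read-only bootloader and a "firmware" container handled by a sub-partition parser via the new types field:

/* Hypothetical sketch: a "firmware" container parsed by a sub-partition parser */
static const char * const my_fw_part_types[] = { "my-fw-parser", NULL };

static const struct mtd_partition my_parts[] = {
	{
		.name = "bootloader",
		.offset = 0,
		.size = 0x100000,
		.mask_flags = MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name = "firmware",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
		.types = my_fw_part_types,
	},
};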
#define MTDPART_OFS_RETAIN (-3)
@@ -56,11 +65,9 @@ struct device_node;
/**
* struct mtd_part_parser_data - used to pass data to MTD partition parsers.
* @origin: for RedBoot, start address of MTD device
- * @of_node: for OF parsers, device node containing partitioning information
*/
struct mtd_part_parser_data {
unsigned long origin;
- struct device_node *of_node;
};
@@ -72,15 +79,35 @@ struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
- int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
+ const struct of_device_id *of_match_table;
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
+};
+
+/* Container for passing around a set of parsed partitions */
+struct mtd_partitions {
+ const struct mtd_partition *parts;
+ int nr_parts;
+ const struct mtd_part_parser *parser;
};
-extern int register_mtd_parser(struct mtd_part_parser *parser);
-extern int deregister_mtd_parser(struct mtd_part_parser *parser);
+extern int __register_mtd_parser(struct mtd_part_parser *parser,
+ struct module *owner);
+#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
+
+extern void deregister_mtd_parser(struct mtd_part_parser *parser);
+
+/*
+ * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
+ * do anything special in module init/exit. Each driver may only use this macro
+ * once, and calling it replaces module_init() and module_exit().
+ */
+#define module_mtd_part_parser(__mtd_part_parser) \
+ module_driver(__mtd_part_parser, register_mtd_parser, \
+ deregister_mtd_parser)
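For completeness, a minimal parser registration sketch using the helper macro above (my_parse_fn() is a placeholder for the actual parsing logic; it would allocate and fill *pparts and return the number of partitions found):

static int my_parse_fn(struct mtd_info *mtd,
		       const struct mtd_partition **pparts,
		       struct mtd_part_parser_data *data)
{
	/* Detect subpartitions on @mtd, allocate and fill *pparts here */
	return 0;
}

static struct mtd_part_parser my_part_parser = {
	.name = "my-parser",
	.parse_fn = my_parse_fn,
};
module_mtd_part_parser(my_part_parser);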
-int mtd_is_partition(const struct mtd_info *mtd);
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index b730d4f84655..146413d4bdb7 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Primary function overlay window definitions
* and service functions used by LPDDR chips
*/
@@ -18,7 +19,7 @@
/* Identification info for LPDDR chip */
#define PFOW_MANUFACTURER_ID 0x0020
#define PFOW_DEVICE_ID 0x0022
-/* Address in PFOW where prog buffer can can be found */
+/* Address in PFOW where prog buffer can be found */
#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
/* Size of program buffer in words */
#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
@@ -101,9 +102,6 @@ static inline void send_pfow_command(struct map_info *map,
unsigned long len, map_word *datum)
{
int bits_per_chip = map_bankwidth(map) * 8;
- int chipnum;
- struct lpddr_private *lpddr = map->fldrv_priv;
- chipnum = adr >> lpddr->chipshift;
map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
@@ -123,37 +121,4 @@ static inline void send_pfow_command(struct map_info *map,
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
}
-
-static inline void print_drs_error(unsigned dsr)
-{
- int prog_status = (dsr & DSR_RPS) >> 8;
-
- if (!(dsr & DSR_AVAILABLE))
- printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
- if (prog_status & 0x03)
- printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
- "half with 41h command\n");
- else if (prog_status & 0x02)
- printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
- "in region with Control Mode data\n");
- else if (prog_status & 0x01)
- printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
- "with Object Mode data\n");
- if (!(dsr & DSR_READY_STATUS))
- printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
- if (dsr & DSR_ESS)
- printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
- if (dsr & DSR_ERASE_STATUS)
- printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
- if (dsr & DSR_PROGRAM_STATUS)
- printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
- if (dsr & DSR_VPPS)
- printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
- "aborted\n");
- if (dsr & DSR_PSS)
- printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
- if (dsr & DSR_DPS)
- printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
- "on locked block\n");
-}
#endif /* __LINUX_MTD_PFOW_H */
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index aa6a2633c2da..bfaa9cc1dc84 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* For boards with physically mapped flash and using
* drivers/mtd/maps/physmap.c mapping driver.
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MTD_PHYSMAP__
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h
index 8dfb7e1421c5..085b639c9538 100644
--- a/include/linux/mtd/pismo.h
+++ b/include/linux/mtd/pismo.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* PISMO memory driver - http://www.pismoworld.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef __LINUX_MTD_PISMO_H
#define __LINUX_MTD_PISMO_H
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
index 44212d65aa97..09441856d244 100644
--- a/include/linux/mtd/plat-ram.h
+++ b/include/linux/mtd/plat-ram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* linux/include/linux/mtd/plat-ram.h
*
* (c) 2004 Simtec Electronics
@@ -5,11 +6,6 @@
* Ben Dooks <ben@simtec.co.uk>
*
* Generic platform device based RAM map
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_MTD_PLATRAM_H
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
new file mode 100644
index 000000000000..bc11eb6b593b
--- /dev/null
+++ b/include/linux/mtd/platnand.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all platform NAND related definitions.
+ */
+
+#ifndef __LINUX_MTD_PLATNAND_H
+#define __LINUX_MTD_PLATNAND_H
+
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips: max. number of chips to scan for
+ * @chip_offset: chip number offset
+ * @nr_partitions: number of partitions pointed to by partitions (or zero)
+ * @partitions: mtd partition list
+ * @chip_delay: R/B delay value in us
+ * @options: Option flags, e.g. 16bit buswidth
+ * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @part_probe_types: NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+ int nr_chips;
+ int chip_offset;
+ int nr_partitions;
+ struct mtd_partition *partitions;
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
+};
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
+ * @dev_ready: platform specific function to read ready/busy pin
+ * @select_chip: platform specific chip select function
+ * @cmd_ctrl: platform specific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
+ * @priv: private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
+ int (*dev_ready)(struct nand_chip *chip);
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len);
+ void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip: chip level chip structure
+ * @ctrl: controller level device structure
+ */
+struct platform_nand_data {
+ struct platform_nand_chip chip;
+ struct platform_nand_ctrl ctrl;
+};
+
+#endif /* __LINUX_MTD_PLATNAND_H */
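As a sketch of how a board file might use these structures with the generic platform NAND driver (the latch offsets, delay and partition layout below are assumptions for illustration only):

static struct mtd_partition example_nand_parts[] = {
	{ .name = "ubi", .offset = 0, .size = MTDPART_SIZ_FULL },
};

static void example_cmd_ctrl(struct nand_chip *chip, int dat,
			     unsigned int ctrl)
{
	if (dat == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(dat, chip->legacy.IO_ADDR_W + 0x10);	/* CLE latch */
	else if (ctrl & NAND_ALE)
		writeb(dat, chip->legacy.IO_ADDR_W + 0x20);	/* ALE latch */
	else
		writeb(dat, chip->legacy.IO_ADDR_W);
}

static struct platform_nand_data example_nand_data = {
	.chip = {
		.nr_chips	= 1,
		.chip_delay	= 20,
		.partitions	= example_nand_parts,
		.nr_partitions	= ARRAY_SIZE(example_nand_parts),
	},
	.ctrl = {
		.cmd_ctrl	= example_cmd_ctrl,
	},
};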
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 7b3d487d8b3f..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MTD_QINFO_H
#define __LINUX_MTD_QINFO_H
@@ -14,7 +15,7 @@
* @DevId - Chip Device ID
* @qinfo - pointer to qinfo records describing the chip
 * @numchips - number of chips including virtual RWW partitions
- * @chipshift - Chip/partiton size 2^chipshift
+ * @chipshift - Chip/partition size 2^chipshift
* @chips - per-chip data structure
*/
struct lpddr_private {
@@ -23,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
new file mode 100644
index 000000000000..d30bdc3fcfd7
--- /dev/null
+++ b/include/linux/mtd/rawnand.h
@@ -0,0 +1,1640 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Info:
+ * Contains standard defines and IDs for NAND flash devices
+ *
+ * Changelog:
+ * See git changelog.
+ */
+#ifndef __LINUX_MTD_RAWNAND_H
+#define __LINUX_MTD_RAWNAND_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/jedec.h>
+#include <linux/mtd/onfi.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+struct nand_chip;
+struct gpio_desc;
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/*
+ * Constants for hardware specific CLE/ALE/NCE function
+ *
+ * These are bits which can be or'ed to set/clear multiple
+ * bits in one go.
+ */
+/* Select the chip by setting nCE to low */
+#define NAND_NCE 0x01
+/* Select the command latch by setting CLE to high */
+#define NAND_CLE 0x02
+/* Select the address latch by setting ALE to high */
+#define NAND_ALE 0x04
+
+#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
+#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
+#define NAND_CTRL_CHANGE 0x80
+
+/*
+ * Standard NAND flash commands
+ */
+#define NAND_CMD_READ0 0
+#define NAND_CMD_READ1 1
+#define NAND_CMD_RNDOUT 5
+#define NAND_CMD_PAGEPROG 0x10
+#define NAND_CMD_READOOB 0x50
+#define NAND_CMD_ERASE1 0x60
+#define NAND_CMD_STATUS 0x70
+#define NAND_CMD_SEQIN 0x80
+#define NAND_CMD_RNDIN 0x85
+#define NAND_CMD_READID 0x90
+#define NAND_CMD_ERASE2 0xd0
+#define NAND_CMD_PARAM 0xec
+#define NAND_CMD_GET_FEATURES 0xee
+#define NAND_CMD_SET_FEATURES 0xef
+#define NAND_CMD_RESET 0xff
+
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_READCACHESEQ 0x31
+#define NAND_CMD_READCACHEEND 0x3f
+#define NAND_CMD_RNDOUTSTART 0xE0
+#define NAND_CMD_CACHEDPROG 0x15
+
+#define NAND_CMD_NONE -1
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+#define NAND_DATA_IFACE_CHECK_ONLY -1
+
+/*
+ * Constants for Hardware ECC
+ */
+/* Reset Hardware ECC for read */
+#define NAND_ECC_READ 0
+/* Reset Hardware ECC for write */
+#define NAND_ECC_WRITE 1
+/* Enable Hardware ECC before syndrome is read back from flash */
+#define NAND_ECC_READSYN 2
+
+/*
+ * Enable generic NAND 'page erased' check. This check is only done when
+ * ecc.correct() returns -EBADMSG.
+ * Set this flag if your implementation does not fix bitflips in erased
+ * pages and you want to rely on the default implementation.
+ */
+#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
+
+/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
+ * When using software implementation of Hamming, we can specify which byte
+ * ordering should be used.
+ */
+#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
+
+/* Chip has cache program function */
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY BIT(8)
+
+/* Chip does not allow subpage writes */
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
+
+/* Device is one of 'new' xD cards that expose fake nand command set */
+#define NAND_BROKEN_XD BIT(10)
+
+/* Device behaves just like nand, but is readonly */
+#define NAND_ROM BIT(11)
+
+/* Device supports subpage reads */
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING BIT(13)
+
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3 BIT(14)
+
+/* Non chip related options */
+/* This option skips the bbt scan during initialization. */
+#define NAND_SKIP_BBTSCAN BIT(16)
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
+/*
+ * Autodetect NAND bus width with READID/ONFI.
+ * This assumes the driver will configure the hardware in 8-bit mode
+ * when calling nand_scan_ident(), and update its configuration
+ * before calling nand_scan_tail().
+ */
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USES_DMA BIT(20)
+
+/*
+ * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
+ * on the default ->cmdfunc() implementation, you may want to let the core
+ * handle the tCCS delay which is required when a column change (RNDIN or
+ * RNDOUT) is requested.
+ * If your controller already takes care of this delay, you don't need to set
+ * this flag.
+ */
+#define NAND_WAIT_TCCS BIT(21)
+
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM or similar restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM BIT(22)
+
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
+
+/* Cell info constants */
+#define NAND_CI_CHIPNR_MSK 0x03
+#define NAND_CI_CELLTYPE_MSK 0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
+
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be retrieved
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+ /* Generic parameters */
+ const char *model;
+ bool supports_set_get_features;
+ bool supports_read_cache;
+ DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+ /* ONFI parameters */
+ struct onfi_params *onfi;
+};
+
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/**
+ * struct nand_id - NAND id structure
+ * @data: buffer containing the id bytes.
+ * @len: ID length.
+ */
+struct nand_id {
+ u8 data[NAND_MAX_ID_LEN];
+ int len;
+};
+
+/**
+ * struct nand_ecc_step_info - ECC step information of ECC engine
+ * @stepsize: data bytes per ECC step
+ * @strengths: array of supported strengths
+ * @nstrengths: number of supported strengths
+ */
+struct nand_ecc_step_info {
+ int stepsize;
+ const int *strengths;
+ int nstrengths;
+};
+
+/**
+ * struct nand_ecc_caps - capability of ECC engine
+ * @stepinfos: array of ECC step information
+ * @nstepinfos: number of ECC step information
+ * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
+ */
+struct nand_ecc_caps {
+ const struct nand_ecc_step_info *stepinfos;
+ int nstepinfos;
+ int (*calc_ecc_bytes)(int step_size, int strength);
+};
+
+/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
+#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \
+static const int __name##_strengths[] = { __VA_ARGS__ }; \
+static const struct nand_ecc_step_info __name##_stepinfo = { \
+ .stepsize = __step, \
+ .strengths = __name##_strengths, \
+ .nstrengths = ARRAY_SIZE(__name##_strengths), \
+}; \
+static const struct nand_ecc_caps __name = { \
+ .stepinfos = &__name##_stepinfo, \
+ .nstepinfos = 1, \
+ .calc_ecc_bytes = __calc, \
+}
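For instance, a hypothetical controller whose engine works on 512-byte steps with 4- or 8-bit strength, consuming 7 or 13 ECC bytes per step respectively, could declare its capabilities as follows and later feed them to nand_ecc_choose_conf():

static int example_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 7 : 13;
}
NAND_ECC_CAPS_SINGLE(example_ecc_caps, example_calc_ecc_bytes, 512, 4, 8);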
+
+/**
+ * struct nand_ecc_ctrl - Control structure for ECC
+ * @engine_type: ECC engine type
+ * @placement: OOB bytes placement
+ * @algo: ECC algorithm
+ * @steps: number of ECC steps per page
+ * @size: data bytes per ECC step
+ * @bytes: ECC bytes per step
+ * @strength: max number of correctable bits per ECC step
+ * @total: total number of ECC bytes per page
+ * @prepad: padding information for syndrome based ECC generators
+ * @postpad: padding information for syndrome based ECC generators
+ * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
+ * @hwctl: function to control hardware ECC generator. Must only
+ *		be provided if a hardware ECC is available
+ * @calculate: function for ECC calculation or readback from ECC hardware
+ * @correct: function for ECC correction, matching to ECC generator (sw/hw).
+ * Should return a positive number representing the number of
+ *		corrected bitflips, -EBADMSG if the number of bitflips exceeds
+ *		the ECC strength, or any other error code if the error is not
+ * directly related to correction.
+ * If -EBADMSG is returned the input buffers should be left
+ * untouched.
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
+ * out-of-band data).
+ * @read_page: function to read a page according to the ECC generator
+ * requirements; returns maximum number of bitflips corrected in
+ * any single ECC step, -EIO hw error
+ * @read_subpage: function to read parts of the page covered by ECC;
+ * returns same as read_page()
+ * @write_subpage: function to write parts of the page covered by ECC.
+ * @write_page: function to write a page according to the ECC generator
+ * requirements.
+ * @write_oob_raw: function to write chip OOB data without ECC
+ * @read_oob_raw: function to read chip OOB data without ECC
+ * @read_oob: function to read chip OOB data
+ * @write_oob: function to write chip OOB data
+ */
+struct nand_ecc_ctrl {
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
+ enum nand_ecc_algo algo;
+ int steps;
+ int size;
+ int bytes;
+ int total;
+ int strength;
+ int prepad;
+ int postpad;
+ unsigned int options;
+ u8 *calc_buf;
+ u8 *code_buf;
+ void (*hwctl)(struct nand_chip *chip, int mode);
+ int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*read_page)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required, int page);
+ int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*write_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob)(struct nand_chip *chip, int page);
+ int (*write_oob)(struct nand_chip *chip, int page);
+};
+
+/**
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an SDR NAND chip.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specification:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tALH_min: ALE hold time
+ * @tADL_min: ALE to data loading time
+ * @tALS_min: ALE setup time
+ * @tAR_min: ALE to RE# delay
+ * @tCEA_max: CE# access time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCHZ_max: CE# high to output hi-Z
+ * @tCLH_min: CLE hold time
+ * @tCLR_min: CLE to RE# delay
+ * @tCLS_min: CLE setup time
+ * @tCOH_min: CE# high to output hold
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDS_min: Data setup time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tIR_min: Output hi-Z to RE# low
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tRC_min: RE# cycle time
+ * @tREA_max: RE# access time
+ * @tREH_min: RE# high hold time
+ * @tRHOH_min: RE# high to output hold
+ * @tRHW_min: RE# high to WE# low
+ * @tRHZ_max: RE# high to output hi-Z
+ * @tRLOH_min: RE# low to output hold
+ * @tRP_min: RE# pulse width
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWC_min: WE# cycle time
+ * @tWH_min: WE# high hold time
+ * @tWHR_min: WE# high to RE# low
+ * @tWP_min: WE# pulse width
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_sdr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tALH_min;
+ u32 tADL_min;
+ u32 tALS_min;
+ u32 tAR_min;
+ u32 tCEA_max;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCHZ_max;
+ u32 tCLH_min;
+ u32 tCLR_min;
+ u32 tCLS_min;
+ u32 tCOH_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDS_min;
+ u32 tFEAT_max;
+ u32 tIR_min;
+ u32 tITC_max;
+ u32 tRC_min;
+ u32 tREA_max;
+ u32 tREH_min;
+ u32 tRHOH_min;
+ u32 tRHW_min;
+ u32 tRHZ_max;
+ u32 tRLOH_min;
+ u32 tRP_min;
+ u32 tRR_min;
+ u64 tRST_max;
+ u32 tWB_max;
+ u32 tWC_min;
+ u32 tWH_min;
+ u32 tWHR_min;
+ u32 tWP_min;
+ u32 tWW_min;
+};
+
+/**
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specification:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. IOW, the following entries which describe the internal delays have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The macros below return the value of a given timing, regardless of the
+ * interface type.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+/**
+ * enum nand_interface_type - NAND interface type
+ * @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
+ */
+enum nand_interface_type {
+ NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
+};
+
+/**
+ * struct nand_interface_config - NAND interface timing
+ * @type: type of the timing
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
+ */
+struct nand_interface_config {
+ enum nand_interface_type type;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
+ };
+ } timings;
+};
+
+/**
+ * nand_interface_is_sdr - get the interface type
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - get the interface type
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
+ * nand_get_sdr_timings - get SDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_sdr_timings *
+nand_get_sdr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_sdr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.sdr;
+}
+
+/**
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
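As an illustration of these accessors, a controller's ->setup_interface() hook (see nand_controller_ops further down) could look roughly like this; the actual register programming is controller specific and only hinted at:

static int example_setup_interface(struct nand_chip *chip, int chipnr,
				   const struct nand_interface_config *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);	/* e.g. NV-DDR not supported here */

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;		/* only validate, do not apply */

	/* ... program tWP_min/tWH_min & friends into controller registers ... */
	return 0;
}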
+
+/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @ctx: extra data associated to the instruction. You'll have to use the
+ * appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ * or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timings
+ * control logic, and in this case, the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase) which all of them are u64 values that cannot be
+ * divided by usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor)); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
+/**
+ * struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ unsigned int cs;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operation with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
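Putting the pattern macros together, a hypothetical controller that can issue one command, up to five address cycles, an optional second command, an optional wait-ready and up to 4 KiB of data in a single step might describe itself like this and dispatch everything through nand_op_parser_exec_op() from its ->exec_op() hook (all names are made up for the example):

static int example_exec_pattern(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	/* Walk subop->instrs[0..subop->ninstrs) and drive the bus here. */
	return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		example_exec_pattern,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op, check_only);
}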
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ unsigned int cs;
+ bool deassert_wp;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
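To show how the instruction macros and the operation descriptor combine, here is a sketch of a READ ID sequence, in essence what the nand_readid_op() helper declared further down builds internally; delays are left at 0 for brevity and the helper name is illustrative:

static int example_read_id(struct nand_chip *chip, u8 addr, void *buf,
			   unsigned int len)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READID, 0),
		NAND_OP_ADDR(1, &addr, 0),
		NAND_OP_8BIT_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);

	/* Hand the operation to the controller driver. */
	return chip->controller->ops->exec_op(chip, &op, false);
}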
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
+static inline void nand_op_trace(const char *prefix,
+ const struct nand_op_instr *instr)
+{
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+#endif
+}
+
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase, once the
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ * should not be applied but only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: lock used to serialize accesses to the NAND controller
+ * @ops: NAND controller operations.
+ * @supported_op: NAND controller known-to-be-supported operations,
+ * only writable by the core after initial checking.
+ * @supported_op.data_only_read: The controller supports reading more data from
+ *	the bus without restarting an entire read operation or
+ * changing the column.
+ * @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
+ */
+struct nand_controller {
+ struct mutex lock;
+ const struct nand_controller_ops *ops;
+ struct {
+ unsigned int data_only_read: 1;
+ unsigned int cont_read: 1;
+ } supported_op;
+ bool controller_wp;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ mutex_init(&nfc->lock);
+}
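Tying the pieces together, a controller driver typically initializes the shared structure once and points it at its operations; the names below refer to the illustrative sketches above, not to any real driver:

static const struct nand_controller_ops example_ctrl_ops = {
	.exec_op	 = example_exec_op,		/* parser sketch above */
	.setup_interface = example_setup_interface,	/* timings sketch above */
};

static void example_controller_setup(struct nand_controller *nfc,
				     struct nand_chip *chip)
{
	nand_controller_init(nfc);
	nfc->ops = &example_ctrl_ops;
	chip->controller = nfc;
}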
+
+/**
+ * struct nand_legacy - NAND chip legacy fields/hooks
+ * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
+ * @read_byte: read one byte from the chip
+ * @write_byte: write a single byte to the chip on the low 8 I/O lines
+ * @write_buf: write data from the buffer to the chip
+ * @read_buf: read data from the chip into the buffer
+ * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
+ * to write command and address
+ * @cmdfunc: hardware specific function for writing commands to the chip.
+ * @dev_ready: hardware specific function for accessing device ready/busy line.
+ * If set to NULL no access to ready/busy is available and the
+ * ready/busy information is read from the chip status register.
+ * @waitfunc: hardware specific function for wait on ready.
+ * @block_bad: check if a block is bad, using OOB markers
+ * @block_markbad: mark a block bad
+ * @set_features: set the NAND chip features
+ * @get_features: get the NAND chip features
+ * @chip_delay: chip dependent delay for transferring data from array to read
+ * regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
+ *
+ * If you look at this structure you're already wrong. These fields/hooks are
+ * all deprecated.
+ */
+struct nand_legacy {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ u8 (*read_byte)(struct nand_chip *chip);
+ void (*write_byte)(struct nand_chip *chip, u8 byte);
+ void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
+ int page_addr);
+ int (*dev_ready)(struct nand_chip *chip);
+ int (*waitfunc)(struct nand_chip *chip);
+ int (*block_bad)(struct nand_chip *chip, loff_t ofs);
+ int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
+ int (*set_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int (*get_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int chip_delay;
+ struct nand_controller dummy_controller;
+};
+
+/**
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*choose_interface_config)(struct nand_chip *chip,
+ struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
+ */
+struct nand_manufacturer {
+ const struct nand_manufacturer_desc *desc;
+ void *priv;
+};
+
+/**
+ * struct nand_secure_region - NAND secure region structure
+ * @offset: Offset of the start of the secure region
+ * @size: Size of the secure region
+ */
+struct nand_secure_region {
+ u64 offset;
+ u64 size;
+};
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ * to use any of these fields/hooks, and if you're modifying an
+ * existing driver that is using those fields/hooks, you should
+ * consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ * about special functionality. See the defines for further
+ * explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ * the NAND chip and NAND controller constraints. If
+ * unset, the default reset interface configuration must
+ * be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ * come from bbm.h. By default, these options will be copied to
+ * the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ * position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ * to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @resume_wq: wait queue to sleep if rawnand is in suspended state.
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ * NAND Controller drivers should not modify this value, but they're
+ * allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @secure_regions: Structure containing the secure regions info
+ * @nr_secure_regions: Number of secure regions
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
+struct nand_chip {
+ struct nand_device base;
+ struct nand_id id;
+ struct nand_parameters parameters;
+ struct nand_manufacturer manufacturer;
+ struct nand_chip_ops ops;
+ struct nand_legacy legacy;
+ unsigned int options;
+
+ /* Data interface */
+ const struct nand_interface_config *current_interface_config;
+ struct nand_interface_config *best_interface_config;
+
+ /* Bad block information */
+ unsigned int bbt_erase_shift;
+ unsigned int bbt_options;
+ unsigned int badblockpos;
+ unsigned int badblockbits;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ u8 *bbt;
+
+ /* Device internal layout */
+ unsigned int page_shift;
+ unsigned int phys_erase_shift;
+ unsigned int chip_shift;
+ unsigned int pagemask;
+ unsigned int subpagesize;
+
+ /* Buffers */
+ u8 *data_buf;
+ u8 *oob_poi;
+ struct {
+ unsigned int bitflips;
+ int page;
+ } pagecache;
+ unsigned long buf_align;
+
+ /* Internals */
+ struct mutex lock;
+ unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
+ int cur_cs;
+ int read_retries;
+ struct nand_secure_region *secure_regions;
+ u8 nr_secure_regions;
+ struct {
+ bool ongoing;
+ unsigned int first_page;
+ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
+
+ /* Externals */
+ struct nand_controller *controller;
+ struct nand_ecc_ctrl ecc;
+ void *priv;
+};
+
+static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_chip, base.mtd);
+}
+
+static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
+{
+ return &chip->base.mtd;
+}
+
+static inline void *nand_get_controller_data(struct nand_chip *chip)
+{
+ return chip->priv;
+}
+
+static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
+{
+ chip->priv = priv;
+}
+
+static inline void nand_set_manufacturer_data(struct nand_chip *chip,
+ void *priv)
+{
+ chip->manufacturer.priv = priv;
+}
+
+static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
+{
+ return chip->manufacturer.priv;
+}
+
+static inline void nand_set_flash_node(struct nand_chip *chip,
+ struct device_node *np)
+{
+ mtd_set_of_node(nand_to_mtd(chip), np);
+}
+
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+{
+ return mtd_get_of_node(nand_to_mtd(chip));
+}
+
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ * of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+ return chip->current_interface_config;
+}
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have 512 bytes NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+ .options = (opts) }
+
+#define NAND_ECC_INFO(_strength, _step) \
+ { .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type) ((type)->ecc.step_ds)
+
+/**
+ * struct nand_flash_dev - NAND Flash Device ID Structure
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
+ *	    memory address as ``id[0]``)
+ * @dev_id: device ID part of the full chip ID array (refers to the same memory
+ *	    address as ``id[1]``)
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ * well as the eraseblock size) is determined from the extended NAND
+ *	      chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
+ * @ecc: ECC correctability and step information from the datasheet.
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ * @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ * @ecc_step_ds in nand_chip{}, also from the datasheet.
+ * For example, the "4bit ECC for each 512Byte" can be set with
+ * NAND_ECC_INFO(4, 512).
+ */
+struct nand_flash_dev {
+ char *name;
+ union {
+ struct {
+ uint8_t mfr_id;
+ uint8_t dev_id;
+ };
+ uint8_t id[NAND_MAX_ID_LEN];
+ };
+ unsigned int pagesize;
+ unsigned int chipsize;
+ unsigned int erasesize;
+ unsigned int options;
+ uint16_t id_len;
+ uint16_t oobsize;
+ struct {
+ uint16_t strength_ds;
+ uint16_t step_ds;
+ } ecc;
+};
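As an illustration (the entries are made up for the example; the real tables live in drivers/mtd/nand/raw/nand_ids.c, and SZ_16K is assumed from <linux/sizes.h>), such a table ends with an empty entry and can be handed to nand_scan_with_ids() declared further down:

static struct nand_flash_dev example_nand_ids[] = {
	/* Small page: geometry fully encoded in the second ID byte */
	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, 0),
	/* Large page: geometry decoded from the extended ID bytes */
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xf1, 128, 0),
	{NULL}
};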
+
+int nand_create_bbt(struct nand_chip *chip);
+
+/*
+ * Check if it is an SLC NAND.
+ * !nand_is_slc() can be used to check for MLC/TLC NAND chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+ WARN(nanddev_bits_per_cell(&chip->base) == 0,
+ "chip->bits_per_cell is used uninitialized\n");
+ return nanddev_bits_per_cell(&chip->base) == 1;
+}
+
+/**
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on the
+ * lower 8 bits
+ * @command: opcode to check
+ */
+static inline int nand_opcode_8bits(unsigned int command)
+{
+ switch (command) {
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ case NAND_CMD_GET_FEATURES:
+ case NAND_CMD_SET_FEATURES:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int rawnand_sw_hamming_init(struct nand_chip *chip);
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code);
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
+int rawnand_sw_bch_init(struct nand_chip *chip);
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+void rawnand_sw_bch_cleanup(struct nand_chip *chip);
+
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+/* Default write_oob implementation */
+int nand_write_oob_std(struct nand_chip *chip, int page);
+
+/* Default read_oob implementation */
+int nand_read_oob_std(struct nand_chip *chip, int page);
+
+/* Stub used by drivers that do not support GET/SET FEATURES operations */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
+
+/* read_page_raw implementations */
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+
+/* write_page_raw implementations */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+
+/* Reset and initialize a NAND device */
+int nand_reset(struct nand_chip *chip, int chipnr);
+
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
+{
+ return nand_scan_with_ids(chip, max_chips, NULL);
+}
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct nand_chip *chip);
+
+/*
+ * Free resources held by the NAND device, must be called on error after a
+ * successful nand_scan().
+ */
+void nand_cleanup(struct nand_chip *chip);
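As a minimal, illustrative sketch of the detection/registration sequence seen from a controller driver: the function name is hypothetical, the chip and its controller hooks are assumed to be already set up, and nand_to_mtd()/mtd_device_register() are assumed from the wider MTD headers.

static int my_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_scan(chip, 1);		/* identify one CS, set up ECC */
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);		/* undo the successful nand_scan() */

	return ret;
}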
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
+
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
+/**
+ * nand_get_data_buf() - Get the internal page buffer
+ * @chip: NAND chip object
+ *
+ * Returns the pre-allocated page buffer after invalidating the cache. This
+ * function should be used by drivers that do not want to allocate their own
+ * bounce buffer and still need such a buffer for specific operations (most
+ * commonly when reading OOB data only).
+ *
+ * Be careful to never call this function in the write/write_oob path, because
+ * the core may have placed the data to be written out in this buffer.
+ *
+ * Return: pointer to the page cache buffer
+ */
+static inline void *nand_get_data_buf(struct nand_chip *chip)
+{
+ chip->pagecache.page = -1;
+
+ return chip->data_buf;
+}
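A rough, hypothetical sketch of the intended usage (read path only, per the warning above): fetch the shared buffer, then fill it with a raw page read so the trailing OOB bytes can be picked out. mtd->writesize and mtd->oobsize come from the attached mtd_info.

/* Sketch only: OOB-only read via the shared page buffer (never on writes). */
static int my_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	/* Pull page + OOB into the bounce buffer; OOB sits after writesize. */
	return nand_read_page_op(chip, page, 0, buf,
				 mtd->writesize + mtd->oobsize);
}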
+
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
+#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 1c28f8879b1c..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
@@ -22,7 +10,7 @@
#include <linux/completion.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/pm_qos.h>
@@ -143,11 +131,11 @@ enum flctl_ecc_res_t {
struct dma_chan;
struct sh_flctl {
- struct mtd_info mtd;
struct nand_chip chip;
struct platform_device *pdev;
struct dev_pm_qos_request pm_qos;
void __iomem *reg;
+ resource_size_t fifo;
uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
int read_bytes;
@@ -186,7 +174,7 @@ struct sh_flctl_platform_data {
static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
{
- return container_of(mtdinfo, struct sh_flctl, mtd);
+ return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
}
#endif /* __SH_FLCTL_H__ */
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index 25f4d2a845c1..231bd1c3f408 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -1,20 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SharpSL NAND support
*
* Copyright (C) 2008 Dmitry Baryshkov
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
+#ifndef _MTD_SHARPSL_H
+#define _MTD_SHARPSL_H
+
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
struct sharpsl_nand_platform_data {
struct nand_bbt_descr *badblock_pattern;
- struct nand_ecclayout *ecc_layout;
+ const struct mtd_ooblayout_ops *ecc_layout;
struct mtd_partition *partitions;
unsigned int nr_partitions;
+ const char *const *part_parsers;
};
+
+#endif /* _MTD_SHARPSL_H */
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
index 8ae1726044c3..871634862627 100644
--- a/include/linux/mtd/spear_smi.h
+++ b/include/linux/mtd/spear_smi.h
@@ -1,6 +1,6 @@
/*
* Copyright © 2010 ST Microelectronics
- * Shiraz Hashim <shiraz.hashim@st.com>
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -31,12 +31,12 @@
* struct spear_smi_flash_info - platform structure for passing flash
* information
*
- * name: name of the serial nor flash for identification
- * mem_base: the memory base on which the flash is mapped
- * size: size of the flash in bytes
- * partitions: parition details
- * nr_partitions: number of partitions
- * fast_mode: whether flash supports fast mode
+ * @name: name of the serial nor flash for identification
+ * @mem_base: the memory base on which the flash is mapped
+ * @size: size of the flash in bytes
+ * @partitions: partition details
+ * @nr_partitions: number of partitions
+ * @fast_mode: whether flash supports fast mode
*/
struct spear_smi_flash_info {
@@ -51,9 +51,10 @@ struct spear_smi_flash_info {
/**
* struct spear_smi_plat_data - platform structure for configuring smi
*
- * clk_rate: clk rate at which SMI must operate
- * num_flashes: number of flashes present on board
- * board_flash_info: specific details of each flash present on board
+ * @clk_rate: clk rate at which SMI must operate
+ * @num_flashes: number of flashes present on board
+ * @board_flash_info: specific details of each flash present on board
+ * @np: array of DT node pointers for all possible flash chip devices
*/
struct spear_smi_plat_data {
unsigned long clk_rate;
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
new file mode 100644
index 000000000000..cdcfe0fd2e7d
--- /dev/null
+++ b/include/linux/mtd/spi-nor.h
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_H
+#define __LINUX_MTD_SPI_NOR_H
+
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * Note on opcode nomenclature: some opcodes have a format like
+ * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
+ * of I/O lines used for the opcode, address, and data (respectively). The
+ * FUNCTION has an optional suffix of '4', to represent an opcode which
+ * requires a 4-byte (32-bit) address.
+ */
+
+/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
+#define SPINOR_OP_WREN 0x06 /* Write enable */
+#define SPINOR_OP_RDSR 0x05 /* Read status register */
+#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
+#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */
+#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */
+#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */
+#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
+#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
+#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */
+#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
+#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
+#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
+#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */
+#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */
+#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
+#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
+#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
+#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
+#define SPINOR_OP_SRST 0x99 /* Software Reset */
+#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
+
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */
+#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
+#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
+#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */
+#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
+#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
+#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
+
+/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */
+#define SPINOR_OP_READ_1_1_1_DTR 0x0d
+#define SPINOR_OP_READ_1_2_2_DTR 0xbd
+#define SPINOR_OP_READ_1_4_4_DTR 0xed
+
+#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e
+#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe
+#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee
+
+/* Used for SST flashes only. */
+#define SPINOR_OP_BP 0x02 /* Byte program */
+#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+
+/* Used for Macronix and Winbond flashes. */
+#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
+#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
+
+/* Used for Spansion flashes only. */
+#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+
+/* Used for GigaDevices and Winbond flashes. */
+#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */
+#define SPINOR_OP_PSECR 0x42 /* Program Security registers */
+#define SPINOR_OP_RSECR 0x48 /* Read Security registers */
+
+/* Status Register bits. */
+#define SR_WIP BIT(0) /* Write in progress */
+#define SR_WEL BIT(1) /* Write enable latch */
+/* meaning of other SR_* bits may differ between vendors */
+#define SR_BP0 BIT(2) /* Block protect 0 */
+#define SR_BP1 BIT(3) /* Block protect 1 */
+#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP3 BIT(5) /* Block protect 3 */
+#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
+#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
+#define SR_SRWD BIT(7) /* SR write protect */
+/* Spansion/Cypress specific status bits */
+#define SR_E_ERR BIT(5)
+#define SR_P_ERR BIT(6)
+
+#define SR1_QUAD_EN_BIT6 BIT(6)
+
+#define SR_BP_SHIFT 2
+
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
+
+/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT1 BIT(1)
+#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */
+#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */
+#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */
+#define SR2_QUAD_EN_BIT7 BIT(7)
+
+/* Supported SPI protocols */
+#define SNOR_PROTO_INST_MASK GENMASK(23, 16)
+#define SNOR_PROTO_INST_SHIFT 16
+#define SNOR_PROTO_INST(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \
+ SNOR_PROTO_INST_MASK)
+
+#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8)
+#define SNOR_PROTO_ADDR_SHIFT 8
+#define SNOR_PROTO_ADDR(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \
+ SNOR_PROTO_ADDR_MASK)
+
+#define SNOR_PROTO_DATA_MASK GENMASK(7, 0)
+#define SNOR_PROTO_DATA_SHIFT 0
+#define SNOR_PROTO_DATA(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \
+ SNOR_PROTO_DATA_MASK)
+
+#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */
+
+#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_INST(_inst_nbits) | \
+ SNOR_PROTO_ADDR(_addr_nbits) | \
+ SNOR_PROTO_DATA(_data_nbits))
+#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_IS_DTR | \
+ SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits))
+
+enum spi_nor_protocol {
+ SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1),
+ SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2),
+ SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4),
+ SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8),
+ SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2),
+ SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4),
+ SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8),
+ SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2),
+ SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4),
+ SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8),
+
+ SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1),
+ SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
+ SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
+ SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+ SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8),
+};
+
+static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
+{
+ return !!(proto & SNOR_PROTO_IS_DTR);
+}
+
+static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >>
+ SNOR_PROTO_INST_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >>
+ SNOR_PROTO_ADDR_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+ SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+ return spi_nor_get_protocol_data_nbits(proto);
+}
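The helpers above simply unpack the bus widths encoded in the enum values; for instance SNOR_PROTO_1_4_4 yields 1 instruction, 4 address and 4 data lines. A hypothetical capability check in a controller driver might look like:

static bool my_controller_supports(enum spi_nor_protocol proto)
{
	/* Hypothetical controller: up to quad I/O, no DTR support. */
	return !spi_nor_protocol_is_dtr(proto) &&
	       spi_nor_get_protocol_addr_nbits(proto) <= 4 &&
	       spi_nor_get_protocol_data_nbits(proto) <= 4;
}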
+
+/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask: the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+ u32 mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * For performance reasons, Octal SPI protocols should be preferred over Quad
+ * SPI protocols, Quad over Dual, Dual over Fast Read, and Fast Read over
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0)
+#define SNOR_HWCAPS_READ BIT(0)
+#define SNOR_HWCAPS_READ_FAST BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
+
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11)
+#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16)
+#define SNOR_HWCAPS_PP BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23)
+
+#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \
+ SNOR_HWCAPS_READ_4_4_4 | \
+ SNOR_HWCAPS_READ_8_8_8 | \
+ SNOR_HWCAPS_PP_4_4_4 | \
+ SNOR_HWCAPS_PP_8_8_8)
+
+#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \
+ SNOR_HWCAPS_PP_8_8_8_DTR)
+
+#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \
+ SNOR_HWCAPS_READ_1_2_2_DTR | \
+ SNOR_HWCAPS_READ_1_4_4_DTR | \
+ SNOR_HWCAPS_READ_1_8_8_DTR | \
+ SNOR_HWCAPS_READ_8_8_8_DTR)
+
+#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
+ SNOR_HWCAPS_PP_MASK)
+
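By way of example, a controller limited to single and quad-output transfers might advertise itself with a mask like the one below (an illustrative sketch, not taken from any driver):

static const struct spi_nor_hwcaps my_hwcaps = {
	.mask = SNOR_HWCAPS_READ |
		SNOR_HWCAPS_READ_FAST |
		SNOR_HWCAPS_READ_1_1_4 |
		SNOR_HWCAPS_PP |
		SNOR_HWCAPS_PP_1_1_4,
};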
+/* Forward declaration that is used in 'struct spi_nor_controller_ops' */
+struct spi_nor;
+
+/**
+ * struct spi_nor_controller_ops - SPI NOR controller driver specific
+ * operations.
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations.
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations.
+ * @read_reg: read out the register.
+ * @write_reg: write data to the register.
+ * @read: read data from the SPI NOR.
+ * @write: write data to the SPI NOR.
+ * @erase: erase a sector of the SPI NOR at the offset @offs; if
+ * not provided by the driver, SPI NOR will send the erase
+ * opcode via write_reg().
+ */
+struct spi_nor_controller_ops {
+ int (*prepare)(struct spi_nor *nor);
+ void (*unprepare)(struct spi_nor *nor);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len);
+
+ ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+};
+
+/**
+ * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode
+ * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy
+ * SPI mode
+ * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode
+ * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode
+ * @SPI_NOR_EXT_HEX: the extension is any hex value. The command and opcode
+ * combine to form a 16-bit opcode.
+ */
+enum spi_nor_cmd_ext {
+ SPI_NOR_EXT_NONE = 0,
+ SPI_NOR_EXT_REPEAT,
+ SPI_NOR_EXT_INVERT,
+ SPI_NOR_EXT_HEX,
+};
+
+/*
+ * Forward declarations that are used internally by the core and manufacturer
+ * drivers.
+ */
+struct flash_info;
+struct spi_nor_manufacturer;
+struct spi_nor_flash_parameter;
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
+ * @lock: the lock for the read/write/erase/lock/unlock operations
+ * @rww: Read-While-Write (RWW) sync lock
+ * @rww.wait: wait queue for the RWW sync
+ * @rww.ongoing_io: the bus is busy
+ * @rww.ongoing_rd: a read is ongoing on the chip
+ * @rww.ongoing_pe: a program/erase is ongoing on the chip
+ * @rww.used_banks: bitmap of the banks in use
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
+ * @bouncebuf: bounce buffer used when the buffer passed by the MTD
+ * layer is not DMA-able
+ * @bouncebuf_size: size of the bounce buffer
+ * @id: The flash's ID bytes. Always contains
+ * SPI_NOR_MAX_ID_LEN bytes.
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
+ * @addr_nbytes: number of address bytes
+ * @erase_opcode: the opcode for erasing a sector
+ * @read_opcode: the read opcode
+ * @read_dummy: the dummy needed by the read operation
+ * @program_opcode: the program opcode
+ * @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
+ * @cmd_ext_type: the command opcode extension type for DTR mode.
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
+ * @debugfs_root: pointer to the debugfs directory
+ * @controller_ops: SPI NOR controller driver specific operations.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
+ * The structure includes legacy flash parameters and
+ * settings that can be overwritten by the spi_nor_fixups
+ * hooks, or dynamically when parsing the SFDP tables.
+ * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
+ * @priv: pointer to the private data
+ */
+struct spi_nor {
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct spi_nor_rww {
+ wait_queue_head_t wait;
+ bool ongoing_io;
+ bool ongoing_rd;
+ bool ongoing_pe;
+ unsigned int used_banks;
+ } rww;
+ struct device *dev;
+ struct spi_mem *spimem;
+ u8 *bouncebuf;
+ size_t bouncebuf_size;
+ u8 *id;
+ const struct flash_info *info;
+ const struct spi_nor_manufacturer *manufacturer;
+ u8 addr_nbytes;
+ u8 erase_opcode;
+ u8 read_opcode;
+ u8 read_dummy;
+ u8 program_opcode;
+ enum spi_nor_protocol read_proto;
+ enum spi_nor_protocol write_proto;
+ enum spi_nor_protocol reg_proto;
+ bool sst_write_second;
+ u32 flags;
+ enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
+ struct dentry *debugfs_root;
+
+ const struct spi_nor_controller_ops *controller_ops;
+
+ struct spi_nor_flash_parameter *params;
+
+ struct {
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc;
+ } dirmap;
+
+ void *priv;
+};
+
+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+ return mtd_get_of_node(&nor->mtd);
+}
+
+/**
+ * spi_nor_scan() - scan the SPI NOR
+ * @nor: the spi_nor structure
+ * @name: the chip type name
+ * @hwcaps: the hardware capabilities supported by the controller driver
+ *
+ * The drivers can use this function to scan the SPI NOR.
+ * In the scanning, it will try to get all the necessary information to
+ * fill the mtd_info{} and the spi_nor{}.
+ *
+ * The chip type name can be provided through the @name parameter.
+ *
+ * Return: 0 for success, others for failure.
+ */
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps);
+
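A hedged sketch of how a flash driver ties this together at probe time; spi_nor allocation and spimem/controller setup are omitted, the function name is hypothetical, and mtd_device_register() is assumed from <linux/mtd/mtd.h>.

static int my_nor_register(struct spi_nor *nor,
			   const struct spi_nor_hwcaps *hwcaps)
{
	int ret;

	/* NULL name: let the core identify the part by its JEDEC ID. */
	ret = spi_nor_scan(nor, NULL, hwcaps);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, NULL, 0);
}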
+#endif
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..ce76f5c632e1
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,771 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_1S_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_1S_1S_1S_OP(naddr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
+ SPI_MEM_OP_ADDR(naddr, 0, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_1S_1S_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_1S_1S_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x8b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xcb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_1S_1S_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD_1S_1S_1S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_1S_1S_4S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+#define SPINAND_PROG_LOAD_1S_1S_8S_OP(addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x82, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
+#define SPINAND_PROG_LOAD_1S_8S_8S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0xc2 : 0xc4, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
+/**
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK 0xa0
+#define BL_ALL_UNLOCKED 0x00
+
+/* configuration register */
+#define REG_CFG 0xb0
+#define CFG_OTP_ENABLE BIT(6)
+#define CFG_ECC_ENABLE BIT(4)
+#define CFG_QUAD_ENABLE BIT(0)
+
+/* status register */
+#define REG_STATUS 0xc0
+#define STATUS_BUSY BIT(0)
+#define STATUS_ERASE_FAILED BIT(2)
+#define STATUS_PROG_FAILED BIT(3)
+#define STATUS_ECC_MASK GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
+#define STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN 5
+/*
+ * For erase, write and read operations, the typical timings are:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the minimum value is divided by 4 for the
+ * initial delay, and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently SPINAND_MAX_ID_LEN bytes
+ * large, but can be extended if required
+ * @len: ID length
+ */
+struct spinand_id {
+ u8 data[SPINAND_MAX_ID_LEN];
+ int len;
+};
+
+enum spinand_readid_method {
+ SPINAND_READID_METHOD_OPCODE,
+ SPINAND_READID_METHOD_OPCODE_ADDR,
+ SPINAND_READID_METHOD_OPCODE_DUMMY,
+};
+
+/**
+ * struct spinand_devid - SPI NAND device id structure
+ * @id: device id of current chip
+ * @len: number of bytes in device id
+ * @method: method to read chip id
+ * There are 3 possible variants:
+ * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
+ * after read_id opcode.
+ * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
+ * read_id opcode + 1-byte address.
+ * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
+ * read_id opcode + 1 dummy byte.
+ */
+struct spinand_devid {
+ const u8 *id;
+ const u8 len;
+ const enum spinand_readid_method method;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be initialized properly.
+ */
+struct spinand_manufacturer_ops {
+ int (*init)(struct spinand_device *spinand);
+ void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @chips: supported SPI NANDs under current manufacturer
+ * @nchips: number of SPI NANDs available in chips array
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+ u8 id;
+ char *name;
+ const struct spinand_info *chips;
+ const size_t nchips;
+ const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_8c_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer fmsh_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer skyhigh_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+ const struct spi_mem_op *ops;
+ unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...) \
+ const struct spinand_op_variants name = { \
+ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
+ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
+ sizeof(struct spi_mem_op), \
+ }
+
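To illustrate, a manufacturer driver usually declares its read-from-cache variants with the fastest option first, so the core can fall back to slower ones the controller cannot execute. The list below is only a sketch: the address, buffer and length arguments (0/NULL) are placeholders filled at run time, and the trailing 0 means no maximum-frequency restriction.

static SPINAND_OP_VARIANTS(my_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));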
+/**
+ * struct spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
+ * chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ * the number of corrected bitflips if correction was possible or
+ * -EBADMSG if there are uncorrectable errors. It can also return
+ * other negative error codes if the error is not caused by
+ * uncorrectable bitflips
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+ int (*get_status)(struct spinand_device *spinand, u8 status);
+ const struct mtd_ooblayout_ops *ooblayout;
+};
+
+#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
+#define SPINAND_NO_RAW_ACCESS BIT(4)
+
+/**
+ * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
+ * @status: status of the last wait operation that will be used in case
+ * ->get_status() is not populated by the spinand device.
+ */
+struct spinand_ondie_ecc_conf {
+ u8 status;
+};
+
+/**
+ * struct spinand_otp_layout - structure to describe the SPI NAND OTP area
+ * @npages: number of pages in the OTP
+ * @start_page: start page of the user/factory OTP area.
+ */
+struct spinand_otp_layout {
+ unsigned int npages;
+ unsigned int start_page;
+};
+
+/**
+ * struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area
+ * @info: get the OTP area information
+ * @read: read from the SPI NAND OTP area
+ */
+struct spinand_fact_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+};
+
+/**
+ * struct spinand_user_otp_ops - SPI NAND OTP methods for user area
+ * @info: get the OTP area information
+ * @lock: lock an OTP region
+ * @erase: erase an OTP region
+ * @read: read from the SPI NAND OTP area
+ * @write: write to the SPI NAND OTP area
+ */
+struct spinand_user_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*lock)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*erase)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+ int (*write)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, const u8 *buf);
+};
+
+/**
+ * struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_fact_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_fact_otp_ops *ops;
+};
+
+/**
+ * struct spinand_user_otp - SPI NAND OTP grouping structure for user area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_user_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_user_otp_ops *ops;
+};
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ * multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
+ * @set_cont_read: enable/disable continuous cached reads
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: enable/disable read retry for data recovery
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+ const char *model;
+ struct spinand_devid devid;
+ u32 flags;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_props eccreq;
+ struct spinand_ecc_info eccinfo;
+ struct {
+ const struct spinand_op_variants *read_cache;
+ const struct spinand_op_variants *write_cache;
+ const struct spinand_op_variants *update_cache;
+ } op_variants;
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+ struct spinand_fact_otp fact_otp;
+ struct spinand_user_otp user_otp;
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int read_retry);
+};
+
+#define SPINAND_ID(__method, ...) \
+ { \
+ .id = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ .method = __method, \
+ }
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ }
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status) \
+ .eccinfo = { \
+ .ooblayout = __ooblayout, \
+ .get_status = __get_status, \
+ }
+
+#define SPINAND_SELECT_TARGET(__func) \
+ .select_target = __func
+
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read
+
+#define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \
+ .fact_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \
+ .user_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \
+ .read_retries = __read_retries, \
+ .set_read_retry = __set_read_retry
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
+ __flags, ...) \
+ { \
+ .model = __model, \
+ .devid = __id, \
+ .memorg = __memorg, \
+ .eccreq = __eccreq, \
+ .op_variants = __op_variants, \
+ .flags = __flags, \
+ __VA_ARGS__ \
+ }
+
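Putting the pieces together, a device table entry built from the macros above might look like the purely hypothetical sketch below. NAND_MEMORG() and NAND_ECCREQ() come from <linux/mtd/nand.h>, and the my_* identifiers (variant lists, OOB layout, status callback) are placeholders that a real driver would define.

static const struct spinand_info my_spinand_table[] = {
	SPINAND_INFO("MY-SPINAND-1Gb",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa1),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&my_read_cache_variants,
					      &my_write_cache_variants,
					      &my_update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&my_ooblayout_ops, my_ecc_get_status)),
};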
+struct spinand_dirmap {
+ struct spi_mem_dirmap_desc *wdesc;
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
+};
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ * a command addressing a page or an eraseblock embedded in
+ * this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ * because the spi-mem interface explicitly requests that buffers
+ * passed in spi_mem_op be DMA-able, so we can't base the bufs on
+ * the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
+ * @cont_read_possible: Field filled by the core once the whole system
+ * configuration is known to tell whether continuous reads are
+ * suitable to use or not in general with this chip/configuration.
+ * A per-transfer check must of course be done to ensure it is
+ * actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
+ * @priv: manufacturer private data
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: Enable/disable the read retry feature
+ */
+struct spinand_device {
+ struct nand_device base;
+ struct spi_mem *spimem;
+ struct mutex lock;
+ struct spinand_id id;
+ u32 flags;
+
+ struct {
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
+ struct spinand_dirmap *dirmaps;
+
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+
+ struct spinand_ecc_info eccinfo;
+
+ u8 *cfg_cache;
+ u8 *databuf;
+ u8 *oobbuf;
+ u8 *scratchbuf;
+ const struct spinand_manufacturer *manufacturer;
+ void *priv;
+
+ int (*configure_chip)(struct spinand_device *spinand);
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+
+ const struct spinand_fact_otp *fact_otp;
+ const struct spinand_user_otp *user_otp;
+
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int retry_mode);
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+ return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding an NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+ return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+ return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+ struct device_node *np)
+{
+ nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s);
+
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+size_t spinand_otp_page_size(struct spinand_device *spinand);
+size_t spinand_fact_otp_size(struct spinand_device *spinand);
+size_t spinand_user_otp_size(struct spinand_device *spinand);
+
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf);
+
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand);
+
+#endif /* __LINUX_MTD_SPINAND_H */
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
index f456230f9330..3608a6c36fac 100644
--- a/include/linux/mtd/super.h
+++ b/include/linux/mtd/super.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* MTD-based superblock handling
*
* Copyright © 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef __MTD_SUPER_H__
@@ -18,9 +14,9 @@
#include <linux/fs.h>
#include <linux/mount.h>
-extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+extern int get_tree_mtd(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
extern void kill_mtd_super(struct super_block *sb);
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index c3918a0684fe..c3f79c4be1cc 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) International Business Machines Corp., 2006
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -23,22 +10,32 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/scatterlist.h>
#include <mtd/ubi-user.h>
/* All volumes/LEBs */
#define UBI_ALL -1
/*
+ * Maximum number of scatter gather list entries.
+ * We use only 64 to keep the memory footprint low.
+ */
+#define UBI_MAX_SG_COUNT 64
+
+/*
* enum ubi_open_mode - UBI volume open mode constants.
*
* UBI_READONLY: read-only mode
* UBI_READWRITE: read-write mode
* UBI_EXCLUSIVE: exclusive mode
+ * UBI_METAONLY: modify only the volume meta-data,
+ * i.e. the data stored in the volume table, but not in any of the volume's LEBs.
*/
enum {
UBI_READONLY = 1,
UBI_READWRITE,
- UBI_EXCLUSIVE
+ UBI_EXCLUSIVE,
+ UBI_METAONLY
};
/**
@@ -113,9 +110,39 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
+ struct device *dev;
+};
+
+/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+ int list_pos;
+ int page_pos;
+ struct scatterlist sg[UBI_MAX_SG_COUNT];
};
/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or any adequate
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+ usgl->list_pos = 0;
+ usgl->page_pos = 0;
+}
+
+/**
* struct ubi_device_info - UBI device description data structure.
* @ubi_num: ubi device number
* @leb_size: logical eraseblock size on this UBI device
@@ -165,6 +192,7 @@ struct ubi_device_info {
* or a volume was removed)
* @UBI_VOLUME_RESIZED: a volume has been re-sized
* @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_SHUTDOWN: a volume is going to be removed, shut down users
* @UBI_VOLUME_UPDATED: data has been written to a volume
*
* These constants define which type of event has happened when a volume
@@ -175,6 +203,7 @@ enum {
UBI_VOLUME_REMOVED,
UBI_VOLUME_RESIZED,
UBI_VOLUME_RENAMED,
+ UBI_VOLUME_SHUTDOWN,
UBI_VOLUME_UPDATED,
};
@@ -210,6 +239,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
void ubi_close_volume(struct ubi_volume_desc *desc);
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check);
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len);
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@@ -219,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
int ubi_sync(int ubi_num);
-int ubi_flush(int ubi_num, int vol_id, int lnum);
/*
* This function is the same as the 'ubi_leb_read()' function, but it does not
@@ -230,4 +260,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
{
return ubi_leb_read(desc, lnum, buf, offset, len, 0);
}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+ struct ubi_sgl *sgl, int offset, int len)
+{
+ return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
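As a brief usage sketch (hypothetical helper name): reset the position counters with ubi_sgl_init(), populate sgl->sg[] with sg_init_table()/sg_set_page() as noted above, then hand the list to ubi_read_sg() or ubi_leb_read_sg().

/* Sketch: read @len bytes from the start of LEB @lnum into @sgl. */
static int my_read_leb_sg(struct ubi_volume_desc *desc, int lnum,
			  struct ubi_sgl *sgl, int len)
{
	ubi_sgl_init(sgl);	/* reset list/page positions only */

	/* sgl->sg[] must already describe enough pages for @len bytes. */
	return ubi_read_sg(desc, lnum, sgl, 0, len);
}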
#endif /* !__LINUX_UBI_H__ */
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
index abed4dec5c2f..3cac9360588f 100644
--- a/include/linux/mtd/xip.h
+++ b/include/linux/mtd/xip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MTD primitives for XIP support
*
@@ -7,10 +8,6 @@
*
* This XIP support for MTD has been loosely inspired
* by an earlier patch authored by David Woodhouse.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_XIP_H__
@@ -30,7 +27,9 @@
* obviously not be running from flash. The __xipram is therefore marking
* those functions so they get relocated to ram.
*/
-#define __xipram noinline __attribute__ ((__section__ (".data")))
+#ifdef CONFIG_XIP_KERNEL
+#define __xipram noinline __section(".xiptext")
+#endif
/*
* Each architecture has to provide the following macros. They must access
@@ -90,10 +89,10 @@
#define xip_cpu_idle() do { } while (0)
#endif
-#else
+#endif /* CONFIG_MTD_XIP */
+#ifndef __xipram
#define __xipram
-
-#endif /* CONFIG_MTD_XIP */
+#endif
#endif /* __LINUX_MTD_XIP_H__ */