-rw-r--r--  Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt  10
-rw-r--r--  Documentation/devicetree/bindings/mtd/nand-macronix.txt  27
-rw-r--r--  MAINTAINERS  8
-rw-r--r--  arch/arm/mach-omap1/board-ams-delta.c  47
-rw-r--r--  arch/mips/boot/dts/brcm/bcm7425.dtsi  4
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  4
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  5
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  17
-rw-r--r--  drivers/mtd/chips/cfi_util.c  12
-rw-r--r--  drivers/mtd/devices/block2mtd.c  4
-rw-r--r--  drivers/mtd/devices/phram.c  19
-rw-r--r--  drivers/mtd/hyperbus/hbmc-am654.c  12
-rw-r--r--  drivers/mtd/hyperbus/hyperbus-core.c  15
-rw-r--r--  drivers/mtd/inftlmount.c  2
-rw-r--r--  drivers/mtd/lpddr/lpddr_cmds.c  4
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c  5
-rw-r--r--  drivers/mtd/mtdblock.c  5
-rw-r--r--  drivers/mtd/mtdchar.c  12
-rw-r--r--  drivers/mtd/mtdcore.c  250
-rw-r--r--  drivers/mtd/mtdpart.c  695
-rw-r--r--  drivers/mtd/nand/onenand/onenand_base.c  2
-rw-r--r--  drivers/mtd/nand/raw/ams-delta.c  237
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c  293
-rw-r--r--  drivers/mtd/nand/raw/cadence-nand-controller.c  34
-rw-r--r--  drivers/mtd/nand/raw/denali.c  1
-rw-r--r--  drivers/mtd/nand/raw/denali.h  2
-rw-r--r--  drivers/mtd/nand/raw/diskonchip.c  4
-rw-r--r--  drivers/mtd/nand/raw/fsl_elbc_nand.c  3
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c  21
-rw-r--r--  drivers/mtd/nand/raw/ingenic/Kconfig  1
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c  4
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c  2
-rw-r--r--  drivers/mtd/nand/raw/ingenic/jz4725b_bch.c  4
-rw-r--r--  drivers/mtd/nand/raw/ingenic/jz4780_bch.c  4
-rw-r--r--  drivers/mtd/nand/raw/internals.h  1
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c  40
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c  2
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c  2
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c  71
-rw-r--r--  drivers/mtd/nand/raw/nand_hynix.c  2
-rw-r--r--  drivers/mtd/nand/raw/nand_legacy.c  6
-rw-r--r--  drivers/mtd/nand/raw/nand_macronix.c  227
-rw-r--r--  drivers/mtd/nand/raw/nand_toshiba.c  58
-rw-r--r--  drivers/mtd/nand/raw/nandsim.c  4
-rw-r--r--  drivers/mtd/nand/raw/omap_elm.c  8
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c  105
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c  44
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c  17
-rw-r--r--  drivers/mtd/nand/spi/core.c  104
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c  45
-rw-r--r--  drivers/mtd/nand/spi/macronix.c  30
-rw-r--r--  drivers/mtd/nand/spi/micron.c  172
-rw-r--r--  drivers/mtd/nand/spi/paragon.c  28
-rw-r--r--  drivers/mtd/nand/spi/toshiba.c  208
-rw-r--r--  drivers/mtd/nand/spi/winbond.c  34
-rw-r--r--  drivers/mtd/spi-nor/Kconfig  75
-rw-r--r--  drivers/mtd/spi-nor/Makefile  25
-rw-r--r--  drivers/mtd/spi-nor/atmel.c  46
-rw-r--r--  drivers/mtd/spi-nor/catalyst.c  29
-rw-r--r--  drivers/mtd/spi-nor/controllers/Kconfig  75
-rw-r--r--  drivers/mtd/spi-nor/controllers/Makefile  8
-rw-r--r--  drivers/mtd/spi-nor/controllers/aspeed-smc.c (renamed from drivers/mtd/spi-nor/aspeed-smc.c)  4
-rw-r--r--  drivers/mtd/spi-nor/controllers/cadence-quadspi.c (renamed from drivers/mtd/spi-nor/cadence-quadspi.c)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/hisi-sfc.c (renamed from drivers/mtd/spi-nor/hisi-sfc.c)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi-pci.c (renamed from drivers/mtd/spi-nor/intel-spi-pci.c)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi-platform.c (renamed from drivers/mtd/spi-nor/intel-spi-platform.c)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi.c (renamed from drivers/mtd/spi-nor/intel-spi.c)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi.h (renamed from drivers/mtd/spi-nor/intel-spi.h)  0
-rw-r--r--  drivers/mtd/spi-nor/controllers/nxp-spifi.c (renamed from drivers/mtd/spi-nor/nxp-spifi.c)  0
-rw-r--r--  drivers/mtd/spi-nor/core.c  3466
-rw-r--r--  drivers/mtd/spi-nor/core.h  441
-rw-r--r--  drivers/mtd/spi-nor/eon.c  34
-rw-r--r--  drivers/mtd/spi-nor/esmt.c  25
-rw-r--r--  drivers/mtd/spi-nor/everspin.c  27
-rw-r--r--  drivers/mtd/spi-nor/fujitsu.c  20
-rw-r--r--  drivers/mtd/spi-nor/gigadevice.c  59
-rw-r--r--  drivers/mtd/spi-nor/intel.c  32
-rw-r--r--  drivers/mtd/spi-nor/issi.c  83
-rw-r--r--  drivers/mtd/spi-nor/macronix.c  98
-rw-r--r--  drivers/mtd/spi-nor/micron-st.c  157
-rw-r--r--  drivers/mtd/spi-nor/sfdp.c  1204
-rw-r--r--  drivers/mtd/spi-nor/sfdp.h  98
-rw-r--r--  drivers/mtd/spi-nor/spansion.c  95
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c  5434
-rw-r--r--  drivers/mtd/spi-nor/sst.c  151
-rw-r--r--  drivers/mtd/spi-nor/winbond.c  112
-rw-r--r--  drivers/mtd/spi-nor/xilinx.c  94
-rw-r--r--  drivers/mtd/spi-nor/xmc.c  23
-rw-r--r--  drivers/mtd/ubi/attach.c  2
-rw-r--r--  drivers/mtd/ubi/build.c  4
-rw-r--r--  include/linux/mtd/mtd.h  125
-rw-r--r--  include/linux/mtd/partitions.h  1
-rw-r--r--  include/linux/mtd/rawnand.h  11
-rw-r--r--  include/linux/mtd/spi-nor.h  285
-rw-r--r--  include/linux/mtd/spinand.h  67
95 files changed, 8455 insertions, 6932 deletions
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 82156dc8f304..05651a654c66 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -35,11 +35,11 @@ Required properties:
(optional) NAND flash cache range (if at non-standard offset)
- reg-names : a list of the names corresponding to the previous register
ranges. Should contain "nand" and (optionally)
- "flash-dma" and/or "nand-cache".
-- interrupts : The NAND CTLRDY interrupt and (if Flash DMA is available)
- FLASH_DMA_DONE
-- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done", if broken out as
- individual interrupts.
+ "flash-dma" or "flash-edu" and/or "nand-cache".
+- interrupts : The NAND CTLRDY interrupt, (if Flash DMA is available)
+ FLASH_DMA_DONE and, if EDU is available and used, FLASH_EDU_DONE
+- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done" or "flash_edu_done",
+ if broken out as individual interrupts.
May be "nand", if the SoC has the individual NAND
interrupts multiplexed behind another custom piece of
hardware
diff --git a/Documentation/devicetree/bindings/mtd/nand-macronix.txt b/Documentation/devicetree/bindings/mtd/nand-macronix.txt
new file mode 100644
index 000000000000..ffab28a2c4d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nand-macronix.txt
@@ -0,0 +1,27 @@
+Macronix NANDs Device Tree Bindings
+-----------------------------------
+
+Macronix NANDs support a randomizer operation for scrambling user data,
+which can be enabled with a SET_FEATURE command. The penalty of using the
+randomizer is that subpage accesses are prohibited and the program
+operation takes longer, i.e., tPROG goes from 300us to 340us with the
+randomizer enabled. Enabling the randomizer is a one-time, persistent and
+non-reversible operation.
+
+For high-reliability use cases, if subpage write is not available with
+hardware ECC and not enabled at the UBI level, then enabling the
+randomizer is recommended by adding a dedicated property to the NAND
+chip (child) nodes.
+
+Required NAND chip properties in child nodes:
+- randomizer enable: should be "mxic,enable-randomizer-otp"
+
+Example:
+
+ nand: nand-controller@unit-address {
+
+ nand@0 {
+ reg = <0>;
+ mxic,enable-randomizer-otp;
+ };
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 1a84dee31379..863011649869 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1945,7 +1945,7 @@ F: Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
F: arch/arm/boot/dts/lpc43*
F: drivers/i2c/busses/i2c-lpc2k.c
F: drivers/memory/pl172.c
-F: drivers/mtd/spi-nor/nxp-spifi.c
+F: drivers/mtd/spi-nor/controllers/nxp-spifi.c
F: drivers/rtc/rtc-lpc24xx.c
N: lpc18xx
@@ -7851,6 +7851,10 @@ F: Documentation/ABI/testing/debugfs-hyperv
HYPERBUS SUPPORT
M: Vignesh Raghavendra <vigneshr@ti.com>
+L: linux-mtd@lists.infradead.org
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git cfi/next
+C: irc://irc.oftc.net/mtd
S: Supported
F: drivers/mtd/hyperbus/
F: include/linux/mtd/hyperbus.h
@@ -11552,6 +11556,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
+C: irc://irc.oftc.net/mtd
S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
@@ -15835,6 +15840,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git spi-nor/next
+C: irc://irc.oftc.net/mtd
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index a2aa7a12b374..8d32894ecd2e 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -17,6 +17,8 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/fixed.h>
@@ -294,9 +296,42 @@ struct modem_private_data {
static struct modem_private_data modem_priv;
+/*
+ * Define partitions for flash device
+ */
+
+static struct mtd_partition partition_info[] = {
+ { .name = "Kernel",
+ .offset = 0,
+ .size = 3 * SZ_1M + SZ_512K },
+ { .name = "u-boot",
+ .offset = 3 * SZ_1M + SZ_512K,
+ .size = SZ_256K },
+ { .name = "u-boot params",
+ .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
+ .size = SZ_256K },
+ { .name = "Amstrad LDR",
+ .offset = 4 * SZ_1M,
+ .size = SZ_256K },
+ { .name = "File system",
+ .offset = 4 * SZ_1M + 1 * SZ_256K,
+ .size = 27 * SZ_1M },
+ { .name = "PBL reserved",
+ .offset = 32 * SZ_1M - 3 * SZ_256K,
+ .size = 3 * SZ_256K },
+};
+
+static struct gpio_nand_platdata nand_platdata = {
+ .parts = partition_info,
+ .num_parts = ARRAY_SIZE(partition_info),
+};
+
static struct platform_device ams_delta_nand_device = {
.name = "ams-delta-nand",
.id = -1,
+ .dev = {
+ .platform_data = &nand_platdata,
+ },
};
#define OMAP_GPIO_LABEL "gpio-0-15"
@@ -306,10 +341,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
.table = {
GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_NAND_RB, "rdy",
0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe",
+ GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 410e61ebaf9e..aa0b2d39c902 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -403,8 +403,8 @@
compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
#address-cells = <1>;
#size-cells = <0>;
- reg-names = "nand";
- reg = <0x41b800 0x400>;
+ reg-names = "nand", "flash-edu";
+ reg = <0x41b800 0x400>, <0x41bc00 0x24>;
interrupt-parent = <&hif_l2_intc>;
interrupts = <24>;
status = "disabled";
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 00a79489067c..142c0f9485fe 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -834,7 +834,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Someone else might have been playing with it. */
return -EAGAIN;
}
- /* Fall through */
+ fallthrough;
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
@@ -907,7 +907,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* Fall through */
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 04b383bc3947..a1f3e1031c3d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -966,8 +966,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2935,7 +2934,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 54edae63b92d..270322bca221 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -324,8 +324,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_addr);
if (map_word_andequal(map, status, status_OK, status_OK)) {
@@ -462,8 +461,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -756,8 +754,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -998,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* Fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
@@ -1054,8 +1051,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1201,8 +1197,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e2d4db05aeb3..99b7986002f0 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -109,13 +109,13 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
case 8:
onecmd |= (onecmd << (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
onecmd |= (onecmd << (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
onecmd |= (onecmd << (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
@@ -165,13 +165,13 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 8:
res |= (onestat >> (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
res |= (onestat >> (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
res |= (onestat >> (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 36aa082f6db0..c08721b11642 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -329,10 +329,10 @@ static int ustrtoul(const char *cp, char **endp, unsigned int base)
switch (**endp) {
case 'G' :
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
case 'k':
result *= 1024;
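
The fallthrough conversions in these hunks rely on the kernel's "fallthrough" pseudo-keyword from include/linux/compiler_attributes.h, which expands to a compiler attribute instead of a comment so -Wimplicit-fallthrough can be enforced. A minimal user-space sketch of the same pattern, mirroring the K/M/G scaling in block2mtd's ustrtoul(); the scale() helper and the local define are illustrative only, not kernel code:

	#include <stdio.h>

	/* The kernel defines this in include/linux/compiler_attributes.h;
	 * redefined here only so the sketch builds in user space with
	 * GCC/Clang. */
	#define fallthrough __attribute__((__fallthrough__))

	static unsigned long scale(unsigned long value, char suffix)
	{
		switch (suffix) {
		case 'G':
			value *= 1024;
			fallthrough;
		case 'M':
			value *= 1024;
			fallthrough;
		case 'K':
		case 'k':
			value *= 1024;
			break;
		}
		return value;
	}

	int main(void)
	{
		printf("%lu\n", scale(1, 'M')); /* prints 1048576 */
		return 0;
	}
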
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 931e5c2481b5..087b5e86d1bf 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -148,10 +148,10 @@ static int parse_num64(uint64_t *num64, char *token)
switch (token[len - 2]) {
case 'G':
shift += 10;
- /* fall through */
+ fallthrough;
case 'M':
shift += 10;
- /* fall through */
+ fallthrough;
case 'k':
shift += 10;
token[len - 2] = 0;
@@ -243,22 +243,25 @@ static int phram_setup(const char *val)
ret = parse_num64(&start, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal start address\n");
+ goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
- kfree(name);
parse_err("illegal device length\n");
+ goto error;
}
ret = register_device(name, start, len);
- if (!ret)
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
- else
- kfree(name);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ return 0;
+error:
+ kfree(name);
return ret;
}
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 08d543b124cd..f350a0809f88 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -11,6 +11,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -57,8 +58,10 @@ static const struct hyperbus_ops am654_hbmc_ops = {
static int am654_hbmc_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am654_hbmc_priv *priv;
+ struct resource res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -67,6 +70,10 @@ static int am654_hbmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
if (of_property_read_bool(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
@@ -88,6 +95,11 @@ static int am654_hbmc_probe(struct platform_device *pdev)
goto disable_pm;
}
+ priv->hbdev.map.size = resource_size(&res);
+ priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(priv->hbdev.map.virt))
+ return PTR_ERR(priv->hbdev.map.virt);
+
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
priv->hbdev.ctlr = &priv->ctlr;
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 6af9ea34117d..32685e8dd278 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -10,7 +10,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/types.h>
static struct hyperbus_device *map_to_hbdev(struct map_info *map)
@@ -62,7 +61,6 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
struct hyperbus_ctlr *ctlr;
struct device_node *np;
struct map_info *map;
- struct resource res;
struct device *dev;
int ret;
@@ -73,22 +71,15 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
np = hbdev->np;
ctlr = hbdev->ctlr;
- if (!of_device_is_compatible(np, "cypress,hyperflash"))
+ if (!of_device_is_compatible(np, "cypress,hyperflash")) {
+ dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
return -ENODEV;
+ }
hbdev->memtype = HYPERFLASH;
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- return ret;
-
dev = ctlr->dev;
map = &hbdev->map;
- map->size = resource_size(&res);
- map->virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(map->virt))
- return PTR_ERR(map->virt);
-
map->name = dev_name(dev);
map->bankwidth = 2;
map->device_node = np;
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 54b176d4319f..af16d3485de0 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -130,7 +130,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = 0x%x\n"
" PercentUsed = %d\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1efc643c9871..fb1cbc9a2870 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
- kfree(lpddr);
kfree(mtd);
return NULL;
}
@@ -305,8 +304,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 47602af4ee34..d3d4e987c163 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -34,7 +34,7 @@ struct sa_subdev_info {
struct sa_info {
struct mtd_info *mtd;
int num_subdev;
- struct sa_subdev_info subdev[0];
+ struct sa_subdev_info subdev[];
};
static DEFINE_SPINLOCK(sa1100_vpp_lock);
@@ -81,8 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
- /* Fall through */
-
+ fallthrough;
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
break;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index c06b5322d470..078e0f67377d 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -294,12 +294,13 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ int ret;
mutex_lock(&mtdblk->cache_mutex);
- write_cached_data(mtdblk);
+ ret = write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
mtd_sync(dev->mtd);
- return 0;
+ return ret;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b841008a9eb7..c5935b2f9cd1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -349,6 +349,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops = {};
uint32_t retlen;
@@ -360,7 +361,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
ops.ooblen = length;
@@ -586,6 +587,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
@@ -597,9 +599,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
-
ops.mode = req.mode;
ops.len = (size_t)req.len;
ops.ooblen = (size_t)req.ooblen;
@@ -635,6 +636,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
void __user *argp = (void __user *)arg;
int ret = 0;
struct mtd_info_user info;
@@ -824,7 +826,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_oobinfo oi;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
ret = get_oobinfo(mtd, &oi);
@@ -918,7 +920,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_ecclayout_user *usrlay;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5fac4355b9c2..2916674208b3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -456,13 +456,14 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
struct mtd_pairing_info *info)
{
- int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+ int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
if (wunit < 0 || wunit >= npairs)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_info)
- return mtd->pairing->get_info(mtd, wunit, info);
+ if (master->pairing && master->pairing->get_info)
+ return master->pairing->get_info(master, wunit, info);
info->group = 0;
info->pair = wunit;
@@ -498,15 +499,16 @@ EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
const struct mtd_pairing_info *info)
{
- int ngroups = mtd_pairing_groups(mtd);
- int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
if (!info || info->pair < 0 || info->pair >= npairs ||
info->group < 0 || info->group >= ngroups)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_wunit)
- return mtd->pairing->get_wunit(mtd, info);
+ if (master->pairing && master->pairing->get_wunit)
+ return master->pairing->get_wunit(master, info);
return info->pair;
}
@@ -524,10 +526,12 @@ EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
*/
int mtd_pairing_groups(struct mtd_info *mtd)
{
- if (!mtd->pairing || !mtd->pairing->ngroups)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->pairing || !master->pairing->ngroups)
return 1;
- return mtd->pairing->ngroups;
+ return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
@@ -587,6 +591,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
int add_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_notifier *not;
int i, error;
@@ -608,7 +613,7 @@ int add_mtd_device(struct mtd_info *mtd)
(mtd->_read && mtd->_read_oob)))
return -EINVAL;
- if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ if (WARN_ON((!mtd->erasesize || !master->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
@@ -765,7 +770,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
pr_debug("mtd device won't show a device symlink in sysfs\n");
}
- mtd->orig_flags = mtd->flags;
+ INIT_LIST_HEAD(&mtd->partitions);
+ mutex_init(&mtd->master.partitions_lock);
}
/**
@@ -971,20 +977,26 @@ EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(mtd->owner))
+ if (!try_module_get(master->owner))
return -ENODEV;
- if (mtd->_get_device) {
- err = mtd->_get_device(mtd);
+ if (master->_get_device) {
+ err = master->_get_device(mtd);
if (err) {
- module_put(mtd->owner);
+ module_put(master->owner);
return err;
}
}
- mtd->usecount++;
+
+ while (mtd->parent) {
+ mtd->usecount++;
+ mtd = mtd->parent;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1038,13 +1050,18 @@ EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
+ struct mtd_info *master = mtd_get_master(mtd);
- if (mtd->_put_device)
- mtd->_put_device(mtd);
+ while (mtd->parent) {
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+ mtd = mtd->parent;
+ }
+
+ if (master->_put_device)
+ master->_put_device(master);
- module_put(mtd->owner);
+ module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
@@ -1055,9 +1072,13 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
*/
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+ int ret;
+
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- if (!mtd->erasesize || !mtd->_erase)
+ if (!mtd->erasesize || !master->_erase)
return -ENOTSUPP;
if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
@@ -1069,7 +1090,14 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return 0;
ledtrig_mtd_activity();
- return mtd->_erase(mtd, instr);
+
+ instr->addr += mst_ofs;
+ ret = master->_erase(master, instr);
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= mst_ofs;
+
+ instr->addr -= mst_ofs;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -1079,30 +1107,36 @@ EXPORT_SYMBOL_GPL(mtd_erase);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
*virt = NULL;
if (phys)
*phys = 0;
- if (!mtd->_point)
+ if (!master->_point)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_point(mtd, from, len, retlen, virt, phys);
+
+ from = mtd_get_master_ofs(mtd, from);
+ return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_unpoint)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unpoint)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unpoint(mtd, from, len);
+ return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
@@ -1129,6 +1163,25 @@ unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
+ const struct mtd_ecc_stats *old_stats)
+{
+ struct mtd_ecc_stats diff;
+
+ if (master == mtd)
+ return;
+
+ diff = master->ecc_stats;
+ diff.failed -= old_stats->failed;
+ diff.corrected -= old_stats->corrected;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.failed += diff.failed;
+ mtd->ecc_stats.corrected += diff.corrected;
+ mtd = mtd->parent;
+ }
+}
+
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
@@ -1171,8 +1224,10 @@ EXPORT_SYMBOL_GPL(mtd_write);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_panic_write)
+ if (!master->_panic_write)
return -EOPNOTSUPP;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
@@ -1183,7 +1238,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
if (!mtd->oops_panic_write)
mtd->oops_panic_write = true;
- return mtd->_panic_write(mtd, to, len, retlen, buf);
+ return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+ retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
@@ -1222,7 +1278,10 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_ecc_stats old_stats = master->ecc_stats;
int ret_code;
+
ops->retlen = ops->oobretlen = 0;
ret_code = mtd_check_oob_ops(mtd, from, ops);
@@ -1232,14 +1291,17 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_read */
- if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_read_oob)
- ret_code = mtd->_read_oob(mtd, from, ops);
+ from = mtd_get_master_ofs(mtd, from);
+ if (master->_read_oob)
+ ret_code = master->_read_oob(master, from, ops);
else
- ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
- ops->datbuf);
+ ret_code = master->_read(master, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ mtd_update_ecc_stats(mtd, master, &old_stats);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1258,6 +1320,7 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
ops->retlen = ops->oobretlen = 0;
@@ -1272,14 +1335,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_write */
- if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ if (!master->_write_oob && (!master->_write || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_write_oob)
- return mtd->_write_oob(mtd, to, ops);
+ to = mtd_get_master_ofs(mtd, to);
+
+ if (master->_write_oob)
+ return master->_write_oob(master, to, ops);
else
- return mtd->_write(mtd, to, ops->len, &ops->retlen,
- ops->datbuf);
+ return master->_write(master, to, ops->len, &ops->retlen,
+ ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
@@ -1302,15 +1367,17 @@ EXPORT_SYMBOL_GPL(mtd_write_oob);
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobecc, 0, sizeof(*oobecc));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ if (!master->ooblayout || !master->ooblayout->ecc)
return -ENOTSUPP;
- return mtd->ooblayout->ecc(mtd, section, oobecc);
+ return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
@@ -1334,15 +1401,17 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobfree)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobfree, 0, sizeof(*oobfree));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->free)
+ if (!master->ooblayout || !master->ooblayout->free)
return -ENOTSUPP;
- return mtd->ooblayout->free(mtd, section, oobfree);
+ return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
@@ -1651,60 +1720,69 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_fact_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+ return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_fact_prot_reg)
+ if (!master->_read_fact_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_user_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+ return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_user_prot_reg)
+ if (!master->_read_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
*retlen = 0;
- if (!mtd->_write_user_prot_reg)
+ if (!master->_write_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
if (ret)
return ret;
@@ -1718,80 +1796,105 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_lock_user_prot_reg)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_lock_user_prot_reg(mtd, from, len);
+ return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_lock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_lock(mtd, ofs, len);
+ return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_unlock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unlock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unlock(mtd, ofs, len);
+ return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_is_locked)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_is_locked)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_is_locked(mtd, ofs, len);
+ return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isreserved)
+ if (!master->_block_isreserved)
return 0;
- return mtd->_block_isreserved(mtd, ofs);
+ return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isbad)
+ if (!master->_block_isbad)
return 0;
- return mtd->_block_isbad(mtd, ofs);
+ return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- if (!mtd->_block_markbad)
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (!master->_block_markbad)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return mtd->_block_markbad(mtd, ofs);
+
+ ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ if (ret)
+ return ret;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.badblocks++;
+ mtd = mtd->parent;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
@@ -1841,12 +1944,17 @@ static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- if (!mtd->_writev)
+
+ if (!master->_writev)
return default_mtd_writev(mtd, vecs, count, to, retlen);
- return mtd->_writev(mtd, vecs, count, to, retlen);
+
+ return master->_writev(master, vecs, count,
+ mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
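
The mtdcore.c hunks above route every operation through the master device using two new helpers, mtd_get_master() and mtd_get_master_ofs(), added to include/linux/mtd/mtd.h (that hunk is not shown in this excerpt). A minimal sketch of the intended behaviour, assuming the partition offset is stored in mtd->part.offset as the mtdpart.c changes below indicate:

	static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
	{
		/* Walk up the partition tree until we reach the real
		 * flash device. */
		while (mtd->parent)
			mtd = mtd->parent;

		return mtd;
	}

	static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
	{
		/* Accumulate the offsets of all parent partitions so a
		 * partition-relative offset becomes an absolute offset
		 * on the master device. */
		while (mtd->parent) {
			ofs += mtd->part.offset;
			mtd = mtd->parent;
		}

		return ofs;
	}
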
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 7328c066c5ba..3f6025684f58 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,339 +20,52 @@
#include "mtdcore.h"
-/* Our partition linked list */
-static LIST_HEAD(mtd_partitions);
-static DEFINE_MUTEX(mtd_partitions_mutex);
-
-/**
- * struct mtd_part - our partition node structure
- *
- * @mtd: struct holding partition details
- * @parent: parent mtd - flash device or another partition
- * @offset: partition offset relative to the *flash device*
- */
-struct mtd_part {
- struct mtd_info mtd;
- struct mtd_info *parent;
- uint64_t offset;
- struct list_head list;
-};
-
-/*
- * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
- * the pointer to that structure.
- */
-static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
-{
- return container_of(mtd, struct mtd_part, mtd);
-}
-
-static u64 part_absolute_offset(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- if (!mtd_is_partition(mtd))
- return 0;
-
- return part_absolute_offset(part->parent) + part->offset;
-}
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
-static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read(part->parent, from + part->offset, len,
- retlen, buf);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_point(part->parent, from + part->offset, len,
- retlen, virt, phys);
-}
-
-static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_unpoint(part->parent, from + part->offset, len);
-}
-
-static int part_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_user_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_fact_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_fact_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_panic_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_write_oob(part->parent, to + part->offset, ops);
-}
-
-static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock_user_prot_reg(part->parent, from, len);
-}
-
-static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_writev(part->parent, vecs, count,
- to + part->offset, retlen);
-}
-
-static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int ret;
-
- instr->addr += part->offset;
- ret = part->parent->_erase(part->parent, instr);
- if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
- instr->fail_addr -= part->offset;
- instr->addr -= part->offset;
-
- return ret;
-}
-
-static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock(part->parent, ofs + part->offset, len);
-}
-
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_unlock(part->parent, ofs + part->offset, len);
-}
-
-static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_is_locked(part->parent, ofs + part->offset, len);
-}
-
-static void part_sync(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_sync(part->parent);
-}
-
-static int part_suspend(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_suspend(part->parent);
-}
-
-static void part_resume(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_resume(part->parent);
-}
-
-static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isreserved(part->parent, ofs);
-}
-
-static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isbad(part->parent, ofs);
-}
-
-static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int res;
-
- ofs += part->offset;
- res = part->parent->_block_markbad(part->parent, ofs);
- if (!res)
- mtd->ecc_stats.badblocks++;
- return res;
-}
-
-static int part_get_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_device(part->parent);
-}
-
-static void part_put_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_put_device(part->parent);
-}
-
-static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_ecc(part->parent, section, oobregion);
-}
-
-static int part_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_free(part->parent, section, oobregion);
-}
-
-static const struct mtd_ooblayout_ops part_ooblayout_ops = {
- .ecc = part_ooblayout_ecc,
- .free = part_ooblayout_free,
-};
-
-static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_max_bad_blocks(part->parent,
- ofs + part->offset, len);
-}
-
-static inline void free_partition(struct mtd_part *p)
+static inline void free_partition(struct mtd_info *mtd)
{
- kfree(p->mtd.name);
- kfree(p);
+ kfree(mtd->name);
+ kfree(mtd);
}
-static struct mtd_part *allocate_partition(struct mtd_info *parent,
- const struct mtd_partition *part, int partno,
- uint64_t cur_offset)
+static struct mtd_info *allocate_partition(struct mtd_info *parent,
+ const struct mtd_partition *part,
+ int partno, uint64_t cur_offset)
{
int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
parent->erasesize;
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
u32 remainder;
char *name;
u64 tmp;
/* allocate the partition structure */
- slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
- if (!name || !slave) {
+ if (!name || !child) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
parent->name);
kfree(name);
- kfree(slave);
+ kfree(child);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
- slave->mtd.type = parent->type;
- slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
- slave->mtd.orig_flags = slave->mtd.flags;
- slave->mtd.size = part->size;
- slave->mtd.writesize = parent->writesize;
- slave->mtd.writebufsize = parent->writebufsize;
- slave->mtd.oobsize = parent->oobsize;
- slave->mtd.oobavail = parent->oobavail;
- slave->mtd.subpage_sft = parent->subpage_sft;
- slave->mtd.pairing = parent->pairing;
-
- slave->mtd.name = name;
- slave->mtd.owner = parent->owner;
+ child->type = parent->type;
+ child->part.flags = parent->flags & ~part->mask_flags;
+ child->flags = child->part.flags;
+ child->size = part->size;
+ child->writesize = parent->writesize;
+ child->writebufsize = parent->writebufsize;
+ child->oobsize = parent->oobsize;
+ child->oobavail = parent->oobavail;
+ child->subpage_sft = parent->subpage_sft;
+
+ child->name = name;
+ child->owner = parent->owner;
/* NOTE: Historically, we didn't arrange MTDs as a tree out of
* concern for showing the same data in multiple partitions.
@@ -360,134 +73,76 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
* so the MTD_PARTITIONED_MASTER option allows that. The master
* will have device nodes etc only if this is set, so make the
* parent conditional on that option. Note, this is a way to
- * distinguish between the master and the partition in sysfs.
+ * distinguish between the parent and its partitions in sysfs.
*/
- slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
- &parent->dev :
- parent->dev.parent;
- slave->mtd.dev.of_node = part->of_node;
-
- if (parent->_read)
- slave->mtd._read = part_read;
- if (parent->_write)
- slave->mtd._write = part_write;
-
- if (parent->_panic_write)
- slave->mtd._panic_write = part_panic_write;
-
- if (parent->_point && parent->_unpoint) {
- slave->mtd._point = part_point;
- slave->mtd._unpoint = part_unpoint;
- }
-
- if (parent->_read_oob)
- slave->mtd._read_oob = part_read_oob;
- if (parent->_write_oob)
- slave->mtd._write_oob = part_write_oob;
- if (parent->_read_user_prot_reg)
- slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
- if (parent->_read_fact_prot_reg)
- slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
- if (parent->_write_user_prot_reg)
- slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
- if (parent->_lock_user_prot_reg)
- slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
- if (parent->_get_user_prot_info)
- slave->mtd._get_user_prot_info = part_get_user_prot_info;
- if (parent->_get_fact_prot_info)
- slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
- if (parent->_sync)
- slave->mtd._sync = part_sync;
- if (!partno && !parent->dev.class && parent->_suspend &&
- parent->_resume) {
- slave->mtd._suspend = part_suspend;
- slave->mtd._resume = part_resume;
- }
- if (parent->_writev)
- slave->mtd._writev = part_writev;
- if (parent->_lock)
- slave->mtd._lock = part_lock;
- if (parent->_unlock)
- slave->mtd._unlock = part_unlock;
- if (parent->_is_locked)
- slave->mtd._is_locked = part_is_locked;
- if (parent->_block_isreserved)
- slave->mtd._block_isreserved = part_block_isreserved;
- if (parent->_block_isbad)
- slave->mtd._block_isbad = part_block_isbad;
- if (parent->_block_markbad)
- slave->mtd._block_markbad = part_block_markbad;
- if (parent->_max_bad_blocks)
- slave->mtd._max_bad_blocks = part_max_bad_blocks;
-
- if (parent->_get_device)
- slave->mtd._get_device = part_get_device;
- if (parent->_put_device)
- slave->mtd._put_device = part_put_device;
-
- slave->mtd._erase = part_erase;
- slave->parent = parent;
- slave->offset = part->offset;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
+ child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev : parent->dev.parent;
+ child->dev.of_node = part->of_node;
+ child->parent = parent;
+ child->part.offset = part->offset;
+ INIT_LIST_HEAD(&child->partitions);
+
+ if (child->part.offset == MTDPART_OFS_APPEND)
+ child->part.offset = cur_offset;
+ if (child->part.offset == MTDPART_OFS_NXTBLK) {
tmp = cur_offset;
- slave->offset = cur_offset;
+ child->part.offset = cur_offset;
remainder = do_div(tmp, wr_alignment);
if (remainder) {
- slave->offset += wr_alignment - remainder;
+ child->part.offset += wr_alignment - remainder;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
- (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ (unsigned long long)cur_offset,
+ child->part.offset);
}
}
- if (slave->offset == MTDPART_OFS_RETAIN) {
- slave->offset = cur_offset;
- if (parent->size - slave->offset >= slave->mtd.size) {
- slave->mtd.size = parent->size - slave->offset
- - slave->mtd.size;
+ if (child->part.offset == MTDPART_OFS_RETAIN) {
+ child->part.offset = cur_offset;
+ if (parent->size - child->part.offset >= child->size) {
+ child->size = parent->size - child->part.offset -
+ child->size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
- part->name, parent->size - slave->offset,
- slave->mtd.size);
+ part->name, parent->size - child->part.offset,
+ child->size);
/* register to preserve ordering */
goto out_register;
}
}
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = parent->size - slave->offset;
+ if (child->size == MTDPART_SIZ_FULL)
+ child->size = parent->size - child->part.offset;
- printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
- (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
+ child->part.offset, child->part.offset + child->size,
+ child->name);
/* let's do some sanity checks */
- if (slave->offset >= parent->size) {
+ if (child->part.offset >= parent->size) {
/* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
+ child->part.offset = 0;
+ child->size = 0;
/* Initialize ->erasesize to make add_mtd_device() happy. */
- slave->mtd.erasesize = parent->erasesize;
-
+ child->erasesize = parent->erasesize;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
- if (slave->offset + slave->mtd.size > parent->size) {
- slave->mtd.size = parent->size - slave->offset;
+ if (child->part.offset + child->size > parent->size) {
+ child->size = parent->size - child->part.offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
- part->name, parent->name, (unsigned long long)slave->mtd.size);
+ part->name, parent->name, child->size);
}
if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = parent->numeraseregions;
- u64 end = slave->offset + slave->mtd.size;
+ u64 end = child->part.offset + child->size;
struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase regions which is part of this
* partition. */
- for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ for (i = 0; i < max && regions[i].offset <= child->part.offset;
+ i++)
;
/* The loop searched for the region _behind_ the first one */
if (i > 0)
@@ -495,70 +150,68 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
+ if (child->erasesize < regions[i].erasesize)
+ child->erasesize = regions[i].erasesize;
}
- BUG_ON(slave->mtd.erasesize == 0);
+ BUG_ON(child->erasesize == 0);
} else {
/* Single erase size */
- slave->mtd.erasesize = parent->erasesize;
+ child->erasesize = parent->erasesize;
}
/*
- * Slave erasesize might differ from the master one if the master
+ * Child erasesize might differ from the parent one if the parent
* exposes several regions with different erasesize. Adjust
* wr_alignment accordingly.
*/
- if (!(slave->mtd.flags & MTD_NO_ERASE))
- wr_alignment = slave->mtd.erasesize;
+ if (!(child->flags & MTD_NO_ERASE))
+ wr_alignment = child->erasesize;
- tmp = part_absolute_offset(parent) + slave->offset;
+ tmp = mtd_get_master_ofs(child, 0);
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
part->name);
}
- tmp = part_absolute_offset(parent) + slave->mtd.size;
+ tmp = mtd_get_master_ofs(child, 0) + child->size;
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
part->name);
}
- mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
- slave->mtd.ecc_step_size = parent->ecc_step_size;
- slave->mtd.ecc_strength = parent->ecc_strength;
- slave->mtd.bitflip_threshold = parent->bitflip_threshold;
+ child->ecc_step_size = parent->ecc_step_size;
+ child->ecc_strength = parent->ecc_strength;
+ child->bitflip_threshold = parent->bitflip_threshold;
- if (parent->_block_isbad) {
+ if (master->_block_isbad) {
uint64_t offs = 0;
- while (offs < slave->mtd.size) {
- if (mtd_block_isreserved(parent, offs + slave->offset))
- slave->mtd.ecc_stats.bbtblocks++;
- else if (mtd_block_isbad(parent, offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
+ while (offs < child->size) {
+ if (mtd_block_isreserved(child, offs))
+ child->ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(child, offs))
+ child->ecc_stats.badblocks++;
+ offs += child->erasesize;
}
}
out_register:
- return slave;
+ return child;
}
static ssize_t mtd_partition_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
+
+ return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
@@ -568,9 +221,9 @@ static const struct attribute *mtd_partition_attrs[] = {
NULL
};
-static int mtd_add_partition_attrs(struct mtd_part *new)
+static int mtd_add_partition_attrs(struct mtd_info *new)
{
- int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
if (ret)
printk(KERN_WARNING
"mtd: failed to create partition attrs, err=%d\n", ret);
@@ -580,8 +233,9 @@ static int mtd_add_partition_attrs(struct mtd_part *new)
int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
+ struct mtd_info *master = mtd_get_master(parent);
struct mtd_partition part;
- struct mtd_part *new;
+ struct mtd_info *child;
int ret = 0;
/* the direct offset is expected */
@@ -600,28 +254,28 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
part.size = length;
part.offset = offset;
- new = allocate_partition(parent, &part, -1, offset);
- if (IS_ERR(new))
- return PTR_ERR(new);
+ child = allocate_partition(parent, &part, -1, offset);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- mutex_lock(&mtd_partitions_mutex);
- list_add(&new->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&new->mtd);
+ ret = add_mtd_device(child);
if (ret)
goto err_remove_part;
- mtd_add_partition_attrs(new);
+ mtd_add_partition_attrs(child);
return 0;
err_remove_part:
- mutex_lock(&mtd_partitions_mutex);
- list_del(&new->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(new);
+ free_partition(child);
return ret;
}
@@ -630,119 +284,142 @@ EXPORT_SYMBOL_GPL(mtd_add_partition);
/**
* __mtd_del_partition - delete MTD partition
*
- * @priv: internal MTD struct for partition to be deleted
+ * @mtd: MTD structure to be deleted
*
* This function must be called with the partitions mutex locked.
*/
-static int __mtd_del_partition(struct mtd_part *priv)
+static int __mtd_del_partition(struct mtd_info *mtd)
{
- struct mtd_part *child, *next;
+ struct mtd_info *child, *next;
int err;
- list_for_each_entry_safe(child, next, &mtd_partitions, list) {
- if (child->parent == &priv->mtd) {
- err = __mtd_del_partition(child);
- if (err)
- return err;
- }
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ err = __mtd_del_partition(child);
+ if (err)
+ return err;
}
- sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+ sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
- err = del_mtd_device(&priv->mtd);
+ err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&priv->list);
- free_partition(priv);
+	list_del(&mtd->part.node);
+ free_partition(mtd);
return 0;
}
/*
* This function unregisters and destroy all slave MTD objects which are
- * attached to the given MTD object.
+ * attached to the given MTD object, recursively.
*/
-int del_mtd_partitions(struct mtd_info *mtd)
+static int __del_mtd_partitions(struct mtd_info *mtd)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *next;
+ LIST_HEAD(tmp_list);
int ret, err = 0;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if (slave->parent == mtd) {
- ret = __mtd_del_partition(slave);
- if (ret < 0)
- err = ret;
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ if (mtd_has_partitions(child))
+			__del_mtd_partitions(child);
+
+ pr_info("Deleting %s MTD partition\n", child->name);
+ ret = del_mtd_device(child);
+ if (ret < 0) {
+ pr_err("Error when deleting partition \"%s\" (%d)\n",
+ child->name, ret);
+ err = ret;
+ continue;
}
- mutex_unlock(&mtd_partitions_mutex);
+
+ list_del(&child->part.node);
+ free_partition(child);
+ }
return err;
}
+int del_mtd_partitions(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
+
+ mutex_lock(&master->master.partitions_lock);
+ ret = __del_mtd_partitions(mtd);
+ mutex_unlock(&master->master.partitions_lock);
+
+ return ret;
+}
+
int mtd_del_partition(struct mtd_info *mtd, int partno)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *master = mtd_get_master(mtd);
int ret = -EINVAL;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if ((slave->parent == mtd) &&
- (slave->mtd.index == partno)) {
- ret = __mtd_del_partition(slave);
+ mutex_lock(&master->master.partitions_lock);
+ list_for_each_entry(child, &mtd->partitions, part.node) {
+ if (child->index == partno) {
+ ret = __mtd_del_partition(child);
break;
}
- mutex_unlock(&mtd_partitions_mutex);
+ }
+ mutex_unlock(&master->master.partitions_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
/*
- * This function, given a master MTD object and a partition table, creates
- * and registers slave MTD objects which are bound to the master according to
- * the partition definitions.
+ * This function, given a parent MTD object and a partition table, creates
+ * and registers the child MTD objects which are bound to the parent according
+ * to the partition definitions.
*
- * For historical reasons, this function's caller only registers the master
+ * For historical reasons, this function's caller only registers the parent
* if the MTD_PARTITIONED_MASTER config option is set.
*/
-int add_mtd_partitions(struct mtd_info *master,
+int add_mtd_partitions(struct mtd_info *parent,
const struct mtd_partition *parts,
int nbparts)
{
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
uint64_t cur_offset = 0;
int i, ret;
- printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
+ nbparts, parent->name);
for (i = 0; i < nbparts; i++) {
- slave = allocate_partition(master, parts + i, i, cur_offset);
- if (IS_ERR(slave)) {
- ret = PTR_ERR(slave);
+ child = allocate_partition(parent, parts + i, i, cur_offset);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto err_del_partitions;
}
- mutex_lock(&mtd_partitions_mutex);
- list_add(&slave->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(child);
if (ret) {
- mutex_lock(&mtd_partitions_mutex);
- list_del(&slave->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(slave);
+ free_partition(child);
goto err_del_partitions;
}
- mtd_add_partition_attrs(slave);
+ mtd_add_partition_attrs(child);
+
/* Look for subpartitions */
- parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
+ parse_mtd_partitions(child, parts[i].types, NULL);
- cur_offset = slave->offset + slave->mtd.size;
+ cur_offset = child->part.offset + child->size;
}
return 0;
@@ -1023,29 +700,11 @@ void mtd_part_parser_cleanup(struct mtd_partitions *parts)
}
}
-int mtd_is_partition(const struct mtd_info *mtd)
-{
- struct mtd_part *part;
- int ispart = 0;
-
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(part, &mtd_partitions, list)
- if (&part->mtd == mtd) {
- ispart = 1;
- break;
- }
- mutex_unlock(&mtd_partitions_mutex);
-
- return ispart;
-}
-EXPORT_SYMBOL_GPL(mtd_is_partition);
-
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
- if (!mtd_is_partition(mtd))
- return mtd->size;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
- return mtd_get_device_size(mtd_to_part(mtd)->parent);
+ return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index d5326d19b136..ec18ade33262 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,7 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
- /* fall through */
+ fallthrough;
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 8312182088c1..d66dab25df20 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -19,15 +19,17 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
-struct ams_delta_nand {
+struct gpio_nand {
struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
@@ -39,41 +41,20 @@ struct ams_delta_nand {
struct gpio_desc *gpiod_cle;
struct gpio_descs *data_gpiods;
bool data_in;
+ unsigned int tRP;
+ unsigned int tWP;
+ u8 (*io_read)(struct gpio_nand *this);
+ void (*io_write)(struct gpio_nand *this, u8 byte);
};
-/*
- * Define partitions for flash devices
- */
-
-static const struct mtd_partition partition_info[] = {
- { .name = "Kernel",
- .offset = 0,
- .size = 3 * SZ_1M + SZ_512K },
- { .name = "u-boot",
- .offset = 3 * SZ_1M + SZ_512K,
- .size = SZ_256K },
- { .name = "u-boot params",
- .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
- .size = SZ_256K },
- { .name = "Amstrad LDR",
- .offset = 4 * SZ_1M,
- .size = SZ_256K },
- { .name = "File system",
- .offset = 4 * SZ_1M + 1 * SZ_256K,
- .size = 27 * SZ_1M },
- { .name = "PBL reserved",
- .offset = 32 * SZ_1M - 3 * SZ_256K,
- .size = 3 * SZ_256K },
-};
-
-static void ams_delta_write_commit(struct ams_delta_nand *priv)
+static void gpio_nand_write_commit(struct gpio_nand *priv)
{
- gpiod_set_value(priv->gpiod_nwe, 0);
- ndelay(40);
gpiod_set_value(priv->gpiod_nwe, 1);
+ ndelay(priv->tWP);
+ gpiod_set_value(priv->gpiod_nwe, 0);
}
-static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -81,10 +62,10 @@ static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
}
-static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -94,30 +75,30 @@ static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
gpiod_direction_output_raw(data_gpiods->desc[i],
test_bit(i, values));
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
priv->data_in = false;
}
-static u8 ams_delta_io_read(struct ams_delta_nand *priv)
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
{
u8 res;
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
- gpiod_set_value(priv->gpiod_nre, 0);
- ndelay(40);
+ gpiod_set_value(priv->gpiod_nre, 1);
+ ndelay(priv->tRP);
gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- gpiod_set_value(priv->gpiod_nre, 1);
+ gpiod_set_value(priv->gpiod_nre, 0);
res = values[0];
return res;
}
-static void ams_delta_dir_input(struct ams_delta_nand *priv)
+static void gpio_nand_dir_input(struct gpio_nand *priv)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
int i;
@@ -128,68 +109,67 @@ static void ams_delta_dir_input(struct ams_delta_nand *priv)
priv->data_in = true;
}
-static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
- int len)
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
{
int i = 0;
if (len > 0 && priv->data_in)
- ams_delta_dir_output(priv, buf[i++]);
+ gpio_nand_dir_output(priv, buf[i++]);
while (i < len)
- ams_delta_io_write(priv, buf[i++]);
+ priv->io_write(priv, buf[i++]);
}
-static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
{
int i;
- if (!priv->data_in)
- ams_delta_dir_input(priv);
+ if (priv->data_gpiods && !priv->data_in)
+ gpio_nand_dir_input(priv);
for (i = 0; i < len; i++)
- buf[i] = ams_delta_io_read(priv);
+ buf[i] = priv->io_read(priv);
}
-static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
{
- gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
+ gpiod_set_value(priv->gpiod_nce, assert);
}
-static int ams_delta_exec_op(struct nand_chip *this,
+static int gpio_nand_exec_op(struct nand_chip *this,
const struct nand_operation *op, bool check_only)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
+ struct gpio_nand *priv = nand_get_controller_data(this);
const struct nand_op_instr *instr;
int ret = 0;
if (check_only)
return 0;
- ams_delta_ctrl_cs(priv, 1);
+ gpio_nand_ctrl_cs(priv, 1);
for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
switch (instr->type) {
case NAND_OP_CMD_INSTR:
gpiod_set_value(priv->gpiod_cle, 1);
- ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
gpiod_set_value(priv->gpiod_cle, 0);
break;
case NAND_OP_ADDR_INSTR:
gpiod_set_value(priv->gpiod_ale, 1);
- ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+ gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
gpiod_set_value(priv->gpiod_ale, 0);
break;
case NAND_OP_DATA_IN_INSTR:
- ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+ gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
- ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+ gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
@@ -206,28 +186,61 @@ static int ams_delta_exec_op(struct nand_chip *this,
break;
}
- ams_delta_ctrl_cs(priv, 0);
+ gpio_nand_ctrl_cs(priv, 0);
return ret;
}
-static const struct nand_controller_ops ams_delta_ops = {
- .exec_op = ams_delta_exec_op,
+static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
+ const struct nand_data_interface *cf)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+ struct device *dev = &nand_to_mtd(this)->dev;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ if (priv->gpiod_nre) {
+ priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+ dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+ }
+
+ priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+ dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .setup_data_interface = gpio_nand_setup_data_interface,
};
/*
* Main initialization routine
*/
-static int ams_delta_init(struct platform_device *pdev)
+static int gpio_nand_probe(struct platform_device *pdev)
{
- struct ams_delta_nand *priv;
+ struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+ const struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+ struct gpio_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
- struct gpio_descs *data_gpiods;
+ int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
int err = 0;
+ if (pdata) {
+ partitions = pdata->parts;
+ num_partitions = pdata->num_parts;
+ }
+
/* Allocate memory for MTD device structure and private data */
- priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -238,6 +251,7 @@ static int ams_delta_init(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(this, priv);
+ nand_set_flash_node(this, pdev->dev.of_node);
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
@@ -251,29 +265,33 @@ static int ams_delta_init(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- /* Set chip enabled, but */
- priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
+ /* Set chip enabled but write protected */
+ priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+ GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
+ priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
+ priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
+ priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
@@ -295,28 +313,62 @@ static int ams_delta_init(struct platform_device *pdev)
}
/* Request array of data pins, initialize them as input */
- data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
- if (IS_ERR(data_gpiods)) {
- err = PTR_ERR(data_gpiods);
+ priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+ GPIOD_IN);
+ if (IS_ERR(priv->data_gpiods)) {
+ err = PTR_ERR(priv->data_gpiods);
dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
return err;
}
- priv->data_gpiods = data_gpiods;
- priv->data_in = true;
+ if (priv->data_gpiods) {
+ if (!priv->gpiod_nwe) {
+ dev_err(&pdev->dev,
+ "mandatory NWE pin not provided by platform\n");
+ return -ENODEV;
+ }
- /* Initialize the NAND controller object embedded in ams_delta_nand. */
- priv->base.ops = &ams_delta_ops;
+ priv->io_read = gpio_nand_io_read;
+ priv->io_write = gpio_nand_io_write;
+ priv->data_in = true;
+ }
+
+ if (pdev->id_entry)
+ probe = (void *) pdev->id_entry->driver_data;
+ else
+ probe = of_device_get_match_data(&pdev->dev);
+ if (probe)
+ err = probe(pdev, priv);
+ if (err)
+ return err;
+
+ if (!priv->io_read || !priv->io_write) {
+ dev_err(&pdev->dev, "incomplete device configuration\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the NAND controller object embedded in gpio_nand. */
+ priv->base.ops = &gpio_nand_ops;
nand_controller_init(&priv->base);
this->controller = &priv->base;
+ /*
+ * FIXME: We should release write protection only after nand_scan() to
+ * be on the safe side but we can't do that until we have a generic way
+ * to assert/deassert WP from the core. Even if the core shouldn't
+ * write things in the nand_scan() path, it should have control on this
+ * pin just in case we ever need to disable write protection during
+ * chip detection/initialization.
+ */
+ /* Release write protection */
+ gpiod_set_value(priv->gpiod_nwp, 0);
+
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
return err;
/* Register the partitions */
- err = mtd_device_register(mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ err = mtd_device_register(mtd, partitions, num_partitions);
if (err)
goto err_nand_cleanup;
@@ -331,26 +383,47 @@ err_nand_cleanup:
/*
* Clean up routine
*/
-static int ams_delta_cleanup(struct platform_device *pdev)
+static int gpio_nand_remove(struct platform_device *pdev)
{
- struct ams_delta_nand *priv = platform_get_drvdata(pdev);
+ struct gpio_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ /* Apply write protection */
+ gpiod_set_value(priv->gpiod_nwp, 1);
+
/* Unregister device */
nand_release(mtd_to_nand(mtd));
return 0;
}
-static struct platform_driver ams_delta_nand_driver = {
- .probe = ams_delta_init,
- .remove = ams_delta_cleanup,
+static const struct of_device_id gpio_nand_of_id_table[] = {
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ {
+ .name = "ams-delta-nand",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .id_table = gpio_nand_plat_id_table,
.driver = {
.name = "ams-delta-nand",
+ .of_match_table = of_match_ptr(gpio_nand_of_id_table),
},
};
-module_platform_driver(ams_delta_nand_driver);
+module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 44518dada75b..e4e3ceeac38f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -102,6 +102,45 @@ struct brcm_nand_dma_desc {
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define EDU_CMD_WRITE 0x00
+#define EDU_CMD_READ 0x01
+#define EDU_STATUS_ACTIVE BIT(0)
+#define EDU_ERR_STATUS_ERRACK BIT(0)
+#define EDU_DONE_MASK GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND BIT(0)
+#define EDU_CONFIG_SWAP_BYTE BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG 0
+#endif
+
+/* edu registers */
+enum edu_reg {
+ EDU_CONFIG = 0,
+ EDU_DRAM_ADDR,
+ EDU_EXT_ADDR,
+ EDU_LENGTH,
+ EDU_CMD,
+ EDU_STOP,
+ EDU_STATUS,
+ EDU_DONE,
+ EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+ [EDU_CONFIG] = 0x00,
+ [EDU_DRAM_ADDR] = 0x04,
+ [EDU_EXT_ADDR] = 0x08,
+ [EDU_LENGTH] = 0x0c,
+ [EDU_CMD] = 0x10,
+ [EDU_STOP] = 0x14,
+ [EDU_STATUS] = 0x18,
+ [EDU_DONE] = 0x1c,
+ [EDU_ERR_STATUS] = 0x20,
+};
+
/* flash_dma registers */
enum flash_dma_reg {
FLASH_DMA_REVISION = 0,
@@ -167,6 +206,8 @@ enum {
BRCMNAND_HAS_WP = BIT(3),
};
+struct brcmnand_host;
+
struct brcmnand_controller {
struct device *dev;
struct nand_controller controller;
@@ -185,17 +226,32 @@ struct brcmnand_controller {
int cmd_pending;
bool dma_pending;
+ bool edu_pending;
struct completion done;
struct completion dma_done;
+ struct completion edu_done;
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* EDU info, per-transaction */
+ const u16 *edu_offsets;
+ void __iomem *edu_base;
+ int edu_irq;
+ int edu_count;
+ u64 edu_dram_addr;
+ u32 edu_ext_addr;
+ u32 edu_cmd;
+ u32 edu_config;
+
/* flash_dma reg */
const u16 *flash_dma_offsets;
struct brcm_nand_dma_desc *dma_desc;
dma_addr_t dma_pa;
+ int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd);
+
/* in-memory cache of the FLASH_CACHE, used only for some commands */
u8 flash_cache[FC_BYTES];
@@ -216,6 +272,7 @@ struct brcmnand_controller {
u32 nand_cs_nand_xor;
u32 corr_stat_threshold;
u32 flash_dma_mode;
+ u32 flash_edu_mode;
bool pio_poll_mode;
};
@@ -657,6 +714,22 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
__raw_writel(val, ctrl->nand_fc + word * 4);
}
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+ enum edu_reg reg, u32 val)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+ enum edu_reg reg)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ return brcmnand_readl(ctrl->edu_base + offs);
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -926,6 +999,16 @@ static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
return ctrl->flash_dma_base;
}
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+ return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+ return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
if (ctrl->pio_poll_mode)
@@ -1299,6 +1382,52 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
return tbytes;
}
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+ /* initialize edu */
+ edu_writel(ctrl, EDU_ERR_STATUS, 0);
+ edu_readl(ctrl, EDU_ERR_STATUS);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+}
+
+/* edu irq */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->edu_count) {
+ ctrl->edu_count--;
+ while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+ udelay(1);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+ }
+
+ if (ctrl->edu_count) {
+ ctrl->edu_dram_addr += FC_BYTES;
+ ctrl->edu_ext_addr += FC_BYTES;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctrl->edu_done);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
@@ -1307,6 +1436,16 @@ static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
if (ctrl->dma_pending)
return IRQ_HANDLED;
+	/* check if we need to piggyback on the ctlrdy irq */
+ if (ctrl->edu_pending) {
+ if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+ /* Discard interrupts while using dedicated edu irq */
+ return IRQ_HANDLED;
+
+ /* no registered edu irq, call handler */
+ return brcmnand_edu_irq(irq, data);
+ }
+
complete(&ctrl->done);
return IRQ_HANDLED;
}
@@ -1645,6 +1784,81 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
}
/**
+ * Kick EDU engine
+ */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(200);
+ int ret = 0;
+ int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+ unsigned int trans = len >> FC_SHIFT;
+ dma_addr_t pa;
+
+ pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+ return -ENOMEM;
+ }
+
+ ctrl->edu_pending = true;
+ ctrl->edu_dram_addr = pa;
+ ctrl->edu_ext_addr = addr;
+ ctrl->edu_cmd = edu_cmd;
+ ctrl->edu_count = trans;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+ edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+ edu_readl(ctrl, EDU_LENGTH);
+
+ /* Start edu engine */
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for EDU; status %#x, error status %#x\n",
+ edu_readl(ctrl, EDU_STATUS),
+ edu_readl(ctrl, EDU_ERR_STATUS));
+ }
+
+ dma_unmap_single(ctrl->dev, pa, len, dir);
+
+ /* for program page check NAND status */
+ if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+ edu_cmd == EDU_CMD_WRITE) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ /* Make sure the EDU status is clean */
+ if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+ dev_warn(ctrl->dev, "EDU still active: %#x\n",
+ edu_readl(ctrl, EDU_STATUS));
+
+ if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+ dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ ctrl->edu_pending = false;
+ brcmnand_edu_init(ctrl);
+ edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+ edu_readl(ctrl, EDU_STOP);
+
+ return ret;
+}
+
+/**
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
* following ahead of time:
* - Is this descriptor the beginning or end of a linked list?
@@ -1850,9 +2064,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
try_dmaread:
brcmnand_clear_ecc_addr(ctrl);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
- CMD_PAGE_READ);
+ if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
+ err = ctrl->dma_trans(host, addr, buf,
+ trans * FC_BYTES,
+ CMD_PAGE_READ);
+
if (err) {
if (mtd_is_bitflip_or_eccerr(err))
err_addr = addr;
@@ -1988,10 +2204,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < ctrl->max_oob; i += 4)
oob_reg_write(ctrl, i, 0xffffffff);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- if (brcmnand_dma_trans(host, addr, (u32 *)buf,
- mtd->writesize, CMD_PROGRAM_PAGE))
+ if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
+ CMD_PROGRAM_PAGE))
+
ret = -EIO;
+
goto out;
}
@@ -2494,6 +2712,8 @@ static int brcmnand_suspend(struct device *dev)
if (has_flash_dma(ctrl))
ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+ else if (has_edu(ctrl))
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
return 0;
}
@@ -2508,6 +2728,14 @@ static int brcmnand_resume(struct device *dev)
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
}
+	if (has_edu(ctrl)) {
+		/* restore the EDU configuration saved at suspend time */
+		edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+		edu_readl(ctrl, EDU_CONFIG);
+		brcmnand_edu_init(ctrl);
+	}
+
brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
@@ -2553,6 +2781,49 @@ MODULE_DEVICE_TABLE(of, brcmnand_of_match);
/***********************************************************************
* Platform driver setup (per controller)
***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+ if (res) {
+ ctrl->edu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->edu_base))
+ return PTR_ERR(ctrl->edu_base);
+
+ ctrl->edu_offsets = edu_regs;
+
+ edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+ EDU_CONFIG_SWAP_CFG);
+ edu_readl(ctrl, EDU_CONFIG);
+
+ /* initialize edu */
+ brcmnand_edu_init(ctrl);
+
+ ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+ if (ctrl->edu_irq < 0) {
+ dev_warn(dev,
+ "FLASH EDU enabled, using ctlrdy irq\n");
+ } else {
+ ret = devm_request_irq(dev, ctrl->edu_irq,
+ brcmnand_edu_irq, 0,
+ "brcmnand-edu", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->edu_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "FLASH EDU enabled using irq %u\n",
+ ctrl->edu_irq);
+ }
+ }
+
+ return 0;
+}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
@@ -2578,6 +2849,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
+ init_completion(&ctrl->edu_done);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
@@ -2675,6 +2947,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
}
dev_info(dev, "enabling FLASH_DMA\n");
+ /* set flash dma transfer function to call */
+ ctrl->dma_trans = brcmnand_dma_trans;
+ } else {
+ ret = brcmnand_edu_setup(pdev);
+ if (ret < 0)
+ goto err;
+
+ /* set edu transfer function to call */
+ ctrl->dma_trans = brcmnand_edu_trans;
}
/* Disable automatic device ID config, direct addressing */
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index f6c7102a1e32..efddc5c68afb 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -30,7 +30,6 @@
* Generic mode is used for executing rest of commands.
*/
-#define MAX_OOB_SIZE_PER_SECTOR 32
#define MAX_ADDRESS_CYC 6
#define MAX_ERASE_ADDRESS_CYC 3
#define MAX_DATA_SIZE 0xFFFC
@@ -190,6 +189,7 @@
/* BCH Engine identification register 3. */
#define BCH_CFG_3 0x844
+#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
/* Ready/Busy# line status. */
#define RBN_SETINGS 0x1004
@@ -499,6 +499,7 @@ struct cdns_nand_ctrl {
unsigned long assigned_cs;
struct list_head chips;
+ u8 bch_metadata_size;
};
struct cdns_nand_chip {
@@ -997,6 +998,7 @@ static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
return status;
cadence_nand_reset_irq(cdns_ctrl);
+ reinit_completion(&cdns_ctrl->complete);
writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
cdns_ctrl->reg + CMD_REG2);
@@ -1077,6 +1079,14 @@ static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
int max_step_size = 0, nstrengths, i;
u32 reg;
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+ cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+ if (cdns_ctrl->bch_metadata_size < 4) {
+ dev_err(cdns_ctrl->dev,
+			"Driver needs at least 4 bytes of BCH metadata\n");
+ return -EIO;
+ }
+
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
@@ -1170,7 +1180,8 @@ static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
cadence_nand_get_caps(cdns_ctrl);
- cadence_nand_read_bch_caps(cdns_ctrl);
+ if (cadence_nand_read_bch_caps(cdns_ctrl))
+ return -EIO;
/*
* Set IO width access to 8.
@@ -2585,9 +2596,8 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
- u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+ u32 ecc_size;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 max_oob_data_size;
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -2603,12 +2613,9 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
chip->options |= NAND_NO_SUBPAGE_WRITE;
cdns_chip->bbm_offs = chip->badblockpos;
- if (chip->options & NAND_BUSWIDTH_16) {
- cdns_chip->bbm_offs &= ~0x01;
- cdns_chip->bbm_len = 2;
- } else {
- cdns_chip->bbm_len = 1;
- }
+ cdns_chip->bbm_offs &= ~0x01;
+	/* this value should be an even number */
+ cdns_chip->bbm_len = 2;
ret = nand_ecc_choose_conf(chip,
&cdns_ctrl->ecc_caps,
@@ -2625,13 +2632,12 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
/* Error correction configuration. */
cdns_chip->sector_size = chip->ecc.size;
cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+ ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
- max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
-
- if (cdns_chip->avail_oob_size > max_oob_data_size)
- cdns_chip->avail_oob_size = max_oob_data_size;
+ if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+ cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
> mtd->oobsize)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index fafd0a0aa8e2..6a6c919b2569 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1317,6 +1317,7 @@ int denali_init(struct denali_controller *denali)
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
denali_clear_irq_all(denali);
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index e5cdcda56d14..ac46eb7956ce 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@ struct denali_chip {
struct nand_chip chip;
struct list_head node;
unsigned int nsels;
- struct denali_chip_sel sels[0];
+ struct denali_chip_sel sels[];
};
/**
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index c0e1a8ebe820..c2a391ad2c35 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1169,7 +1169,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = %d.%d.%d.%d\n"
" PercentUsed = %d\n",
@@ -1482,7 +1482,7 @@ static int __init doc_probe(unsigned long physadr)
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
- /* fall through */
+ fallthrough;
default:
ret = -ENODEV;
goto notfound;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 634c550db13a..e1dc675b12bb 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -324,8 +324,7 @@ static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
/* READ0 and READ1 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ1:
column += 256;
-
- /* fall-through */
+ fallthrough;
case NAND_CMD_READ0:
dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index b9d5d55a5edb..53b00c841aec 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1148,20 +1148,21 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct dma_chan *dma_chan;
+ int ret = 0;
/* request dma channel */
- dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!dma_chan) {
- dev_err(this->dev, "Failed to request DMA channel.\n");
- goto acquire_err;
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(this->dev, "DMA channel request failed: %d\n",
+ ret);
+ release_dma_channels(this);
+ } else {
+ this->dma_chans[0] = dma_chan;
}
- this->dma_chans[0] = dma_chan;
- return 0;
-
-acquire_err:
- release_dma_channels(this);
- return -EINVAL;
+ return ret;
}
static int gpmi_get_clks(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index e30feb56b650..96c5ae8b1bbc 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config MTD_NAND_JZ4780
tristate "JZ4780 NAND controller"
+ depends on MIPS || COMPILE_TEST
depends on JZ4780_NEMC
help
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index c954189606f6..8e22cd6ec71f 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -124,7 +124,6 @@ int ingenic_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_ecc *ecc;
- struct resource *res;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
@@ -134,8 +133,7 @@ int ingenic_ecc_probe(struct platform_device *pdev)
if (!ecc->ops)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ecc->base = devm_ioremap_resource(dev, res);
+ ecc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc->base))
return PTR_ERR(ecc->base);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 49afebee50db..935c4902ada7 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -253,7 +253,7 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
chip->ecc.calculate = ingenic_nand_ecc_calculate;
chip->ecc.correct = ingenic_nand_ecc_correct;
- /* fall through */
+ fallthrough;
case NAND_ECC_SOFT:
dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->ecc) ? "hardware ECC" : "software ECC",
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
index 6c852eae09cf..2d0e0a2192ae 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -145,10 +145,10 @@ static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall-through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall-through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index 079266a0d6cf..d67dbfff76cc 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -123,10 +123,10 @@ static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index cba6fe7dd8c4..9d0caadf940e 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -30,6 +30,7 @@
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_STMICRO 0x20
+/* Kioxia is the new name of Toshiba Memory. */
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_WINBOND 0xef
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fb5abdcfb007..179f0ca585f8 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -334,7 +334,7 @@ struct marvell_nand_chip {
int addr_cyc;
int selected_die;
unsigned int nsels;
- struct marvell_nand_chip_sel sels[0];
+ struct marvell_nand_chip_sel sels[];
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
@@ -2743,16 +2743,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (ret)
return ret;
- nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
- if (!nfc->dma_chan) {
- dev_err(nfc->dev,
- "Unable to request data DMA channel\n");
- return -ENODEV;
+ nfc->dma_chan = dma_request_chan(nfc->dev, "data");
+ if (IS_ERR(nfc->dma_chan)) {
+ ret = PTR_ERR(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nfc->dev, "DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
+ if (!r) {
+ ret = -ENXIO;
+ goto release_channel;
+ }
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2763,7 +2768,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
ret = dmaengine_slave_config(nfc->dma_chan, &config);
if (ret < 0) {
dev_err(nfc->dev, "Failed to configure DMA channel\n");
- return ret;
+ goto release_channel;
}
/*
@@ -2773,12 +2778,20 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
* the provided buffer.
*/
nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!nfc->dma_buf)
- return -ENOMEM;
+ if (!nfc->dma_buf) {
+ ret = -ENOMEM;
+ goto release_channel;
+ }
nfc->use_dma = true;
return 0;
+
+release_channel:
+ dma_release_channel(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+
+ return ret;
}
static void marvell_nfc_reset(struct marvell_nfc *nfc)
@@ -2920,10 +2933,13 @@ static int marvell_nfc_probe(struct platform_device *pdev)
ret = marvell_nand_chips_init(dev, nfc);
if (ret)
- goto unprepare_reg_clk;
+ goto release_dma;
return 0;
+release_dma:
+ if (nfc->use_dma)
+ dma_release_channel(nfc->dma_chan);
unprepare_reg_clk:
clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 9f17b5b8efbf..f6fb5c0e6255 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -118,7 +118,7 @@ struct meson_nfc_nand_chip {
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
- u8 sels[0];
+ u8 sels[];
};
struct meson_nand_ecc {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b8305e39ab51..ef149e8b26d0 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -131,7 +131,7 @@ struct mtk_nfc_nand_chip {
u32 spare_per_sector;
int nsels;
- u8 sels[0];
+ u8 sels[];
/* nothing after this field */
};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index f64e3b6605c6..c24e5e2ba130 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -683,7 +683,12 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
if (ret)
return ret;
- timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ /*
+	 * +1 below is necessary because if we are now in the last fraction
+	 * of a jiffy and msecs_to_jiffies() returns 1, we will wait only that
+	 * small fraction of a jiffy - possibly leading to a false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
ret = nand_read_data_op(chip, &status, sizeof(status), true);
if (ret)
@@ -4321,16 +4326,22 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
*/
static int nand_suspend(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
mutex_lock(&chip->lock);
- chip->suspended = 1;
+ if (chip->suspend)
+ ret = chip->suspend(chip);
+ if (!ret)
+ chip->suspended = 1;
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
/**
@@ -4342,11 +4353,14 @@ static void nand_resume(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
mutex_lock(&chip->lock);
- if (chip->suspended)
+ if (chip->suspended) {
+ if (chip->resume)
+ chip->resume(chip);
chip->suspended = 0;
- else
+ } else {
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
+ }
mutex_unlock(&chip->lock);
}
@@ -4360,6 +4374,38 @@ static void nand_shutdown(struct mtd_info *mtd)
nand_suspend(mtd);
}
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->lock_area)
+ return -ENOTSUPP;
+
+ return chip->lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->unlock_area)
+ return -ENOTSUPP;
+
+ return chip->unlock_area(chip, ofs, len);
+}
+
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
@@ -5591,8 +5637,7 @@ static int nand_scan_tail(struct nand_chip *chip)
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
if (!ecc->read_page)
@@ -5611,8 +5656,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW_SYNDROME:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
(!ecc->read_page ||
@@ -5649,8 +5693,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
ecc->algo = NAND_ECC_HAMMING;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_SOFT:
ret = nand_set_ecc_soft_ops(chip);
if (ret) {
@@ -5786,8 +5829,8 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
mtd->_sync = nand_sync;
- mtd->_lock = NULL;
- mtd->_unlock = NULL;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
mtd->_reboot = nand_shutdown;
@@ -5907,6 +5950,8 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+ nanddev_cleanup(&chip->base);
+
/* Free bad block table memory */
kfree(chip->bbt);
kfree(chip->data_buf);
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 194e4227aefe..7caedaa5b9e5 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -26,7 +26,7 @@
struct hynix_read_retry {
int nregs;
const u8 *regs;
- u8 values[0];
+ u8 values[];
};
/**
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index f2526ec616a6..f91e92e1b972 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -331,8 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
*/
if (column == -1 && page_addr == -1)
return;
- /* fall through */
-
+ fallthrough;
default:
/*
* If we don't have access to the busy pin, we apply the given
@@ -483,8 +482,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
-
- /* fall through - This applies to read commands */
+ fallthrough; /* This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 3ff7ce00cbdb..09c254c97b5c 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -6,11 +6,31 @@
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
*/
+#include <linux/delay.h>
#include "internals.h"
#define MACRONIX_READ_RETRY_BIT BIT(0)
#define MACRONIX_NUM_READ_RETRY_MODES 6
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
struct nand_onfi_vendor_macronix {
u8 reserved;
u8 reliability_func;
@@ -29,15 +49,83 @@ static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void macronix_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp = 0;
+ int ret;
if (!p->onfi)
return;
+ if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+ rand_otp = 1;
+
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+ /* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
return;
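The randomizer path is only taken when the chip advertises SET/GET_FEATURES support, and the vendor feature address is added to (or removed from) the set/get feature bitmaps so the core will accept it. A hedged sketch of the kind of check this enables elsewhere in the driver, assuming the bitmaps declared in struct nand_parameters (hypothetical helper, not part of the patch):

static bool mxic_randomizer_feature_ready(struct nand_chip *chip)
{
	struct nand_parameters *p = &chip->parameters;

	/* Both bitmaps must carry the vendor feature address. */
	return p->supports_set_get_features &&
	       test_bit(ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
			p->get_feature_list) &&
	       test_bit(ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
			p->set_feature_list);
}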
@@ -91,6 +179,143 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+/*
+ * Macronix NAND supports Block Protection through the Protection (PT) pin;
+ * it is active high at power-on and protects the entire chip even when #WP
+ * is disabled. The locked/unlocked protection area can be partitioned
+ * according to the protection bits, i.e. upper 1/2 locked, upper 1/4 locked
+ * and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->lock_area = mxic_nand_lock;
+ chip->unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+ * Toggle the #CS pin to resume the NAND device, regardless of the state
+ * of the other pins (CLE, #WE, #RE).
+ * Any NAND controller can assert/de-assert #CS simply by sending a byte
+ * over the NAND bus, e.g. a power-down or reset command issued without
+ * R/B# status checking.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+ /* The minimum of a recovery time tRDP is 35 us */
+ usleep_range(35, 100);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->suspend = mxic_nand_suspend;
+ chip->resume = mxic_nand_resume;
+}
+
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
@@ -98,6 +323,8 @@ static int macronix_nand_init(struct nand_chip *chip)
macronix_nand_fix_broken_get_timings(chip);
macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index 9c03fbb1f47d..f3dcd695b5db 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -14,14 +14,68 @@
/* Recommended to rewrite for BENAND */
#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
unsigned int max_bitflips = 0;
- u8 status;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
/* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fall back to the regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() fails.
+ */
ret = nand_status_op(chip, &status);
if (ret)
return ret;
@@ -108,7 +162,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(chip->id.data[4] & 0x80) /* !BENAND */) {
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
memorg->oobsize = 32 * memorg->pagesize >> 9;
mtd->oobsize = memorg->oobsize;
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 9a70754a61ef..1de03bb34e84 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2251,10 +2251,10 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
chip->bbt_options |= NAND_BBT_NO_OOB;
- /* fall through */
+ fallthrough;
case 1:
chip->bbt_options |= NAND_BBT_USE_FLASH;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
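These hunks are part of the tree-wide conversion from fall-through comments to the fallthrough pseudo-keyword, which compilers can actually verify. A small illustrative function (hypothetical, mirroring the nandsim switch above):

static unsigned int ns_bbt_options(int bbt)
{
	unsigned int options = 0;

	switch (bbt) {
	case 2:
		options |= NAND_BBT_NO_OOB;
		fallthrough;	/* a 2 implies everything a 1 implies */
	case 1:
		options |= NAND_BBT_USE_FLASH;
		break;
	default:
		break;
	}

	return options;
}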
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 5502ffbdd1e6..3fa0e2cbbe53 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -455,13 +455,13 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -503,13 +503,13 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7bb9a7e8e1e7..5b11c7061497 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2628,6 +2628,29 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
};
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->is_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
@@ -2673,22 +2696,37 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
return -EIO;
}
- nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
- if (!nandc->tx_chan) {
- dev_err(nandc->dev, "failed to request tx channel\n");
- return -ENODEV;
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "tx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
- if (!nandc->rx_chan) {
- dev_err(nandc->dev, "failed to request rx channel\n");
- return -ENODEV;
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
- if (!nandc->cmd_chan) {
- dev_err(nandc->dev, "failed to request cmd channel\n");
- return -ENODEV;
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "cmd DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
/*
@@ -2702,14 +2740,19 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unalloc;
}
} else {
- nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
- if (!nandc->chan) {
- dev_err(nandc->dev,
- "failed to request slave channel\n");
- return -ENODEV;
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rxtx DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
}
@@ -2720,29 +2763,9 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
nandc->controller.ops = &qcom_nandc_ops;
return 0;
-}
-
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
}
/* one time setup of a few nand controller registers */
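The allocation path switches from dma_request_slave_channel(), which can only signal failure by returning NULL, to dma_request_chan(), which returns an ERR_PTR and therefore lets -EPROBE_DEFER reach the probe path; errors now unwind through qcom_nandc_unalloc(). A generic sketch of the pattern, with hypothetical names (this is not the driver's own helper):

#include <linux/device.h>
#include <linux/dmaengine.h>

static int request_named_chan(struct device *dev, const char *name,
			      struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, name);
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		/* Stay quiet on probe deferral, report everything else. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "%s DMA channel request failed: %d\n",
				name, ret);
		return ret;
	}

	return 0;
}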
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 3ba73f18841f..b6d45cd911ae 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1606,15 +1606,36 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
/* DMA configuration */
static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
{
- int ret;
+ int ret = 0;
- fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
- fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
- fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
+ fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
+ if (IS_ERR(fmc2->dma_tx_ch)) {
+ ret = PTR_ERR(fmc2->dma_tx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request tx DMA channel: %d\n", ret);
+ fmc2->dma_tx_ch = NULL;
+ goto err_dma;
+ }
- if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
- dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
- return 0;
+ fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
+ if (IS_ERR(fmc2->dma_rx_ch)) {
+ ret = PTR_ERR(fmc2->dma_rx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request rx DMA channel: %d\n", ret);
+ fmc2->dma_rx_ch = NULL;
+ goto err_dma;
+ }
+
+ fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
+ if (IS_ERR(fmc2->dma_ecc_ch)) {
+ ret = PTR_ERR(fmc2->dma_ecc_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request ecc DMA channel: %d\n", ret);
+ fmc2->dma_ecc_ch = NULL;
+ goto err_dma;
}
ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
@@ -1635,6 +1656,15 @@ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
init_completion(&fmc2->dma_ecc_complete);
return 0;
+
+err_dma:
+ if (ret == -ENODEV) {
+ dev_warn(fmc2->dev,
+ "DMAs not defined in the DT, polling mode is used\n");
+ ret = 0;
+ }
+
+ return ret;
}
/* NAND callbacks setup */
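Here DMA stays optional: when the device tree simply does not describe the channels, dma_request_chan() returns -ENODEV and the driver falls back to polling, while any other error (including -EPROBE_DEFER) is propagated. A compact sketch of that decision, again with hypothetical names:

#include <linux/device.h>
#include <linux/dmaengine.h>

/* Returns a channel, NULL when DMA is simply not wired up, or an ERR_PTR. */
static struct dma_chan *request_optional_chan(struct device *dev,
					      const char *name)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan) && PTR_ERR(chan) == -ENODEV) {
		dev_warn(dev, "%s DMA not described in DT, using polling\n",
			 name);
		return NULL;
	}

	return chan;
}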
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 37a4ac0dd85b..5f3e40b79fb1 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -195,7 +195,7 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int nsels;
- struct sunxi_nand_chip_sel sels[0];
+ struct sunxi_nand_chip_sel sels[];
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
@@ -2123,8 +2123,16 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_reset_reassert;
- nfc->dmac = dma_request_slave_channel(dev, "rxtx");
- if (nfc->dmac) {
+ nfc->dmac = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ goto out_ahb_reset_reassert;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
struct dma_slave_config dmac_cfg = { };
dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
@@ -2138,9 +2146,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (nfc->caps->extra_mbus_conf)
writel(readl(nfc->regs + NFC_REG_CTL) |
NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
-
- } else {
- dev_warn(dev, "failed to request rxtx DMA channel\n");
}
platform_set_drvdata(pdev, nfc);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 89f6beefb01c..b6bb358b96ce 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -16,6 +16,7 @@
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -370,10 +371,11 @@ out:
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+ u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -568,18 +570,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
- .ooblen = 2,
+ .ooblen = sizeof(marker),
.ooboffs = 0,
- .oobbuf.in = spinand->oobbuf,
+ .oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
- memset(spinand->oobbuf, 0, 2);
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
- if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
@@ -603,15 +605,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
- .ooblen = 2,
- .oobbuf.out = spinand->oobbuf,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
};
int ret;
- /* Erase block before marking it bad. */
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
@@ -620,9 +623,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- spinand_erase_op(spinand, pos);
-
- memset(spinand->oobbuf, 0, 2);
return spinand_write_page(spinand, &req);
}
@@ -762,24 +762,62 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&winbond_spinand_manufacturer,
};
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
- ret = spinand_manufacturers[i]->ops->detect(spinand);
- if (ret > 0) {
- spinand->manufacturer = spinand_manufacturers[i];
- return 0;
- } else if (ret < 0) {
- return ret;
- }
- }
+ const struct spinand_manufacturer *manufacturer =
+ spinand_manufacturers[i];
+
+ if (id[0] != manufacturer->id)
+ continue;
+
+ ret = spinand_match_and_init(spinand,
+ manufacturer->chips,
+ manufacturer->nchips,
+ rdid_method);
+ if (ret < 0)
+ continue;
+ spinand->manufacturer = manufacturer;
+ return 0;
+ }
return -ENOTSUPP;
}
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ ret = spinand_read_id_op(spinand, 0, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 1, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_ADDR);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 0, 1, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+ return ret;
+}
+
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
if (spinand->manufacturer->ops->init)
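Device detection now tries the three READ_ID variants in turn (bare opcode, opcode plus one address byte, opcode plus one dummy byte) and matches the raw bytes against per-manufacturer tables instead of per-manufacturer detect() hooks; byte 0 of the raw ID is the manufacturer ID and the following bytes are the device ID. A small stand-alone sketch of that matching rule (hypothetical helper, not the core's code):

#include <linux/string.h>
#include <linux/types.h>

static bool spinand_id_matches(const u8 *raw_id, u8 mfr_id,
			       const u8 *devid, unsigned int devid_len)
{
	/* raw_id[0] is the manufacturer ID, raw_id[1..] the device ID. */
	return raw_id[0] == mfr_id && !memcmp(raw_id + 1, devid, devid_len);
}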
@@ -835,9 +873,9 @@ spinand_select_op_variant(struct spinand_device *spinand,
* @spinand: SPI NAND object
* @table: SPI NAND device description table
* @table_size: size of the device description table
+ * @rdid_method: read id method to match
*
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
* entry in the SPI NAND description table. If a match is found, the spinand
* object will be initialized with information provided by the matching
* spinand_info entry.
@@ -846,8 +884,10 @@ spinand_select_op_variant(struct spinand_device *spinand,
*/
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid)
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
@@ -855,13 +895,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *info = &table[i];
const struct spi_mem_op *op;
- if (devid != info->devid)
+ if (rdid_method != info->devid.method)
+ continue;
+
+ if (memcmp(id + 1, info->devid.id, info->devid.len))
continue;
nand->memorg = table[i].memorg;
nand->eccreq = table[i].eccreq;
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
+ spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
op = spinand_select_op_variant(spinand,
@@ -898,13 +942,7 @@ static int spinand_detect(struct spinand_device *spinand)
if (ret)
return ret;
- ret = spinand_read_id_op(spinand, spinand->id.data);
- if (ret)
- return ret;
-
- spinand->id.len = SPINAND_MAX_ID_LEN;
-
- ret = spinand_manufacturer_detect(spinand);
+ ret = spinand_id_detect(spinand);
if (ret) {
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
spinand->id.data);
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e99d425aa93f..d219c970042a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -195,7 +195,8 @@ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info gigadevice_spinand_table[] = {
- SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ SPINAND_INFO("GD5F1GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -204,7 +205,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+ SPINAND_INFO("GD5F2GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -213,7 +215,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+ SPINAND_INFO("GD5F4GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -222,7 +225,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ SPINAND_INFO("GD5F1GQ4UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -231,7 +235,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+ SPINAND_INFO("GD5F1GQ4UFxxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
@@ -242,39 +247,13 @@ static const struct spinand_info gigadevice_spinand_table[] = {
gd5fxgq4ufxxg_ecc_get_status)),
};
-static int gigadevice_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- u16 did;
- int ret;
-
- /*
- * Earlier GDF5-series devices (A,E) return [0][MID][DID]
- * Later (F) devices return [MID][DID1][DID2]
- */
-
- if (id[0] == SPINAND_MFR_GIGADEVICE)
- did = (id[1] << 8) + id[2];
- else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
- did = id[2];
- else
- return 0;
-
- ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
- ARRAY_SIZE(gigadevice_spinand_table),
- did);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
- .detect = gigadevice_spinand_detect,
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
.id = SPINAND_MFR_GIGADEVICE,
.name = "GigaDevice",
+ .chips = gigadevice_spinand_table,
+ .nchips = ARRAY_SIZE(gigadevice_spinand_table),
.ops = &gigadevice_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 21def3f8fb36..0f900f3aa21a 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -99,7 +99,8 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info macronix_spinand_table[] = {
- SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ SPINAND_INFO("MX35LF1GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -108,7 +109,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
- SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ SPINAND_INFO("MX35LF2GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -118,33 +120,13 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
};
-static int macronix_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
- * raw_id is garbage.
- */
- if (id[1] != SPINAND_MFR_MACRONIX)
- return 0;
-
- ret = spinand_match_and_init(spinand, macronix_spinand_table,
- ARRAY_SIZE(macronix_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
- .detect = macronix_spinand_detect,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
.id = SPINAND_MFR_MACRONIX,
.name = "Macronix",
+ .chips = macronix_spinand_table,
+ .nchips = ARRAY_SIZE(macronix_spinand_table),
.ops = &macronix_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 7d7b1f7fcf71..5d370cfcdaaa 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -18,6 +18,16 @@
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per datasheet, die selection is done by the 6th bit of Die
+ * Select Register (Address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -34,38 +44,52 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
- region->offset = 64;
- region->length = 64;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
- region->length = 62;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
- .ecc = mt29f2g01abagd_ooblayout_ecc,
- .free = mt29f2g01abagd_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .free = micron_8_ooblayout_free,
};
-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -91,43 +115,131 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info micron_spinand_table[] = {
- SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ /* M79A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01ABAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
- mt29f2g01abagd_ecc_get_status)),
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
};
-static int micron_spinand_detect(struct spinand_device *spinand)
+static int micron_spinand_init(struct spinand_device *spinand)
{
- u8 *id = spinand->id.data;
- int ret;
-
/*
- * Micron SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
+ * The M70A device series enables the Continuous Read feature at
+ * power-up, which is not supported. Disable this bit to avoid any
+ * possible failure.
*/
- if (id[1] != SPINAND_MFR_MICRON)
- return 0;
-
- ret = spinand_match_and_init(spinand, micron_spinand_table,
- ARRAY_SIZE(micron_spinand_table), id[2]);
- if (ret)
- return ret;
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
- return 1;
+ return 0;
}
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
- .detect = micron_spinand_detect,
+ .init = micron_spinand_init,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
.id = SPINAND_MFR_MICRON,
.name = "Micron",
+ .chips = micron_spinand_table,
+ .nchips = ARRAY_SIZE(micron_spinand_table),
.ops = &micron_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 52307681cbd0..519ade513c1f 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -97,7 +97,8 @@ static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
static const struct spinand_info paragon_spinand_table[] = {
- SPINAND_INFO("PN26G01A", 0xe1,
+ SPINAND_INFO("PN26G01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -106,7 +107,8 @@ static const struct spinand_info paragon_spinand_table[] = {
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
- SPINAND_INFO("PN26G02A", 0xe2,
+ SPINAND_INFO("PN26G02A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -117,31 +119,13 @@ static const struct spinand_info paragon_spinand_table[] = {
pn26g0xa_ecc_get_status)),
};
-static int paragon_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /* Read ID returns [0][MID][DID] */
-
- if (id[1] != SPINAND_MFR_PARAGON)
- return 0;
-
- ret = spinand_match_and_init(spinand, paragon_spinand_table,
- ARRAY_SIZE(paragon_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
- .detect = paragon_spinand_detect,
};
const struct spinand_manufacturer paragon_spinand_manufacturer = {
.id = SPINAND_MFR_PARAGON,
.name = "Paragon",
+ .chips = paragon_spinand_table,
+ .nchips = ARRAY_SIZE(paragon_spinand_table),
.ops = &paragon_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 0db5ee4e82af..bc801d83343e 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+/* Kioxia is the new name of Toshiba Memory. */
#define SPINAND_MFR_TOSHIBA 0x98
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
@@ -19,14 +20,26 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st generation serial NAND devices
+ * which don't support the Quad Program Load operation.
+ */
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -37,8 +50,8 @@ static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -50,13 +63,13 @@ static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
- .ecc = tc58cxgxsx_ooblayout_ecc,
- .free = tc58cxgxsx_ooblayout_free,
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+ .ecc = tx58cxgxsxraix_ooblayout_ecc,
+ .free = tx58cxgxsxraix_ooblayout_free,
};
-static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
@@ -94,105 +107,174 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info toshiba_spinand_table[] = {
- /* 3.3V 1Gb */
- SPINAND_INFO("TC58CVG0S3", 0xC2,
+ /* 3.3V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 2Gb */
- SPINAND_INFO("TC58CVG1S3", 0xCB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xCD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
- NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
- 0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xED,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 1Gb */
- SPINAND_INFO("TC58CYG0S3", 0xB2,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 2Gb */
- SPINAND_INFO("TC58CYG1S3", 0xBB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 4Gb */
- SPINAND_INFO("TC58CYG2S0", 0xBD,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
-};
-
-static int toshiba_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
/*
- * Toshiba SPI NAND read ID needs a dummy byte,
- * so the first byte in id is garbage.
+ * 2nd generation serial NAND devices have HOLD_D, which is equivalent
+ * to QE_BIT.
*/
- if (id[1] != SPINAND_MFR_TOSHIBA)
- return 0;
-
- ret = spinand_match_and_init(spinand, toshiba_spinand_table,
- ARRAY_SIZE(toshiba_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
+ /* 3.3V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CVG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CYG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
- .detect = toshiba_spinand_detect,
};
const struct spinand_manufacturer toshiba_spinand_manufacturer = {
.id = SPINAND_MFR_TOSHIBA,
.name = "Toshiba",
+ .chips = toshiba_spinand_table,
+ .nchips = ARRAY_SIZE(toshiba_spinand_table),
.ops = &toshiba_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index a6c17e0cace8..76684428354e 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -75,7 +75,8 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
}
static const struct spinand_info winbond_spinand_table[] = {
- SPINAND_INFO("W25M02GV", 0xAB,
+ SPINAND_INFO("W25M02GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -84,7 +85,8 @@ static const struct spinand_info winbond_spinand_table[] = {
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
- SPINAND_INFO("W25N01GV", 0xAA,
+ SPINAND_INFO("W25N01GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -94,31 +96,6 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
};
-/**
- * winbond_spinand_detect - initialize device related part in spinand_device
- * struct if it is a Winbond device.
- * @spinand: SPI NAND device structure
- */
-static int winbond_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Winbond SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
- */
- if (id[1] != SPINAND_MFR_WINBOND)
- return 0;
-
- ret = spinand_match_and_init(spinand, winbond_spinand_table,
- ARRAY_SIZE(winbond_spinand_table), id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static int winbond_spinand_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -138,12 +115,13 @@ static int winbond_spinand_init(struct spinand_device *spinand)
}
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
- .detect = winbond_spinand_detect,
.init = winbond_spinand_init,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
.id = SPINAND_MFR_WINBOND,
.name = "Winbond",
+ .chips = winbond_spinand_table,
+ .nchips = ARRAY_SIZE(winbond_spinand_table),
.ops = &winbond_spinand_manuf_ops,
};
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 267b9000782e..6e816eafb312 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -24,79 +24,6 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
-config SPI_ASPEED_SMC
- tristate "Aspeed flash controllers in SPI mode"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on HAS_IOMEM && OF
- help
- This enables support for the Firmware Memory controller (FMC)
- in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
- and support for the SPI flash memory controller (SPI) for
- the host firmware. The implementation only supports SPI NOR.
-
-config SPI_CADENCE_QUADSPI
- tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || ARM64 || COMPILE_TEST)
- help
- Enable support for the Cadence Quad SPI Flash controller.
-
- Cadence QSPI is a specialized controller for connecting an SPI
- Flash over 1/2/4-bit wide bus. Enable this option if you have a
- device with a Cadence QSPI controller and want to access the
- Flash as an MTD device.
-
-config SPI_HISI_SFC
- tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
- depends on ARCH_HISI || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for HiSilicon FMC SPI-NOR flash controller.
-
-config SPI_NXP_SPIFI
- tristate "NXP SPI Flash Interface (SPIFI)"
- depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
- depends on HAS_IOMEM
- help
- Enable support for the NXP LPC SPI Flash Interface controller.
-
- SPIFI is a specialized controller for connecting serial SPI
- Flash. Enable this option if you have a device with a SPIFI
- controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
+source "drivers/mtd/spi-nor/controllers/Kconfig"
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 738dfd74cf76..7ddb742de1fe 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,9 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
+
+spi-nor-objs := core.o sfdp.o
+spi-nor-objs += atmel.o
+spi-nor-objs += catalyst.o
+spi-nor-objs += eon.o
+spi-nor-objs += esmt.o
+spi-nor-objs += everspin.o
+spi-nor-objs += fujitsu.o
+spi-nor-objs += gigadevice.o
+spi-nor-objs += intel.o
+spi-nor-objs += issi.o
+spi-nor-objs += macronix.o
+spi-nor-objs += micron-st.o
+spi-nor-objs += spansion.o
+spi-nor-objs += sst.o
+spi-nor-objs += winbond.o
+spi-nor-objs += xilinx.o
+spi-nor-objs += xmc.o
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
-obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
-obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
-obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
-obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
new file mode 100644
index 000000000000..3f5f21a473a6
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info atmel_parts[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+};
+
+static void atmel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups atmel_fixups = {
+ .default_init = atmel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_atmel = {
+ .name = "atmel",
+ .parts = atmel_parts,
+ .nparts = ARRAY_SIZE(atmel_parts),
+ .fixups = &atmel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
new file mode 100644
index 000000000000..011b83e99e95
--- /dev/null
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info catalyst_parts[] = {
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_catalyst = {
+ .name = "catalyst",
+ .parts = catalyst_parts,
+ .nparts = ARRAY_SIZE(catalyst_parts),
+};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 000000000000..10b86660b821
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+	  Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_HISI_SFC
+	tristate "Hisilicon FMC SPI-NOR Flash Controller (SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This enables support for the HiSilicon FMC SPI-NOR flash controller.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an MTD device.
+
+config SPI_INTEL_SPI
+ tristate
+
+config SPI_INTEL_SPI_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on X86 && PCI
+ select SPI_INTEL_SPI
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86
+ select SPI_INTEL_SPI
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
new file mode 100644
index 000000000000..46e6fbe586e3
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 395127349aa8..ae85e4c0e114 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -109,7 +109,7 @@ struct aspeed_smc_controller {
void __iomem *ahb_base; /* per-chip windows resource */
u32 ahb_window_size; /* full mapping window size */
- struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
+ struct aspeed_smc_chip *chips[]; /* pointers to attached chips */
};
/*
@@ -354,7 +354,7 @@ static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
default:
WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
nor->addr_width);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
cmdaddr = addr & 0xFFFFFF;
cmdaddr |= cmd << 24;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
index 494dcab4aaaa..494dcab4aaaa 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 6c7a4118752e..6c7a4118752e 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 81329f680bec..81329f680bec 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
index f80f1086f928..f80f1086f928 100644
--- a/drivers/mtd/spi-nor/intel-spi-platform.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
index 61d2a0ad2131..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.c
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
index e2f41b8827bf..e2f41b8827bf 100644
--- a/drivers/mtd/spi-nor/intel-spi.h
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.h
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9a5b1a7c636a..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
new file mode 100644
index 000000000000..cc68ea84318e
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.c
@@ -0,0 +1,3466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
+
+#define SPI_NOR_MAX_ADDR_WIDTH 4
+
+/**
+ * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
+ * transfer
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * If we have to use the bounce buffer, the data field in @op will be updated.
+ *
+ * Return: true if the bounce buffer is needed, false if not
+ */
+static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ /* op->data.buf.in occupies the same memory as op->data.buf.out */
+ if (object_is_on_stack(op->data.buf.in) ||
+ !virt_addr_valid(op->data.buf.in)) {
+ if (op->data.nbytes > nor->bouncebuf_size)
+ op->data.nbytes = nor->bouncebuf_size;
+ op->data.buf.in = nor->bouncebuf;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * spi_nor_spimem_exec_op() - execute a memory operation
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ int error;
+
+ error = spi_mem_adjust_op_size(nor->spimem, op);
+ if (error)
+ return error;
+
+ return spi_mem_exec_op(nor->spimem, op);
+}
+
+/**
+ * spi_nor_spimem_read_data() - read data from flash's memory region via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
+ size_t len, u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(len, buf, 1));
+ bool usebouncebuf;
+ ssize_t nbytes;
+ int error;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+
+ usebouncebuf = spi_nor_spimem_bounce(nor, &op);
+
+ if (nor->dirmap.rdesc) {
+ nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.in);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ if (usebouncebuf && nbytes > 0)
+ memcpy(buf, op.data.buf.in, nbytes);
+
+ return nbytes;
+}
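Note how spi_nor_spimem_read_data() converts the dummy period from clock cycles to bytes: cycles times the dummy-phase bus width (taken from the address phase) divided by 8. A minimal stand-alone sketch of that arithmetic, with an assumed 4-bit address/dummy phase and 8 dummy cycles (illustrative values, not from any particular datasheet):

#include <stdio.h>

/* Illustrative helper mirroring op.dummy.nbytes = (read_dummy * buswidth) / 8 above. */
static unsigned int dummy_cycles_to_bytes(unsigned int cycles, unsigned int buswidth)
{
	return (cycles * buswidth) / 8;
}

int main(void)
{
	/* e.g. 8 dummy cycles clocked on a 4-bit wide address/dummy phase */
	printf("%u dummy bytes\n", dummy_cycles_to_bytes(8, 4));	/* prints 4 */
	return 0;
}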
+
+/**
+ * spi_nor_read_data() - read data from flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_read_data(nor, from, len, buf);
+
+ return nor->controller_ops->read(nor, from, len, buf);
+}
+
+/**
+ * spi_nor_spimem_write_data() - write data to flash memory via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
+ size_t len, const u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ ssize_t nbytes;
+ int error;
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op.addr.nbytes = 0;
+
+ if (spi_nor_spimem_bounce(nor, &op))
+ memcpy(nor->bouncebuf, buf, op.data.nbytes);
+
+ if (nor->dirmap.wdesc) {
+ nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.out);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_write_data() - write data to flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_write_data(nor, to, len, buf);
+
+ return nor->controller_ops->write(nor, to, len, buf);
+}
+
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_disable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
+ SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ nor->bouncebuf[0] = enable << 7;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
+{
+ int ret;
+
+ nor->bouncebuf[0] = ear;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
+ * the flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_xsr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spi_nor_clear_sr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: nonzero if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ spi_nor_clear_fsr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return nor->bouncebuf[0] & FSR_READY;
+}
+
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ if (nor->flags & SNOR_F_READY_XSR_RDY)
+ sr = spi_nor_xsr_ready(nor);
+ else
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + timeout_jiffies;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_dbg(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the received value.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the received value, and that the 16-bit Write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params->quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params->quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register matches the received value, and that the 16-bit Write
+ * did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the received value without affecting other bits in
+ * the Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
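The 3-byte to 4-byte opcode conversion above is a linear scan of a two-column table that falls back to the input opcode when nothing matches. A stand-alone sketch of the same pattern; the opcode values are the commonly used JEDEC assignments (03h READ -> 13h READ_4B, 0Bh FAST_READ -> 0Ch) and are shown only for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same shape as spi_nor_convert_opcode(): scan a { from, to } table,
 * keep the original opcode if no conversion exists. */
static uint8_t convert_opcode(uint8_t opcode, const uint8_t table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];
	return opcode;
}

int main(void)
{
	/* Illustrative subset: normal read and fast read, 3-byte -> 4-byte. */
	static const uint8_t read_3to4[][2] = {
		{ 0x03, 0x13 },		/* READ      -> READ_4B */
		{ 0x0b, 0x0c },		/* READ_FAST -> READ_FAST_4B */
	};

	printf("0x03 -> 0x%02x\n", convert_opcode(0x03, read_3to4, 2));	/* 0x13 */
	printf("0xd8 -> 0x%02x\n", convert_opcode(0xd8, read_3to4, 2));	/* unchanged */
	return 0;
}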
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->params->erase_map.uniform_erase_type;
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
+}
+
+int spi_nor_lock_and_prep(struct spi_nor *nor)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor);
+ if (ret) {
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+void spi_nor_unlock_and_unprep(struct spi_nor *nor)
+{
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor);
+ mutex_unlock(&nor->lock);
+}
+
+static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
+{
+ if (!nor->params->convert_addr)
+ return addr;
+
+ return nor->params->convert_addr(nor, addr);
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ int i;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
+ }
+
+ /*
+ * Default implementation, if driver doesn't have a specialized HW
+ * control
+ */
+ for (i = nor->addr_width - 1; i >= 0; i--) {
+ nor->bouncebuf[i] = addr & 0xff;
+ addr >>= 8;
+ }
+
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
+}
+
+/**
+ * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @dividend: dividend value
+ * @remainder: pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+ u64 dividend, u32 *remainder)
+{
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+ *remainder = (u32)dividend & erase->size_mask;
+ return dividend >> erase->size_shift;
+}
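Since JESD216B mandates power-of-two erase sizes, the division above is just a shift and a mask. A worked stand-alone example with an assumed 64 KiB erase type (size_shift = 16, size_mask = 0xffff), mirroring the size_shift/size_mask fields of struct spi_nor_erase_type:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A 64 KiB erase type: size_shift = 16, size_mask = 0xffff. */
	unsigned int size_shift = 16;
	uint32_t size_mask = (1u << size_shift) - 1;

	uint64_t addr = 0x230000;			/* offset into the flash */
	uint32_t rem = (uint32_t)addr & size_mask;	/* 0: 64 KiB aligned */
	uint64_t quotient = addr >> size_shift;		/* 35 erase blocks in */

	printf("quotient=%llu remainder=%u\n",
	       (unsigned long long)quotient, rem);
	return 0;
}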
+
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ * offset in the serial flash memory and the
+ * number of bytes to erase. The region in
+ * which the address fits is expected to be
+ * provided.
+ * @map: the erase map of the SPI NOR
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Return: a pointer to the best fitted erase type, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ const struct spi_nor_erase_region *region,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_type *erase;
+ u32 rem;
+ int i;
+ u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /*
+ * Erase types are ordered by size, with the smallest erase type at
+ * index 0.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ /* Does the erase region support the tested erase type? */
+ if (!(erase_mask & BIT(i)))
+ continue;
+
+ erase = &map->erase_type[i];
+
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION)
+ return erase;
+
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+ else
+ return erase;
+ }
+
+ return NULL;
+}
+
+static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+/**
+ * spi_nor_region_next() - get the next spi nor region
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next spi nor region or NULL if last region.
+ */
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+ if (spi_nor_region_is_last(region))
+ return NULL;
+ region++;
+ return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ * which the offset fits
+ * @map: the erase map of the SPI NOR
+ * @addr: offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 region_end = region_start + region->size;
+
+ while (addr < region_start || addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+
+ region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ region_end = region_start + region->size;
+ }
+
+ return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase)
+{
+ struct spi_nor_erase_command *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->opcode = erase->opcode;
+ cmd->count = 1;
+
+ if (region->offset & SNOR_OVERLAID_REGION)
+ cmd->size = region->size;
+ else
+ cmd->size = erase->size;
+
+ return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list: list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+ struct spi_nor_erase_command *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, erase_list, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor: pointer to a 'struct spi_nor'
+ * @erase_list: list of erase commands to be executed once we validate that the
+ * erase can be performed
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Builds the list of best fitted erase commands and verifies if the erase can
+ * be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
+ goto erase_err;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else if (spi_nor_has_uniform_erase(nor)) {
+ while (len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+
+ /* erase multiple sectors */
+ } else {
+ ret = spi_nor_erase_multi_sectors(nor, addr, len);
+ if (ret)
+ goto erase_err;
+ }
+
+ ret = spi_nor_write_disable(nor);
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
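The full-chip erase timeout above scales linearly with the flash size in 2 MiB units. A quick stand-alone check of that arithmetic for an assumed 16 MiB part (times are expressed in seconds instead of jiffies for readability):

#include <stdio.h>

#define SZ_2M	(2UL * 1024 * 1024)

int main(void)
{
	/* Mirrors timeout = max(base, base * (mtd->size / SZ_2M)),
	 * with base = CHIP_ERASE_2MB_READY_WAIT_JIFFIES = 40 * HZ. */
	unsigned long base_seconds = 40;		/* 40 * HZ, expressed in seconds */
	unsigned long flash_size = 16UL * 1024 * 1024;	/* e.g. a 16 MiB part */
	unsigned long scaled = base_seconds * (flash_size / SZ_2M);
	unsigned long timeout = scaled > base_seconds ? scaled : base_seconds;

	printf("chip erase timeout: %lu s\n", timeout);	/* 320 s */
	return 0;
}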
+
+static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
+{
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
+ return mask | SR_BP3_BIT6;
+
+ if (nor->flags & SNOR_F_HAS_4BIT_BP)
+ return mask | SR_BP3;
+
+ return mask;
+}
+
+static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ return SR_TB_BIT6;
+ else
+ return SR_TB_BIT5;
+}
+
+static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+{
+ unsigned int bp_slots, bp_slots_needed;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+
+	/* Reserve one slot for "protect none" and one for "protect all". */
+ bp_slots = (1 << hweight8(mask)) - 2;
+ bp_slots_needed = ilog2(nor->info->n_sectors);
+
+ if (bp_slots_needed > bp_slots)
+ return nor->info->sector_size <<
+ (bp_slots_needed - bp_slots);
+ else
+ return nor->info->sector_size;
+}
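With N block-protection bits there are 2^N - 2 usable steps between "protect none" and "protect all", so when a flash has more sectors than steps each step must cover several sectors. A stand-alone numeric sketch, using values that match the 8 MiB example table further down (illustrative only):

#include <stdio.h>

/* Integer log2 for powers of two, enough for this illustration. */
static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int bp_bits = 3;				/* BP0..BP2 */
	unsigned int bp_slots = (1u << bp_bits) - 2;		/* 6 usable steps */
	unsigned int n_sectors = 128;				/* 128 x 64 KiB = 8 MiB */
	unsigned int sector_size = 64 * 1024;
	unsigned int bp_slots_needed = ilog2u(n_sectors);	/* 7 */
	unsigned long min_prot_len;

	if (bp_slots_needed > bp_slots)
		min_prot_len = (unsigned long)sector_size << (bp_slots_needed - bp_slots);
	else
		min_prot_len = sector_size;

	printf("min protected length: %lu KiB\n", min_prot_len >> 10);	/* 128 KiB */
	return 0;
}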
+
+static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 bp, val = sr & mask;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
+ val = (val & ~SR_BP3_BIT6) | SR_BP3;
+
+ bp = val >> SR_BP_SHIFT;
+
+ if (!bp) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ return;
+ }
+
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ *len = min_prot_len << (bp - 1);
+
+ if (*len > mtd->size)
+ *len = mtd->size;
+
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, u8 sr, bool locked)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ if (!len)
+ return 1;
+
+ spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
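The two predicates above are plain interval checks: "locked" means the requested range is contained in the protected range, "unlocked" means the two ranges do not overlap. A tiny stand-alone illustration with arbitrary offsets:

#include <stdio.h>
#include <stdbool.h>

/* Requested [ofs, ofs+len) fully inside locked [lofs, lofs+llen)? */
static bool range_contained(unsigned long ofs, unsigned long len,
			    unsigned long lofs, unsigned long llen)
{
	return ofs >= lofs && ofs + len <= lofs + llen;
}

/* Requested range completely outside the locked range? */
static bool range_disjoint(unsigned long ofs, unsigned long len,
			   unsigned long lofs, unsigned long llen)
{
	return ofs >= lofs + llen || ofs + len <= lofs;
}

int main(void)
{
	/* Locked region: upper 2 MiB of an 8 MiB flash -> [6 MiB, 8 MiB). */
	unsigned long lofs = 6UL << 20, llen = 2UL << 20;

	printf("%d\n", range_contained(7UL << 20, 512UL << 10, lofs, llen));	/* 1 */
	printf("%d\n", range_disjoint(0, 1UL << 20, lofs, llen));		/* 1 */
	printf("%d\n", range_disjoint(5UL << 20, 2UL << 20, lofs, llen));	/* 0 */
	return 0;
}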
+
+static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
+ * register (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ if (lock_len == mtd->size) {
+ val = mask;
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ if (val & ~mask)
+ return -EINVAL;
+
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
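For a partial lock the BP field is computed as pow = ilog2(lock_len) - ilog2(min_prot_len) + 1. A stand-alone check of that arithmetic against the sample table above: locking the upper 2 MiB of the 8 MiB part should give BP2..BP0 = 101, the "Upper 1/4" row (values here are illustrative):

#include <stdio.h>

static unsigned int ilog2u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long flash_size = 8UL << 20;		/* 8 MiB, as in the table */
	unsigned long min_prot_len = 128UL << 10;	/* 128 KiB (see the helper above) */
	unsigned long lock_len = 2UL << 20;		/* request: lock upper 2 MiB */

	/* Same formula as spi_nor_sr_lock() uses for a partial lock. */
	unsigned int pow = ilog2u(lock_len) - ilog2u(min_prot_len) + 1;

	/* pow = 21 - 17 + 1 = 5 = 0b101 -> BP2=1 BP1=0 BP0=1, "Upper 1/4". */
	printf("flash %lu MiB, BP field = %u (0b%u%u%u)\n",
	       flash_size >> 20, pow,
	       (pow >> 2) & 1, (pow >> 1) & 1, pow & 1);
	return 0;
}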
+
+/*
+ * Unlock a region of the flash. See spi_nor_sr_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
+ * for more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
+}
+
+static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
+ .lock = spi_nor_sr_lock,
+ .unlock = spi_nor_sr_unlock,
+ .is_locked = spi_nor_sr_is_locked,
+};
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/**
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
+
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
+
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 *sr2 = nor->bouncebuf;
+ int ret;
+ u8 sr2_written;
+
+ /* Check current Quad Enable bit value. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+ if (*sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ *sr2 |= SR2_QUAD_EN_BIT7;
+
+ ret = spi_nor_write_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ sr2_written = *sr2;
+
+ /* Read back and check it. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_manufacturer *manufacturers[] = {
+ &spi_nor_atmel,
+ &spi_nor_catalyst,
+ &spi_nor_eon,
+ &spi_nor_esmt,
+ &spi_nor_everspin,
+ &spi_nor_fujitsu,
+ &spi_nor_gigadevice,
+ &spi_nor_intel,
+ &spi_nor_issi,
+ &spi_nor_macronix,
+ &spi_nor_micron,
+ &spi_nor_st,
+ &spi_nor_spansion,
+ &spi_nor_sst,
+ &spi_nor_winbond,
+ &spi_nor_xilinx,
+ &spi_nor_xmc,
+};
+
+static const struct flash_info *
+spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
+ const u8 *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < nparts; i++) {
+ if (parts[i].id_len &&
+ !memcmp(parts[i].id, id, parts[i].id_len))
+ return &parts[i];
+ }
+
+ return NULL;
+}
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ const struct flash_info *info;
+ u8 *id = nor->bouncebuf;
+ unsigned int i;
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
+ }
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ info = spi_nor_search_part_by_id(manufacturers[i]->parts,
+ manufacturers[i]->nparts,
+ id);
+ if (info) {
+ nor->manufacturer = manufacturers[i];
+ return info;
+ }
+ }
+
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ while (len) {
+ loff_t addr = from;
+
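+ /* Translate the address for flashes that need it (params->convert_addr). */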
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * chunks of at most nor->page_size bytes. The address range may be any size
+ * provided it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+ * calculated with an AND operation. On the other cases we
+ * need to do a modulus operation (more expensive).
+ * Power of two numbers have only one bit set and we can use
+ * the instruction hweight32 to detect if we need to do a
+ * modulus (do_div()) or not.
+ */
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
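+ /*
+ * e.g. a 256-byte page and addr 0x1234 give page_offset 0x34, so at
+ * most 0xcc (204) bytes fit in this first chunk.
+ */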
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
+ ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
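+ /*
+ * The caller must fill either nor->spimem or a complete set of
+ * nor->controller_ops, and never both at the same time.
+ */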
+ if (!nor->dev ||
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+/**
+ * spi_nor_spimem_check_op() - check if the operation is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_op(struct spi_nor *nor,
+ struct spi_mem_op *op)
+{
+ /*
+ * First test with 4 address bytes. The opcode itself might
+ * be a 3B addressing opcode but we don't care, because
+ * SPI controller implementation should not check the opcode,
+ * but just the sequence.
+ */
+ op->addr.nbytes = 4;
+ if (!spi_mem_supports_op(nor->spimem, op)) {
+ if (nor->mtd.size > SZ_16M)
+ return -ENOTSUPP;
+
+ /* If flash size <= 16MB, 3 address bytes are sufficient */
+ op->addr.nbytes = 3;
+ if (!spi_mem_supports_op(nor->spimem, op))
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_spimem_check_readop() - check if the read op is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @read: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ const struct spi_nor_read_command *read)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_DUMMY(0, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
+ op.dummy.buswidth / 8;
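+ /* e.g. 8 dummy clocks on a quad (4-bit) bus become 8 * 4 / 8 = 4 bytes. */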
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_check_pp() - check if the page program op is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @pp: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_pp(struct spi_nor *nor,
+ const struct spi_nor_pp_command *pp)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
+ * based on SPI controller capabilities
+ * @nor: pointer to a 'struct spi_nor'
+ * @hwcaps: pointer to resulting capabilities after adjusting
+ * according to controller and flash's capability
+ */
+static void
+spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ unsigned int cap;
+
+ /* DTR modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_DTR;
+
+ /* X-X-X modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
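+ /*
+ * Probe each remaining capability: drop the bit when the matching
+ * read or page program op template is rejected by the controller.
+ */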
+ for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+ int rdidx, ppidx;
+
+ if (!(*hwcaps & BIT(cap)))
+ continue;
+
+ rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+ if (rdidx >= 0 &&
+ spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+ *hwcaps &= ~BIT(cap);
+
+ ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+ if (ppidx < 0)
+ continue;
+
+ if (spi_nor_spimem_check_pp(nor,
+ &params->page_programs[ppidx]))
+ *hwcaps &= ~BIT(cap);
+ }
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
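+ /* e.g. a 64 KiB erase type gives size_shift = 16 and size_mask = 0xffff. */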
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
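+ /* e.g. erase_mask = 0x3 gives offset = 0x3 | SNOR_LAST_REGION = 0x13. */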
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_bfpt) {
+ ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+ bfpt, params);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+ params);
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
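+ /* The highest set bit is the highest-priority shared read capability. */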
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &nor->params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+ * In the spi-nor framework, we don't need to distinguish between mode
+ * clock cycles and wait state clock cycles.
+ * Indeed, the value of the mode clock cycles is used by a QSPI
+ * flash memory to know whether it should enter or leave its 0-4-4
+ * (Continuous Read / XIP) mode.
+ * eXecution In Place is out of the scope of the mtd sub-system.
+ * Hence we choose to merge both mode and wait state clock cycles
+ * into the so called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &nor->params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size or of the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
+{
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /*
+ * If the current erase size is the one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+ * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 wanted_size = nor->info->sector_size;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+ * that the SPI flash memory has a uniform layout and then used only one
+ * of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ wanted_size = 4096u;
+#endif
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
+ }
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (map->erase_type[i].size) {
+ erase = &map->erase_type[i];
+ break;
+ }
+ }
+
+ if (!erase)
+ return -EINVAL;
+
+ mtd->erasesize = erase->size;
+ return 0;
+}
+
+static int spi_nor_default_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 ignored_mask, shared_mask;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ if (nor->spimem) {
+ /*
+ * When called from spi_nor_probe(), all caps are set and we
+ * need to discard some of them based on what the SPI
+ * controller actually supports (using spi_mem_supports_op()).
+ */
+ spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
+ } else {
+ /*
+ * SPI n-n-n protocols are not supported when the SPI
+ * controller directly implements the spi_nor interface.
+ * Yet another reason to switch to spi-mem.
+ */
+ ignored_mask = SNOR_HWCAPS_X_X_X;
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported.\n");
+ shared_mask &= ~ignored_mask;
+ }
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ if (!nor->params->setup)
+ return 0;
+
+ return nor->params->setup(nor, hwcaps);
+}
+
+/**
+ * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
+ * settings based on MFR register and ->default_init() hook.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->default_init)
+ nor->manufacturer->fixups->default_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->default_init)
+ nor->info->fixups->default_init(nor);
+}
+
+/**
+ * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
+ * based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ } else {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ }
+}
+
+/**
+ * spi_nor_info_init_params() - Initialize the flash's parameters and settings
+ * based on nor->info data.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_info_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ u8 i, erase_mask;
+
+ /* Initialize legacy flash parameters and settings. */
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K_PMC);
+ i++;
+ } else if (info->flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed (is also called for SPI NORs that do not
+ * support RDSFDP).
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Typically used to tweak various parameters that could not be extracted by
+ * other means (i.e. when the information provided by the SFDP/flash_info
+ * tables is incomplete or wrong).
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to set default flash parameters and settings when the ->default_init()
+ * hook or the SFDP parser leaves voids.
+ */
+static void spi_nor_late_init_params(struct spi_nor *nor)
+{
+ /*
+ * NOR protection support. When locking_ops are not provided, we pick
+ * the default ones.
+ */
+ if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+ nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ * based on nor->info data:
+ * spi_nor_info_init_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ * done based on the MFR register, or when the decisions cannot be made solely
+ * based on MFR, by using specific flash_info tweaks, ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ * should be more accurate than the above.
+ * spi_nor_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ *
+ * which can be overwritten by:
+ * 4/ Post SFDP flash parameters initialization. Used to tweak various
+ * parameters that could not be extracted by other means (i.e. when
+ * the information provided by the SFDP/flash_info tables is incomplete or
+ * wrong).
+ * spi_nor_post_sfdp_fixups()
+ *
+ * 5/ Late default flash parameters initialization, used when the
+ * ->default_init() hook or the SFDP parser do not set specific params.
+ * spi_nor_late_init_params()
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+ if (!nor->params)
+ return -ENOMEM;
+
+ spi_nor_info_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(nor->info->flags & SPI_NOR_SKIP_SFDP))
+ spi_nor_sfdp_init_params(nor);
+
+ spi_nor_post_sfdp_fixups(nor);
+
+ spi_nor_late_init_params(nor);
+
+ return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+ if (!nor->params->quad_enable)
+ return 0;
+
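+ /* Only set the QE bit when a Quad read or Quad page program was selected. */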
+ if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4))
+ return 0;
+
+ return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility imposes unlocking the entire flash memory array at power-up
+ * by default.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+
+ err = spi_nor_unlock_all(nor);
+ if (err) {
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+ return err;
+ }
+
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ nor->params->set_4byte_addr_mode(nor, true);
+ }
+
+ return 0;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ nor->flags & SNOR_F_BROKEN_RESET)
+ nor->params->set_4byte_addr_mode(nor, false);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
+static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
+ const char *name)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ for (j = 0; j < manufacturers[i]->nparts; j++) {
+ if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ nor->manufacturer = manufacturers[i];
+ return &manufacturers[i]->parts[j];
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (nor->info->addr_width) {
+ nor->addr_width = nor->info->addr_width;
+ } else if (nor->mtd.size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static void spi_nor_debugfs_init(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ mtd->dbg.partname = info->name;
+ mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
+ info->id_len, info->id);
+}
+
+static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+ const char *name)
+{
+ const struct flash_info *info = NULL;
+
+ if (name)
+ info = spi_nor_match_id(nor, name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
+ return ERR_PTR(-ENOENT);
+
+ /*
+ * If the caller has specified the name of a flash model that can
+ * normally be detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct flash_info *jinfo;
+
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return jinfo;
+ } else if (jinfo != info) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(nor->dev, "found %s, expected %s\n",
+ jinfo->name, info->name);
+ info = jinfo;
+ }
+ }
+
+ return info;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ const struct flash_info *info;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ /*
+ * We need the bounce buffer early to read/write registers when going
+ * through the spi-mem layer (buffers have to be DMA-able).
+ * For spi-mem drivers, we'll reallocate a new buffer if
+ * nor->page_size turns out to be greater than PAGE_SIZE (which
+ * shouldn't happen anytime soon since NOR pages are usually less
+ * than 1KB) after spi_nor_scan() returns.
+ */
+ nor->bouncebuf_size = PAGE_SIZE;
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+
+ info = spi_nor_get_flash_info(nor, name);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ nor->info = info;
+
+ spi_nor_debugfs_init(nor, info);
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Make sure the XSR_RDY flag is set before calling
+ * spi_nor_wait_till_ready(). Xilinx S3AN flashes share their
+ * MFR ID with Atmel SPI NOR flashes.
+ */
+ if (info->flags & SPI_NOR_XSR_RDY)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+
+ if (info->flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ mtd->_write = spi_nor_write;
+
+ /* Init flash parameters based on flash_info struct and SFDP */
+ ret = spi_nor_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = nor->params->size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
+
+ if (nor->params->locking_ops) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
+ }
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (info->flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (info->flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = nor->params->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ */
+ ret = spi_nor_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ if (info->flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ ret = spi_nor_set_addr_width(nor);
+ if (ret)
+ return ret;
+
+ /* Send all the required SPI flash commands to initialize device */
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static int spi_nor_create_read_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
+
+ nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
+}
+
+static int spi_nor_create_write_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
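+ /*
+ * SST AAI programming sends the address only with the first command;
+ * subsequent cycles (sst_write_second) must not carry an address.
+ */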
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op->addr.nbytes = 0;
+
+ nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
+}
+
+static int spi_nor_probe(struct spi_mem *spimem)
+{
+ struct spi_device *spi = spimem->spi;
+ struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct spi_nor *nor;
+ /*
+ * Enable all caps by default. The core will mask them after
+ * checking what's really supported using spi_mem_supports_op().
+ */
+ const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+ char *flash_name;
+ int ret;
+
+ nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->spimem = spimem;
+ nor->dev = &spi->dev;
+ spi_nor_set_flash_node(nor, spi->dev.of_node);
+
+ spi_mem_set_drvdata(spimem, nor);
+
+ if (data && data->name)
+ nor->mtd.name = data->name;
+
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
+ /*
+ * For some (historical?) reason many platforms provide two different
+ * names in flash_platform_data: "name" and "type". Quite often name is
+ * set to "m25p80" and then "type" provides a real chip name.
+ * If that's the case, respect "type" and ignore the "name".
+ */
+ if (data && data->type)
+ flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
+ else
+ flash_name = spi->modalias;
+
+ ret = spi_nor_scan(nor, flash_name, &hwcaps);
+ if (ret)
+ return ret;
+
+ /*
+ * None of the existing parts have > 512B pages, but let's play safe
+ * and add this logic so that if anyone ever adds support for such
+ * a NOR we don't end up with buffer overflows.
+ */
+ if (nor->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->page_size;
+ devm_kfree(nor->dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(nor->dev,
+ nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+ }
+
+ ret = spi_nor_create_read_dirmap(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_create_write_dirmap(nor);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+}
+
+static int spi_nor_remove(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+
+ /* Clean up MTD stuff. */
+ return mtd_device_unregister(&nor->mtd);
+}
+
+static void spi_nor_shutdown(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and their
+ * differences can often be distinguished by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
+ */
+static const struct spi_device_id spi_nor_dev_ids[] = {
+ /*
+ * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+ * hack around the fact that the SPI core does not provide uevent
+ * matching for .of_match_table
+ */
+ {"spi-nor"},
+
+ /*
+ * Entries not used in DTs that should be safe to drop after replacing
+ * them with "spi-nor" in platform data.
+ */
+ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
+
+ /*
+ * Entries that were used in DTs without "jedec,spi-nor" fallback and
+ * should be kept for backward compatibility.
+ */
+ {"at25df321a"}, {"at25df641"}, {"at26df081a"},
+ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l25635e"},{"mx66l51235l"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
+ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
+ {"s25fl064k"},
+ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
+ {"m25p64"}, {"m25p128"},
+ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25q80bl"}, {"w25q128"}, {"w25q256"},
+
+ /* Flashes that can't be detected using JEDEC */
+ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
+ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
+ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+
+ /* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
+ { "mr25h256" }, /* 256 Kib, 40 MHz */
+ { "mr25h10" }, /* 1 Mib, 40 MHz */
+ { "mr25h40" }, /* 4 Mib, 40 MHz */
+
+ { },
+};
+MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
+
+static const struct of_device_id spi_nor_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_nor_of_table);
+
+/*
+ * REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+static struct spi_mem_driver spi_nor_driver = {
+ .spidrv = {
+ .driver = {
+ .name = "spi-nor",
+ .of_match_table = spi_nor_of_table,
+ },
+ .id_table = spi_nor_dev_ids,
+ },
+ .probe = spi_nor_probe,
+ .remove = spi_nor_remove,
+ .shutdown = spi_nor_shutdown,
+};
+module_spi_mem_driver(spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
new file mode 100644
index 000000000000..6f2f6b27173f
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H
+#define __LINUX_MTD_SPI_NOR_INTERNAL_H
+
+#include "sfdp.h"
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+ SNOR_F_READY_XSR_RDY = BIT(3),
+ SNOR_F_USE_CLSR = BIT(4),
+ SNOR_F_BROKEN_RESET = BIT(5),
+ SNOR_F_4B_OPCODES = BIT(6),
+ SNOR_F_HAS_4BAIT = BIT(7),
+ SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
+ SNOR_F_HAS_4BIT_BP = BIT(12),
+ SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+};
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B imposes erase sizes to be a power of 2.
+ * @size_shift: @size is a power of 2, the shift is stored in
+ * @size_shift.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of erase region start.
+ * LSB bits are used as a bitmask encoding flags to
+ * determine if this region is overlaid, if this region is
+ * the last in the SPI NOR flash memory and to indicate
+ * all the supported erase commands inside this region.
+ * The erase types are sorted in ascending order with the
+ * smallest Erase Type size being at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding erase types that can erase the
+ * entire memory. This member is completed at init by
+ * uniform and non-uniform SPI NOR flash memories if they
+ * support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
+};
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock: lock a region of the SPI NOR.
+ * @unlock: unlock a region of the SPI NOR.
+ * @is_locked: check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+ int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @size: the flash memory density in bytes.
+ * @page_size: the page size of the SPI NOR flash memory.
+ * @hwcaps: describes the read and page program hardware
+ * capabilities.
+ * @reads: read capabilities ordered by priority: the higher index
+ * in the array, the higher priority.
+ * @page_programs: page program capabilities ordered by priority: the
+ * higher index in the array, the higher priority.
+ * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
+ * Table.
+ * @quad_enable: enables SPI NOR quad mode.
+ * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr: converts an absolute address into something the flash
+ * will understand. Particularly useful when the page size
+ * is not a power of 2.
+ * @setup: configures the SPI NOR memory. Useful for SPI NOR
+ * flashes that have peculiarities to the SPI NOR standard
+ * e.g. different opcodes, specific address calculation,
+ * page size, etc.
+ * @locking_ops: SPI NOR locking methods.
+ */
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ struct spi_nor_erase_map erase_map;
+
+ int (*quad_enable)(struct spi_nor *nor);
+ int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
+ u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+ int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+
+ const struct spi_nor_locking_ops *locking_ops;
+};
+
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @default_init: called after default flash parameters init. Used to tweak
+ * flash parameters when information provided by the flash_info
+ * table is incomplete or wrong.
+ * @post_bfpt: called after the BFPT table has been parsed
+ * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
+ * that do not support RDSFDP). Typically used to tweak various
+ * parameters that could not be extracted by other means (i.e.
+ * when information provided by the SFDP/flash_info tables are
+ * incomplete or wrong).
+ *
+ * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+ void (*default_init)(struct spi_nor *nor);
+ int (*post_bfpt)(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+ void (*post_sfdp)(struct spi_nor *nor);
+};
+
+struct flash_info {
+ char *name;
+
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u32 flags;
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
+#define SPI_NOR_XSR_RDY BIT(10) /*
+ * S3AN flashes have specific opcode to
+ * read the status register.
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4byte address op codes
+ * to support memory size above 128Mib.
+ */
+#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
+ * Top/Bottom (TB) is bit 6 of
+ * status register. Must be used with
+ * SPI_NOR_HAS_TB.
+ */
+#define SPI_NOR_4BIT_BP BIT(17) /*
+ * Flash SR has 4 bit fields (BP0-3)
+ * for block protection.
+ */
+#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
+ * BP3 is bit 6 of status register.
+ * Must be used with SPI_NOR_4BIT_BP.
+ */
+
+ /* Part specific fixup hooks. */
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Used when the "_ext_id" is two bytes at most */
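+/*
+ * e.g. INFO(0x1c3016, 0, 64 * 1024, 64, 0) yields
+ * id = { 0x1c, 0x30, 0x16 } and id_len = 3.
+ */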
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags),
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8*_page_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = _page_size, \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY,
+
+/**
+ * struct spi_nor_manufacturer - SPI NOR manufacturer object
+ * @name: manufacturer name
+ * @parts: array of parts supported by this manufacturer
+ * @nparts: number of entries in the parts array
+ * @fixups: hooks called at various points in time during spi_nor_scan()
+ */
+struct spi_nor_manufacturer {
+ const char *name;
+ const struct flash_info *parts;
+ unsigned int nparts;
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Manufacturer drivers. */
+extern const struct spi_nor_manufacturer spi_nor_atmel;
+extern const struct spi_nor_manufacturer spi_nor_catalyst;
+extern const struct spi_nor_manufacturer spi_nor_eon;
+extern const struct spi_nor_manufacturer spi_nor_esmt;
+extern const struct spi_nor_manufacturer spi_nor_everspin;
+extern const struct spi_nor_manufacturer spi_nor_fujitsu;
+extern const struct spi_nor_manufacturer spi_nor_gigadevice;
+extern const struct spi_nor_manufacturer spi_nor_intel;
+extern const struct spi_nor_manufacturer spi_nor_issi;
+extern const struct spi_nor_manufacturer spi_nor_macronix;
+extern const struct spi_nor_manufacturer spi_nor_micron;
+extern const struct spi_nor_manufacturer spi_nor_st;
+extern const struct spi_nor_manufacturer spi_nor_spansion;
+extern const struct spi_nor_manufacturer spi_nor_sst;
+extern const struct spi_nor_manufacturer spi_nor_winbond;
+extern const struct spi_nor_manufacturer spi_nor_xilinx;
+extern const struct spi_nor_manufacturer spi_nor_xmc;
+
+int spi_nor_write_enable(struct spi_nor *nor);
+int spi_nor_write_disable(struct spi_nor *nor);
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_lock_and_prep(struct spi_nor *nor);
+void spi_nor_unlock_and_unprep(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u8 *buf);
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto);
+
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode);
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region);
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size);
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+
+static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
new file mode 100644
index 000000000000..ddb8e3650835
--- /dev/null
+++ b/drivers/mtd/spi-nor/eon.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info eon_parts[] = {
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_eon = {
+ .name = "eon",
+ .parts = eon_parts,
+ .nparts = ARRAY_SIZE(eon_parts),
+};
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
new file mode 100644
index 000000000000..c93170008118
--- /dev/null
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info esmt_parts[] = {
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+};
+
+const struct spi_nor_manufacturer spi_nor_esmt = {
+ .name = "esmt",
+ .parts = esmt_parts,
+ .nparts = ARRAY_SIZE(esmt_parts),
+};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
new file mode 100644
index 000000000000..04a177a32283
--- /dev/null
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info everspin_parts[] = {
+ /* Everspin */
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_everspin = {
+ .name = "everspin",
+ .parts = everspin_parts,
+ .nparts = ARRAY_SIZE(everspin_parts),
+};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
new file mode 100644
index 000000000000..e385d93e756c
--- /dev/null
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info fujitsu_parts[] = {
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_fujitsu = {
+ .name = "fujitsu",
+ .parts = fujitsu_parts,
+ .nparts = ARRAY_SIZE(fujitsu_parts),
+};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
new file mode 100644
index 000000000000..447d84bb2128
--- /dev/null
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static void gd25q256_default_init(struct spi_nor *nor)
+{
+ /*
+ * Some manufacturers, like GigaDevice, may use a different
+ * bit to set QE on different memories, so the MFR alone can't
+ * indicate the quad_enable method for this case; we need
+ * to set it in the default_init fixup hook.
+ */
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static struct spi_nor_fixups gd25q256_fixups = {
+ .default_init = gd25q256_default_init,
+};
+
+static const struct flash_info gigadevice_parts[] = {
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ .fixups = &gd25q256_fixups },
+};
+
+const struct spi_nor_manufacturer spi_nor_gigadevice = {
+ .name = "gigadevice",
+ .parts = gigadevice_parts,
+ .nparts = ARRAY_SIZE(gigadevice_parts),
+};
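
As a side note on the fixup mechanism used by gd25q256 above, here is a hedged sketch of how the core could invoke the manufacturer-level and part-level default_init hooks. The helper name and the nor->manufacturer/nor->info fields are assumptions for this sketch, not the exact core.c code.

static void spi_nor_default_init_fixups(struct spi_nor *nor)
{
	/* Manufacturer-wide default_init, if the vendor driver defines one. */
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	/* Then per-part fixups such as gd25q256_fixups above. */
	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}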
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
new file mode 100644
index 000000000000..d8196f101368
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info intel_parts[] = {
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+};
+
+static void intel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups intel_fixups = {
+ .default_init = intel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_intel = {
+ .name = "intel",
+ .parts = intel_parts,
+ .nparts = ARRAY_SIZE(intel_parts),
+ .fixups = &intel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
new file mode 100644
index 000000000000..ffcb60e54a80
--- /dev/null
+++ b/drivers/mtd/spi-nor/issi.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises a
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
+ * Overwrite the address width advertised by the BFPT.
+ */
+ if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+ BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+ nor->addr_width = 4;
+
+ return 0;
+}
+
+static struct spi_nor_fixups is25lp256_fixups = {
+ .post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static const struct flash_info issi_parts[] = {
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+};
+
+static void issi_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static const struct spi_nor_fixups issi_fixups = {
+ .default_init = issi_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_issi = {
+ .name = "issi",
+ .parts = issi_parts,
+ .nparts = ARRAY_SIZE(issi_parts),
+ .fixups = &issi_fixups,
+};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
new file mode 100644
index 000000000000..ab0f963d630c
--- /dev/null
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+ * Unfortunately, Macronix has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+ * seems that the F version advertises support for Fast Read 4-4-4 in
+ * its BFPT table.
+ */
+ if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static struct spi_nor_fixups mx25l25635_fixups = {
+ .post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
+static const struct flash_info macronix_parts[] = {
+ /* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &mx25l25635_fixups },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
+ SPI_NOR_QUAD_READ) },
+};
+
+static void macronix_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups macronix_fixups = {
+ .default_init = macronix_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+ .name = "macronix",
+ .parts = macronix_parts,
+ .nparts = ARRAY_SIZE(macronix_parts),
+ .fixups = &macronix_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 000000000000..6c034b9718e2
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info micron_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static const struct flash_info st_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+};
+
+/**
+ * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void micron_st_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ nor->params->quad_enable = NULL;
+ nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups micron_st_fixups = {
+ .default_init = micron_st_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+ .name = "micron",
+ .parts = micron_parts,
+ .nparts = ARRAY_SIZE(micron_parts),
+ .fixups = &micron_st_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+ .name = "st",
+ .parts = st_parts,
+ .nparts = ARRAY_SIZE(st_parts),
+ .fixups = &micron_st_fixups,
+};
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 000000000000..f6038d3a3684
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+struct sfdp_header {
+ u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+ u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Fast Read settings. */
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+ /*
+ * The half-word at offset <settings_shift> in BFPT DWORD <settings_dword>
+ * encodes the op code, the number of mode clocks and the number of wait
+ * states to be used by the Fast Read x-y-z command.
+ */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+struct sfdp_bfpt_erase {
+ /*
+ * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT 16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT 24
+#define SMPT_CMD_READ_DATA(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT 8
+#define SMPT_CMD_OPCODE(_cmd) \
+ (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT 16
+#define SMPT_MAP_REGION_COUNT(_header) \
+ ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+ SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT 8
+#define SMPT_MAP_ID(_header) \
+ (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT 8
+#define SMPT_MAP_REGION_SIZE(_region) \
+ (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+ SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+ ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP BIT(1)
+#define SMPT_DESC_END BIT(0)
+
+#define SFDP_4BAIT_DWORD_MAX 2
+
+struct sfdp_4bait {
+ /* The hardware capability. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+ * the associated 4-byte address op code is supported.
+ */
+ u32 supported_bit;
+};
+
+/**
+ * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
+ * addr_width and read_dummy members of the struct spi_nor should be
+ * previously set.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to read
+ * @buf: buffer where the data is copied into (dma-safe memory)
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+ ssize_t ret;
+
+ while (len) {
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret < 0)
+ return ret;
+ if (!ret || ret > len)
+ return -EIO;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ return 0;
+}
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_width, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_width = 3;
+ nor->read_dummy = 8;
+
+ ret = spi_nor_read_raw(nor, addr, len, buf);
+
+ nor->read_opcode = read_opcode;
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, as @buf is not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ BFPT_DWORD(1), BIT(16), /* Supported bit */
+ BFPT_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ BFPT_DWORD(1), BIT(20), /* Supported bit */
+ BFPT_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ BFPT_DWORD(5), BIT(0), /* Supported bit */
+ BFPT_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ BFPT_DWORD(1), BIT(22), /* Supported bit */
+ BFPT_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ BFPT_DWORD(1), BIT(21), /* Supported bit */
+ BFPT_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ BFPT_DWORD(5), BIT(4), /* Supported bit */
+ BFPT_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {BFPT_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {BFPT_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {BFPT_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {BFPT_DWORD(9), 16},
+};
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ * @i: erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode, u8 i)
+{
+ erase->idx = i;
+ spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l: member in the left half of the map's erase_type array
+ * @r: member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort in ascending order the
+ * map's erase types, the smallest erase type size being the first member in the
+ * sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+ const struct spi_nor_erase_type *left = l, *right = r;
+
+ return left->size - right->size;
+}
+
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 sorted_erase_mask = 0;
+
+ if (!erase_mask)
+ return 0;
+
+ /* Replicate the sort done for the map's erase types. */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ return sorted_erase_mask;
+}
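
A short worked example of the remapping performed by spi_nor_sort_erase_mask(), using assumed BFPT values:

/*
 * Worked example (assumed values): suppose the BFPT listed the erase
 * types as idx 0 = 64 KiB, idx 1 = 4 KiB, idx 2 = 32 KiB.  After the
 * sort() on size, the map's erase_type[] order is {4 KiB, 32 KiB, 64 KiB}
 * with idx fields {1, 2, 0}.  An erase mask of BIT(0) | BIT(2)
 * (64 KiB and 32 KiB in BFPT order) is therefore remapped to
 * BIT(1) | BIT(2) in the sorted order.
 */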
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map: the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process of
+ * finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u8 region_erase_mask, sorted_erase_mask;
+
+ while (region) {
+ region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ sorted_erase_mask = spi_nor_sort_erase_mask(map,
+ region_erase_mask);
+
+ /* Overwrite erase mask. */
+ region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+ sorted_erase_mask;
+
+ region = spi_nor_region_next(region);
+ }
+}
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast Read commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size has been available since JESD216 rev A;
+ * however, the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Setting the QE bit tells the
+ * memory to reassign its WP# and HOLD#/RESET# pins to the IO2 and IO3
+ * functions, hence enabling the four (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr;
+ u16 half;
+ u8 erase_mask;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ nor->addr_width = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ nor->addr_width = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ params->size = bfpt.dwords[BFPT_DWORD(2)];
+ if (params->size & BIT(31)) {
+ params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << params->size;
+ } else {
+ params->size++;
+ }
+ params->size >>= 3; /* Convert to bytes. */
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
+
+ /*
+ * Sector Erase settings. Reinitialize the uniform erase map using the
+ * Erase Types defined in the bfpt table.
+ */
+ erase_mask = 0;
+ memset(&params->erase_map, 0, sizeof(params->erase_map));
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+ opcode, i);
+ }
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+ /*
+ * Sort all the map's Erase Types in ascending order with the smallest
+ * erase size being the first member in the erase_type array.
+ */
+ sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+ spi_nor_map_cmp_erase_type, NULL);
+ /*
+ * Sort the erase types in the uniform region in order to update the
+ * uniform_erase_type bitmask. The bitmask will be used later on when
+ * selecting the uniform erase.
+ */
+ spi_nor_regions_sort_erase_types(map);
+ map->uniform_erase_type = map->uniform_region.offset &
+ SNOR_ERASE_TYPE_MASK;
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length < BFPT_DWORD_MAX)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+ params);
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+ params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << params->page_size;
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT7:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ /*
+ * JESD216 rev B or later does not specify whether writing only one
+ * byte to the Status Register clears Status Register 2 or not, so
+ * let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
+}
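
A short worked example of the density decoding in spi_nor_parse_bfpt() above, using assumed DWORD values:

/*
 * Worked example (assumed values): DWORD2 = 0x00ffffff means the density
 * field is N - 1, so the part holds 0x01000000 bits, i.e. 2 MiB after the
 * conversion to bytes.  DWORD2 = 0x80000022 has BIT(31) set, so the density
 * is 2^0x22 = 2^34 bits, i.e. 2 GiB.
 */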
+
+/**
+ * spi_nor_smpt_addr_width() - return the address width used in the
+ * configuration detection command.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ */
+static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+{
+ switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+ case SMPT_CMD_ADDRESS_LEN_0:
+ return 0;
+ case SMPT_CMD_ADDRESS_LEN_3:
+ return 3;
+ case SMPT_CMD_ADDRESS_LEN_4:
+ return 4;
+ case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+ default:
+ return nor->addr_width;
+ }
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ * latency, in clock cycles.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+ u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+ return nor->read_dummy;
+ return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ * @smpt_len: sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+ u8 smpt_len)
+{
+ const u32 *ret;
+ u8 *buf;
+ u32 addr;
+ int err;
+ u8 i;
+ u8 addr_width, read_opcode, read_dummy;
+ u8 read_data_mask, map_id;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+ read_opcode = nor->read_opcode;
+
+ map_id = 0;
+ /* Determine if there are any optional Detection Command Descriptors */
+ for (i = 0; i < smpt_len; i += 2) {
+ if (smpt[i] & SMPT_DESC_TYPE_MAP)
+ break;
+
+ read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+ nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+ nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+ addr = smpt[i + 1];
+
+ err = spi_nor_read_raw(nor, addr, 1, buf);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ /*
+ * Build an index value that is used to select the Sector Map
+ * Configuration that is currently in use.
+ */
+ map_id = map_id << 1 | !!(*buf & read_data_mask);
+ }
+
+ /*
+ * If command descriptors are provided, they always precede map
+ * descriptors in the table. There is no need to start the iteration
+ * over the smpt array all over again.
+ *
+ * Find the matching configuration map.
+ */
+ ret = ERR_PTR(-EINVAL);
+ while (i < smpt_len) {
+ if (SMPT_MAP_ID(smpt[i]) == map_id) {
+ ret = smpt + i;
+ break;
+ }
+
+ /*
+ * If there are no more configuration map descriptors and no
+ * configuration ID matched the configuration identifier, the
+ * sector address map is unknown.
+ */
+ if (smpt[i] & SMPT_DESC_END)
+ break;
+
+ /* increment the table index to the next map */
+ i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+ }
+
+ /* fall through */
+out:
+ kfree(buf);
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+ nor->read_opcode = read_opcode;
+ return ret;
+}
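
A short worked example of how the configuration map index is built by spi_nor_get_map_in_use(), with assumed detection results:

/*
 * Worked example (assumed values): with two detection command descriptors,
 * if the first read returns a byte with the masked bit set and the second
 * returns it cleared, map_id is built as (0 << 1 | 1) = 1 and then
 * (1 << 1 | 0) = 2.  The search loop then picks the map descriptor whose
 * SMPT_MAP_ID() equals 2.
 */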
+
+static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @erase_type: erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase,
+ const u8 erase_type)
+{
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (!(erase_type & BIT(i)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+ return;
+ }
+ }
+}
+
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
+ * used for storing SFDP parsed data
+ * @smpt: pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params,
+ const u32 *smpt)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_region *region;
+ u64 offset;
+ u32 region_count;
+ int i, j;
+ u8 uniform_erase_type, save_uniform_erase_type;
+ u8 erase_type, regions_erase_type;
+
+ region_count = SMPT_MAP_REGION_COUNT(*smpt);
+ /*
+ * The regions will be freed when the driver detaches from the
+ * device.
+ */
+ region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ map->regions = region;
+
+ uniform_erase_type = 0xff;
+ regions_erase_type = 0;
+ offset = 0;
+ /* Populate regions. */
+ for (i = 0; i < region_count; i++) {
+ j = i + 1; /* index for the region dword */
+ region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+ erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+ region[i].offset = offset | erase_type;
+
+ spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+ /*
+ * Save the erase types that are supported in all regions and
+ * can erase the entire flash memory.
+ */
+ uniform_erase_type &= erase_type;
+
+ /*
+ * regions_erase_type mask will indicate all the erase types
+ * supported in this configuration map.
+ */
+ regions_erase_type |= erase_type;
+
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
+
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
+
+ if (!regions_erase_type) {
+ /*
+ * Roll back to the previous uniform_erase_type mask, SMPT is
+ * broken.
+ */
+ map->uniform_erase_type = save_uniform_erase_type;
+ return -EINVAL;
+ }
+
+ /*
+ * BFPT advertises all the erase types supported by all the possible
+ * map configurations. Mask out the erase types that are not supported
+ * by the current map configuration.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+ spi_nor_region_mark_end(&region[i - 1]);
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
+ * that is used for storing SFDP parsed data
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kmalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ le32_to_cpu_array(smpt, smpt_header->length);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&params->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header,
+ struct spi_nor_flash_parameter *params)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[0] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[0] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[0] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_width value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[1] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
+ }
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+ * later because we already did the conversion to 4byte opcodes. Also,
+ * this latest function implements a legacy quirk for the erase size of
+ * Spansion memory. However this quirk is no longer needed with new
+ * SFDP compliant memories.
+ */
+ nor->addr_width = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This standard tends to be supported by almost all
+ * (Q)SPI memory manufacturers. The SFDP tables allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations such
+ * as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+ * Hence we read the parameter headers only once to reduce the
+ * processing time. Also, we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse optional parameter tables. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ err = spi_nor_parse_smpt(nor, param_header, params);
+ break;
+
+ case SFDP_4BAIT_ID:
+ err = spi_nor_parse_4bait(nor, param_header, params);
+ break;
+
+ default:
+ break;
+ }
+
+ if (err) {
+ dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+ * if optional table parsers fail. In case of failure,
+ * each optional parser is responsible for rolling back to
+ * the previously known spi_nor data.
+ */
+ err = 0;
+ }
+ }
+
+exit:
+ kfree(param_headers);
+ return err;
+}
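
To show how the parser above is meant to be consumed, here is a hedged sketch of a caller that runs spi_nor_parse_sfdp() on a scratch copy of the parameters so that a failed parse leaves the legacy defaults untouched. The function name, the nor->params pointer layout and the rollback details are assumptions for this sketch, not the exact core.c code.

static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	/* Work on a copy so a broken SFDP does not clobber the defaults. */
	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
		/* Parsing failed: undo what the parser may have touched. */
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	} else {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
	}
}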
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
new file mode 100644
index 000000000000..e0a8ded04890
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SFDP_H
+#define __LINUX_MTD_SFDP_H
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ * reads based on instruction. DQ3/HOLD# functions are held during
+ * instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ * Read Status instruction 05h. Status register 2 is read using
+ * instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params);
+
+#endif /* __LINUX_MTD_SFDP_H */
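
A stand-alone sketch of how the QER field documented above would be decoded from the 15th BFPT DWORD; the mask and codes mirror the BFPT_DWORD15_QER_* definitions above, while the sample DWORD value is invented.

#include <stdint.h>
#include <stdio.h>

#define QER_MASK        (0x7UL << 20)           /* GENMASK(22, 20) */

int main(void)
{
        uint32_t dword15 = 0x2UL << 20;         /* pretend the flash reports 010b */

        switch (dword15 & QER_MASK) {
        case 0x0UL << 20:
                puts("no QE bit");
                break;
        case 0x2UL << 20:
                puts("QE is Status Register 1 bit 6 (Macronix style)");
                break;
        case 0x5UL << 20:
                puts("QE is Status Register 2 bit 1 (Spansion style)");
                break;
        default:
                puts("one of the other SR2 bit 1 / bit 7 variants");
                break;
        }
        return 0;
}
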
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 000000000000..6756202ace4b
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info spansion_parts[] = {
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->params->size <= SZ_16M)
+ return;
+
+ nor->flags |= SNOR_F_4B_OPCODES;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+}
+
+static const struct spi_nor_fixups spansion_fixups = {
+ .post_sfdp = spansion_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_spansion = {
+ .name = "spansion",
+ .parts = spansion_parts,
+ .nparts = ARRAY_SIZE(spansion_parts),
+ .fixups = &spansion_fixups,
+};
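
For illustration only, a simplified user-space sketch of how a probed JEDEC ID would be matched against a parts table like the one above; the struct and entries are stand-ins for the kernel's flash_info, and the probed bytes are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a flash_info entry: name plus 3-byte JEDEC ID. */
struct part {
        const char *name;
        uint8_t id[3];
};

static const struct part parts[] = {
        { "s25sl032p", { 0x01, 0x02, 0x15 } },
        { "s25fl256l", { 0x01, 0x60, 0x19 } },
};

int main(void)
{
        uint8_t probed[3] = { 0x01, 0x60, 0x19 };       /* invented RDID answer */
        size_t i;

        for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
                if (!memcmp(parts[i].id, probed, sizeof(probed))) {
                        printf("matched %s\n", parts[i].name);
                        return 0;
                }
        }
        puts("unknown flash");
        return 1;
}
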
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
deleted file mode 100644
index 4fc632ec18fe..000000000000
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ /dev/null
@@ -1,5434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
- * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
- *
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
-#include <linux/sched/task_stack.h>
-#include <linux/spi/flash.h>
-#include <linux/mtd/spi-nor.h>
-
-/* Define max times to check status register before we give up. */
-
-/*
- * For everything but full-chip erase; probably could be much smaller, but kept
- * around for safety for now
- */
-#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
-
-/*
- * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
- * for larger flash
- */
-#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-
-#define SPI_NOR_MAX_ID_LEN 6
-#define SPI_NOR_MAX_ADDR_WIDTH 4
-
-struct sfdp_parameter_header {
- u8 id_lsb;
- u8 minor;
- u8 major;
- u8 length; /* in double words */
- u8 parameter_table_pointer[3]; /* byte address */
- u8 id_msb;
-};
-
-#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
-#define SFDP_PARAM_HEADER_PTP(p) \
- (((p)->parameter_table_pointer[2] << 16) | \
- ((p)->parameter_table_pointer[1] << 8) | \
- ((p)->parameter_table_pointer[0] << 0))
-
-#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
-#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
-#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
-
-#define SFDP_SIGNATURE 0x50444653U
-#define SFDP_JESD216_MAJOR 1
-#define SFDP_JESD216_MINOR 0
-#define SFDP_JESD216A_MINOR 5
-#define SFDP_JESD216B_MINOR 6
-
-struct sfdp_header {
- u32 signature; /* 0x50444653U <=> "SFDP" */
- u8 minor;
- u8 major;
- u8 nph; /* 0-based number of parameter headers */
- u8 unused;
-
- /* Basic Flash Parameter Table. */
- struct sfdp_parameter_header bfpt_header;
-};
-
-/* Basic Flash Parameter Table */
-
-/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
- * They are indexed from 1 but C arrays are indexed from 0.
- */
-#define BFPT_DWORD(i) ((i) - 1)
-#define BFPT_DWORD_MAX 16
-
-/* The first version of JESD216 defined only 9 DWORDs. */
-#define BFPT_DWORD_MAX_JESD216 9
-
-/* 1st DWORD. */
-#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
-#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
-#define BFPT_DWORD1_DTR BIT(19)
-#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
-#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
-#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
-
-/* 5th DWORD. */
-#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
-#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
-
-/* 11th DWORD. */
-#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
-#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
-
-/* 15th DWORD. */
-
-/*
- * (from JESD216 rev B)
- * Quad Enable Requirements (QER):
- * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
- * reads based on instruction. DQ3/HOLD# functions are held during
- * instruction phase.
- * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * Writing only one byte to the status register has the side-effect of
- * clearing status register 2, including the QE bit. The 100b code is
- * used if writing one byte to the status register does not modify
- * status register 2.
- * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
- * one data byte where bit 6 is one.
- * [...]
- * - 011b: QE is bit 7 of status register 2. It is set via Write status
- * register 2 instruction 3Eh with one data byte where bit 7 is one.
- * [...]
- * The status register 2 is read using instruction 3Fh.
- * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * In contrast to the 001b code, writing one byte to the status
- * register does not modify status register 2.
- * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
- * Read Status instruction 05h. Status register 2 is read using
- * instruction 35h. QE is set via Write Status instruction 01h with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- */
-#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
-#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
-#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
-#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
-#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
-
-struct sfdp_bfpt {
- u32 dwords[BFPT_DWORD_MAX];
-};
-
-/**
- * struct spi_nor_fixups - SPI NOR fixup hooks
- * @default_init: called after default flash parameters init. Used to tweak
- * flash parameters when information provided by the flash_info
- * table is incomplete or wrong.
- * @post_bfpt: called after the BFPT table has been parsed
- * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
- * that do not support RDSFDP). Typically used to tweak various
- * parameters that could not be extracted by other means (i.e.
- * when the information provided by the SFDP/flash_info tables is
- * incomplete or wrong).
- *
- * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
- * table is broken or not available.
- */
-struct spi_nor_fixups {
- void (*default_init)(struct spi_nor *nor);
- int (*post_bfpt)(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params);
- void (*post_sfdp)(struct spi_nor *nor);
-};
-
-struct flash_info {
- char *name;
-
- /*
- * This array stores the ID bytes.
- * The first three bytes are the JEDEC ID.
- * JEDEC ID zero means "no ID" (mostly older chips).
- */
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u32 flags;
-#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
-#define SST_WRITE BIT(2) /* use SST byte programming */
-#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
-#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
-#define USE_FSR BIT(7) /* use flag status register */
-#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB BIT(9) /*
- * Flash SR has Top/Bottom (TB) protect
- * bit. Must be used with
- * SPI_NOR_HAS_LOCK.
- */
-#define SPI_NOR_XSR_RDY BIT(10) /*
- * S3AN flashes have specific opcode to
- * read the status register.
- * Flags SPI_NOR_XSR_RDY and SPI_S3AN
- * use the same bit as one implies the
- * other, but we will get rid of
- * SPI_S3AN soon.
- */
-#define SPI_S3AN BIT(10) /*
- * Xilinx Spartan 3AN In-System Flash
- * (MFR cannot be used for probing
- * because it has the same value as
- * ATMEL flashes)
- */
-#define SPI_NOR_4B_OPCODES BIT(11) /*
- * Use dedicated 4byte address op codes
- * to support memory size above 128Mib.
- */
-#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
-#define USE_CLSR BIT(14) /* use CLSR command */
-#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
-#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
- * Top/Bottom (TB) is bit 6 of
- * status register. Must be used with
- * SPI_NOR_HAS_TB.
- */
-
- /* Part specific fixup hooks. */
- const struct spi_nor_fixups *fixups;
-};
-
-#define JEDEC_MFR(info) ((info)->id[0])
-
-/**
- * spi_nor_spimem_xfer_data() - helper function to read/write data to
- * flash's memory region
- * @nor: pointer to 'struct spi_nor'
- * @op: pointer to 'struct spi_mem_op' template for transfer
- *
- * Return: number of bytes transferred on success, -errno otherwise
- */
-static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
- bool usebouncebuf = false;
- void *rdbuf = NULL;
- const void *buf;
- int ret;
-
- if (op->data.dir == SPI_MEM_DATA_IN)
- buf = op->data.buf.in;
- else
- buf = op->data.buf.out;
-
- if (object_is_on_stack(buf) || !virt_addr_valid(buf))
- usebouncebuf = true;
-
- if (usebouncebuf) {
- if (op->data.nbytes > nor->bouncebuf_size)
- op->data.nbytes = nor->bouncebuf_size;
-
- if (op->data.dir == SPI_MEM_DATA_IN) {
- rdbuf = op->data.buf.in;
- op->data.buf.in = nor->bouncebuf;
- } else {
- op->data.buf.out = nor->bouncebuf;
- memcpy(nor->bouncebuf, buf,
- op->data.nbytes);
- }
- }
-
- ret = spi_mem_adjust_op_size(nor->spimem, op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(nor->spimem, op);
- if (ret)
- return ret;
-
- if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
- memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);
-
- return op->data.nbytes;
-}
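
The bounce-buffer handling above can be summarised with a small user-space sketch: when the caller's buffer is not DMA-safe, stage the transfer through a private buffer and copy in or out around the operation. All names and the fake transport below are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t bouncebuf[256];

/* Fake transport: pretend the controller filled the destination. */
static void fake_read(uint8_t *dst, size_t len)
{
        memset(dst, 0xa5, len);
}

static size_t read_through_bounce(uint8_t *user_buf, size_t len, bool dma_safe)
{
        uint8_t *dst = dma_safe ? user_buf : bouncebuf;

        if (!dma_safe && len > sizeof(bouncebuf))
                len = sizeof(bouncebuf);        /* clamp, as the driver does */

        fake_read(dst, len);

        if (!dma_safe)
                memcpy(user_buf, bouncebuf, len);       /* copy back for reads */

        return len;                             /* bytes actually transferred */
}

int main(void)
{
        uint8_t stack_buf[16];  /* e.g. an on-stack buffer, not DMA-safe */

        read_through_bounce(stack_buf, sizeof(stack_buf), false);
        printf("first byte: %#x\n", (unsigned int)stack_buf[0]);
        return 0;
}
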
-
-/**
- * spi_nor_spimem_read_data() - read data from flash's memory region via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
- size_t len, u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(len, buf, 1));
-
- /* get transfer protocols. */
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
- /* convert the dummy cycles to the number of bytes */
- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_read_data() - read data from flash memory
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
- u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_read_data(nor, from, len, buf);
-
- return nor->controller_ops->read(nor, from, len, buf);
-}
-
-/**
- * spi_nor_spimem_write_data() - write data to flash memory via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
- size_t len, const u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
- if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
- op.addr.nbytes = 0;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_write_data() - write data to flash memory
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
- const u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_write_data(nor, to, len, buf);
-
- return nor->controller_ops->write(nor, to, len, buf);
-}
-
-/**
- * spi_nor_write_enable() - Set write enable latch with Write Enable command.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_disable() - Send Write Disable instruction to the chip.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_disable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_sr() - Read the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_fsr() - Read the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'
- * @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading FSR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_cr() - Read the Configuration Register using the
- * SPINOR_OP_RDCR (35h) command.
- * @nor: pointer to 'struct spi_nor'
- * @cr: pointer to a DMA-able buffer where the value of the
- * Configuration Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading CR\n", ret);
-
- return ret;
-}
-
-/**
- * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
- SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor,
- enable ? SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- nor->bouncebuf[0] = enable << 7;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_ear() - Write Extended Address Register.
- * @nor: pointer to 'struct spi_nor'.
- * @ear: value to write to the Extended Address Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
-{
- int ret;
-
- nor->bouncebuf[0] = ear;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d writing EAR\n", ret);
-
- return ret;
-}
-
-/**
- * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int winbond_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret || enable)
- return ret;
-
- /*
- * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
- * Register to be set to 1, so all 3-byte-address reads come from the
- * second 16M. We must clear the register to enable normal behavior.
- */
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_ear(nor, 0);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
-
- return ret;
-}
-
-/**
- * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
- * flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int s3an_sr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-/**
- * spi_nor_clear_sr() - Clear the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_sr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing SR\n", ret);
-}
-
-/**
- * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
- * for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_sr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_sr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_USE_CLSR &&
- nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
- if (nor->bouncebuf[0] & SR_E_ERR)
- dev_err(nor->dev, "Erase Error occurred\n");
- else
- dev_err(nor->dev, "Programming Error occurred\n");
-
- spi_nor_clear_sr(nor);
- return -EIO;
- }
-
- return !(nor->bouncebuf[0] & SR_WIP);
-}
-
-/**
- * spi_nor_clear_fsr() - Clear the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_fsr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
-}
-
-/**
- * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
- * ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: non-zero if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_fsr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
- if (nor->bouncebuf[0] & FSR_E_ERR)
- dev_err(nor->dev, "Erase operation failed.\n");
- else
- dev_err(nor->dev, "Program operation failed.\n");
-
- if (nor->bouncebuf[0] & FSR_PT_ERR)
- dev_err(nor->dev,
- "Attempted to modify a protected sector.\n");
-
- spi_nor_clear_fsr(nor);
- return -EIO;
- }
-
- return nor->bouncebuf[0] & FSR_READY;
-}
-
-/**
- * spi_nor_ready() - Query the flash to see if it is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_ready(struct spi_nor *nor)
-{
- int sr, fsr;
-
- if (nor->flags & SNOR_F_READY_XSR_RDY)
- sr = s3an_sr_ready(nor);
- else
- sr = spi_nor_sr_ready(nor);
- if (sr < 0)
- return sr;
- fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
- if (fsr < 0)
- return fsr;
- return sr && fsr;
-}
-
-/**
- * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
- * Status Register until ready, or timeout occurs.
- * @nor: pointer to "struct spi_nor".
- * @timeout_jiffies: jiffies to wait until timeout.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
- unsigned long timeout_jiffies)
-{
- unsigned long deadline;
- int timeout = 0, ret;
-
- deadline = jiffies + timeout_jiffies;
-
- while (!timeout) {
- if (time_after_eq(jiffies, deadline))
- timeout = 1;
-
- ret = spi_nor_ready(nor);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- cond_resched();
- }
-
- dev_dbg(nor->dev, "flash operation timed out\n");
-
- return -ETIMEDOUT;
-}
-
-/**
- * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
- * flash to be ready, or timeout occurs.
- * @nor: pointer to "struct spi_nor".
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
-{
- return spi_nor_wait_till_ready_with_timeout(nor,
- DEFAULT_READY_WAIT_JIFFIES);
-}
-
-/**
- * spi_nor_write_sr() - Write the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to DMA-able buffer to write to the Status Register.
- * @len: number of bytes to write to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
- sr, len);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
- * ensure that the byte written matches the received value.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
-
- nor->bouncebuf[0] = sr1;
-
- ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
- if (ret)
- return ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] != sr1) {
- dev_dbg(nor->dev, "SR1: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
- * Status Register 2 in one shot. Ensure that the byte written in the Status
- * Register 1 matches the received value, and that the 16-bit Write did not
- * affect what was already in the Status Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register 1.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 cr_written;
-
- /* Make sure we don't overwrite the contents of Status Register 2. */
- if (!(nor->flags & SNOR_F_NO_READ_CR)) {
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
- } else if (nor->params.quad_enable) {
- /*
- * If the Status Register 2 Read command (35h) is not
- * supported, we should at least be sure we don't
- * change the value of the SR2 Quad Enable bit.
- *
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params.quad_enable() call.
- *
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
- */
- sr_cr[1] = SR2_QUAD_EN_BIT1;
- } else {
- sr_cr[1] = 0;
- }
-
- sr_cr[0] = sr1;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- cr_written = sr_cr[1];
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr_written != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
- * Configuration Register in one shot. Ensure that the byte written in the
- * Configuration Register matches the received value, and that the 16-bit Write
- * did not affect what was already in the Status Register 1.
- * @nor: pointer to a 'struct spi_nor'.
- * @cr: byte value to be written to the Configuration Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 sr_written;
-
- /* Keep the current value of the Status Register 1. */
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- sr_cr[1] = cr;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- sr_written = sr_cr[0];
-
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- if (sr_written != sr_cr[0]) {
- dev_dbg(nor->dev, "SR: Read back test failed\n");
- return -EIO;
- }
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
- * the byte written matches the received value without affecting other bits in the
- * Status Register 1 and 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- if (nor->flags & SNOR_F_HAS_16BIT_SR)
- return spi_nor_write_16bit_sr_and_check(nor, sr1);
-
- return spi_nor_write_sr1_and_check(nor, sr1);
-}
-
-/**
- * spi_nor_write_sr2() - Write the Status Register 2 using the
- * SPINOR_OP_WRSR2 (3eh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
- sr2, 1);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR2\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_read_sr2() - Read the Status Register 2 using the
- * SPINOR_OP_RDSR2 (3fh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer where the value of the
- * Status Register 2 will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
- sr2, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR2\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_chip(struct spi_nor *nor)
-{
- int ret;
-
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d erasing chip\n", ret);
-
- return ret;
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
-}
-
-static int spi_nor_lock_and_prep(struct spi_nor *nor)
-{
- int ret = 0;
-
- mutex_lock(&nor->lock);
-
- if (nor->controller_ops && nor->controller_ops->prepare) {
- ret = nor->controller_ops->prepare(nor);
- if (ret) {
- mutex_unlock(&nor->lock);
- return ret;
- }
- }
- return ret;
-}
-
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
-{
- if (nor->controller_ops && nor->controller_ops->unprepare)
- nor->controller_ops->unprepare(nor);
- mutex_unlock(&nor->lock);
-}
-
-/*
- * This code converts an address to the Default Address Mode, which has
- * non-power-of-two page sizes. We must support this mode because it is the
- * default mode supported by Xilinx tools, it can access the whole flash area,
- * and changing over to the Power-of-two mode is irreversible and corrupts the
- * original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
- */
-static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
-{
- u32 offset, page;
-
- offset = addr % nor->page_size;
- page = addr / nor->page_size;
- page <<= (nor->page_size > 512) ? 10 : 9;
-
- return page | offset;
-}
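
A stand-alone sketch of that Default Address Mode conversion, using a 264-byte page as an example of a non-power-of-two page size; the page size and sample address are only illustrative.

#include <stdint.h>
#include <stdio.h>

/* Split a linear address into page/offset using the real page size, then
 * rebuild the device address with the page number shifted past a 512- or
 * 1024-byte page field, as in s3an_convert_addr() above.
 */
static uint32_t s3an_convert(uint32_t addr, uint32_t page_size)
{
        uint32_t offset = addr % page_size;
        uint32_t page = addr / page_size;

        page <<= (page_size > 512) ? 10 : 9;
        return page | offset;
}

int main(void)
{
        /* 264-byte pages: linear address 1000 is page 3, offset 208. */
        printf("%#x\n", s3an_convert(1000, 264));       /* prints 0x6d0 */
        return 0;
}
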
-
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
- if (!nor->params.convert_addr)
- return addr;
-
- return nor->params.convert_addr(nor, addr);
-}
-
-/*
- * Initiate the erasure of a single sector
- */
-static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
-{
- int i;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- return spi_mem_exec_op(nor->spimem, &op);
- } else if (nor->controller_ops->erase) {
- return nor->controller_ops->erase(nor, addr);
- }
-
- /*
- * Default implementation, if driver doesn't have a specialized HW
- * control
- */
- for (i = nor->addr_width - 1; i >= 0; i--) {
- nor->bouncebuf[i] = addr & 0xff;
- addr >>= 8;
- }
-
- return nor->controller_ops->write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
-}
-
-/**
- * spi_nor_div_by_erase_size() - calculate the quotient and the remainder
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @dividend: dividend value
- * @remainder: pointer to u32 remainder (will be updated)
- *
- * Return: the result of the division
- */
-static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
- u64 dividend, u32 *remainder)
-{
- /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
- *remainder = (u32)dividend & erase->size_mask;
- return dividend >> erase->size_shift;
-}
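
The mask/shift trick above works because JESD216B erase sizes are powers of two; a short stand-alone sketch with a 64 KiB erase type (size_shift 16, size_mask 0xffff) and an arbitrary address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x30100;                /* arbitrary example offset */
        uint32_t size_shift = 16;               /* 64 KiB erase type */
        uint32_t size_mask = 0xffff;

        uint32_t rem = (uint32_t)addr & size_mask;
        uint64_t sectors = addr >> size_shift;

        /* prints: 3 full sectors, remainder 0x100 */
        printf("%llu full sectors, remainder %#x\n",
               (unsigned long long)sectors, rem);
        return 0;
}
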
-
-/**
- * spi_nor_find_best_erase_type() - find the best erase type for the given
- * offset in the serial flash memory and the
- * number of bytes to erase. The region in
- * which the address fits is expected to be
- * provided.
- * @map: the erase map of the SPI NOR
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Return: a pointer to the best fitted erase type, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
- const struct spi_nor_erase_region *region,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_type *erase;
- u32 rem;
- int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- /*
- * Erase types are ordered by size, with the smallest erase type at
- * index 0.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- /* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
- continue;
-
- erase = &map->erase_type[i];
-
- /* Don't erase more than what the user has asked for. */
- if (erase->size > len)
- continue;
-
- /* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION)
- return erase;
-
- spi_nor_div_by_erase_size(erase, addr, &rem);
- if (rem)
- continue;
- else
- return erase;
- }
-
- return NULL;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-static struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
-/**
- * spi_nor_init_erase_cmd() - initialize an erase command
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- *
- * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_command *
-spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase)
-{
- struct spi_nor_erase_command *cmd;
-
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&cmd->list);
- cmd->opcode = erase->opcode;
- cmd->count = 1;
-
- if (region->offset & SNOR_OVERLAID_REGION)
- cmd->size = region->size;
- else
- cmd->size = erase->size;
-
- return cmd;
-}
-
-/**
- * spi_nor_destroy_erase_cmd_list() - destroy erase command list
- * @erase_list: list of erase commands
- */
-static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
-{
- struct spi_nor_erase_command *cmd, *next;
-
- list_for_each_entry_safe(cmd, next, erase_list, list) {
- list_del(&cmd->list);
- kfree(cmd);
- }
-}
-
-/**
- * spi_nor_init_erase_cmd_list() - initialize erase command list
- * @nor: pointer to a 'struct spi_nor'
- * @erase_list: list of erase commands to be executed once we validate that the
- * erase can be performed
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Builds the list of best fitted erase commands and verifies if the erase can
- * be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
- struct list_head *erase_list,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase, *prev_erase = NULL;
- struct spi_nor_erase_region *region;
- struct spi_nor_erase_command *cmd = NULL;
- u64 region_end;
- int ret = -EINVAL;
-
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
-
- region_end = spi_nor_region_end(region);
-
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
- goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
- cmd->count++;
- }
-
- addr += cmd->size;
- len -= cmd->size;
-
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
- }
-
- prev_erase = erase;
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(erase_list);
- return ret;
-}
-
-/**
- * spi_nor_erase_multi_sectors() - perform a non-uniform erase
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Build a list of best fitted erase commands and execute it once we validate
- * that the erase can be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
-{
- LIST_HEAD(erase_list);
- struct spi_nor_erase_command *cmd, *next;
- int ret;
-
- ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
- if (ret)
- return ret;
-
- list_for_each_entry_safe(cmd, next, &erase_list, list) {
- nor->erase_opcode = cmd->opcode;
- while (cmd->count) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto destroy_erase_cmd_list;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto destroy_erase_cmd_list;
-
- addr += cmd->size;
- cmd->count--;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto destroy_erase_cmd_list;
- }
- list_del(&cmd->list);
- kfree(cmd);
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(&erase_list);
- return ret;
-}
-
-/*
- * Erase an address range on the nor chip. The address range may extend across
- * one or more erase sectors. Return an error if there is a problem erasing.
- */
-static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
- int ret;
-
- dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
- (long long)instr->len);
-
- if (spi_nor_has_uniform_erase(nor)) {
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
- }
-
- addr = instr->addr;
- len = instr->len;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_chip(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
- if (ret)
- goto erase_err;
-
- /* REVISIT in some cases we could speed up erasing large regions
- * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
- * to use "small sector erase", but that's not always optimal.
- */
-
- /* "sector"-at-a-time erase */
- } else if (spi_nor_has_uniform_erase(nor)) {
- while (len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto erase_err;
-
- addr += mtd->erasesize;
- len -= mtd->erasesize;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto erase_err;
- }
-
- /* erase multiple sectors */
- } else {
- ret = spi_nor_erase_multi_sectors(nor, addr, len);
- if (ret)
- goto erase_err;
- }
-
- ret = spi_nor_write_disable(nor);
-
-erase_err:
- spi_nor_unlock_and_unprep(nor);
-
- return ret;
-}
-
-static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
-{
- struct mtd_info *mtd = &nor->mtd;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- int shift = ffs(mask) - 1;
- int pow;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- if (!(sr & mask)) {
- /* No protection */
- *ofs = 0;
- *len = 0;
- } else {
- pow = ((sr & mask) ^ mask) >> shift;
- *len = mtd->size >> pow;
- if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
- *ofs = 0;
- else
- *ofs = mtd->size - *len;
- }
-}
-
-/*
- * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
- * @locked is false); 0 otherwise
- */
-static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr, bool locked)
-{
- loff_t lock_offs;
- uint64_t lock_len;
-
- if (!len)
- return 1;
-
- stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
-
- if (locked)
- /* Requested range is a sub-range of locked range */
- return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
- else
- /* Requested range does not overlap with locked range */
- return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
-}
-
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, true);
-}
-
-static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, false);
-}
-
-/*
- * Lock a region of the flash. Compatible with ST Micro and similar flash.
- * Supports the block protection bits BP{0,1,2} in the status register
- * (SR). Does not support these features found in newer SR bitfields:
- * - SEC: sector/block protect - only handle SEC=0 (block protect)
- * - CMP: complement protect - only support CMP=0 (range is not complemented)
- *
- * Support for the following is provided conditionally for some flash:
- * - TB: top/bottom protect
- *
- * Sample table portion for 8MB flash (Winbond w25q64fw):
- *
- * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
- * --------------------------------------------------------------------------
- * X | X | 0 | 0 | 0 | NONE | NONE
- * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
- * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
- * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
- * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
- * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
- * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
- * X | X | 1 | 1 | 1 | 8 MB | ALL
- * ------|-------|-------|-------|-------|---------------|-------------------
- * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
- * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
- * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
- * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
- * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
- * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is unlocked, we don't need to do anything */
- if (stm_is_locked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is unlocked, we can't use 'bottom' protection */
- if (!stm_is_locked_sr(nor, 0, ofs, status_old))
- can_be_bottom = false;
-
- /* If anything above us is unlocked, we can't use 'top' protection */
- if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_top = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should end up locked */
- if (use_top)
- lock_len = mtd->size - ofs;
- else
- lock_len = ofs + len;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- /*
- * Need smallest pow such that:
- *
- * 1 / (2^pow) <= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
- */
- pow = ilog2(mtd->size) - ilog2(lock_len);
- val = mask - (pow << shift);
- if (val & ~mask)
- return -EINVAL;
- /* Don't "lock" with no region! */
- if (!(val & mask))
- return -EINVAL;
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not unlock other areas */
- if ((status_new & mask) < (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
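For reference, a minimal user-space sketch of the BP-bit arithmetic used by stm_lock() above, assuming the usual SR layout with BP0..BP2 at bits 2..4; the 8 MB / 1 MB figures are illustrative and taken from the w25q64fw table in the comment:

	#include <stdint.h>
	#include <stdio.h>

	/* Integer log2, standing in for the kernel's ilog2(). */
	static unsigned int ilog2_u64(uint64_t v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const uint8_t mask = 0x7 << 2;		/* BP2 | BP1 | BP0, bits 4..2 */
		const uint8_t shift = 2;
		uint64_t size = 8 * 1024 * 1024;	/* 8 MB part (w25q64fw)       */
		uint64_t lock_len = 1 * 1024 * 1024;	/* protect the top 1 MB       */

		/* pow = ceil(log2(size / lock_len)); val = BP field to program. */
		uint8_t pow = ilog2_u64(size) - ilog2_u64(lock_len);
		uint8_t val = mask - (pow << shift);

		/* Expect pow = 23 - 20 = 3 and BP2..BP0 = 100b -> "Upper 1/8". */
		printf("pow=%u val=0x%02x BP=%u\n", pow, val, val >> shift);
		return 0;
	}

Running it prints pow=3 val=0x10 BP=4, which matches the 1 MB / Upper 1/8 row of the table above.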
-
-/*
- * Unlock a region of the flash. See stm_lock() for more info
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is locked, we don't need to do anything */
- if (stm_is_unlocked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is locked, we can't use 'top' protection */
- if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
- can_be_top = false;
-
- /* If anything above us is locked, we can't use 'bottom' protection */
- if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_bottom = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should remain locked */
- if (use_top)
- lock_len = mtd->size - (ofs + len);
- else
- lock_len = ofs;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
- /*
- * Need largest pow such that:
- *
- * 1 / (2^pow) >= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
- */
- pow = ilog2(mtd->size) - order_base_2(lock_len);
- if (lock_len == 0) {
- val = 0; /* fully unlocked */
- } else {
- val = mask - (pow << shift);
- /* Some power-of-two sizes are not supported */
- if (val & ~mask)
- return -EINVAL;
- }
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Don't protect status register if we're fully unlocked */
- if (lock_len == 0)
- status_new &= ~SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not lock other areas */
- if ((status_new & mask) > (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
-
-/*
- * Check if a region of the flash is (completely) locked. See stm_lock() for
- * more info.
- *
- * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
- * negative on errors.
- */
-static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
-}
-
-static const struct spi_nor_locking_ops stm_locking_ops = {
- .lock = stm_lock,
- .unlock = stm_unlock,
- .is_locked = stm_is_locked,
-};
-
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->lock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->unlock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->is_locked(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-/**
- * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
- * Register 1.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
- return 0;
-
- nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
-
- return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
- * Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
-
- ret = spi_nor_read_cr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
- return 0;
-
- nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
-
- return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register 2.
- *
- * This is one of the procedures to set the QE bit described in the SFDP
- * (JESD216 rev B) specification but no manufacturer using this procedure has
- * been identified yet, hence the name of the function.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
-{
- u8 *sr2 = nor->bouncebuf;
- int ret;
- u8 sr2_written;
-
- /* Check current Quad Enable bit value. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
- if (*sr2 & SR2_QUAD_EN_BIT7)
- return 0;
-
- /* Update the Quad Enable bit. */
- *sr2 |= SR2_QUAD_EN_BIT7;
-
- ret = spi_nor_write_sr2(nor, sr2);
- if (ret)
- return ret;
-
- sr2_written = *sr2;
-
- /* Read back and check it. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
-
- if (*sr2 != sr2_written) {
- dev_dbg(nor->dev, "SR2: Read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 16) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = 6, \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .addr_width = (_addr_width), \
- .flags = (_flags),
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8*_page_size), \
- .n_sectors = (_n_sectors), \
- .page_size = _page_size, \
- .addr_width = 3, \
- .flags = SPI_NOR_NO_FR | SPI_S3AN,
-
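As a quick illustration of how INFO() packs the ID bytes, a small stand-alone sketch; the 0xc84019 value is simply the gd25q256 entry from the table below, and byte meanings beyond the JEDEC manufacturer byte are device specific:

	#include <stdio.h>

	int main(void)
	{
		unsigned int jedec_id = 0xc84019;	/* e.g. the gd25q256 entry       */
		unsigned char id[3] = {
			(jedec_id >> 16) & 0xff,	/* 0xc8: JEDEC manufacturer byte */
			(jedec_id >> 8) & 0xff,		/* 0x40: device specific         */
			jedec_id & 0xff,		/* 0x19: device specific         */
		};

		/* With _ext_id == 0, INFO() sets id_len = 3 and these bytes are
		 * matched against what the RDID (9Fh) command returns. */
		printf("%02x %02x %02x\n", id[0], id[1], id[2]);
		return 0;
	}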
-static int
-is25lp256_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
- */
- if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
- BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
-
- return 0;
-}
-
-static struct spi_nor_fixups is25lp256_fixups = {
- .post_bfpt = is25lp256_post_bfpt_fixups,
-};
-
-static int
-mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * MX25L25635F supports 4B opcodes but MX25L25635E does not.
- * Unfortunately, Macronix has re-used the same JEDEC ID for both
- * variants which prevents us from defining a new entry in the parts
- * table.
- * We need a way to differentiate MX25L25635E and MX25L25635F, and it
- * seems that the F version advertises support for Fast Read 4-4-4 in
- * its BFPT table.
- */
- if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- return 0;
-}
-
-static struct spi_nor_fixups mx25l25635_fixups = {
- .post_bfpt = mx25l25635_post_bfpt_fixups,
-};
-
-static void gd25q256_default_init(struct spi_nor *nor)
-{
- /*
-	 * Some manufacturers, like GigaDevice, may use a different
-	 * bit to set QE on different memories, so the MFR alone can't
-	 * indicate the quad_enable method for this case; we need
-	 * to set it in the default_init fixup hook.
- */
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static struct spi_nor_fixups gd25q256_fixups = {
- .default_init = gd25q256_default_init,
-};
-
-/* NOTE: double check command sets and memory organization when you add
- * more NOR chips. This list focuses on newer chips, which have been
- * converging on command sets that include the JEDEC ID.
- *
- * All newly added entries should describe *hardware* and should use SECT_4K
- * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
- * scenarios excluding small sectors there is a config option that can be
- * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
- * For historical (and compatibility) reasons (before we got the above config)
- * some old entries may be missing the 4K flag.
- */
-static const struct flash_info spi_nor_ids[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
-
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
-
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
-
- /* Everspin */
- { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
-
- /* GigaDevice */
- {
- "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_TB_SR_BIT6)
- .fixups = &gd25q256_fixups,
- },
-
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
- /* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
-
- /* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-
- /* Micron <--> ST Micro */
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
- /* Micron */
- {
- "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES)
- },
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
-
- /* Spansion/Cypress -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- /* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
-
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- {
- "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- {
- "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
-
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Xilinx S3AN Internal Flash */
- { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
- { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
- { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
-
- /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { },
-};
-
-static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
-{
- int tmp;
- u8 *id = nor->bouncebuf;
- const struct flash_info *info;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
-
- tmp = spi_mem_exec_op(nor->spimem, &op);
- } else {
- tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
- }
- if (tmp) {
- dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
- return ERR_PTR(tmp);
- }
-
- for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = &spi_nor_ids[tmp];
- if (info->id_len) {
- if (!memcmp(info->id, id, info->id_len))
- return &spi_nor_ids[tmp];
- }
- }
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
- SPI_NOR_MAX_ID_LEN, id);
- return ERR_PTR(-ENODEV);
-}
-
-static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
-
- dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- while (len) {
- loff_t addr = from;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret == 0) {
- /* We shouldn't see 0-length reads */
- ret = -EIO;
- goto read_err;
- }
- if (ret < 0)
- goto read_err;
-
- WARN_ON(ret > len);
- *retlen += ret;
- buf += ret;
- from += ret;
- len -= ret;
- }
- ret = 0;
-
-read_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual = 0;
- int ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->sst_write_second = false;
-
- /* Start write from odd address. */
- if (to % 2) {
- nor->program_opcode = SPINOR_OP_BP;
-
- /* write one byte. */
- ret = spi_nor_write_data(nor, to, 1, buf);
- if (ret < 0)
- goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- to++;
- actual++;
- }
-
- /* Write out most of the data here. */
- for (; actual < len - 1; actual += 2) {
- nor->program_opcode = SPINOR_OP_AAI_WP;
-
- /* write two bytes. */
- ret = spi_nor_write_data(nor, to, 2, buf + actual);
- if (ret < 0)
- goto out;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
- to += 2;
- nor->sst_write_second = true;
- }
- nor->sst_write_second = false;
-
- ret = spi_nor_write_disable(nor);
- if (ret)
- goto out;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- /* Write out trailing byte if it exists. */
- if (actual != len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->program_opcode = SPINOR_OP_BP;
- ret = spi_nor_write_data(nor, to, 1, buf + actual);
- if (ret < 0)
- goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- actual += 1;
-
- ret = spi_nor_write_disable(nor);
- }
-out:
- *retlen += actual;
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-/*
- * Write an address range to the nor chip. Data must be written in
- * FLASH_PAGESIZE chunks. The address range may be any size provided
- * it is within the physical boundaries.
- */
-static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t page_offset, page_remain, i;
- ssize_t ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- for (i = 0; i < len; ) {
- ssize_t written;
- loff_t addr = to + i;
-
- /*
- * If page_size is a power of two, the offset can be quickly
-		 * calculated with an AND operation. In the other cases we
-		 * need to do a modulus operation (more expensive).
-		 * Power-of-two numbers have only one bit set, so we can use
-		 * hweight32 to detect whether we need to do a
-		 * modulus (do_div()) or not.
- */
- if (hweight32(nor->page_size) == 1) {
- page_offset = addr & (nor->page_size - 1);
- } else {
- uint64_t aux = addr;
-
- page_offset = do_div(aux, nor->page_size);
- }
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto write_err;
-
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
- if (ret < 0)
- goto write_err;
- written = ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto write_err;
- *retlen += written;
- i += written;
- }
-
-write_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
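A minimal sketch of the page_offset shortcut used in spi_nor_write() above: when page_size is a power of two, a single AND replaces the modulus (the address and page size below are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t page_size = 256;		/* power of two               */
		uint64_t addr = 0x12345;

		uint32_t fast = addr & (page_size - 1);	/* cheap AND                  */
		uint32_t slow = addr % page_size;	/* generic (slower) modulus   */

		/* Both yield 0x45; the first page can still take 256 - 0x45 bytes. */
		printf("fast=0x%x slow=0x%x page_remain=%u\n",
		       fast, slow, page_size - fast);
		return 0;
	}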
-
-static int spi_nor_check(struct spi_nor *nor)
-{
- if (!nor->dev ||
- (!nor->spimem && !nor->controller_ops) ||
- (!nor->spimem && nor->controller_ops &&
- (!nor->controller_ops->read ||
- !nor->controller_ops->write ||
- !nor->controller_ops->read_reg ||
- !nor->controller_ops->write_reg))) {
- pr_err("spi-nor: please fill all the necessary fields!\n");
- return -EINVAL;
- }
-
- if (nor->spimem && nor->controller_ops) {
- dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s3an_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- nor->erase_opcode = SPINOR_OP_XSE;
- nor->program_opcode = SPINOR_OP_XPP;
- nor->read_opcode = SPINOR_OP_READ;
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
- /*
-	 * These flashes have a page size of 264 or 528 bytes (known as
-	 * Default addressing mode). It can be changed to a more standard
-	 * Power of two mode where the page size is 256/512. This comes
-	 * at a price: there is 3% less space, the data is corrupted and
-	 * the page size cannot be changed back to Default addressing
-	 * mode.
-	 *
-	 * The current addressing mode can be read from the XRDSR register
-	 * and should not be changed, because changing it is a destructive
-	 * operation.
- if (nor->bouncebuf[0] & XSR_PAGESIZE) {
- /* Flash in Power of 2 mode */
- nor->page_size = (nor->page_size == 264) ? 256 : 512;
- nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * nor->page_size;
- } else {
- /* Flash in Default addressing mode */
- nor->params.convert_addr = s3an_convert_addr;
- nor->mtd.erasesize = nor->info->sector_size;
- }
-
- return 0;
-}
-
-static void
-spi_nor_set_read_settings(struct spi_nor_read_command *read,
- u8 num_mode_clocks,
- u8 num_wait_states,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = num_mode_clocks;
- read->num_wait_states = num_wait_states;
- read->opcode = opcode;
- read->proto = proto;
-}
-
-static void
-spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- pp->opcode = opcode;
- pp->proto = proto;
-}
-
-static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == (int)hwcaps)
- return table[i][1];
-
- return -EINVAL;
-}
-
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
-{
- static const int hwcaps_read2cmd[][2] = {
- { SNOR_HWCAPS_READ, SNOR_CMD_READ },
- { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
- { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
- { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
- { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
- { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
- { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
- { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
- { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
- { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
- { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
- { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
- { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
- { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
- { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
- ARRAY_SIZE(hwcaps_read2cmd));
-}
-
-static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
-{
- static const int hwcaps_pp2cmd[][2] = {
- { SNOR_HWCAPS_PP, SNOR_CMD_PP },
- { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
- { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
- { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
- { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
- { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
- { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
- ARRAY_SIZE(hwcaps_pp2cmd));
-}
-
-/*
- * Serial Flash Discoverable Parameters (SFDP) parsing.
- */
-
-/**
- * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
- * addr_width and read_dummy members of the struct spi_nor should be set
- * beforehand.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to read
- * @buf: buffer where the data is copied into (dma-safe memory)
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
-{
- ssize_t ret;
-
- while (len) {
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret < 0)
- return ret;
- if (!ret || ret > len)
- return -EIO;
-
- buf += ret;
- addr += ret;
- len -= ret;
- }
- return 0;
-}
-
-/**
- * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into (dma-safe memory)
- *
- * Whatever the actual numbers of bytes for address and dummy cycles are
- * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
- * followed by a 3-byte address and 8 dummy clock cycles.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- u8 addr_width, read_opcode, read_dummy;
- int ret;
-
- read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
-
- nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
- nor->read_dummy = 8;
-
- ret = spi_nor_read_raw(nor, addr, len, buf);
-
- nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
-
- return ret;
-}
-
-/**
- * spi_nor_spimem_check_op - check if the operation is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @op: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_op(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
- /*
- * First test with 4 address bytes. The opcode itself might
-	 * be a 3B addressing opcode but we don't care, because
-	 * the SPI controller implementation should not check the opcode,
-	 * only the sequence.
- */
- op->addr.nbytes = 4;
- if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
- return -ENOTSUPP;
-
- /* If flash size <= 16MB, 3 address bytes are sufficient */
- op->addr.nbytes = 3;
- if (!spi_mem_supports_op(nor->spimem, op))
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_spimem_check_readop - check if the read op is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @read: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_readop(struct spi_nor *nor,
- const struct spi_nor_read_command *read)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_DUMMY(0, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
- op.dummy.buswidth / 8;
-
- return spi_nor_spimem_check_op(nor, &op);
-}
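To make the dummy-cycle conversion above concrete, a small sketch (the protocol and cycle counts are illustrative): for a 1-4-4 read the dummy phase shares the 4-bit address bus, so 8 dummy clock cycles become 8 * 4 / 8 = 4 dummy bytes.

	#include <stdio.h>

	int main(void)
	{
		unsigned int mode_clocks = 2, wait_states = 6;	/* 8 dummy cycles total  */
		unsigned int addr_buswidth = 4;			/* 1-4-4: x4 address bus */
		unsigned int dummy_bytes =
			(mode_clocks + wait_states) * addr_buswidth / 8;

		printf("%u dummy bytes\n", dummy_bytes);	/* -> 4 */
		return 0;
	}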
-
-/**
- * spi_nor_spimem_check_pp - check if the page program op is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @pp: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_pp(struct spi_nor *nor,
- const struct spi_nor_pp_command *pp)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
-
- return spi_nor_spimem_check_op(nor, &op);
-}
-
-/**
- * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
- * based on SPI controller capabilities
- * @nor: pointer to a 'struct spi_nor'
- * @hwcaps: pointer to resulting capabilities after adjusting
- * according to controller and flash's capability
- */
-static void
-spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- unsigned int cap;
-
- /* DTR modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_DTR;
-
- /* X-X-X modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_X_X_X;
-
- for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
- int rdidx, ppidx;
-
- if (!(*hwcaps & BIT(cap)))
- continue;
-
- rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
- if (rdidx >= 0 &&
- spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
- *hwcaps &= ~BIT(cap);
-
- ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
- if (ppidx < 0)
- continue;
-
- if (spi_nor_spimem_check_pp(nor,
- &params->page_programs[ppidx]))
- *hwcaps &= ~BIT(cap);
- }
-}
-
-/**
- * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into
- *
- * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
- * guaranteed to be dma-safe.
- *
- * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
- * otherwise.
- */
-static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- void *dma_safe_buf;
- int ret;
-
- dma_safe_buf = kmalloc(len, GFP_KERNEL);
- if (!dma_safe_buf)
- return -ENOMEM;
-
- ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
- memcpy(buf, dma_safe_buf, len);
- kfree(dma_safe_buf);
-
- return ret;
-}
-
-/* Fast Read settings. */
-
-static void
-spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
- u16 half,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = (half >> 5) & 0x07;
- read->num_wait_states = (half >> 0) & 0x1f;
- read->opcode = (half >> 8) & 0xff;
- read->proto = proto;
-}
-
-struct sfdp_bfpt_read {
- /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
- * whether the Fast Read x-y-z command is supported.
- */
- u32 supported_dword;
- u32 supported_bit;
-
- /*
-	 * The half-word at offset <settings_shift> in BFPT DWORD
-	 * <settings_dword> encodes the op code, the number of mode clocks and
-	 * the number of wait states to be used by the Fast Read x-y-z command.
- */
- u32 settings_dword;
- u32 settings_shift;
-
- /* The SPI protocol for this Fast Read x-y-z command. */
- enum spi_nor_protocol proto;
-};
-
-static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
- /* Fast Read 1-1-2 */
- {
- SNOR_HWCAPS_READ_1_1_2,
- BFPT_DWORD(1), BIT(16), /* Supported bit */
- BFPT_DWORD(4), 0, /* Settings */
- SNOR_PROTO_1_1_2,
- },
-
- /* Fast Read 1-2-2 */
- {
- SNOR_HWCAPS_READ_1_2_2,
- BFPT_DWORD(1), BIT(20), /* Supported bit */
- BFPT_DWORD(4), 16, /* Settings */
- SNOR_PROTO_1_2_2,
- },
-
- /* Fast Read 2-2-2 */
- {
- SNOR_HWCAPS_READ_2_2_2,
- BFPT_DWORD(5), BIT(0), /* Supported bit */
- BFPT_DWORD(6), 16, /* Settings */
- SNOR_PROTO_2_2_2,
- },
-
- /* Fast Read 1-1-4 */
- {
- SNOR_HWCAPS_READ_1_1_4,
- BFPT_DWORD(1), BIT(22), /* Supported bit */
- BFPT_DWORD(3), 16, /* Settings */
- SNOR_PROTO_1_1_4,
- },
-
- /* Fast Read 1-4-4 */
- {
- SNOR_HWCAPS_READ_1_4_4,
- BFPT_DWORD(1), BIT(21), /* Supported bit */
- BFPT_DWORD(3), 0, /* Settings */
- SNOR_PROTO_1_4_4,
- },
-
- /* Fast Read 4-4-4 */
- {
- SNOR_HWCAPS_READ_4_4_4,
- BFPT_DWORD(5), BIT(4), /* Supported bit */
- BFPT_DWORD(7), 16, /* Settings */
- SNOR_PROTO_4_4_4,
- },
-};
-
-struct sfdp_bfpt_erase {
- /*
-	 * The half-word at offset <shift> in DWORD <dword> encodes the
- * op code and erase sector size to be used by Sector Erase commands.
- */
- u32 dword;
- u32 shift;
-};
-
-static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
- /* Erase Type 1 in DWORD8 bits[15:0] */
- {BFPT_DWORD(8), 0},
-
- /* Erase Type 2 in DWORD8 bits[31:16] */
- {BFPT_DWORD(8), 16},
-
- /* Erase Type 3 in DWORD9 bits[15:0] */
- {BFPT_DWORD(9), 0},
-
- /* Erase Type 4 in DWORD9 bits[31:16] */
- {BFPT_DWORD(9), 16},
-};
-
-/**
- * spi_nor_set_erase_type() - set a SPI NOR erase type
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- */
-static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode)
-{
- erase->size = size;
- erase->opcode = opcode;
- /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
- erase->size_shift = ffs(erase->size) - 1;
- erase->size_mask = (1 << erase->size_shift) - 1;
-}
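A minimal sketch of the shift/mask derivation in spi_nor_set_erase_type() above: for a 4 KiB erase type (an example value), size_shift is 12 and size_mask is 0xfff, so "offs & size_mask" tells whether an offset is aligned to the erase size.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int size = 4096;			 /* 4 KiB erase type */
		unsigned int size_shift = ffs(size) - 1;	 /* 12               */
		unsigned int size_mask = (1U << size_shift) - 1; /* 0xfff            */

		printf("shift=%u mask=0x%x aligned(0x21000)=%d\n",
		       size_shift, size_mask, (0x21000 & size_mask) == 0);
		return 0;
	}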
-
-/**
- * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- * @i: erase type index as sorted in the Basic Flash Parameter Table
- *
- * The supported Erase Types will be sorted at init in ascending order, with
- * the smallest Erase Type size being the first member in the erase_type array
- * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
- * the Basic Flash Parameter Table since it will be used later on to
- * synchronize with the supported Erase Types defined in SFDP optional tables.
- */
-static void
-spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode, u8 i)
-{
- erase->idx = i;
- spi_nor_set_erase_type(erase, size, opcode);
-}
-
-/**
- * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
- * @l: member in the left half of the map's erase_type array
- * @r: member in the right half of the map's erase_type array
- *
- * Comparison function used in the sort() call to sort in ascending order the
- * map's erase types, the smallest erase type size being the first member in the
- * sorted erase_type array.
- *
- * Return: the result of @l->size - @r->size
- */
-static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
-{
- const struct spi_nor_erase_type *left = l, *right = r;
-
- return left->size - right->size;
-}
-
-/**
- * spi_nor_sort_erase_mask() - sort erase mask
- * @map: the erase map of the SPI NOR
- * @erase_mask: the erase type mask to be sorted
- *
- * Replicate the sort done for the map's erase types in BFPT: sort the erase
- * mask in ascending order with the smallest erase type size starting from
- * BIT(0) in the sorted erase mask.
- *
- * Return: sorted erase mask.
- */
-static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
-{
- struct spi_nor_erase_type *erase_type = map->erase_type;
- int i;
- u8 sorted_erase_mask = 0;
-
- if (!erase_mask)
- return 0;
-
- /* Replicate the sort done for the map's erase types. */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
- sorted_erase_mask |= BIT(i);
-
- return sorted_erase_mask;
-}
-
-/**
- * spi_nor_regions_sort_erase_types() - sort erase types in each region
- * @map: the erase map of the SPI NOR
- *
- * Function assumes that the erase types defined in the erase map are already
- * sorted in ascending order, with the smallest erase type size being the first
- * member in the erase_type array. It replicates the sort done for the map's
- * erase types. Each region's erase bitmask will indicate which erase types are
- * supported from the sorted erase types defined in the erase map.
- * Sort all the regions' erase types at init in order to speed up the process
- * of finding the best erase command at runtime.
- */
-static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
-{
- struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
-
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
-
- /* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
- }
-}
-
-/**
- * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
- * @map: the erase map of the SPI NOR
- * @erase_mask: bitmask encoding erase types that can erase the entire
- * flash memory
- * @flash_size: the spi nor flash memory size
- */
-static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
- u8 erase_mask, u64 flash_size)
-{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
- map->uniform_region.size = flash_size;
- map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
-}
-
-static int
-spi_nor_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- if (nor->info->fixups && nor->info->fixups->post_bfpt)
- return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
- params);
-
- return 0;
-}
-
-/**
- * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
- * @nor: pointer to a 'struct spi_nor'
- * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
- * the Basic Flash Parameter Table length and version
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Basic Flash Parameter Table is the main and only mandatory table as
- * defined by the SFDP (JESD216) specification.
- * It provides us with the total size (memory density) of the data array and
- * the number of address bytes for Fast Read, Page Program and Sector Erase
- * commands.
- * For Fast READ commands, it also gives the number of mode clock cycles and
- * wait states (regrouped in the number of dummy clock cycles) for each
- * supported instruction op code.
- * For Page Program, the page size has been available since JESD216 rev A;
- * however, the supported instruction op codes are still not provided.
- * For Sector Erase commands, this table stores the supported instruction op
- * codes and the associated sector sizes.
- * Finally, the Quad Enable Requirements (QER) are also available since JESD216
- * rev A. The QER bits encode the manufacturer dependent procedure to be
- * executed to set the Quad Enable (QE) bit in some internal register of the
- * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
- * sending any Quad SPI command to the memory. Actually, setting the QE bit
- * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
- * and IO3 hence enabling 4 (Quad) I/O lines.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_bfpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- struct spi_nor_flash_parameter *params)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- struct sfdp_bfpt bfpt;
- size_t len;
- int i, cmd, err;
- u32 addr;
- u16 half;
- u8 erase_mask;
-
- /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
- if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
- return -EINVAL;
-
- /* Read the Basic Flash Parameter Table. */
- len = min_t(size_t, sizeof(bfpt),
- bfpt_header->length * sizeof(u32));
- addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
- memset(&bfpt, 0, sizeof(bfpt));
- err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
- if (err < 0)
- return err;
-
- /* Fix endianness of the BFPT DWORDs. */
- for (i = 0; i < BFPT_DWORD_MAX; i++)
- bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
-
- /* Number of address bytes. */
- switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
- case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
- nor->addr_width = 3;
- break;
-
- case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
- break;
-
- default:
- break;
- }
-
- /* Flash Memory Density (in bits). */
- params->size = bfpt.dwords[BFPT_DWORD(2)];
- if (params->size & BIT(31)) {
- params->size &= ~BIT(31);
-
- /*
- * Prevent overflows on params->size. Anyway, a NOR of 2^64
- * bits is unlikely to exist so this error probably means
- * the BFPT we are reading is corrupted/wrong.
- */
- if (params->size > 63)
- return -EINVAL;
-
- params->size = 1ULL << params->size;
- } else {
- params->size++;
- }
- params->size >>= 3; /* Convert to bytes. */
-
- /* Fast Read settings. */
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
- const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
- struct spi_nor_read_command *read;
-
- if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
- params->hwcaps.mask &= ~rd->hwcaps;
- continue;
- }
-
- params->hwcaps.mask |= rd->hwcaps;
- cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
- read = &params->reads[cmd];
- half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
- spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
- }
-
- /*
- * Sector Erase settings. Reinitialize the uniform erase map using the
- * Erase Types defined in the bfpt table.
- */
- erase_mask = 0;
- memset(&params->erase_map, 0, sizeof(params->erase_map));
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
- const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
- u32 erasesize;
- u8 opcode;
-
- half = bfpt.dwords[er->dword] >> er->shift;
- erasesize = half & 0xff;
-
- /* erasesize == 0 means this Erase Type is not supported. */
- if (!erasesize)
- continue;
-
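- /* Low byte: erase size exponent (2^N bytes); high byte: erase opcode. */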
- erasesize = 1U << erasesize;
- opcode = (half >> 8) & 0xff;
- erase_mask |= BIT(i);
- spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
- opcode, i);
- }
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
- /*
- * Sort all the map's Erase Types in ascending order with the smallest
- * erase size being the first member in the erase_type array.
- */
- sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
- spi_nor_map_cmp_erase_type, NULL);
- /*
- * Sort the erase types in the uniform region in order to update the
- * uniform_erase_type bitmask. The bitmask will be used later on when
- * selecting the uniform erase.
- */
- spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
-
- /* Stop here if not JESD216 rev A or later. */
- if (bfpt_header->length < BFPT_DWORD_MAX)
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
- params);
-
- /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
- params->page_size = bfpt.dwords[BFPT_DWORD(11)];
- params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
- params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
- params->page_size = 1U << params->page_size;
-
- /* Quad Enable Requirements. */
- switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
- case BFPT_DWORD15_QER_NONE:
- params->quad_enable = NULL;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
- /*
- * Writing only one byte to the Status Register has the
- * side-effect of clearing Status Register 2.
- */
- case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- /*
- * Read Configuration Register (35h) instruction is not
- * supported.
- */
- nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR1_BIT6:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr1_bit6_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT7:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr2_bit7_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1:
- /*
- * JESD216 rev B or later does not specify if writing only one
- * byte to the Status Register clears Status Register 2 or not,
- * so let's be cautious and keep the default
- * assumption of a 16-bit Write Status (01h) command.
- */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- default:
- return -EINVAL;
- }
-
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
-}
-
-#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
-#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
-
-#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
-#define SMPT_CMD_READ_DUMMY_SHIFT 16
-#define SMPT_CMD_READ_DUMMY(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
-#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
-
-#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
-#define SMPT_CMD_READ_DATA_SHIFT 24
-#define SMPT_CMD_READ_DATA(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
-
-#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
-#define SMPT_CMD_OPCODE_SHIFT 8
-#define SMPT_CMD_OPCODE(_cmd) \
- (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
-
-#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
-#define SMPT_MAP_REGION_COUNT_SHIFT 16
-#define SMPT_MAP_REGION_COUNT(_header) \
- ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
- SMPT_MAP_REGION_COUNT_SHIFT) + 1)
-
-#define SMPT_MAP_ID_MASK GENMASK(15, 8)
-#define SMPT_MAP_ID_SHIFT 8
-#define SMPT_MAP_ID(_header) \
- (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
-
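-/* The region size field encodes the size in units of 256 bytes, minus one. */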
-#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
-#define SMPT_MAP_REGION_SIZE_SHIFT 8
-#define SMPT_MAP_REGION_SIZE(_region) \
- (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
- SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
-
-#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
-#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
- ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
-
-#define SMPT_DESC_TYPE_MAP BIT(1)
-#define SMPT_DESC_END BIT(0)
-
-/**
- * spi_nor_smpt_addr_width() - return the address width used in the
- * configuration detection command.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- */
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
-{
- switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
- case SMPT_CMD_ADDRESS_LEN_0:
- return 0;
- case SMPT_CMD_ADDRESS_LEN_3:
- return 3;
- case SMPT_CMD_ADDRESS_LEN_4:
- return 4;
- case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
- /* fall through */
- default:
- return nor->addr_width;
- }
-}
-
-/**
- * spi_nor_smpt_read_dummy() - return the configuration detection command read
- * latency, in clock cycles.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- *
- * Return: the number of dummy cycles for an SMPT read
- */
-static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
-{
- u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
-
- if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
- return nor->read_dummy;
- return read_dummy;
-}
-
-/**
- * spi_nor_get_map_in_use() - get the configuration map in use
- * @nor: pointer to a 'struct spi_nor'
- * @smpt: pointer to the sector map parameter table
- * @smpt_len: sector map parameter table length
- *
- * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
- */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
- u8 smpt_len)
-{
- const u32 *ret;
- u8 *buf;
- u32 addr;
- int err;
- u8 i;
- u8 addr_width, read_opcode, read_dummy;
- u8 read_data_mask, map_id;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
- read_opcode = nor->read_opcode;
-
- map_id = 0;
- /* Determine if there are any optional Detection Command Descriptors */
- for (i = 0; i < smpt_len; i += 2) {
- if (smpt[i] & SMPT_DESC_TYPE_MAP)
- break;
-
- read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
- nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
- nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
- addr = smpt[i + 1];
-
- err = spi_nor_read_raw(nor, addr, 1, buf);
- if (err) {
- ret = ERR_PTR(err);
- goto out;
- }
-
- /*
- * Build an index value that is used to select the Sector Map
- * Configuration that is currently in use.
- */
- map_id = map_id << 1 | !!(*buf & read_data_mask);
- }
-
- /*
- * If command descriptors are provided, they always precede map
- * descriptors in the table. There is no need to start the iteration
- * over the smpt array all over again.
- *
- * Find the matching configuration map.
- */
- ret = ERR_PTR(-EINVAL);
- while (i < smpt_len) {
- if (SMPT_MAP_ID(smpt[i]) == map_id) {
- ret = smpt + i;
- break;
- }
-
- /*
- * If there are no more configuration map descriptors and no
- * configuration ID matched the configuration identifier, the
- * sector address map is unknown.
- */
- if (smpt[i] & SMPT_DESC_END)
- break;
-
- /* increment the table index to the next map */
- i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
- }
-
- /* fall through */
-out:
- kfree(buf);
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
- nor->read_opcode = read_opcode;
- return ret;
-}
-
-/**
- * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @erase_type: erase type bitmask
- */
-static void
-spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase,
- const u8 erase_type)
-{
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (!(erase_type & BIT(i)))
- continue;
- if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
- return;
- }
- }
-}
-
-/**
- * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
- * used for storing SFDP parsed data
- * @smpt: pointer to the sector map parameter table
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int
-spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params,
- const u32 *smpt)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase = map->erase_type;
- struct spi_nor_erase_region *region;
- u64 offset;
- u32 region_count;
- int i, j;
- u8 uniform_erase_type, save_uniform_erase_type;
- u8 erase_type, regions_erase_type;
-
- region_count = SMPT_MAP_REGION_COUNT(*smpt);
- /*
- * The regions will be freed when the driver detaches from the
- * device.
- */
- region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
- map->regions = region;
-
- uniform_erase_type = 0xff;
- regions_erase_type = 0;
- offset = 0;
- /* Populate regions. */
- for (i = 0; i < region_count; i++) {
- j = i + 1; /* index for the region dword */
- region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
- erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
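- /* Keep the erase type flags in the low bits of the region offset. */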
- region[i].offset = offset | erase_type;
-
- spi_nor_region_check_overlay(&region[i], erase, erase_type);
-
- /*
- * Save the erase types that are supported in all regions and
- * can erase the entire flash memory.
- */
- uniform_erase_type &= erase_type;
-
- /*
- * regions_erase_type mask will indicate all the erase types
- * supported in this configuration map.
- */
- regions_erase_type |= erase_type;
-
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
- }
-
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
-
- if (!regions_erase_type) {
- /*
- * Roll back to the previous uniform_erase_type mask, SMPT is
- * broken.
- */
- map->uniform_erase_type = save_uniform_erase_type;
- return -EINVAL;
- }
-
- /*
- * BFPT advertises all the erase types supported by all the possible
- * map configurations. Mask out the erase types that are not supported
- * by the current map configuration.
- */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (!(regions_erase_type & BIT(erase[i].idx)))
- spi_nor_set_erase_type(&erase[i], 0, 0xFF);
-
- spi_nor_region_mark_end(&region[i - 1]);
-
- return 0;
-}
-
-/**
- * spi_nor_parse_smpt() - parse Sector Map Parameter Table
- * @nor: pointer to a 'struct spi_nor'
- * @smpt_header: sector map parameter table header
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
- * that is used for storing SFDP parsed data
- *
- * This table is optional, but when available, we parse it to identify the
- * location and size of sectors within the main data array of the flash memory
- * device and to identify which Erase Types are supported by each sector.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_smpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *smpt_header,
- struct spi_nor_flash_parameter *params)
-{
- const u32 *sector_map;
- u32 *smpt;
- size_t len;
- u32 addr;
- int i, ret;
-
- /* Read the Sector Map Parameter Table. */
- len = smpt_header->length * sizeof(*smpt);
- smpt = kmalloc(len, GFP_KERNEL);
- if (!smpt)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(smpt_header);
- ret = spi_nor_read_sfdp(nor, addr, len, smpt);
- if (ret)
- goto out;
-
- /* Fix endianness of the SMPT DWORDs. */
- for (i = 0; i < smpt_header->length; i++)
- smpt[i] = le32_to_cpu(smpt[i]);
-
- sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
- if (IS_ERR(sector_map)) {
- ret = PTR_ERR(sector_map);
- goto out;
- }
-
- ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
- if (ret)
- goto out;
-
- spi_nor_regions_sort_erase_types(&params->erase_map);
- /* fall through */
-out:
- kfree(smpt);
- return ret;
-}
-
-#define SFDP_4BAIT_DWORD_MAX 2
-
-struct sfdp_4bait {
- /* The hardware capability. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
- * the associated 4-byte address op code is supported.
- */
- u32 supported_bit;
-};
-
-/**
- * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
- * @nor: pointer to a 'struct spi_nor'.
- * @param_header: pointer to the 'struct sfdp_parameter_header' describing
- * the 4-Byte Address Instruction Table length and version.
- * @params:		pointer to the 'struct spi_nor_flash_parameter' to be filled.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_4bait(struct spi_nor *nor,
- const struct sfdp_parameter_header *param_header,
- struct spi_nor_flash_parameter *params)
-{
- static const struct sfdp_4bait reads[] = {
- { SNOR_HWCAPS_READ, BIT(0) },
- { SNOR_HWCAPS_READ_FAST, BIT(1) },
- { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
- { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
- { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
- { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
- { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
- { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
- { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
- };
- static const struct sfdp_4bait programs[] = {
- { SNOR_HWCAPS_PP, BIT(6) },
- { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
- { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
- };
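- /* DWORD1 bits 9-12 report 4-byte address support for Erase Types 1-4. */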
- static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
- { 0u /* not used */, BIT(9) },
- { 0u /* not used */, BIT(10) },
- { 0u /* not used */, BIT(11) },
- { 0u /* not used */, BIT(12) },
- };
- struct spi_nor_pp_command *params_pp = params->page_programs;
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- u32 *dwords;
- size_t len;
- u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
- int i, ret;
-
- if (param_header->major != SFDP_JESD216_MAJOR ||
- param_header->length < SFDP_4BAIT_DWORD_MAX)
- return -EINVAL;
-
- /* Read the 4-byte Address Instruction Table. */
- len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- dwords = kmalloc(len, GFP_KERNEL);
- if (!dwords)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(param_header);
- ret = spi_nor_read_sfdp(nor, addr, len, dwords);
- if (ret)
- goto out;
-
- /* Fix endianness of the 4BAIT DWORDs. */
- for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
- dwords[i] = le32_to_cpu(dwords[i]);
-
- /*
- * Compute the subset of (Fast) Read commands for which the 4-byte
- * version is supported.
- */
- discard_hwcaps = 0;
- read_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(reads); i++) {
- const struct sfdp_4bait *read = &reads[i];
-
- discard_hwcaps |= read->hwcaps;
- if ((params->hwcaps.mask & read->hwcaps) &&
- (dwords[0] & read->supported_bit))
- read_hwcaps |= read->hwcaps;
- }
-
- /*
- * Compute the subset of Page Program commands for which the 4-byte
- * version is supported.
- */
- pp_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(programs); i++) {
- const struct sfdp_4bait *program = &programs[i];
-
- /*
- * The 4 Byte Address Instruction (Optional) Table is the only
- * SFDP table that indicates support for Page Program Commands.
- * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
- * authority for specifying Page Program support.
- */
- discard_hwcaps |= program->hwcaps;
- if (dwords[0] & program->supported_bit)
- pp_hwcaps |= program->hwcaps;
- }
-
- /*
- * Compute the subset of Sector Erase commands for which the 4-byte
- * version is supported.
- */
- erase_mask = 0;
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- const struct sfdp_4bait *erase = &erases[i];
-
- if (dwords[0] & erase->supported_bit)
- erase_mask |= BIT(i);
- }
-
- /* Replicate the sort done for the map's erase types in BFPT. */
- erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
-
- /*
- * We need at least one 4-byte op code per read, program and erase
- * operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
- */
- if (!read_hwcaps || !pp_hwcaps || !erase_mask)
- goto out;
-
- /*
- * Discard all operations from the 4-byte instruction set which are
- * not supported by this memory.
- */
- params->hwcaps.mask &= ~discard_hwcaps;
- params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
-
- /* Use the 4-byte address instruction set. */
- for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
- struct spi_nor_read_command *read_cmd = &params->reads[i];
-
- read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
- }
-
- /* 4BAIT is the only SFDP table that indicates page program support. */
- if (pp_hwcaps & SNOR_HWCAPS_PP)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
- SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
- SPINOR_OP_PP_1_1_4_4B,
- SNOR_PROTO_1_1_4);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
- SPINOR_OP_PP_1_4_4_4B,
- SNOR_PROTO_1_4_4);
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (erase_mask & BIT(i))
- erase_type[i].opcode = (dwords[1] >>
- erase_type[i].idx * 8) & 0xFF;
- else
- spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
- }
-
- /*
- * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
- * later because we already did the conversion to 4byte opcodes. Also,
- * that function implements a legacy quirk for the erase size of
- * Spansion memory. However, this quirk is no longer needed with new
- * SFDP compliant memories.
- */
- nor->addr_width = 4;
- nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
-
- /* fall through */
-out:
- kfree(dwords);
- return ret;
-}
-
-/**
- * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
- * specification. This is a standard which tends to be supported by almost all
- * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
- * runtime the main parameters needed to perform basic SPI flash operations such
- * as Fast Read, Page Program or Sector Erase commands.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_sfdp(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params)
-{
- const struct sfdp_parameter_header *param_header, *bfpt_header;
- struct sfdp_parameter_header *param_headers = NULL;
- struct sfdp_header header;
- struct device *dev = nor->dev;
- size_t psize;
- int i, err;
-
- /* Get the SFDP header. */
- err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
- if (err < 0)
- return err;
-
- /* Check the SFDP header version. */
- if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Verify that the first and only mandatory parameter header is a
- * Basic Flash Parameter Table header as specified in JESD216.
- */
- bfpt_header = &header.bfpt_header;
- if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
- bfpt_header->major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Allocate memory then read all parameter headers with a single
- * Read SFDP command. These parameter headers will actually be parsed
- * twice: a first time to get the latest revision of the basic flash
- * parameter table, then a second time to handle the supported optional
- * tables.
- * Hence we read the parameter headers once for all to reduce the
- * processing time. Also we use kmalloc() instead of devm_kmalloc()
- * because we don't need to keep these parameter headers: the allocated
- * memory is always released with kfree() before exiting this function.
- */
- if (header.nph) {
- psize = header.nph * sizeof(*param_headers);
-
- param_headers = kmalloc(psize, GFP_KERNEL);
- if (!param_headers)
- return -ENOMEM;
-
- err = spi_nor_read_sfdp(nor, sizeof(header),
- psize, param_headers);
- if (err < 0) {
- dev_dbg(dev, "failed to read SFDP parameter headers\n");
- goto exit;
- }
- }
-
- /*
- * Check other parameter headers to get the latest revision of
- * the basic flash parameter table.
- */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
- param_header->major == SFDP_JESD216_MAJOR &&
- (param_header->minor > bfpt_header->minor ||
- (param_header->minor == bfpt_header->minor &&
- param_header->length > bfpt_header->length)))
- bfpt_header = param_header;
- }
-
- err = spi_nor_parse_bfpt(nor, bfpt_header, params);
- if (err)
- goto exit;
-
- /* Parse optional parameter tables. */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- switch (SFDP_PARAM_HEADER_ID(param_header)) {
- case SFDP_SECTOR_MAP_ID:
- err = spi_nor_parse_smpt(nor, param_header, params);
- break;
-
- case SFDP_4BAIT_ID:
- err = spi_nor_parse_4bait(nor, param_header, params);
- break;
-
- default:
- break;
- }
-
- if (err) {
- dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
- SFDP_PARAM_HEADER_ID(param_header));
- /*
- * Let's not drop all information we extracted so far
- * if optional table parsers fail. In case of failing,
- * each optional parser is responsible to roll back to
- * the previously known spi_nor data.
- */
- err = 0;
- }
- }
-
-exit:
- kfree(param_headers);
- return err;
-}
-
-static int spi_nor_select_read(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
- const struct spi_nor_read_command *read;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- read = &nor->params.reads[cmd];
- nor->read_opcode = read->opcode;
- nor->read_proto = read->proto;
-
- /*
- * In the spi-nor framework, we don't need to distinguish
- * between mode clock cycles and wait state clock cycles.
- * Indeed, the value of the mode clock cycles is used by a QSPI
- * flash memory to know whether it should enter or leave its 0-4-4
- * (Continuous Read / XIP) mode.
- * eXecution In Place is out of the scope of the mtd sub-system.
- * Hence we choose to merge both mode and wait state clock cycles
- * into the so called dummy clock cycles.
- */
- nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
- return 0;
-}
-
-static int spi_nor_select_pp(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
- const struct spi_nor_pp_command *pp;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- pp = &nor->params.page_programs[cmd];
- nor->program_opcode = pp->opcode;
- nor->write_proto = pp->proto;
- return 0;
-}
-
-/**
- * spi_nor_select_uniform_erase() - select optimum uniform erase type
- * @map: the erase map of the SPI NOR
- * @wanted_size: the erase type size to search for. Contains the value of
- * info->sector_size or of the "small sector" size in case
- * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
- *
- * Once the optimum uniform sector erase command is found, disable all the
- * others.
- *
- * Return: pointer to erase type on success, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
- const u32 wanted_size)
-{
- const struct spi_nor_erase_type *tested_erase, *erase = NULL;
- int i;
- u8 uniform_erase_type = map->uniform_erase_type;
-
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (!(uniform_erase_type & BIT(i)))
- continue;
-
- tested_erase = &map->erase_type[i];
-
- /*
- * If the current erase size is the one, stop here:
- * we have found the right uniform Sector Erase command.
- */
- if (tested_erase->size == wanted_size) {
- erase = tested_erase;
- break;
- }
-
- /*
- * Otherwise, the current erase size is still a valid candidate.
- * Select the biggest valid candidate.
- */
- if (!erase && tested_erase->size)
- erase = tested_erase;
- /* keep iterating to find the wanted_size */
- }
-
- if (!erase)
- return NULL;
-
- /* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
- return erase;
-}
-
-static int spi_nor_select_erase(struct spi_nor *nor)
-{
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase = NULL;
- struct mtd_info *mtd = &nor->mtd;
- u32 wanted_size = nor->info->sector_size;
- int i;
-
- /*
- * The previous implementation handling Sector Erase commands assumed
- * that the SPI flash memory has a uniform layout and then used only one
- * of the supported erase sizes for all Sector Erase commands.
- * So to be backward compatible, the new implementation also tries to
- * manage the SPI flash memory as uniform with a single erase sector
- * size, when possible.
- */
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- wanted_size = 4096u;
-#endif
-
- if (spi_nor_has_uniform_erase(nor)) {
- erase = spi_nor_select_uniform_erase(map, wanted_size);
- if (!erase)
- return -EINVAL;
- nor->erase_opcode = erase->opcode;
- mtd->erasesize = erase->size;
- return 0;
- }
-
- /*
- * For non-uniform SPI flash memory, set mtd->erasesize to the
- * maximum erase sector size. No need to set nor->erase_opcode.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (map->erase_type[i].size) {
- erase = &map->erase_type[i];
- break;
- }
- }
-
- if (!erase)
- return -EINVAL;
-
- mtd->erasesize = erase->size;
- return 0;
-}
-
-static int spi_nor_default_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- u32 ignored_mask, shared_mask;
- int err;
-
- /*
- * Keep only the hardware capabilities supported by both the SPI
- * controller and the SPI flash memory.
- */
- shared_mask = hwcaps->mask & params->hwcaps.mask;
-
- if (nor->spimem) {
- /*
- * When called from spi_nor_probe(), all caps are set and we
- * need to discard some of them based on what the SPI
- * controller actually supports (using spi_mem_supports_op()).
- */
- spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
- } else {
- /*
- * SPI n-n-n protocols are not supported when the SPI
- * controller directly implements the spi_nor interface.
- * Yet another reason to switch to spi-mem.
- */
- ignored_mask = SNOR_HWCAPS_X_X_X;
- if (shared_mask & ignored_mask) {
- dev_dbg(nor->dev,
- "SPI n-n-n protocols are not supported.\n");
- shared_mask &= ~ignored_mask;
- }
- }
-
- /* Select the (Fast) Read command. */
- err = spi_nor_select_read(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select read settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Page Program command. */
- err = spi_nor_select_pp(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select write settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Sector Erase command. */
- err = spi_nor_select_erase(nor);
- if (err) {
- dev_dbg(nor->dev,
- "can't select erase settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- return 0;
-}
-
-static int spi_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- if (!nor->params.setup)
- return 0;
-
- return nor->params.setup(nor, hwcaps);
-}
-
-static void atmel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void intel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void issi_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static void macronix_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
- nor->params.set_4byte = macronix_set_4byte;
-}
-
-static void sst_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void st_micron_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- nor->params.quad_enable = NULL;
- nor->params.set_4byte = st_micron_set_4byte;
-}
-
-static void winbond_set_default_init(struct spi_nor *nor)
-{
- nor->params.set_4byte = winbond_set_4byte;
-}
-
-/**
- * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
- * settings based on MFR register and ->default_init() hook.
- * @nor: pointer to a 'struct spi-nor'.
- */
-static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
-{
- /* Init flash parameters based on MFR */
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_ATMEL:
- atmel_set_default_init(nor);
- break;
-
- case SNOR_MFR_INTEL:
- intel_set_default_init(nor);
- break;
-
- case SNOR_MFR_ISSI:
- issi_set_default_init(nor);
- break;
-
- case SNOR_MFR_MACRONIX:
- macronix_set_default_init(nor);
- break;
-
- case SNOR_MFR_ST:
- case SNOR_MFR_MICRON:
- st_micron_set_default_init(nor);
- break;
-
- case SNOR_MFR_SST:
- sst_set_default_init(nor);
- break;
-
- case SNOR_MFR_WINBOND:
- winbond_set_default_init(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->fixups && nor->info->fixups->default_init)
- nor->info->fixups->default_init(nor);
-}
-
-/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on JESD216 SFDP standard.
- * @nor: pointer to a 'struct spi-nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
- */
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter sfdp_params;
-
- memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
-
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
- nor->addr_width = 0;
- nor->flags &= ~SNOR_F_4B_OPCODES;
- } else {
- memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
- }
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
- * @nor: pointer to a 'struct spi-nor'.
- */
-static void spi_nor_info_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- struct spi_nor_erase_map *map = &params->erase_map;
- const struct flash_info *info = nor->info;
- struct device_node *np = spi_nor_get_flash_node(nor);
- u8 i, erase_mask;
-
- /* Initialize legacy flash parameters and settings. */
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->set_4byte = spansion_set_4byte;
- params->setup = spi_nor_default_setup;
- /* Default to 16-bit Write Status (01h) Command */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- /* Set SPI NOR sizes. */
- params->size = (u64)info->sector_size * info->n_sectors;
- params->page_size = info->page_size;
-
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
- /* (Fast) Read settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
- 0, 0, SPINOR_OP_READ,
- SNOR_PROTO_1_1_1);
-
- if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
- 0, 8, SPINOR_OP_READ_FAST,
- SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_DUAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
- 0, 8, SPINOR_OP_READ_1_1_2,
- SNOR_PROTO_1_1_2);
- }
-
- if (info->flags & SPI_NOR_QUAD_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
- 0, 8, SPINOR_OP_READ_1_1_4,
- SNOR_PROTO_1_1_4);
- }
-
- if (info->flags & SPI_NOR_OCTAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
- 0, 8, SPINOR_OP_READ_1_1_8,
- SNOR_PROTO_1_1_8);
- }
-
- /* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
- /*
- * Sector Erase settings. Sort Erase Types in ascending order, with the
- * smallest erase size starting at BIT(0).
- */
- erase_mask = 0;
- i = 0;
- if (info->flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (info->flags & SECT_4K) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K);
- i++;
- }
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
- SPINOR_OP_SE);
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
-}
-
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
-{
- if (nor->params.size <= SZ_16M)
- return;
-
- nor->flags |= SNOR_F_4B_OPCODES;
- /* No small sector erase for 4-byte command set */
- nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
-}
-
-static void s3an_post_sfdp_fixups(struct spi_nor *nor)
-{
- nor->params.setup = s3an_nor_setup;
-}
-
-/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (it is also called for SPI NORs that do not
- * support RDSFDP).
- * @nor: pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when the information provided by the SFDP/flash_info
- * tables is incomplete or wrong).
- */
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
-{
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_SPANSION:
- spansion_post_sfdp_fixups(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->flags & SPI_S3AN)
- s3an_post_sfdp_fixups(nor);
-
- if (nor->info->fixups && nor->info->fixups->post_sfdp)
- nor->info->fixups->post_sfdp(nor);
-}
-
-/**
- * spi_nor_late_init_params() - Late initialization of default flash parameters.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Used to set default flash parameters and settings that the ->default_init()
- * hook or the SFDP parser did not fill in.
- */
-static void spi_nor_late_init_params(struct spi_nor *nor)
-{
- /*
- * NOR protection support. When locking_ops are not provided, we pick
- * the default ones.
- */
- if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
- nor->params.locking_ops = &stm_locking_ops;
-}
-
-/**
- * spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor: pointer to a 'struct spi-nor'.
- *
- * The flash parameters and settings are initialized based on a sequence of
- * calls that are ordered by priority:
- *
- * 1/ Default flash parameters initialization. The initializations are done
- * based on nor->info data:
- * spi_nor_info_init_params()
- *
- * which can be overwritten by:
- * 2/ Manufacturer flash parameters initialization. The initializations are
- *    done based on MFR register, or when the decisions cannot be made solely
- *    based on MFR, by using specific flash_info tweaks, ->default_init():
- * spi_nor_manufacturer_init_params()
- *
- * which can be overwritten by:
- * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
- *    should be more accurate than the above.
- * spi_nor_sfdp_init_params()
- *
- * Please note that there is a ->post_bfpt() fixup hook that can overwrite
- * the flash parameters and settings immediately after parsing the Basic
- * Flash Parameter Table.
- *
- * which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- * parameters that could not be extracted by other means (i.e. when
- *    information provided by the SFDP/flash_info tables is incomplete or
- * wrong).
- * spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
- * spi_nor_late_init_params()
- */
-static void spi_nor_init_params(struct spi_nor *nor)
-{
- spi_nor_info_init_params(nor);
-
- spi_nor_manufacturer_init_params(nor);
-
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
- !(nor->info->flags & SPI_NOR_SKIP_SFDP))
- spi_nor_sfdp_init_params(nor);
-
- spi_nor_post_sfdp_fixups(nor);
-
- spi_nor_late_init_params(nor);
-}
-
-/**
- * spi_nor_quad_enable() - enable Quad I/O if needed.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_quad_enable(struct spi_nor *nor)
-{
- if (!nor->params.quad_enable)
- return 0;
-
- if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
- spi_nor_get_protocol_width(nor->write_proto) == 4))
- return 0;
-
- return nor->params.quad_enable(nor);
-}
-
-/**
- * spi_nor_unlock_all() - Unlocks the entire flash memory array.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Some SPI NOR flashes are write protected by default after a power-on reset
- * cycle, in order to avoid inadvertent writes during power-up. Backward
- * compatibility requires unlocking the entire flash memory array at power-up
- * by default.
- */
-static int spi_nor_unlock_all(struct spi_nor *nor)
-{
- if (nor->flags & SNOR_F_HAS_LOCK)
- return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
-
- return 0;
-}
-
-static int spi_nor_init(struct spi_nor *nor)
-{
- int err;
-
- err = spi_nor_quad_enable(nor);
- if (err) {
- dev_dbg(nor->dev, "quad mode not supported\n");
- return err;
- }
-
- err = spi_nor_unlock_all(nor);
- if (err) {
- dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
- return err;
- }
-
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- nor->params.set_4byte(nor, true);
- }
-
- return 0;
-}
-
-/* mtd resume handler */
-static void spi_nor_resume(struct mtd_info *mtd)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- struct device *dev = nor->dev;
- int ret;
-
- /* re-initialize the nor chip */
- ret = spi_nor_init(nor);
- if (ret)
- dev_err(dev, "resume() failed\n");
-}
-
-void spi_nor_restore(struct spi_nor *nor)
-{
- /* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
- nor->flags & SNOR_F_BROKEN_RESET)
- nor->params.set_4byte(nor, false);
-}
-EXPORT_SYMBOL_GPL(spi_nor_restore);
-
-static const struct flash_info *spi_nor_match_id(const char *name)
-{
- const struct flash_info *id = spi_nor_ids;
-
- while (id->name) {
- if (!strcmp(name, id->name))
- return id;
- id++;
- }
- return NULL;
-}
-
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
- if (nor->addr_width) {
- /* already configured from SFDP */
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
- } else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
- } else {
- nor->addr_width = 3;
- }
-
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static void spi_nor_debugfs_init(struct spi_nor *nor,
- const struct flash_info *info)
-{
- struct mtd_info *mtd = &nor->mtd;
-
- mtd->dbg.partname = info->name;
- mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
- info->id_len, info->id);
-}
-
-static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
- const char *name)
-{
- const struct flash_info *info = NULL;
-
- if (name)
- info = spi_nor_match_id(name);
- /* Try to auto-detect if chip name wasn't specified or not found */
- if (!info)
- info = spi_nor_read_id(nor);
- if (IS_ERR_OR_NULL(info))
- return ERR_PTR(-ENOENT);
-
- /*
- * If caller has specified name of flash model that can normally be
- * detected using JEDEC, let's verify it.
- */
- if (name && info->id_len) {
- const struct flash_info *jinfo;
-
- jinfo = spi_nor_read_id(nor);
- if (IS_ERR(jinfo)) {
- return jinfo;
- } else if (jinfo != info) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to lose that
- * information, even if it's not 100% accurate.
- */
- dev_warn(nor->dev, "found %s, expected %s\n",
- jinfo->name, info->name);
- info = jinfo;
- }
- }
-
- return info;
-}
-
-int spi_nor_scan(struct spi_nor *nor, const char *name,
- const struct spi_nor_hwcaps *hwcaps)
-{
- const struct flash_info *info;
- struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
- struct device_node *np = spi_nor_get_flash_node(nor);
- struct spi_nor_flash_parameter *params = &nor->params;
- int ret;
- int i;
-
- ret = spi_nor_check(nor);
- if (ret)
- return ret;
-
- /* Reset SPI protocol for all commands. */
- nor->reg_proto = SNOR_PROTO_1_1_1;
- nor->read_proto = SNOR_PROTO_1_1_1;
- nor->write_proto = SNOR_PROTO_1_1_1;
-
- /*
- * We need the bounce buffer early to read/write registers when going
- * through the spi-mem layer (buffers have to be DMA-able).
- * For spi-mem drivers, we'll reallocate the buffer after
- * spi_nor_scan() returns if nor->page_size turns out to be greater
- * than PAGE_SIZE (which shouldn't happen, since NOR pages are
- * usually less than 1KB).
- */
- nor->bouncebuf_size = PAGE_SIZE;
- nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
-
- info = spi_nor_get_flash_info(nor, name);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- nor->info = info;
-
- spi_nor_debugfs_init(nor, info);
-
- mutex_init(&nor->lock);
-
- /*
- * Make sure the XSR_RDY flag is set before calling
- * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR ID
- * with Atmel SPI NOR flashes.
- */
- if (info->flags & SPI_NOR_XSR_RDY)
- nor->flags |= SNOR_F_READY_XSR_RDY;
-
- if (info->flags & SPI_NOR_HAS_LOCK)
- nor->flags |= SNOR_F_HAS_LOCK;
-
- /* Init flash parameters based on flash_info struct and SFDP */
- spi_nor_init_params(nor);
-
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
- mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = params->size;
- mtd->_erase = spi_nor_erase;
- mtd->_read = spi_nor_read;
- mtd->_resume = spi_nor_resume;
-
- if (nor->params.locking_ops) {
- mtd->_lock = spi_nor_lock;
- mtd->_unlock = spi_nor_unlock;
- mtd->_is_locked = spi_nor_is_locked;
- }
-
- /* sst nor chips use AAI word program */
- if (info->flags & SST_WRITE)
- mtd->_write = sst_write;
- else
- mtd->_write = spi_nor_write;
-
- if (info->flags & USE_FSR)
- nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB) {
- nor->flags |= SNOR_F_HAS_SR_TB;
- if (info->flags & SPI_NOR_TB_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
- }
-
- if (info->flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (info->flags & USE_CLSR)
- nor->flags |= SNOR_F_USE_CLSR;
-
- if (info->flags & SPI_NOR_NO_ERASE)
- mtd->flags |= MTD_NO_ERASE;
-
- mtd->dev.parent = dev;
- nor->page_size = params->page_size;
- mtd->writebufsize = nor->page_size;
-
- if (of_property_read_bool(np, "broken-flash-reset"))
- nor->flags |= SNOR_F_BROKEN_RESET;
-
- /*
- * Configure the SPI memory:
- * - select op codes for (Fast) Read, Page Program and Sector Erase.
- * - set the number of dummy cycles (mode cycles + wait states).
- * - set the SPI protocols for register and memory accesses.
- */
- ret = spi_nor_setup(nor, hwcaps);
- if (ret)
- return ret;
-
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- ret = spi_nor_set_addr_width(nor);
- if (ret)
- return ret;
-
- /* Send all the required SPI flash commands to initialize device */
- ret = spi_nor_init(nor);
- if (ret)
- return ret;
-
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
- return 0;
-}
-EXPORT_SYMBOL_GPL(spi_nor_scan);
-
-static int spi_nor_probe(struct spi_mem *spimem)
-{
- struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
- struct spi_nor *nor;
- /*
- * Enable all caps by default. The core will mask them after
- * checking what's really supported using spi_mem_supports_op().
- */
- const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
- char *flash_name;
- int ret;
-
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
- if (!nor)
- return -ENOMEM;
-
- nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
-
- spi_mem_set_drvdata(spimem, nor);
-
- if (data && data->name)
- nor->mtd.name = data->name;
-
- if (!nor->mtd.name)
- nor->mtd.name = spi_mem_get_name(spimem);
-
- /*
- * For some (historical?) reason many platforms provide two different
- * names in flash_platform_data: "name" and "type". Quite often name is
- * set to "m25p80" and then "type" provides a real chip name.
- * If that's the case, respect "type" and ignore a "name".
- */
- if (data && data->type)
- flash_name = data->type;
- else if (!strcmp(spi->modalias, "spi-nor"))
- flash_name = NULL; /* auto-detect */
- else
- flash_name = spi->modalias;
-
- ret = spi_nor_scan(nor, flash_name, &hwcaps);
- if (ret)
- return ret;
-
- /*
- * None of the existing parts have > 512B pages, but let's play safe
- * and add this logic so that if anyone ever adds support for such
- * a NOR we don't end up with buffer overflows.
- */
- if (nor->page_size > PAGE_SIZE) {
- nor->bouncebuf_size = nor->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
- }
-
- return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
- data ? data->nr_parts : 0);
-}
-
-static int spi_nor_remove(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-
- /* Clean up MTD stuff. */
- return mtd_device_unregister(&nor->mtd);
-}
-
-static void spi_nor_shutdown(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-}
-
-/*
- * Do NOT add to this array without reading the following:
- *
- * Historically, many flash devices are bound to this driver by their name. But
- * since most of these flashes are compatible to some extent, and their
- * differences can often be distinguished by the JEDEC read-ID command, we
- * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "jedec,spi-nor").
- *
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
- */
-static const struct spi_device_id spi_nor_dev_ids[] = {
- /*
- * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
- * hack around the fact that the SPI core does not provide uevent
- * matching for .of_match_table
- */
- {"spi-nor"},
-
- /*
- * Entries not used in DTs that should be safe to drop after replacing
- * them with "spi-nor" in platform data.
- */
- {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
-
- /*
- * Entries that were used in DTs without "jedec,spi-nor" fallback and
- * should be kept for backward compatibility.
- */
- {"at25df321a"}, {"at25df641"}, {"at26df081a"},
- {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
- {"mx25l25635e"},{"mx66l51235l"},
- {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
- {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
- {"s25fl064k"},
- {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
- {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
- {"m25p64"}, {"m25p128"},
- {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25q80bl"}, {"w25q128"}, {"w25q256"},
-
- /* Flashes that can't be detected using JEDEC */
- {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
- {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
- {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
-
- /* Everspin MRAMs (non-JEDEC) */
- { "mr25h128" }, /* 128 Kib, 40 MHz */
- { "mr25h256" }, /* 256 Kib, 40 MHz */
- { "mr25h10" }, /* 1 Mib, 40 MHz */
- { "mr25h40" }, /* 4 Mib, 40 MHz */
-
- { },
-};
-MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
-
-static const struct of_device_id spi_nor_of_table[] = {
- /*
- * Generic compatibility for SPI NOR that can be identified by the
- * JEDEC READ ID opcode (0x9F). Use this, if possible.
- */
- { .compatible = "jedec,spi-nor" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, spi_nor_of_table);
-
-/*
- * REVISIT: many of these chips have deep power-down modes, which
- * should clearly be entered on suspend() to minimize power use.
- * And also when they're otherwise idle...
- */
-static struct spi_mem_driver spi_nor_driver = {
- .spidrv = {
- .driver = {
- .name = "spi-nor",
- .of_match_table = spi_nor_of_table,
- },
- .id_table = spi_nor_dev_ids,
- },
- .probe = spi_nor_probe,
- .remove = spi_nor_remove,
- .shutdown = spi_nor_shutdown,
-};
-module_spi_mem_driver(spi_nor_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
-MODULE_AUTHOR("Mike Lavender");
-MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
new file mode 100644
index 000000000000..e0af6d25d573
--- /dev/null
+++ b/drivers/mtd/spi-nor/sst.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info sst_parts[] = {
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+};
+
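+/*
+ * SST parts flagged with SST_WRITE use the Auto Address Increment (AAI) word
+ * program sequence implemented below: a Byte Program first aligns the
+ * destination to an even address when needed, each AAI command then writes
+ * two bytes while the device auto-increments the address, and a final Byte
+ * Program handles a trailing odd byte.
+ */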
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual = 0;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->sst_write_second = false;
+
+ /* Start write from odd address. */
+ if (to % 2) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = spi_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ to++;
+ actual++;
+ }
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ actual += 1;
+
+ ret = spi_nor_write_disable(nor);
+ }
+out:
+ *retlen += actual;
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static void sst_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void sst_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->info->flags & SST_WRITE)
+ nor->mtd._write = sst_write;
+}
+
+static const struct spi_nor_fixups sst_fixups = {
+ .default_init = sst_default_init,
+ .post_sfdp = sst_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_sst = {
+ .name = "sst",
+ .parts = sst_parts,
+ .nparts = ARRAY_SIZE(sst_parts),
+ .fixups = &sst_fixups,
+};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
new file mode 100644
index 000000000000..17deabad57e1
--- /dev/null
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info winbond_parts[] = {
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+};
+
+/**
+ * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret || enable)
+ return ret;
+
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
+ * Register to be set to 1, so all 3-byte-address reads come from the
+ * second 16M. We must clear the register to enable normal behavior.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_ear(nor, 0);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void winbond_default_init(struct spi_nor *nor)
+{
+ nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups winbond_fixups = {
+ .default_init = winbond_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+ .name = "winbond",
+ .parts = winbond_parts,
+ .nparts = ARRAY_SIZE(winbond_parts),
+ .fixups = &winbond_fixups,
+};
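
The comment in winbond_set_4byte_addr_mode() above refers to the Extended Address Register: in 3-byte address mode the W25Q256FV effectively forms the physical address from the EAR plus the 24-bit address, so a stale EAR of 1 silently redirects all reads to the second 16 MiB. A small sketch of that address composition follows; the (ear << 24) | addr formula is an assumption drawn from the driver comment, not code from the driver itself.

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed behaviour in 3-byte address mode: the effective address is
 * (EAR << 24) | 24-bit address, matching what the driver comment
 * describes for the W25Q256FV.
 */
static uint32_t effective_addr(uint8_t ear, uint32_t addr24)
{
	return ((uint32_t)ear << 24) | (addr24 & 0xffffff);
}

int main(void)
{
	/* With EAR left at 1 after exiting 4-byte mode ... */
	printf("EAR=1: 0x%08x\n", effective_addr(1, 0x000000)); /* 0x01000000 */
	/* ... and after the driver clears it back to 0. */
	printf("EAR=0: 0x%08x\n", effective_addr(0, 0x000000)); /* 0x00000000 */
	return 0;
}
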
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 000000000000..1138bdbf4199
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xilinx_parts[] = {
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by the Xilinx tools, it can access the whole flash
+ * area, and switching over to Power-of-two mode is irreversible and corrupts
+ * the original data.
+ * Addr can safely be an unsigned int: the biggest S3AN device is smaller
+ * than 4 MiB.
+ */
+static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+{
+ u32 offset, page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
+
+static int xilinx_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+ * These flashes have a page size of 264 or 528 bytes (known as
+ * Default addressing mode). It can be changed to a more standard
+ * Power-of-two mode where the page size is 256 or 512 bytes. This
+ * comes at a price: there is 3% less space, the data is corrupted,
+ * and the page size cannot be changed back to Default addressing
+ * mode.
+ *
+ * The current addressing mode can be read from the XRDSR register
+ * and should not be changed, because changing it is destructive.
+ */
+ if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ? 256 : 512;
+ nor->mtd.writebufsize = nor->page_size;
+ nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * nor->page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->params->convert_addr = s3an_convert_addr;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
+
+ return 0;
+}
+
+static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+{
+ nor->params->setup = xilinx_nor_setup;
+}
+
+static const struct spi_nor_fixups xilinx_fixups = {
+ .post_sfdp = xilinx_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_xilinx = {
+ .name = "xilinx",
+ .parts = xilinx_parts,
+ .nparts = ARRAY_SIZE(xilinx_parts),
+ .fixups = &xilinx_fixups,
+};
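
s3an_convert_addr() above maps a linear address into the Default Address Mode layout, where each 264- or 528-byte page occupies a 512- or 1024-byte aligned slot. A stand-alone sketch of the same arithmetic, with example values, makes the mapping concrete (the function is copied out of context purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as s3an_convert_addr(), copied out for illustration. */
static uint32_t s3an_convert(uint32_t addr, uint32_t page_size)
{
	uint32_t offset = addr % page_size;
	uint32_t page = addr / page_size;

	/* 264-byte pages live in 512-byte slots, 528-byte pages in 1024-byte slots. */
	page <<= (page_size > 512) ? 10 : 9;

	return page | offset;
}

int main(void)
{
	/* Linear address 1000 with 264-byte pages: page 3, offset 208. */
	printf("0x%06x\n", s3an_convert(1000, 264));	/* (3 << 9) | 208 = 0x6d0 */
	/* Linear address 1000 with 528-byte pages: page 1, offset 472. */
	printf("0x%06x\n", s3an_convert(1000, 528));	/* (1 << 10) | 472 = 0x5d8 */
	return 0;
}
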
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
new file mode 100644
index 000000000000..2c7773b68993
--- /dev/null
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xmc_parts[] = {
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+};
+
+const struct spi_nor_manufacturer spi_nor_xmc = {
+ .name = "xmc",
+ .parts = xmc_parts,
+ .nparts = ARRAY_SIZE(xmc_parts),
+};
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index ea7440ac913b..ae5abe492b52 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1059,7 +1059,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* be a result of power cut during erasure.
*/
ai->maybe_bad_peb_count += 1;
- /* fall through */
+ fallthrough;
case UBI_IO_BAD_HDR:
/*
* If we're facing a bad VID header we have to drop *all*
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2f93c25bbaee..12c02342149c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1342,10 +1342,10 @@ static int bytes_str_to_int(const char *str)
switch (*endp) {
case 'G':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
result *= 1024;
if (endp[1] == 'i' && endp[2] == 'B')
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 249e8d9bfbcd..2d1f4a61f4ac 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/uio.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/of.h>
@@ -194,10 +195,43 @@ struct mtd_debug_info {
const char *partid;
};
+/**
+ * struct mtd_part - MTD partition specific fields
+ *
+ * @node: list node used to add an MTD partition to the parent partition list
+ * @offset: offset of the partition relatively to the parent offset
+ * @flags: original flags (before the mtdpart logic decided to tweak them based
+ * on flash constraints, like eraseblock/pagesize alignment)
+ *
+ * This struct is embedded in mtd_info and contains partition-specific
+ * properties/fields.
+ */
+struct mtd_part {
+ struct list_head node;
+ u64 offset;
+ u32 flags;
+};
+
+/**
+ * struct mtd_master - MTD master specific fields
+ *
+ * @partitions_lock: lock protecting accesses to the partition list. Protects
+ * not only the master partition list, but also all
+ * sub-partitions.
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
+ *
+ * This struct is embedded in mtd_info and contains master-specific
+ * properties/fields. The master is the root MTD device from the MTD partition
+ * point of view.
+ */
+struct mtd_master {
+ struct mutex partitions_lock;
+ unsigned int suspended : 1;
+};
+
struct mtd_info {
u_char type;
uint32_t flags;
- uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -339,8 +373,52 @@ struct mtd_info {
int usecount;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+
+ /*
+ * Parent device from the MTD partition point of view.
+ *
+ * MTD masters do not have any parent, MTD partitions do. The parent
+ * MTD device can itself be a partition.
+ */
+ struct mtd_info *parent;
+
+ /* List of partitions attached to this MTD device */
+ struct list_head partitions;
+
+ union {
+ struct mtd_part part;
+ struct mtd_master master;
+ };
};
+static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
+{
+ while (mtd->parent)
+ mtd = mtd->parent;
+
+ return mtd;
+}
+
+static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
+{
+ while (mtd->parent) {
+ ofs += mtd->part.offset;
+ mtd = mtd->parent;
+ }
+
+ return ofs;
+}
+
+static inline bool mtd_is_partition(const struct mtd_info *mtd)
+{
+ return mtd->parent;
+}
+
+static inline bool mtd_has_partitions(const struct mtd_info *mtd)
+{
+ return !list_empty(&mtd->partitions);
+}
+
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
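
mtd_get_master() and mtd_get_master_ofs() above walk the new parent chain and accumulate part.offset at each level, so an offset inside a (possibly nested) partition resolves to an absolute offset on the master device. A simplified, self-contained model of that walk is shown below; the toy_mtd struct is a toy stand-in, not the real mtd_info.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Toy model: only the fields needed to show the offset accumulation. */
struct toy_mtd {
	struct toy_mtd *parent;		/* NULL for the master device */
	uint64_t part_offset;		/* offset inside the parent    */
};

static uint64_t toy_get_master_ofs(struct toy_mtd *mtd, uint64_t ofs)
{
	while (mtd->parent) {
		ofs += mtd->part_offset;
		mtd = mtd->parent;
	}
	return ofs;
}

int main(void)
{
	struct toy_mtd master  = { .parent = NULL,    .part_offset = 0 };
	struct toy_mtd part    = { .parent = &master, .part_offset = 0x100000 };
	struct toy_mtd subpart = { .parent = &part,   .part_offset = 0x20000 };

	/* 0x1000 inside the sub-partition is 0x121000 on the master. */
	printf("0x%llx\n",
	       (unsigned long long)toy_get_master_ofs(&subpart, 0x1000));
	return 0;
}
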
@@ -392,13 +470,16 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
loff_t ofs, size_t len)
{
- if (!mtd->_max_bad_blocks)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_max_bad_blocks)
return -ENOTSUPP;
if (mtd->size < (len + ofs) || ofs < 0)
return -EINVAL;
- return mtd->_max_bad_blocks(mtd, ofs, len);
+ return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
}
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
@@ -439,8 +520,10 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->_sync)
- mtd->_sync(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (master->_sync)
+ master->_sync(master);
}
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
@@ -452,13 +535,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (master->master.suspended)
+ return 0;
+
+ ret = master->_suspend ? master->_suspend(master) : 0;
+ if (ret)
+ return ret;
+
+ master->master.suspended = 1;
+ return 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->_resume)
- mtd->_resume(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->master.suspended)
+ return;
+
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
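
The reworked mtd_suspend()/mtd_resume() above track the suspended state on the master, so suspending several partitions of the same device reaches the driver's _suspend hook only once. A toy model of that gating (hypothetical names, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Toy master with the same "suspend once" gating as mtd_suspend(). */
struct toy_master {
	bool suspended;
	int suspend_calls;	/* counts how often the driver hook would run */
};

static int toy_suspend(struct toy_master *master)
{
	if (master->suspended)
		return 0;		/* already suspended, nothing to do */
	master->suspend_calls++;	/* stands in for master->_suspend(master) */
	master->suspended = true;
	return 0;
}

int main(void)
{
	struct toy_master master = { 0 };

	/* Two partitions of the same master both get suspended ... */
	toy_suspend(&master);
	toy_suspend(&master);
	/* ... but the driver hook runs only once. */
	printf("driver suspend calls: %d\n", master.suspend_calls);	/* 1 */
	return 0;
}
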
@@ -538,7 +639,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->_read_oob && mtd->_write_oob;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->_read_oob && master->_write_oob;
}
static inline int mtd_type_is_nand(const struct mtd_info *mtd)
@@ -548,7 +651,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd)
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->_block_isbad;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return !!master->_block_isbad;
}
/* Kernel-side ioctl definitions */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 11cb0c50cd84..e545c050d3e8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -105,7 +105,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser);
module_driver(__mtd_part_parser, register_mtd_parser, \
deregister_mtd_parser)
-int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 4ab9bccfcde0..1e76196f9829 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1064,6 +1064,8 @@ struct nand_legacy {
* @lock: lock protecting the suspended field. Also used to
* serialize accesses to the NAND device.
* @suspended: set to 1 when the device is suspended, 0 when it's not.
+ * @suspend: [REPLACEABLE] specific NAND device suspend operation
+ * @resume: [REPLACEABLE] specific NAND device resume operation
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1077,6 +1079,8 @@ struct nand_legacy {
* @manufacturer: [INTERN] Contains manufacturer information
* @manufacturer.desc: [INTERN] Contains manufacturer's description
* @manufacturer.priv: [INTERN] Contains manufacturer private information
+ * @lock_area: [REPLACEABLE] specific NAND chip lock operation
+ * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation
*/
struct nand_chip {
@@ -1117,6 +1121,8 @@ struct nand_chip {
struct mutex lock;
unsigned int suspended : 1;
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
uint8_t *oob_poi;
struct nand_controller *controller;
@@ -1136,6 +1142,9 @@ struct nand_chip {
const struct nand_manufacturer *desc;
void *priv;
} manufacturer;
+
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
};
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
@@ -1215,7 +1224,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
* struct nand_flash_dev - NAND Flash Device ID Structure
* @name: a human-readable name of the NAND chip
* @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers the same
* memory address as ``id[0]``)
* @dev_id: device ID part of the full chip ID array (refers the same memory
* address as ``id[1]``)
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5abd91cc6dfa..1e2af0ec1f03 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -12,23 +12,6 @@
#include <linux/spi/spi-mem.h>
/*
- * Manufacturer IDs
- *
- * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
- * Sometimes these are the same as CFI IDs, but sometimes they aren't.
- */
-#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
-#define SNOR_MFR_GIGADEVICE 0xc8
-#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
-#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
-#define SNOR_MFR_ISSI CFI_MFR_PMC
-#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
-#define SNOR_MFR_SPANSION CFI_MFR_AMD
-#define SNOR_MFR_SST CFI_MFR_SST
-#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
-
-/*
* Note on opcode nomenclature: some opcodes have a format like
* SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
* of I/O lines used for the opcode, address, and data (respectively). The
@@ -128,7 +111,9 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP3 BIT(5) /* Block protect 3 */
#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
/* Spansion/Cypress specific status bits */
@@ -137,6 +122,8 @@
#define SR1_QUAD_EN_BIT6 BIT(6)
+#define SR_BP_SHIFT 2
+
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
@@ -225,110 +212,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-enum spi_nor_option_flags {
- SNOR_F_USE_FSR = BIT(0),
- SNOR_F_HAS_SR_TB = BIT(1),
- SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_READY_XSR_RDY = BIT(3),
- SNOR_F_USE_CLSR = BIT(4),
- SNOR_F_BROKEN_RESET = BIT(5),
- SNOR_F_4B_OPCODES = BIT(6),
- SNOR_F_HAS_4BAIT = BIT(7),
- SNOR_F_HAS_LOCK = BIT(8),
- SNOR_F_HAS_16BIT_SR = BIT(9),
- SNOR_F_NO_READ_CR = BIT(10),
- SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
-
-};
-
-/**
- * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type.
- * JEDEC JESD216B imposes erase sizes to be a power of 2.
- * @size_shift: @size is a power of 2, the shift is stored in
- * @size_shift.
- * @size_mask: the size mask based on @size_shift.
- * @opcode: the SPI command op code to erase the sector/block.
- * @idx: Erase Type index as sorted in the Basic Flash Parameter
- * Table. It will be used to synchronize the supported
- * Erase Types with the ones identified in the SFDP
- * optional tables.
- */
-struct spi_nor_erase_type {
- u32 size;
- u32 size_shift;
- u32 size_mask;
- u8 opcode;
- u8 idx;
-};
-
-/**
- * struct spi_nor_erase_command - Used for non-uniform erases
- * The structure is used to describe a list of erase commands to be executed
- * once we validate that the erase can be performed. The elements in the list
- * are run-length encoded.
- * @list: for inclusion into the list of erase commands.
- * @count: how many times the same erase command should be
- * consecutively used.
- * @size: the size of the sector/block erased by the command.
- * @opcode: the SPI command op code to erase the sector/block.
- */
-struct spi_nor_erase_command {
- struct list_head list;
- u32 count;
- u32 size;
- u8 opcode;
-};
-
-/**
- * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
- * @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
- * @size: the size of the region in bytes.
- */
-struct spi_nor_erase_region {
- u64 offset;
- u64 size;
-};
-
-#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
-
-/**
- * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
- * @regions: array of erase regions. The regions are consecutive in
- * address space. Walking through the regions is done
- * incrementally.
- * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
- * sector size (legacy implementation).
- * @erase_type: an array of erase types shared by all the regions.
- * The erase types are sorted in ascending order, with the
- * smallest Erase Type size being the first member in the
- * erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
- */
-struct spi_nor_erase_map {
- struct spi_nor_erase_region *regions;
- struct spi_nor_erase_region uniform_region;
- struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
-};
-
/**
* struct spi_nor_hwcaps - Structure for describing the hardware capabilies
* supported by the SPI controller (bus master).
@@ -404,61 +287,7 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
SNOR_HWCAPS_PP_MASK)
-struct spi_nor_read_command {
- u8 num_mode_clocks;
- u8 num_wait_states;
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-struct spi_nor_pp_command {
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-enum spi_nor_read_command_index {
- SNOR_CMD_READ,
- SNOR_CMD_READ_FAST,
- SNOR_CMD_READ_1_1_1_DTR,
-
- /* Dual SPI */
- SNOR_CMD_READ_1_1_2,
- SNOR_CMD_READ_1_2_2,
- SNOR_CMD_READ_2_2_2,
- SNOR_CMD_READ_1_2_2_DTR,
-
- /* Quad SPI */
- SNOR_CMD_READ_1_1_4,
- SNOR_CMD_READ_1_4_4,
- SNOR_CMD_READ_4_4_4,
- SNOR_CMD_READ_1_4_4_DTR,
-
- /* Octal SPI */
- SNOR_CMD_READ_1_1_8,
- SNOR_CMD_READ_1_8_8,
- SNOR_CMD_READ_8_8_8,
- SNOR_CMD_READ_1_8_8_DTR,
-
- SNOR_CMD_READ_MAX
-};
-
-enum spi_nor_pp_command_index {
- SNOR_CMD_PP,
-
- /* Quad SPI */
- SNOR_CMD_PP_1_1_4,
- SNOR_CMD_PP_1_4_4,
- SNOR_CMD_PP_4_4_4,
-
- /* Octal SPI */
- SNOR_CMD_PP_1_1_8,
- SNOR_CMD_PP_1_8_8,
- SNOR_CMD_PP_8_8_8,
-
- SNOR_CMD_PP_MAX
-};
-
-/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */
+/* Forward declaration that is used in 'struct spi_nor_controller_ops' */
struct spi_nor;
/**
@@ -489,68 +318,13 @@ struct spi_nor_controller_ops {
int (*erase)(struct spi_nor *nor, loff_t offs);
};
-/**
- * struct spi_nor_locking_ops - SPI NOR locking methods
- * @lock: lock a region of the SPI NOR.
- * @unlock: unlock a region of the SPI NOR.
- * @is_locked: check if a region of the SPI NOR is completely locked
- */
-struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
-};
-
-/**
- * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
- * Includes legacy flash parameters and settings that can be overwritten
- * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
- * Serial Flash Discoverable Parameters (SFDP) tables.
- *
- * @size: the flash memory density in bytes.
- * @page_size: the page size of the SPI NOR flash memory.
- * @hwcaps: describes the read and page program hardware
- * capabilities.
- * @reads: read capabilities ordered by priority: the higher index
- * in the array, the higher priority.
- * @page_programs: page program capabilities ordered by priority: the
- * higher index in the array, the higher priority.
- * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
- * Table.
- * @quad_enable: enables SPI NOR quad mode.
- * @set_4byte: puts the SPI NOR in 4 byte addressing mode.
- * @convert_addr: converts an absolute address into something the flash
- * will understand. Particularly useful when pagesize is
- * not a power-of-2.
- * @setup: configures the SPI NOR memory. Useful for SPI NOR
- * flashes that have peculiarities to the SPI NOR standard
- * e.g. different opcodes, specific address calculation,
- * page size, etc.
- * @locking_ops: SPI NOR locking methods.
- */
-struct spi_nor_flash_parameter {
- u64 size;
- u32 page_size;
-
- struct spi_nor_hwcaps hwcaps;
- struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
- struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
-
- struct spi_nor_erase_map erase_map;
-
- int (*quad_enable)(struct spi_nor *nor);
- int (*set_4byte)(struct spi_nor *nor, bool enable);
- u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
- int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
-
- const struct spi_nor_locking_ops *locking_ops;
-};
-
-/**
- * struct flash_info - Forward declaration of a structure used internally by
- * spi_nor_scan()
+/*
+ * Forward declarations that are used internally by the core and manufacturer
+ * drivers.
*/
struct flash_info;
+struct spi_nor_manufacturer;
+struct spi_nor_flash_parameter;
/**
* struct spi_nor - Structure for defining a the SPI NOR layer
@@ -562,6 +336,7 @@ struct flash_info;
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
* @info: spi-nor part JDEC MFR id and other info
+ * @manufacturer: spi-nor manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -578,6 +353,7 @@ struct flash_info;
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
+ * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
* @priv: the private data
*/
struct spi_nor {
@@ -588,6 +364,7 @@ struct spi_nor {
u8 *bouncebuf;
size_t bouncebuf_size;
const struct flash_info *info;
+ const struct spi_nor_manufacturer *manufacturer;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -602,40 +379,16 @@ struct spi_nor {
const struct spi_nor_controller_ops *controller_ops;
- struct spi_nor_flash_parameter params;
+ struct spi_nor_flash_parameter *params;
+
+ struct {
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc;
+ } dirmap;
void *priv;
};
-static u64 __maybe_unused
-spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 __maybe_unused
-spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
-static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
-{
- return !!nor->params.erase_map.uniform_erase_type;
-}
-
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 4ea558bd3c46..1077c45721ff 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -32,9 +32,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(ndummy, buf, len) \
+#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
- SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
@@ -176,37 +176,46 @@ struct spinand_device;
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
* be extended if required
* @len: ID length
- *
- * struct_spinand_id->data contains all bytes returned after a READ_ID command,
- * including dummy bytes if the chip does not emit ID bytes right after the
- * READ_ID command. The responsibility to extract real ID bytes is left to
- * struct_manufacurer_ops->detect().
*/
struct spinand_id {
u8 data[SPINAND_MAX_ID_LEN];
int len;
};
+enum spinand_readid_method {
+ SPINAND_READID_METHOD_OPCODE,
+ SPINAND_READID_METHOD_OPCODE_ADDR,
+ SPINAND_READID_METHOD_OPCODE_DUMMY,
+};
+
+/**
+ * struct spinand_devid - SPI NAND device id structure
+ * @id: device id of current chip
+ * @len: number of bytes in device id
+ * @method: method to read chip id
+ * There are 3 possible variants:
+ * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
+ * after read_id opcode.
+ * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
+ * read_id opcode + 1-byte address.
+ * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
+ * read_id opcode + 1 dummy byte.
+ */
+struct spinand_devid {
+ const u8 *id;
+ const u8 len;
+ const enum spinand_readid_method method;
+};
+
/**
* struct manufacurer_ops - SPI NAND manufacturer specific operations
- * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
- * the core calls the struct_manufacurer_ops->detect() hook of each
- * registered manufacturer until one of them return 1. Note that
- * the first thing to check in this hook is that the manufacturer ID
- * in struct_spinand_device->id matches the manufacturer whose
- * ->detect() hook has been called. Should return 1 if there's a
- * match, 0 if the manufacturer ID does not match and a negative
- * error code otherwise. When true is returned, the core assumes
- * that properties of the NAND chip (spinand->base.memorg and
- * spinand->base.eccreq) have been filled
* @init: initialize a SPI NAND device
* @cleanup: cleanup a SPI NAND device
*
* Each SPI NAND manufacturer driver should implement this interface so that
- * NAND chips coming from this vendor can be detected and initialized properly.
+ * NAND chips coming from this vendor can be initialized properly.
*/
struct spinand_manufacturer_ops {
- int (*detect)(struct spinand_device *spinand);
int (*init)(struct spinand_device *spinand);
void (*cleanup)(struct spinand_device *spinand);
};
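
The three spinand_readid_method variants above differ only in what sits between the 0x9f opcode and the ID bytes: nothing, one address byte, or one dummy byte, which is exactly what the reworked SPINAND_READID_OP(naddr, ndummy, ...) macro parameterizes. A conceptual sketch of that mapping follows; readid_layout() is a hypothetical helper, not part of the spi-mem API.

#include <stdio.h>

/* Mirrors the three spinand_readid_method variants. */
enum readid_method {
	READID_OPCODE,		/* 0x9f, ID bytes follow immediately  */
	READID_OPCODE_ADDR,	/* 0x9f + 1 address byte, then the ID */
	READID_OPCODE_DUMMY,	/* 0x9f + 1 dummy byte, then the ID   */
};

/*
 * Hypothetical helper: how many address/dummy bytes the READ_ID sequence
 * carries for each method, matching the (naddr, ndummy) arguments the
 * reworked SPINAND_READID_OP() macro expects.
 */
static void readid_layout(enum readid_method m, int *naddr, int *ndummy)
{
	*naddr = (m == READID_OPCODE_ADDR) ? 1 : 0;
	*ndummy = (m == READID_OPCODE_DUMMY) ? 1 : 0;
}

int main(void)
{
	const char *names[] = { "OPCODE", "OPCODE_ADDR", "OPCODE_DUMMY" };

	for (int m = 0; m < 3; m++) {
		int naddr, ndummy;

		readid_layout(m, &naddr, &ndummy);
		printf("%-12s naddr=%d ndummy=%d\n", names[m], naddr, ndummy);
	}
	return 0;
}
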
@@ -215,11 +224,16 @@ struct spinand_manufacturer_ops {
* struct spinand_manufacturer - SPI NAND manufacturer instance
* @id: manufacturer ID
* @name: manufacturer name
+ * @devid_len: number of bytes in device ID
+ * @chips: supported SPI NANDs under current manufacturer
+ * @nchips: number of SPI NANDs available in chips array
* @ops: manufacturer operations
*/
struct spinand_manufacturer {
u8 id;
char *name;
+ const struct spinand_info *chips;
+ const size_t nchips;
const struct spinand_manufacturer_ops *ops;
};
@@ -270,6 +284,7 @@ struct spinand_ecc_info {
};
#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
/**
* struct spinand_info - Structure used to describe SPI NAND chips
@@ -291,7 +306,7 @@ struct spinand_ecc_info {
*/
struct spinand_info {
const char *model;
- u16 devid;
+ struct spinand_devid devid;
u32 flags;
struct nand_memory_organization memorg;
struct nand_ecc_req eccreq;
@@ -305,6 +320,13 @@ struct spinand_info {
unsigned int target);
};
+#define SPINAND_ID(__method, ...) \
+ { \
+ .id = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ .method = __method, \
+ }
+
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
{ \
.read_cache = __read, \
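
SPINAND_ID() above packs the expected ID bytes and the read method into a struct spinand_devid initializer for a spinand_info table entry, using a compound literal so the byte count is derived automatically. The sketch below mimics that trick in a small stand-alone program; the 0xaa/0xbb ID bytes are placeholders, not a real part, and the TOY_* names are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Simplified copies of the kernel structures, for illustration only. */
enum readid_method { M_OPCODE, M_OPCODE_ADDR, M_OPCODE_DUMMY };

struct devid {
	const unsigned char *id;
	const unsigned char len;
	const enum readid_method method;
};

/* Same compound-literal trick as the kernel's SPINAND_ID() macro. */
#define TOY_SPINAND_ID(__method, ...)				\
	{							\
		.id = (const unsigned char[]){ __VA_ARGS__ },	\
		.len = sizeof((unsigned char[]){ __VA_ARGS__ }),\
		.method = __method,				\
	}

int main(void)
{
	/* Placeholder ID bytes (0xaa, 0xbb); a real table would use the
	 * manufacturer and device IDs from the datasheet. */
	struct devid id = TOY_SPINAND_ID(M_OPCODE_DUMMY, 0xaa, 0xbb);

	printf("len=%d method=%d first=0x%02x\n",
	       id.len, id.method, (unsigned)id.id[0]);
	return 0;
}
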
@@ -451,9 +473,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
nanddev_set_of_node(&spinand->base, np);
}
-int spinand_match_and_init(struct spinand_device *dev,
+int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid);
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);