Diffstat (limited to 'drivers/mtd')
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 20
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 11
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 1
-rw-r--r-- drivers/mtd/chips/cfi_probe.c | 2
-rw-r--r-- drivers/mtd/chips/cfi_util.c | 1
-rw-r--r-- drivers/mtd/chips/jedec_probe.c | 4
-rw-r--r-- drivers/mtd/chips/map_ram.c | 8
-rw-r--r-- drivers/mtd/devices/Kconfig | 13
-rw-r--r-- drivers/mtd/devices/Makefile | 1
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.c | 4
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 49
-rw-r--r-- drivers/mtd/devices/docg3.c | 11
-rw-r--r-- drivers/mtd/devices/docg3.h | 2
-rw-r--r-- drivers/mtd/devices/mchp23k256.c | 3
-rw-r--r-- drivers/mtd/devices/mchp48l640.c | 39
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 3
-rw-r--r-- drivers/mtd/devices/mtd_intel_dg.c | 880
-rw-r--r-- drivers/mtd/devices/phram.c | 17
-rw-r--r-- drivers/mtd/devices/powernv_flash.c | 7
-rw-r--r-- drivers/mtd/devices/slram.c | 2
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 24
-rw-r--r-- drivers/mtd/devices/st_spi_fsm.c | 24
-rw-r--r-- drivers/mtd/ftl.c | 4
-rw-r--r-- drivers/mtd/hyperbus/hbmc-am654.c | 26
-rw-r--r-- drivers/mtd/hyperbus/rpc-if.c | 13
-rw-r--r-- drivers/mtd/inftlcore.c | 9
-rw-r--r-- drivers/mtd/lpddr/lpddr2_nvm.c | 8
-rw-r--r-- drivers/mtd/lpddr/lpddr_cmds.c | 20
-rw-r--r-- drivers/mtd/lpddr/qinfo_probe.c | 4
-rw-r--r-- drivers/mtd/maps/Kconfig | 7
-rw-r--r-- drivers/mtd/maps/Makefile | 12
-rw-r--r-- drivers/mtd/maps/intel_vr_nor.c | 265
-rw-r--r-- drivers/mtd/maps/lantiq-flash.c | 15
-rw-r--r-- drivers/mtd/maps/map_funcs.c | 1
-rw-r--r-- drivers/mtd/maps/pcmciamtd.c | 1
-rw-r--r-- drivers/mtd/maps/physmap-bt1-rom.c | 1
-rw-r--r-- drivers/mtd/maps/physmap-core.c | 29
-rw-r--r-- drivers/mtd/maps/physmap-gemini.c | 2
-rw-r--r-- drivers/mtd/maps/physmap-ixp4xx.c | 2
-rw-r--r-- drivers/mtd/maps/physmap-ixp4xx.h | 1
-rw-r--r-- drivers/mtd/maps/physmap-versatile.c | 4
-rw-r--r-- drivers/mtd/maps/plat-ram.c | 9
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 3
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 10
-rw-r--r-- drivers/mtd/maps/sun_uflash.c | 8
-rw-r--r-- drivers/mtd/maps/vmu-flash.c | 2
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 38
-rw-r--r-- drivers/mtd/mtdblock.c | 2
-rw-r--r-- drivers/mtd/mtdblock_ro.c | 2
-rw-r--r-- drivers/mtd/mtdchar.c | 6
-rw-r--r-- drivers/mtd/mtdconcat.c | 2
-rw-r--r-- drivers/mtd/mtdcore.c | 192
-rw-r--r-- drivers/mtd/mtdcore.h | 1
-rw-r--r-- drivers/mtd/mtdoops.c | 11
-rw-r--r-- drivers/mtd/mtdpart.c | 28
-rw-r--r-- drivers/mtd/mtdpstore.c | 12
-rw-r--r-- drivers/mtd/mtdsuper.c | 45
-rw-r--r-- drivers/mtd/mtdswap.c | 4
-rw-r--r-- drivers/mtd/nand/Kconfig | 8
-rw-r--r-- drivers/mtd/nand/Makefile | 4
-rw-r--r-- drivers/mtd/nand/core.c | 131
-rw-r--r-- drivers/mtd/nand/ecc-mxic.c | 26
-rw-r--r-- drivers/mtd/nand/ecc-realtek.c | 464
-rw-r--r-- drivers/mtd/nand/ecc-sw-bch.c | 2
-rw-r--r-- drivers/mtd/nand/ecc-sw-hamming.c | 2
-rw-r--r-- drivers/mtd/nand/ecc.c | 4
-rw-r--r-- drivers/mtd/nand/onenand/generic.c | 2
-rw-r--r-- drivers/mtd/nand/onenand/onenand_base.c | 1
-rw-r--r-- drivers/mtd/nand/onenand/onenand_omap2.c | 15
-rw-r--r-- drivers/mtd/nand/onenand/onenand_samsung.c | 13
-rw-r--r-- drivers/mtd/nand/qpic_common.c | 779
-rw-r--r-- drivers/mtd/nand/raw/Kconfig | 65
-rw-r--r-- drivers/mtd/nand/raw/Makefile | 5
-rw-r--r-- drivers/mtd/nand/raw/ams-delta.c | 4
-rw-r--r-- drivers/mtd/nand/raw/arasan-nand-controller.c | 65
-rw-r--r-- drivers/mtd/nand/raw/atmel/nand-controller.c | 49
-rw-r--r-- drivers/mtd/nand/raw/atmel/pmecc.c | 19
-rw-r--r-- drivers/mtd/nand/raw/atmel/pmecc.h | 2
-rw-r--r-- drivers/mtd/nand/raw/au1550nd.c | 2
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/main.c | 2
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c | 5
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/Makefile | 2
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c | 101
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c | 126
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 927
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.h | 4
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/iproc_nand.c | 7
-rw-r--r-- drivers/mtd/nand/raw/cadence-nand-controller.c | 345
-rw-r--r-- drivers/mtd/nand/raw/cs553x_nand.c | 8
-rw-r--r-- drivers/mtd/nand/raw/davinci_nand.c | 271
-rw-r--r-- drivers/mtd/nand/raw/denali.h | 2
-rw-r--r-- drivers/mtd/nand/raw/denali_dt.c | 32
-rw-r--r-- drivers/mtd/nand/raw/denali_pci.c | 2
-rw-r--r-- drivers/mtd/nand/raw/diskonchip.c | 16
-rw-r--r-- drivers/mtd/nand/raw/fsl_elbc_nand.c | 5
-rw-r--r-- drivers/mtd/nand/raw/fsl_ifc_nand.c | 5
-rw-r--r-- drivers/mtd/nand/raw/fsl_upm.c | 8
-rw-r--r-- drivers/mtd/nand/raw/fsmc_nand.c | 29
-rw-r--r-- drivers/mtd/nand/raw/gpio.c | 2
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 107
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 6
-rw-r--r-- drivers/mtd/nand/raw/hisi504_nand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 1
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 17
-rw-r--r-- drivers/mtd/nand/raw/intel-nand-controller.c | 35
-rw-r--r-- drivers/mtd/nand/raw/internals.h | 1
-rw-r--r-- drivers/mtd/nand/raw/loongson-nand-controller.c | 1024
-rw-r--r-- drivers/mtd/nand/raw/lpc32xx_mlc.c | 36
-rw-r--r-- drivers/mtd/nand/raw/lpc32xx_slc.c | 45
-rw-r--r-- drivers/mtd/nand/raw/marvell_nand.c | 60
-rw-r--r-- drivers/mtd/nand/raw/meson_nand.c | 185
-rw-r--r-- drivers/mtd/nand/raw/mpc5121_nfc.c | 17
-rw-r--r-- drivers/mtd/nand/raw/mtk_nand.c | 105
-rw-r--r-- drivers/mtd/nand/raw/mxc_nand.c | 712
-rw-r--r-- drivers/mtd/nand/raw/mxic_nand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/nand_base.c | 350
-rw-r--r-- drivers/mtd/nand/raw/nand_bbt.c | 1
-rw-r--r-- drivers/mtd/nand/raw/nand_hynix.c | 7
-rw-r--r-- drivers/mtd/nand/raw/nand_jedec.c | 3
-rw-r--r-- drivers/mtd/nand/raw/nand_macronix.c | 2
-rw-r--r-- drivers/mtd/nand/raw/nand_onfi.c | 3
-rw-r--r-- drivers/mtd/nand/raw/nandsim.c | 9
-rw-r--r-- drivers/mtd/nand/raw/ndfc.c | 5
-rw-r--r-- drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c | 1029
-rw-r--r-- drivers/mtd/nand/raw/omap2.c | 58
-rw-r--r-- drivers/mtd/nand/raw/omap_elm.c | 2
-rw-r--r-- drivers/mtd/nand/raw/orion_nand.c | 24
-rw-r--r-- drivers/mtd/nand/raw/oxnas_nand.c | 209
-rw-r--r-- drivers/mtd/nand/raw/pasemi_nand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/pl35x-nand-controller.c | 27
-rw-r--r-- drivers/mtd/nand/raw/plat_nand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 2503
-rw-r--r-- drivers/mtd/nand/raw/r852.c | 7
-rw-r--r-- drivers/mtd/nand/raw/renesas-nand-controller.c | 27
-rw-r--r-- drivers/mtd/nand/raw/rockchip-nand-controller.c | 42
-rw-r--r-- drivers/mtd/nand/raw/s3c2410.c | 1233
-rw-r--r-- drivers/mtd/nand/raw/sh_flctl.c | 9
-rw-r--r-- drivers/mtd/nand/raw/sharpsl.c | 2
-rw-r--r-- drivers/mtd/nand/raw/sm_common.c | 4
-rw-r--r-- drivers/mtd/nand/raw/socrates_nand.c | 5
-rw-r--r-- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 160
-rw-r--r-- drivers/mtd/nand/raw/sunxi_nand.c | 451
-rw-r--r-- drivers/mtd/nand/raw/technologic-nand-controller.c | 222
-rw-r--r-- drivers/mtd/nand/raw/tegra_nand.c | 6
-rw-r--r-- drivers/mtd/nand/raw/txx9ndfmc.c | 13
-rw-r--r-- drivers/mtd/nand/raw/vf610_nfc.c | 43
-rw-r--r-- drivers/mtd/nand/raw/xway_nand.c | 6
-rw-r--r-- drivers/mtd/nand/spi/Makefile | 5
-rw-r--r-- drivers/mtd/nand/spi/alliancememory.c | 20
-rw-r--r-- drivers/mtd/nand/spi/ato.c | 14
-rw-r--r-- drivers/mtd/nand/spi/core.c | 459
-rw-r--r-- drivers/mtd/nand/spi/esmt.c | 144
-rw-r--r-- drivers/mtd/nand/spi/fmsh.c | 146
-rw-r--r-- drivers/mtd/nand/spi/foresee.c | 105
-rw-r--r-- drivers/mtd/nand/spi/gigadevice.c | 209
-rw-r--r-- drivers/mtd/nand/spi/macronix.c | 269
-rw-r--r-- drivers/mtd/nand/spi/micron.c | 171
-rw-r--r-- drivers/mtd/nand/spi/otp.c | 362
-rw-r--r-- drivers/mtd/nand/spi/paragon.c | 20
-rw-r--r-- drivers/mtd/nand/spi/skyhigh.c | 147
-rw-r--r-- drivers/mtd/nand/spi/toshiba.c | 55
-rw-r--r-- drivers/mtd/nand/spi/winbond.c | 395
-rw-r--r-- drivers/mtd/nand/spi/xtx.c | 154
-rw-r--r-- drivers/mtd/nftlcore.c | 43
-rw-r--r-- drivers/mtd/parsers/Kconfig | 5
-rw-r--r-- drivers/mtd/parsers/Makefile | 1
-rw-r--r-- drivers/mtd/parsers/ar7part.c | 129
-rw-r--r-- drivers/mtd/parsers/bcm47xxpart.c | 2
-rw-r--r-- drivers/mtd/parsers/brcm_u-boot.c | 1
-rw-r--r-- drivers/mtd/parsers/cmdlinepart.c | 18
-rw-r--r-- drivers/mtd/parsers/ofpart_core.c | 4
-rw-r--r-- drivers/mtd/parsers/redboot.c | 2
-rw-r--r-- drivers/mtd/parsers/tplink_safeloader.c | 1
-rw-r--r-- drivers/mtd/rfd_ftl.c | 4
-rw-r--r-- drivers/mtd/sm_ftl.c | 11
-rw-r--r-- drivers/mtd/spi-nor/Makefile | 3
-rw-r--r-- drivers/mtd/spi-nor/atmel.c | 149
-rw-r--r-- drivers/mtd/spi-nor/catalyst.c | 24
-rw-r--r-- drivers/mtd/spi-nor/controllers/hisi-sfc.c | 5
-rw-r--r-- drivers/mtd/spi-nor/controllers/nxp-spifi.c | 40
-rw-r--r-- drivers/mtd/spi-nor/core.c | 919
-rw-r--r-- drivers/mtd/spi-nor/core.h | 222
-rw-r--r-- drivers/mtd/spi-nor/debugfs.c | 29
-rw-r--r-- drivers/mtd/spi-nor/eon.c | 74
-rw-r--r-- drivers/mtd/spi-nor/esmt.c | 29
-rw-r--r-- drivers/mtd/spi-nor/everspin.c | 39
-rw-r--r-- drivers/mtd/spi-nor/fujitsu.c | 21
-rw-r--r-- drivers/mtd/spi-nor/gigadevice.c | 82
-rw-r--r-- drivers/mtd/spi-nor/intel.c | 23
-rw-r--r-- drivers/mtd/spi-nor/issi.c | 122
-rw-r--r-- drivers/mtd/spi-nor/macronix.c | 352
-rw-r--r-- drivers/mtd/spi-nor/micron-st.c | 501
-rw-r--r-- drivers/mtd/spi-nor/otp.c | 1
-rw-r--r-- drivers/mtd/spi-nor/sfdp.c | 110
-rw-r--r-- drivers/mtd/spi-nor/sfdp.h | 8
-rw-r--r-- drivers/mtd/spi-nor/spansion.c | 750
-rw-r--r-- drivers/mtd/spi-nor/sst.c | 213
-rw-r--r-- drivers/mtd/spi-nor/swp.c | 67
-rw-r--r-- drivers/mtd/spi-nor/sysfs.c | 16
-rw-r--r-- drivers/mtd/spi-nor/winbond.c | 432
-rw-r--r-- drivers/mtd/spi-nor/xilinx.c | 173
-rw-r--r-- drivers/mtd/spi-nor/xmc.c | 19
-rw-r--r-- drivers/mtd/ssfdc.c | 8
-rw-r--r-- drivers/mtd/tests/Makefile | 34
-rw-r--r-- drivers/mtd/tests/mtd_test.c | 9
-rw-r--r-- drivers/mtd/tests/oobtest.c | 2
-rw-r--r-- drivers/mtd/tests/pagetest.c | 2
-rw-r--r-- drivers/mtd/tests/subpagetest.c | 2
-rw-r--r-- drivers/mtd/ubi/Kconfig | 22
-rw-r--r-- drivers/mtd/ubi/Makefile | 1
-rw-r--r-- drivers/mtd/ubi/attach.c | 16
-rw-r--r-- drivers/mtd/ubi/block.c | 165
-rw-r--r-- drivers/mtd/ubi/build.c | 191
-rw-r--r-- drivers/mtd/ubi/cdev.c | 75
-rw-r--r-- drivers/mtd/ubi/debug.c | 113
-rw-r--r-- drivers/mtd/ubi/debug.h | 304
-rw-r--r-- drivers/mtd/ubi/eba.c | 13
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 139
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 73
-rw-r--r-- drivers/mtd/ubi/io.c | 96
-rw-r--r-- drivers/mtd/ubi/kapi.c | 83
-rw-r--r-- drivers/mtd/ubi/nvmem.c | 191
-rw-r--r-- drivers/mtd/ubi/ubi.h | 75
-rw-r--r-- drivers/mtd/ubi/vmt.c | 77
-rw-r--r-- drivers/mtd/ubi/vtbl.c | 6
-rw-r--r-- drivers/mtd/ubi/wl.c | 59
-rw-r--r-- drivers/mtd/ubi/wl.h | 9
227 files changed, 15256 insertions, 8135 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 11b06fefaa0e..c10693ba265b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;
/* Protection Register info */
- if (extp->NumProtectionFields)
+ if (extp->NumProtectionFields) {
+ struct cfi_intelext_otpinfo *otp =
+ (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
extra_size += (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ sizeof(struct cfi_intelext_otpinfo);
+
+ if (extp_size >= sizeof(*extp) + extra_size) {
+ int i;
+
+ /* Do some byteswapping if necessary */
+ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
+ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
+ otp->FactGroups = le16_to_cpu(otp->FactGroups);
+ otp->UserGroups = le16_to_cpu(otp->UserGroups);
+ otp++;
+ }
+ }
+ }
}
if (extp->MinorVersion >= '1') {
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 153fb8d0008e..7c91429a670b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,7 +32,6 @@
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -650,7 +649,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/*
* Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
- * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
+ * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
* http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
@@ -1780,10 +1779,8 @@ static int __xipram do_write_oneword_retry(struct map_info *map,
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_RETRIES) {
- ret = 0;
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
- }
}
xip_enable(map, chip, adr);
@@ -2412,7 +2409,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret;
@@ -2513,7 +2510,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 60c7f6f751c7..5e5266e2c2e1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -1399,4 +1399,5 @@ static void cfi_staa_destroy(struct mtd_info *mtd)
kfree(cfi);
}
+MODULE_DESCRIPTION("MTD chip driver for ST Advanced Architecture Command Set (ID 0x0020)");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index a04b6174181c..e254f9cd2796 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -208,7 +208,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
if (!num_erase_regions)
return 0;
- cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL);
if (!cfi->cfiq)
return 0;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 140c69a67e82..ef0aa6890bc0 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -441,4 +441,5 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
EXPORT_SYMBOL(cfi_varsize_frob);
+MODULE_DESCRIPTION("Common Flash Interface Generic utility functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 23c32fe584b7..b285962eee2a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1953,7 +1953,7 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
* as they will ignore the writes and don't care what address
* the F0 is written to */
if (cfi->addr_unlock1) {
- pr_debug( "reset unlock called %x %x \n",
+ pr_debug("reset unlock called %x %x\n",
cfi->addr_unlock1,cfi->addr_unlock2);
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1985,7 +1985,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
num_erase_regions = jedec_table[index].nr_regions;
- cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL);
if (!cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index e8dd6496927e..f9d3e32ef8e9 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -70,12 +70,16 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
- mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
- mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
+ /* Disable direct access when NO_XIP is set */
+ if (map->phys != NO_XIP) {
+ mtd->_point = mapram_point;
+ mtd->_unpoint = mapram_unpoint;
+ }
+
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ff2f9e55ef28..e518dfeee654 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -98,7 +98,7 @@ config MTD_MCHP48L640
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This enable SNOR support on SPEAR platforms using SMI controller
@@ -183,6 +183,17 @@ config MTD_POWERNV_FLASH
platforms from Linux. This device abstracts away the
firmware interface for flash access.
+config MTD_INTEL_DG
+ tristate "Intel Discrete Graphics non-volatile memory driver"
+ depends on AUXILIARY_BUS && MTD
+ depends on DRM_I915!=n || DRM_XE!=n || COMPILE_TEST
+ help
+ This provides an MTD device to access Intel Discrete Graphics
+ non-volatile memory.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mtd-intel-dg.
+
comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index d11eb2b8b6f8..9fe4ce9cffde 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o
+obj-$(CONFIG_MTD_INTEL_DG) += mtd_intel_dg.o
CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 3af50db8b21b..2edc46741774 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -357,14 +357,12 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
return 0;
}
-static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+static void bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
mtd_device_unregister(&b47s->mtd);
iounmap(b47s->window);
-
- return 0;
}
static struct platform_driver bcma_sflash_driver = {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index be106dc20ff3..b06c8dd51562 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct block_device *blkdev;
+ struct file *bdev_file;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,7 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -105,6 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -117,7 +118,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
cpylen = len; // this page
len = len - cpylen;
- page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+ page = page_read(mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -139,7 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -194,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
+ sync_blockdev(file_bdev(dev->bdev_file));
return;
}
@@ -206,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- blkdev_put(dev->blkdev, NULL);
+ if (dev->bdev_file) {
+ invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
+ bdev_fput(dev->bdev_file);
}
kfree(dev);
@@ -219,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
+static struct file __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct block_device *bdev = ERR_PTR(-ENODEV);
+ struct file *bdev_file = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -230,7 +230,7 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev;
+ return bdev_file;
/*
* We might not have the root device mounted at this point.
@@ -249,21 +249,23 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev))
+ bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_file))
break;
}
}
#endif
- return bdev;
+ return bdev_file;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
+ struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
+ loff_t size;
char *name;
if (!devname)
@@ -274,21 +276,24 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev = blkdev_get_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev))
- bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
- if (IS_ERR(bdev)) {
+ bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_file))
+ bdev_file = mdtblock_early_get_bdev(devname, mode, timeout,
+ dev);
+ if (IS_ERR(bdev_file)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->blkdev = bdev;
+ dev->bdev_file = bdev_file;
+ bdev = file_bdev(bdev_file);
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
goto err_free_block2mtd;
}
- if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ size = bdev_nr_bytes(bdev);
+ if ((long)size % erase_size) {
pr_err("erasesize must be a divisor of device size\n");
goto err_free_block2mtd;
}
@@ -306,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a7714e3de887..c93769c233d9 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1599,7 +1599,7 @@ static void doc_unregister_sysfs(struct platform_device *pdev,
*/
static int flashcontrol_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
u8 fctrl;
@@ -1621,7 +1621,7 @@ DEFINE_SHOW_ATTRIBUTE(flashcontrol);
static int asic_mode_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int pctrl, mode;
@@ -1658,7 +1658,7 @@ DEFINE_SHOW_ATTRIBUTE(asic_mode);
static int device_id_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int id;
mutex_lock(&docg3->cascade->lock);
@@ -1672,7 +1672,7 @@ DEFINE_SHOW_ATTRIBUTE(device_id);
static int protection_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
@@ -2046,7 +2046,7 @@ err_probe:
*
* Returns 0
*/
-static int docg3_release(struct platform_device *pdev)
+static void docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
@@ -2058,7 +2058,6 @@ static int docg3_release(struct platform_device *pdev)
doc_release_device(cascade->floors[floor]);
bch_free(docg3->cascade->bch);
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index 2c0c5114e4e6..af6ef0ac1f11 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -274,11 +274,11 @@ struct docg3_cascade {
* @cascade: the cascade this device belongs to
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
-
* @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
* reliable mode
* Fast mode implies more errors than normal mode.
* Reliable mode implies that page 2*n and 2*n+1 are clones.
+ * @max_block: maximum block number for this device
* @bbt: bad block table cache
* @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next
* page_write)
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 3a6ea7a6a30c..cef5f9677d39 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -15,7 +15,7 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define MAX_CMD_SIZE 4
@@ -257,4 +257,3 @@ module_spi_driver(mchp23k256_driver);
MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips");
MODULE_AUTHOR("Andrew Lunn <andre@lunn.ch>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:mchp23k256");
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 40cd5041174c..4af9208f9690 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -22,11 +22,13 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/string_choices.h>
struct mchp48_caps {
unsigned int size;
unsigned int page_size;
+ bool auto_disable_wel;
};
struct mchp48l640_flash {
@@ -127,11 +129,11 @@ static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
mutex_unlock(&flash->lock);
if (ret)
- dev_err(&flash->spi->dev, "write %sable failed ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_err(&flash->spi->dev, "write %s failed ret: %d",
+ str_enable_disable(enable), ret);
- dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_dbg(&flash->spi->dev, "write %s success ret: %d",
+ str_enable_disable(enable), ret);
if (enable)
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
@@ -194,9 +196,15 @@ static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
else
goto fail;
- ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
- if (ret)
- goto fail;
+ if (flash->caps->auto_disable_wel) {
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+ } else {
+ ret = mchp48l640_write_prepare(flash, false);
+ if (ret)
+ goto fail;
+ }
kfree(cmd);
return 0;
@@ -293,6 +301,13 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
static const struct mchp48_caps mchp48l640_caps = {
.size = SZ_8K,
.page_size = 32,
+ .auto_disable_wel = true,
+};
+
+static const struct mchp48_caps mb85rs128ty_caps = {
+ .size = SZ_16K,
+ .page_size = 256,
+ .auto_disable_wel = false,
};
static int mchp48l640_probe(struct spi_device *spi)
@@ -353,6 +368,10 @@ static const struct of_device_id mchp48l640_of_table[] = {
.compatible = "microchip,48l640",
.data = &mchp48l640_caps,
},
+ {
+ .compatible = "fujitsu,mb85rs128ty",
+ .data = &mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
@@ -362,6 +381,10 @@ static const struct spi_device_id mchp48l640_spi_ids[] = {
.name = "48l640",
.driver_data = (kernel_ulong_t)&mchp48l640_caps,
},
+ {
+ .name = "mb85rs128ty",
+ .driver_data = (kernel_ulong_t)&mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 1d3b2a94581f..ec52277e3dd5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -639,7 +638,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
/* name must be usable with cmdlinepart */
sprintf(priv->name, "spi%d.%d-%s",
- spi->master->bus_num, spi_get_chipselect(spi, 0),
+ spi->controller->bus_num, spi_get_chipselect(spi, 0),
name);
device = &priv->mtd;
diff --git a/drivers/mtd/devices/mtd_intel_dg.c b/drivers/mtd/devices/mtd_intel_dg.c
new file mode 100644
index 000000000000..2bab30dcd35f
--- /dev/null
+++ b/drivers/mtd/devices/mtd_intel_dg.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/intel_dg_nvm_aux.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_RPM_TIMEOUT_MS 500
+
+struct intel_dg_nvm {
+ struct kref refcnt;
+ struct mtd_info mtd;
+ struct device *dev;
+ struct mutex lock; /* region access lock */
+ void __iomem *base;
+ void __iomem *base2;
+ bool non_posted_erase;
+
+ size_t size;
+ unsigned int nregions;
+ struct {
+ const char *name;
+ u8 id;
+ u64 offset;
+ u64 size;
+ unsigned int is_readable:1;
+ unsigned int is_writable:1;
+ } regions[] __counted_by(nregions);
+};
+
+#define NVM_TRIGGER_REG 0x00000000
+#define NVM_VALSIG_REG 0x00000010
+#define NVM_ADDRESS_REG 0x00000040
+#define NVM_REGION_ID_REG 0x00000044
+#define NVM_DEBUG_REG 0x00000000
+/*
+ * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
+ * [23:16]-Reserved
+ * [31:24]-Erase MEM RegionID
+ */
+#define NVM_ERASE_REG 0x00000048
+#define NVM_ACCESS_ERROR_REG 0x00000070
+#define NVM_ADDRESS_ERROR_REG 0x00000074
+
+/* Flash Valid Signature */
+#define NVM_FLVALSIG 0x0FF0A55A
+
+#define NVM_MAP_ADDR_MASK GENMASK(7, 0)
+#define NVM_MAP_ADDR_SHIFT 0x00000004
+
+#define NVM_REGION_ID_DESCRIPTOR 0
+/* Flash Region Base Address */
+#define NVM_FRBA 0x40
+/* Flash Region __n - Flash Descriptor Record */
+#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4))
+/* Flash Map 1 Register */
+#define NVM_FLMAP1_REG 0x18
+#define NVM_FLMSTR4_OFFSET 0x00C
+
+#define NVM_ACCESS_ERROR_PCIE_MASK 0x7
+
+#define NVM_FREG_BASE_MASK GENMASK(15, 0)
+#define NVM_FREG_ADDR_MASK GENMASK(31, 16)
+#define NVM_FREG_ADDR_SHIFT 12
+#define NVM_FREG_MIN_REGION_SIZE 0xFFF
+
+#define NVM_NON_POSTED_ERASE_DONE BIT(23)
+#define NVM_NON_POSTED_ERASE_DONE_ITER 3000
+
+static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
+{
+ iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
+}
+
+static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
+{
+ void __iomem *base = nvm->base;
+
+ u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;
+
+ /* reset error bits */
+ if (reg)
+ iowrite32(reg, base + NVM_ACCESS_ERROR_REG);
+
+ return reg;
+}
+
+static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ return ioread32(base + NVM_TRIGGER_REG);
+}
+
+static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ return readq(base + NVM_TRIGGER_REG);
+}
+
+static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ iowrite32(data, base + NVM_TRIGGER_REG);
+}
+
+static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ writeq(data, base + NVM_TRIGGER_REG);
+}
+
+static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
+{
+ u32 fmstr4_addr;
+ u32 fmstr4;
+ u32 flmap1;
+ u32 fmba;
+
+ idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
+
+ flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ /* Get Flash Master Baser Address (FMBA) */
+ fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
+ fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;
+
+ fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ *access_map = fmstr4;
+ return 0;
+}
+
+/*
+ * Region read/write access encoded in the access map
+ * in the following order from the lower bit:
+ * [3:0] regions 12-15 read state
+ * [7:4] regions 12-15 write state
+ * [19:8] regions 0-11 read state
+ * [31:20] regions 0-11 write state
+ */
+static bool idg_nvm_region_readable(u32 access_map, u8 region)
+{
+ if (region < 12)
+ return access_map & BIT(region + 8); /* [19:8] */
+ else
+ return access_map & BIT(region - 12); /* [3:0] */
+}
+
+static bool idg_nvm_region_writable(u32 access_map, u8 region)
+{
+ if (region < 12)
+ return access_map & BIT(region + 20); /* [31:20] */
+ else
+ return access_map & BIT(region - 8); /* [7:4] */
+}
+
+static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
+{
+ u32 is_valid;
+
+ idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
+
+ is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ if (is_valid != NVM_FLVALSIG)
+ return -ENODEV;
+
+ return 0;
+}
+
+static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
+{
+ unsigned int i;
+
+ for (i = 0; i < nvm->nregions; i++) {
+ if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
+ nvm->regions[i].offset <= from &&
+ nvm->regions[i].size != 0)
+ break;
+ }
+
+ return i;
+}
+
+static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
+ loff_t offset, size_t len, const u32 *newdata)
+{
+ u32 data = idg_nvm_read32(nvm, to);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ memcpy((u8 *)&data + offset, newdata, len);
+
+ idg_nvm_write32(nvm, to, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
+ loff_t to, size_t len, const unsigned char *buf)
+{
+ size_t len_s = len;
+ size_t to_shift;
+ size_t len8;
+ size_t len4;
+ ssize_t ret;
+ size_t to4;
+ size_t i;
+
+ idg_nvm_set_region_id(nvm, region);
+
+ to4 = ALIGN_DOWN(to, sizeof(u32));
+ to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
+ if (to - to4) {
+ ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
+ if (ret < 0)
+ return ret;
+
+ buf += to_shift;
+ to += to_shift;
+ len_s -= to_shift;
+ }
+
+ if (!IS_ALIGNED(to, sizeof(u64)) &&
+ ((to ^ (to + len_s)) & GENMASK(31, 10))) {
+ /*
+ * Workaround reads/writes across 1k-aligned addresses
+ * (start u32 before 1k, end u32 after)
+ * as this fails on hardware.
+ */
+ u32 data;
+
+ memcpy(&data, &buf[0], sizeof(u32));
+ idg_nvm_write32(nvm, to, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ buf += sizeof(u32);
+ to += sizeof(u32);
+ len_s -= sizeof(u32);
+ }
+
+ len8 = ALIGN_DOWN(len_s, sizeof(u64));
+ for (i = 0; i < len8; i += sizeof(u64)) {
+ u64 data;
+
+ memcpy(&data, &buf[i], sizeof(u64));
+ idg_nvm_write64(nvm, to + i, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ }
+
+ len4 = len_s - len8;
+ if (len4 >= sizeof(u32)) {
+ u32 data;
+
+ memcpy(&data, &buf[i], sizeof(u32));
+ idg_nvm_write32(nvm, to + i, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ i += sizeof(u32);
+ len4 -= sizeof(u32);
+ }
+
+ if (len4 > 0) {
+ ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return len;
+}
+
+static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
+ loff_t from, size_t len, unsigned char *buf)
+{
+ size_t len_s = len;
+ size_t from_shift;
+ size_t from4;
+ size_t len8;
+ size_t len4;
+ size_t i;
+
+ idg_nvm_set_region_id(nvm, region);
+
+ from4 = ALIGN_DOWN(from, sizeof(u32));
+ from_shift = min(sizeof(u32) - ((size_t)from - from4), len);
+
+ if (from - from4) {
+ u32 data = idg_nvm_read32(nvm, from4);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
+ len_s -= from_shift;
+ buf += from_shift;
+ from += from_shift;
+ }
+
+ if (!IS_ALIGNED(from, sizeof(u64)) &&
+ ((from ^ (from + len_s)) & GENMASK(31, 10))) {
+ /*
+ * Workaround reads/writes across 1k-aligned addresses
+ * (start u32 before 1k, end u32 after)
+ * as this fails on hardware.
+ */
+ u32 data = idg_nvm_read32(nvm, from);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[0], &data, sizeof(data));
+ len_s -= sizeof(u32);
+ buf += sizeof(u32);
+ from += sizeof(u32);
+ }
+
+ len8 = ALIGN_DOWN(len_s, sizeof(u64));
+ for (i = 0; i < len8; i += sizeof(u64)) {
+ u64 data = idg_nvm_read64(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ memcpy(&buf[i], &data, sizeof(data));
+ }
+
+ len4 = len_s - len8;
+ if (len4 >= sizeof(u32)) {
+ u32 data = idg_nvm_read32(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[i], &data, sizeof(data));
+ i += sizeof(u32);
+ len4 -= sizeof(u32);
+ }
+
+ if (len4 > 0) {
+ u32 data = idg_nvm_read32(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[i], &data, len4);
+ }
+
+ return len;
+}
+
+static ssize_t
+idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
+{
+ void __iomem *base2 = nvm->base2;
+ void __iomem *base = nvm->base;
+ const u32 block = 0x10;
+ u32 iter = 0;
+ u32 reg;
+ u64 i;
+
+ for (i = 0; i < len; i += SZ_4K) {
+ iowrite32(from + i, base + NVM_ADDRESS_REG);
+ iowrite32(region << 24 | block, base + NVM_ERASE_REG);
+ if (nvm->non_posted_erase) {
+ /* Wait for Erase Done */
+ reg = ioread32(base2 + NVM_DEBUG_REG);
+ while (!(reg & NVM_NON_POSTED_ERASE_DONE) &&
+ ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) {
+ msleep(10);
+ reg = ioread32(base2 + NVM_DEBUG_REG);
+ }
+ if (reg & NVM_NON_POSTED_ERASE_DONE) {
+ /* Clear Erase Done */
+ iowrite32(reg, base2 + NVM_DEBUG_REG);
+ } else {
+ *fail_addr = from + i;
+ return -ETIME;
+ }
+ }
+ /* Since the writes are via sgunit
+ * we cannot do back to back erases.
+ */
+ msleep(50);
+ }
+ return len;
+}
+
+static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,
+ bool non_posted_erase)
+{
+ u32 access_map = 0;
+ unsigned int i, n;
+ int ret;
+
+ nvm->dev = device;
+
+ /* clean error register, previous errors are ignored */
+ idg_nvm_error(nvm);
+
+ ret = idg_nvm_is_valid(nvm);
+ if (ret) {
+ dev_err(device, "The MEM is not valid %d\n", ret);
+ return ret;
+ }
+
+ if (idg_nvm_get_access_map(nvm, &access_map))
+ return -EIO;
+
+ for (i = 0, n = 0; i < nvm->nregions; i++) {
+ u32 address, base, limit, region;
+ u8 id = nvm->regions[i].id;
+
+ address = NVM_FLREG(id);
+ region = idg_nvm_read32(nvm, address);
+
+ base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
+ limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
+ NVM_FREG_MIN_REGION_SIZE;
+
+ dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
+ id, nvm->regions[i].name, region, base, limit);
+
+ if (base >= limit || (i > 0 && limit == 0)) {
+ dev_dbg(device, "[%d] %s: disabled\n",
+ id, nvm->regions[i].name);
+ nvm->regions[i].is_readable = 0;
+ continue;
+ }
+
+ if (nvm->size < limit)
+ nvm->size = limit;
+
+ nvm->regions[i].offset = base;
+ nvm->regions[i].size = limit - base + 1;
+ /* No write access to descriptor; mask it out*/
+ nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
+
+ nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
+ dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
+ nvm->regions[i].name,
+ nvm->regions[i].id,
+ nvm->regions[i].offset,
+ nvm->regions[i].size,
+ nvm->regions[i].is_readable,
+ nvm->regions[i].is_writable);
+
+ if (nvm->regions[i].is_readable)
+ n++;
+ }
+
+ nvm->non_posted_erase = non_posted_erase;
+
+ dev_dbg(device, "Registered %d regions\n", n);
+ dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase);
+
+ /* Need to add 1 to the amount of memory
+ * so it is reported as an even block
+ */
+ nvm->size += 1;
+
+ return n;
+}
+
+static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ size_t total_len;
+ unsigned int idx;
+ ssize_t bytes;
+ loff_t from;
+ size_t len;
+ u8 region;
+ u64 addr;
+ int ret;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
+ dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
+ info->addr, info->len);
+ info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EINVAL;
+ }
+
+ total_len = info->len;
+ addr = info->addr;
+
+ ret = pm_runtime_resume_and_get(nvm->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+ return ret;
+ }
+
+ ret = 0;
+ guard(mutex)(&nvm->lock);
+
+ while (total_len > 0) {
+ if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
+ dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
+ info->fail_addr = addr;
+ ret = -ERANGE;
+ break;
+ }
+
+ idx = idg_nvm_get_region(nvm, addr);
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ ret = -ERANGE;
+ break;
+ }
+
+ from = addr - nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ len = total_len;
+ if (len > nvm->regions[idx].size - from)
+ len = nvm->regions[idx].size - from;
+
+ dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
+ region, nvm->regions[idx].name, from, len);
+
+ bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
+ if (bytes < 0) {
+ dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
+ info->fail_addr += nvm->regions[idx].offset;
+ ret = bytes;
+ break;
+ }
+
+ addr += len;
+ total_len -= len;
+ }
+
+ pm_runtime_put_autosuspend(nvm->dev);
+ return ret;
+}
+
+static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ unsigned int idx;
+ ssize_t ret;
+ u8 region;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ idx = idg_nvm_get_region(nvm, from);
+
+ dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
+ nvm->regions[idx].id, nvm->regions[idx].name, from, len);
+
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ return -ERANGE;
+ }
+
+ from -= nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ if (len > nvm->regions[idx].size - from)
+ len = nvm->regions[idx].size - from;
+
+ ret = pm_runtime_resume_and_get(nvm->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
+ guard(mutex)(&nvm->lock);
+
+ ret = idg_read(nvm, region, from, len, buf);
+ if (ret < 0) {
+ dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
+ } else {
+ *retlen = ret;
+ ret = 0;
+ }
+
+ pm_runtime_put_autosuspend(nvm->dev);
+ return ret;
+}
+
+static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ unsigned int idx;
+ ssize_t ret;
+ u8 region;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ idx = idg_nvm_get_region(nvm, to);
+
+ dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
+ nvm->regions[idx].id, nvm->regions[idx].name, to, len);
+
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ return -ERANGE;
+ }
+
+ to -= nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ if (len > nvm->regions[idx].size - to)
+ len = nvm->regions[idx].size - to;
+
+ ret = pm_runtime_resume_and_get(nvm->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
+ guard(mutex)(&nvm->lock);
+
+ ret = idg_write(nvm, region, to, len, buf);
+ if (ret < 0) {
+ dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
+ } else {
+ *retlen = ret;
+ ret = 0;
+ }
+
+ pm_runtime_put_autosuspend(nvm->dev);
+ return ret;
+}
+
+static void intel_dg_nvm_release(struct kref *kref)
+{
+ struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
+ int i;
+
+ pr_debug("freeing intel_dg nvm\n");
+ for (i = 0; i < nvm->nregions; i++)
+ kfree(nvm->regions[i].name);
+ mutex_destroy(&nvm->lock);
+ kfree(nvm);
+}
+
+static int intel_dg_mtd_get_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct intel_dg_nvm *nvm = master->priv;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+ pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
+ kref_get(&nvm->refcnt);
+
+ return 0;
+}
+
+static void intel_dg_mtd_put_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct intel_dg_nvm *nvm = master->priv;
+
+ if (WARN_ON(!nvm))
+ return;
+ pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+}
+
+static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
+ unsigned int nparts, bool writable_override)
+{
+ struct mtd_partition *parts = NULL;
+ unsigned int i, n;
+ int ret;
+
+ dev_dbg(device, "registering with mtd\n");
+
+ nvm->mtd.owner = THIS_MODULE;
+ nvm->mtd.dev.parent = device;
+ nvm->mtd.flags = MTD_CAP_NORFLASH;
+ nvm->mtd.type = MTD_DATAFLASH;
+ nvm->mtd.priv = nvm;
+ nvm->mtd._write = intel_dg_mtd_write;
+ nvm->mtd._read = intel_dg_mtd_read;
+ nvm->mtd._erase = intel_dg_mtd_erase;
+ nvm->mtd._get_device = intel_dg_mtd_get_device;
+ nvm->mtd._put_device = intel_dg_mtd_put_device;
+ nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
+ nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
+ nvm->mtd.size = nvm->size;
+
+ parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
+ if (!nvm->regions[i].is_readable)
+ continue;
+ parts[n].name = nvm->regions[i].name;
+ parts[n].offset = nvm->regions[i].offset;
+ parts[n].size = nvm->regions[i].size;
+ if (!nvm->regions[i].is_writable && !writable_override)
+ parts[n].mask_flags = MTD_WRITEABLE;
+ n++;
+ }
+
+ ret = mtd_device_register(&nvm->mtd, parts, n);
+
+ kfree(parts);
+ return ret;
+}
+
+static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *aux_dev_id)
+{
+ struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
+ struct intel_dg_nvm *nvm;
+ struct device *device;
+ unsigned int nregions;
+ unsigned int i, n;
+ int ret;
+
+ device = &aux_dev->dev;
+
+ /* count available regions */
+ for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
+ if (invm->regions[i].name)
+ nregions++;
+ }
+
+ if (!nregions) {
+ dev_err(device, "no regions defined\n");
+ return -ENODEV;
+ }
+
+ nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
+
+ kref_init(&nvm->refcnt);
+ mutex_init(&nvm->lock);
+
+ for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
+ if (!invm->regions[i].name)
+ continue;
+
+ char *name = kasprintf(GFP_KERNEL, "%s.%s",
+ dev_name(&aux_dev->dev), invm->regions[i].name);
+ if (!name)
+ continue;
+ nvm->regions[n].name = name;
+ nvm->regions[n].id = i;
+ n++;
+ }
+ nvm->nregions = n; /* in case where kasprintf fail */
+
+ ret = devm_pm_runtime_enable(device);
+ if (ret < 0) {
+ dev_err(device, "rpm: enable failed %d\n", ret);
+ goto err_norpm;
+ }
+
+ pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT_MS);
+ pm_runtime_use_autosuspend(device);
+
+ ret = pm_runtime_resume_and_get(device);
+ if (ret < 0) {
+ dev_err(device, "rpm: get failed %d\n", ret);
+ goto err_norpm;
+ }
+
+ nvm->base = devm_ioremap_resource(device, &invm->bar);
+ if (IS_ERR(nvm->base)) {
+ ret = PTR_ERR(nvm->base);
+ goto err;
+ }
+
+ if (invm->non_posted_erase) {
+ nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
+ if (IS_ERR(nvm->base2)) {
+ ret = PTR_ERR(nvm->base2);
+ goto err;
+ }
+ }
+
+ ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
+ if (ret < 0) {
+ dev_err(device, "cannot initialize nvm %d\n", ret);
+ goto err;
+ }
+
+ ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
+ if (ret) {
+ dev_err(device, "failed init mtd %d\n", ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&aux_dev->dev, nvm);
+
+ pm_runtime_put(device);
+ return 0;
+
+err:
+ pm_runtime_put(device);
+err_norpm:
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+ return ret;
+}
+
+static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
+{
+ struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);
+
+ if (!nvm)
+ return;
+
+ mtd_device_unregister(&nvm->mtd);
+
+ dev_set_drvdata(&aux_dev->dev, NULL);
+
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+}
+
+static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
+ {
+ .name = "i915.nvm",
+ },
+ {
+ .name = "xe.nvm",
+ },
+ {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);
+
+static struct auxiliary_driver intel_dg_mtd_driver = {
+ .probe = intel_dg_mtd_probe,
+ .remove = intel_dg_mtd_remove,
+ .driver = {
+ /* auxiliary_driver_register() sets .name to be the modname */
+ },
+ .id_table = intel_dg_mtd_id_table
+};
+module_auxiliary_driver(intel_dg_mtd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel DGFX MTD driver");
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 208bd4d871f4..fd9ec165e61a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/security.h>
struct phram_mtd_list {
struct mtd_info mtd;
@@ -388,15 +389,13 @@ static int phram_probe(struct platform_device *pdev)
PAGE_SIZE);
}
-static int phram_remove(struct platform_device *pdev)
+static void phram_remove(struct platform_device *pdev)
{
struct phram_mtd_list *phram = platform_get_drvdata(pdev);
mtd_device_unregister(&phram->mtd);
phram_unmap(phram);
kfree(phram);
-
- return 0;
}
static struct platform_driver phram_driver = {
@@ -412,19 +411,23 @@ static int __init init_phram(void)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&phram_driver);
if (ret)
return ret;
#ifndef MODULE
- if (phram_paramline[0])
+ if (phram_paramline[0]) {
ret = phram_setup(phram_paramline);
+ if (ret)
+ platform_driver_unregister(&phram_driver);
+ }
phram_init_called = 1;
#endif
- if (ret)
- platform_driver_unregister(&phram_driver);
-
return ret;
}
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 36e060386e59..a12427d9e20f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
* get them
*/
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
@@ -265,14 +268,12 @@ static int powernv_flash_probe(struct platform_device *pdev)
*
* Returns 0
*/
-static int powernv_flash_release(struct platform_device *pdev)
+static void powernv_flash_release(struct platform_device *pdev)
{
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
WARN_ON(mtd_device_unregister(&data->mtd));
-
- return 0;
}
static const struct of_device_id powernv_flash_match[] = {
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 28131a127d06..8297b366a066 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -296,10 +296,12 @@ static int __init init_slram(void)
T("slram: devname = %s\n", devname);
if ((!map) || (!(devstart = strsep(&map, ",")))) {
E("slram: No devicestart specified.\n");
+ break;
}
T("slram: devstart = %s\n", devstart);
if ((!map) || (!(devlength = strsep(&map, ",")))) {
E("slram: No devicelength / -end specified.\n");
+ break;
}
T("slram: devlength = %s\n", devlength);
if (parse_cmdline(devname, devstart, devlength) != 0) {
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index cc17133be297..f02f96bff450 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -937,7 +937,6 @@ static int spear_smi_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
struct spear_smi *dev;
- struct resource *smi_base;
int irq, ret = 0;
int i;
@@ -975,9 +974,7 @@ static int spear_smi_probe(struct platform_device *pdev)
goto err;
}
- smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->io_base)) {
ret = PTR_ERR(dev->io_base);
goto err;
@@ -996,21 +993,17 @@ static int spear_smi_probe(struct platform_device *pdev)
dev->num_flashes = MAX_NUM_FLASH_CHIP;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto err;
}
- ret = clk_prepare_enable(dev->clk);
- if (ret)
- goto err;
-
ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
pdev->name, dev);
if (ret) {
dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
- goto err_irq;
+ goto err;
}
mutex_init(&dev->lock);
@@ -1023,14 +1016,11 @@ static int spear_smi_probe(struct platform_device *pdev)
ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
if (ret) {
dev_err(&dev->pdev->dev, "bank setup failed\n");
- goto err_irq;
+ goto err;
}
}
return 0;
-
-err_irq:
- clk_disable_unprepare(dev->clk);
err:
return ret;
}
@@ -1041,7 +1031,7 @@ err:
*
* free all allocations and delete the partitions.
*/
-static int spear_smi_remove(struct platform_device *pdev)
+static void spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
@@ -1058,10 +1048,6 @@ static int spear_smi_remove(struct platform_device *pdev)
/* clean up mtd stuff */
WARN_ON(mtd_device_unregister(&flash->mtd));
}
-
- clk_disable_unprepare(dev->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 3dbb1aa80bfa..f2266145b821 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2016,7 +2016,6 @@ static int stfsm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct flash_info *info;
- struct resource *res;
struct stfsm *fsm;
int ret;
@@ -2033,18 +2032,9 @@ static int stfsm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsm);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Resource not found\n");
- return -ENODEV;
- }
-
- fsm->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsm->base)) {
- dev_err(&pdev->dev,
- "Failed to reserve memory region %pR\n", res);
+ fsm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fsm->base))
return PTR_ERR(fsm->base);
- }
fsm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(fsm->clk)) {
@@ -2107,16 +2097,13 @@ static int stfsm_probe(struct platform_device *pdev)
return mtd_device_register(&fsm->mtd, NULL, 0);
}
-static int stfsm_remove(struct platform_device *pdev)
+static void stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
WARN_ON(mtd_device_unregister(&fsm->mtd));
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int stfsmfsm_suspend(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
@@ -2132,9 +2119,8 @@ static int stfsmfsm_resume(struct device *dev)
return clk_prepare_enable(fsm->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
@@ -2148,7 +2134,7 @@ static struct platform_driver stfsm_driver = {
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
- .pm = &stfsm_pm_ops,
+ .pm = pm_sleep_ptr(&stfsm_pm_ops),
},
};
module_platform_driver(stfsm_driver);
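The spear_smi and st_spi_fsm conversions above share one modernization pattern: devm_platform_ioremap_resource() collapses the platform_get_resource()/devm_ioremap_resource() pair (and prints its own error message), devm_clk_get_enabled() ties the clock prepare/enable lifetime to the device so the error and remove paths lose their clk_disable_unprepare() calls, DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replace the CONFIG_PM_SLEEP #ifdef, and .remove() returns void because device unregistration cannot fail. A minimal sketch of the resulting driver shape, using a hypothetical "foo" device rather than anything in this series:

	#include <linux/clk.h>
	#include <linux/io.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	struct foo_priv {
		void __iomem *base;
		struct clk *clk;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* Get MEM resource 0, request and ioremap it in one call. */
		priv->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);

		/* Prepared and enabled here, undone automatically on unbind. */
		priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
		if (IS_ERR(priv->clk))
			return PTR_ERR(priv->clk);

		platform_set_drvdata(pdev, priv);
		return 0;
	}

	static void foo_remove(struct platform_device *pdev)
	{
		/* Nothing to unwind by hand; devm releases base and clk. */
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove = foo_remove,
		.driver = { .name = "foo" },
	};
	module_platform_driver(foo_driver);
	MODULE_DESCRIPTION("Sketch of the devm-based probe/remove shape");
	MODULE_LICENSE("GPL");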
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 8c22064ead38..59a901549257 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -263,7 +263,7 @@ static int build_maps(partition_t *part)
/* Set up virtual page map */
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
- part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
+ part->VirtualBlockMap = vmalloc_array(blocks, sizeof(uint32_t));
if (!part->VirtualBlockMap)
goto out_XferInfo;
@@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part,
return -ENOMEM;
erase->addr = xfer->Offset;
- erase->len = 1 << part->header.EraseUnitSize;
+ erase->len = 1ULL << part->header.EraseUnitSize;
ret = mtd_erase(part->mbd.mtd, erase);
if (!ret) {
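The erase_xfer() fix above (and the lpddr_cmds.c hunks further down, which switch to 1ULL/1UL shifts for DevSizeShift and chipshift) addresses the same C pitfall: the constant 1 has type int, so 1 << n is computed in 32 bits regardless of the 64-bit destination, and the result is undefined once n reaches 31 (a corrupt or hostile header field can easily report that much). Widening the constant makes the shift itself 64-bit. A standalone illustration in plain userspace C:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int shift = 32;	/* e.g. a corrupt EraseUnitSize */
		uint64_t len;

		/*
		 * len = 1 << shift;  -- undefined: the shift is evaluated
		 * as a 32-bit int before the widening assignment to the u64.
		 */
		len = 1ULL << shift;		/* well-defined 64-bit shift */

		printf("erase length: %llu\n", (unsigned long long)len);
		return 0;
	}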
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a6161ce340d4..9d31464046b2 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev)
priv->hbdev.np = of_get_next_child(np, NULL);
ret = of_address_to_resource(priv->hbdev.np, 0, &res);
if (ret)
- return ret;
+ goto put_node;
- if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ if (of_property_present(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
- if (IS_ERR(control))
- return PTR_ERR(control);
+ if (IS_ERR(control)) {
+ ret = PTR_ERR(control);
+ goto put_node;
+ }
ret = mux_control_select(control, 1);
if (ret) {
dev_err(dev, "Failed to select HBMC mux\n");
- return ret;
+ goto put_node;
}
priv->mux_ctrl = control;
}
priv->hbdev.map.size = resource_size(&res);
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(priv->hbdev.map.virt))
- return PTR_ERR(priv->hbdev.map.virt);
+ if (IS_ERR(priv->hbdev.map.virt)) {
+ ret = PTR_ERR(priv->hbdev.map.virt);
+ goto disable_mux;
+ }
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
@@ -226,10 +230,12 @@ release_dma:
disable_mux:
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
+put_node:
+ of_node_put(priv->hbdev.np);
return ret;
}
-static int am654_hbmc_remove(struct platform_device *pdev)
+static void am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
@@ -241,8 +247,7 @@ static int am654_hbmc_remove(struct platform_device *pdev)
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
-
- return 0;
+ of_node_put(priv->hbdev.np);
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
@@ -267,5 +272,4 @@ module_platform_driver(am654_hbmc_platform_driver);
MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index ef32fca5f785..f448e23f9260 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -154,20 +154,25 @@ out_disable_rpm:
return error;
}
-static int rpcif_hb_remove(struct platform_device *pdev)
+static void rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
hyperbus_unregister_device(&hyperbus->hbdev);
pm_runtime_disable(hyperbus->rpc.dev);
-
- return 0;
}
+static const struct platform_device_id rpc_if_hyperflash_id_table[] = {
+ { .name = "rpc-if-hyperflash" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, rpc_if_hyperflash_id_table);
+
static struct platform_driver rpcif_platform_driver = {
.probe = rpcif_hb_probe,
- .remove = rpcif_hb_remove,
+ .remove = rpcif_hb_remove,
+ .id_table = rpc_if_hyperflash_id_table,
.driver = {
.name = "rpc-if-hyperflash",
},
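The rpc-if-hyperflash platform device is instantiated by name from the parent RPC-IF driver, not from devicetree, so it has no OF modalias; without a platform MODULE_DEVICE_TABLE() the module can be bound once loaded but is never autoloaded by udev. (Conversely, the hbmc-am654 diff above can drop its hand-written MODULE_ALIAS() because its device does come from DT.) The generated alias is platform:<name>; the table shape, for a hypothetical device:

	static const struct platform_device_id foo_id_table[] = {
		{ .name = "foo-device" },  /* emits MODALIAS=platform:foo-device */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(platform, foo_id_table);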
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 9739387cff8c..58c6e1743f5c 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
silly = MAX_LOOPS;
while (thisEUN <= inftl->lastEUN) {
- inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
- blockofs, 8, &retlen, (char *)&bci);
-
- status = bci.Status | bci.Status1;
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
pr_debug("INFTL: status of block %d in EUN %d is %x\n",
block, writeEUN, status);
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index e71af4c49096..565b71f7fdd5 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -412,7 +412,6 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
struct map_info *map;
struct mtd_info *mtd;
struct resource *add_range;
- struct resource *control_regs;
struct pcm_int_data *pcm_data;
/* Allocate memory control_regs data structures */
@@ -452,8 +451,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
simple_map_init(map); /* fill with default methods */
- control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ pcm_data->ctl_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcm_data->ctl_regs))
return PTR_ERR(pcm_data->ctl_regs);
@@ -478,11 +476,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
/*
* lpddr2_nvm driver remove method
*/
-static int lpddr2_nvm_remove(struct platform_device *pdev)
+static void lpddr2_nvm_remove(struct platform_device *pdev)
{
WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
-
- return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 3c3939bc2dad..cd37d58abacb 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -61,7 +61,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->_point = lpddr_point;
mtd->_unpoint = lpddr_unpoint;
}
- mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+ mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
@@ -79,7 +79,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mutex_init(&shared[i].lock);
for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
*chip = lpddr->chips[i];
- chip->start += j << lpddr->chipshift;
+ chip->start += (unsigned long)j << lpddr->chipshift;
chip->oldstate = chip->state = FL_READY;
chip->priv = &shared[i];
/* those should be reset too since
@@ -142,7 +142,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
if (dsr & DSR_READY_STATUS)
break;
if (!timeo) {
- printk(KERN_ERR "%s: Flash timeout error state %d \n",
+ printk(KERN_ERR "%s: Flash timeout error state %d\n",
map->name, chip_state);
ret = -ETIME;
break;
@@ -186,7 +186,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
if (dsr & DSR_ERR) {
/* Clear DSR*/
map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
- printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
+ printk(KERN_WARNING"%s: Bad status on wait: 0x%x\n",
map->name, dsr);
print_drs_error(dsr);
ret = -EIO;
@@ -321,7 +321,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Resume and pretend we weren't here. */
put_chip(map, chip);
printk(KERN_ERR "%s: suspend operation failed."
- "State may be wrong \n", map->name);
+ "State may be wrong\n", map->name);
return -EIO;
}
chip->erase_suspended = 1;
@@ -468,7 +468,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
chip->state = FL_WRITING;
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
if (ret) {
- printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
+ printk(KERN_WARNING"%s Buffer program error: %d at %lx\n",
map->name, ret, adr);
goto out;
}
@@ -559,7 +559,7 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
break;
if ((len + ofs - 1) >> lpddr->chipshift)
- thislen = (1<<lpddr->chipshift) - ofs;
+ thislen = (1UL << lpddr->chipshift) - ofs;
else
thislen = len;
/* get the chip */
@@ -575,7 +575,7 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
len -= thislen;
ofs = 0;
- last_end += 1 << lpddr->chipshift;
+ last_end += 1UL << lpddr->chipshift;
chipnum++;
chip = &lpddr->chips[chipnum];
}
@@ -601,7 +601,7 @@ static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
break;
if ((len + ofs - 1) >> lpddr->chipshift)
- thislen = (1<<lpddr->chipshift) - ofs;
+ thislen = (1UL << lpddr->chipshift) - ofs;
else
thislen = len;
@@ -736,7 +736,7 @@ static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
ret = wait_for_ready(map, chip, 1);
if (ret) {
- printk(KERN_ERR "%s: block unlock error status %d \n",
+ printk(KERN_ERR "%s: block unlock error status %d\n",
map->name, ret);
goto out;
}
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 137ae5f0a19b..42281e460c62 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -55,7 +55,7 @@ static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
return minor | (major << bankwidth);
}
}
- printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name);
+ printk(KERN_ERR"%s qinfo id string is wrong!\n", map->name);
BUG();
return -1;
}
@@ -112,7 +112,7 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
return 1; /* "PFOW" is found */
out:
- printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n",
+ printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found\n",
map->name, map->pfow_base);
return 0;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index e098ae937ce8..8a8b19874e23 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -341,13 +341,6 @@ config MTD_UCLINUX
help
Map driver to support image based filesystems for uClinux.
-config MTD_INTEL_VR_NOR
- tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
- depends on PCI
- help
- Map driver for a NOR flash bank located on the Expansion Bus of the
- Intel Vermilion Range chipset.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 094cfb244086..019f1e92cc41 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -17,13 +17,12 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-physmap-objs-y += physmap-core.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
-physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
+physmap-y := physmap-core.o
+physmap-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
+physmap-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
+physmap-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
@@ -40,6 +39,5 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
-obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
deleted file mode 100644
index d67b845b0e89..000000000000
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * drivers/mtd/maps/intel_vr_nor.c
- *
- * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
- * Vermilion Range chipset.
- *
- * The Vermilion Range Expansion Bus supports four chip selects, each of which
- * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
- * is a 256MiB memory region containing the address spaces for all four of the
- * chip selects, with start addresses hardcoded on 64MiB boundaries.
- *
- * This map driver only supports NOR flash on chip select 0. The buswidth
- * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
- * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
- * not modify the value in the EXP_TIMING_CS0 register except to enable writing
- * and disable boot acceleration. The timing parameters in the register are
- * assumed to have been properly initialized by the BIOS. The reset default
- * timing parameters are maximally conservative (slow), so access to the flash
- * will be slower than it should be if the BIOS has not initialized the timing
- * parameters.
- *
- * Author: Andy Lowe <alowe@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/cfi.h>
-#include <linux/mtd/flashchip.h>
-
-#define DRV_NAME "vr_nor"
-
-struct vr_nor_mtd {
- void __iomem *csr_base;
- struct map_info map;
- struct mtd_info *info;
- struct pci_dev *dev;
-};
-
-/* Expansion Bus Configuration and Status Registers are in BAR 0 */
-#define EXP_CSR_MBAR 0
-/* Expansion Bus Memory Window is BAR 1 */
-#define EXP_WIN_MBAR 1
-/* Maximum address space for Chip Select 0 is 64MiB */
-#define CS0_SIZE 0x04000000
-/* Chip Select 0 is at offset 0 in the Memory Window */
-#define CS0_START 0x0
-/* Chip Select 0 Timing Register is at offset 0 in CSR */
-#define EXP_TIMING_CS0 0x00
-#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
-#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
-#define TIMING_WR_EN (1 << 1) /* Write Enable */
-#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
-#define TIMING_MASK 0x3FFF0000
-
-static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
-{
- mtd_device_unregister(p->info);
-}
-
-static int vr_nor_init_partitions(struct vr_nor_mtd *p)
-{
- /* register the flash bank */
- /* partition the flash bank */
- return mtd_device_register(p->info, NULL, 0);
-}
-
-static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
-{
- map_destroy(p->info);
-}
-
-static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
-{
- static const char * const probe_types[] =
- { "cfi_probe", "jedec_probe", NULL };
- const char * const *type;
-
- for (type = probe_types; !p->info && *type; type++)
- p->info = do_map_probe(*type, &p->map);
- if (!p->info)
- return -ENODEV;
-
- p->info->dev.parent = &p->dev->dev;
-
- return 0;
-}
-
-static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
-{
- unsigned int exp_timing_cs0;
-
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-}
-
-/*
- * Initialize the map_info structure and map the flash.
- * Returns 0 on success, nonzero otherwise.
- */
-static int vr_nor_init_maps(struct vr_nor_mtd *p)
-{
- unsigned long csr_phys, csr_len;
- unsigned long win_phys, win_len;
- unsigned int exp_timing_cs0;
- int err;
-
- csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
- csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
- win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
- win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
-
- if (!csr_phys || !csr_len || !win_phys || !win_len)
- return -ENODEV;
-
- if (win_len < (CS0_START + CS0_SIZE))
- return -ENXIO;
-
- p->csr_base = ioremap(csr_phys, csr_len);
- if (!p->csr_base)
- return -ENOMEM;
-
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- if (!(exp_timing_cs0 & TIMING_CS_EN)) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is disabled.\n");
- err = -ENODEV;
- goto release;
- }
- if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is configured for maximally slow access times.\n");
- }
- p->map.name = DRV_NAME;
- p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
- p->map.phys = win_phys + CS0_START;
- p->map.size = CS0_SIZE;
- p->map.virt = ioremap(p->map.phys, p->map.size);
- if (!p->map.virt) {
- err = -ENOMEM;
- goto release;
- }
- simple_map_init(&p->map);
-
- /* Enable writes to flash bank */
- exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- return 0;
-
- release:
- iounmap(p->csr_base);
- return err;
-}
-
-static const struct pci_device_id vr_nor_pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
- {0,}
-};
-
-static void vr_nor_pci_remove(struct pci_dev *dev)
-{
- struct vr_nor_mtd *p = pci_get_drvdata(dev);
-
- vr_nor_destroy_partitions(p);
- vr_nor_destroy_mtd_setup(p);
- vr_nor_destroy_maps(p);
- kfree(p);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct vr_nor_mtd *p = NULL;
- unsigned int exp_timing_cs0;
- int err;
-
- err = pci_enable_device(dev);
- if (err)
- goto out;
-
- err = pci_request_regions(dev, DRV_NAME);
- if (err)
- goto disable_dev;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- err = -ENOMEM;
- if (!p)
- goto release;
-
- p->dev = dev;
-
- err = vr_nor_init_maps(p);
- if (err)
- goto release;
-
- err = vr_nor_mtd_setup(p);
- if (err)
- goto destroy_maps;
-
- err = vr_nor_init_partitions(p);
- if (err)
- goto destroy_mtd_setup;
-
- pci_set_drvdata(dev, p);
-
- return 0;
-
- destroy_mtd_setup:
- map_destroy(p->info);
-
- destroy_maps:
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-
- release:
- kfree(p);
- pci_release_regions(dev);
-
- disable_dev:
- pci_disable_device(dev);
-
- out:
- return err;
-}
-
-static struct pci_driver vr_nor_pci_driver = {
- .name = DRV_NAME,
- .probe = vr_nor_pci_probe,
- .remove = vr_nor_pci_remove,
- .id_table = vr_nor_pci_ids,
-};
-
-module_pci_driver(vr_nor_pci_driver);
-
-MODULE_AUTHOR("Andy Lowe");
-MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 67a1dbfdd72c..80d467eba92a 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -118,11 +118,9 @@ ltq_mtd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ltq_mtd);
- ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
GFP_KERNEL);
@@ -131,9 +129,6 @@ ltq_mtd_probe(struct platform_device *pdev)
+ ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, &ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
+
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
- ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
- if (IS_ERR(ltq_mtd->map->virt))
- return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
@@ -171,8 +166,7 @@ err_destroy:
return err;
}
-static int
-ltq_mtd_remove(struct platform_device *pdev)
+static void ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -180,7 +174,6 @@ ltq_mtd_remove(struct platform_device *pdev)
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- return 0;
}
static const struct of_device_id ltq_mtd_match[] = {
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
index 5b684c170d4e..1a4add9e119a 100644
--- a/drivers/mtd/maps/map_funcs.c
+++ b/drivers/mtd/maps/map_funcs.c
@@ -41,4 +41,5 @@ void simple_map_init(struct map_info *map)
}
EXPORT_SYMBOL(simple_map_init);
+MODULE_DESCRIPTION("Out-of-line map I/O");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 2ac79e1cedd9..206a3c463e6e 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -665,6 +665,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
}
pcmciamtd_release(link);
+ kfree(dev);
}
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
index 58782cfaf71c..60dccc48f99e 100644
--- a/drivers/mtd/maps/physmap-bt1-rom.c
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -14,7 +14,6 @@
#include <linux/mtd/xip.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index c73854da5136..2bd7a1af898c 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
@@ -37,7 +38,7 @@
#include <linux/mtd/concat.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
@@ -62,7 +63,7 @@ struct physmap_flash_info {
unsigned int win_order;
};
-static int physmap_flash_remove(struct platform_device *dev)
+static void physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
@@ -88,7 +89,6 @@ static int physmap_flash_remove(struct platform_device *dev)
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
@@ -296,14 +296,9 @@ static const char * const *of_get_part_probes(struct platform_device *dev)
static const char *of_select_probe_type(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
- const struct of_device_id *match;
const char *probe_type;
- match = of_match_device(of_flash_match, &dev->dev);
- if (!match)
- return NULL;
-
- probe_type = match->data;
+ probe_type = device_get_match_data(&dev->dev);
if (probe_type)
return probe_type;
@@ -508,8 +503,7 @@ static int physmap_flash_probe(struct platform_device *dev)
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, i);
- info->maps[i].virt = devm_ioremap_resource(&dev->dev, res);
+ info->maps[i].virt = devm_platform_get_and_ioremap_resource(dev, i, &res);
if (IS_ERR(info->maps[i].virt)) {
err = PTR_ERR(info->maps[i].virt);
goto err_out;
@@ -524,7 +518,7 @@ static int physmap_flash_probe(struct platform_device *dev)
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
- info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->win_order = fls64(resource_size(res)) - 1;
info->maps[i].size = BIT(info->win_order +
(info->gpios ?
info->gpios->ndescs : 0));
@@ -552,6 +546,17 @@ static int physmap_flash_probe(struct platform_device *dev)
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
} else {
int j;
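In the physmap hunk above, win_order is the log2 of the mapping window, later widened by the number of address GPIOs. get_bitmask_order() takes an unsigned int, so a resource of 4 GiB or more (possible with a 64-bit resource_size_t) would be truncated before the bit scan; fls64() performs the same most-significant-bit computation on the full 64-bit value. A userspace model of the computation:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's fls64(): 1-based index of the most
	 * significant set bit, 0 when no bit is set. */
	static int fls64_demo(uint64_t x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		uint64_t size = 1ULL << 32;	/* a 4 GiB flash window */

		/* win_order = fls64(size) - 1 == log2(size) for powers of
		 * two; a 32-bit bit scan would see 0 here. */
		printf("win_order = %d\n", fls64_demo(size) - 1);	/* 32 */
		return 0;
	}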
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index d4a46e159d38..9d3b4bf84a1a 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -8,10 +8,10 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
index 6a054229a8a0..c561468f95f6 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.c
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -11,7 +11,7 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include "physmap-ixp4xx.h"
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
index b0fc49b7f3ed..46824c57e58a 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.h
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index a1b8b7b25f88..2e779111bf79 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -9,9 +9,9 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include "physmap-versatile.h"
@@ -206,7 +206,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
if (!sysnp)
return -ENODEV;
- versatile_flashprot = (enum versatile_flashprot)devid->data;
+ versatile_flashprot = (uintptr_t)devid->data;
rmap = syscon_node_to_regmap(sysnp);
of_node_put(sysnp);
if (IS_ERR(rmap))
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index cedd8ef9a6bf..1c541eaf477a 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -65,14 +65,14 @@ static inline void platram_setrw(struct platram_info *info, int to)
* called to remove the device from the driver's control
*/
-static int platram_remove(struct platform_device *pdev)
+static void platram_remove(struct platform_device *pdev)
{
struct platram_info *info = to_platram_info(pdev);
dev_dbg(&pdev->dev, "removing device\n");
if (info == NULL)
- return 0;
+ return;
if (info->mtd) {
mtd_device_unregister(info->mtd);
@@ -84,8 +84,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
kfree(info);
-
- return 0;
}
/* platram_probe
@@ -123,8 +121,7 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
goto exit_free;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 62a5bf41a6d7..f27c25db6778 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
return 0;
}
-static int pxa2xx_flash_remove(struct platform_device *dev)
+static void pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -109,7 +109,6 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
if (info->map.cached)
iounmap(info->map.cached);
kfree(info);
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d3d4e987c163..6a54a84d0d9c 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -153,7 +153,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
struct flash_platform_data *plat)
{
struct sa_info *info;
- int nr, size, i, ret = 0;
+ int nr, i, ret = 0;
/*
* Count number of devices.
@@ -167,12 +167,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
goto out;
}
- size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
-
/*
* Allocate the map_info structs in one go.
*/
- info = kzalloc(size, GFP_KERNEL);
+ info = kzalloc(struct_size(info, subdev, nr), GFP_KERNEL);
if (!info) {
ret = -ENOMEM;
goto out;
@@ -285,14 +283,12 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
return err;
}
-static int sa1100_mtd_remove(struct platform_device *pdev)
+static void sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
sa1100_destroy(info, plat);
-
- return 0;
}
static struct platform_driver sa1100_mtd_driver = {
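The sa1100 allocation above packs a struct sa_info header and nr trailing struct sa_subdev_info entries into one block. The open-coded sizeof arithmetic can overflow for a hostile nr; struct_size() performs the same computation with overflow checks, saturating to SIZE_MAX so the allocation fails instead of returning an undersized buffer. A userspace model of the helper (the kernel version lives in linux/overflow.h):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct subdev { char name[16]; };

	struct info {
		int nr;
		struct subdev subdev[];	/* flexible array member */
	};

	/* Simplified stand-in for the kernel's struct_size(): saturate
	 * to SIZE_MAX on overflow so malloc() fails cleanly. */
	static size_t struct_size_demo(size_t base, size_t elem, size_t n)
	{
		size_t bytes;

		if (__builtin_mul_overflow(n, elem, &bytes) ||
		    __builtin_add_overflow(bytes, base, &bytes))
			return SIZE_MAX;
		return bytes;
	}

	int main(void)
	{
		size_t n = 4;
		struct info *info;

		info = malloc(struct_size_demo(sizeof(*info),
					       sizeof(struct subdev), n));
		if (!info)
			return 1;
		info->nr = (int)n;
		printf("allocated %zu subdevs in one block\n", n);
		free(info);
		return 0;
	}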
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 860b19f77090..ea3aa026b55b 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
@@ -47,7 +47,7 @@ struct map_info uflash_map_templ = {
.bankwidth = UFLASH_BUSWIDTH,
};
-int uflash_devinit(struct platform_device *op, struct device_node *dp)
+static int uflash_devinit(struct platform_device *op, struct device_node *dp)
{
struct uflash_dev *up;
@@ -118,7 +118,7 @@ static int uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int uflash_remove(struct platform_device *op)
+static void uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -132,8 +132,6 @@ static int uflash_remove(struct platform_device *op)
}
kfree(up);
-
- return 0;
}
static const struct of_device_id uflash_match[] = {
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index a7ec947a3ebb..53019d313db7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev)
card = maple_get_drvdata(mdev);
for (x = 0; x < card->partitions; x++) {
mtd = &((card->mtd)[x]);
- if (mtd->usecount > 0)
+ if (kref_read(&mtd->refcnt))
return 0;
}
return 1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ff18636e0889..28e09d080440 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -246,9 +246,9 @@ unlock:
blktrans_dev_put(dev);
}
-static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int blktrans_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = disk->private_data;
int ret = -ENXIO;
mutex_lock(&dev->lock);
@@ -277,6 +277,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
+ struct queue_limits lim = { };
int last_devnum = -1;
struct gendisk *gd;
int ret;
@@ -328,12 +329,18 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
+
+ lim.logical_block_size = tr->blksize;
+ if (tr->discard)
+ lim.max_hw_discard_sectors = UINT_MAX;
+ if (tr->flush)
+ lim.features |= BLK_FEAT_WRITE_CACHE;
/* Create gendisk */
- gd = blk_mq_alloc_disk(new->tag_set, new);
+ gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tag_set;
@@ -367,20 +374,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create the request queue */
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
-
- if (tr->flush)
- blk_queue_write_cache(new->rq, true, false);
-
- blk_queue_logical_block_size(new->rq, tr->blksize);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
-
- if (tr->discard) {
- blk_queue_max_discard_sectors(new->rq, UINT_MAX);
- new->rq->limits.discard_granularity = tr->blksize;
- }
-
gd->queue = new->rq;
if (new->readonly)
@@ -411,6 +404,7 @@ out_list_del:
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
+ unsigned int memflags;
lockdep_assert_held(&mtd_table_mutex);
@@ -427,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
spin_unlock_irqrestore(&old->queue_lock, flags);
/* freeze+quiesce queue to ensure all requests are flushed */
- blk_mq_freeze_queue(old->rq);
+ memflags = blk_mq_freeze_queue(old->rq);
blk_mq_quiesce_queue(old->rq);
blk_mq_unquiesce_queue(old->rq);
- blk_mq_unfreeze_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq, memflags);
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
@@ -463,7 +457,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
- if (mtd->type == MTD_ABSENT)
+ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
return;
list_for_each_entry(tr, &blktrans_majors, list)
@@ -503,7 +497,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
- if (mtd->type != MTD_ABSENT)
+ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
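The mtd_blkdevs rework above follows the block layer's shift to declarative queue limits: instead of calling blk_queue_*() setters on a live queue (logical block size, discard, write cache, rotational flags), the limits are filled into a struct queue_limits and handed to blk_mq_alloc_disk(), which applies them atomically at allocation time. The call pattern, as a fragment assuming an already configured tag set (kernel context, not compilable standalone):

	struct queue_limits lim = {
		.logical_block_size	= 512,
		.max_hw_discard_sectors	= UINT_MAX,	/* supports discard */
		.features		= BLK_FEAT_WRITE_CACHE,
	};
	struct gendisk *gd;

	/* The third argument becomes the disk's private queuedata. */
	gd = blk_mq_alloc_disk(tag_set, &lim, drv_data);
	if (IS_ERR(gd))
		return PTR_ERR(gd);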
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index fa476fb4dffb..9751416c2a91 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -262,7 +262,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
}
if (mtd_type_is_nand(mbd->mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
mbd->tr->name, mbd->mtd->name);
/* OK, it's not open. Create cache info for it */
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 66ffc9f1ead2..ef6299af60e4 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -49,7 +49,7 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->readonly = 1;
if (mtd_type_is_nand(mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
tr->name, mtd->name);
if (add_mtd_blktrans_dev(dev))
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8dc4f5c493fc..335c702633ff 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -599,6 +599,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -618,7 +619,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size)
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
@@ -698,6 +699,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
size_t datbuf_len, oobbuf_len;
size_t orig_len, orig_ooblen;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -724,7 +726,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size) {
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) {
ret = -EINVAL;
goto out;
}
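req.start and req.len are 64-bit values supplied by userspace, so the old bound check req.start + req.len > mtd->size could be defeated by wraparound: a start of 16 and a length of U64_MAX - 8 sum to 7, which passes. check_add_overflow() (linux/overflow.h) tests the carry explicitly. A userspace demonstration using the underlying compiler builtin:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's check_add_overflow(). */
	#define check_add_overflow(a, b, d) __builtin_add_overflow(a, b, d)

	static bool req_ok(uint64_t start, uint64_t len, uint64_t size)
	{
		uint64_t end;

		/* Unsigned addition wraps; test the carry, not the sum. */
		if (check_add_overflow(start, len, &end) || end > size)
			return false;
		return true;
	}

	int main(void)
	{
		uint64_t size = 1 << 20;	/* 1 MiB device */

		/* Wraps to 7, which "start + len > size" would accept. */
		printf("wrapping request ok? %d\n",
		       req_ok(16, UINT64_MAX - 8, size));	/* 0 */
		printf("valid request ok? %d\n",
		       req_ok(0, 4096, size));			/* 1 */
		return 0;
	}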
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 193428de6a4b..f56f44aa8625 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -204,7 +204,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
- vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+ vecs_copy = kmemdup_array(vecs, count, sizeof(struct kvec), GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e00b12aa5ec9..64808493b4f5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
+#include <linux/error-injection.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -93,10 +94,39 @@ static void mtd_release(struct device *dev)
struct mtd_info *mtd = dev_get_drvdata(dev);
dev_t index = MTD_DEVT(mtd->index);
+ idr_remove(&mtd_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+
/* remove /dev/mtdXro node */
device_destroy(&mtd_class, index + 1);
}
+static void mtd_device_release(struct kref *kref)
+{
+ struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
+ bool is_partition = mtd_is_partition(mtd);
+
+ debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
+ /* Try to remove the NVMEM provider */
+ nvmem_unregister(mtd->nvmem);
+
+ device_unregister(&mtd->dev);
+
+ /*
+ * Clear dev so mtd can be safely re-registered later if desired.
+ * Should not be done for partitions, as their dev was
+ * already destroyed in device_unregister().
+ */
+ if (!is_partition)
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
+ module_put(THIS_MODULE);
+}
+
#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
@@ -354,14 +384,64 @@ EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
static struct dentry *dfs_dir_mtd;
+static int mtd_ooblayout_show(struct seq_file *s, void *p,
+ int (*iter)(struct mtd_info *, int section,
+ struct mtd_oob_region *region))
+{
+ struct mtd_info *mtd = s->private;
+ int section;
+
+ for (section = 0;; section++) {
+ struct mtd_oob_region region;
+ int err;
+
+ err = iter(mtd, section, &region);
+ if (err) {
+ if (err == -ERANGE)
+ break;
+
+ return err;
+ }
+
+ seq_printf(s, "%-3d %4u %4u\n", section, region.offset,
+ region.length);
+ }
+
+ return 0;
+}
+
+static int mtd_ooblayout_ecc_show(struct seq_file *s, void *p)
+{
+ return mtd_ooblayout_show(s, p, mtd_ooblayout_ecc);
+}
+DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_ecc);
+
+static int mtd_ooblayout_free_show(struct seq_file *s, void *p)
+{
+ return mtd_ooblayout_show(s, p, mtd_ooblayout_free);
+}
+DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_free);
+
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
+ struct mtd_oob_region region;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
+ if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir))
+ return;
+
+ /* Create ooblayout files only if at least one region is present. */
+ if (mtd_ooblayout_ecc(mtd, 0, &region) == 0)
+ debugfs_create_file("ooblayout_ecc", 0444, mtd->dbg.dfs_dir,
+ mtd, &mtd_ooblayout_ecc_fops);
+
+ if (mtd_ooblayout_free(mtd, 0, &region) == 0)
+ debugfs_create_file("ooblayout_free", 0444, mtd->dbg.dfs_dir,
+ mtd, &mtd_ooblayout_free_fops);
}
#ifndef CONFIG_MMU
@@ -523,6 +603,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.dev = &mtd->dev;
config.name = dev_name(&mtd->dev);
config.owner = THIS_MODULE;
+ config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
config.reg_read = mtd_nvmem_reg_read;
config.size = mtd->size;
config.word_size = 1;
@@ -530,7 +611,6 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.read_only = true;
config.root_only = true;
config.ignore_wp = true;
- config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
mtd->nvmem = nvmem_register(&config);
@@ -591,6 +671,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
@@ -666,7 +747,7 @@ int add_mtd_device(struct mtd_info *mtd)
}
mtd->index = i;
- mtd->usecount = 0;
+ kref_init(&mtd->refcnt);
/* default value if not set by driver */
if (mtd->bitflip_threshold == 0)
@@ -710,7 +791,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (error)
+ goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
@@ -759,6 +842,7 @@ fail_nvmem_add:
device_unregister(&mtd->dev);
fail_added:
of_node_put(mtd_get_of_node(mtd));
+fail_devname:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
@@ -779,7 +863,6 @@ int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
- struct device_node *mtd_of_node;
mutex_lock(&mtd_table_mutex);
@@ -793,28 +876,8 @@ int del_mtd_device(struct mtd_info *mtd)
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
- if (mtd->usecount) {
- printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
- mtd->index, mtd->name, mtd->usecount);
- ret = -EBUSY;
- } else {
- mtd_of_node = mtd_get_of_node(mtd);
- debugfs_remove_recursive(mtd->dbg.dfs_dir);
-
- /* Try to remove the NVMEM provider */
- nvmem_unregister(mtd->nvmem);
-
- device_unregister(&mtd->dev);
-
- /* Clear dev so mtd can be safely re-registered later if desired */
- memset(&mtd->dev, 0, sizeof(mtd->dev));
-
- idr_remove(&mtd_idr, mtd->index);
- of_node_put(mtd_of_node);
-
- module_put(THIS_MODULE);
- ret = 0;
- }
+ kref_put(&mtd->refcnt, mtd_device_release);
+ ret = 0;
out_error:
mutex_unlock(&mtd_table_mutex);
@@ -890,6 +953,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
@@ -945,8 +1009,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
size = mtd_otp_size(mtd, true);
- if (size < 0)
- return size;
+ if (size < 0) {
+ err = size;
+ goto err;
+ }
if (size > 0) {
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
@@ -1001,6 +1067,9 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
err:
nvmem_unregister(mtd->otp_user_nvmem);
+ /* Don't report error if OTP is not supported. */
+ if (err == -EOPNOTSUPP)
+ return 0;
return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}
@@ -1037,7 +1106,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int ret;
+ int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1089,8 +1158,11 @@ out:
nvmem_unregister(mtd->otp_factory_nvmem);
}
- if (ret && device_is_registered(&mtd->dev))
- del_mtd_device(mtd);
+ if (ret && device_is_registered(&mtd->dev)) {
+ err = del_mtd_device(mtd);
+ if (err)
+ pr_err("Error when deleting MTD device (%d)\n", err);
+ }
return ret;
}
@@ -1227,25 +1299,27 @@ int __get_mtd_device(struct mtd_info *mtd)
struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(master->owner))
- return -ENODEV;
-
if (master->_get_device) {
err = master->_get_device(mtd);
-
- if (err) {
- module_put(master->owner);
+ if (err)
return err;
- }
}
- master->usecount++;
+ if (!try_module_get(master->owner)) {
+ if (master->_put_device)
+ master->_put_device(master);
+ return -ENODEV;
+ }
- while (mtd->parent) {
- mtd->usecount++;
+ while (mtd) {
+ if (mtd != master)
+ kref_get(&mtd->refcnt);
mtd = mtd->parent;
}
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_get(&master->refcnt);
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1329,18 +1403,23 @@ void __put_mtd_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
- while (mtd->parent) {
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
- mtd = mtd->parent;
+ while (mtd) {
+ /* kref_put() can release mtd, so keep a reference to mtd->parent */
+ struct mtd_info *parent = mtd->parent;
+
+ if (mtd != master)
+ kref_put(&mtd->refcnt, mtd_device_release);
+ mtd = parent;
}
- master->usecount--;
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_put(&master->refcnt, mtd_device_release);
+
+ module_put(master->owner);
+ /* must be the last as master can be freed in the _put_device */
if (master->_put_device)
master->_put_device(master);
-
- module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
@@ -1396,6 +1475,7 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
+ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
/*
* This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
@@ -1490,9 +1570,12 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
ret = mtd_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
+ WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
+ALLOW_ERROR_INJECTION(mtd_read, ERRNO);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
@@ -1509,6 +1592,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
+ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
/*
* In blackbox flight recorder like scenarios we want to make successful writes
@@ -2305,6 +2389,7 @@ EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
+ loff_t moffs;
int ret;
if (!master->_block_markbad)
@@ -2317,7 +2402,15 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
- ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ moffs = mtd_get_master_ofs(mtd, ofs);
+
+ if (master->_block_isbad) {
+ ret = master->_block_isbad(master, moffs);
+ if (ret > 0)
+ return 0;
+ }
+
+ ret = master->_block_markbad(master, moffs);
if (ret)
return ret;
@@ -2329,6 +2422,7 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
+ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
/*
* default_mtd_writev - the default writev method
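The core of the mtdcore rework above is the move from the open-coded mtd->usecount integer to a struct kref. With usecount, del_mtd_device() had to fail with -EBUSY while any user held the device; with a kref, deletion simply drops the registration reference, and mtd_device_release() runs whenever the last user calls __put_mtd_device(), so teardown is deferred instead of refused (this is also why vmu-flash above samples the counter with kref_read()). The general kref lifecycle, sketched for a generic object:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref refcnt;
		/* ... payload ... */
	};

	static void obj_release(struct kref *kref)
	{
		/* Runs exactly once, when the last reference is dropped. */
		kfree(container_of(kref, struct obj, refcnt));
	}

	static struct obj *obj_create(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o)
			kref_init(&o->refcnt);	/* count starts at 1 */
		return o;
	}

	static struct obj *obj_get(struct obj *o)
	{
		kref_get(&o->refcnt);
		return o;
	}

	static void obj_put(struct obj *o)
	{
		/* May free o: never touch it afterwards (hence the
		 * mtd->parent caching in __put_mtd_device() above). */
		kref_put(&o->refcnt, obj_release);
	}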
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index b5eefeabf310..b014861a06a6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -12,6 +12,7 @@ int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+void release_mtd_partition(struct mtd_info *mtd);
struct mtd_partitions;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 2f11585b5613..b88083751a0c 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -298,14 +298,14 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
struct kmsg_dump_iter iter;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (detail->reason == KMSG_DUMP_OOPS && !dump_oops)
return;
kmsg_dump_rewind(&iter);
@@ -317,7 +317,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
record_size - sizeof(struct mtdoops_hdr), NULL);
clear_bit(0, &cxt->oops_buf_busy);
- if (reason != KMSG_DUMP_OOPS) {
+ if (detail->reason != KMSG_DUMP_OOPS) {
/* Panics must be written immediately */
mtdoops_write(cxt, 1);
} else {
@@ -356,9 +356,8 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used =
- vmalloc(array_size(sizeof(unsigned long),
- DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG)));
+ vmalloc_array(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG),
+ sizeof(unsigned long));
if (!cxt->oops_page_used) {
pr_err("could not allocate page array\n");
return;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a46affbb037d..2876501a7814 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -32,6 +32,12 @@ static inline void free_partition(struct mtd_info *mtd)
kfree(mtd);
}
+void release_mtd_partition(struct mtd_info *mtd)
+{
+ WARN_ON(!list_empty(&mtd->part.node));
+ free_partition(mtd);
+}
+
static struct mtd_info *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
@@ -309,13 +315,11 @@ static int __mtd_del_partition(struct mtd_info *mtd)
sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
+ list_del_init(&mtd->part.node);
err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&mtd->part.node);
- free_partition(mtd);
-
return 0;
}
@@ -333,6 +337,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
__del_mtd_partitions(child);
pr_info("Deleting %s MTD partition\n", child->name);
+ list_del_init(&child->part.node);
ret = del_mtd_device(child);
if (ret < 0) {
pr_err("Error when deleting partition \"%s\" (%d)\n",
@@ -340,9 +345,6 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
err = ret;
continue;
}
-
- list_del(&child->part.node);
- free_partition(child);
}
return err;
@@ -423,8 +425,15 @@ int add_mtd_partitions(struct mtd_info *parent,
mtd_add_partition_attrs(child);
- /* Look for subpartitions */
- parse_mtd_partitions(child, parts[i].types, NULL);
+ /* Look for subpartitions (skip if no matching parser found) */
+ ret = parse_mtd_partitions(child, parts[i].types, NULL);
+ if (ret == -ENOENT) {
+ pr_debug("Skip parsing subpartitions: %d\n", ret);
+ continue;
+ } else if (ret < 0) {
+ pr_err("Failed to parse subpartitions: %d\n", ret);
+ goto err_del_partitions;
+ }
cur_offset = child->part.offset + child->part.size;
}
@@ -684,10 +693,9 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser = mtd_part_parser_get(*types);
if (!parser && !request_module("%s", *types))
parser = mtd_part_parser_get(*types);
- pr_debug("%s: got parser %s\n", master->name,
- parser ? parser->name : NULL);
if (!parser)
continue;
+ pr_debug("%s: got parser %s\n", master->name, parser->name);
ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
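The mtdpart side of the kref change: since a partition can now outlive del_mtd_device() while a user still holds it, the partition is unlinked from its parent's list immediately, and the free is deferred to release_mtd_partition(), invoked from the device release path. list_del_init() is used rather than list_del() because the latter poisons the node, while the former leaves it self-linked, so the WARN_ON(!list_empty(...)) in release_mtd_partition() remains a valid test. In kernel list terms (fragment):

	LIST_HEAD(parent_list);
	struct list_head node;

	INIT_LIST_HEAD(&node);
	list_add(&node, &parent_list);

	list_del_init(&node);		/* unlink and re-point at itself */
	WARN_ON(!list_empty(&node));	/* does not fire: node reads empty */

	/* list_del(&node) instead would poison the node; calling
	 * list_empty(&node) on it afterwards would be invalid. */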
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index 7ac8ac901306..9cf3872e37ae 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
}
longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
- cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
- cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
- cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+
+ if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap)
+ return;
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
@@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd)
mtdpstore_flush_removed(cxt);
unregister_pstore_device(&cxt->dev);
- kfree(cxt->badmap);
- kfree(cxt->usedmap);
- kfree(cxt->rmmap);
cxt->mtd = NULL;
cxt->index = -1;
}
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 5ff001140ef4..b7e3763c47f0 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -20,38 +20,6 @@
#include "mtdcore.h"
/*
- * compare superblocks to see if they're equivalent
- * - they are if the underlying MTD device is the same
- */
-static int mtd_test_super(struct super_block *sb, struct fs_context *fc)
-{
- struct mtd_info *mtd = fc->sget_key;
-
- if (sb->s_mtd == fc->sget_key) {
- pr_debug("MTDSB: Match on device %d (\"%s\")\n",
- mtd->index, mtd->name);
- return 1;
- }
-
- pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
- sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
- return 0;
-}
-
-/*
- * mark the superblock by the MTD device it is using
- * - set the device number to be the correct MTD block device for persistence
- * of NFS exports
- */
-static int mtd_set_super(struct super_block *sb, struct fs_context *fc)
-{
- sb->s_mtd = fc->sget_key;
- sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
- sb->s_bdi = bdi_get(mtd_bdi);
- return 0;
-}
-
-/*
* get a superblock on an MTD-backed filesystem
*/
static int mtd_get_sb(struct fs_context *fc,
@@ -62,8 +30,7 @@ static int mtd_get_sb(struct fs_context *fc,
struct super_block *sb;
int ret;
- fc->sget_key = mtd;
- sb = sget_fc(fc, mtd_test_super, mtd_set_super);
+ sb = sget_dev(fc, MKDEV(MTD_BLOCK_MAJOR, mtd->index));
if (IS_ERR(sb))
return PTR_ERR(sb);
@@ -77,6 +44,16 @@ static int mtd_get_sb(struct fs_context *fc,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
+ /*
+ * This would usually have been set with @sb_lock held, but in
+ * contrast to sb->s_bdev, which is checked with only
+ * @sb_lock held, nothing checks sb->s_mtd without also
+ * holding sb->s_umount, and we are holding sb->s_umount
+ * here.
+ */
+ sb->s_mtd = mtd;
+ sb->s_bdi = bdi_get(mtd_bdi);
+
ret = fill_super(sb, fc);
if (ret < 0)
goto error_sb;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 680366616da2..d8f2e5be2d31 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1285,11 +1285,11 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
for (i = 0; i < MTDSWAP_TREE_CNT; i++)
d->trees[i].root = RB_ROOT;
- d->page_data = vmalloc(array_size(pages, sizeof(int)));
+ d->page_data = vmalloc_array(pages, sizeof(int));
if (!d->page_data)
goto page_data_fail;
- d->revmap = vmalloc(array_size(blocks, sizeof(int)));
+ d->revmap = vmalloc_array(blocks, sizeof(int));
if (!d->revmap)
goto revmap_fail;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b0c2c95f10c..1e57c8de8578 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -61,6 +61,14 @@ config MTD_NAND_ECC_MEDIATEK
help
This enables support for the hardware ECC engine from Mediatek.
+config MTD_NAND_ECC_REALTEK
+ tristate "Realtek RTL93xx hardware ECC engine"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ select MTD_NAND_ECC
+ help
+ This enables support for the hardware ECC engine from Realtek.
+
endmenu
endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19e1291ac4d5..2e0e56267718 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,9 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-
+obj-$(CONFIG_MTD_NAND_ECC_REALTEK) += ecc-realtek.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 7737b1a4a177..3e76d127715f 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -13,6 +13,137 @@
#include <linux/mtd/nand.h>
/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf() exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contain only
+ * the 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms are working on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also their associated ECC data, because a user might
+ * have programmed all but a few bits to 1. In this case, we
+ * shouldn't consider the chunk erased, and checking the ECC bytes prevents
+ * this case.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
+
+/**
* nanddev_isbad() - Check if a block is bad
* @nand: NAND device
* @pos: position pointing to the block we want to check
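
[Editor's note: notes 1/ and 2/ above prescribe running the check once per ECC chunk and folding the result into a page-wide maximum. A hypothetical read-page error path sketched against nand_check_erased_ecc_chunk(); the function name and the 512/10-byte geometry are made up for illustration.]

/*
 * Hypothetical read-page error path following notes 1/ and 2/ above.
 * The 512-byte step and 10-byte ECC geometry are illustrative only.
 */
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static unsigned int example_fix_erased_page(struct nand_device *nand,
					    u8 *data, u8 *ecc,
					    int steps, int threshold)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int max_bitflips = 0;
	int i, ret;

	/* One call per ECC chunk, never on the whole page (note 1/) */
	for (i = 0; i < steps; i++, data += 512, ecc += 10) {
		ret = nand_check_erased_ecc_chunk(data, 512, ecc, 10,
						  NULL, 0, threshold);
		if (ret < 0) {
			/* Not erased and not correctable */
			mtd->ecc_stats.failed++;
			continue;
		}

		/* Erased chunk, possibly with a few bitflips */
		mtd->ecc_stats.corrected += ret;
		if ((unsigned int)ret > max_bitflips)
			max_bitflips = ret;
	}

	return max_bitflips;
}
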
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 22a760e6024e..60cdcb4175ef 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -18,7 +18,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -322,14 +322,14 @@ static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
sg_init_table(ctx->sg, 2);
/* Configuration dump and sanity checks */
- dev_err(dev, "DPE version number: %d\n",
+ dev_dbg(dev, "DPE version number: %d\n",
readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
- dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
- dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
- dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
- dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
- dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
- dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
+ dev_dbg(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
+ dev_dbg(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
+ dev_dbg(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
+ dev_dbg(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
+ dev_dbg(dev, "Parity size: %d\n", ctx->parity_sz);
+ dev_dbg(dev, "Meta size: %d\n", ctx->meta_sz);
if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
SPARE_SZ(spare_reg)) {
@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
- int nents, step, ret;
+ int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
@@ -723,21 +723,21 @@ static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
return ret;
}
-static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
+static const struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
.init_ctx = mxic_ecc_init_ctx_external,
.cleanup_ctx = mxic_ecc_cleanup_ctx,
.prepare_io_req = mxic_ecc_prepare_io_req_external,
.finish_io_req = mxic_ecc_finish_io_req_external,
};
-static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
+static const struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
.init_ctx = mxic_ecc_init_ctx_pipelined,
.cleanup_ctx = mxic_ecc_cleanup_ctx,
.prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
.finish_io_req = mxic_ecc_finish_io_req_pipelined,
};
-struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
return &mxic_ecc_engine_pipelined_ops;
}
@@ -869,7 +869,7 @@ static struct platform_driver mxic_ecc_driver = {
.of_match_table = mxic_ecc_of_ids,
},
.probe = mxic_ecc_probe,
- .remove_new = mxic_ecc_remove,
+ .remove = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);
diff --git a/drivers/mtd/nand/ecc-realtek.c b/drivers/mtd/nand/ecc-realtek.c
new file mode 100644
index 000000000000..0046da37ea3e
--- /dev/null
+++ b/drivers/mtd/nand/ecc-realtek.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Realtek hardware ECC engine in RTL93xx SoCs
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/mtd/nand.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/*
+ * The Realtek ECC engine has two operation modes.
+ *
+ * - BCH6 : Generate 10 ECC bytes from 512 data bytes plus 6 free bytes
+ * - BCH12 : Generate 20 ECC bytes from 512 data bytes plus 6 free bytes
+ *
+ * It can run for arbitrary NAND flash chips with different block and OOB sizes. Currently there
+ * are only two known devices in the wild that have NAND flash and make use of this ECC engine
+ * (Linksys LGS328C & LGS352C). To keep compatibility with vendor firmware, new modes can only
+ * be added when new data layouts have been analyzed. For now allow BCH6 on flash with 2048 byte
+ * blocks and 64 bytes oob.
+ *
+ * This driver aligns with kernel ECC naming conventions. Nevertheless, a short note on the
+ * Realtek naming conventions for the different structures in the OOB area:
+ *
+ * - BBI : Bad block indicator. The first two bytes of OOB. Protected by ECC!
+ * - tag : 6 User/free bytes. First tag "contains" 2 bytes BBI. Protected by ECC!
+ * - syndrome : ECC/parity bytes
+ *
+ * Altogether this currently gives the following page layout.
+ *
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ * | 512 | 512 | 512 | 512 | 2 | 4 | 6 | 6 | 6 | 10 | 10 | 10 | 10 |
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ * | data | data | data | data | BBI | free | free | free | free | ECC | ECC | ECC | ECC |
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ */
+
+#define RTL_ECC_ALLOWED_PAGE_SIZE 2048
+#define RTL_ECC_ALLOWED_OOB_SIZE 64
+#define RTL_ECC_ALLOWED_STRENGTH 6
+
+#define RTL_ECC_BLOCK_SIZE 512
+#define RTL_ECC_FREE_SIZE 6
+#define RTL_ECC_PARITY_SIZE_BCH6 10
+#define RTL_ECC_PARITY_SIZE_BCH12 20
+
+/*
+ * The engine is fed with two DMA regions. One for data (always 512 bytes) and one for free bytes
+ * and parity (either 16 bytes for BCH6 or 26 bytes for BCH12). Start and length of each must be
+ * aligned to a multiple of 4.
+ */
+
+#define RTL_ECC_DMA_FREE_PARITY_SIZE ALIGN(RTL_ECC_FREE_SIZE + RTL_ECC_PARITY_SIZE_BCH12, 4)
+#define RTL_ECC_DMA_SIZE (RTL_ECC_BLOCK_SIZE + RTL_ECC_DMA_FREE_PARITY_SIZE)
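
[Editor's note: a quick standalone re-check of the arithmetic encoded in the two macros above, assuming the BCH12 worst case: ALIGN(6 + 20, 4) rounds 26 up to 28, giving a 540-byte DMA buffer. Not kernel code.]

/* Standalone re-check of the DMA buffer sizing above. */
#include <assert.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int free_parity = ALIGN(6 + 20, 4);	/* BCH12 worst case */

	assert(free_parity == 28);		/* 26 rounded up to 28 */
	printf("DMA buffer: %d bytes\n", 512 + free_parity);	/* 540 */
	return 0;
}
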
+
+#define RTL_ECC_CFG 0x00
+#define RTL_ECC_BCH6 0
+#define RTL_ECC_BCH12 BIT(28)
+#define RTL_ECC_DMA_PRECISE BIT(12)
+#define RTL_ECC_BURST_128 GENMASK(1, 0)
+#define RTL_ECC_DMA_TRIGGER 0x08
+#define RTL_ECC_OP_DECODE 0
+#define RTL_ECC_OP_ENCODE BIT(0)
+#define RTL_ECC_DMA_START 0x0c
+#define RTL_ECC_DMA_TAG 0x10
+#define RTL_ECC_STATUS 0x14
+#define RTL_ECC_CORR_COUNT GENMASK(19, 12)
+#define RTL_ECC_RESULT BIT(8)
+#define RTL_ECC_ALL_ONE BIT(4)
+#define RTL_ECC_OP_STATUS BIT(0)
+
+struct rtl_ecc_engine {
+ struct device *dev;
+ struct nand_ecc_engine engine;
+ struct mutex lock;
+ char *buf;
+ dma_addr_t buf_dma;
+ struct regmap *regmap;
+};
+
+struct rtl_ecc_ctx {
+ struct rtl_ecc_engine *rtlc;
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ int steps;
+ int bch_mode;
+ int strength;
+ int parity_size;
+};
+
+static const struct regmap_config rtl_ecc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline void *nand_to_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
+static inline struct rtl_ecc_engine *nand_to_rtlc(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ return container_of(eng, struct rtl_ecc_engine, engine);
+}
+
+static int rtl_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ oobregion->offset = ctx->steps * RTL_ECC_FREE_SIZE + section * ctx->parity_size;
+ oobregion->length = ctx->parity_size;
+
+ return 0;
+}
+
+static int rtl_ecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ int bbm;
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ /* reserve 2 BBM bytes in first block */
+ bbm = section ? 0 : 2;
+ oobregion->offset = section * RTL_ECC_FREE_SIZE + bbm;
+ oobregion->length = RTL_ECC_FREE_SIZE - bbm;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rtl_ecc_ooblayout_ops = {
+ .ecc = rtl_ecc_ooblayout_ecc,
+ .free = rtl_ecc_ooblayout_free,
+};
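
[Editor's note: for the only supported geometry (2048-byte pages, 64 bytes OOB, four BCH6 steps) the two callbacks above produce a fixed OOB map. A small standalone program reproducing their formulas, with steps = 4 and parity_size = 10 assumed.]

/* Reproduce the OOB layout formulas above for steps=4, parity=10. */
#include <stdio.h>

int main(void)
{
	const int steps = 4, free_sz = 6, parity_sz = 10;

	for (int s = 0; s < steps; s++) {
		int bbm = s ? 0 : 2;	/* 2 BBM bytes reserved in section 0 */

		printf("free %d: off=%2d len=%d\n",
		       s, s * free_sz + bbm, free_sz - bbm);
	}
	for (int s = 0; s < steps; s++)
		printf("ecc  %d: off=%2d len=%d\n",
		       s, steps * free_sz + s * parity_sz, parity_sz);
	return 0;
}

This prints free regions at offsets 2/6/12/18 (lengths 4/6/6/6) and ECC regions at 24/34/44/54 (length 10 each), which together account for exactly the 64 OOB bytes.
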
+
+static void rtl_ecc_kick_engine(struct rtl_ecc_ctx *ctx, int operation)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+
+ regmap_write(rtlc->regmap, RTL_ECC_CFG,
+ ctx->bch_mode | RTL_ECC_BURST_128 | RTL_ECC_DMA_PRECISE);
+
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_START, rtlc->buf_dma);
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_TAG, rtlc->buf_dma + RTL_ECC_BLOCK_SIZE);
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_TRIGGER, operation);
+}
+
+static int rtl_ecc_wait_for_engine(struct rtl_ecc_ctx *ctx)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+ int ret, status, bitflips;
+ bool all_one;
+
+ /*
+ * The ECC engine needs 6-8 us to encode/decode a BCH6 syndrome for 512 bytes of data
+ * and 6 free bytes. In case the NAND area has been erased and all data and oob is
+ * set to 0xff, decoding takes 30us (reason unknown). Although the engine can trigger
+ * interrupts when finished, use active polling for now. 12 us maximum wait time has
+ * proven to be a good tradeoff between performance and overhead.
+ */
+
+ ret = regmap_read_poll_timeout(rtlc->regmap, RTL_ECC_STATUS, status,
+ !(status & RTL_ECC_OP_STATUS), 12, 1000000);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(RTL_ECC_RESULT, status);
+ all_one = FIELD_GET(RTL_ECC_ALL_ONE, status);
+ bitflips = FIELD_GET(RTL_ECC_CORR_COUNT, status);
+
+ /* For erased blocks (all bits one) error status can be ignored */
+ if (all_one)
+ ret = 0;
+
+ return ret ? -EBADMSG : bitflips;
+}
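
[Editor's note: the status word packs the correction count, decode result, all-one flag and busy bit into one register. The shift-and-mask arithmetic below mirrors the FIELD_GET() calls above; the status value is invented for illustration.]

/* Decode RTL_ECC_STATUS the way the FIELD_GET() calls above do. */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x00003000;		/* illustrative value */
	unsigned int corr = (status >> 12) & 0xff;	/* GENMASK(19, 12) */
	unsigned int result = (status >> 8) & 1;	/* BIT(8) */
	unsigned int all_one = (status >> 4) & 1;	/* BIT(4) */
	unsigned int busy = status & 1;			/* BIT(0) */

	/* 3 bitflips corrected, result bit clear (success), engine idle */
	printf("corr=%u result=%u all_one=%u busy=%u\n",
	       corr, result, all_one, busy);
	return 0;
}
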
+
+static int rtl_ecc_run_engine(struct rtl_ecc_ctx *ctx, char *data, char *free,
+ char *parity, int operation)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+ char *buf_parity = rtlc->buf + RTL_ECC_BLOCK_SIZE + RTL_ECC_FREE_SIZE;
+ char *buf_free = rtlc->buf + RTL_ECC_BLOCK_SIZE;
+ char *buf_data = rtlc->buf;
+ int ret;
+
+ mutex_lock(&rtlc->lock);
+
+ memcpy(buf_data, data, RTL_ECC_BLOCK_SIZE);
+ memcpy(buf_free, free, RTL_ECC_FREE_SIZE);
+ memcpy(buf_parity, parity, ctx->parity_size);
+
+ dma_sync_single_for_device(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_TO_DEVICE);
+ rtl_ecc_kick_engine(ctx, operation);
+ ret = rtl_ecc_wait_for_engine(ctx);
+ dma_sync_single_for_cpu(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_FROM_DEVICE);
+
+ if (ret >= 0) {
+ memcpy(data, buf_data, RTL_ECC_BLOCK_SIZE);
+ memcpy(free, buf_free, RTL_ECC_FREE_SIZE);
+ memcpy(parity, buf_parity, ctx->parity_size);
+ }
+
+ mutex_unlock(&rtlc->lock);
+
+ return ret;
+}
+
+static int rtl_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req)
+{
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ char *data, *free, *parity;
+ int ret = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ free = req->oobbuf.in;
+ data = req->databuf.in;
+ parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE;
+
+ for (int i = 0; i < ctx->steps; i++) {
+ ret |= rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_ENCODE);
+
+ free += RTL_ECC_FREE_SIZE;
+ data += RTL_ECC_BLOCK_SIZE;
+ parity += ctx->parity_size;
+ }
+
+ if (unlikely(ret))
+ dev_dbg(rtlc->dev, "ECC calculation failed\n");
+
+ return ret ? -EBADMSG : 0;
+}
+
+static int rtl_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req)
+{
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ char *data, *free, *parity;
+ bool failure = false;
+ int bitflips = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return 0;
+ }
+
+ free = req->oobbuf.in;
+ data = req->databuf.in;
+ parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE;
+
+ for (int i = 0; i < ctx->steps; i++) {
+ int ret = rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_DECODE);
+
+ if (unlikely(ret < 0))
+ /* ECC totally fails for bitflips in erased blocks */
+ ret = nand_check_erased_ecc_chunk(data, RTL_ECC_BLOCK_SIZE,
+ parity, ctx->parity_size,
+ free, RTL_ECC_FREE_SIZE,
+ ctx->strength);
+ if (unlikely(ret < 0)) {
+ failure = true;
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(unsigned int, bitflips, ret);
+ }
+
+ free += RTL_ECC_FREE_SIZE;
+ data += RTL_ECC_BLOCK_SIZE;
+ parity += ctx->parity_size;
+ }
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ if (unlikely(failure))
+ dev_dbg(rtlc->dev, "ECC correction failed\n");
+ else if (unlikely(bitflips > 2))
+ dev_dbg(rtlc->dev, "%d bitflips detected\n", bitflips);
+
+ return failure ? -EBADMSG : bitflips;
+}
+
+static int rtl_ecc_check_support(struct nand_device *nand)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct device *dev = nand->ecc.engine->dev;
+
+ if (mtd->oobsize != RTL_ECC_ALLOWED_OOB_SIZE ||
+ mtd->writesize != RTL_ECC_ALLOWED_PAGE_SIZE) {
+ dev_err(dev, "only flash geometry data=%d, oob=%d supported\n",
+ RTL_ECC_ALLOWED_PAGE_SIZE, RTL_ECC_ALLOWED_OOB_SIZE);
+ return -EINVAL;
+ }
+
+ if (nand->ecc.user_conf.algo != NAND_ECC_ALGO_BCH ||
+ nand->ecc.user_conf.strength != RTL_ECC_ALLOWED_STRENGTH ||
+ nand->ecc.user_conf.placement != NAND_ECC_PLACEMENT_OOB ||
+ nand->ecc.user_conf.step_size != RTL_ECC_BLOCK_SIZE) {
+ dev_err(dev, "only algo=bch, strength=%d, placement=oob, step=%d supported\n",
+ RTL_ECC_ALLOWED_STRENGTH, RTL_ECC_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtl_ecc_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int strength = nand->ecc.user_conf.strength;
+ struct device *dev = nand->ecc.engine->dev;
+ struct rtl_ecc_ctx *ctx;
+ int ret;
+
+ ret = rtl_ecc_check_support(nand);
+ if (ret)
+ return ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ctx;
+ mtd_set_ooblayout(mtd, &rtl_ecc_ooblayout_ops);
+
+ conf->algo = NAND_ECC_ALGO_BCH;
+ conf->strength = strength;
+ conf->step_size = RTL_ECC_BLOCK_SIZE;
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ ctx->rtlc = rtlc;
+ ctx->steps = mtd->writesize / RTL_ECC_BLOCK_SIZE;
+ ctx->strength = strength;
+ ctx->bch_mode = strength == 6 ? RTL_ECC_BCH6 : RTL_ECC_BCH12;
+ ctx->parity_size = strength == 6 ? RTL_ECC_PARITY_SIZE_BCH6 : RTL_ECC_PARITY_SIZE_BCH12;
+
+ ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "using bch%d with geometry data=%dx%d, free=%dx6, parity=%dx%d",
+ conf->strength, ctx->steps, conf->step_size,
+ ctx->steps, ctx->steps, ctx->parity_size);
+
+ return 0;
+}
+
+static void rtl_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+
+ if (ctx)
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+}
+
+static const struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
+ .init_ctx = rtl_ecc_init_ctx,
+ .cleanup_ctx = rtl_ecc_cleanup_ctx,
+ .prepare_io_req = rtl_ecc_prepare_io_req,
+ .finish_io_req = rtl_ecc_finish_io_req,
+};
+
+static int rtl_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtl_ecc_engine *rtlc;
+ void __iomem *base;
+ int ret;
+
+ rtlc = devm_kzalloc(dev, sizeof(*rtlc), GFP_KERNEL);
+ if (!rtlc)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = devm_mutex_init(dev, &rtlc->lock);
+ if (ret)
+ return ret;
+
+ rtlc->regmap = devm_regmap_init_mmio(dev, base, &rtl_ecc_regmap_config);
+ if (IS_ERR(rtlc->regmap))
+ return PTR_ERR(rtlc->regmap);
+
+ /*
+ * Focus on simplicity and use a preallocated DMA buffer for data exchange with the
+ * engine. For now make it a noncoherent memory model as invalidating/flushing caches
+ * is faster than reading/writing uncached memory on the known architectures.
+ */
+
+ rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!rtlc->buf)
+ return -ENOMEM;
+
+ rtlc->dev = dev;
+ rtlc->engine.dev = dev;
+ rtlc->engine.ops = &rtl_ecc_engine_ops;
+ rtlc->engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
+
+ nand_ecc_register_on_host_hw_engine(&rtlc->engine);
+
+ platform_set_drvdata(pdev, rtlc);
+
+ return 0;
+}
+
+static void rtl_ecc_remove(struct platform_device *pdev)
+{
+ struct rtl_ecc_engine *rtlc = platform_get_drvdata(pdev);
+
+ nand_ecc_unregister_on_host_hw_engine(&rtlc->engine);
+ dma_free_noncoherent(rtlc->dev, RTL_ECC_DMA_SIZE, rtlc->buf, rtlc->buf_dma,
+ DMA_BIDIRECTIONAL);
+}
+
+static const struct of_device_id rtl_ecc_of_ids[] = {
+ {
+ .compatible = "realtek,rtl9301-ecc",
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver rtl_ecc_driver = {
+ .driver = {
+ .name = "rtl-nand-ecc-engine",
+ .of_match_table = rtl_ecc_of_ids,
+ },
+ .probe = rtl_ecc_probe,
+ .remove = rtl_ecc_remove,
+};
+module_platform_driver(rtl_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Markus Stockhausen <markus.stockhausen@gmx.de>");
+MODULE_DESCRIPTION("Realtek NAND hardware ECC controller");
diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c
index 405552d014a8..0d9310dd6f52 100644
--- a/drivers/mtd/nand/ecc-sw-bch.c
+++ b/drivers/mtd/nand/ecc-sw-bch.c
@@ -384,7 +384,7 @@ static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand,
return max_bitflips;
}
-static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = {
+static const struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = {
.init_ctx = nand_ecc_sw_bch_init_ctx,
.cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx,
.prepare_io_req = nand_ecc_sw_bch_prepare_io_req,
diff --git a/drivers/mtd/nand/ecc-sw-hamming.c b/drivers/mtd/nand/ecc-sw-hamming.c
index 254db2e7f8bb..f2d0effad9d2 100644
--- a/drivers/mtd/nand/ecc-sw-hamming.c
+++ b/drivers/mtd/nand/ecc-sw-hamming.c
@@ -638,7 +638,7 @@ static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand,
return max_bitflips;
}
-static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = {
+static const struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = {
.init_ctx = nand_ecc_sw_hamming_init_ctx,
.cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx,
.prepare_io_req = nand_ecc_sw_hamming_prepare_io_req,
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 5250764cedee..6ccdff3fc913 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -95,9 +95,9 @@
#include <linux/module.h>
#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
static LIST_HEAD(on_host_hw_engines);
@@ -552,7 +552,7 @@ void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
}
- /* Copy the data that must be writen in the bounce buffers, if needed */
+ /* Copy the data that must be written in the bounce buffers, if needed */
if (orig->type == NAND_PAGE_WRITE) {
if (ctx->bounce_data)
memcpy((void *)tweak->databuf.out + orig->dataoffs,
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
index 4e7de48f07a6..4e6fd1c34484 100644
--- a/drivers/mtd/nand/onenand/generic.c
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -104,7 +104,7 @@ static struct platform_driver generic_onenand_driver = {
.name = DRIVER_NAME,
},
.probe = generic_onenand_probe,
- .remove_new = generic_onenand_remove,
+ .remove = generic_onenand_remove,
};
module_platform_driver(generic_onenand_driver);
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index f66385faf631..0dc2ea4fc857 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
+ *retlen = ops.retlen;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index ff7af98604df..0793251ada3b 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -13,7 +13,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -467,12 +467,6 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "error getting memory resource\n");
- return -EINVAL;
- }
-
r = of_property_read_u32(np, "reg", &val);
if (r) {
dev_err(dev, "reg not found in DT\n");
@@ -486,11 +480,11 @@ static int omap2_onenand_probe(struct platform_device *pdev)
init_completion(&c->irq_done);
init_completion(&c->dma_done);
c->gpmc_cs = val;
- c->phys_base = res->start;
- c->onenand.base = devm_ioremap_resource(dev, res);
+ c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(c->onenand.base))
return PTR_ERR(c->onenand.base);
+ c->phys_base = res->start;
c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
if (IS_ERR(c->int_gpiod)) {
@@ -599,7 +593,7 @@ MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove_new = omap2_onenand_remove,
+ .remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
@@ -609,7 +603,6 @@ static struct platform_driver omap2_onenand_driver = {
module_platform_driver(omap2_onenand_driver);
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index 92151aa52964..6d6aa709a21f 100644
--- a/drivers/mtd/nand/onenand/onenand_samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -860,8 +860,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(onenand->base))
return PTR_ERR(onenand->base);
@@ -874,8 +873,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->options |= ONENAND_SKIP_UNLOCK_CHECK;
if (onenand->type != TYPE_S5PC110) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->ahb_addr))
return PTR_ERR(onenand->ahb_addr);
@@ -895,8 +893,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->subpagesize = mtd->writesize;
} else { /* S5PC110 */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->dma_addr))
return PTR_ERR(onenand->dma_addr);
@@ -909,7 +906,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, r->start,
s5pc110_onenand_irq,
IRQF_SHARED, "onenand",
- &onenand);
+ onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
return err;
@@ -994,7 +991,7 @@ static struct platform_driver s3c_onenand_driver = {
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
- .remove_new = s3c_onenand_remove,
+ .remove = s3c_onenand_remove,
};
module_platform_driver(s3c_onenand_driver);
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
new file mode 100644
index 000000000000..db6c46a6fe01
--- /dev/null
+++ b/drivers/mtd/nand/qpic_common.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+
+/**
+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
+ * @nandc: qpic nand controller
+ *
+ * This function frees the bam transaction memory
+ */
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ kfree(bam_txn);
+}
+EXPORT_SYMBOL(qcom_free_bam_transaction);
+
+/**
+ * qcom_alloc_bam_transaction() - allocate BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate and initialize the BAM transaction structure
+ */
+struct bam_transaction *
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+ bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw;
+ bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems;
+
+ bam_txn->data_sgl = bam_txn_buf;
+ bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
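
[Editor's note: the allocator above sizes a single kzalloc() to hold the bam_transaction header plus three trailing arrays and then carves it up by bumping a cursor pointer, so one kfree() releases everything. The same pattern in a standalone sketch; struct txn and the counts are illustrative.]

/* Standalone sketch of the single-allocation carving pattern above. */
#include <stdio.h>
#include <stdlib.h>

struct txn {
	int *cmd_elems;		/* carved region 1 */
	int *sgl;		/* carved region 2 */
};

static struct txn *txn_alloc(int num_cmd, int num_sgl)
{
	size_t size = sizeof(struct txn) +
		      num_cmd * sizeof(int) + num_sgl * sizeof(int);
	char *p = calloc(1, size);	/* one zeroed allocation */
	struct txn *t = (struct txn *)p;

	if (!p)
		return NULL;

	p += sizeof(*t);		/* header first ... */
	t->cmd_elems = (int *)p;
	p += num_cmd * sizeof(int);	/* ... then each array in turn */
	t->sgl = (int *)p;
	return t;			/* free(t) releases everything */
}

int main(void)
{
	struct txn *t = txn_alloc(8, 4);

	printf("hdr=%p cmd=%p sgl=%p\n", (void *)t,
	       (void *)t->cmd_elems, (void *)t->sgl);
	free(t);
	return 0;
}
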
+
+/**
+ * qcom_clear_bam_transaction() - Clears the BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will clear the BAM transaction indexes.
+ */
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->supports_bam)
+ return;
+
+ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ bam_txn->last_data_desc = NULL;
+
+ sg_init_table(bam_txn->cmd_sgl, bam_txn->cmd_sgl_nitems);
+ sg_init_table(bam_txn->data_sgl, bam_txn->data_sgl_nitems);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_clear_bam_transaction);
+
+/**
+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
+ * @data: data pointer
+ *
+ * This function is a callback for DMA descriptor completion
+ */
+void qcom_qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ complete(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
+
+/**
+ * qcom_nandc_dev_to_mem() - DMA sync the register read buffer
+ * @nandc: qpic nand controller
+ * @is_cpu: true to sync for the CPU, false to sync for the device
+ *
+ * This function syncs the register read buffer for CPU or device access
+ */
+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+{
+ if (!nandc->props->supports_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
+
+/**
+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
+ * @nandc: qpic nand controller
+ * @chan: dma channel
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function maps the scatter gather list for DMA transfer and forms the
+ * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
+ * descriptor queue which will be submitted to the DMA engine.
+ */
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
+
+/**
+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the command descriptor for BAM DMA
+ * which will be used for NAND register reads and writes.
+ */
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ u32 offset;
+
+ if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) {
+ dev_err(nandc->dev, "BAM %s array is full\n", "CE");
+ return -EINVAL;
+ }
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ offset = nandc->props->bam_offset + reg_off + 4 * i;
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ offset, BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ offset, BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) {
+ dev_err(nandc->dev, "BAM %s array is full\n",
+ "CMD sgl");
+ return -EINVAL;
+ }
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE | DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
+
+/**
+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the data descriptor for BAM DMA which
+ * will be used for NAND data reads and writes.
+ */
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) {
+ dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl");
+ return -EINVAL;
+ }
+
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) {
+ dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl");
+ return -EINVAL;
+ }
+
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+ * is not set, form the DMA descriptor
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
+
+/**
+ * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: ADM DMA transaction size in bytes
+ * @flow_control: enable device flow control
+ *
+ * This function will prepare a descriptor for ADM DMA
+ */
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct qcom_adm_peripheral_config periph_conf = {};
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_slave_config slave_conf = {0};
+ enum dma_transfer_direction dir_eng;
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
+
+/**
+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
+ * @nandc: qpic nand controller
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer.
+ */
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_read_reg_dma);
+
+/**
+ * qcom_write_reg_dma() - write a given number of registers
+ * @nandc: qpic nand controller
+ * @vaddr: contiguous memory from which register values will
+ * be written
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to write a given number of
+ * contiguous registers
+ */
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_write_reg_dma);
+
+/**
+ * qcom_read_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ */
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_read_data_dma);
+
+/**
+ * qcom_write_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ */
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_write_data_dma);
+
+/**
+ * qcom_submit_descs() - submit dma descriptor
+ * @nandc: qpic nand controller
+ *
+ * This function will submit all the prepared DMA descriptors,
+ * both command and data
+ */
+int qcom_submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->supports_bam) {
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ ret = -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->supports_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_submit_descs);
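
[Editor's note: taken together, these exports form a queue-then-commit protocol: the read/write helpers only prepare descriptors, and nothing moves on the bus until qcom_submit_descs(). A hypothetical caller reading one status register, sketched against the helpers in this file; the surrounding controller setup is assumed.]

/* Hypothetical caller: DMA-read NAND_FLASH_STATUS into reg_read_buf. */
#include <linux/mtd/nand-qpic-common.h>

static int example_read_status(struct qcom_nand_controller *nandc)
{
	int ret;

	qcom_clear_read_regs(nandc);		/* rewind reg_read_buf */
	qcom_clear_bam_transaction(nandc);	/* reset BAM indexes */

	/* Queue the register read; nothing is submitted yet */
	ret = qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1,
				NAND_BAM_NEXT_SGL);
	if (ret)
		return ret;

	/* Map, submit and wait for every queued descriptor */
	ret = qcom_submit_descs(nandc);
	if (ret)
		return ret;

	/* Make the DMA-written registers visible to the CPU */
	qcom_nandc_dev_to_mem(nandc, true);
	return 0;
}
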
+
+/**
+ * qcom_clear_read_regs() - reset the read register buffer
+ * @nandc: qpic nand controller
+ *
+ * This function resets the register read buffer for the next NAND operation
+ */
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ qcom_nandc_dev_to_mem(nandc, false);
+}
+EXPORT_SYMBOL(qcom_clear_read_regs);
+
+/**
+ * qcom_nandc_unalloc() - unallocate qpic nand controller
+ * @nandc: qpic nand controller
+ *
+ * This function will free the memory allocated for the qpic nand controller
+ */
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->supports_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+EXPORT_SYMBOL(qcom_nandc_unalloc);
+
+/**
+ * qcom_nandc_alloc() - Allocate qpic nand controller
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate memory for qpic nand controller
+ */
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+ * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->supports_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+ * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_nandc_alloc);
+
+MODULE_DESCRIPTION("QPIC controller common api");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b523354dfb00..7408f34f0c68 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT
config MTD_NAND_AMS_DELTA
tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
- default y
+ default MACH_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
@@ -77,32 +77,6 @@ config MTD_NAND_NDFC
help
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-config MTD_NAND_S3C2410
- tristate "Samsung S3C NAND controller"
- depends on ARCH_S3C64XX
- help
- This enables the NAND flash controller on the S3C24xx and S3C64xx
- SoCs
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_S3C2410_DEBUG
- bool "Samsung S3C NAND controller debug"
- depends on MTD_NAND_S3C2410
- help
- Enable debugging of the S3C NAND driver
-
-config MTD_NAND_S3C2410_CLKSTOP
- bool "Samsung S3C NAND IDLE clock stop"
- depends on MTD_NAND_S3C2410
- default n
- help
- Stop the clock to the NAND controller when there is no chip
- selected to save power. This will mean there is a small delay
- when the is NAND chip selected or released, but will save
- approximately 5mA of power when there is nothing happening.
-
config MTD_NAND_SHARPSL
tristate "Sharp SL Series (C7xx + others) NAND controller"
depends on ARCH_PXA || COMPILE_TEST
@@ -160,7 +134,7 @@ config MTD_NAND_MARVELL
including:
- PXA3xx processors (NFCv1)
- 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
- - 64-bit Aramda platforms (7k, 8k) (NFCv2)
+ - 64-bit Armada platforms (7k, 8k, ac5) (NFCv2)
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC NAND controller"
@@ -204,13 +178,6 @@ config MTD_NAND_BCM47XXNFLASH
registered by bcma as platform devices. This enables driver for
NAND flash memories. For now only BCM4706 is supported.
-config MTD_NAND_OXNAS
- tristate "Oxford Semiconductor NAND controller"
- depends on ARCH_OXNAS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables the NAND flash controller on Oxford Semiconductor SoCs.
-
config MTD_NAND_MPC5121_NFC
tristate "MPC5121 NAND controller"
depends on PPC_MPC512x
@@ -241,8 +208,7 @@ config MTD_NAND_FSL_IFC
tristate "Freescale IFC NAND controller"
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
- select FSL_IFC
- select MEMORY
+ depends on FSL_IFC
help
Various Freescale chips e.g P1010, include a NAND Flash machine
with built-in hardware ECC capabilities.
@@ -287,8 +253,8 @@ config MTD_NAND_SH_FLCTL
config MTD_NAND_DAVINCI
tristate "DaVinci/Keystone NAND controller"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on COMPILE_TEST || ARCH_DAVINCI || ARCH_KEYSTONE
+ depends on HAS_IOMEM && TI_AEMIF
help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
@@ -456,6 +422,27 @@ config MTD_NAND_RENESAS
Enables support for the NAND controller found on Renesas R-Car
Gen3 and RZ/N1 SoC families.
+config MTD_NAND_TS72XX
+ tristate "ts72xx NAND controller"
+ depends on ARCH_EP93XX && HAS_IOMEM
+ help
+ Enables support for NAND controller on ts72xx SBCs.
+
+config MTD_NAND_NUVOTON_MA35
+ tristate "Nuvoton MA35 SoC NAND controller"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the NAND controller found on
+ the Nuvoton MA35 series SoCs.
+
+config MTD_NAND_LOONGSON
+ tristate "Loongson NAND controller"
+ depends on LOONGSON1_APB_DMA || LOONGSON2_APB_DMA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Enables support for NAND controller on Loongson family chips.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d93e861d8ba7..619760138d32 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
-obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
@@ -26,7 +25,6 @@ obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
-obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
@@ -35,6 +33,7 @@ obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TS72XX) += technologic-nand-controller.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
@@ -58,6 +57,8 @@ obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
+obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
+obj-$(CONFIG_MTD_NAND_LOONGSON) += loongson-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index fa621ffa6490..fb2b7db70297 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -22,7 +22,7 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
@@ -432,7 +432,7 @@ MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
static struct platform_driver gpio_nand_driver = {
.probe = gpio_nand_probe,
- .remove_new = gpio_nand_remove,
+ .remove = gpio_nand_remove,
.id_table = gpio_nand_plat_id_table,
.driver = {
.name = "ams-delta-nand",
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 906eef70cb6d..865754737f5f 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -481,7 +481,7 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
}
bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
- NULL, 0, NULL, 0,
+ anand->hw_ecc, chip->ecc.bytes, NULL, 0,
chip->ecc.strength);
if (bf > 0) {
mtd->ecc_stats.corrected += bf;
@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
dma_addr_t dma_addr;
+ u8 status;
int ret;
struct anfc_op nfc_op = {
.pkt_reg =
@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
}
/* Spare data is not protected */
- if (oob_required)
+ if (oob_required) {
ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
- return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
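The hunk above makes an OOB write failure fatal and adds a chip-side status check after programming, since a completed controller transfer does not by itself guarantee the program succeeded inside the NAND array. A minimal sketch of that status-check pattern, assuming a kernel context with the rawnand helpers (the helper name is illustrative, not the driver's):

    #include <linux/errno.h>
    #include <linux/mtd/rawnand.h>

    /* Hypothetical helper: verify chip status after a program operation */
    static int example_check_program_status(struct nand_chip *chip)
    {
            u8 status;
            int ret;

            /* Issues READ STATUS (0x70) and reads back one status byte */
            ret = nand_status_op(chip, &status);
            if (ret)
                    return ret;

            /* NAND_STATUS_FAIL means the page program did not complete */
            if (status & NAND_STATUS_FAIL)
                    return -EIO;

            return 0;
    }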
@@ -1348,7 +1360,7 @@ static void anfc_chips_cleanup(struct arasan_nfc *nfc)
static int anfc_chips_init(struct arasan_nfc *nfc)
{
- struct device_node *np = nfc->dev->of_node, *nand_np;
+ struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1358,10 +1370,9 @@ static int anfc_chips_init(struct arasan_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = anfc_chip_init(nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
anfc_chips_cleanup(nfc);
break;
}
@@ -1398,8 +1409,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc)
* case, the "not" chosen CS is assigned to nfc->spare_cs and selected
* whenever a GPIO CS must be asserted.
*/
- if (nfc->cs_array && nfc->ncs > 2) {
- if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
+ if (nfc->cs_array) {
+ if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) {
dev_err(nfc->dev,
"Assign a single native CS when using GPIOs\n");
return -EINVAL;
@@ -1440,55 +1451,43 @@ static int anfc_probe(struct platform_device *pdev)
anfc_reset(nfc);
- nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+ nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller");
if (IS_ERR(nfc->controller_clk))
return PTR_ERR(nfc->controller_clk);
- nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(nfc->bus_clk))
return PTR_ERR(nfc->bus_clk);
- ret = clk_prepare_enable(nfc->controller_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(nfc->bus_clk);
- if (ret)
- goto disable_controller_clk;
-
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_parse_cs(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_chips_init(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
platform_set_drvdata(pdev, nfc);
return 0;
-
-disable_bus_clk:
- clk_disable_unprepare(nfc->bus_clk);
-
-disable_controller_clk:
- clk_disable_unprepare(nfc->controller_clk);
-
- return ret;
}
static void anfc_remove(struct platform_device *pdev)
{
+ int i;
struct arasan_nfc *nfc = platform_get_drvdata(pdev);
- anfc_chips_cleanup(nfc);
+ for (i = 0; i < nfc->ncs; i++) {
+ if (nfc->cs_array[i]) {
+ gpiod_put(nfc->cs_array[i]);
+ }
+ }
- clk_disable_unprepare(nfc->bus_clk);
- clk_disable_unprepare(nfc->controller_clk);
+ anfc_chips_cleanup(nfc);
}
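The probe conversion above relies on devm_clk_get_enabled(), which combines clk_get(), clk_prepare() and clk_enable() and undoes all three automatically on driver detach; that is why the disable_*_clk unwind labels could be deleted. A minimal sketch of the pattern, with illustrative names:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* Acquired, prepared and enabled in one call; devres disables it */
            clk = devm_clk_get_enabled(&pdev->dev, "controller");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* No clk_disable_unprepare() needed in error paths or remove() */
            return 0;
    }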
static const struct of_device_id anfc_ids[] = {
@@ -1508,7 +1507,7 @@ static struct platform_driver anfc_driver = {
.of_match_table = anfc_ids,
},
.probe = anfc_probe,
- .remove_new = anfc_remove,
+ .remove = anfc_remove,
};
module_platform_driver(anfc_driver);
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 81e3d682a8cd..83ba4ebd02d4 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -165,7 +165,7 @@ struct atmel_nand {
struct atmel_pmecc_user *pmecc;
struct gpio_desc *cdgpio;
int numcs;
- struct atmel_nand_cs cs[];
+ struct atmel_nand_cs cs[] __counted_by(numcs);
};
static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
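The __counted_by() annotation above ties the flexible array cs[] to the numcs field, so FORTIFY_SOURCE and UBSAN bounds checking know the runtime element count. A minimal sketch of how such a struct is typically allocated, using placeholder types rather than the driver's:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_cs { void __iomem *virt; };

    struct example_nand {
            int numcs;
            struct example_cs cs[] __counted_by(numcs);
    };

    static struct example_nand *example_alloc(int numcs)
    {
            struct example_nand *nand;

            /* struct_size() computes header + numcs trailing elements safely */
            nand = kzalloc(struct_size(nand, cs, numcs), GFP_KERNEL);
            if (nand)
                    nand->numcs = numcs;  /* set the counter before touching cs[] */

            return nand;
    }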
@@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
dma_cookie_t cookie;
buf_dma = dma_map_single(nc->dev, buf, len, dir);
- if (dma_mapping_error(nc->dev, dev_dma)) {
+ if (dma_mapping_error(nc->dev, buf_dma)) {
dev_err(nc->dev,
"Failed to prepare a buffer for DMA access\n");
goto err;
@@ -1240,7 +1240,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
const struct nand_interface_config *conf,
struct atmel_smc_cs_conf *smcconf)
{
- u32 ncycles, totalcycles, timeps, mckperiodps;
+ u32 ncycles, totalcycles, timeps, mckperiodps, pulse;
struct atmel_nand_controller *nc;
int ret;
@@ -1366,11 +1366,16 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
/*
- * Read pulse timing directly matches tRP:
+ * Read pulse timing would directly match tRP,
+ * but some NAND flash chips (S34ML01G2 and W29N02KVxxAF)
+ * do not work properly in timing mode 3.
+ * The workaround is to extend the SMC NRD pulse to meet tREA
+ * timing.
*
- * NRD_PULSE = tRP
+ * NRD_PULSE = max(tRP, tREA)
*/
- ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
+ pulse = max(conf->timings.sdr.tRP_min, conf->timings.sdr.tREA_max);
+ ncycles = DIV_ROUND_UP(pulse, mckperiodps);
totalcycles += ncycles;
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
ncycles);
@@ -1378,13 +1383,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
return ret;
/*
- * The write cycle timing is directly matching tWC, but is also
+ * Read setup timing depends on the operation done on the NAND:
+ *
+ * NRD_SETUP = max(tAR, tCLR)
+ */
+ timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * The read cycle timing is directly matching tRC, but is also
* dependent on the setup and hold timings we calculated earlier,
* which gives:
*
- * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
- *
- * NRD_SETUP is always 0.
+ * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
ncycles = max(totalcycles, ncycles);
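To make the NRD arithmetic above concrete, a small worked example with assumed figures (a 133 MHz master clock giving mckperiodps ≈ 7519 ps, tRP_min = 15000 ps, tREA_max = 20000 ps; none of these numbers come from the patch):

    #include <linux/math.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    #define EXAMPLE_MCK_PERIOD_PS   7519U   /* ~133 MHz master clock */
    #define EXAMPLE_TRP_MIN_PS      15000U
    #define EXAMPLE_TREA_MAX_PS     20000U

    static inline u32 example_nrd_pulse_cycles(void)
    {
            /* NRD_PULSE = max(tRP, tREA) = 20000 ps */
            u32 pulse = max(EXAMPLE_TRP_MIN_PS, EXAMPLE_TREA_MAX_PS);

            /* 20000 / 7519, rounded up = 3 clock cycles */
            return DIV_ROUND_UP(pulse, EXAMPLE_MCK_PERIOD_PS);
    }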
@@ -1791,8 +1806,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
nand->numcs = 1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
+ nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(nand->cs[0].io.virt))
return PTR_ERR(nand->cs[0].io.virt);
@@ -1849,7 +1863,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
{
- struct device_node *np, *nand_np;
+ struct device_node *np;
struct device *dev = nc->dev;
int ret, reg_cells;
u32 val;
@@ -1876,7 +1890,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
reg_cells += val;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
struct atmel_nand *nand;
nand = atmel_nand_create(nc, nand_np, reg_cells);
@@ -2050,7 +2064,10 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
dma_cap_set(DMA_MEMCPY, mask);
nc->dmac = dma_request_channel(mask, NULL, NULL);
- if (!nc->dmac)
+ if (nc->dmac)
+ dev_info(nc->dev, "using %s for DMA transfers\n",
+ dma_chan_name(nc->dmac));
+ else
dev_err(nc->dev, "Failed to request DMA channel\n");
}
@@ -2661,7 +2678,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
- .remove_new = atmel_nand_controller_remove,
+ .remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 4d7dc8a9c373..1d0e93e4edb1 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -143,6 +143,7 @@ struct atmel_pmecc_caps {
int nstrengths;
int el_offset;
bool correct_erased_chunks;
+ bool clk_ctrl;
};
struct atmel_pmecc {
@@ -362,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
size = ALIGN(size, sizeof(s32));
size += (req->ecc.strength + 1) * sizeof(s32) * 3;
- user = kzalloc(size, GFP_KERNEL);
+ user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL);
if (!user)
return ERR_PTR(-ENOMEM);
@@ -380,10 +381,8 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
user->delta = user->dmu + req->ecc.strength + 1;
gf_tables = atmel_pmecc_get_gf_tables(req);
- if (IS_ERR(gf_tables)) {
- kfree(user);
+ if (IS_ERR(gf_tables))
return ERR_CAST(gf_tables);
- }
user->gf_tables = gf_tables;
@@ -408,12 +407,6 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
-void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
-{
- kfree(user);
-}
-EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
-
static int get_strength(struct atmel_pmecc_user *user)
{
const int *strengths = user->pmecc->caps->strengths;
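The kzalloc()-to-devm_kzalloc() switch above binds the PMECC user's lifetime to the providing device, which is what lets atmel_pmecc_destroy_user() and its kfree() disappear. A minimal sketch of that devres pattern, with placeholder types:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct example_user { int strength; };

    static struct example_user *example_create_user(struct device *dev)
    {
            struct example_user *user;

            user = devm_kzalloc(dev, sizeof(*user), GFP_KERNEL);
            if (!user)
                    return ERR_PTR(-ENOMEM);

            /* Freed by devres when dev is unbound; no destroy helper needed */
            return user;
    }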
@@ -851,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
if (IS_ERR(pmecc->regs.errloc))
return ERR_CAST(pmecc->regs.errloc);
+ /* pmecc data setup time */
+ if (caps->clk_ctrl)
+ writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK);
+
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
atmel_pmecc_reset(pmecc);
@@ -904,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = {
.strengths = atmel_pmecc_strengths,
.nstrengths = 5,
.el_offset = 0x8c,
+ .clk_ctrl = true,
};
static struct atmel_pmecc_caps sama5d4_caps = {
@@ -1012,4 +1010,3 @@ module_platform_driver(atmel_pmecc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("PMECC engine driver");
-MODULE_ALIAS("platform:atmel_pmecc");
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
index 7851c05126cf..cc0c5af1f4f1 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.h
+++ b/drivers/mtd/nand/raw/atmel/pmecc.h
@@ -55,8 +55,6 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);
struct atmel_pmecc_user *
atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
-void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
-
void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 063a5e0b8d4b..04d64724c400 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -357,7 +357,7 @@ static struct platform_driver au1550nd_driver = {
.name = "au1550-nand",
},
.probe = au1550nd_probe,
- .remove_new = au1550nd_remove,
+ .remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
index ebcf508e0606..4d4e185c22e5 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -70,7 +70,7 @@ static void bcm47xxnflash_remove(struct platform_device *pdev)
static struct platform_driver bcm47xxnflash_driver = {
.probe = bcm47xxnflash_probe,
- .remove_new = bcm47xxnflash_remove,
+ .remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
},
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 6487dfc64258..e532c3535b16 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
+ int rc;
if (cmd == NAND_CMD_NONE)
return;
@@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
- bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ if (rc)
+ pr_err("ctl_cmd didn't work with error %d\n", rc);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 9907e3ec4bb2..0536568c6467 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -2,7 +2,7 @@
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcmbca_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
deleted file mode 100644
index 71ddcc611f6e..000000000000
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2015 Broadcom Corporation
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct bcm63138_nand_soc {
- struct brcmnand_soc soc;
- void __iomem *base;
-};
-
-#define BCM63138_NAND_INT_STATUS 0x00
-#define BCM63138_NAND_INT_EN 0x04
-
-enum {
- BCM63138_CTLRDY = BIT(4),
-};
-
-static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
- u32 val = brcmnand_readl(mmio);
-
- if (val & BCM63138_CTLRDY) {
- brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
- return true;
- }
-
- return false;
-}
-
-static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
- u32 val = brcmnand_readl(mmio);
-
- if (en)
- val |= BCM63138_CTLRDY;
- else
- val &= ~BCM63138_CTLRDY;
-
- brcmnand_writel(val, mmio);
-}
-
-static int bcm63138_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct bcm63138_nand_soc *priv;
- struct brcmnand_soc *soc;
- struct resource *res;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- soc = &priv->soc;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
- priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- soc->ctlrdy_ack = bcm63138_nand_intc_ack;
- soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
-
- return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id bcm63138_nand_of_match[] = {
- { .compatible = "brcm,nand-bcm63138" },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
-
-static struct platform_driver bcm63138_nand_driver = {
- .probe = bcm63138_nand_probe,
- .remove = brcmnand_remove,
- .driver = {
- .name = "bcm63138_nand",
- .pm = &brcmnand_pm_ops,
- .of_match_table = bcm63138_nand_of_match,
- }
-};
-module_platform_driver(bcm63138_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
new file mode 100644
index 000000000000..c31d7f37dc52
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcmbca_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCMBCA_NAND_INT_STATUS 0x00
+#define BCMBCA_NAND_INT_EN 0x04
+
+enum {
+ BCMBCA_CTLRDY = BIT(4),
+};
+
+#if defined(CONFIG_ARM64)
+#define ALIGN_REQ 8
+#else
+#define ALIGN_REQ 4
+#endif
+
+static inline bool bcmbca_nand_is_buf_aligned(void *flash_cache, void *buffer)
+{
+ return IS_ALIGNED((uintptr_t)buffer, ALIGN_REQ) &&
+ IS_ALIGNED((uintptr_t)flash_cache, ALIGN_REQ);
+}
+
+static bool bcmbca_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCMBCA_CTLRDY) {
+ brcmnand_writel(val & ~BCMBCA_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcmbca_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCMBCA_CTLRDY;
+ else
+ val &= ~BCMBCA_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static void bcmbca_read_data_bus(struct brcmnand_soc *soc,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ /*
+ * memcpy can do an unaligned access depending on the source
+ * and dest address, which is incompatible with the NAND cache. Fall
+ * back to memcpy_fromio in such a case.
+ */
+ if (bcmbca_nand_is_buf_aligned((void __force *)flash_cache, buffer))
+ memcpy((void *)buffer, (void __force *)flash_cache, fc_words * 4);
+ else
+ memcpy_fromio((void *)buffer, flash_cache, fc_words * 4);
+}
+
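A condensed model of bcmbca_read_data_bus() above: plain memcpy() may issue accesses the NAND flash cache cannot tolerate unless both pointers are suitably aligned, so the unaligned case falls back to memcpy_fromio(). Sketch only; the 4-byte requirement here stands in for the driver's ALIGN_REQ:

    #include <linux/align.h>
    #include <linux/io.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void example_read_cache(void __iomem *fc, u32 *buf, int fc_words)
    {
            size_t len = fc_words * 4;

            if (IS_ALIGNED((uintptr_t)buf, 4) &&
                IS_ALIGNED((uintptr_t)(__force void *)fc, 4))
                    memcpy(buf, (__force void *)fc, len);   /* fast path */
            else
                    memcpy_fromio(buf, fc, len);            /* safe accessors */
    }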
+static int bcmbca_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcmbca_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcmbca_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcmbca_nand_intc_set;
+ soc->read_data_bus = bcmbca_read_data_bus;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcmbca_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcmbca_nand_of_match);
+
+static struct platform_driver bcmbca_nand_driver = {
+ .probe = bcmbca_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcmbca_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcmbca_nand_of_match,
+ }
+};
+module_platform_driver(bcmbca_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCMBCA");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 2e9c2e2d9c9f..835653bdd5ab 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -29,6 +29,7 @@
#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>
+#include <linux/string_choices.h>
#include "brcmnand.h"
@@ -65,6 +66,7 @@ module_param(wp_on, int, 0444);
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
+#define CMD_NOT_SUPPORTED 0xff
struct brcm_nand_dma_desc {
u32 next_desc;
@@ -101,7 +103,7 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
-#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define NAND_POLL_STATUS_TIMEOUT_MS 500
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
@@ -199,6 +201,30 @@ static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
+/* Native command conversion for legacy controllers (< v5.0) */
+static const u8 native_cmd_conv[] = {
+ [NAND_CMD_READ0] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READ1] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL,
+ [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE,
+ [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READID] = CMD_DEVICE_ID_READ,
+ [NAND_CMD_ERASE2] = CMD_NULL,
+ [NAND_CMD_PARAM] = CMD_PARAMETER_READ,
+ [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RESET] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUTSTART] = CMD_NULL,
+ [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED,
+};
+
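The table above lets legacy (< v5.0) controllers translate generic NAND opcodes into native commands before any I/O starts, with CMD_NOT_SUPPORTED as a reject sentinel. A minimal sketch of the lookup; the bounds check is supplied by the sketch, not the driver:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    static int example_conv_cmd(u8 opcode, u8 *native)
    {
            u8 cmd;

            /* Opcodes beyond the table are treated as unsupported here */
            if (opcode >= ARRAY_SIZE(native_cmd_conv))
                    return -EOPNOTSUPP;

            cmd = native_cmd_conv[opcode];
            if (cmd == CMD_NOT_SUPPORTED)
                    return -EOPNOTSUPP;

            *native = cmd;
            return 0;
    }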
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -237,6 +263,12 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* Functions to be called from exec_op */
+ int (*check_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+ int (*exec_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
@@ -272,6 +304,7 @@ struct brcmnand_controller {
const unsigned int *page_sizes;
unsigned int page_size_shift;
unsigned int max_oob;
+ u32 ecc_level_shift;
u32 features;
/* for low-power standby/resume only */
@@ -309,9 +342,6 @@ struct brcmnand_host {
struct platform_device *pdev;
int cs;
- unsigned int last_cmd;
- unsigned int last_byte;
- u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
@@ -330,6 +360,7 @@ enum brcmnand_reg {
BRCMNAND_CORR_THRESHOLD_EXT,
BRCMNAND_UNCORR_COUNT,
BRCMNAND_CORR_COUNT,
+ BRCMNAND_READ_ERROR_COUNT,
BRCMNAND_CORR_EXT_ADDR,
BRCMNAND_CORR_ADDR,
BRCMNAND_UNCORR_EXT_ADDR,
@@ -360,6 +391,7 @@ static const u16 brcmnand_regs_v21[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0,
[BRCMNAND_CORR_EXT_ADDR] = 0x60,
[BRCMNAND_CORR_ADDR] = 0x64,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
@@ -390,6 +422,7 @@ static const u16 brcmnand_regs_v33[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x80,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
@@ -420,6 +453,7 @@ static const u16 brcmnand_regs_v50[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x80,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
@@ -450,6 +484,7 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -480,6 +515,7 @@ static const u16 brcmnand_regs_v71[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -510,6 +546,7 @@ static const u16 brcmnand_regs_v72[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -596,6 +633,36 @@ enum {
INTFC_CTLR_READY = BIT(31),
};
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
+static int brcmnand_status(struct brcmnand_host *host);
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -737,6 +804,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;
+ /* v7.2 has different ecc level shift in the acc register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
return 0;
}
@@ -814,6 +887,20 @@ static inline u32 edu_readl(struct brcmnand_controller *ctrl,
return brcmnand_readl(ctrl->edu_base + offs);
}
+static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ struct brcmnand_soc *soc = ctrl->soc;
+ int i;
+
+ if (soc && soc->read_data_bus) {
+ soc->read_data_bus(soc, flash_cache, buffer, fc_words);
+ } else {
+ for (i = 0; i < fc_words; i++)
+ buffer[i] = brcmnand_read_fc(ctrl, i);
+ }
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -880,11 +967,11 @@ static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
-static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+static inline u32 brcmnand_corr_total(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version < 0x0600)
- return 1;
- return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+ if (ctrl->nand_version < 0x400)
+ return 0;
+ return brcmnand_read_reg(ctrl, BRCMNAND_READ_ERROR_COUNT);
}
static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
@@ -931,30 +1018,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
return 0;
}
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
- /* See BRCMNAND_HAS_CACHE_MODE */
- ACC_CONTROL_CACHE_MODE = BIT(22),
-
- /* See BRCMNAND_HAS_PREFETCH */
- ACC_CONTROL_PREFETCH = BIT(23),
-
- ACC_CONTROL_PAGE_HIT = BIT(24),
- ACC_CONTROL_WR_PREEMPT = BIT(25),
- ACC_CONTROL_PARTIAL_PAGE = BIT(26),
- ACC_CONTROL_RD_ERASED = BIT(27),
- ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
- ACC_CONTROL_WR_ECC = BIT(30),
- ACC_CONTROL_RD_ECC = BIT(31),
-};
-
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
@@ -967,18 +1030,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(4, 0);
}
-#define NAND_ACC_CONTROL_ECC_SHIFT 16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
-
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= ACC_CONTROL_ECC_SHIFT;
/* v7.2 includes additional ECC levels */
- if (ctrl->nand_version >= 0x0702)
- mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
return mask;
}
@@ -992,8 +1052,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
- acc_control |= host->hwcfg.ecc_level
- << NAND_ACC_CONTROL_ECC_SHIFT;
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -1014,17 +1074,20 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
return -1;
}
-static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+static bool brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
struct brcmnand_controller *ctrl = host->ctrl;
- int shift = brcmnand_sector_1k_shift(ctrl);
+ int sector_size_bit = brcmnand_sector_1k_shift(ctrl);
u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control;
- if (shift < 0)
- return 0;
+ if (sector_size_bit < 0)
+ return false;
+
+ acc_control = nand_readreg(ctrl, acc_control_offs);
- return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+ return ((acc_control & BIT(sector_size_bit)) != 0);
}
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
@@ -1044,6 +1107,43 @@ static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
nand_writereg(ctrl, acc_control_offs, tmp);
}
+static int brcmnand_get_spare_size(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 acc = nand_readreg(ctrl, acc_control_offs);
+
+ return (acc & brcmnand_spare_area_mask(ctrl));
+}
+
+static void brcmnand_get_ecc_settings(struct brcmnand_host *host, struct nand_chip *chip)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ bool sector_size_1k = brcmnand_get_sector_size_1k(host);
+ int spare_area_size, ecc_level;
+ u32 acc;
+
+ spare_area_size = brcmnand_get_spare_size(host);
+ acc = nand_readreg(ctrl, acc_control_offs);
+ ecc_level = (acc & brcmnand_ecc_level_mask(ctrl)) >> ctrl->ecc_level_shift;
+ if (sector_size_1k)
+ chip->ecc.strength = ecc_level * 2;
+ else if (spare_area_size == 16 && ecc_level == 15)
+ chip->ecc.strength = 1; /* hamming */
+ else
+ chip->ecc.strength = ecc_level;
+
+ if (chip->ecc.size == 0) {
+ if (sector_size_1k)
+ chip->ecc.size = 1024;
+ else
+ chip->ecc.size = 512;
+ }
+}
+
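The decoding above follows three rules: with 1K sectors the register reports strength per 512-byte half, so the value is doubled; a 16-byte spare area with level 15 is the controller's encoding for 1-bit Hamming; everything else maps one-to-one. Worked examples (values illustrative, not taken from a specific board):

    /*
     *   ecc_level 8,  512 B sector, spare 27 -> chip->ecc.strength = 8
     *   ecc_level 8,  1 KiB sector           -> chip->ecc.strength = 16 (8 * 2)
     *   ecc_level 15, spare 16               -> chip->ecc.strength = 1 (Hamming)
     */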
/***********************************************************************
* CS_NAND_SELECT
***********************************************************************/
@@ -1053,10 +1153,11 @@ enum {
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
-static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+static int bcmnand_ctrl_poll_status(struct brcmnand_host *host,
u32 mask, u32 expected_val,
unsigned long timeout_ms)
{
+ struct brcmnand_controller *ctrl = host->ctrl;
unsigned long limit;
u32 val;
@@ -1065,6 +1166,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
@@ -1072,8 +1176,19 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
cpu_relax();
} while (time_after(limit, jiffies));
- dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
- expected_val, val & mask);
+ /*
+ * Do a final check after the timeout in case the CPU was busy and the
+ * driver did not get enough time to perform the polling; this avoids false alarms.
+ */
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ dev_err(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
return -ETIMEDOUT;
}
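bcmnand_ctrl_poll_status() above now re-samples the status once after the deadline, because on a loaded system the polling task can be starved long enough that the loop never observes the ready state even though the hardware reached it in time. The generic shape of that pattern, with a hypothetical reader callback:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>

    static int example_poll(u32 (*read_status)(void *ctx), void *ctx,
                            u32 mask, u32 want, unsigned long timeout_ms)
    {
            unsigned long limit = jiffies + msecs_to_jiffies(timeout_ms);

            do {
                    if ((read_status(ctx) & mask) == want)
                            return 0;
                    cpu_relax();
            } while (time_after(limit, jiffies));

            /* Final check: the CPU may have been busy right up to the deadline */
            if ((read_status(ctx) & mask) == want)
                    return 0;

            return -ETIMEDOUT;
    }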
@@ -1355,7 +1470,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
int ret;
if (old_wp != wp) {
- dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ dev_dbg(ctrl->dev, "WP %s\n", str_on_off(wp));
old_wp = wp;
}
@@ -1363,7 +1478,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
* make sure ctrl/flash ready before and after
* changing state of #WP pin
*/
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY |
NAND_STATUS_READY,
NAND_CTRL_RDY |
NAND_STATUS_READY, 0);
@@ -1371,9 +1486,10 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- nand_status_op(chip, NULL);
+ /* force controller operation to update internal copy of NAND chip status */
+ brcmnand_status(host);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
- ret = bcmnand_ctrl_poll_status(ctrl,
+ ret = bcmnand_ctrl_poll_status(host,
NAND_CTRL_RDY |
NAND_STATUS_READY |
NAND_STATUS_WP,
@@ -1384,7 +1500,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
if (ret)
dev_err_ratelimited(&host->pdev->dev,
"nand #WP expected %s\n",
- wp ? "on" : "off");
+ str_on_off(wp));
}
}
@@ -1461,19 +1577,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
- int j;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
- for (j = 0; j < tbytes; j += 4)
+ /*
+ * tbytes may not be a multiple of words. Make sure we don't read past
+ * the buffer and stop at the last whole word.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
+
+ /* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
return tbytes;
}
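write_oob_to_regs() above now copies whole words first and packs the 1-3 leftover bytes into a word padded with 0xff (the erased-flash value), so the final register write never reads past the OOB buffer. A standalone model of that tail handling, with a hypothetical register-write callback:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    static void example_write_oob_words(void (*reg_write)(int off, u32 val),
                                        const u8 *oob, int tbytes)
    {
            u32 last = 0xffffffff;
            u8 *plast = (u8 *)&last;
            int j, k = 0;

            /* Whole words only; stops before a trailing partial word */
            for (j = 0; (j + 3) < tbytes; j += 4)
                    reg_write(j, (oob[j] << 24) | (oob[j + 1] << 16) |
                                 (oob[j + 2] << 8) | oob[j + 3]);

            /* Pack the 1-3 remaining bytes over the 0xff padding */
            while (j < tbytes)
                    plast[k++] = oob[j++];

            if (tbytes & 0x3)
                    reg_write(tbytes & ~0x3, (__force u32)cpu_to_be32(last));
    }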
@@ -1592,10 +1722,20 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
- BUG_ON(ctrl->cmd_pending != 0);
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
WARN_ON(ret);
mb(); /* flush previous writes */
@@ -1603,16 +1743,6 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
cmd << brcmnand_cmd_shift(ctrl));
}
-/***********************************************************************
- * NAND MTD API: read/program/erase
- ***********************************************************************/
-
-static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
- unsigned int ctrl)
-{
- /* intentionally left blank */
-}
-
static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
@@ -1624,15 +1754,15 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
if (mtd->oops_panic_write || ctrl->irq < 0) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
- sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+ sts = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
- err = (sts < 0) ? true : false;
+ err = sts < 0;
} else {
unsigned long timeo = msecs_to_jiffies(
NAND_POLL_STATUS_TIMEOUT_MS);
/* wait for completion interrupt */
sts = wait_for_completion_timeout(&ctrl->done, timeo);
- err = (sts <= 0) ? true : false;
+ err = !sts;
}
return err;
@@ -1648,6 +1778,7 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
if (ctrl->cmd_pending)
err = brcmstb_nand_wait_for_completion(chip);
+ ctrl->cmd_pending = 0;
if (err) {
u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
>> brcmnand_cmd_shift(ctrl);
@@ -1656,12 +1787,32 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
"timeout waiting for command %#02x\n", cmd);
dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ return -ETIMEDOUT;
}
- ctrl->cmd_pending = 0;
return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS;
}
+static int brcmnand_status(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ brcmnand_set_cmd_addr(mtd, 0);
+ brcmnand_send_cmd(host, CMD_STATUS_READ);
+
+ return brcmnand_waitfunc(chip);
+}
+
+static int brcmnand_reset(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_send_cmd(host, CMD_FLASH_RESET);
+
+ return brcmnand_waitfunc(chip);
+}
+
enum {
LLOP_RE = BIT(16),
LLOP_WE = BIT(17),
@@ -1711,190 +1862,6 @@ static int brcmnand_low_level_op(struct brcmnand_host *host,
return brcmnand_waitfunc(chip);
}
-static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
- int column, int page_addr)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- u64 addr = (u64)page_addr << chip->page_shift;
- int native_cmd = 0;
-
- if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
- command == NAND_CMD_RNDOUT)
- addr = (u64)column;
- /* Avoid propagating a negative, don't-care address */
- else if (page_addr < 0)
- addr = 0;
-
- dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
- (unsigned long long)addr);
-
- host->last_cmd = command;
- host->last_byte = 0;
- host->last_addr = addr;
-
- switch (command) {
- case NAND_CMD_RESET:
- native_cmd = CMD_FLASH_RESET;
- break;
- case NAND_CMD_STATUS:
- native_cmd = CMD_STATUS_READ;
- break;
- case NAND_CMD_READID:
- native_cmd = CMD_DEVICE_ID_READ;
- break;
- case NAND_CMD_READOOB:
- native_cmd = CMD_SPARE_AREA_READ;
- break;
- case NAND_CMD_ERASE1:
- native_cmd = CMD_BLOCK_ERASE;
- brcmnand_wp(mtd, 0);
- break;
- case NAND_CMD_PARAM:
- native_cmd = CMD_PARAMETER_READ;
- break;
- case NAND_CMD_SET_FEATURES:
- case NAND_CMD_GET_FEATURES:
- brcmnand_low_level_op(host, LL_OP_CMD, command, false);
- brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
- break;
- case NAND_CMD_RNDOUT:
- native_cmd = CMD_PARAMETER_CHANGE_COL;
- addr &= ~((u64)(FC_BYTES - 1));
- /*
- * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
- * NB: hwcfg.sector_size_1k may not be initialized yet
- */
- if (brcmnand_get_sector_size_1k(host)) {
- host->hwcfg.sector_size_1k =
- brcmnand_get_sector_size_1k(host);
- brcmnand_set_sector_size_1k(host, 0);
- }
- break;
- }
-
- if (!native_cmd)
- return;
-
- brcmnand_set_cmd_addr(mtd, addr);
- brcmnand_send_cmd(host, native_cmd);
- brcmnand_waitfunc(chip);
-
- if (native_cmd == CMD_PARAMETER_READ ||
- native_cmd == CMD_PARAMETER_CHANGE_COL) {
- /* Copy flash cache word-wise */
- u32 *flash_cache = (u32 *)ctrl->flash_cache;
- int i;
-
- brcmnand_soc_data_bus_prepare(ctrl->soc, true);
-
- /*
- * Must cache the FLASH_CACHE now, since changes in
- * SECTOR_SIZE_1K may invalidate it
- */
- for (i = 0; i < FC_WORDS; i++)
- /*
- * Flash cache is big endian for parameter pages, at
- * least on STB SoCs
- */
- flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
-
- brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
-
- /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
- if (host->hwcfg.sector_size_1k)
- brcmnand_set_sector_size_1k(host,
- host->hwcfg.sector_size_1k);
- }
-
- /* Re-enable protection is necessary only after erase */
- if (command == NAND_CMD_ERASE1)
- brcmnand_wp(mtd, 1);
-}
-
-static uint8_t brcmnand_read_byte(struct nand_chip *chip)
-{
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- uint8_t ret = 0;
- int addr, offs;
-
- switch (host->last_cmd) {
- case NAND_CMD_READID:
- if (host->last_byte < 4)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
- (24 - (host->last_byte << 3));
- else if (host->last_byte < 8)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
- (56 - (host->last_byte << 3));
- break;
-
- case NAND_CMD_READOOB:
- ret = oob_reg_read(ctrl, host->last_byte);
- break;
-
- case NAND_CMD_STATUS:
- ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
- INTFC_FLASH_STATUS;
- if (wp_on) /* hide WP status */
- ret |= NAND_STATUS_WP;
- break;
-
- case NAND_CMD_PARAM:
- case NAND_CMD_RNDOUT:
- addr = host->last_addr + host->last_byte;
- offs = addr & (FC_BYTES - 1);
-
- /* At FC_BYTES boundary, switch to next column */
- if (host->last_byte > 0 && offs == 0)
- nand_change_read_column_op(chip, addr, NULL, 0, false);
-
- ret = ctrl->flash_cache[offs];
- break;
- case NAND_CMD_GET_FEATURES:
- if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
- ret = 0;
- } else {
- bool last = host->last_byte ==
- ONFI_SUBFEATURE_PARAM_LEN - 1;
- brcmnand_low_level_op(host, LL_OP_RD, 0, last);
- ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
- }
- }
-
- dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
- host->last_byte++;
-
- return ret;
-}
-
-static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- int i;
-
- for (i = 0; i < len; i++, buf++)
- *buf = brcmnand_read_byte(chip);
-}
-
-static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- int i;
- struct brcmnand_host *host = nand_get_controller_data(chip);
-
- switch (host->last_cmd) {
- case NAND_CMD_SET_FEATURES:
- for (i = 0; i < len; i++)
- brcmnand_low_level_op(host, LL_OP_WR, buf[i],
- (i + 1) == len);
- break;
- default:
- BUG();
- break;
- }
-}
-
/*
* Kick EDU engine
*/
@@ -1910,8 +1877,8 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
unsigned int trans = len >> FC_SHIFT;
dma_addr_t pa;
- dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
- "read" : "write"), buf, oob);
+ dev_dbg(ctrl->dev, "EDU %s %p:%p\n",
+ str_read_write(edu_cmd == EDU_CMD_READ), buf, oob);
pa = dma_map_single(ctrl->dev, buf, len, dir);
if (dma_mapping_error(ctrl->dev, pa)) {
@@ -2107,15 +2074,20 @@ static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
*/
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf,
- u8 *oob, u64 *err_addr)
+ u8 *oob, u64 *err_addr, unsigned int *corr)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
- int i, j, ret = 0;
+ int i, ret = 0;
+ unsigned int prev_corr;
+
+ if (corr)
+ *corr = 0;
brcmnand_clear_ecc_addr(ctrl);
for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ prev_corr = brcmnand_corr_total(ctrl);
brcmnand_set_cmd_addr(mtd, addr);
/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
brcmnand_send_cmd(host, CMD_PAGE_READ);
@@ -2124,8 +2096,8 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
if (likely(buf)) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
- for (j = 0; j < FC_WORDS; j++, buf++)
- *buf = brcmnand_read_fc(ctrl, j);
+ brcmnand_read_data_bus(ctrl, ctrl->nand_fc, buf, FC_WORDS);
+ buf += FC_WORDS;
brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
}
@@ -2140,13 +2112,16 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
if (*err_addr)
ret = -EBADMSG;
- }
+ else {
+ *err_addr = brcmnand_get_correcc_addr(ctrl);
- if (!ret) {
- *err_addr = brcmnand_get_correcc_addr(ctrl);
+ if (*err_addr) {
+ ret = -EUCLEAN;
- if (*err_addr)
- ret = -EUCLEAN;
+ if (corr && (brcmnand_corr_total(ctrl) - prev_corr) > *corr)
+ *corr = brcmnand_corr_total(ctrl) - prev_corr;
+ }
+ }
}
}
@@ -2214,6 +2189,8 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
int err;
bool retry = true;
bool edu_err = false;
+ unsigned int corrected = 0; /* max corrected bits per subpage */
+ unsigned int prev_tot = brcmnand_corr_total(ctrl);
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
@@ -2241,9 +2218,11 @@ try_dmaread:
memset(oob, 0x99, mtd->oobsize);
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
- oob, &err_addr);
+ oob, &err_addr, &corrected);
}
+ mtd->ecc_stats.corrected += brcmnand_corr_total(ctrl) - prev_tot;
+
if (mtd_is_eccerr(err)) {
/*
* On controller versions 7.0 and 7.1, DMA read after a
@@ -2273,7 +2252,7 @@ try_dmaread:
return err;
}
- dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ dev_err(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
/* NAND layer expects zero on ECC errors */
@@ -2281,16 +2260,20 @@ try_dmaread:
}
if (mtd_is_bitflip(err)) {
- unsigned int corrected = brcmnand_count_corrected(ctrl);
-
/* in case of EDU correctable error we read again using PIO */
if (edu_err)
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
- oob, &err_addr);
+ oob, &err_addr, &corrected);
dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
(unsigned long long)err_addr);
- mtd->ecc_stats.corrected += corrected;
+ /*
+ * If the flipped-bits accumulator is not supported but we detected
+ * a correction, increase the stat by 1 to match previous behavior.
+ */
+ if (brcmnand_corr_total(ctrl) == prev_tot)
+ mtd->ecc_stats.corrected++;
+
/* Always exceed the software-imposed threshold */
return max(mtd->bitflip_threshold, corrected);
}
@@ -2302,13 +2285,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_read_page_op(chip, page, 0, NULL, 0);
-
- return brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
@@ -2318,12 +2299,11 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
-
- nand_read_page_op(chip, page, 0, NULL, 0);
+ u64 addr = (u64)page << chip->page_shift;
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
@@ -2409,6 +2389,11 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
status = brcmnand_waitfunc(chip);
+ if (status < 0) {
+ ret = status;
+ goto out;
+ }
+
if (status & NAND_STATUS_FAIL) {
dev_info(ctrl->dev, "program failed at %llx\n",
(unsigned long long)addr);
@@ -2425,13 +2410,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
-
- return nand_prog_page_end_op(chip);
+ return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2440,13 +2422,14 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
+ int ret = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return nand_prog_page_end_op(chip);
+ return ret;
}
static int brcmnand_write_oob(struct nand_chip *chip, int page)
@@ -2470,6 +2453,301 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
return ret;
}
+static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr = &op->instrs[i];
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const u8 *out;
+ bool last_op;
+ int ret = 0;
+ u8 *in;
+
+ /*
+ * The controller needs to be aware of the last command in the operation
+ * (WAITRDY excepted).
+ */
+ last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
+ ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ brcmnand_low_level_op(host, LL_OP_CMD, instr->ctx.cmd.opcode, last_op);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ brcmnand_low_level_op(host, LL_OP_ADDR, instr->ctx.addr.addrs[i],
+ last_op && (i == (instr->ctx.addr.naddrs - 1)));
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ in = instr->ctx.data.buf.in;
+ for (i = 0; i < instr->ctx.data.len; i++) {
+ brcmnand_low_level_op(host, LL_OP_RD, 0,
+ last_op && (i == (instr->ctx.data.len - 1)));
+ in[i] = brcmnand_read_reg(host->ctrl, BRCMNAND_LL_RDATA);
+ }
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ out = instr->ctx.data.buf.out;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, out[i],
+ last_op && (i == (instr->ctx.data.len - 1)));
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ break;
+
+ default:
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n",
+ instr->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
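For context on the nand_operation structures walked above: the raw NAND core builds an instruction array and hands it to ->exec_op(). The STATUS operation recognized by brcmnand_op_is_status() below is assembled by the core roughly like this (a simplified sketch of nand_status_op(), timings omitted):

    #include <linux/mtd/rawnand.h>

    static int example_build_status_op(struct nand_chip *chip, u8 *status)
    {
            struct nand_op_instr instrs[] = {
                    NAND_OP_CMD(NAND_CMD_STATUS, 0),
                    NAND_OP_8BIT_DATA_IN(1, status, 0),
            };
            struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

            /* Dispatches to the controller's ->exec_op() */
            return nand_exec_op(chip, &op);
    }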
+static int brcmnand_op_is_status(const struct nand_operation *op)
+{
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_op_is_reset(const struct nand_operation *op)
+{
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET &&
+ op->instrs[1].type == NAND_OP_WAITRDY_INSTR)
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_check_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ return 0;
+}
+
+static int brcmnand_exec_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ unsigned int i;
+ u8 cmd;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ break;
+ case NAND_OP_ADDR_INSTR:
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const struct nand_op_instr *instr;
+ unsigned int i, j;
+ u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
+ int ret = 0;
+ u64 last_addr;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED) {
+ dev_err(ctrl->dev, "unsupported cmd=%d\n",
+ instr->ctx.cmd.opcode);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else if (instr->type == NAND_OP_ADDR_INSTR) {
+ u64 addr = 0;
+
+ if (cmd == CMD_NULL)
+ continue;
+
+ if (instr->ctx.addr.naddrs > 8) {
+ dev_err(ctrl->dev, "unsupported naddrs=%u\n",
+ instr->ctx.addr.naddrs);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.addr.naddrs; j++)
+ addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
+
+ if (cmd == CMD_BLOCK_ERASE)
+ addr <<= chip->page_shift;
+ else if (cmd == CMD_PARAMETER_CHANGE_COL)
+ addr &= ~((u64)(FC_BYTES - 1));
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, cmd);
+ last_addr = addr;
+ last_cmd = cmd;
+ cmd = CMD_NULL;
+ brcmnand_waitfunc(chip);
+
+ if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (j = 0; j < FC_WORDS; j++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+ }
+ } else if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ u8 *in = instr->ctx.data.buf.in;
+
+ if (last_cmd == CMD_DEVICE_ID_READ) {
+ u32 val;
+
+ if (instr->ctx.data.len > 8) {
+ dev_err(ctrl->dev, "unsupported len=%u\n",
+ instr->ctx.data.len);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ if (j == 0)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
+ else if (j == 4)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
+
+ in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
+ }
+ } else if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ u64 addr;
+ u32 offs;
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ addr = last_addr + j;
+ offs = addr & (FC_BYTES - 1);
+
+ if (j > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0,
+ false);
+
+ in[j] = ctrl->flash_cache[offs];
+ }
+ }
+ } else if (instr->type == NAND_OP_WAITRDY_INSTR) {
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ if (ret)
+ break;
+ } else {
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int brcmnand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *status;
+ int ret = 0;
+
+ if (check_only)
+ return ctrl->check_instr(chip, op);
+
+ if (brcmnand_op_is_status(op)) {
+ status = op->instrs[1].ctx.data.buf.in;
+ ret = brcmnand_status(host);
+ if (ret < 0)
+ return ret;
+
+ *status = ret & 0xFF;
+
+ return 0;
+ } else if (brcmnand_op_is_reset(op)) {
+ ret = brcmnand_reset(host);
+ if (ret < 0)
+ return ret;
+
+ brcmnand_wp(mtd, 1);
+
+ return 0;
+ }
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 0);
+
+ ret = ctrl->exec_instr(chip, op);
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 1);
+
+ return ret;
+}
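brcmnand_op_is_status(), added earlier in this patch, pattern-matches the two-instruction sequence the rawnand core emits for a status read. For reference, that sequence is built roughly as below, a sketch using the public rawnand instruction macros with timings omitted (the dispatch helper nand_exec_op() is core-internal):

/* Sketch of the operation shape matched by brcmnand_op_is_status(). */
static int example_status_op(struct nand_chip *chip, u8 *status)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, status, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	return nand_exec_op(chip, &op);
}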
+
/***********************************************************************
* Per-CS setup (1 NAND device)
***********************************************************************/
@@ -2561,7 +2839,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp &= ~brcmnand_ecc_level_mask(ctrl);
tmp &= ~brcmnand_spare_area_mask(ctrl);
if (ctrl->nand_version >= 0x0302) {
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp |= cfg->spare_area_size;
}
nand_writereg(ctrl, acc_control_offs, tmp);
@@ -2612,31 +2890,52 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
struct nand_chip *chip = &host->chip;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
+ struct nand_memory_organization *memorg =
+ nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
- char msg[128];
+ struct device_node *np = nand_get_flash_node(chip);
u32 offs, tmp, oob_sector;
+ bool use_strap = false;
+ char msg[128];
int ret;
memset(cfg, 0, sizeof(*cfg));
+ use_strap = of_property_read_bool(np, "brcm,nand-ecc-use-strap");
+
+ /*
+ * Either nand-ecc-xxx or brcm,nand-ecc-use-strap can be set. Error out
+ * if both exist.
+ */
+ if (chip->ecc.strength && use_strap) {
+ dev_err(ctrl->dev,
+ "ECC strap and DT ECC configuration properties are mutually exclusive\n");
+ return -EINVAL;
+ }
- ret = of_property_read_u32(nand_get_flash_node(chip),
- "brcm,nand-oob-sector-size",
+ if (use_strap)
+ brcmnand_get_ecc_settings(host, chip);
+
+ ret = of_property_read_u32(np, "brcm,nand-oob-sector-size",
&oob_sector);
if (ret) {
- /* Use detected size */
- cfg->spare_area_size = mtd->oobsize /
- (mtd->writesize >> FC_SHIFT);
+ if (use_strap)
+ cfg->spare_area_size = brcmnand_get_spare_size(host);
+ else
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
} else {
cfg->spare_area_size = oob_sector;
}
if (cfg->spare_area_size > ctrl->max_oob)
cfg->spare_area_size = ctrl->max_oob;
/*
- * Set oobsize to be consistent with controller's spare_area_size, as
- * the rest is inaccessible.
+ * Set mtd and memorg oobsize to be consistent with controller's
+ * spare_area_size, as the rest is inaccessible.
*/
mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+ memorg->oobsize = mtd->oobsize;
cfg->device_size = mtd->size;
cfg->block_size = mtd->erasesize;
@@ -2777,6 +3076,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops brcmnand_controller_ops = {
.attach_chip = brcmnand_attach_chip,
+ .exec_op = brcmnand_exec_op,
};
static int brcmnand_init_cs(struct brcmnand_host *host,
@@ -2801,13 +3101,6 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
- chip->legacy.cmdfunc = brcmnand_cmdfunc;
- chip->legacy.waitfunc = brcmnand_waitfunc;
- chip->legacy.read_byte = brcmnand_read_byte;
- chip->legacy.read_buf = brcmnand_read_buf;
- chip->legacy.write_buf = brcmnand_write_buf;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.read_page = brcmnand_read_page;
chip->ecc.write_page = brcmnand_write_page;
@@ -2819,6 +3112,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
chip->ecc.write_oob = brcmnand_write_oob;
chip->controller = &ctrl->controller;
+ ctrl->controller.controller_wp = 1;
/*
* The bootloader might have configured 16bit mode but
@@ -2925,7 +3219,7 @@ static int brcmnand_resume(struct device *dev)
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- nand_reset_op(chip);
+ nand_reset(chip, 0);
}
return 0;
@@ -3059,6 +3353,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
if (ret)
goto err;
+ /* Only v5.0+ controllers have low level ops support */
+ if (ctrl->nand_version >= 0x0500) {
+ ctrl->check_instr = brcmnand_check_instructions;
+ ctrl->exec_instr = brcmnand_exec_instructions;
+ } else {
+ ctrl->check_instr = brcmnand_check_instructions_legacy;
+ ctrl->exec_instr = brcmnand_exec_instructions_legacy;
+ }
+
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.
@@ -3145,6 +3448,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* Disable XOR addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+ /* Check if the board connects the WP pin */
+ if (of_property_read_bool(dn, "brcm,wp-not-connected"))
+ wp_on = 0;
+
if (ctrl->features & BRCMNAND_HAS_WP) {
/* Permanently disable write protection */
if (wp_on == 2)
@@ -3202,6 +3509,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
ret = brcmnand_init_cs(host, NULL);
if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ goto err;
+ }
devm_kfree(dev, host);
continue; /* Try all chip-selects */
}
@@ -3251,7 +3562,7 @@ err:
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
-int brcmnand_remove(struct platform_device *pdev)
+void brcmnand_remove(struct platform_device *pdev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct brcmnand_host *host;
@@ -3268,8 +3579,6 @@ int brcmnand_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrl->clk);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index f1f93d85f50d..9f171252a2ae 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -24,6 +24,8 @@ struct brcmnand_soc {
void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
bool is_param);
+ void (*read_data_bus)(struct brcmnand_soc *soc, void __iomem *flash_cache,
+ u32 *buffer, int fc_words);
const struct brcmnand_io_ops *ops;
};
@@ -88,7 +90,7 @@ static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
-int brcmnand_remove(struct platform_device *pdev);
+void brcmnand_remove(struct platform_device *pdev);
extern const struct dev_pm_ops brcmnand_pm_ops;
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
index d32950847a62..089c70fc6edf 100644
--- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -103,7 +103,6 @@ static int iproc_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iproc_nand_soc *priv;
struct brcmnand_soc *soc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -112,13 +111,11 @@ static int iproc_nand_probe(struct platform_device *pdev)
spin_lock_init(&priv->idm_lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
- priv->idm_base = devm_ioremap_resource(dev, res);
+ priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm");
if (IS_ERR(priv->idm_base))
return PTR_ERR(priv->idm_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
- priv->ext_base = devm_ioremap_resource(dev, res);
+ priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext");
if (IS_ERR(priv->ext_base))
return PTR_ERR(priv->ext_base);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 034ec564c2ed..5f037753f78c 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -15,8 +15,10 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/of_device.h>
#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
/*
@@ -197,6 +199,7 @@
/* Common settings. */
#define COMMON_SET 0x1008
+#define OPR_MODE_NVDDR BIT(0)
/* 16 bit device connected to the NAND Flash interface. */
#define COMMON_SET_DEVICE_16BIT BIT(8)
@@ -209,12 +212,20 @@
#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
/* Timings configuration. */
+#define TOGGLE_TIMINGS_0 0x1014
+#define TOGGLE_TIMINGS_1 0x1018
+
#define ASYNC_TOGGLE_TIMINGS 0x101c
#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+#define SYNC_TIMINGS 0x1020
+#define SYNC_TCKWR GENMASK(21, 16)
+#define SYNC_TWRCK GENMASK(13, 8)
+#define SYNC_TCAD GENMASK(5, 0)
+
#define TIMINGS0 0x1024
#define TIMINGS0_TADL GENMASK(31, 24)
#define TIMINGS0_TCCS GENMASK(23, 16)
@@ -224,6 +235,7 @@
#define TIMINGS1 0x1028
#define TIMINGS1_TRHZ GENMASK(31, 24)
#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TCWAW GENMASK(15, 8)
#define TIMINGS1_TVDLY GENMASK(7, 0)
#define TIMINGS2 0x102c
@@ -241,14 +253,23 @@
/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING 0x2000
+#define PHY_DQ_TIMING_OE_END GENMASK(2, 0)
+#define PHY_DQ_TIMING_OE_START GENMASK(6, 4)
+#define PHY_DQ_TIMING_TSEL_END GENMASK(11, 8)
+#define PHY_DQ_TIMING_TSEL_START GENMASK(15, 12)
+
/* Register controlling DSQ related timing. */
#define PHY_DQS_TIMING 0x2004
#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_DQS_SEL_OE_START GENMASK(7, 4)
+#define PHY_DQS_TIMING_DQS_SEL_TSEL_END GENMASK(11, 8)
#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_GATE_CFG GENMASK(3, 0)
+#define PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE GENMASK(5, 4)
#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
/* Register holds the control for the master DLL logic. */
@@ -258,6 +279,12 @@
/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL 0x2010
+/* Register controls the DQS related timing. */
+#define PHY_IE_TIMING 0x2014
+#define PHY_IE_TIMING_DQS_IE_START GENMASK(10, 8)
+#define PHY_IE_TIMING_DQ_IE_START GENMASK(18, 16)
+#define PHY_IE_TIMING_IE_ALWAYS_ON BIT(20)
+
/* This register handles the global control settings for the PHY. */
#define PHY_CTRL 0x2080
#define PHY_CTRL_SDR_DQS BIT(14)
@@ -373,15 +400,41 @@
#define BCH_MAX_NUM_CORR_CAPS 8
#define BCH_MAX_NUM_SECTOR_SIZES 2
+/* NVDDR-mode-specific parameters and register values, per the Cadence specs */
+#define NVDDR_PHY_RD_DELAY 29
+#define NVDDR_PHY_RD_DELAY_MAX 31
+#define NVDDR_GATE_CFG_OPT 14
+#define NVDDR_GATE_CFG_STD 7
+#define NVDDR_GATE_CFG_MAX 15
+#define NVDDR_DATA_SEL_OE_START 1
+#define NVDDR_DATA_SEL_OE_START_MAX 7
+#define NVDDR_DATA_SEL_OE_END 6
+#define NVDDR_DATA_SEL_OE_END_MIN 4
+#define NVDDR_DATA_SEL_OE_END_MAX 15
+#define NVDDR_RS_HIGH_WAIT_CNT 7
+#define NVDDR_RS_IDLE_CNT 7
+#define NVDDR_TCWAW_DELAY 250000
+#define NVDDR_TVDLY_DELAY 500000
+#define NVDDR_TOGGLE_TIMINGS_0 0x00000301
+#define NVDDR_TOGGLE_TIMINGS_1 0x0a060102
+#define NVDDR_ASYNC_TOGGLE_TIMINGS 0
+#define NVDDR_PHY_CTRL 0x00004000
+#define NVDDR_PHY_TSEL 0
+#define NVDDR_PHY_DLL_MASTER_CTRL 0x00140004
+#define NVDDR_PHY_DLL_SLAVE_CTRL 0x00003c3c
+
struct cadence_nand_timings {
u32 async_toggle_timings;
+ u32 sync_timings;
u32 timings0;
u32 timings1;
u32 timings2;
u32 dll_phy_ctrl;
u32 phy_ctrl;
+ u32 phy_dq_timing;
u32 phy_dqs_timing;
u32 phy_gate_lpbk_ctrl;
+ u32 phy_ie_timing;
};
/* Command DMA descriptor. */
@@ -469,6 +522,8 @@ struct cdns_nand_ctrl {
struct {
void __iomem *virt;
dma_addr_t dma;
+ dma_addr_t iova_dma;
+ u32 size;
} io;
int irq;
@@ -526,12 +581,7 @@ struct cdns_nand_chip {
/* ECC strength index. */
u8 corr_str_idx;
- u8 cs[];
-};
-
-struct ecc_info {
- int (*calc_ecc_bytes)(int step_size, int strength);
- int max_step_size;
+ u8 cs[] __counted_by(nsels);
};
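The __counted_by() annotation ties the flexible array to its length field so FORTIFY_SOURCE and UBSAN_BOUNDS can bounds-check accesses at runtime. The general pattern, as a sketch with hypothetical names:

struct example_chips {
	unsigned int nsels;		/* must be set before cs[] is indexed */
	u8 cs[] __counted_by(nsels);
};

/* struct_size() (linux/overflow.h) sizes the flexible array safely. */
struct example_chips *e = kzalloc(struct_size(e, cs, n), GFP_KERNEL);
if (e)
	e->nsels = n;	/* now the checker knows the bound for cs[] */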
static inline struct
@@ -1838,11 +1888,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
}
if (dir == DMA_FROM_DEVICE) {
- src_dma = cdns_ctrl->io.dma;
+ src_dma = cdns_ctrl->io.iova_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
- dst_dma = cdns_ctrl->io.dma;
+ dst_dma = cdns_ctrl->io.iova_dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
@@ -1864,12 +1914,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
@@ -1894,7 +1944,7 @@ static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
- /* read alingment data */
+ /* read alignment data */
if (data_dma_width == 4)
ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
#ifdef CONFIG_64BIT
@@ -2346,11 +2396,9 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
}
-static int
-cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
- const struct nand_interface_config *conf)
+static int cadence_nand_setup_sdr_interface(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdr)
{
- const struct nand_sdr_timings *sdr;
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct cadence_nand_timings *t = &cdns_chip->timings;
@@ -2371,13 +2419,8 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
u32 sampling_point;
- sdr = nand_get_sdr_timings(conf);
- if (IS_ERR(sdr))
- return PTR_ERR(sdr);
-
memset(t, 0, sizeof(*t));
/* Sampling point calculation. */
-
if (cdns_ctrl->caps2.is_phy_type_dll)
phony_dqs_mod = 2;
else
@@ -2634,10 +2677,221 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
PHY_DLL_MASTER_CTRL_BYPASS_MODE);
dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
}
+ return 0;
+}
+static int
+cadence_nand_setup_nvddr_interface(struct nand_chip *chip,
+ const struct nand_nvddr_timings *nvddr)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 board_delay = cdns_ctrl->board_delay;
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
+ u32 ddr_clk_ctrl_period = clk_period * 2;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 twrck_cnt, tcad_cnt, tckwr_cnt = 0;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt, tcwaw_cnt;
+ u32 trhw_cnt, twb_cnt, twhr_cnt;
+ u32 oe_start, oe_end, oe_end_dqsd;
+ u32 rd_del_sel = 0;
+ u32 dqs_driven_by_device, dqs_toggle_by_device, gate_open_delay;
+ u32 dll_phy_gate_open_delay, gate_close_delay, ie_start;
+ u32 dll_phy_rd_delay;
+ u32 reg;
+
+ memset(t, 0, sizeof(*t));
+ twrck_cnt = calc_cycl(nvddr->tWRCK_min, ddr_clk_ctrl_period);
+ tcad_cnt = calc_cycl(nvddr->tCAD_min, ddr_clk_ctrl_period);
+
+ reg = FIELD_PREP(SYNC_TWRCK, twrck_cnt);
+ reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt);
+ t->sync_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg);
+
+ tadl_cnt = calc_cycl((nvddr->tADL_min + if_skew), ddr_clk_ctrl_period);
+ tccs_cnt = calc_cycl((nvddr->tCCS_min + if_skew), ddr_clk_ctrl_period);
+ twhr_cnt = calc_cycl((nvddr->tWHR_min + if_skew), ddr_clk_ctrl_period);
+ trhw_cnt = calc_cycl((nvddr->tRHW_min + if_skew), ddr_clk_ctrl_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_NVDDR\t%08x\n", reg);
+
+ twb_cnt = calc_cycl((nvddr->tWB_max + board_delay),
+ ddr_clk_ctrl_period);
+ /*
+ * The value must be increased by two terms: 3 cycles for the
+ * two-stage syncflop, and 5 cycles for the output interface
+ * delay.
+ */
+ twb_cnt = twb_cnt + 3 + 5;
+ tvdly_cnt = calc_cycl(NVDDR_TVDLY_DELAY + if_skew, ddr_clk_ctrl_period);
+ tcwaw_cnt = calc_cycl(NVDDR_TCWAW_DELAY, ddr_clk_ctrl_period);
+ trhz_cnt = 1;
+ reg = FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TCWAW, tcwaw_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_NVDDR\t%08x\n", reg);
+
+ tfeat_cnt = calc_cycl(nvddr->tFEAT_max, ddr_clk_ctrl_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(nvddr->tCEH_min, ddr_clk_ctrl_period);
+ tcs_cnt = calc_cycl((nvddr->tCS_min + if_skew), ddr_clk_ctrl_period);
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_NVDDR\t%08x\n", reg);
+
+ reg = FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, NVDDR_RS_HIGH_WAIT_CNT);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, NVDDR_RS_IDLE_CNT);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_NVDDR\t%08x\n", reg);
+
+ reg = PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_NVDDR\t%08x\n", reg);
+
+ dqs_driven_by_device = (nvddr->tDQSD_max + board_delay) / 1000 +
+ if_skew;
+ dqs_toggle_by_device = (nvddr->tDQSCK_max + board_delay) / 1000 -
+ if_skew;
+ gate_open_delay = dqs_toggle_by_device / (clk_period / 1000);
+ if (dqs_toggle_by_device > clk_period / 1000) {
+ if (gate_open_delay > NVDDR_GATE_CFG_OPT)
+ dll_phy_gate_open_delay = NVDDR_GATE_CFG_MAX;
+ else
+ dll_phy_gate_open_delay = gate_open_delay + 1;
+ gate_close_delay = 0;
+ } else {
+ twrck_cnt = calc_cycl(dqs_driven_by_device * 1000, clk_period);
+ dll_phy_gate_open_delay = 1;
+ gate_close_delay = 0;
+
+ reg = FIELD_PREP(SYNC_TCKWR, tckwr_cnt);
+ reg |= FIELD_PREP(SYNC_TWRCK, twrck_cnt);
+ reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt);
+ t->sync_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg);
+ }
+
+ if (dll_phy_gate_open_delay > NVDDR_GATE_CFG_STD)
+ ie_start = NVDDR_GATE_CFG_STD;
+ else
+ ie_start = dll_phy_gate_open_delay;
+
+ dll_phy_rd_delay = ((nvddr->tDQSCK_max + board_delay) +
+ (clk_period / 2)) / clk_period;
+ if (dll_phy_rd_delay <= NVDDR_PHY_RD_DELAY)
+ rd_del_sel = dll_phy_rd_delay + 2;
+ else
+ rd_del_sel = NVDDR_PHY_RD_DELAY_MAX;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG, dll_phy_gate_open_delay);
+ reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE, gate_close_delay);
+ reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ t->phy_gate_lpbk_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_NVDDR\t%08x\n", reg);
+
+ oe_end_dqsd = ((nvddr->tDQSD_max / 1000) / ((clk_period / 2) / 1000))
+ + NVDDR_DATA_SEL_OE_END_MIN;
+ oe_end = (NVDDR_DATA_SEL_OE_END_MIN + oe_end_dqsd) / 2;
+ if (oe_end > NVDDR_DATA_SEL_OE_END_MAX)
+ oe_end = NVDDR_DATA_SEL_OE_END_MAX;
+
+ oe_start = ((nvddr->tDQSHZ_max / 1000) / ((clk_period / 2) / 1000)) + 1;
+ if (oe_start > NVDDR_DATA_SEL_OE_START_MAX)
+ oe_start = NVDDR_DATA_SEL_OE_START_MAX;
+
+ reg = FIELD_PREP(PHY_DQ_TIMING_OE_END, NVDDR_DATA_SEL_OE_END);
+ reg |= FIELD_PREP(PHY_DQ_TIMING_OE_START, NVDDR_DATA_SEL_OE_START);
+ reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_END, NVDDR_DATA_SEL_OE_END);
+ reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_START, NVDDR_DATA_SEL_OE_START);
+ t->phy_dq_timing = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_NVDDR\t%08x\n", reg);
+
+ reg = FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, oe_end);
+ reg |= FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_START, oe_start);
+ reg |= FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_TSEL_END, oe_end);
+ t->phy_dqs_timing = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_NVDDR\t%08x\n", reg);
+
+ reg = FIELD_PREP(PHY_IE_TIMING_DQS_IE_START, ie_start);
+ reg |= FIELD_PREP(PHY_IE_TIMING_DQ_IE_START, ie_start);
+ reg |= FIELD_PREP(PHY_IE_TIMING_IE_ALWAYS_ON, 0);
+ t->phy_ie_timing = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_IE_TIMING_REG_NVDDR\t%08x\n", reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + DLL_PHY_CTRL);
+ reg &= ~(DLL_PHY_CTRL_DLL_RST_N |
+ DLL_PHY_CTRL_EXTENDED_RD_MODE |
+ DLL_PHY_CTRL_EXTENDED_WR_MODE);
+ writel_relaxed(reg, cdns_ctrl->reg + DLL_PHY_CTRL);
+ writel_relaxed(OPR_MODE_NVDDR, cdns_ctrl->reg + COMMON_SET);
+ writel_relaxed(NVDDR_TOGGLE_TIMINGS_0,
+ cdns_ctrl->reg + TOGGLE_TIMINGS_0);
+ writel_relaxed(NVDDR_TOGGLE_TIMINGS_1,
+ cdns_ctrl->reg + TOGGLE_TIMINGS_1);
+ writel_relaxed(NVDDR_ASYNC_TOGGLE_TIMINGS,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->sync_timings, cdns_ctrl->reg + SYNC_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+ writel_relaxed(NVDDR_PHY_TSEL, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(t->phy_dq_timing, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing, cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(NVDDR_PHY_DLL_MASTER_CTRL,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(NVDDR_PHY_DLL_SLAVE_CTRL,
+ cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ writel_relaxed(t->phy_ie_timing, cdns_ctrl->reg + PHY_IE_TIMING);
+ writel_relaxed((reg | DLL_PHY_CTRL_DLL_RST_N),
+ cdns_ctrl->reg + DLL_PHY_CTRL);
return 0;
}
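Throughout the NVDDR path, register values are assembled with FIELD_PREP() against the GENMASK() field definitions added above, which keeps hand-written shifts out of the code. A tiny self-contained sketch of the idiom, with made-up field names:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_TIMINGS_TWRCK	GENMASK(13, 8)
#define EX_TIMINGS_TCAD		GENMASK(5, 0)

static u32 example_pack_sync(u32 twrck_cnt, u32 tcad_cnt)
{
	/* FIELD_PREP() shifts each value into its mask's position. */
	return FIELD_PREP(EX_TIMINGS_TWRCK, twrck_cnt) |
	       FIELD_PREP(EX_TIMINGS_TCAD, tcad_cnt);
}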
+static int
+cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ int ret = 0;
+
+ if (chipnr < 0)
+ return ret;
+
+ if (nand_interface_is_sdr(conf)) {
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ ret = cadence_nand_setup_sdr_interface(chip, sdr);
+ } else {
+ const struct nand_nvddr_timings *nvddr = nand_get_nvddr_timings(conf);
+
+ if (IS_ERR(nvddr))
+ return PTR_ERR(nvddr);
+
+ ret = cadence_nand_setup_nvddr_interface(chip, nvddr);
+ }
+ return ret;
+}
+
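The early return for chipnr < 0 follows the rawnand convention: the core first probes a configuration with NAND_DATA_IFACE_CHECK_ONLY (-1), expecting the driver to only validate, and then calls again to apply it on a real chip-select. A sketch of that calling pattern as a driver sees it (the actual negotiation lives in nand_base.c):

/* Sketch: assumed flow of the core's two-phase interface setup. */
static int example_negotiate(struct nand_chip *chip, int chipnr,
			     const struct nand_interface_config *conf)
{
	int ret;

	/* dry run: the driver must not touch the hardware here */
	ret = chip->controller->ops->setup_interface(chip,
				NAND_DATA_IFACE_CHECK_ONLY, conf);
	if (ret)
		return ret;

	/* real call: apply the timings for this chip-select */
	return chip->controller->ops->setup_interface(chip, chipnr, conf);
}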
static int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
@@ -2839,7 +3093,6 @@ static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
struct device_node *np = cdns_ctrl->dev->of_node;
- struct device_node *nand_np;
int max_cs = cdns_ctrl->caps2.max_banks;
int nchips, ret;
@@ -2852,10 +3105,9 @@ static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
if (ret) {
- of_node_put(nand_np);
cadence_nand_chips_cleanup(cdns_ctrl);
return ret;
}
@@ -2874,6 +3126,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
+ struct dma_device *dma_dev;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2909,15 +3162,25 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
- if (!cdns_ctrl->dmac) {
- dev_err(cdns_ctrl->dev,
- "Unable to get a DMA channel\n");
- ret = -EBUSY;
+ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cdns_ctrl->dmac)) {
+ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
+ "Failed to get a DMA channel\n");
goto disable_irq;
}
}
+ dma_dev = cdns_ctrl->dmac->device;
+ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
+ cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
+
+ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
+ goto dma_release_chnl;
+ }
+
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
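dma_map_resource() is the right primitive here because the controller's I/O FIFO is MMIO, not RAM: it produces an IOVA for a phys_addr_t so the external DMA engine can reach the FIFO through an IOMMU. The general pattern, as a sketch with assumed names:

/* Sketch: map an MMIO FIFO as a slave-DMA target. */
dma_addr_t iova;

iova = dma_map_resource(dma_dev->dev, fifo_phys /* phys_addr_t */,
			fifo_size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dma_dev->dev, iova))
	return -ENOMEM;

/* ... point dmaengine_prep_dma_memcpy() at 'iova' ... */

dma_unmap_resource(dma_dev->dev, iova, fifo_size, DMA_BIDIRECTIONAL, 0);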
@@ -2928,18 +3191,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
return 0;
+unmap_dma_resource:
+ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
@@ -2961,6 +3228,10 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
+ if (cdns_ctrl->dmac)
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev,
+ cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
@@ -2995,15 +3266,11 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
struct cadence_nand_dt *dt;
struct cdns_nand_ctrl *cdns_ctrl;
int ret;
- const struct of_device_id *of_id;
const struct cadence_nand_dt_devdata *devdata;
u32 val;
- of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
- if (of_id) {
- ofdev->id_entry = of_id->data;
- devdata = of_id->data;
- } else {
+ devdata = device_get_match_data(&ofdev->dev);
+ if (!devdata) {
pr_err("Failed to find the right device id.\n");
return -ENOMEM;
}
@@ -3029,7 +3296,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+
cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.size = resource_size(res);
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
@@ -3064,7 +3333,7 @@ static void cadence_nand_dt_remove(struct platform_device *ofdev)
static struct platform_driver cadence_nand_dt_driver = {
.probe = cadence_nand_dt_probe,
- .remove_new = cadence_nand_dt_remove,
+ .remove = cadence_nand_dt_remove,
.driver = {
.name = "cadence-nand-controller",
.of_match_table = cadence_nand_dt_ids,
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index f0a15717cf05..ec95d787001b 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -26,7 +26,7 @@
#define NR_CS553X_CONTROLLERS 4
-#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilitiies */
+#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilities */
#define CAP_CS5535 0x2df000ULL
#define CAP_CS5536 0x5df500ULL
@@ -351,20 +351,20 @@ static int __init cs553x_init(void)
return -ENXIO;
/* If it doesn't have the CS553[56], abort */
- rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ rdmsrq(MSR_DIVIL_GLD_CAP, val);
val &= ~0xFFULL;
if (val != CAP_CS5535 && val != CAP_CS5536)
return -ENXIO;
/* If it doesn't have the NAND controller enabled, abort */
- rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ rdmsrq(MSR_DIVIL_BALL_OPTS, val);
if (val & PIN_OPT_IDE) {
pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
return -ENXIO;
}
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
- rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+ rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val);
if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 415d6aaa8255..3986553881d0 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -10,19 +10,87 @@
* Dirk Behme <Dirk.Behme@gmail.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iopoll.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
+#include <linux/module.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mtd-davinci-aemif.h>
+#define NRCSR_OFFSET 0x00
+#define NANDFCR_OFFSET 0x60
+#define NANDFSR_OFFSET 0x64
+#define NANDF1ECC_OFFSET 0x70
+
+/* 4-bit ECC syndrome registers */
+#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
+#define NAND_4BIT_ECC1_OFFSET 0xc0
+#define NAND_4BIT_ECC2_OFFSET 0xc4
+#define NAND_4BIT_ECC3_OFFSET 0xc8
+#define NAND_4BIT_ECC4_OFFSET 0xcc
+#define NAND_ERR_ADD1_OFFSET 0xd0
+#define NAND_ERR_ADD2_OFFSET 0xd4
+#define NAND_ERR_ERRVAL1_OFFSET 0xd8
+#define NAND_ERR_ERRVAL2_OFFSET 0xdc
+
+/* NOTE: boards don't need to use these address bits
+ * for ALE/CLE unless they support booting from NAND.
+ * They're used unless platform data overrides them.
+ */
+#define MASK_ALE 0x08
+#define MASK_CLE 0x10
+
+#define MAX_TSU_PS 3000 /* Input setup time in ps */
+#define MAX_TH_PS 1600 /* Input hold time in ps */
+
+struct davinci_nand_pdata {
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ /*
+ * 0-indexed chip-select number of the asynchronous
+ * interface to which the NAND device has been connected.
+ *
+ * So, if you have NAND connected to CS3 of DA850, you
+ * will pass '1' here. Since the asynchronous interface
+ * on DA850 starts from CS2.
+ */
+ uint32_t core_chipsel;
+
+ /* for packages using two chipselects */
+ uint32_t mask_chipsel;
+
+ /* board's default static partition info */
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+
+ /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * on-die == NAND_ECC_ENGINE_TYPE_ON_DIE
+ * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+ * Newer ones also support 4-bit ECC, but are awkward
+ * using it with large page chips.
+ */
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement ecc_placement;
+ u8 ecc_bits;
+
+ /* e.g. NAND_BUSWIDTH_16 */
+ unsigned int options;
+ /* e.g. NAND_BBT_USE_FLASH */
+ unsigned int bbt_options;
+
+ /* Main and mirror bbt descriptor overrides */
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+};
/*
* This is a device driver for the NAND flash controller found on the
@@ -56,7 +124,8 @@ struct davinci_nand_info {
uint32_t core_chipsel;
- struct davinci_aemif_timing *timing;
+ struct clk *clk;
+ struct aemif_device *aemif;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -419,6 +488,44 @@ static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
.free = hwecc4_ooblayout_small_free,
};
+static int hwecc4_ooblayout_large_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ if (section >= nregions)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_large_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ /* First region is used for BBT */
+ if (section >= (nregions - 1))
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16);
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_large_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_large_ecc,
+ .free = hwecc4_ooblayout_large_free,
+};
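Consumers never index the OOB area directly; they walk the layout through the mtd_ooblayout_*() helpers, which call back into the ops added above. A sketch of iterating the ECC regions a layout exposes:

/* Sketch: walk the ECC regions published by an mtd_ooblayout_ops. */
static void example_dump_ecc_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0;

	/* mtd_ooblayout_ecc() returns -ERANGE past the last region */
	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ECC region %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}
}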
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -427,10 +534,10 @@ static const struct of_device_id davinci_nand_of_match[] = {
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
-static struct davinci_nand_pdata
- *nand_davinci_get_pdata(struct platform_device *pdev)
+static struct davinci_nand_pdata *
+nand_davinci_get_pdata(struct platform_device *pdev)
{
- if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+ if (!dev_get_platdata(&pdev->dev)) {
struct davinci_nand_pdata *pdata;
const char *mode;
u32 prop;
@@ -441,40 +548,44 @@ static struct davinci_nand_pdata
pdev->dev.platform_data = pdata;
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-chipselect", &prop))
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-chipselect", &prop))
pdata->core_chipsel = prop;
else
return ERR_PTR(-EINVAL);
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-mask-ale", &prop))
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-mask-ale", &prop))
pdata->mask_ale = prop;
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-mask-cle", &prop))
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-mask-cle", &prop))
pdata->mask_cle = prop;
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-mask-chipsel", &prop))
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-mask-chipsel", &prop))
pdata->mask_chipsel = prop;
- if (!of_property_read_string(pdev->dev.of_node,
- "ti,davinci-ecc-mode", &mode)) {
+ if (!device_property_read_string(&pdev->dev,
+ "ti,davinci-ecc-mode",
+ &mode)) {
if (!strncmp("none", mode, 4))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
if (!strncmp("soft", mode, 4))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ if (!strncmp("on-die", mode, 6))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
}
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-ecc-bits", &prop))
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-ecc-bits", &prop))
pdata->ecc_bits = prop;
- if (!of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-nand-buswidth", &prop) && prop == 16)
+ if (!device_property_read_u32(&pdev->dev,
+ "ti,davinci-nand-buswidth",
+ &prop) && prop == 16)
pdata->options |= NAND_BUSWIDTH_16;
- if (of_property_read_bool(pdev->dev.of_node,
- "ti,davinci-nand-use-bbt"))
+ if (device_property_read_bool(&pdev->dev,
+ "ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
/*
@@ -488,17 +599,15 @@ static struct davinci_nand_pdata
* then use "ti,davinci-nand" as the compatible in your
* device-tree file.
*/
- if (of_device_is_compatible(pdev->dev.of_node,
- "ti,keystone-nand")) {
+ if (device_is_compatible(&pdev->dev, "ti,keystone-nand"))
pdata->options |= NAND_NO_SUBPAGE_WRITE;
- }
}
return dev_get_platdata(&pdev->dev);
}
#else
-static struct davinci_nand_pdata
- *nand_davinci_get_pdata(struct platform_device *pdev)
+static struct davinci_nand_pdata *
+nand_davinci_get_pdata(struct platform_device *pdev)
{
return dev_get_platdata(&pdev->dev);
}
@@ -520,6 +629,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
pdata->ecc_bits = 0;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
@@ -578,9 +688,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd,
- nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM)
+ mtd_set_ooblayout(mtd, &hwecc4_large_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
} else {
return -EIO;
}
@@ -664,7 +777,7 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
case NAND_OP_WAITRDY_INSTR:
timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
- status, status & BIT(0), 100,
+ status, status & BIT(0), 5,
timeout_us);
if (ret)
return ret;
@@ -672,8 +785,11 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
break;
}
- if (instr->delay_ns)
+ if (instr->delay_ns) {
+ /* Dummy read to be sure that command is sent before ndelay starts */
+ davinci_nand_readl(info, 0);
ndelay(instr->delay_ns);
+ }
return 0;
}
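The dummy read added above is the standard fix for posted MMIO writes: the command write may still sit in a bus write buffer when ndelay() starts, so reading any register on the same path forces it out before the delay is measured. In general form, a sketch with hypothetical register names:

/* Sketch: flush a posted MMIO write before a timing-critical delay. */
writel(cmd, regs + EX_CMD_REG);	/* may be buffered by the bus */
(void)readl(regs + EX_CMD_REG);	/* read-back forces the write to land */
ndelay(delay_ns);		/* delay now covers the real event */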
@@ -701,9 +817,82 @@ static int davinci_nand_exec_op(struct nand_chip *chip,
return 0;
}
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
+
+static int davinci_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ const struct nand_sdr_timings *sdr;
+ struct aemif_cs_timings timings;
+ s32 cfg, min, cyc_ns;
+ int ret;
+
+ cyc_ns = 1000000000 / clk_get_rate(info->clk);
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ cfg = TO_CYCLES(sdr->tCLR_min, cyc_ns) - 1;
+ timings.rsetup = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tREA_max + MAX_TSU_PS, cyc_ns),
+ TO_CYCLES(sdr->tRP_min, cyc_ns)) - 1;
+ timings.rstrobe = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tCEA_max + MAX_TSU_PS, cyc_ns) - 2;
+ while ((s32)(timings.rsetup + timings.rstrobe) < min)
+ timings.rstrobe++;
+
+ cfg = TO_CYCLES((s32)(MAX_TH_PS - sdr->tCHZ_max), cyc_ns) - 1;
+ timings.rhold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tRC_min, cyc_ns) - 3;
+ while ((s32)(timings.rsetup + timings.rstrobe + timings.rhold) < min)
+ timings.rhold++;
+
+ cfg = TO_CYCLES((s32)(sdr->tRHZ_max - (timings.rhold + 1) * cyc_ns * 1000), cyc_ns);
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCHZ_max, cyc_ns)) - 1;
+ timings.ta = cfg > 0 ? cfg : 0;
+
+ cfg = TO_CYCLES(sdr->tWP_min, cyc_ns) - 1;
+ timings.wstrobe = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLS_min, cyc_ns), TO_CYCLES(sdr->tALS_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCS_min, cyc_ns)) - 1;
+ timings.wsetup = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tDS_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe) < min)
+ timings.wstrobe++;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLH_min, cyc_ns), TO_CYCLES(sdr->tALH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tDH_min, cyc_ns)) - 1;
+ timings.whold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tWC_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe + timings.whold) < min)
+ timings.whold++;
+
+ dev_dbg(&info->pdev->dev, "RSETUP %x RSTROBE %x RHOLD %x\n",
+ timings.rsetup, timings.rstrobe, timings.rhold);
+ dev_dbg(&info->pdev->dev, "TA %x\n", timings.ta);
+ dev_dbg(&info->pdev->dev, "WSETUP %x WSTROBE %x WHOLD %x\n",
+ timings.wsetup, timings.wstrobe, timings.whold);
+
+ ret = aemif_check_cs_timings(&timings);
+ if (ret || chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return ret;
+
+ return aemif_set_cs_timings(info->aemif, info->core_chipsel, &timings);
+}
+
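TO_CYCLES() converts a picosecond constraint into whole AEMIF clock cycles, rounding up so the datasheet minimum is never violated; the register fields then count from zero, hence the trailing "- 1". A worked instance under an assumed 125 MHz AEMIF clock:

/* Assumed 125 MHz AEMIF clock: cyc_ns = 1000000000 / 125000000 = 8 */
u32 cyc_ns = 8;
u32 tWP_min_ps = 25000;	/* example datasheet value, 25 ns */

/* TO_CYCLES(25000, 8) = DIV_ROUND_UP(25000 / 1000, 8) = 4 cycles */
u32 wstrobe = TO_CYCLES(tWP_min_ps, cyc_ns) - 1;	/* = 3 */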
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
.exec_op = davinci_nand_exec_op,
+ .setup_interface = davinci_nand_setup_interface,
};
static int nand_davinci_probe(struct platform_device *pdev)
@@ -759,9 +948,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
+ info->clk = devm_clk_get_enabled(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get clock");
+
info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
+ info->aemif = dev_get_drvdata(pdev->dev.parent);
mtd = nand_to_mtd(&info->chip);
mtd->dev.parent = &pdev->dev;
@@ -773,7 +967,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
- info->timing = pdata->timing;
info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
@@ -839,7 +1032,7 @@ static void nand_davinci_remove(struct platform_device *pdev)
static struct platform_driver nand_davinci_driver = {
.probe = nand_davinci_probe,
- .remove_new = nand_davinci_remove,
+ .remove = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
.of_match_table = of_match_ptr(davinci_nand_of_match),
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index ac46eb7956ce..5f2fab022fc5 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@ struct denali_chip {
struct nand_chip chip;
struct list_head node;
unsigned int nsels;
- struct denali_chip_sel sels[];
+ struct denali_chip_sel sels[] __counted_by(nsels);
};
/**
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 915047e3fbc2..e0dd59bba4bd 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -146,15 +145,15 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- dt->clk = devm_clk_get(dev, "nand");
+ dt->clk = devm_clk_get_enabled(dev, "nand");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
- dt->clk_x = devm_clk_get(dev, "nand_x");
+ dt->clk_x = devm_clk_get_enabled(dev, "nand_x");
if (IS_ERR(dt->clk_x))
return PTR_ERR(dt->clk_x);
- dt->clk_ecc = devm_clk_get(dev, "ecc");
+ dt->clk_ecc = devm_clk_get_enabled(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
@@ -166,18 +165,6 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->rst_reg))
return PTR_ERR(dt->rst_reg);
- ret = clk_prepare_enable(dt->clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(dt->clk_x);
- if (ret)
- goto out_disable_clk;
-
- ret = clk_prepare_enable(dt->clk_ecc);
- if (ret)
- goto out_disable_clk_x;
-
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
@@ -188,7 +175,7 @@ static int denali_dt_probe(struct platform_device *pdev)
*/
ret = reset_control_deassert(dt->rst_reg);
if (ret)
- goto out_disable_clk_ecc;
+ return ret;
ret = reset_control_deassert(dt->rst);
if (ret)
@@ -223,12 +210,6 @@ out_assert_rst:
reset_control_assert(dt->rst);
out_assert_rst_reg:
reset_control_assert(dt->rst_reg);
-out_disable_clk_ecc:
- clk_disable_unprepare(dt->clk_ecc);
-out_disable_clk_x:
- clk_disable_unprepare(dt->clk_x);
-out_disable_clk:
- clk_disable_unprepare(dt->clk);
return ret;
}
@@ -240,14 +221,11 @@ static void denali_dt_remove(struct platform_device *pdev)
denali_remove(&dt->controller);
reset_control_assert(dt->rst);
reset_control_assert(dt->rst_reg);
- clk_disable_unprepare(dt->clk_ecc);
- clk_disable_unprepare(dt->clk_x);
- clk_disable_unprepare(dt->clk);
}
static struct platform_driver denali_dt_driver = {
.probe = denali_dt_probe,
- .remove_new = denali_dt_remove,
+ .remove = denali_dt_remove,
.driver = {
.name = "denali-nand-dt",
.of_match_table = denali_nand_dt_ids,
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index de7e722d3826..97fa32d73441 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ ret = pcim_request_all_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5d2ddb037a9a..70d6c2250f32 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
(i == 0) && (ip->firstUnit > 0)) {
parts[0].name = " DiskOnChip IPL / Media Header partition";
parts[0].offset = 0;
- parts[0].size = mtd->erasesize * ip->firstUnit;
+ parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit;
numparts = 1;
}
@@ -1491,10 +1491,12 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
- /* DBB note: i believe nand_cleanup is necessary here, as
- buffers may have been allocated in nand_base. Check with
- Thomas. FIX ME! */
+ ret = nand_scan(nand, numchips);
+ if (ret)
+ goto fail;
+
+ ret = doc->late_init(mtd);
+ if (ret) {
nand_cleanup(nand);
goto fail;
}
@@ -1552,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
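Dropping the 0xffffffff terminator in favor of ARRAY_SIZE() removes the risk of walking past the table if the sentinel is ever lost in an edit. The two idioms side by side, as a sketch:

/* Before: sentinel-terminated walk, fragile if the terminator is lost */
for (i = 0; table[i] != 0xffffffff; i++)
	probe_one(table[i]);

/* After: bound derived from the array itself at compile time */
for (i = 0; i < ARRAY_SIZE(table); i++)
	probe_one(table[i]);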
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 1e3a80f06f33..03dbe37df021 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -869,7 +869,8 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER, "lbc_ctrl_dev missing\n");
+
lbc = fsl_lbc_ctrl_dev->regs;
dev = fsl_lbc_ctrl_dev->dev;
@@ -998,7 +999,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
.of_match_table = fsl_elbc_nand_match,
},
.probe = fsl_elbc_nand_probe,
- .remove_new = fsl_elbc_nand_remove,
+ .remove = fsl_elbc_nand_remove,
};
module_platform_driver(fsl_elbc_nand_driver);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index fa537fee6701..7be95d0be248 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
@@ -20,7 +21,7 @@
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
for IFC NAND Machine */
struct fsl_ifc_ctrl;
@@ -1129,7 +1130,7 @@ static struct platform_driver fsl_ifc_nand_driver = {
.of_match_table = fsl_ifc_nand_match,
},
.probe = fsl_ifc_nand_probe,
- .remove_new = fsl_ifc_nand_remove,
+ .remove = fsl_ifc_nand_remove,
};
module_platform_driver(fsl_ifc_nand_driver);
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 7366e85c09fd..f4dc990a8da1 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -13,7 +13,8 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/fsl_lbc.h>
@@ -172,8 +173,7 @@ static int fun_probe(struct platform_device *ofdev)
if (!fun)
return -ENOMEM;
- io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+ fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res);
if (IS_ERR(fun->io_base))
return PTR_ERR(fun->io_base);
@@ -259,7 +259,7 @@ static struct platform_driver of_fun_driver = {
.of_match_table = of_fun_match,
},
.probe = fun_probe,
- .remove_new = fun_remove,
+ .remove = fun_remove,
};
module_platform_driver(of_fun_driver);
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 7b4742420dfc..b13b2b0c3f30 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+ if (dma_mapping_error(dma_dev->dev, dma_addr))
+ return -EINVAL;
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
@@ -874,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
if (!of_property_read_u32(np, "bank-width", &val)) {
if (val == 2) {
nand->options |= NAND_BUSWIDTH_16;
- } else if (val != 1) {
+ } else if (val == 1) {
+ nand->options |= NAND_BUSWIDTH_AUTO;
+ } else {
dev_err(&pdev->dev, "invalid bank-width %u\n", val);
return -EINVAL;
}
+ } else {
+ nand->options |= NAND_BUSWIDTH_AUTO;
}
if (of_property_read_bool(np, "nand-skip-bbtscan"))
@@ -1066,16 +1072,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->regs_va = base + FSMC_NOR_REG_SIZE +
(host->bank * FSMC_NAND_BANK_SZ);
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
return PTR_ERR(host->clk);
}
- ret = clk_prepare_enable(host->clk);
- if (ret)
- return ret;
-
/*
* This device ID is actually a common AMBA ID as used on the
* AMBA PrimeCell bus. However it is not a PrimeCell.
@@ -1111,7 +1113,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
ret = -ENODEV;
- goto disable_clk;
+ goto disable_fsmc;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
@@ -1155,9 +1157,8 @@ release_dma_write_chan:
release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
-disable_clk:
+disable_fsmc:
fsmc_nand_disable(host);
- clk_disable_unprepare(host->clk);
return ret;
}
@@ -1182,7 +1183,6 @@ static void fsmc_nand_remove(struct platform_device *pdev)
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
- clk_disable_unprepare(host->clk);
}
}
@@ -1200,9 +1200,14 @@ static int fsmc_nand_suspend(struct device *dev)
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
if (host) {
- clk_prepare_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
nand_reset(&host->nand, 0);
@@ -1222,7 +1227,7 @@ static const struct of_device_id fsmc_nand_id_table[] = {
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
static struct platform_driver fsmc_nand_driver = {
- .remove_new = fsmc_nand_remove,
+ .remove = fsmc_nand_remove,
.driver = {
.name = "fsmc-nand",
.of_match_table = fsmc_nand_id_table,
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index d6cc2cb65214..69e5e43532a4 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -392,7 +392,7 @@ out_ce:
static struct platform_driver gpio_nand_driver = {
.probe = gpio_nand_probe,
- .remove_new = gpio_nand_remove,
+ .remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
.of_match_table = of_match_ptr(gpio_nand_id_table),
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 500e7a28d2e4..51f595fbc834 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -13,9 +13,11 @@
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/dma/mxs-dma.h>
+#include <linux/string_choices.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
@@ -143,6 +145,9 @@ err_clk:
return ret;
}
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
+
static int gpmi_init(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
@@ -186,7 +191,6 @@ static int gpmi_init(struct gpmi_nand_data *this)
r->gpmi_regs + HW_GPMI_CTRL1_SET);
err_out:
- pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
}
@@ -737,9 +741,8 @@ static int bch_set_geometry(struct gpmi_nand_data *this)
if (ret)
return ret;
- ret = pm_runtime_get_sync(this->dev);
+ ret = pm_runtime_resume_and_get(this->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(this->dev);
return ret;
}
@@ -757,7 +760,6 @@ static int bch_set_geometry(struct gpmi_nand_data *this)
ret = 0;
err_out:
- pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
@@ -983,7 +985,7 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
return PTR_ERR(sdr);
/* Only MX28/MX6 GPMI controller can reach EDO timings */
- if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
+ if (sdr->tRC_min <= 25000 && !this->devdata->support_edo_timing)
return -ENOTSUPP;
/* Stop here if this call was just a check */
@@ -1142,6 +1144,7 @@ static const struct gpmi_devdata gpmi_devdata_imx28 = {
.type = IS_MX28,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx2x,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};
@@ -1154,6 +1157,7 @@ static const struct gpmi_devdata gpmi_devdata_imx6q = {
.type = IS_MX6Q,
.bch_max_ecc_strength = 40,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
@@ -1162,6 +1166,7 @@ static const struct gpmi_devdata gpmi_devdata_imx6sx = {
.type = IS_MX6SX,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
@@ -1174,10 +1179,24 @@ static const struct gpmi_devdata gpmi_devdata_imx7d = {
.type = IS_MX7D,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx7d,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
+static const char *gpmi_clks_for_mx8qxp[GPMI_CLK_MAX] = {
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx8qxp = {
+ .type = IS_MX8QXP,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .support_edo_timing = true,
+ .clks = gpmi_clks_for_mx8qxp,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx8qxp),
+};
+
static int acquire_register_block(struct gpmi_nand_data *this,
const char *res_name)
{
@@ -2302,8 +2321,8 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
+ dev_dbg(this->dev, "Blockmark swapping %s\n",
+ str_enabled_disabled(this->swap_block_mark));
ret = gpmi_init_last(this);
if (ret)
@@ -2646,7 +2665,6 @@ unmap:
this->bch = false;
out_pm:
- pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
@@ -2721,6 +2739,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
{ .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
{ .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
{ .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
+ { .compatible = "fsl,imx8qxp-gpmi-nand", .data = &gpmi_devdata_imx8qxp, },
{}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -2743,15 +2762,14 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (ret)
goto exit_acquire_resources;
- ret = __gpmi_enable_clk(this, true);
- if (ret)
- goto exit_acquire_resources;
-
+ pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+#ifndef CONFIG_PM
+ ret = gpmi_enable_clk(this);
+ if (ret)
+ goto exit_acquire_resources;
+#endif
ret = gpmi_init(this);
if (ret)
@@ -2761,15 +2779,12 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (ret)
goto exit_nfc_init;
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
-
dev_info(this->dev, "driver registered.\n");
return 0;
exit_nfc_init:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
release_resources(this);
exit_acquire_resources:
@@ -2783,23 +2798,26 @@ static void gpmi_nand_remove(struct platform_device *pdev)
struct nand_chip *chip = &this->nand;
int ret;
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
gpmi_free_dma_buffer(this);
release_resources(this);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+#ifndef CONFIG_PM
+ gpmi_disable_clk(this);
+#endif
}
-#ifdef CONFIG_PM_SLEEP
static int gpmi_pm_suspend(struct device *dev)
{
- struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
- release_dma_channels(this);
- return 0;
+ pinctrl_pm_select_sleep_state(dev);
+ ret = pm_runtime_force_suspend(dev);
+
+ return ret;
}
static int gpmi_pm_resume(struct device *dev)
@@ -2807,9 +2825,13 @@ static int gpmi_pm_resume(struct device *dev)
struct gpmi_nand_data *this = dev_get_drvdata(dev);
int ret;
- ret = acquire_dma_channels(this);
- if (ret < 0)
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(this->dev, "Error in resume %d\n", ret);
return ret;
+ }
+
+ pinctrl_pm_select_default_state(dev);
/* re-init the GPMI registers */
ret = gpmi_init(this);
@@ -2831,35 +2853,42 @@ static int gpmi_pm_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
+static int gpmi_runtime_suspend(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
- return __gpmi_enable_clk(this, false);
+ gpmi_disable_clk(this);
+
+ return 0;
}
-static int __maybe_unused gpmi_runtime_resume(struct device *dev)
+static int gpmi_runtime_resume(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
+
+ ret = gpmi_enable_clk(this);
+ if (ret)
+ return ret;
+
+ return 0;
- return __gpmi_enable_clk(this, true);
}
static const struct dev_pm_ops gpmi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
- SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+ RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
- .pm = &gpmi_pm_ops,
+ .pm = pm_ptr(&gpmi_pm_ops),
.of_match_table = gpmi_nand_id_table,
},
- .probe = gpmi_nand_probe,
- .remove_new = gpmi_nand_remove,
+ .probe = gpmi_nand_probe,
+ .remove = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
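The reworked sleep path is an instance of a common runtime-PM idiom: let the runtime callbacks own the clock handling, and have system suspend/resume reuse them through pm_runtime_force_suspend()/pm_runtime_force_resume(). A hedged sketch of the shape this produces (the foo_* names are hypothetical):

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, quiesce the controller */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ungate clocks */
	return 0;
}

static int foo_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	/* runs foo_runtime_suspend() if the device is still active */
	return pm_runtime_force_suspend(dev);
}

static int foo_resume(struct device *dev)
{
	int ret = pm_runtime_force_resume(dev);

	if (ret)
		return ret;
	pinctrl_pm_select_default_state(dev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

Unlike the SET_*_PM_OPS macros, the unprefixed variants pair with pm_ptr(), so the whole ops table is discarded when CONFIG_PM is disabled; that is also why the __maybe_unused annotations on the callbacks can be dropped.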
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index c3ff56ac62a7..3e9bc985e44a 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -78,6 +78,7 @@ enum gpmi_type {
IS_MX6Q,
IS_MX6SX,
IS_MX7D,
+ IS_MX8QXP,
};
struct gpmi_devdata {
@@ -86,6 +87,7 @@ struct gpmi_devdata {
int max_chain_delay; /* See the SDR EDO mode */
const char * const *clks;
const int clks_count;
+ bool support_edo_timing;
};
/**
@@ -172,8 +174,10 @@ struct gpmi_nand_data {
#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D)
+#define GPMI_IS_MX8QXP(x) ((x)->devdata->type == IS_MX8QXP)
#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
- GPMI_IS_MX7D(x))
+ GPMI_IS_MX7D(x) || GPMI_IS_MX8QXP(x))
+
#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
#endif
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index fe291a2e5c77..d97270ec1185 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -858,7 +858,7 @@ static struct platform_driver hisi_nfc_driver = {
.pm = &hisi_nfc_pm_ops,
},
.probe = hisi_nfc_probe,
- .remove_new = hisi_nfc_remove,
+ .remove = hisi_nfc_remove,
};
module_platform_driver(hisi_nfc_driver);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index 9054559e52dd..525c34c281b6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index b9f135297aa0..47dc3efcee92 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -47,7 +46,7 @@ struct ingenic_nfc {
struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
- struct ingenic_nand_cs cs[];
+ struct ingenic_nand_cs cs[] __counted_by(num_banks);
};
struct ingenic_nand {
@@ -381,18 +380,6 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
return ret;
}
- /*
- * The rb-gpios semantics was undocumented and qi,lb60 (along with
- * the ingenic driver) got it wrong. The active state encodes the
- * NAND ready state, which is high level. Since there's no signal
- * inverter on this board, it should be active-high. Let's fix that
- * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
- * helper, and be consistent with what other drivers do.
- */
- if (of_machine_is_compatible("qi,lb60") &&
- gpiod_is_active_low(nand->busy_gpio))
- gpiod_toggle_active_low(nand->busy_gpio);
-
nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
if (IS_ERR(nand->wp_gpio)) {
@@ -562,7 +549,7 @@ MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
static struct platform_driver ingenic_nand_driver = {
.probe = ingenic_nand_probe,
- .remove_new = ingenic_nand_remove,
+ .remove = ingenic_nand_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ingenic_nand_dt_match,
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index a9909eb08124..01cefdaf115d 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define EBU_CLC 0x000
#define EBU_CLC_RST 0x00000000u
@@ -295,7 +295,7 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
dma_addr_t buf_dma;
int ret;
- u32 timeout;
+ unsigned long time_left;
if (dir == DMA_DEV_TO_MEM) {
chan = ebu_host->dma_rx;
@@ -335,8 +335,8 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
dma_async_issue_pending(chan);
/* Wait for the DMA to finish the data transfer. */
- timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!time_left) {
dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n",
dmaengine_tx_status(chan, cookie, NULL));
dmaengine_terminate_sync(chan);
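The rename reflects what wait_for_completion_timeout() actually returns: an unsigned long that is 0 on timeout and otherwise the number of jiffies remaining, so a u32 named "timeout" was doubly misleading. The contract, as a short sketch:

	unsigned long time_left;

	time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(1000));
	if (!time_left)
		return -ETIMEDOUT;	/* 0 means the wait expired */
	/* else: completed with 'time_left' jiffies to spare */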
@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->cs_num = cs;
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_of_node_put;
+ }
+
ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
resname);
if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
@@ -626,16 +631,10 @@ static int ebu_nand_probe(struct platform_device *pdev)
goto err_of_node_put;
}
- ebu_host->clk = devm_clk_get(dev, NULL);
+ ebu_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ebu_host->clk)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
- "failed to get clock\n");
- goto err_of_node_put;
- }
-
- ret = clk_prepare_enable(ebu_host->clk);
- if (ret) {
- dev_err(dev, "failed to enable clock: %d\n", ret);
+ "failed to get and enable clock\n");
goto err_of_node_put;
}
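devm_clk_get_enabled() behaves like devm_clk_get() followed by clk_prepare_enable(), and registers a devres action that disables and unprepares the clock when the driver detaches. That is why the err_disable_unprepare_clk label here and the clk_disable_unprepare() call in ebu_nand_remove() disappear later in this patch. Sketch of the consolidated pattern:

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");
	/* no matching clk_disable_unprepare() needed in any path */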
@@ -643,7 +642,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
if (IS_ERR(ebu_host->dma_tx)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
"failed to request DMA tx chan!.\n");
- goto err_disable_unprepare_clk;
+ goto err_of_node_put;
}
ebu_host->dma_rx = dma_request_chan(dev, "rx");
@@ -655,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
}
resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_cleanup_dma;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
if (!res) {
ret = -EINVAL;
@@ -698,8 +702,6 @@ err_clean_nand:
nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
ebu_dma_cleanup(ebu_host);
-err_disable_unprepare_clk:
- clk_disable_unprepare(ebu_host->clk);
err_of_node_put:
of_node_put(chip_np);
@@ -716,7 +718,6 @@ static void ebu_nand_remove(struct platform_device *pdev)
nand_cleanup(&ebu_host->chip);
ebu_nand_disable(&ebu_host->chip);
ebu_dma_cleanup(ebu_host);
- clk_disable_unprepare(ebu_host->clk);
}
static const struct of_device_id ebu_nand_match[] = {
@@ -727,7 +728,7 @@ MODULE_DEVICE_TABLE(of, ebu_nand_match);
static struct platform_driver ebu_nand_driver = {
.probe = ebu_nand_probe,
- .remove_new = ebu_nand_remove,
+ .remove = ebu_nand_remove,
.driver = {
.name = "intel-nand-controller",
.of_match_table = ebu_nand_match,
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index e9932da18bdd..b7162ced9efa 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -106,7 +106,6 @@ int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
int oob_required, int page);
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
int oob_required, int page);
-int nand_exit_status_op(struct nand_chip *chip);
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
unsigned int len);
void nand_decode_ext_id(struct nand_chip *chip);
diff --git a/drivers/mtd/nand/raw/loongson-nand-controller.c b/drivers/mtd/nand/raw/loongson-nand-controller.c
new file mode 100644
index 000000000000..8490412d5be1
--- /dev/null
+++ b/drivers/mtd/nand/raw/loongson-nand-controller.c
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND Controller Driver for Loongson family chips
+ *
+ * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
+ * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/* Loongson NAND Controller Registers */
+#define LOONGSON_NAND_CMD 0x0
+#define LOONGSON_NAND_ADDR1 0x4
+#define LOONGSON_NAND_ADDR2 0x8
+#define LOONGSON_NAND_TIMING 0xc
+#define LOONGSON_NAND_IDL 0x10
+#define LOONGSON_NAND_IDH_STATUS 0x14
+#define LOONGSON_NAND_PARAM 0x18
+#define LOONGSON_NAND_OP_NUM 0x1c
+#define LOONGSON_NAND_CS_RDY_MAP 0x20
+
+/* Bitfields of nand command register */
+#define LOONGSON_NAND_CMD_OP_DONE BIT(10)
+#define LOONGSON_NAND_CMD_OP_SPARE BIT(9)
+#define LOONGSON_NAND_CMD_OP_MAIN BIT(8)
+#define LOONGSON_NAND_CMD_STATUS BIT(7)
+#define LOONGSON_NAND_CMD_RESET BIT(6)
+#define LOONGSON_NAND_CMD_READID BIT(5)
+#define LOONGSON_NAND_CMD_BLOCKS_ERASE BIT(4)
+#define LOONGSON_NAND_CMD_ERASE BIT(3)
+#define LOONGSON_NAND_CMD_WRITE BIT(2)
+#define LOONGSON_NAND_CMD_READ BIT(1)
+#define LOONGSON_NAND_CMD_VALID BIT(0)
+
+/* Bitfields of nand cs/rdy map register */
+#define LOONGSON_NAND_MAP_CS1_SEL GENMASK(11, 8)
+#define LOONGSON_NAND_MAP_RDY1_SEL GENMASK(15, 12)
+#define LOONGSON_NAND_MAP_CS2_SEL GENMASK(19, 16)
+#define LOONGSON_NAND_MAP_RDY2_SEL GENMASK(23, 20)
+#define LOONGSON_NAND_MAP_CS3_SEL GENMASK(27, 24)
+#define LOONGSON_NAND_MAP_RDY3_SEL GENMASK(31, 28)
+
+#define LOONGSON_NAND_CS_SEL0 BIT(0)
+#define LOONGSON_NAND_CS_SEL1 BIT(1)
+#define LOONGSON_NAND_CS_SEL2 BIT(2)
+#define LOONGSON_NAND_CS_SEL3 BIT(3)
+#define LOONGSON_NAND_CS_RDY0 BIT(0)
+#define LOONGSON_NAND_CS_RDY1 BIT(1)
+#define LOONGSON_NAND_CS_RDY2 BIT(2)
+#define LOONGSON_NAND_CS_RDY3 BIT(3)
+
+/* Bitfields of nand timing register */
+#define LOONGSON_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
+#define LOONGSON_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
+
+/* Bitfields of nand parameter register */
+#define LOONGSON_NAND_CELL_SIZE_MASK GENMASK(11, 8)
+
+#define LOONGSON_NAND_COL_ADDR_CYC 2U
+#define LOONGSON_NAND_MAX_ADDR_CYC 5U
+
+#define LOONGSON_NAND_READ_ID_SLEEP_US 1000
+#define LOONGSON_NAND_READ_ID_TIMEOUT_US 5000
+
+#define BITS_PER_WORD (4 * BITS_PER_BYTE)
+
+/* Loongson-2K1000 NAND DMA routing register */
+#define LS2K1000_NAND_DMA_MASK GENMASK(2, 0)
+#define LS2K1000_DMA0_CONF 0x0
+#define LS2K1000_DMA1_CONF 0x1
+#define LS2K1000_DMA2_CONF 0x2
+#define LS2K1000_DMA3_CONF 0x3
+#define LS2K1000_DMA4_CONF 0x4
+
+struct loongson_nand_host;
+
+struct loongson_nand_op {
+ char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
+ unsigned int naddrs;
+ unsigned int addrs_offset;
+ unsigned int aligned_offset;
+ unsigned int cmd_reg;
+ unsigned int row_start;
+ unsigned int rdy_timeout_ms;
+ unsigned int orig_len;
+ bool is_readid;
+ bool is_erase;
+ bool is_write;
+ bool is_read;
+ bool is_change_column;
+ size_t len;
+ char *buf;
+};
+
+struct loongson_nand_data {
+ unsigned int max_id_cycle;
+ unsigned int id_cycle_field;
+ unsigned int status_field;
+ unsigned int op_scope_field;
+ unsigned int hold_cycle;
+ unsigned int wait_cycle;
+ unsigned int nand_cs;
+ unsigned int dma_bits;
+ int (*dma_config)(struct device *dev);
+ void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
+};
+
+struct loongson_nand_host {
+ struct device *dev;
+ struct nand_chip chip;
+ struct nand_controller controller;
+ const struct loongson_nand_data *data;
+ unsigned int addr_cs_field;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ /* DMA Engine stuff */
+ dma_addr_t dma_base;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ struct completion dma_complete;
+};
+
+static const struct regmap_config loongson_nand_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
+ u8 opcode)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+
+ op->row_start = chip->page_shift + 1;
+
+ /* The controller abstracts the following NAND operations. */
+ switch (opcode) {
+ case NAND_CMD_STATUS:
+ op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
+ break;
+ case NAND_CMD_RESET:
+ op->cmd_reg = LOONGSON_NAND_CMD_RESET;
+ break;
+ case NAND_CMD_READID:
+ op->is_readid = true;
+ op->cmd_reg = LOONGSON_NAND_CMD_READID;
+ break;
+ case NAND_CMD_ERASE1:
+ op->is_erase = true;
+ op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
+ break;
+ case NAND_CMD_ERASE2:
+ if (!op->is_erase)
+ return -EOPNOTSUPP;
+ /* During erasing, row_start differs from the default value. */
+ op->row_start = chip->page_shift;
+ op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
+ break;
+ case NAND_CMD_SEQIN:
+ op->is_write = true;
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (!op->is_write)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
+ break;
+ case NAND_CMD_READ0:
+ op->is_read = true;
+ break;
+ case NAND_CMD_READSTART:
+ if (!op->is_read)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_READ;
+ break;
+ case NAND_CMD_RNDOUT:
+ op->is_change_column = true;
+ break;
+ case NAND_CMD_RNDOUTSTART:
+ if (!op->is_change_column)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_READ;
+ break;
+ default:
+ dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
+ struct loongson_nand_op *op)
+{
+ unsigned int op_id;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
+ return -EOPNOTSUPP;
+ op->naddrs = naddrs;
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ op->orig_len = nand_subop_get_data_len(subop, op_id);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ op->buf = instr->ctx.data.buf.in + offset;
+ else if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ op->buf = (void *)instr->ctx.data.buf.out + offset;
+
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!host->data->nand_cs)
+ return;
+
+ /*
+ * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
+ * information such as NAND chip selection and capacity is unknown. As a
+ * workaround, we fall back to a 128MB cell size (2KB page size).
+ */
+ if (!mtd->writesize)
+ host->addr_cs_field = GENMASK(17, 16);
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
+ host->data->nand_cs << __ffs(host->addr_cs_field));
+}
+
+static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ int i;
+
+ for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LOONGSON_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ mask &= GENMASK(chip->page_shift, 0);
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+
+ if (i == 4) {
+ mask = (u32)0xff >> (BITS_PER_WORD - shift);
+ val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
+ }
+ }
+ }
+}
+
+static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ int i;
+
+ for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LOONGSON_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
+ }
+ }
+
+ loongson_nand_set_addr_cs(host);
+}
+
+static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int col0 = op->addrs[0];
+ short col;
+
+ if (!IS_ALIGNED(col0, chip->buf_align)) {
+ col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
+ op->aligned_offset = op->addrs[0] - col0;
+ op->addrs[0] = col0;
+ }
+
+ if (host->data->set_addr)
+ host->data->set_addr(host, op);
+
+ /* set operation length */
+ if (op->is_write || op->is_read || op->is_change_column)
+ op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
+ else if (op->is_erase)
+ op->len = 1;
+ else
+ op->len = op->orig_len;
+
+ writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);
+
+ /* set operation area and scope */
+ col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
+ if (op->orig_len && !op->is_readid) {
+ unsigned int op_scope = 0;
+
+ if (col < mtd->writesize) {
+ op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
+ op_scope = mtd->writesize;
+ }
+
+ op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
+ op_scope += mtd->oobsize;
+
+ op_scope <<= __ffs(host->data->op_scope_field);
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
+ host->data->op_scope_field, op_scope);
+ }
+
+ /* set command */
+ writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);
+
+ /* trigger operation */
+ regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
+ LOONGSON_NAND_CMD_VALID);
+}
+
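A worked instance of the column-alignment handling in loongson_nand_trigger_op() above, assuming chip->buf_align == 16 (the value this driver sets at chip init; the concrete numbers are example values):

/*
 *   op->addrs[0]       = 0x0b, op->orig_len = 4
 *   col0               = ALIGN_DOWN(0x0b, 16) = 0x00
 *   op->aligned_offset = 0x0b - 0x00          = 11
 *   op->len            = ALIGN(4 + 11, 16)    = 16
 *
 * The controller transfers one aligned 16-byte chunk starting at
 * column 0; loongson_nand_dma_transfer() later copies the four
 * requested bytes out of the bounce buffer at offset 11.
 */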
+static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
+ struct loongson_nand_op *op)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (op->rdy_timeout_ms) {
+ ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
+ val, val & LOONGSON_NAND_CMD_OP_DONE,
+ 0, op->rdy_timeout_ms * MSEC_PER_SEC);
+ if (ret)
+ dev_err(host->dev, "operation failed\n");
+ }
+
+ return ret;
+}
+
+static void loongson_nand_dma_callback(void *data)
+{
+ struct loongson_nand_host *host = (struct loongson_nand_host *)data;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
+ if (likely(status == DMA_COMPLETE)) {
+ dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
+ complete(&host->dma_complete);
+ } else {
+ dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
+ }
+}
+
+static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ void *buf = op->buf;
+ char *dma_buf = NULL;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
+ IS_ALIGNED(op->orig_len, chip->buf_align)) {
+ dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "failed to map DMA buffer\n");
+ return -ENXIO;
+ }
+ } else if (!op->is_write) {
+ dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+ } else {
+ dev_err(dev, "subpage writing not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "failed to prepare DMA descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ desc->callback = loongson_nand_dma_callback;
+ desc->callback_param = host;
+
+ host->dma_cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(host->dma_cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit DMA descriptor\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
+ dma_async_issue_pending(chan);
+
+ if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
+ dmaengine_terminate_sync(chan);
+ reinit_completion(&host->dma_complete);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (dma_buf)
+ memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
+err:
+ if (dma_buf)
+ dma_free_coherent(dev, op->len, dma_buf, dma_addr);
+ else
+ dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
+
+ return ret;
+}
+
+static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int ret;
+
+ ret = loongson_nand_parse_instructions(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ loongson_nand_trigger_op(host, &op);
+
+ ret = loongson_nand_dma_transfer(host, &op);
+ if (ret)
+ return ret;
+
+ return loongson_nand_wait_for_op_done(host, &op);
+}
+
+static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
+ struct loongson_nand_op *op)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ ret = loongson_nand_parse_instructions(chip, subop, op);
+ if (ret)
+ return ret;
+
+ loongson_nand_trigger_op(host, op);
+
+ return loongson_nand_wait_for_op_done(host, op);
+}
+
+static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_op op = {};
+
+ return loongson_nand_misc_type_exec(chip, subop, &op);
+}
+
+static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int i, ret;
+ union {
+ char ids[6];
+ struct {
+ int idl;
+ u16 idh;
+ };
+ } nand_id;
+
+ ret = loongson_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
+ LOONGSON_NAND_READ_ID_SLEEP_US,
+ LOONGSON_NAND_READ_ID_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);
+
+ for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
+ op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];
+
+ return ret;
+}
+
+static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int val, ret;
+
+ ret = loongson_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
+ val &= ~host->data->status_field;
+ op.buf[0] = val << ffs(host->data->status_field);
+
+ return ret;
+}
+
+static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_read_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
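For context on how this parser is exercised: exec_op() receives a flat instruction list from the raw NAND core, and nand_op_parser_exec_op() dispatches to the first pattern that matches. A hedged sketch of the sequence a full page read produces, built with the instruction macros from include/linux/mtd/rawnand.h (the function name, timeout, and lengths are example values):

/* Illustrative only: this sequence matches the fifth pattern above,
 * so it is handled by loongson_nand_data_type_exec(). */
static int example_read_page0(struct nand_chip *chip, u8 *buf)
{
	u8 addrs[5] = { 0 };	/* 2 column + 3 row cycles: page 0 */
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(5, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, 0),
		NAND_OP_WAIT_RDY(200, 0),	/* tR_max, in ms */
		NAND_OP_DATA_IN(2048, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, &op, false);
}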
+static int loongson_nand_is_valid_cmd(u8 opcode)
+{
+ if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
+{
+ if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
+ return 0;
+
+ if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &op->instrs[op_id];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ if (!instr1)
+ instr1 = instr;
+ else if (!instr2)
+ instr2 = instr;
+ else
+ break;
+ }
+ }
+
+ if (!instr1)
+ return -EOPNOTSUPP;
+
+ if (!instr2)
+ return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
+
+ return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
+}
+
+static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ if (check_only)
+ return loongson_nand_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
+}
+
+static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ u64 chipsize = nanddev_target_size(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ switch (mtd->writesize) {
+ case SZ_512:
+ switch (chipsize) {
+ case SZ_8M:
+ host->addr_cs_field = GENMASK(15, 14);
+ return 0x9;
+ case SZ_16M:
+ host->addr_cs_field = GENMASK(16, 15);
+ return 0xa;
+ case SZ_32M:
+ host->addr_cs_field = GENMASK(17, 16);
+ return 0xb;
+ case SZ_64M:
+ host->addr_cs_field = GENMASK(18, 17);
+ return 0xc;
+ case SZ_128M:
+ host->addr_cs_field = GENMASK(19, 18);
+ return 0xd;
+ }
+ break;
+ case SZ_2K:
+ switch (chipsize) {
+ case SZ_128M:
+ host->addr_cs_field = GENMASK(17, 16);
+ return 0x0;
+ case SZ_256M:
+ host->addr_cs_field = GENMASK(18, 17);
+ return 0x1;
+ case SZ_512M:
+ host->addr_cs_field = GENMASK(19, 18);
+ return 0x2;
+ case SZ_1G:
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x3;
+ }
+ break;
+ case SZ_4K:
+ if (chipsize == SZ_2G) {
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x4;
+ }
+ break;
+ case SZ_8K:
+ switch (chipsize) {
+ case SZ_4G:
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x5;
+ case SZ_8G:
+ host->addr_cs_field = GENMASK(21, 20);
+ return 0x6;
+ case SZ_16G:
+ host->addr_cs_field = GENMASK(22, 21);
+ return 0x7;
+ }
+ break;
+ }
+
+ dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n",
+ chipsize >> 20, mtd->writesize);
+ return -EINVAL;
+}
+
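A worked row from the mapping above: a 2KiB-page, 128MiB chip yields cell_size 0x0 with the chip-select bits at ADDR2[17:16], which is exactly the field loongson_nand_set_addr_cs() assumes as its pre-attach fallback. As a comment-style example:

/*
 * writesize = SZ_2K, chipsize = SZ_128M
 *   -> cell_size           = 0x0 (programmed into PARAM[11:8])
 *   -> host->addr_cs_field = GENMASK(17, 16)
 *   -> ADDR2 receives nand_cs << 16 via regmap_update_bits()
 */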
+static int loongson_nand_attach_chip(struct nand_chip *chip)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ int cell_size = loongson_nand_get_chip_capacity(chip);
+
+ if (cell_size < 0)
+ return cell_size;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set cell size */
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
+ FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
+ FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
+ FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
+
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ return 0;
+}
+
+static const struct nand_controller_ops loongson_nand_controller_ops = {
+ .exec_op = loongson_nand_exec_op,
+ .attach_chip = loongson_nand_attach_chip,
+};
+
+static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
+{
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+}
+
+static int ls2k1000_nand_apbdma_config(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *regs;
+ int val;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "dma-config");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ val = readl(regs);
+ val |= FIELD_PREP(LS2K1000_NAND_DMA_MASK, LS2K1000_DMA0_CONF);
+ writel(val, regs);
+
+ return 0;
+}
+
+static int loongson_nand_controller_init(struct loongson_nand_host *host)
+{
+ struct device *dev = host->dev;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = {};
+ int ret, val;
+
+ host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
+ if (IS_ERR(host->regmap))
+ return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
+
+ if (host->data->id_cycle_field)
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
+ host->data->max_id_cycle << __ffs(host->data->id_cycle_field));
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set DMA mask\n");
+
+ val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) |
+ FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) |
+ FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3);
+
+ regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val);
+
+ if (host->data->dma_config) {
+ ret = host->data->dma_config(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA routing\n");
+ }
+
+ chan = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(chan))
+ return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
+ host->dma_chan = chan;
+
+ cfg.src_addr = host->dma_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr = host->dma_base;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(host->dma_chan, &cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA channel\n");
+
+ init_completion(&host->dma_complete);
+
+ return 0;
+}
+
+static int loongson_nand_chip_init(struct loongson_nand_host *host)
+{
+ struct device *dev = host->dev;
+ int nchips = of_get_child_count(dev->of_node);
+ struct device_node *chip_np;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (nchips != 1)
+ return dev_err_probe(dev, -EINVAL, "Currently only one NAND chip is supported\n");
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
+
+ nand_set_flash_node(chip, chip_np);
+ of_node_put(chip_np);
+ if (!mtd->name)
+ return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
+
+ nand_set_controller_data(chip, host);
+ chip->controller = &host->controller;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
+ chip->buf_align = 16;
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return dev_err_probe(dev, ret, "failed to register MTD device\n");
+ }
+
+ return 0;
+}
+
+static int loongson_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct loongson_nand_data *data;
+ struct loongson_nand_host *host;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
+ if (!res)
+ return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
+
+ host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, host->dma_base))
+ return -ENXIO;
+
+ host->dev = dev;
+ host->data = data;
+ host->controller.ops = &loongson_nand_controller_ops;
+
+ nand_controller_init(&host->controller);
+
+ ret = loongson_nand_controller_init(host);
+ if (ret)
+ goto err;
+
+ ret = loongson_nand_chip_init(host);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+err:
+ loongson_nand_controller_cleanup(host);
+
+ return ret;
+}
+
+static void loongson_nand_remove(struct platform_device *pdev)
+{
+ struct loongson_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ loongson_nand_controller_cleanup(host);
+}
+
+static const struct loongson_nand_data ls1b_nand_data = {
+ .max_id_cycle = 5,
+ .status_field = GENMASK(15, 8),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .dma_bits = 32,
+ .set_addr = ls1b_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls1c_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .dma_bits = 32,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls2k0500_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x4,
+ .wait_cycle = 0x12,
+ .dma_bits = 64,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls2k1000_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x4,
+ .wait_cycle = 0x12,
+ .nand_cs = 0x2,
+ .dma_bits = 64,
+ .dma_config = ls2k1000_nand_apbdma_config,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct of_device_id loongson_nand_match[] = {
+ {
+ .compatible = "loongson,ls1b-nand-controller",
+ .data = &ls1b_nand_data,
+ },
+ {
+ .compatible = "loongson,ls1c-nand-controller",
+ .data = &ls1c_nand_data,
+ },
+ {
+ .compatible = "loongson,ls2k0500-nand-controller",
+ .data = &ls2k0500_nand_data,
+ },
+ {
+ .compatible = "loongson,ls2k1000-nand-controller",
+ .data = &ls2k1000_nand_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, loongson_nand_match);
+
+static struct platform_driver loongson_nand_driver = {
+ .probe = loongson_nand_probe,
+ .remove = loongson_nand_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = loongson_nand_match,
+ },
+};
+
+module_platform_driver(loongson_nand_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson NAND Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index b3136ae6f4e9..19b13ae536d4 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
return 0;
}
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
+ struct lpc32xx_nand_host *host = data;
uint8_t sr;
/* Clear interrupt flag by reading status */
@@ -573,18 +574,22 @@ static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
- if (!host->pdata || !host->pdata->dma_filter) {
- dev_err(mtd->dev.parent, "no DMA platform data\n");
- return -ENOENT;
- }
+ host->dma_chan = dma_request_chan(mtd->dev.parent, "rx-tx");
+ if (IS_ERR(host->dma_chan)) {
+ /* fall back to requesting via platform data */
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
- "nand-mlc");
- if (!host->dma_chan) {
- dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
- return -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, "nand-mlc");
+
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
}
/*
@@ -695,8 +700,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->pdev = pdev;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
@@ -781,7 +785,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto release_dma_chan;
}
- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ if (request_irq(host->irq, &lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
@@ -887,7 +891,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove_new = lpc32xx_nand_remove,
+ .remove = lpc32xx_nand_remove,
.resume = pm_ptr(lpc32xx_nand_resume),
.suspend = pm_ptr(lpc32xx_nand_suspend),
.driver = {
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 3139b6107660..3ca30e7dce33 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -721,18 +721,22 @@ static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
- if (!host->pdata || !host->pdata->dma_filter) {
- dev_err(mtd->dev.parent, "no DMA platform data\n");
- return -ENOENT;
- }
+ host->dma_chan = dma_request_chan(mtd->dev.parent, "rx-tx");
+ if (IS_ERR(host->dma_chan)) {
+ /* fall back to requesting via platform data */
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
- "nand-slc");
- if (!host->dma_chan) {
- dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
- return -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, "nand-slc");
+
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
}
return 0;
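One subtlety in this fallback: the two request APIs signal failure differently. dma_request_chan() returns an ERR_PTR() (possibly -EPROBE_DEFER if the DMA controller has not probed yet), while the legacy dma_request_channel() returns NULL. Code mixing them must check both conventions, as sketched here:

	chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(chan)) {			/* ERR_PTR-style failure */
		chan = dma_request_channel(mask, filter_fn, "nand-slc");
		if (!chan)			/* legacy NULL-style failure */
			return -EBUSY;
	}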
@@ -836,8 +840,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
@@ -851,7 +854,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Start with WP disabled, if available */
- host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ host->wp_gpio = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
res = PTR_ERR_OR_ZERO(host->wp_gpio);
if (res) {
if (res != -EPROBE_DEFER)
@@ -872,15 +875,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
goto enable_wp;
}
- res = clk_prepare_enable(host->clk);
- if (res)
- goto enable_wp;
/* Set NAND IO addresses and command/ready functions */
chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
@@ -908,13 +908,13 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
GFP_KERNEL);
if (host->data_buf == NULL) {
res = -ENOMEM;
- goto unprepare_clk;
+ goto enable_wp;
}
res = lpc32xx_nand_dma_setup(host);
if (res) {
res = -EIO;
- goto unprepare_clk;
+ goto enable_wp;
}
/* Find NAND device */
@@ -935,8 +935,6 @@ cleanup_nand:
nand_cleanup(chip);
release_dma:
dma_release_channel(host->dma_chan);
-unprepare_clk:
- clk_disable_unprepare(host->clk);
enable_wp:
lpc32xx_wp_enable(host);
@@ -963,7 +961,6 @@ static void lpc32xx_nand_remove(struct platform_device *pdev)
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
- clk_disable_unprepare(host->clk);
lpc32xx_wp_enable(host);
}
@@ -1013,7 +1010,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove_new = lpc32xx_nand_remove,
+ .remove = lpc32xx_nand_remove,
.resume = pm_ptr(lpc32xx_nand_resume),
.suspend = pm_ptr(lpc32xx_nand_suspend),
.driver = {
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 30c15e4e1cc0..38b7eb5b992c 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -77,13 +77,14 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -347,7 +348,7 @@ struct marvell_nand_chip {
int addr_cyc;
int selected_die;
unsigned int nsels;
- struct marvell_nand_chip_sel sels[];
+ struct marvell_nand_chip_sel sels[] __counted_by(nsels);
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
@@ -375,6 +376,7 @@ static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
* BCH error detection and correction algorithm,
* NDCB3 register has been added
* @use_dma: Use dma for data transfers
+ * @max_mode_number: Maximum timing mode supported by the controller
*/
struct marvell_nfc_caps {
unsigned int max_cs_nb;
@@ -383,6 +385,7 @@ struct marvell_nfc_caps {
bool legacy_of_bindings;
bool is_nfcv2;
bool use_dma;
+ unsigned int max_mode_number;
};
/**
@@ -1162,6 +1165,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
int ret;
/* NFCv2 needs more information about the operation being executed */
@@ -1195,7 +1199,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(sdr->tPROG_max));
- return ret;
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1624,6 +1639,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
+ u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
@@ -1660,6 +1676,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
if (ret)
return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
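The added check follows the standard NAND convention: after tPROG the chip's status byte reports program failure in bit 0, exposed as NAND_STATUS_FAIL. The same verification as a stand-alone helper (illustrative, not part of the patch):

/* Confirm a just-programmed page on the chip side. */
static int example_check_prog_status(struct nand_chip *chip)
{
	u8 status;
	int ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}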
@@ -2376,6 +2400,9 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdr))
return PTR_ERR(sdr);
+ if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode)
+ return -EOPNOTSUPP;
+
/*
* SDR timings are given in pico-seconds while NFC timings must be
* expressed in NAND controller clock cycles, which is half of the
@@ -2747,7 +2774,6 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int max_cs = nfc->caps->max_cs_nb;
int nchips;
int ret;
@@ -2774,20 +2800,15 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
return ret;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
- goto cleanup_chips;
+ marvell_nand_chips_cleanup(nfc);
+ return ret;
}
}
return 0;
-
-cleanup_chips:
- marvell_nand_chips_cleanup(nfc);
-
- return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
@@ -3073,6 +3094,13 @@ static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.is_nfcv2 = true,
};
+static const struct marvell_nfc_caps marvell_ac5_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .is_nfcv2 = true,
+ .max_mode_number = 3,
+};
+
static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
@@ -3122,6 +3150,10 @@ static const struct of_device_id marvell_nfc_of_ids[] = {
.data = &marvell_armada_8k_nfc_caps,
},
{
+ .compatible = "marvell,ac5-nand-controller",
+ .data = &marvell_ac5_caps,
+ },
+ {
.compatible = "marvell,armada370-nand-controller",
.data = &marvell_armada370_nfc_caps,
},
@@ -3154,7 +3186,7 @@ static struct platform_driver marvell_nfc_driver = {
},
.id_table = marvell_nfc_platform_ids,
.probe = marvell_nfc_probe,
- .remove_new = marvell_nfc_remove,
+ .remove = marvell_nfc_remove,
};
module_platform_driver(marvell_nfc_driver);
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index b10011dec1e6..b8834aa96e81 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/sched/task_stack.h>
#define NFC_REG_CMD 0x00
@@ -36,6 +35,7 @@
#define NFC_CMD_RB BIT(20)
#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_ENABLE 1
#define NFC_CMD_SHORTMODE_DISABLE 0
#define NFC_CMD_RB_INT BIT(14)
#define NFC_CMD_RB_INT_NO_PIN ((0xb << 10) | BIT(18) | BIT(16))
@@ -64,7 +64,7 @@
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
- ((ran) << 19) | \
+ (ran) | \
((bch) << 14) | \
((short_mode) << 13) | \
(((page_size) & 0x7f) << 6) | \
@@ -79,6 +79,8 @@
#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
#define DMA_ADDR_ALIGN 8
+#define NFC_SHORT_MODE_ECC_SZ 384
+
#define ECC_CHECK_RETURN_FF (-1)
#define NAND_CE0 (0xe << 10)
@@ -91,6 +93,8 @@
/* eMMC clock register, misc control */
#define CLK_SELECT_NAND BIT(31)
+#define CLK_ALWAYS_ON_NAND BIT(24)
+#define CLK_SELECT_FIX_PLL2 BIT(6)
#define NFC_CLK_CYCLE 6
@@ -124,17 +128,20 @@ struct meson_nfc_nand_chip {
u32 twb;
u32 tadl;
u32 tbers_max;
+ u32 boot_pages;
+ u32 boot_page_step;
u32 bch_mode;
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
- u8 sels[];
+ u8 sels[] __counted_by(nsels);
};
struct meson_nand_ecc {
u32 bch;
u32 strength;
+ u32 size;
};
struct meson_nfc_data {
@@ -190,7 +197,8 @@ struct meson_nfc {
};
enum {
- NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH8_512 = 1,
+ NFC_ECC_BCH8_1K,
NFC_ECC_BCH24_1K,
NFC_ECC_BCH30_1K,
NFC_ECC_BCH40_1K,
@@ -198,15 +206,16 @@ enum {
NFC_ECC_BCH60_1K,
};
-#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) }
static struct meson_nand_ecc meson_ecc[] = {
- MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
- MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
- MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
- MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
- MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
- MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+ MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512),
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024),
};
static int meson_nand_calc_ecc_bytes(int step_size, int strength)
@@ -224,8 +233,27 @@ static int meson_nand_calc_ecc_bytes(int step_size, int strength)
NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
-NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
- meson_nand_calc_ecc_bytes, 1024, 8);
+
+static const int axg_stepinfo_strengths[] = { 8 };
+
+static const struct nand_ecc_step_info axg_stepinfo[] = {
+ {
+ .stepsize = 1024,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+ {
+ .stepsize = 512,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+};
+
+static const struct nand_ecc_caps meson_axg_ecc_caps = {
+ .stepinfos = axg_stepinfo,
+ .nstepinfos = ARRAY_SIZE(axg_stepinfo),
+ .calc_ecc_bytes = meson_nand_calc_ecc_bytes,
+};
static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
{
@@ -275,28 +303,49 @@ static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
nfc->reg_base + NFC_REG_CMD);
}
-static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
- int scrambler)
+static int meson_nfc_is_boot_page(struct nand_chip *nand, int page)
{
+ const struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ return (nand->options & NAND_IS_BOOT_MEDIUM) &&
+ !(page % meson_chip->boot_page_step) &&
+ (page < meson_chip->boot_pages);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir, int page)
+{
+ const struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
- struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
- u32 bch = meson_chip->bch_mode, cmd;
int len = mtd->writesize, pagesize, pages;
+ int scrambler;
+ u32 cmd;
- pagesize = nand->ecc.size;
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ scrambler = NFC_CMD_SCRAMBLER_ENABLE;
+ else
+ scrambler = NFC_CMD_SCRAMBLER_DISABLE;
if (raw) {
len = mtd->writesize + mtd->oobsize;
cmd = len | scrambler | DMA_DIR(dir);
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
- return;
- }
+ } else if (meson_nfc_is_boot_page(nand, page)) {
+ pagesize = NFC_SHORT_MODE_ECC_SZ >> 3;
+ pages = mtd->writesize / 512;
+
+ scrambler = NFC_CMD_SCRAMBLER_ENABLE;
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, NFC_ECC_BCH8_1K,
+ NFC_CMD_SHORTMODE_ENABLE, pagesize, pages);
+ } else {
+ pagesize = nand->ecc.size >> 3;
+ pages = len / nand->ecc.size;
- pages = len / nand->ecc.size;
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, meson_chip->bch_mode,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+ }
- cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
- NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+ if (scrambler == NFC_CMD_SCRAMBLER_ENABLE)
+ meson_nfc_cmd_seed(nfc, page);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
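The short-mode arithmetic above, worked through for a 2KiB page in the boot area. The CMDRWGEN page-size field is expressed in 8-byte units (note the >> 3 on both branches), so:

/*
 *   pagesize = NFC_SHORT_MODE_ECC_SZ >> 3 = 384 / 8   = 48
 *   pages    = mtd->writesize / 512       = 2048 / 512 = 4
 *
 * i.e. four BCH8 short-mode ECC steps of 384 data bytes each, with
 * the scrambler forced on and the page number used as its seed, the
 * layout the boot ROM expects for the first boot_pages pages.
 */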
@@ -400,9 +449,10 @@ static void meson_nfc_set_data_oob(struct nand_chip *nand,
}
}
-static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd, cfg;
meson_nfc_cmd_idle(nfc, nfc->timing.twb);
@@ -414,8 +464,7 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
writel(cfg, nfc->reg_base + NFC_REG_CFG);
reinit_completion(&nfc->completion);
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_STATUS;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ nand_status_op(nand, NULL);
/* use the max erase time as the maximum timeout for waiting R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max;
@@ -425,12 +474,8 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
msecs_to_jiffies(timeout_ms)))
return -ETIMEDOUT;
- if (need_cmd_read0) {
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_READ0;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_drain_cmd(nfc);
- meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
- }
+ if (need_cmd_read0)
+ nand_exit_status_op(nand);
return 0;
}
@@ -463,9 +508,11 @@ static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms)
return ret;
}
-static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
if (nfc->no_rb_pin) {
/* This mode is used when there is no wired R/B pin.
* It works like 'nand_soft_waitrdy()', but instead of
@@ -477,7 +524,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
* needed (for all cases except page programming - this
* is the reason for the 'need_cmd_read0' flag).
*/
- return meson_nfc_wait_no_rb_pin(nfc, timeout_ms,
+ return meson_nfc_wait_no_rb_pin(nand, timeout_ms,
need_cmd_read0);
} else {
return meson_nfc_wait_rb_pin(nfc, timeout_ms);
@@ -490,7 +537,7 @@ static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
*info |= oob_buf[count];
*info |= oob_buf[count + 1] << 8;
@@ -503,7 +550,7 @@ static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
oob_buf[count] = *info;
oob_buf[count + 1] = *info >> 8;
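With the stride widened from 2 to (2 + nand->ecc.bytes) in both helpers above, the two user bytes of ECC step i are assumed to sit at the start of that step's OOB slice, immediately followed by the step's ECC bytes. A minimal sketch of the implied offset calculation:

/* Assumed flat OOB layout per ECC step after this change:
 *   [user0][user1][ecc0]...[ecc(bytes-1)], repeated ecc.steps times,
 * so the user bytes of step i start at: */
static inline int user_byte_offset(int step, int ecc_bytes)
{
	return step * (2 + ecc_bytes);
}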
@@ -687,7 +734,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
if (in) {
nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max), true);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true);
} else {
meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
}
@@ -722,18 +769,11 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
if (ret)
return ret;
- if (nand->options & NAND_NEED_SCRAMBLING) {
- meson_nfc_cmd_seed(nfc, page);
- meson_nfc_cmd_access(nand, raw, DIRWRITE,
- NFC_CMD_SCRAMBLER_ENABLE);
- } else {
- meson_nfc_cmd_access(nand, raw, DIRWRITE,
- NFC_CMD_SCRAMBLER_DISABLE);
- }
+ meson_nfc_cmd_access(nand, raw, DIRWRITE, page);
cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max), false);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false);
meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
@@ -808,14 +848,7 @@ static int meson_nfc_read_page_sub(struct nand_chip *nand,
if (ret)
return ret;
- if (nand->options & NAND_NEED_SCRAMBLING) {
- meson_nfc_cmd_seed(nfc, page);
- meson_nfc_cmd_access(nand, raw, DIRREAD,
- NFC_CMD_SCRAMBLER_ENABLE);
- } else {
- meson_nfc_cmd_access(nand, raw, DIRREAD,
- NFC_CMD_SCRAMBLER_DISABLE);
- }
+ meson_nfc_cmd_access(nand, raw, DIRREAD, page);
ret = meson_nfc_wait_dma_finish(nfc);
meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
@@ -1049,7 +1082,7 @@ static int meson_nfc_exec_op(struct nand_chip *nand,
break;
case NAND_OP_WAITRDY_INSTR:
- meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms,
+ meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms,
true);
if (instr->delay_ns)
meson_nfc_cmd_idle(nfc, delay_idle);
@@ -1115,6 +1148,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
init.name = devm_kasprintf(nfc->dev,
GFP_KERNEL, "%s#div",
dev_name(nfc->dev));
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &clk_divider_ops;
nfc_divider_parent_data[0].fw_name = "device";
init.parent_data = nfc_divider_parent_data;
@@ -1132,7 +1168,7 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ writel(CLK_ALWAYS_ON_NAND | CLK_SELECT_NAND | CLK_SELECT_FIX_PLL2,
nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
@@ -1259,7 +1295,8 @@ static int meson_nand_bch_mode(struct nand_chip *nand)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
- if (meson_ecc[i].strength == nand->ecc.strength) {
+ if (meson_ecc[i].strength == nand->ecc.strength &&
+ meson_ecc[i].size == nand->ecc.size) {
meson_chip->bch_mode = meson_ecc[i].bch;
return 0;
}
@@ -1406,6 +1443,26 @@ meson_nfc_nand_chip_init(struct device *dev,
if (ret)
return ret;
+ if (nand->options & NAND_IS_BOOT_MEDIUM) {
+ ret = of_property_read_u32(np, "amlogic,boot-pages",
+ &meson_chip->boot_pages);
+ if (ret) {
+ dev_err(dev, "could not retrieve 'amlogic,boot-pages' property: %d\n",
+ ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "amlogic,boot-page-step",
+ &meson_chip->boot_page_step);
+ if (ret) {
+ dev_err(dev, "could not retrieve 'amlogic,boot-page-step' property: %d\n",
+ ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+ }
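Both properties are mandatory once NAND_IS_BOOT_MEDIUM is set; a board devicetree would declare them on the chip node, e.g. (illustrative values) amlogic,boot-pages = <1024>; and amlogic,boot-page-step = <128>;, matching the BOOT_PAGES/BOOT_PAGE_STEP sketch earlier.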
+
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register MTD device: %d\n", ret);
@@ -1418,7 +1475,7 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chips_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
@@ -1438,14 +1495,12 @@ static int meson_nfc_nand_chips_init(struct device *dev,
struct meson_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- meson_nfc_nand_chip_cleanup(nfc);
- of_node_put(nand_np);
+ meson_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
@@ -1559,14 +1614,14 @@ static void meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- meson_nfc_nand_chip_cleanup(nfc);
+ meson_nfc_nand_chips_cleanup(nfc);
meson_nfc_disable_clk(nfc);
}
static struct platform_driver meson_nfc_driver = {
.probe = meson_nfc_probe,
- .remove_new = meson_nfc_remove,
+ .remove = meson_nfc_remove,
.driver = {
.name = "meson-nand",
.of_match_table = meson_nfc_id_table,
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index ab05ee65702c..97b4e7f3e1bb 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -21,10 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/mpc5121.h>
@@ -595,8 +595,6 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
- clk_disable_unprepare(prv->clk);
-
if (prv->csreg)
iounmap(prv->csreg);
}
@@ -717,17 +715,12 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Enable NFC clock */
- clk = devm_clk_get(dev, "ipg");
+ clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk)) {
- dev_err(dev, "Unable to acquire NFC clock!\n");
+ dev_err(dev, "Unable to acquire and enable NFC clock!\n");
retval = PTR_ERR(clk);
goto error;
}
- retval = clk_prepare_enable(clk);
- if (retval) {
- dev_err(dev, "Unable to enable NFC clock!\n");
- goto error;
- }
prv->clk = clk;
/* Reset NAND Flash controller */
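The conversion above folds devm_clk_get() plus clk_prepare_enable() into a single managed call; devres then disables and unprepares the clock on driver detach, which is also why the explicit clk_disable_unprepare() is dropped from mpc5121_nfc_free() in the earlier hunk. The before/after pattern, sketched:

/* Before: two calls plus manual teardown on every error/remove path. */
clk = devm_clk_get(dev, "ipg");
if (IS_ERR(clk))
	return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret)
	return ret;
/* ... and later: clk_disable_unprepare(clk); */

/* After: one call; devres handles disable/unprepare automatically. */
clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk))
	return PTR_ERR(clk);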
@@ -842,7 +835,7 @@ MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
- .remove_new = mpc5121_nfc_remove,
+ .remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc5121_nfc_match,
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b2fa6b2074ab..21c7e1102746 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/nand-ecc-mtk.h>
/* NAND controller register definition */
@@ -131,7 +130,7 @@ struct mtk_nfc_nand_chip {
u32 spare_per_sector;
int nsels;
- u8 sels[];
+ u8 sels[] __counted_by(nsels);
/* nothing after this field */
};
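__counted_by() ties the flexible array to its length member so that FORTIFY_SOURCE/UBSAN bounds checking can validate sels[] accesses at runtime; the length must be assigned before the array is indexed. It pairs with the struct_size() conversion in the allocation hunk below, roughly:

/* struct_size() computes sizeof(*chip) + nsels * sizeof(chip->sels[0])
 * with integer-overflow checking. */
chip = devm_kzalloc(dev, struct_size(chip, sels, nsels), GFP_KERNEL);
if (!chip)
	return -ENOMEM;
chip->nsels = nsels;	/* set before any chip->sels[i] access */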
@@ -1119,32 +1118,6 @@ static irqreturn_t mtk_nfc_irq(int irq, void *id)
return IRQ_HANDLED;
}
-static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
-{
- int ret;
-
- ret = clk_prepare_enable(clk->nfi_clk);
- if (ret) {
- dev_err(dev, "failed to enable nfi clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(clk->pad_clk);
- if (ret) {
- dev_err(dev, "failed to enable pad clk\n");
- clk_disable_unprepare(clk->nfi_clk);
- return ret;
- }
-
- return 0;
-}
-
-static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
-{
- clk_disable_unprepare(clk->nfi_clk);
- clk_disable_unprepare(clk->pad_clk);
-}
-
static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
@@ -1383,7 +1356,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return -EINVAL;
}
- chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ chip = devm_kzalloc(dev, struct_size(chip, sels, nsels),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -1456,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return 0;
}
+static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
+{
+ struct mtk_nfc_nand_chip *mtk_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ mtk_chip = list_first_entry(&nfc->chips,
+ struct mtk_nfc_nand_chip, node);
+ chip = &mtk_chip->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&mtk_chip->node);
+ }
+}
+
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
+ mtk_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
@@ -1546,40 +1535,36 @@ static int mtk_nfc_probe(struct platform_device *pdev)
goto release_ecc;
}
- nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk");
if (IS_ERR(nfc->clk.nfi_clk)) {
dev_err(dev, "no clk\n");
ret = PTR_ERR(nfc->clk.nfi_clk);
goto release_ecc;
}
- nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk");
if (IS_ERR(nfc->clk.pad_clk)) {
dev_err(dev, "no pad clk\n");
ret = PTR_ERR(nfc->clk.pad_clk);
goto release_ecc;
}
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
- goto release_ecc;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -EINVAL;
- goto clk_disable;
+ goto release_ecc;
}
ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
if (ret) {
dev_err(dev, "failed to request nfi irq\n");
- goto clk_disable;
+ goto release_ecc;
}
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set dma mask\n");
- goto clk_disable;
+ goto release_ecc;
}
platform_set_drvdata(pdev, nfc);
@@ -1587,14 +1572,11 @@ static int mtk_nfc_probe(struct platform_device *pdev)
ret = mtk_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
- goto clk_disable;
+ goto release_ecc;
}
return 0;
-clk_disable:
- mtk_nfc_disable_clk(&nfc->clk);
-
release_ecc:
mtk_ecc_release(nfc->ecc);
@@ -1604,22 +1586,9 @@ release_ecc:
static void mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
- struct mtk_nfc_nand_chip *mtk_chip;
- struct nand_chip *chip;
- int ret;
-
- while (!list_empty(&nfc->chips)) {
- mtk_chip = list_first_entry(&nfc->chips,
- struct mtk_nfc_nand_chip, node);
- chip = &mtk_chip->nand;
- ret = mtd_device_unregister(nand_to_mtd(chip));
- WARN_ON(ret);
- nand_cleanup(chip);
- list_del(&mtk_chip->node);
- }
+ mtk_nfc_nand_chips_cleanup(nfc);
mtk_ecc_release(nfc->ecc);
- mtk_nfc_disable_clk(&nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -1627,7 +1596,8 @@ static int mtk_nfc_suspend(struct device *dev)
{
struct mtk_nfc *nfc = dev_get_drvdata(dev);
- mtk_nfc_disable_clk(&nfc->clk);
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ clk_disable_unprepare(nfc->clk.pad_clk);
return 0;
}
@@ -1642,9 +1612,18 @@ static int mtk_nfc_resume(struct device *dev)
udelay(200);
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
+ ret = clk_prepare_enable(nfc->clk.nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->clk.pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(nfc->clk.nfi_clk);
return ret;
+ }
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
@@ -1661,7 +1640,7 @@ static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
static struct platform_driver mtk_nfc_driver = {
.probe = mtk_nfc_probe,
- .remove_new = mtk_nfc_remove,
+ .remove = mtk_nfc_remove,
.driver = {
.name = MTK_NAME,
.of_match_table = mtk_nfc_id_table,
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 3d4b2e8294ea..8c56b685bf91 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -20,7 +20,7 @@
#include <linux/irq.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/bitfield.h>
#define DRIVER_NAME "mxc_nand"
@@ -48,6 +48,8 @@
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+#define NFC_V1_V2_ECC_STATUS_RESULT_ERM GENMASK(3, 2)
+
#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
@@ -124,8 +126,7 @@ struct mxc_nand_host;
struct mxc_nand_devtype_data {
void (*preset)(struct mtd_info *);
- int (*read_page)(struct nand_chip *chip, void *buf, void *oob, bool ecc,
- int page);
+ int (*read_page)(struct nand_chip *chip);
void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
void (*send_page)(struct mtd_info *, unsigned int);
@@ -133,7 +134,7 @@ struct mxc_nand_devtype_data {
uint16_t (*get_dev_status)(struct mxc_nand_host *);
int (*check_int)(struct mxc_nand_host *);
void (*irq_control)(struct mxc_nand_host *, int);
- u32 (*get_ecc_status)(struct mxc_nand_host *);
+ u32 (*get_ecc_status)(struct nand_chip *);
const struct mtd_ooblayout_ops *ooblayout;
void (*select_chip)(struct nand_chip *chip, int cs);
int (*setup_interface)(struct nand_chip *chip, int csline,
@@ -176,11 +177,11 @@ struct mxc_nand_host {
int eccsize;
int used_oobsize;
int active_cs;
+ unsigned int ecc_stats_v1;
struct completion op_completion;
- uint8_t *data_buf;
- unsigned int buf_start;
+ void *data_buf;
const struct mxc_nand_devtype_data *devtype_data;
};
@@ -282,63 +283,6 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom, void *buf)
}
}
-/*
- * MXC NANDFC can only perform full page+spare or spare-only read/write. When
- * the upper layers perform a read/write buf operation, the saved column address
- * is used to index into the full page. So usually this function is called with
- * column == 0 (unless no column cycle is needed indicated by column == -1)
- */
-static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
-{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
- /* Write out column address, if necessary */
- if (column != -1) {
- host->devtype_data->send_addr(host, column & 0xff,
- page_addr == -1);
- if (mtd->writesize > 512)
- /* another col addr cycle for 2k page */
- host->devtype_data->send_addr(host,
- (column >> 8) & 0xff,
- false);
- }
-
- /* Write out page address, if necessary */
- if (page_addr != -1) {
- /* paddr_0 - p_addr_7 */
- host->devtype_data->send_addr(host, (page_addr & 0xff), false);
-
- if (mtd->writesize > 512) {
- if (mtd->size >= 0x10000000) {
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff,
- false);
- host->devtype_data->send_addr(host,
- (page_addr >> 16) & 0xff,
- true);
- } else
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff, true);
- } else {
- if (nand_chip->options & NAND_ROW_ADDR_3) {
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff,
- false);
- host->devtype_data->send_addr(host,
- (page_addr >> 16) & 0xff,
- true);
- } else
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff, true);
- }
- }
-}
-
static int check_int_v3(struct mxc_nand_host *host)
{
uint32_t tmp;
@@ -407,19 +351,81 @@ static void irq_control(struct mxc_nand_host *host, int activate)
}
}
-static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+static u32 get_ecc_status_v1(struct nand_chip *chip)
{
- return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int ecc_stats, max_bitflips = 0;
+ int no_subpages, i;
+
+ no_subpages = mtd->writesize >> 9;
+
+ ecc_stats = host->ecc_stats_v1;
+
+ for (i = 0; i < no_subpages; i++) {
+ switch (ecc_stats & 0x3) {
+ case 0:
+ default:
+ break;
+ case 1:
+ mtd->ecc_stats.corrected++;
+ max_bitflips = 1;
+ break;
+ case 2:
+ mtd->ecc_stats.failed++;
+ break;
+ }
+
+ ecc_stats >>= 2;
+ }
+
+ return max_bitflips;
}
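get_ecc_status_v1() assumes two status bits per 512-byte subpage, accumulated during the page read (see the FIELD_GET() hunk in mxc_nand_read_page_v1() below): 0 is clean, 1 a corrected single-bit flip, 2 an uncorrectable subpage. A worked example under that encoding:

/* 2048-byte page -> 4 subpages. ecc_stats = 0x49 = 0b01001001:
 *   subpage 0: 01 -> corrected, subpage 1: 10 -> failed,
 *   subpage 2: 00 -> clean,     subpage 3: 01 -> corrected,
 * so ecc_stats.corrected += 2, ecc_stats.failed += 1, max_bitflips = 1. */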
-static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+static u32 get_ecc_status_v2_v3(struct nand_chip *chip, unsigned int ecc_stat)
{
- return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ u8 ecc_bit_mask, err_limit;
+ unsigned int max_bitflips = 0;
+ int no_subpages, err;
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += err;
+ max_bitflips = max_t(unsigned int, max_bitflips, err);
+ }
+
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ return max_bitflips;
}
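The shared v2/v3 helper instead reads a 4-bit error count per subpage: with 4-bit ECC only bits [2:0] are meaningful and more than 4 corrections marks the subpage failed; with 8-bit ECC the mask widens to [3:0] and the limit to 8. For example:

/* Assuming host->eccsize == 4 (mask 0x7, limit 4), 2048-byte page:
 * ecc_stat = 0x0531 -> per-subpage counts 1, 3, 5, 0 (low nibble first);
 * the count of 5 exceeds the limit and fails that subpage, the rest add
 * 1 + 3 + 0 corrected bitflips, and max_bitflips ends up as 3. */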
-static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+static u32 get_ecc_status_v2(struct nand_chip *chip)
{
- return readl(NFC_V3_ECC_STATUS_RESULT);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ u32 ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
+
+ return get_ecc_status_v2_v3(chip, ecc_stat);
+}
+
+static u32 get_ecc_status_v3(struct nand_chip *chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ u32 ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+
+ return get_ecc_status_v2_v3(chip, ecc_stat);
}
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
@@ -451,14 +457,14 @@ static int wait_op_done(struct mxc_nand_host *host, int useirq)
return 0;
if (useirq) {
- unsigned long timeout;
+ unsigned long time_left;
reinit_completion(&host->op_completion);
irq_control(host, 1);
- timeout = wait_for_completion_timeout(&host->op_completion, HZ);
- if (!timeout && !host->devtype_data->check_int(host)) {
+ time_left = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!time_left && !host->devtype_data->check_int(host)) {
dev_dbg(host->dev, "timeout waiting for irq\n");
ret = -ETIMEDOUT;
}
@@ -698,38 +704,21 @@ static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
writel(config2, NFC_V3_CONFIG2);
}
-/* This functions is used by upper layer to checks if device is ready */
-static int mxc_nand_dev_ready(struct nand_chip *chip)
-{
- /*
- * NFC handles R/B internally. Therefore, this function
- * always returns status as ready.
- */
- return 1;
-}
-
-static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
- bool ecc, int page)
+static int mxc_nand_read_page_v1(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- unsigned int bitflips_corrected = 0;
int no_subpages;
int i;
+ unsigned int ecc_stats = 0;
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- if (mtd->writesize > 512)
- host->devtype_data->send_cmd(host, NAND_CMD_READSTART, true);
-
- no_subpages = mtd->writesize >> 9;
+ if (mtd->writesize)
+ no_subpages = mtd->writesize >> 9;
+ else
+ /* READ PARAMETER PAGE is called when mtd->writesize is not yet set */
+ no_subpages = 1;
for (i = 0; i < no_subpages; i++) {
- uint16_t ecc_stats;
-
/* NANDFC buffer 0 is used for page read/write */
writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
@@ -738,135 +727,74 @@ static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
/* Wait for operation to complete */
wait_op_done(host, true);
- ecc_stats = get_ecc_status_v1(host);
-
- ecc_stats >>= 2;
-
- if (buf && ecc) {
- switch (ecc_stats & 0x3) {
- case 0:
- default:
- break;
- case 1:
- mtd->ecc_stats.corrected++;
- bitflips_corrected = 1;
- break;
- case 2:
- mtd->ecc_stats.failed++;
- break;
- }
- }
+ ecc_stats |= FIELD_GET(NFC_V1_V2_ECC_STATUS_RESULT_ERM,
+ readw(NFC_V1_V2_ECC_STATUS_RESULT)) << i * 2;
}
- if (buf)
- memcpy32_fromio(buf, host->main_area0, mtd->writesize);
- if (oob)
- copy_spare(mtd, true, oob);
+ host->ecc_stats_v1 = ecc_stats;
- return bitflips_corrected;
+ return 0;
}
-static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
- void *oob, bool ecc, int page)
+static int mxc_nand_read_page_v2_v3(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- unsigned int max_bitflips = 0;
- u32 ecc_stat, err;
- int no_subpages;
- u8 ecc_bit_mask, err_limit;
-
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- if (mtd->writesize > 512)
- host->devtype_data->send_cmd(host,
- NAND_CMD_READSTART, true);
host->devtype_data->send_page(mtd, NFC_OUTPUT);
- if (buf)
- memcpy32_fromio(buf, host->main_area0, mtd->writesize);
- if (oob)
- copy_spare(mtd, true, oob);
-
- ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
- err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
-
- no_subpages = mtd->writesize >> 9;
-
- ecc_stat = host->devtype_data->get_ecc_status(host);
-
- do {
- err = ecc_stat & ecc_bit_mask;
- if (err > err_limit) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += err;
- max_bitflips = max_t(unsigned int, max_bitflips, err);
- }
-
- ecc_stat >>= 4;
- } while (--no_subpages);
-
- return max_bitflips;
+ return 0;
}
static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- void *oob_buf;
+ int ret;
+
+ host->devtype_data->enable_hwecc(chip, true);
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+
+ host->devtype_data->enable_hwecc(chip, false);
+
+ if (ret)
+ return ret;
if (oob_required)
- oob_buf = chip->oob_poi;
- else
- oob_buf = NULL;
+ copy_spare(mtd, true, chip->oob_poi);
- return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
+ return host->devtype_data->get_ecc_status(chip);
}
static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- void *oob_buf;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
if (oob_required)
- oob_buf = chip->oob_poi;
- else
- oob_buf = NULL;
+ copy_spare(mtd, true, chip->oob_poi);
- return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
+ return 0;
}
static int mxc_nand_read_oob(struct nand_chip *chip, int page)
{
- struct mxc_nand_host *host = nand_get_controller_data(chip);
-
- return host->devtype_data->read_page(chip, NULL, chip->oob_poi, 0,
- page);
-}
-
-static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
- bool ecc, int page)
-{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int ret;
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_SEQIN, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- memcpy32_toio(host->main_area0, buf, mtd->writesize);
- copy_spare(mtd, false, chip->oob_poi);
+ ret = nand_read_page_op(chip, page, 0, host->data_buf, mtd->writesize);
+ if (ret)
+ return ret;
- host->devtype_data->send_page(mtd, NFC_INPUT);
- host->devtype_data->send_cmd(host, NAND_CMD_PAGEPROG, true);
- mxc_do_addr_cycle(mtd, 0, page);
+ copy_spare(mtd, true, chip->oob_poi);
return 0;
}
@@ -874,83 +802,40 @@ static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
- return mxc_nand_write_page(chip, buf, true, page);
-}
-
-static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
- int oob_required, int page)
-{
- return mxc_nand_write_page(chip, buf, false, page);
-}
-
-static int mxc_nand_write_oob(struct nand_chip *chip, int page)
-{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int ret;
- memset(host->data_buf, 0xff, mtd->writesize);
-
- return mxc_nand_write_page(chip, host->data_buf, false, page);
-}
-
-static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
-{
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- uint8_t ret;
+ copy_spare(mtd, false, chip->oob_poi);
- /* Check for status request */
- if (host->status_request)
- return host->devtype_data->get_dev_status(host) & 0xFF;
+ host->devtype_data->enable_hwecc(chip, true);
- if (nand_chip->options & NAND_BUSWIDTH_16) {
- /* only take the lower byte of each word */
- ret = *(uint16_t *)(host->data_buf + host->buf_start);
+ ret = nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
- host->buf_start += 2;
- } else {
- ret = *(uint8_t *)(host->data_buf + host->buf_start);
- host->buf_start++;
- }
+ host->devtype_data->enable_hwecc(chip, false);
- dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
-/* Write data of length len to buffer buf. The data to be
- * written on NAND Flash is first copied to RAMbuffer. After the Data Input
- * Operation by the NFC, the data is written to NAND Flash */
-static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
- int len)
+static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- u16 col = host->buf_start;
- int n = mtd->oobsize + mtd->writesize - col;
-
- n = min(n, len);
+ struct mtd_info *mtd = nand_to_mtd(chip);
- memcpy(host->data_buf + col, buf, n);
+ copy_spare(mtd, false, chip->oob_poi);
- host->buf_start += n;
+ return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}
-/* Read the data buffer from the NAND Flash. To read the data from NAND
- * Flash first the data output cycle is initiated by the NFC, which copies
- * the data to RAMbuffer. This data of length len is then copied to buffer buf.
- */
-static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
- int len)
+static int mxc_nand_write_oob(struct nand_chip *chip, int page)
{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- u16 col = host->buf_start;
- int n = mtd->oobsize + mtd->writesize - col;
-
- n = min(n, len);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
- memcpy(buf, host->data_buf + col, n);
+ memset(host->data_buf, 0xff, mtd->writesize);
+ copy_spare(mtd, false, chip->oob_poi);
- host->buf_start += n;
+ return nand_prog_page_op(chip, page, 0, host->data_buf, mtd->writesize);
}
/* This function is used by upper layer for select and
@@ -1329,107 +1214,6 @@ static void preset_v3(struct mtd_info *mtd)
writel(0, NFC_V3_DELAY_LINE);
}
-/* Used by the upper layer to write command to NAND Flash for
- * different operations to be carried out on NAND Flash */
-static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
- int column, int page_addr)
-{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
- dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
- command, column, page_addr);
-
- /* Reset command state information */
- host->status_request = false;
-
- /* Command pre-processing step */
- switch (command) {
- case NAND_CMD_RESET:
- host->devtype_data->preset(mtd);
- host->devtype_data->send_cmd(host, command, false);
- break;
-
- case NAND_CMD_STATUS:
- host->buf_start = 0;
- host->status_request = true;
-
- host->devtype_data->send_cmd(host, command, true);
- WARN_ONCE(column != -1 || page_addr != -1,
- "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
- command, column, page_addr);
- mxc_do_addr_cycle(mtd, column, page_addr);
- break;
-
- case NAND_CMD_READID:
- host->devtype_data->send_cmd(host, command, true);
- mxc_do_addr_cycle(mtd, column, page_addr);
- host->devtype_data->send_read_id(host);
- host->buf_start = 0;
- break;
-
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- host->devtype_data->send_cmd(host, command, false);
- WARN_ONCE(column != -1,
- "Unexpected column value (cmd=%u, col=%d)\n",
- command, column);
- mxc_do_addr_cycle(mtd, column, page_addr);
-
- break;
- case NAND_CMD_PARAM:
- host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
- host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy32_fromio(host->data_buf, host->main_area0, 512);
- host->buf_start = 0;
- break;
- default:
- WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
- command);
- break;
- }
-}
-
-static int mxc_nand_set_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- int i;
-
- host->buf_start = 0;
-
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->legacy.write_byte(chip, subfeature_param[i]);
-
- memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
- host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
- mxc_do_addr_cycle(mtd, addr, -1);
- host->devtype_data->send_page(mtd, NFC_INPUT);
-
- return 0;
-}
-
-static int mxc_nand_get_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- int i;
-
- host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
- mxc_do_addr_cycle(mtd, addr, -1);
- host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy32_fromio(host->data_buf, host->main_area0, 512);
- host->buf_start = 0;
-
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->legacy.read_byte(chip);
-
- return 0;
-}
-
/*
* The generic flash bbt descriptors overlap with our ecc
* hardware, so define some i.MX specific ones.
@@ -1618,10 +1402,10 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = host->devtype_data->eccbytes;
host->eccsize = host->devtype_data->eccsize;
chip->ecc.size = 512;
- mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
chip->ecc.read_page = mxc_nand_read_page;
chip->ecc.read_page_raw = mxc_nand_read_page_raw;
chip->ecc.read_oob = mxc_nand_read_oob;
@@ -1631,6 +1415,8 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
break;
default:
@@ -1686,9 +1472,217 @@ static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
return host->devtype_data->setup_interface(chip, chipnr, conf);
}
+static void memff16_toio(void *buf, int n)
+{
+ u16 __iomem *t = buf;
+ int i;
+
+ for (i = 0; i < (n >> 1); i++)
+ __raw_writew(0xffff, t++);
+}
+
+static void copy_page_to_sram(struct mtd_info *mtd, const void *buf, int buf_len)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ unsigned int no_subpages = mtd->writesize / 512;
+ int oob_per_subpage, i;
+
+ oob_per_subpage = (mtd->oobsize / no_subpages) & ~1;
+
+ /*
+ * During a page write the i.MX NAND controller will read 512 bytes from
+ * main_area0 SRAM, then oob_per_subpage bytes from spare0 SRAM, then
+ * 512 bytes from main_area1 SRAM and so on until the full page is written.
+ * For software ECC we want to have a 1:1 mapping between the raw page
+ * data on the NAND chip and the view of the NAND core. This is
+ * necessary to make the NAND_CMD_RNDOUT read the data it expects.
+ * To accomplish this we have to write the data in the order the controller
+ * reads it. This is reversed in copy_page_from_sram() below.
+ *
+ * buf_len can either be the full page including the OOB or user data only.
+ * When it's user data only make sure that we fill up the rest of the
+ * SRAM with 0xff.
+ */
+ for (i = 0; i < no_subpages; i++) {
+ int now = min(buf_len, 512);
+
+ if (now)
+ memcpy16_toio(host->main_area0 + i * 512, buf, now);
+
+ if (now < 512)
+ memff16_toio(host->main_area0 + i * 512 + now, 512 - now);
+
+ buf += 512;
+ buf_len -= now;
+
+ now = min(buf_len, oob_per_subpage);
+ if (now)
+ memcpy16_toio(host->spare0 + i * host->devtype_data->spare_len,
+ buf, now);
+
+ if (now < oob_per_subpage)
+ memff16_toio(host->spare0 + i * host->devtype_data->spare_len + now,
+ oob_per_subpage - now);
+
+ buf += oob_per_subpage;
+ buf_len -= now;
+ }
+}
+
+static void copy_page_from_sram(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ void *buf = host->data_buf;
+ unsigned int no_subpages = mtd->writesize / 512;
+ int oob_per_subpage, i;
+
+ /* mtd->writesize is not set during ident scanning */
+ if (!no_subpages)
+ no_subpages = 1;
+
+ oob_per_subpage = (mtd->oobsize / no_subpages) & ~1;
+
+ for (i = 0; i < no_subpages; i++) {
+ memcpy16_fromio(buf, host->main_area0 + i * 512, 512);
+ buf += 512;
+
+ memcpy16_fromio(buf, host->spare0 + i * host->devtype_data->spare_len,
+ oob_per_subpage);
+ buf += oob_per_subpage;
+ }
+}
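These two helpers translate between the controller's interleaved SRAM view (512 bytes of main data, then a slice of OOB, per subpage) and the flat raw-page layout the NAND core expects for software ECC. Worked numbers under an assumed geometry:

/* Assuming mtd->writesize = 2048 and mtd->oobsize = 64:
 *   no_subpages = 4, oob_per_subpage = (64 / 4) & ~1 = 16,
 * so the flat buffer is laid out as
 *   [512 data][16 oob][512 data][16 oob][512 data][16 oob][512 data][16 oob]
 * while the controller keeps the data in main_area0..3 and the OOB slices
 * in spare0, spaced devtype_data->spare_len bytes apart. */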
+
+static int mxcnd_do_exec_op(struct nand_chip *chip,
+ const struct nand_subop *op)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, j, buf_len;
+ void *buf_read = NULL;
+ const void *buf_write = NULL;
+ const struct nand_op_instr *instr;
+ bool readid = false;
+ bool statusreq = false;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_WAITRDY_INSTR:
+ /* NFC handles R/B internally, nothing to do here */
+ break;
+ case NAND_OP_CMD_INSTR:
+ host->devtype_data->send_cmd(host, instr->ctx.cmd.opcode, true);
+
+ if (instr->ctx.cmd.opcode == NAND_CMD_READID)
+ readid = true;
+ if (instr->ctx.cmd.opcode == NAND_CMD_STATUS)
+ statusreq = true;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (j = 0; j < instr->ctx.addr.naddrs; j++) {
+ bool islast = j == instr->ctx.addr.naddrs - 1;
+ host->devtype_data->send_addr(host, instr->ctx.addr.addrs[j], islast);
+ }
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ buf_write = instr->ctx.data.buf.out;
+ buf_len = instr->ctx.data.len;
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ memcpy32_toio(host->main_area0, buf_write, buf_len);
+ else
+ copy_page_to_sram(mtd, buf_write, buf_len);
+
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+
+ buf_read = instr->ctx.data.buf.in;
+ buf_len = instr->ctx.data.len;
+
+ if (readid) {
+ host->devtype_data->send_read_id(host);
+ readid = false;
+
+ memcpy32_fromio(host->data_buf, host->main_area0, buf_len * 2);
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ u8 *bufr = buf_read;
+ u16 *bufw = host->data_buf;
+ for (j = 0; j < buf_len; j++)
+ bufr[j] = bufw[j];
+ } else {
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+ break;
+ }
+
+ if (statusreq) {
+ *(u8 *)buf_read = host->devtype_data->get_dev_status(host);
+ statusreq = false;
+ break;
+ }
+
+ host->devtype_data->read_page(chip);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ if (IS_ALIGNED(buf_len, 4)) {
+ memcpy32_fromio(buf_read, host->main_area0, buf_len);
+ } else {
+ memcpy32_fromio(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+ } else {
+ copy_page_from_sram(mtd);
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+#define MAX_DATA_SIZE (4096 + 512)
+
+static const struct nand_op_parser mxcnd_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
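The three parser patterns are, in order, read-shaped, program-shaped, and feature-write-shaped; nand_op_parser_exec_op() picks the first pattern whose mandatory elements match and hands the sub-operation to mxcnd_do_exec_op(), which replays each instruction through the devtype hooks. An assumed mapping of common operations:

/* Assumption based on the element lists above:
 *   pattern 1: READ0 / ADDR / READSTART / WAITRDY / DATA_IN (page read);
 *              also READID / ADDR / DATA_IN and STATUS / DATA_IN, since
 *              the ADDR, CMD, WAITRDY and DATA_IN elements are optional;
 *   pattern 2: SEQIN / ADDR / DATA_OUT / PAGEPROG / WAITRDY (page program);
 *   pattern 3: SET_FEATURES / ADDR / DATA_OUT [/ CMD / WAITRDY]. */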
+
+static int mxcnd_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &mxcnd_op_parser,
+ op, check_only);
+}
+
static const struct nand_controller_ops mxcnd_controller_ops = {
.attach_chip = mxcnd_attach_chip,
.setup_interface = mxcnd_setup_interface,
+ .exec_op = mxcnd_exec_op,
};
static int mxcnd_probe(struct platform_device *pdev)
@@ -1696,7 +1690,6 @@ static int mxcnd_probe(struct platform_device *pdev)
struct nand_chip *this;
struct mtd_info *mtd;
struct mxc_nand_host *host;
- struct resource *res;
int err = 0;
/* Allocate memory for MTD device structure and private data */
@@ -1722,13 +1715,6 @@ static int mxcnd_probe(struct platform_device *pdev)
nand_set_controller_data(this, host);
nand_set_flash_node(this, pdev->dev.of_node);
- this->legacy.dev_ready = mxc_nand_dev_ready;
- this->legacy.cmdfunc = mxc_nand_command;
- this->legacy.read_byte = mxc_nand_read_byte;
- this->legacy.write_buf = mxc_nand_write_buf;
- this->legacy.read_buf = mxc_nand_read_buf;
- this->legacy.set_features = mxc_nand_set_features;
- this->legacy.get_features = mxc_nand_get_features;
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
@@ -1740,17 +1726,15 @@ static int mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ host->regs_ip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regs_ip))
return PTR_ERR(host->regs_ip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->base = devm_platform_ioremap_resource(pdev, 1);
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
}
- host->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->base))
return PTR_ERR(host->base);
@@ -1840,7 +1824,7 @@ static struct platform_driver mxcnd_driver = {
.of_match_table = mxcnd_dt_ids,
},
.probe = mxcnd_probe,
- .remove_new = mxcnd_remove,
+ .remove = mxcnd_remove,
};
module_platform_driver(mxcnd_driver);
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index be8050e84b4f..92de26697359 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -574,7 +574,7 @@ MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids);
static struct platform_driver mxic_nfc_driver = {
.probe = mxic_nfc_probe,
- .remove_new = mxic_nfc_remove,
+ .remove = mxic_nfc_remove,
.driver = {
.name = "mxic-nfc",
.of_match_table = mxic_nfc_of_ids,
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index a6af521832aa..ad6d66309597 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include "internals.h"
@@ -367,6 +366,10 @@ static int nand_check_wp(struct nand_chip *chip)
if (chip->options & NAND_BROKEN_XD)
return 0;
+ /* controller responsible for NAND write protect */
+ if (chip->controller->controller_wp)
+ return 0;
+
/* Check the WP bit */
ret = nand_status_op(chip, &status);
if (ret)
@@ -1090,28 +1093,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
unsigned int offset_in_page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ bool ident_stage = !mtd->writesize;
- /* Make sure the offset is less than the actual page size. */
- if (offset_in_page > mtd->writesize + mtd->oobsize)
- return -EINVAL;
+ /* Bypass all checks during NAND identification */
+ if (likely(!ident_stage)) {
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- /*
- * On small page NANDs, there's a dedicated command to access the OOB
- * area, and the column address is relative to the start of the OOB
- * area, not the start of the page. Asjust the address accordingly.
- */
- if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
- offset_in_page -= mtd->writesize;
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
- /*
- * The offset in page is expressed in bytes, if the NAND bus is 16-bit
- * wide, then it must be divided by 2.
- */
- if (chip->options & NAND_BUSWIDTH_16) {
- if (WARN_ON(offset_in_page % 2))
- return -EINVAL;
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
- offset_in_page /= 2;
+ offset_in_page /= 2;
+ }
}
addrs[0] = offset_in_page;
@@ -1120,7 +1127,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
* Small page NANDs use 1 cycle for the columns, while large page NANDs
* need 2
*/
- if (mtd->writesize <= 512)
+ if (!ident_stage && mtd->writesize <= 512)
return 1;
addrs[1] = offset_in_page >> 8;
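The identification-stage bypass matters because mtd->writesize is still zero while the ONFI/JEDEC parameter pages are being read; once the geometry is known, the usual checks and the 16-bit word conversion apply. Worked numbers for the normal path:

/* Example (assumption: large page, NAND_BUSWIDTH_16 set):
 * offset_in_page = 2050 bytes -> 2050 / 2 = 1025 words, so
 *   addrs[0] = 1025 & 0xff = 0x01;
 *   addrs[1] = 1025 >> 8   = 0x04;
 * and nand_fill_column_cycles() returns 2 column cycles. */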
@@ -1208,6 +1215,38 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
+{
+ /* lun is expected to be very small */
+ return (lun * pages_per_lun) + pages_per_lun - 1;
+}
+
+static void rawnand_cap_cont_reads(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ unsigned int ppl, first_lun, last_lun;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / ppl;
+ last_lun = chip->cont_read.last_page / ppl;
+
+ /* Prevent sequential cache reads across LUN boundaries */
+ if (first_lun != last_lun)
+ chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
+ else
+ chip->cont_read.pause_page = chip->cont_read.last_page;
+
+ if (chip->cont_read.first_page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ chip->cont_read.pause_page = min(chip->cont_read.last_page,
+ rawnand_last_page_of_lun(ppl, first_lun + 1));
+ }
+
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
+}
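rawnand_cap_cont_reads() pins the sequential-cache-read window inside a single LUN, since the cache-read command stream cannot cross LUN boundaries. A worked example with assumed geometry:

/* Assume 64 pages per block and 1024 blocks per LUN -> ppl = 65536.
 * A read of pages 65530..65600 spans LUN 0 and LUN 1, so pause_page is
 * first capped to 65535 (last page of LUN 0). Once the pause page is
 * consumed, first_page becomes 65536, the function is re-run and
 * pause_page advances to last_page (65600) inside LUN 1. */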
+
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len, bool check_only)
@@ -1226,7 +1265,7 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_op_instr cont_instrs[] = {
- NAND_OP_CMD(page == chip->cont_read.last_page ?
+ NAND_OP_CMD(page == chip->cont_read.pause_page ?
NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
@@ -1263,16 +1302,28 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
}
if (page == chip->cont_read.first_page)
- return nand_exec_op(chip, &start_op);
+ ret = nand_exec_op(chip, &start_op);
else
- return nand_exec_op(chip, &cont_op);
+ ret = nand_exec_op(chip, &cont_op);
+ if (ret)
+ return ret;
+
+ if (!chip->cont_read.ongoing)
+ return 0;
+
+ if (page == chip->cont_read.last_page) {
+ chip->cont_read.ongoing = false;
+ } else if (page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ rawnand_cap_cont_reads(chip);
+ }
+
+ return 0;
}
static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
{
- return chip->cont_read.ongoing &&
- page >= chip->cont_read.first_page &&
- page <= chip->cont_read.last_page;
+ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
}
/**
@@ -1389,16 +1440,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ bool ident_stage = !mtd->writesize;
if (len && !buf)
return -EINVAL;
- if (offset_in_page + len > mtd->writesize + mtd->oobsize)
- return -EINVAL;
+ if (!ident_stage) {
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- /* Small page NANDs do not support column change. */
- if (mtd->writesize <= 512)
- return -ENOTSUPP;
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+ }
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
@@ -1494,7 +1548,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (naddrs < 0)
@@ -1778,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
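kcalloc(2, len, GFP_KERNEL) is the overflow-checked spelling of kzalloc(len * 2, GFP_KERNEL): the multiplication is validated before the allocation instead of silently wrapping. The same substitution is applied to the NV-DDR bounce buffer in nand_read_data_op() further down.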
@@ -1885,6 +1940,7 @@ int nand_exit_status_op(struct nand_chip *chip)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
* nand_erase_op - Do an erase operation
@@ -1916,7 +1972,8 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
@@ -2123,7 +2180,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only)
{
- if (!len || !buf)
+ if (!len || (!check_only && !buf))
return -EINVAL;
if (nand_has_exec_op(chip)) {
@@ -2146,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
* twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
@@ -2727,137 +2784,6 @@ int nand_set_features(struct nand_chip *chip, int addr,
}
/**
- * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
- * @buf: buffer to test
- * @len: buffer length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a buffer contains only 0xff, which means the underlying region
- * has been erased and is ready to be programmed.
- * The bitflips_threshold specify the maximum number of bitflips before
- * considering the region is not erased.
- * Note: The logic of this function has been extracted from the memweight
- * implementation, except that nand_check_erased_buf function exit before
- * testing the whole buffer if the number of bitflips exceed the
- * bitflips_threshold value.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold.
- */
-static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
-{
- const unsigned char *bitmap = buf;
- int bitflips = 0;
- int weight;
-
- for (; len && ((uintptr_t)bitmap) % sizeof(long);
- len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- for (; len >= sizeof(long);
- len -= sizeof(long), bitmap += sizeof(long)) {
- unsigned long d = *((unsigned long *)bitmap);
- if (d == ~0UL)
- continue;
- weight = hweight_long(d);
- bitflips += BITS_PER_LONG - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- for (; len > 0; len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- return bitflips;
-}
-
-/**
- * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
- * 0xff data
- * @data: data buffer to test
- * @datalen: data length
- * @ecc: ECC buffer
- * @ecclen: ECC length
- * @extraoob: extra OOB buffer
- * @extraooblen: extra OOB length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a data buffer and its associated ECC and OOB data contains only
- * 0xff pattern, which means the underlying region has been erased and is
- * ready to be programmed.
- * The bitflips_threshold specify the maximum number of bitflips before
- * considering the region as not erased.
- *
- * Note:
- * 1/ ECC algorithms are working on pre-defined block sizes which are usually
- * different from the NAND page size. When fixing bitflips, ECC engines will
- * report the number of errors per chunk, and the NAND core infrastructure
- * expect you to return the maximum number of bitflips for the whole page.
- * This is why you should always use this function on a single chunk and
- * not on the whole page. After checking each chunk you should update your
- * max_bitflips value accordingly.
- * 2/ When checking for bitflips in erased pages you should not only check
- * the payload data but also their associated ECC data, because a user might
- * have programmed almost all bits to 1 but a few. In this case, we
- * shouldn't consider the chunk as erased, and checking ECC bytes prevent
- * this case.
- * 3/ The extraoob argument is optional, and should be used if some of your OOB
- * data are protected by the ECC engine.
- * It could also be used if you support subpages and want to attach some
- * extra OOB data to an ECC chunk.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold. In case of success, the passed buffers are filled with 0xff.
- */
-int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int bitflips_threshold)
-{
- int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
-
- data_bitflips = nand_check_erased_buf(data, datalen,
- bitflips_threshold);
- if (data_bitflips < 0)
- return data_bitflips;
-
- bitflips_threshold -= data_bitflips;
-
- ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
- if (ecc_bitflips < 0)
- return ecc_bitflips;
-
- bitflips_threshold -= ecc_bitflips;
-
- extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
- bitflips_threshold);
- if (extraoob_bitflips < 0)
- return extraoob_bitflips;
-
- if (data_bitflips)
- memset(data, 0xff, datalen);
-
- if (ecc_bitflips)
- memset(ecc, 0xff, ecclen);
-
- if (extraoob_bitflips)
- memset(extraoob, 0xff, extraooblen);
-
- return data_bitflips + ecc_bitflips + extraoob_bitflips;
-}
-EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
-
-/**
* nand_read_page_raw_notsupp - dummy read raw page function
* @chip: nand chip info structure
* @buf: buffer to store read data
@@ -3430,21 +3356,45 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int first_page, last_page;
+
+ chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- if ((col && col + readlen < (3 * mtd->writesize)) ||
- (!col && readlen < (2 * mtd->writesize))) {
- chip->cont_read.ongoing = false;
+ /*
+ * Don't bother making any calculations if the length is too small.
+ * Side effect: avoids possible integer underflows below.
+ */
+ if (readlen < (2 * mtd->writesize))
return;
- }
- chip->cont_read.ongoing = true;
- chip->cont_read.first_page = page;
+ /* Derive the page where continuous read should start (the first full page read) */
+ first_page = page;
if (col)
- chip->cont_read.first_page++;
- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
+ first_page++;
+
+ /* Derive the page where continuous read should stop (the last full page read) */
+ last_page = page + ((col + readlen) / mtd->writesize) - 1;
+
+ /* Configure and enable continuous read when suitable */
+ if (first_page < last_page) {
+ chip->cont_read.first_page = first_page;
+ chip->cont_read.last_page = last_page;
+ chip->cont_read.ongoing = true;
+ /* May reset the ongoing flag */
+ rawnand_cap_cont_reads(chip);
+ }
+}
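first_page and last_page now bound only the pages read in full: a leading partial read (col != 0) bumps first_page, and last_page is the last page entirely covered by the request. Worked numbers under an assumed 4 KiB page size:

/* writesize = 4096:
 *   col = 0,   readlen = 12288 -> first_page = page, last_page = page + 2,
 *     so the window spans three full pages and continuous read is enabled;
 *   col = 100, readlen = 10000 -> first_page = page + 1 and
 *     last_page = page + (100 + 10000) / 4096 - 1 = page + 1,
 *     so first_page == last_page and continuous read stays off. */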
+
+static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
+{
+ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
+ return;
+
+ chip->cont_read.first_page++;
+ rawnand_cap_cont_reads(chip);
}
/**
@@ -3520,7 +3470,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
- rawnand_enable_cont_reads(chip, page, readlen, col);
+ if (likely(ops->mode != MTD_OPS_RAW))
+ rawnand_enable_cont_reads(chip, page, readlen, col);
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
@@ -3621,6 +3572,8 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagecache.bitflips);
+
+ rawnand_cont_read_skip_first_page(chip, page);
}
readlen -= bytes;
@@ -3651,6 +3604,9 @@ read_retry:
}
nand_deselect_target(chip);
+ if (WARN_ON_ONCE(chip->cont_read.ongoing))
+ chip->cont_read.ongoing = false;
+
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
@@ -5109,6 +5065,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (!chip->parameters.supports_read_cache)
+ return;
+
if (chip->read_retries)
return;
@@ -5122,9 +5081,26 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
/* The supported_op fields should not be set by individual drivers */
WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+ /*
+ * Too many devices do not support sequential cached reads with on-die
+ * ECC correction enabled, so in this case do not enable the feature
+ * automatically.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
+ return;
+
if (!nand_has_exec_op(chip))
return;
+ /*
+ * For now, continuous reads can only be used with the core page helpers.
+ * This can be extended later.
+ */
+ if (!(chip->ecc.read_page == nand_read_page_hwecc ||
+ chip->ecc.read_page == nand_read_page_syndrome ||
+ chip->ecc.read_page == nand_read_page_swecc))
+ return;
+
rawnand_check_cont_read_support(chip);
}
@@ -6201,6 +6177,7 @@ static const struct nand_ops rawnand_ops = {
static int nand_scan_tail(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *base = &chip->base;
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i;
@@ -6345,9 +6322,13 @@ static int nand_scan_tail(struct nand_chip *chip)
if (!ecc->write_oob_raw)
ecc->write_oob_raw = ecc->write_oob;
- /* propagate ecc info to mtd_info */
+ /* Propagate ECC info to the generic NAND and MTD layers */
mtd->ecc_strength = ecc->strength;
+ if (!base->ecc.ctx.conf.strength)
+ base->ecc.ctx.conf.strength = ecc->strength;
mtd->ecc_step_size = ecc->size;
+ if (!base->ecc.ctx.conf.step_size)
+ base->ecc.ctx.conf.step_size = ecc->size;
/*
* Set the number of read / write steps for one page depending on ECC
@@ -6355,11 +6336,16 @@ static int nand_scan_tail(struct nand_chip *chip)
*/
if (!ecc->steps)
ecc->steps = mtd->writesize / ecc->size;
- if (ecc->steps * ecc->size != mtd->writesize) {
- WARN(1, "Invalid ECC parameters\n");
- ret = -EINVAL;
- goto err_nand_manuf_cleanup;
- }
+ if (!base->ecc.ctx.nsteps)
+ base->ecc.ctx.nsteps = ecc->steps;
+
+ /*
+ * Validity check: warn if the ECC parameters do not cover the page size
+ * exactly. Controllers with custom handling of ECC blocks may trip this
+ * check even though their layout is valid.
+ */
+ if (ecc->steps * ecc->size != mtd->writesize)
+ pr_warn("ECC parameters may not match the underlying NAND chip\n");
if (!ecc->total) {
ecc->total = ecc->steps * ecc->bytes;
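
The geometry derivation above reduces to a few integer operations; a small stand-alone sketch, with assumed example page and step sizes:

#include <stdio.h>

int main(void)
{
	/* Assumed example geometry: 2 KiB page, 512 B steps, 7 ECC bytes/step */
	unsigned int writesize = 2048, ecc_size = 512, ecc_bytes = 7;
	unsigned int steps = writesize / ecc_size;

	/* The former hard error is now only a warning, as in the hunk above */
	if (steps * ecc_size != writesize)
		printf("warning: ECC parameters may not match the page size\n");

	printf("steps=%u, total ECC bytes per page=%u\n",
	       steps, steps * ecc_bytes);
	return 0;
}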
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index e4664fa6fd9e..a8fba5f39f59 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -576,7 +576,6 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
startblock &= bbtblocks - 1;
} else {
chips = 1;
- bbtblocks = mtd->size >> this->bbt_erase_shift;
}
for (i = 0; i < chips; i++) {
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 39076735a3fb..b663659b2f49 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -31,7 +31,6 @@ struct hynix_read_retry {
/**
* struct hynix_nand - private Hynix NAND struct
- * @nand_technology: manufacturing process expressed in picometer
* @read_retry: read-retry information
*/
struct hynix_nand {
@@ -378,9 +377,9 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
/*
* We only support read-retry for 1xnm NANDs, and those NANDs all
- * expose a valid JEDEC ID.
+ * expose a valid JEDEC ID. SLC NANDs don't require read-retry.
*/
- if (valid_jedecid) {
+ if (valid_jedecid && nanddev_bits_per_cell(&chip->base) > 1) {
u8 nand_tech = chip->id.data[5] >> 4;
/* 1xnm technology */
@@ -402,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
if (ret)
pr_warn("failed to initialize read-retry infrastructure");
- return 0;
+ return ret;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 836757717660..b3cc8f360529 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
+ if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
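
The JEDEC hunk above (and the matching ONFI hunk further down) is a plain capability-bit test on the parameter page's opt_cmd field; a sketch of the idea, with a made-up parameter-page value and an illustrative bit position:

#include <stdint.h>
#include <stdio.h>

#define OPT_CMD_READ_CACHE (1 << 1)	/* illustrative opt_cmd bit position */

int main(void)
{
	uint16_t opt_cmd = 0x003a;	/* made-up parameter page value */
	int supports_read_cache = !!(opt_cmd & OPT_CMD_READ_CACHE);

	printf("read cache supported: %d\n", supports_read_cache);
	return 0;
}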
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index e229de32ff50..03237310852c 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -113,7 +113,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip)
rand_otp = of_property_read_bool(dn, "mxic,enable-randomizer-otp");
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
- /* Subpage write is prohibited in randomizer operatoin */
+ /* Subpage write is prohibited in randomizer operation */
if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
if (p->supports_set_get_features) {
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index f15ef90aec8c..861975e44b55 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
if (!onfi) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 179b28459b4b..84942e7e528f 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -552,9 +552,8 @@ static int __init ns_alloc_device(struct nandsim *ns)
err = -EINVAL;
goto err_close_filp;
}
- ns->pages_written =
- vzalloc(array_size(sizeof(unsigned long),
- BITS_TO_LONGS(ns->geom.pgnum)));
+ ns->pages_written = vcalloc(BITS_TO_LONGS(ns->geom.pgnum),
+ sizeof(unsigned long));
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
@@ -578,7 +577,7 @@ err_close_filp:
return err;
}
- ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
+ ns->pages = vmalloc_array(ns->geom.pgnum, sizeof(union ns_mem));
if (!ns->pages) {
NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
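
The conversion above swaps open-coded array_size() multiplications for allocators that take count and element size separately; the user-space analogue is calloc(), which performs the same overflow-checked multiplication:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t count = 1024;	/* assumed example count */
	/* checked count * size multiply, zero-initialized result */
	unsigned long *bitmap = calloc(count, sizeof(*bitmap));

	if (!bitmap)
		return 1;
	printf("allocated %zu bytes\n", count * sizeof(*bitmap));
	free(bitmap);
	return 0;
}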
@@ -1381,7 +1380,7 @@ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
}
/*
- * Retuns a pointer to the current byte, within the current page.
+ * Returns a pointer to the current byte, within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 57f3db32122d..13365128194d 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,8 +22,9 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#define NDFC_MAX_CS 4
@@ -265,7 +266,7 @@ static struct platform_driver ndfc_driver = {
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
- .remove_new = ndfc_remove,
+ .remove = ndfc_remove,
};
module_platform_driver(ndfc_driver);
diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
new file mode 100644
index 000000000000..1a285cd8fad6
--- /dev/null
+++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* NFI Registers */
+#define MA35_NFI_REG_DMACTL 0x400
+#define DMA_EN BIT(0)
+#define DMA_RST BIT(1)
+#define DMA_BUSY BIT(9)
+
+#define MA35_NFI_REG_DMASA 0x408
+#define MA35_NFI_REG_GCTL 0x800
+#define GRST BIT(0)
+#define NAND_EN BIT(3)
+
+#define MA35_NFI_REG_NANDCTL 0x8A0
+#define SWRST BIT(0)
+#define DMA_R_EN BIT(1)
+#define DMA_W_EN BIT(2)
+#define ECC_CHK BIT(7)
+#define PROT3BEN BIT(8)
+#define PSIZE_2K BIT(16)
+#define PSIZE_4K BIT(17)
+#define PSIZE_8K GENMASK(17, 16)
+#define PSIZE_MASK GENMASK(17, 16)
+#define BCH_T24 BIT(18)
+#define BCH_T8 BIT(20)
+#define BCH_T12 BIT(21)
+#define BCH_NONE (0x0)
+#define BCH_MASK GENMASK(22, 18)
+#define ECC_EN BIT(23)
+#define DISABLE_CS0 BIT(25)
+
+#define MA35_NFI_REG_NANDINTEN 0x8A8
+#define MA35_NFI_REG_NANDINTSTS 0x8AC
+#define INT_DMA BIT(0)
+#define INT_ECC BIT(2)
+#define INT_RB0 BIT(10)
+
+#define MA35_NFI_REG_NANDCMD 0x8B0
+#define MA35_NFI_REG_NANDADDR 0x8B4
+#define ENDADDR BIT(31)
+
+#define MA35_NFI_REG_NANDDATA 0x8B8
+#define MA35_NFI_REG_NANDRACTL 0x8BC
+#define MA35_NFI_REG_NANDECTL 0x8C0
+#define ENABLE_WP 0x0
+#define DISABLE_WP BIT(0)
+
+#define MA35_NFI_REG_NANDECCES0 0x8D0
+#define ECC_STATUS_MASK GENMASK(1, 0)
+#define ECC_ERR_CNT_MASK GENMASK(4, 0)
+
+#define MA35_NFI_REG_NANDECCEA0 0x900
+#define MA35_NFI_REG_NANDECCED0 0x960
+#define MA35_NFI_REG_NANDRA0 0xA00
+
+/* Define for the BCH hardware ECC engine */
+/* define the total padding bytes for 512/1024 data segment */
+#define MA35_BCH_PADDING_512 32
+#define MA35_BCH_PADDING_1024 64
+/* define the BCH parity code length for 512 bytes data pattern */
+#define MA35_PARITY_BCH8 15
+#define MA35_PARITY_BCH12 23
+/* define the BCH parity code length for 1024 bytes data pattern */
+#define MA35_PARITY_BCH24 45
+
+#define MA35_MAX_NSELS (2)
+#define PREFIX_RA_IS_EMPTY(reg) FIELD_GET(GENMASK(31, 16), (reg))
+
+struct ma35_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u32 eccstatus;
+ u8 nsels;
+ u8 sels[] __counted_by(nsels);
+};
+
+struct ma35_nand_info {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion complete;
+ struct list_head chips;
+
+ u8 *buffer;
+ unsigned long assigned_cs;
+};
+
+static inline struct ma35_nand_chip *to_ma35_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct ma35_nand_chip, chip);
+}
+
+static int ma35_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = chip->ecc.total;
+ oob_region->offset = mtd->oobsize - oob_region->length;
+
+ return 0;
+}
+
+static int ma35_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - chip->ecc.total - 2;
+ oob_region->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ma35_ooblayout_ops = {
+ .free = ma35_ooblayout_free,
+ .ecc = ma35_ooblayout_ecc,
+};
+
+static inline void ma35_clear_spare(struct nand_chip *chip, int size)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ int i;
+
+ for (i = 0; i < size / 4; i++)
+ writel(0xff, nand->regs + MA35_NFI_REG_NANDRA0);
+}
+
+static inline void read_remaining_bytes(struct ma35_nand_info *nand, u32 *buf,
+ u32 offset, int size, int swap)
+{
+ u32 value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset);
+ u8 *ptr = (u8 *)buf;
+ int i, shift;
+
+ for (i = 0; i < size; i++) {
+ shift = (swap ? 3 - i : i) * 8;
+ ptr[i] = (value >> shift) & 0xff;
+ }
+}
+
+static inline void ma35_read_spare(struct nand_chip *chip, int size, u32 *buf, u32 offset)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 off = round_down(offset, 4);
+ int len = offset % 4;
+ int i;
+
+ if (len) {
+ read_remaining_bytes(nand, buf, off, 4 - len, 1);
+ off += 4;
+ size -= (4 - len);
+ }
+
+ for (i = 0; i < size / 4; i++)
+ *buf++ = readl(nand->regs + MA35_NFI_REG_NANDRA0 + off + (i * 4));
+
+ read_remaining_bytes(nand, buf, off + (size & ~3), size % 4, 0);
+}
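
The helper above splits an unaligned spare-area access into a head of 1-3 bytes, whole 32-bit words, and a tail; a simplified user-space sketch of the same decomposition (a plain array stands in for the NANDRA0 register window, and the hardware path's byte swapping of the head word is omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void read_spare(const uint8_t *ra, unsigned int offset,
		       uint8_t *dst, unsigned int size)
{
	unsigned int head = (4 - (offset % 4)) % 4;	/* bytes up to alignment */

	if (head > size)
		head = size;
	memcpy(dst, ra + offset, head);			/* unaligned head */
	offset += head;
	dst += head;
	size -= head;
	memcpy(dst, ra + offset, size & ~3u);		/* whole 32-bit words */
	memcpy(dst + (size & ~3u), ra + offset + (size & ~3u), size % 4);
}

int main(void)
{
	uint8_t ra[16] = "0123456789abcdef", out[8] = { 0 };

	read_spare(ra, 3, out, 7);
	printf("%.7s\n", out);	/* prints "3456789" */
	return 0;
}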
+
+static inline void ma35_write_spare(struct nand_chip *chip, int size, u32 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 value;
+ int i, j;
+ u8 *ptr;
+
+ for (i = 0, j = 0; i < size / 4; i++, j += 4)
+ writel(*buf++, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+
+ ptr = (u8 *)buf;
+ switch (size % 4) {
+ case 1:
+ writel(*ptr, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 2:
+ value = *ptr | (*(ptr + 1) << 8);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 3:
+ value = *ptr | (*(ptr + 1) << 8) | (*(ptr + 2) << 16);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ma35_nand_target_enable(struct nand_chip *chip, unsigned int cs)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 reg;
+
+ switch (cs) {
+ case 0:
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~DISABLE_CS0, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ reg |= INT_RB0;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ma35_nand_hwecc_init(struct nand_chip *chip, struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ nand->buffer = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!nand->buffer)
+ return -ENOMEM;
+
+ /* Redundant area size */
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ /* Protect the 3 redundancy bytes and disable the ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= (PROT3BEN | ECC_CHK);
+ reg &= ~ECC_EN;
+
+ if (chip->ecc.strength != 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ nvtnand->eccstatus = (chip->ecc.steps < 4) ? 1 : chip->ecc.steps / 4;
+ /* Set BCH algorithm */
+ reg &= ~BCH_MASK;
+ switch (chip->ecc.strength) {
+ case 8:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH8;
+ reg |= BCH_T8;
+ break;
+ case 12:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH12;
+ reg |= BCH_T12;
+ break;
+ case 24:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH24;
+ reg |= BCH_T24;
+ break;
+ default:
+ dev_err(nand->dev, "ECC strength unsupported\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.bytes = chip->ecc.total / chip->ecc.steps;
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+ return 0;
+}
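
The parity sizing above boils down to a strength-to-parity-length table multiplied by the step count; a sketch with an assumed 2 KiB page (note that per the padding defines above, BCH T24 actually operates on 1 KiB data segments rather than 512 B ones):

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048, ecc_size = 512;	/* assumed geometry */
	unsigned int steps = writesize / ecc_size;
	struct { unsigned int strength, parity; } bch[] = {
		{ 8, 15 }, { 12, 23 }, { 24, 45 },
	};

	for (int i = 0; i < 3; i++)
		printf("BCH T%u: %u parity bytes per page\n",
		       bch[i].strength, steps * bch[i].parity);
	return 0;
}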
+
+/* Correct data by BCH algorithm */
+static void ma35_nfi_correct(struct nand_chip *chip, u8 index,
+ u8 err_cnt, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 temp_data[24], temp_addr[24];
+ u32 padding_len, parity_len;
+ u32 value, offset, remain;
+ u32 err_data[6];
+ u8 i, j;
+
+ /* Configurations */
+ if (chip->ecc.strength <= 8) {
+ parity_len = MA35_PARITY_BCH8;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 12) {
+ parity_len = MA35_PARITY_BCH12;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 24) {
+ parity_len = MA35_PARITY_BCH24;
+ padding_len = MA35_BCH_PADDING_1024;
+ } else {
+ dev_err(nand->dev, "Invalid BCH_TSEL = 0x%lx\n",
+ readl(nand->regs + MA35_NFI_REG_NANDCTL) & BCH_MASK);
+ return;
+ }
+
+ /*
+ * Read the valid BCH_ECC_DATAx registers and parse them into
+ * temp_data[]. Compute the number of valid registers first, since
+ * each register holds 4 error bytes.
+ */
+ j = (err_cnt + 3) / 4;
+ j = (j > 6) ? 6 : j;
+ for (i = 0; i < j; i++)
+ err_data[i] = readl(nand->regs + MA35_NFI_REG_NANDECCED0 + i * 4);
+
+ for (i = 0; i < j; i++) {
+ temp_data[i * 4 + 0] = err_data[i] & 0xff;
+ temp_data[i * 4 + 1] = (err_data[i] >> 8) & 0xff;
+ temp_data[i * 4 + 2] = (err_data[i] >> 16) & 0xff;
+ temp_data[i * 4 + 3] = (err_data[i] >> 24) & 0xff;
+ }
+
+ /*
+ * Read the valid REG_BCH_ECC_ADDRx registers and parse them into
+ * temp_addr[]. Compute the number of valid registers first, since
+ * each register holds 2 error addresses.
+ */
+ j = (err_cnt + 1) / 2;
+ j = (j > 12) ? 12 : j;
+ for (i = 0; i < j; i++) {
+ temp_addr[i * 2 + 0] = readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ & 0x07ff;
+ temp_addr[i * 2 + 1] = (readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ >> 16) & 0x07ff;
+ }
+
+ /* Point to the start address of the field that contains the data error */
+ addr += index * chip->ecc.size;
+
+ /* Correct each erroneous byte */
+ for (i = 0; i < err_cnt; i++) {
+ u32 corrected_index = temp_addr[i];
+
+ if (corrected_index < chip->ecc.size) {
+ /* for wrong data in field */
+ *(addr + corrected_index) ^= temp_data[i];
+ } else if (corrected_index < (chip->ecc.size + 3)) {
+ /* for wrong first-3-bytes in redundancy area */
+ corrected_index -= chip->ecc.size;
+ temp_addr[i] += (parity_len * index); /* field offset */
+
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ value ^= temp_data[i] << (8 * corrected_index);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0);
+ } else {
+ /*
+ * for wrong parity code in redundancy area
+ * ERR_ADDRx = [data in field] + [3 bytes] + [xx] + [parity code]
+ * |<-- padding bytes -->|
+ * The ERR_ADDRx for last parity code always = field size + padding size.
+ * The first parity code = field size + padding size - parity code length.
+ * For example, for BCH T12, the first parity code = 512 + 32 - 23 = 521.
+ * That is, the error byte offset within the field is
+ * ERR_ADDRx - (field size + padding size - parity code length).
+ */
+ corrected_index -= (chip->ecc.size + padding_len - parity_len);
+
+ /*
+ * final address = first parity code of first field +
+ * offset of fields +
+ * offset within field
+ */
+ offset = (readl(nand->regs + MA35_NFI_REG_NANDRACTL) & 0x1ff) -
+ (parity_len * chip->ecc.steps) +
+ (parity_len * index) + corrected_index;
+
+ remain = offset % 4;
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ value ^= temp_data[i] << (8 * remain);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ }
+ }
+}
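
At its core the correction loop XORs each reported error pattern back into the byte at the reported address; a toy example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[8] = { 0x00, 0xa5, 0xff, 0x3c, 0x00, 0x00, 0x00, 0x00 };
	unsigned int err_addr[2] = { 1, 3 };	/* reported byte offsets */
	uint8_t err_pattern[2] = { 0x04, 0x80 };	/* reported flipped bits */

	for (int i = 0; i < 2; i++)
		data[err_addr[i]] ^= err_pattern[i];

	printf("%02x %02x\n", data[1], data[3]);	/* prints "a1 bc" */
	return 0;
}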
+
+static int ma35_nfi_ecc_check(struct nand_chip *chip, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int maxbitflips = 0;
+ int cnt = 0;
+ u32 status;
+ int i, j;
+
+ for (j = 0; j < nvtnand->eccstatus; j++) {
+ status = readl(nand->regs + MA35_NFI_REG_NANDECCES0 + j * 4);
+ if (!status)
+ continue;
+
+ for (i = 0; i < 4; i++) {
+ if ((status & ECC_STATUS_MASK) == 0x01) {
+ /* Correctable error */
+ cnt = (status >> 2) & ECC_ERR_CNT_MASK;
+ ma35_nfi_correct(chip, j * 4 + i, cnt, addr);
+ maxbitflips = max_t(u32, maxbitflips, cnt);
+ mtd->ecc_stats.corrected += cnt;
+ } else {
+ /* Uncorrectable error */
+ mtd->ecc_stats.failed++;
+ dev_err(nand->dev, "uncorrectable error! 0x%4x\n", status);
+ return -EBADMSG;
+ }
+ status >>= 8;
+ }
+ }
+ return maxbitflips;
+}
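
Each 32-bit NANDECCESx register packs four per-codeword fields, bits [1:0] the status and bits [6:2] the error count, which the loop above walks byte by byte; a sketch with a made-up status word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t status = 0x00000d05;	/* made up: cw0 has 1 error, cw1 has 3 */

	for (int i = 0; i < 4; i++) {
		uint32_t field = (status >> (i * 8)) & 0xff;

		if ((field & 0x3) == 0x1)	/* correctable error */
			printf("codeword %d: %u bitflips corrected\n",
			       i, (unsigned int)((field >> 2) & 0x1f));
	}
	return 0;
}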
+
+static void ma35_nand_dmac_init(struct ma35_nand_info *nand)
+{
+ /* DMAC reset and enable */
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+
+ /* Clear DMA finished flag and enable */
+ writel(INT_DMA | INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTEN);
+}
+
+static int ma35_nand_do_write(struct nand_chip *chip, const u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ dma_addr_t dma_addr;
+ int ret = 0, i;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ writel(addr[i], nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* Mark this page as dirty by clearing the erased marker in RA0. */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (reg & 0xffff0000)
+ writel(reg & 0xffff, nand->regs + MA35_NFI_REG_NANDRA0);
+
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+ dma_sync_single_for_device(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_W_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "write timeout\n");
+ ret = -ETIMEDOUT;
+ } else {
+ /* wait_for_completion_timeout() returns jiffies left on success */
+ ret = 0;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int ma35_nand_do_read(struct nand_chip *chip, u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0, cnt = 0, i;
+ dma_addr_t dma_addr;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ addr[i] = readb(nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* Setup and start DMA using dma_addr */
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_R_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "read timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_FROM_DEVICE);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (reg & INT_ECC) {
+ cnt = ma35_nfi_ecc_check(chip, addr);
+ if (cnt < 0) {
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | SWRST,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ }
+ writel(INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ }
+
+ ret = ret < 0 ? ret : cnt;
+ return ret;
+}
+
+static int ma35_nand_format_subpage(struct nand_chip *chip, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 page_off = round_down(offset, chip->ecc.size);
+ u32 end = DIV_ROUND_UP(page_off + len, chip->ecc.size);
+ u32 start = page_off / chip->ecc.size;
+ u32 reg;
+ int i;
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL) | 0xffff0000;
+ memset(nand->buffer, 0xff, mtd->writesize);
+ for (i = start; i < end; i++) {
+ memcpy(nand->buffer + i * chip->ecc.size,
+ buf + i * chip->ecc.size, chip->ecc.size);
+ reg &= ~(1 << (i + 16));
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ return 0;
+}
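
The subpage formatter above clears one bit, in the upper half of the RACTL word, per ECC step that carries real data; the mask arithmetic in isolation (offsets are assumed example values):

#include <stdio.h>

int main(void)
{
	unsigned int ecc_size = 512, offset = 1024, len = 1024;	/* assumed */
	unsigned int page_off = offset - (offset % ecc_size);
	unsigned int start = page_off / ecc_size;
	unsigned int end = (page_off + len + ecc_size - 1) / ecc_size;
	unsigned int reg = 0xffff0000;

	for (unsigned int i = start; i < end; i++)
		reg &= ~(1u << (i + 16));	/* step i holds real data */

	printf("RACTL mask: 0x%08x\n", reg);	/* prints 0xfff30000 */
	return 0;
}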
+
+static int ma35_nand_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg, oobpoi, index;
+ int i;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ ma35_nand_format_subpage(chip, offset, data_len, buf);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, nand->buffer, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ oobpoi = mtd->oobsize - chip->ecc.total;
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ index = i * chip->ecc.bytes;
+ if (!(reg & (1 << (i + 16)))) {
+ ma35_read_spare(chip, chip->ecc.bytes,
+ (u32 *)(chip->oob_poi + oobpoi + index),
+ oobpoi + index);
+ }
+ }
+
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, buf, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ ma35_read_spare(chip, chip->ecc.total,
+ (u32 *)(chip->oob_poi + (mtd->oobsize - chip->ecc.total)),
+ mtd->oobsize - chip->ecc.total);
+
+ /* Disable HW ECC engine */
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_read_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, u8 *buf, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, offset, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf + offset, data_len);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf, mtd->writesize);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_oob_hwecc(struct nand_chip *chip, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ /* copy OOB data to controller redundant area for page read */
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg))
+ memset((void *)chip->oob_poi, 0xff, mtd->oobsize);
+
+ return 0;
+}
+
+static inline void ma35_hw_init(struct ma35_nand_info *nand)
+{
+ u32 reg;
+
+ /* Disable flash wp. */
+ writel(DISABLE_WP, nand->regs + MA35_NFI_REG_NANDECTL);
+
+ /* resets the internal state machine and counters */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= SWRST;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+}
+
+static irqreturn_t ma35_nand_irq(int irq, void *id)
+{
+ struct ma35_nand_info *nand = (struct ma35_nand_info *)id;
+ u32 isr;
+
+ isr = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (isr & INT_DMA) {
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ complete(&nand->complete);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int ma35_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16 bits bus width not supported");
+ return -EINVAL;
+ }
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL) & (~PSIZE_MASK);
+ switch (mtd->writesize) {
+ case SZ_2K:
+ writel(reg | PSIZE_2K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_4K:
+ writel(reg | PSIZE_4K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_8K:
+ writel(reg | PSIZE_8K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ default:
+ dev_err(dev, "Unsupported page size");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+ chip->ecc.write_subpage = ma35_nand_write_subpage_hwecc;
+ chip->ecc.write_page = ma35_nand_write_page_hwecc;
+ chip->ecc.read_subpage = ma35_nand_read_subpage_hwecc;
+ chip->ecc.read_page = ma35_nand_read_page_hwecc;
+ chip->ecc.read_oob = ma35_nand_read_oob_hwecc;
+ return ma35_nand_hwecc_init(chip, nand);
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ma35_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(instr->ctx.cmd.opcode, nand->regs + MA35_NFI_REG_NANDCMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ if (i == (instr->ctx.addr.naddrs - 1))
+ writel(instr->ctx.addr.addrs[i] | ENDADDR,
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ else
+ writel(instr->ctx.addr.addrs[i],
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ret = ma35_nand_do_read(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ret = ma35_nand_do_write(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ return readl_poll_timeout(nand->regs + MA35_NFI_REG_NANDINTSTS, status,
+ status & INT_RB0, 20,
+ instr->ctx.waitrdy.timeout_ms * USEC_PER_MSEC);
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ma35_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret = 0;
+ u32 i;
+
+ if (check_only)
+ return 0;
+
+ ma35_nand_target_enable(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ma35_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
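
exec_op() simply walks the core-built instruction list and dispatches each element; a stand-alone sketch of that shape, with opcode values modelling a plain page read (illustrative only):

#include <stdio.h>

enum instr_type { OP_CMD, OP_ADDR, OP_WAITRDY, OP_DATA_IN };

struct instr {
	enum instr_type type;
	unsigned int arg;
};

int main(void)
{
	struct instr seq[] = {	/* models READ0 / addr / READSTART / wait / data-in */
		{ OP_CMD, 0x00 }, { OP_ADDR, 5 }, { OP_CMD, 0x30 },
		{ OP_WAITRDY, 0 }, { OP_DATA_IN, 2048 },
	};

	for (int i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++) {
		switch (seq[i].type) {
		case OP_CMD:
			printf("CMD 0x%02x\n", seq[i].arg);
			break;
		case OP_ADDR:
			printf("ADDR, %u cycles\n", seq[i].arg);
			break;
		case OP_WAITRDY:
			printf("wait for R/B#\n");
			break;
		case OP_DATA_IN:
			printf("DATA IN, %u bytes\n", seq[i].arg);
			break;
		}
	}
	return 0;
}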
+
+static const struct nand_controller_ops ma35_nfc_ops = {
+ .attach_chip = ma35_nand_attach_chip,
+ .exec_op = ma35_nfc_exec_op,
+};
+
+static int ma35_nand_chip_init(struct device *dev, struct ma35_nand_info *nand,
+ struct device_node *np)
+{
+ struct ma35_nand_chip *nvtnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ int ret;
+ u32 cs;
+ int i;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0 || nsels > MA35_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ nvtnand = devm_kzalloc(dev, struct_size(nvtnand, sels, nsels),
+ GFP_KERNEL);
+ if (!nvtnand)
+ return -ENOMEM;
+
+ nvtnand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (cs >= MA35_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nand->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ nvtnand->sels[i] = cs;
+ }
+
+ chip = &nvtnand->chip;
+ chip->controller = &nand->controller;
+
+ nand_set_flash_node(chip, np);
+ nand_set_controller_data(chip, nand);
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ mtd_set_ooblayout(mtd, &ma35_ooblayout_ops);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nvtnand->node, &nand->chips);
+
+ return 0;
+}
+
+static void ma35_chips_cleanup(struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
+ chip = &nvtnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&nvtnand->node);
+ }
+}
+
+static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ for_each_child_of_node_scoped(np, nand_np) {
+ ret = ma35_nand_chip_init(dev, nand, nand_np);
+ if (ret) {
+ ma35_chips_cleanup(nand);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ma35_nand_probe(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand;
+ int ret = 0;
+
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand_controller_init(&nand->controller);
+ INIT_LIST_HEAD(&nand->chips);
+ nand->controller.ops = &ma35_nfc_ops;
+
+ init_completion(&nand->complete);
+
+ nand->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nand->regs))
+ return PTR_ERR(nand->regs);
+
+ nand->dev = &pdev->dev;
+
+ nand->clk = devm_clk_get_enabled(&pdev->dev, "nand_gate");
+ if (IS_ERR(nand->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(nand->clk),
+ "failed to find NAND clock\n");
+
+ nand->irq = platform_get_irq(pdev, 0);
+ if (nand->irq < 0)
+ return dev_err_probe(&pdev->dev, nand->irq,
+ "failed to get platform irq\n");
+
+ ret = devm_request_irq(&pdev->dev, nand->irq, ma35_nand_irq,
+ IRQF_TRIGGER_HIGH, "ma35d1-nand-controller", nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request NAND irq\n");
+ return -ENXIO;
+ }
+
+ platform_set_drvdata(pdev, nand);
+
+ writel(GRST | NAND_EN, nand->regs + MA35_NFI_REG_GCTL);
+ ma35_hw_init(nand);
+ ret = ma35_nand_chips_init(&pdev->dev, nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init NAND chips\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ma35_nand_remove(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand = platform_get_drvdata(pdev);
+
+ ma35_chips_cleanup(nand);
+}
+
+static const struct of_device_id ma35_nand_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-nand-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35_nand_of_match);
+
+static struct platform_driver ma35_nand_driver = {
+ .driver = {
+ .name = "ma35d1-nand-controller",
+ .of_match_table = ma35_nand_of_match,
+ },
+ .probe = ma35_nand_probe,
+ .remove = ma35_nand_remove,
+};
+
+module_platform_driver(ma35_nand_driver);
+
+MODULE_DESCRIPTION("Nuvoton ma35 NAND driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index db22b3af16d8..39e297486721 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -22,7 +22,7 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_data/elm.h>
@@ -254,6 +254,10 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
/**
* omap_nand_data_in_pref - NAND data in using prefetch engine
+ * @chip: NAND chip
+ * @buf: output buffer where NAND data is placed into
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@@ -297,6 +301,10 @@ static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
/**
* omap_nand_data_out_pref - NAND data out using Write Posting engine
+ * @chip: NAND chip
+ * @buf: input buffer that is sent to NAND
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
@@ -440,6 +448,10 @@ out_copy:
/**
* omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
+ * @chip: NAND chip
+ * @buf: output buffer where NAND data is placed into
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@@ -460,6 +472,10 @@ static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
/**
* omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
+ * @chip: NAND chip
+ * @buf: input buffer that is sent to NAND
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
@@ -1881,8 +1897,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
- if (info->gpmc_irq_fifo <= 0)
- return -ENODEV;
+ if (info->gpmc_irq_fifo < 0)
+ return info->gpmc_irq_fifo;
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
@@ -1894,8 +1910,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
- if (info->gpmc_irq_count <= 0)
- return -ENODEV;
+ if (info->gpmc_irq_count < 0)
+ return info->gpmc_irq_count;
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
@@ -1963,7 +1979,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "Unable to use BCH library\n");
- return err;
+ goto err_put_elm_dev;
}
break;
@@ -2000,7 +2016,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "unable to use BCH library\n");
- return err;
+ goto err_put_elm_dev;
}
break;
@@ -2038,7 +2054,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
break;
default:
dev_err(dev, "Invalid or unsupported ECC scheme\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put_elm_dev;
}
if (elm_bch_strength >= 0) {
@@ -2057,7 +2074,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
info->nsteps_per_eccpg, chip->ecc.size,
chip->ecc.bytes);
if (err < 0)
- return err;
+ goto err_put_elm_dev;
}
/* Check if NAND device's OOB is enough to store ECC signatures */
@@ -2067,10 +2084,24 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
dev_err(dev,
"Not enough OOB bytes: required = %d, available=%d\n",
min_oobbytes, mtd->oobsize);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put_elm_dev;
}
return 0;
+
+err_put_elm_dev:
+ put_device(info->elm_dev);
+
+ return err;
+}
+
+static void omap_nand_detach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+
+ put_device(info->elm_dev);
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
@@ -2171,6 +2202,7 @@ static int omap_nand_exec_op(struct nand_chip *chip,
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
+ .detach_chip = omap_nand_detach_chip,
.exec_op = omap_nand_exec_op,
};
@@ -2219,8 +2251,7 @@ static int omap_nand_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vaddr = devm_ioremap_resource(&pdev->dev, res);
+ vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -2292,7 +2323,7 @@ MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
.probe = omap_nand_probe,
- .remove_new = omap_nand_remove,
+ .remove = omap_nand_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = omap_nand_ids,
@@ -2301,6 +2332,5 @@ static struct platform_driver omap_nand_driver = {
module_platform_driver(omap_nand_driver);
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 4a97d4a76454..c1d19b8e6715 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -560,7 +560,7 @@ static struct platform_driver elm_driver = {
.pm = &elm_pm_ops,
},
.probe = elm_probe,
- .remove_new = elm_remove,
+ .remove = elm_remove,
};
module_platform_driver(elm_driver);
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 7e0313889b50..47e80d5e58c5 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -169,16 +169,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is optional. */
- info->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
- "failed to get clock!\n");
-
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to prepare clock!\n");
- return ret;
- }
+ "failed to get and enable clock!\n");
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
@@ -189,19 +183,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
ret = nand_scan(nc, 1);
if (ret)
- goto no_dev;
+ return ret;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
- if (ret) {
+ if (ret)
nand_cleanup(nc);
- goto no_dev;
- }
-
- return 0;
-no_dev:
- clk_disable_unprepare(info->clk);
return ret;
}
@@ -215,8 +203,6 @@ static void orion_nand_remove(struct platform_device *pdev)
WARN_ON(ret);
nand_cleanup(chip);
-
- clk_disable_unprepare(info->clk);
}
#ifdef CONFIG_OF
@@ -228,7 +214,7 @@ MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
#endif
static struct platform_driver orion_nand_driver = {
- .remove_new = orion_nand_remove,
+ .remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.of_match_table = of_match_ptr(orion_nand_of_match_table),
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
deleted file mode 100644
index e3c9807df1cd..000000000000
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Oxford Semiconductor OXNAS NAND driver
-
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- * Heavily based on plat_nand.c :
- * Author: Vitaly Wool <vitalywool@gmail.com>
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/reset.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-
-/* Nand commands */
-#define OXNAS_NAND_CMD_ALE BIT(18)
-#define OXNAS_NAND_CMD_CLE BIT(19)
-
-#define OXNAS_NAND_MAX_CHIPS 1
-
-struct oxnas_nand_ctrl {
- struct nand_controller base;
- void __iomem *io_base;
- struct clk *clk;
- struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
- unsigned int nchips;
-};
-
-static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- return readb(oxnas->io_base);
-}
-
-static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- ioread8_rep(oxnas->io_base, buf, len);
-}
-
-static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf,
- int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- iowrite8_rep(oxnas->io_base, buf, len);
-}
-
-/* Single CS command control */
-static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- if (ctrl & NAND_CLE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
- else if (ctrl & NAND_ALE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
-}
-
-/*
- * Probe for the NAND device.
- */
-static int oxnas_nand_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *nand_np;
- struct oxnas_nand_ctrl *oxnas;
- struct nand_chip *chip;
- struct mtd_info *mtd;
- int count = 0;
- int err = 0;
- int i;
-
- /* Allocate memory for the device structure (and zero it) */
- oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
- GFP_KERNEL);
- if (!oxnas)
- return -ENOMEM;
-
- nand_controller_init(&oxnas->base);
-
- oxnas->io_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(oxnas->io_base))
- return PTR_ERR(oxnas->io_base);
-
- oxnas->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(oxnas->clk))
- oxnas->clk = NULL;
-
- /* Only a single chip node is supported */
- count = of_get_child_count(np);
- if (count > 1)
- return -EINVAL;
-
- err = clk_prepare_enable(oxnas->clk);
- if (err)
- return err;
-
- device_reset_optional(&pdev->dev);
-
- for_each_child_of_node(np, nand_np) {
- chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip) {
- err = -ENOMEM;
- goto err_release_child;
- }
-
- chip->controller = &oxnas->base;
-
- nand_set_flash_node(chip, nand_np);
- nand_set_controller_data(chip, oxnas);
-
- mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
- mtd->priv = chip;
-
- chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl;
- chip->legacy.read_buf = oxnas_nand_read_buf;
- chip->legacy.read_byte = oxnas_nand_read_byte;
- chip->legacy.write_buf = oxnas_nand_write_buf;
- chip->legacy.chip_delay = 30;
-
- /* Scan to find existence of the device */
- err = nand_scan(chip, 1);
- if (err)
- goto err_release_child;
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err)
- goto err_cleanup_nand;
-
- oxnas->chips[oxnas->nchips++] = chip;
- }
-
- /* Exit if no chips found */
- if (!oxnas->nchips) {
- err = -ENODEV;
- goto err_clk_unprepare;
- }
-
- platform_set_drvdata(pdev, oxnas);
-
- return 0;
-
-err_cleanup_nand:
- nand_cleanup(chip);
-err_release_child:
- of_node_put(nand_np);
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
-err_clk_unprepare:
- clk_disable_unprepare(oxnas->clk);
- return err;
-}
-
-static void oxnas_nand_remove(struct platform_device *pdev)
-{
- struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
- struct nand_chip *chip;
- int i;
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
- clk_disable_unprepare(oxnas->clk);
-}
-
-static const struct of_device_id oxnas_nand_match[] = {
- { .compatible = "oxsemi,ox820-nand" },
- {},
-};
-MODULE_DEVICE_TABLE(of, oxnas_nand_match);
-
-static struct platform_driver oxnas_nand_driver = {
- .probe = oxnas_nand_probe,
- .remove_new = oxnas_nand_remove,
- .driver = {
- .name = "oxnas_nand",
- .of_match_table = oxnas_nand_match,
- },
-};
-
-module_platform_driver(oxnas_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_DESCRIPTION("Oxnas NAND driver");
-MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 19b2c9d25863..0b1f7670660e 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -237,7 +237,7 @@ static struct platform_driver pasemi_nand_driver =
.of_match_table = pasemi_nand_match,
},
.probe = pasemi_nand_probe,
- .remove_new = pasemi_nand_remove,
+ .remove = pasemi_nand_remove,
};
module_platform_driver(pasemi_nand_driver);
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 28b7bd7e22eb..11bd90e3f18c 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -23,9 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -130,7 +128,7 @@ struct pl35x_nand {
* @conf_regs: SMC configuration registers for command phase
* @io_regs: NAND data registers for data phase
* @controller: Core NAND controller structure
- * @chip: NAND chip information structure
+ * @chips: List of connected NAND chips
* @selected_chip: NAND chip currently selected by the controller
* @assigned_cs: List of assigned CS
* @ecc_buf: Temporary buffer to extract ECC bytes
@@ -189,7 +187,7 @@ static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
.free = pl35x_ecc_ooblayout16_free,
};
-/* Generic flash bbt decriptors */
+/* Generic flash bbt descriptors */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
@@ -513,6 +511,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
u32 addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
+ u8 status;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
@@ -565,6 +564,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
if (ret)
goto disable_ecc_engine;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
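
The added status check follows the usual NAND convention: after a program operation the chip's status byte is read and its FAIL bit decides success; the same test in isolation (status value made up):

#include <stdio.h>

#define NAND_STATUS_FAIL 0x01	/* bit 0 of the NAND status byte */

int main(void)
{
	unsigned char status = 0xe0;	/* made-up: ready, pass */

	if (status & NAND_STATUS_FAIL)
		printf("program failed\n");
	else
		printf("program ok\n");
	return 0;
}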
@@ -1104,7 +1111,7 @@ static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
- struct device_node *np = nfc->dev->of_node, *nand_np;
+ struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1114,10 +1121,9 @@ static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = pl35x_nand_chip_init(nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
pl35x_nand_chips_cleanup(nfc);
break;
}
@@ -1131,7 +1137,7 @@ static int pl35x_nand_probe(struct platform_device *pdev)
struct device *smc_dev = pdev->dev.parent;
struct amba_device *smc_amba = to_amba_device(smc_dev);
struct pl35x_nandc *nfc;
- u32 ret;
+ int ret;
nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
@@ -1178,7 +1184,7 @@ MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
static struct platform_driver pl35x_nandc_driver = {
.probe = pl35x_nand_probe,
- .remove_new = pl35x_nand_remove,
+ .remove = pl35x_nand_remove,
.driver = {
.name = PL35X_NANDC_DRIVER_NAME,
.of_match_table = pl35x_nand_of_match,
@@ -1187,6 +1193,5 @@ static struct platform_driver pl35x_nandc_driver = {
module_platform_driver(pl35x_nandc_driver);
MODULE_AUTHOR("Xilinx, Inc.");
-MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index b5c374b51ecd..0bcd455328ef 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(of, plat_nand_match);
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove_new = plat_nand_remove,
+ .remove = plat_nand_remove,
.driver = {
.name = "gen_nand",
.of_match_table = plat_nand_match,
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 72d6168d8a1b..4dd6f1a4e797 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,437 +2,20 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/dma/qcom_adm.h>
-#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/delay.h>
-#include <linux/dma/qcom_bam_dma.h>
-
-/* NANDc reg offsets */
-#define NAND_FLASH_CMD 0x00
-#define NAND_ADDR0 0x04
-#define NAND_ADDR1 0x08
-#define NAND_FLASH_CHIP_SELECT 0x0c
-#define NAND_EXEC_CMD 0x10
-#define NAND_FLASH_STATUS 0x14
-#define NAND_BUFFER_STATUS 0x18
-#define NAND_DEV0_CFG0 0x20
-#define NAND_DEV0_CFG1 0x24
-#define NAND_DEV0_ECC_CFG 0x28
-#define NAND_AUTO_STATUS_EN 0x2c
-#define NAND_DEV1_CFG0 0x30
-#define NAND_DEV1_CFG1 0x34
-#define NAND_READ_ID 0x40
-#define NAND_READ_STATUS 0x44
-#define NAND_DEV_CMD0 0xa0
-#define NAND_DEV_CMD1 0xa4
-#define NAND_DEV_CMD2 0xa8
-#define NAND_DEV_CMD_VLD 0xac
-#define SFLASHC_BURST_CFG 0xe0
-#define NAND_ERASED_CW_DETECT_CFG 0xe8
-#define NAND_ERASED_CW_DETECT_STATUS 0xec
-#define NAND_EBI2_ECC_BUF_CFG 0xf0
-#define FLASH_BUF_ACC 0x100
-
-#define NAND_CTRL 0xf00
-#define NAND_VERSION 0xf08
-#define NAND_READ_LOCATION_0 0xf20
-#define NAND_READ_LOCATION_1 0xf24
-#define NAND_READ_LOCATION_2 0xf28
-#define NAND_READ_LOCATION_3 0xf2c
-#define NAND_READ_LOCATION_LAST_CW_0 0xf40
-#define NAND_READ_LOCATION_LAST_CW_1 0xf44
-#define NAND_READ_LOCATION_LAST_CW_2 0xf48
-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-
-/* dummy register offsets, used by write_reg_dma */
-#define NAND_DEV_CMD1_RESTORE 0xdead
-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-
-/* NAND_FLASH_CMD bits */
-#define PAGE_ACC BIT(4)
-#define LAST_PAGE BIT(5)
-
-/* NAND_FLASH_CHIP_SELECT bits */
-#define NAND_DEV_SEL 0
-#define DM_EN BIT(2)
-
-/* NAND_FLASH_STATUS bits */
-#define FS_OP_ERR BIT(4)
-#define FS_READY_BSY_N BIT(5)
-#define FS_MPU_ERR BIT(8)
-#define FS_DEVICE_STS_ERR BIT(16)
-#define FS_DEVICE_WP BIT(23)
-
-/* NAND_BUFFER_STATUS bits */
-#define BS_UNCORRECTABLE_BIT BIT(8)
-#define BS_CORRECTABLE_ERR_MSK 0x1f
-
-/* NAND_DEVn_CFG0 bits */
-#define DISABLE_STATUS_AFTER_WRITE 4
-#define CW_PER_PAGE 6
-#define UD_SIZE_BYTES 9
-#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-#define ECC_PARITY_SIZE_BYTES_RS 19
-#define SPARE_SIZE_BYTES 23
-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
-#define NUM_ADDR_CYCLES 27
-#define STATUS_BFR_READ 30
-#define SET_RD_MODE_AFTER_STATUS 31
-
-/* NAND_DEVn_CFG0 bits */
-#define DEV0_CFG1_ECC_DISABLE 0
-#define WIDE_FLASH 1
-#define NAND_RECOVERY_CYCLES 2
-#define CS_ACTIVE_BSY 5
-#define BAD_BLOCK_BYTE_NUM 6
-#define BAD_BLOCK_IN_SPARE_AREA 16
-#define WR_RD_BSY_GAP 17
-#define ENABLE_BCH_ECC 27
-
-/* NAND_DEV0_ECC_CFG bits */
-#define ECC_CFG_ECC_DISABLE 0
-#define ECC_SW_RESET 1
-#define ECC_MODE 4
-#define ECC_PARITY_SIZE_BYTES_BCH 8
-#define ECC_NUM_DATA_BYTES 16
-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-#define ECC_FORCE_CLK_OPEN 30
-
-/* NAND_DEV_CMD1 bits */
-#define READ_ADDR 0
-
-/* NAND_DEV_CMD_VLD bits */
-#define READ_START_VLD BIT(0)
-#define READ_STOP_VLD BIT(1)
-#define WRITE_START_VLD BIT(2)
-#define ERASE_START_VLD BIT(3)
-#define SEQ_READ_START_VLD BIT(4)
-
-/* NAND_EBI2_ECC_BUF_CFG bits */
-#define NUM_STEPS 0
-
-/* NAND_ERASED_CW_DETECT_CFG bits */
-#define ERASED_CW_ECC_MASK 1
-#define AUTO_DETECT_RES 0
-#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
-
-/* NAND_ERASED_CW_DETECT_STATUS bits */
-#define PAGE_ALL_ERASED BIT(7)
-#define CODEWORD_ALL_ERASED BIT(6)
-#define PAGE_ERASED BIT(5)
-#define CODEWORD_ERASED BIT(4)
-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-
-/* NAND_READ_LOCATION_n bits */
-#define READ_LOCATION_OFFSET 0
-#define READ_LOCATION_SIZE 16
-#define READ_LOCATION_LAST 31
-
-/* Version Mask */
-#define NAND_VERSION_MAJOR_MASK 0xf0000000
-#define NAND_VERSION_MAJOR_SHIFT 28
-#define NAND_VERSION_MINOR_MASK 0x0fff0000
-#define NAND_VERSION_MINOR_SHIFT 16
-
-/* NAND OP_CMDs */
-#define OP_PAGE_READ 0x2
-#define OP_PAGE_READ_WITH_ECC 0x3
-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
-#define OP_PAGE_READ_ONFI_READ 0x5
-#define OP_PROGRAM_PAGE 0x6
-#define OP_PAGE_PROGRAM_WITH_ECC 0x7
-#define OP_PROGRAM_PAGE_SPARE 0x9
-#define OP_BLOCK_ERASE 0xa
-#define OP_FETCH_ID 0xb
-#define OP_RESET_DEVICE 0xd
-
-/* Default Value for NAND_DEV_CMD_VLD */
-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
- ERASE_START_VLD | SEQ_READ_START_VLD)
-
-/* NAND_CTRL bits */
-#define BAM_MODE_EN BIT(0)
-
-/*
- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
- * the driver calls the chunks 'step' or 'codeword' interchangeably
- */
-#define NANDC_STEP_SIZE 512
-
-/*
- * the largest page size we support is 8K; this will have 16 steps/codewords
- * of 512 bytes each
- */
-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
-
-/* we read at most 3 registers per codeword scan */
-#define MAX_REG_RD (3 * MAX_NUM_STEPS)
-
-/* ECC modes supported by the controller */
-#define ECC_NONE BIT(0)
-#define ECC_RS_4BIT BIT(1)
-#define ECC_BCH_4BIT BIT(2)
-#define ECC_BCH_8BIT BIT(3)
-
-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-
-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-/*
- * Returns the actual register address for all NAND_DEV_ registers
- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
- */
-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-
-/* Returns the NAND register physical address */
-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-
-/* Returns the dma address for reg read buffer */
-#define reg_buf_dma_addr(chip, vaddr) \
- ((chip)->reg_read_dma + \
- ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
-
-#define QPIC_PER_CW_CMD_ELEMENTS 32
-#define QPIC_PER_CW_CMD_SGL 32
-#define QPIC_PER_CW_DATA_SGL 8
-
-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-
-/*
- * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
- */
-/* Don't set the EOT in current tx BAM sgl */
-#define NAND_BAM_NO_EOT BIT(0)
-/* Set the NWD flag in current BAM sgl */
-#define NAND_BAM_NWD BIT(1)
-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-#define NAND_BAM_NEXT_SGL BIT(2)
-/*
- * The erased codeword status is used twice in a single transfer, so this
- * flag selects the current value of the erased codeword status register
- */
-#define NAND_ERASED_CW_SET BIT(4)
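/*
 * Example of how these flags combine, taken from the page-read setup later
 * in this file: the second NAND_ERASED_CW_DETECT_CFG write selects the
 * "set" value and closes the current command SGL in one go:
 *
 *	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
 *	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
 *		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
 */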
-
-/*
- * This data type corresponds to the BAM transaction which will be used for all
- * NAND transfers.
- * @bam_ce - the array of BAM command elements
- * @cmd_sgl - sgl for NAND BAM command pipe
- * @data_sgl - sgl for NAND BAM consumer/producer pipe
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
- * @txn_done - completion for NAND transfer.
- * @bam_ce_pos - the index in bam_ce which is available for next sgl
- * @bam_ce_start - the index in bam_ce which marks the first command element
- * of the current sgl, used for its size calculation
- * @cmd_sgl_pos - current index in command sgl.
- * @cmd_sgl_start - start index in command sgl.
- * @tx_sgl_pos - current index in data sgl for tx.
- * @tx_sgl_start - start index in data sgl for tx.
- * @rx_sgl_pos - current index in data sgl for rx.
- * @rx_sgl_start - start index in data sgl for rx.
- * @wait_second_completion - wait for second DMA desc completion before
- * signalling NAND transfer completion.
- */
-struct bam_transaction {
- struct bam_cmd_element *bam_ce;
- struct scatterlist *cmd_sgl;
- struct scatterlist *data_sgl;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
- struct completion txn_done;
- u32 bam_ce_pos;
- u32 bam_ce_start;
- u32 cmd_sgl_pos;
- u32 cmd_sgl_start;
- u32 tx_sgl_pos;
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
- bool wait_second_completion;
-};
-
-/*
- * This data type corresponds to the nand dma descriptor
- * @dma_desc - low level DMA engine descriptor
- * @list - list for desc_info
- *
- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
- * ADM
- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dir - DMA transfer direction
- */
-struct desc_info {
- struct dma_async_tx_descriptor *dma_desc;
- struct list_head node;
-
- union {
- struct scatterlist adm_sgl;
- struct {
- struct scatterlist *bam_sgl;
- int sgl_cnt;
- };
- };
- enum dma_data_direction dir;
-};
-
-/*
- * holds the current register values that we want to write. acts as a contiguous
- * chunk of memory which we use to write the controller registers through DMA.
- */
-struct nandc_regs {
- __le32 cmd;
- __le32 addr0;
- __le32 addr1;
- __le32 chip_sel;
- __le32 exec;
-
- __le32 cfg0;
- __le32 cfg1;
- __le32 ecc_bch_cfg;
-
- __le32 clrflashstatus;
- __le32 clrreadstatus;
-
- __le32 cmd1;
- __le32 vld;
-
- __le32 orig_cmd1;
- __le32 orig_vld;
-
- __le32 ecc_buf_cfg;
- __le32 read_location0;
- __le32 read_location1;
- __le32 read_location2;
- __le32 read_location3;
- __le32 read_location_last0;
- __le32 read_location_last1;
- __le32 read_location_last2;
- __le32 read_location_last3;
-
- __le32 erased_cw_detect_cfg_clr;
- __le32 erased_cw_detect_cfg_set;
-};
-
-/*
- * NAND controller data struct
- *
- * @dev: parent device
- *
- * @base: MMIO base
- *
- * @core_clk: controller clock
- * @aon_clk: another controller clock
- *
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- *
- * @props: properties of current NAND controller,
- * initialized via DT match data
- *
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
- *
- * @chan: dma channel
- * @cmd_crci: ADM DMA CRCI for command flow control
- * @data_crci: ADM DMA CRCI for data flow control
- *
- * @desc_list: DMA descriptor list (list of desc_infos)
- *
- * @data_buffer: our local DMA buffer for page read/writes,
- * used when we can't use the buffer provided
- * by upper layers directly
- * @reg_read_buf: local buffer for reading back registers via DMA
- *
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
- * @reg_read_dma: contains dma address for register read buffer
- *
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
- * @max_cwperpage: maximum QPIC codewords required. calculated
- * from the page size of all connected NAND devices
- *
- * @reg_read_pos: marker for data read in reg_read_buf
- *
- * @cmd1/vld: some fixed controller register values
- */
-struct qcom_nand_controller {
- struct device *dev;
-
- void __iomem *base;
-
- struct clk *core_clk;
- struct clk *aon_clk;
-
- struct nandc_regs *regs;
- struct bam_transaction *bam_txn;
-
- const struct qcom_nandc_props *props;
-
- struct nand_controller controller;
- struct list_head host_list;
-
- union {
- /* will be used only by QPIC for BAM DMA */
- struct {
- struct dma_chan *tx_chan;
- struct dma_chan *rx_chan;
- struct dma_chan *cmd_chan;
- };
-
- /* will be used only by EBI2 for ADM DMA */
- struct {
- struct dma_chan *chan;
- unsigned int cmd_crci;
- unsigned int data_crci;
- };
- };
-
- struct list_head desc_list;
-
- u8 *data_buffer;
- __le32 *reg_read_buf;
-
- phys_addr_t base_phys;
- dma_addr_t base_dma;
- dma_addr_t reg_read_dma;
-
- int buf_size;
- int buf_count;
- int buf_start;
- unsigned int max_cwperpage;
-
- int reg_read_pos;
-
- u32 cmd1, vld;
-};
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
/*
* NAND special boot partitions
@@ -448,6 +31,29 @@ struct qcom_nand_boot_partition {
};
/*
+ * Qcom op for each exec_op transfer
+ *
+ * @data_instr: data instruction pointer
+ * @data_instr_idx: data instruction index
+ * @rdy_timeout_ms: wait ready timeout in ms
+ * @rdy_delay_ns: Additional delay in ns
+ * @addr1_reg: Address1 register value
+ * @addr2_reg: Address2 register value
+ * @cmd_reg: CMD register value
+ * @flag: flag for misc instruction
+ */
+struct qcom_op {
+ const struct nand_op_instr *data_instr;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ __le32 addr1_reg;
+ __le32 addr2_reg;
+ __le32 cmd_reg;
+ u8 flag;
+};
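/*
 * A minimal sketch of how a struct qcom_op is typically filled while
 * walking a struct nand_operation (the real parser sits in a later part
 * of this diff; only the generic exec_op instruction types below come
 * from the rawnand API, the rest is illustrative):
 *
 *	switch (instr->type) {
 *	case NAND_OP_CMD_INSTR:
 *		q_op->cmd_reg = ...;		// map opcode to an OP_* value
 *		break;
 *	case NAND_OP_ADDR_INSTR:
 *		q_op->addr1_reg = ...;		// pack the address cycles
 *		break;
 *	case NAND_OP_DATA_IN_INSTR:
 *	case NAND_OP_DATA_OUT_INSTR:
 *		q_op->data_instr = instr;
 *		q_op->data_instr_idx = i;
 *		break;
 *	case NAND_OP_WAITRDY_INSTR:
 *		q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
 *		break;
 *	}
 */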
+
+/*
* NAND chip structure
*
* @boot_partitions: array of boot partitions where offset and size of the
@@ -514,243 +120,113 @@ struct qcom_nand_host {
bool bch_enabled;
};
-/*
- * This data type corresponds to the NAND controller properties which varies
- * among different NAND controllers.
- * @ecc_modes - ecc mode for NAND
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
- * @is_bam - whether NAND controller is using BAM
- * @is_qpic - whether NAND CTRL is part of qpic IP
- * @qpic_v2 - flag to indicate QPIC IP version 2
- * @use_codeword_fixup - whether NAND has different layout for boot partitions
- */
-struct qcom_nandc_props {
- u32 ecc_modes;
- u32 dev_cmd_reg_start;
- bool is_bam;
- bool is_qpic;
- bool qpic_v2;
- bool use_codeword_fixup;
-};
-
-/* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- devm_kfree(nandc->dev, bam_txn);
-}
-
-/* Allocates and Initializes the BAM transaction */
-static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn;
- size_t bam_txn_size;
- unsigned int num_cw = nandc->max_cwperpage;
- void *bam_txn_buf;
-
- bam_txn_size =
- sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-
- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
- if (!bam_txn_buf)
- return NULL;
-
- bam_txn = bam_txn_buf;
- bam_txn_buf += sizeof(*bam_txn);
-
- bam_txn->bam_ce = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-
- bam_txn->cmd_sgl = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-
- bam_txn->data_sgl = bam_txn_buf;
-
- init_completion(&bam_txn->txn_done);
-
- return bam_txn;
-}
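/*
 * Worked example of the single-allocation layout computed above for a
 * 4 KiB page device (max_cwperpage = SZ_4K / NANDC_STEP_SIZE = 8):
 *
 *	struct bam_transaction		header
 *	bam_ce[8 * 32]			command elements
 *	cmd_sgl[8 * 32]			command scatterlists
 *	data_sgl[8 * 8]			data scatterlists
 *
 * bam_txn_buf is advanced past each region so that every pointer lands
 * inside the one devm_kzalloc() buffer.
 */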
-
-/* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (!nandc->props->is_bam)
- return;
-
- bam_txn->bam_ce_pos = 0;
- bam_txn->bam_ce_start = 0;
- bam_txn->cmd_sgl_pos = 0;
- bam_txn->cmd_sgl_start = 0;
- bam_txn->tx_sgl_pos = 0;
- bam_txn->tx_sgl_start = 0;
- bam_txn->rx_sgl_pos = 0;
- bam_txn->rx_sgl_start = 0;
- bam_txn->last_data_desc = NULL;
- bam_txn->wait_second_completion = false;
-
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_DATA_SGL);
-
- reinit_completion(&bam_txn->txn_done);
-}
-
-/* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
-{
- struct bam_transaction *bam_txn = data;
-
- /*
- * In case of data transfer with NAND, 2 callbacks will be generated.
- * One for command channel and another one for data channel.
- * If current transaction has data descriptors
- * (i.e. wait_second_completion is true), then set this to false
- * and wait for second DMA descriptor completion.
- */
- if (bam_txn->wait_second_completion)
- bam_txn->wait_second_completion = false;
- else
- complete(&bam_txn->txn_done);
-}
-
-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
return container_of(chip, struct qcom_nand_host, chip);
}
-static inline struct qcom_nand_controller *
+static struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
- return container_of(chip->controller, struct qcom_nand_controller,
- controller);
+ return (struct qcom_nand_controller *)
+ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
}
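/*
 * A sketch of the memory layout the subtraction above relies on: the
 * allocation (done by the new common QPIC code, not shown in this hunk)
 * is assumed to place the generic nand_controller directly behind the
 * driver-private struct:
 *
 *	base                          -> struct qcom_nand_controller
 *	base + sizeof(private struct) -> struct nand_controller
 *	                                 (chip->controller points here)
 *
 * so stepping back by sizeof(struct qcom_nand_controller) recovers the
 * private struct from the generic pointer.
 */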
-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
return ioread32(nandc->base + offset);
}
-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
- u32 val)
+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
{
iowrite32(val, nandc->base + offset);
}
-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
- bool is_cpu)
+/* Helper to check whether this is the last CW or not */
+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
- if (!nandc->props->is_bam)
- return;
-
- if (is_cpu)
- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- else
- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
+ return cw == (ecc->steps - 1);
}
-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+/**
+ * nandc_set_read_loc_first() - set the first read location register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function sets the first read location register value
+ */
+static void nandc_set_read_loc_first(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
- switch (offset) {
- case NAND_FLASH_CMD:
- return &regs->cmd;
- case NAND_ADDR0:
- return &regs->addr0;
- case NAND_ADDR1:
- return &regs->addr1;
- case NAND_FLASH_CHIP_SELECT:
- return &regs->chip_sel;
- case NAND_EXEC_CMD:
- return &regs->exec;
- case NAND_FLASH_STATUS:
- return &regs->clrflashstatus;
- case NAND_DEV0_CFG0:
- return &regs->cfg0;
- case NAND_DEV0_CFG1:
- return &regs->cfg1;
- case NAND_DEV0_ECC_CFG:
- return &regs->ecc_bch_cfg;
- case NAND_READ_STATUS:
- return &regs->clrreadstatus;
- case NAND_DEV_CMD1:
- return &regs->cmd1;
- case NAND_DEV_CMD1_RESTORE:
- return &regs->orig_cmd1;
- case NAND_DEV_CMD_VLD:
- return &regs->vld;
- case NAND_DEV_CMD_VLD_RESTORE:
- return &regs->orig_vld;
- case NAND_EBI2_ECC_BUF_CFG:
- return &regs->ecc_buf_cfg;
- case NAND_READ_LOCATION_0:
- return &regs->read_location0;
- case NAND_READ_LOCATION_1:
- return &regs->read_location1;
- case NAND_READ_LOCATION_2:
- return &regs->read_location2;
- case NAND_READ_LOCATION_3:
- return &regs->read_location3;
- case NAND_READ_LOCATION_LAST_CW_0:
- return &regs->read_location_last0;
- case NAND_READ_LOCATION_LAST_CW_1:
- return &regs->read_location_last1;
- case NAND_READ_LOCATION_LAST_CW_2:
- return &regs->read_location_last2;
- case NAND_READ_LOCATION_LAST_CW_3:
- return &regs->read_location_last3;
- default:
- return NULL;
- }
-}
-
-static void nandc_set_reg(struct nand_chip *chip, int offset,
- u32 val)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg_base == NAND_READ_LOCATION_0)
+ nandc->regs->read_location0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_1)
+ nandc->regs->read_location1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_2)
+ nandc->regs->read_location2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_3)
+ nandc->regs->read_location3 = locreg_val;
+}
+
+/**
+ * nandc_set_read_loc_last() - set the last read location register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function sets the last read location register value
+ */
+static void nandc_set_read_loc_last(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nandc_regs *regs = nandc->regs;
- __le32 *reg;
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
- reg = offset_to_nandc_reg(regs, offset);
+ locreg_val = cpu_to_le32(val);
- if (reg)
- *reg = cpu_to_le32(val);
-}
-
-/* Helper to check the code word, whether it is last cw or not */
-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
-{
- return cw == (ecc->steps - 1);
+ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
+ nandc->regs->read_location_last0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
+ nandc->regs->read_location_last1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
+ nandc->regs->read_location_last2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
+ nandc->regs->read_location_last3 = locreg_val;
}
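/*
 * Example of the value the two helpers above build, assuming the masks in
 * nand-qpic-common.h keep the legacy field positions (offset at bit 0,
 * size at bit 16, "last" at bit 31, as in the defines removed above).
 * Reading 512 bytes at offset 0 as the final location gives:
 *
 *	val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, 0) |
 *	      FIELD_PREP(READ_LOCATION_SIZE_MASK, 512) |
 *	      FIELD_PREP(READ_LOCATION_LAST_MASK, 1);
 *	    = (512 << 16) | BIT(31) = 0x82000000
 */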
/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
- int cw_offset, int read_size, int is_last_read_loc)
+ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg_base = NAND_READ_LOCATION_0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
reg_base = NAND_READ_LOCATION_LAST_CW_0;
reg_base += reg * 4;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
return nandc_set_read_loc_last(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
else
@@ -762,12 +238,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
+ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
+ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
}
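/*
 * Example of the address packing above: for page 0x12345 and column 0,
 * addr0 = (0x12345 << 16) truncated to 32 bits = 0x23450000, and
 * addr1 = (0x12345 >> 16) & 0xff = 0x01; the page bits that do not fit
 * in addr0 spill into addr1.
 */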
/*
@@ -781,41 +258,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
struct nand_chip *chip = &host->chip;
- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
else
- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
} else {
- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
}
if (host->use_ecc) {
- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
- cfg1 = host->cfg1;
- ecc_bch_cfg = host->ecc_bch_cfg;
+ cfg1 = cpu_to_le32(host->cfg1);
+ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
- cfg1 = host->cfg1_raw;
- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ cfg1 = cpu_to_le32(host->cfg1_raw);
+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
}
- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = cmd;
+ nandc->regs->cfg0 = cfg0;
+ nandc->regs->cfg1 = cfg1;
+ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
+
+ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
+ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
+ nandc->regs->exec = cpu_to_le32(1);
if (read)
nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
@@ -823,366 +302,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
/*
- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
- * which will be submitted to DMA engine.
- */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
-{
- struct desc_info *desc;
- struct scatterlist *sgl;
- unsigned int sgl_cnt;
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- enum dma_transfer_direction dir_eng;
- struct dma_async_tx_descriptor *dma_desc;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- if (chan == nandc->cmd_chan) {
- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else if (chan == nandc->tx_chan) {
- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else {
- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- }
-
- sg_mark_end(sgl + sgl_cnt - 1);
- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- if (ret == 0) {
- dev_err(nandc->dev, "failure in mapping desc\n");
- kfree(desc);
- return -ENOMEM;
- }
-
- desc->sgl_cnt = sgl_cnt;
- desc->bam_sgl = sgl;
-
- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
- flags);
-
- if (!dma_desc) {
- dev_err(nandc->dev, "failure in prep desc\n");
- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- kfree(desc);
- return -EINVAL;
- }
-
- desc->dma_desc = dma_desc;
-
- /* update last data/command descriptor */
- if (chan == nandc->cmd_chan)
- bam_txn->last_cmd_desc = dma_desc;
- else
- bam_txn->last_data_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-}
-
-/*
- * Prepares the command descriptor for BAM DMA which will be used for NAND
- * register reads and writes. The command descriptor requires the command
- * to be formed in command element type so this function uses the command
- * element from bam transaction ce array and fills the same with required
- * data. A single SGL can contain multiple command elements so
- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
- * after the current command element.
- */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
-{
- int bam_ce_size;
- int i, ret;
- struct bam_cmd_element *bam_ce_buffer;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-
- /* fill the command desc */
- for (i = 0; i < size; i++) {
- if (read)
- bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
- reg_buf_dma_addr(nandc,
- (__le32 *)vaddr + i));
- else
- bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
- *((__le32 *)vaddr + i));
- }
-
- bam_txn->bam_ce_pos += size;
-
- /* use the separate sgl after this command */
- if (flags & NAND_BAM_NEXT_SGL) {
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
- bam_ce_size = (bam_txn->bam_ce_pos -
- bam_txn->bam_ce_start) *
- sizeof(struct bam_cmd_element);
- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
- bam_ce_buffer, bam_ce_size);
- bam_txn->cmd_sgl_pos++;
- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-
- if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
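/*
 * Typical call pattern feeding the NAND_BAM_NEXT_SGL handling above, as
 * used by the codeword read/write paths in this file: each register write
 * that ends a logical step closes the current command SGL so BAM can
 * fence it:
 *
 *	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
 *	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
 */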
-
-/*
- * Prepares the data descriptor for BAM DMA which will be used for NAND
- * data reads and writes.
- */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
-{
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (read) {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
- vaddr, size);
- bam_txn->rx_sgl_pos++;
- } else {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
- vaddr, size);
- bam_txn->tx_sgl_pos++;
-
- /*
- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
- * is not set, form the DMA descriptor
- */
- if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
-{
- struct desc_info *desc;
- struct dma_async_tx_descriptor *dma_desc;
- struct scatterlist *sgl;
- struct dma_slave_config slave_conf;
- struct qcom_adm_peripheral_config periph_conf = {};
- enum dma_transfer_direction dir_eng;
- int ret;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- sgl = &desc->adm_sgl;
-
- sg_init_one(sgl, vaddr, size);
-
- if (read) {
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- } else {
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- }
-
- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
- if (ret == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- memset(&slave_conf, 0x00, sizeof(slave_conf));
-
- slave_conf.device_fc = flow_control;
- if (read) {
- slave_conf.src_maxburst = 16;
- slave_conf.src_addr = nandc->base_dma + reg_off;
- if (nandc->data_crci) {
- periph_conf.crci = nandc->data_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- } else {
- slave_conf.dst_maxburst = 16;
- slave_conf.dst_addr = nandc->base_dma + reg_off;
- if (nandc->cmd_crci) {
- periph_conf.crci = nandc->cmd_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- }
-
- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
- if (ret) {
- dev_err(nandc->dev, "failed to configure dma channel\n");
- goto err;
- }
-
- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
- if (!dma_desc) {
- dev_err(nandc->dev, "failed to prepare desc\n");
- ret = -EINVAL;
- goto err;
- }
-
- desc->dma_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-err:
- kfree(desc);
-
- return ret;
-}
-
-/*
- * read_reg_dma: prepares a descriptor to read a given number of
- * contiguous registers to the reg_read_buf pointer
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to read
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- void *vaddr;
-
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
-
- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, first);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, true, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- struct nandc_regs *regs = nandc->regs;
- void *vaddr;
-
- vaddr = offset_to_nandc_reg(regs, first);
-
- if (first == NAND_ERASED_CW_DETECT_CFG) {
- if (flags & NAND_ERASED_CW_SET)
- vaddr = &regs->erased_cw_detect_cfg_set;
- else
- vaddr = &regs->erased_cw_detect_cfg_clr;
- }
-
- if (first == NAND_EXEC_CMD)
- flags |= NAND_BAM_NWD;
-
- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-
- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, false, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
- * controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to write to
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-}
-
-/*
- * write_data_dma: prepares a DMA descriptor to transfer data from
- * 'vaddr' to the controller's internal buffer
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to read from
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-}
-
-/*
* Helper to prepare DMA descriptors for configuring registers
* before reading a NAND page.
*/
@@ -1190,13 +309,14 @@ static void config_nand_page_read(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
@@ -1209,23 +329,23 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int reg = NAND_READ_LOCATION_0;
+ __le32 *reg = &nandc->regs->read_location0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
- reg = NAND_READ_LOCATION_LAST_CW_0;
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg = &nandc->regs->read_location_last0;
- if (nandc->props->is_bam)
- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
+ if (nandc->props->supports_bam)
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
} else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
@@ -1249,11 +369,11 @@ static void config_nand_page_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1264,390 +384,14 @@ static void config_nand_cw_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-}
-
-/*
- * the following functions are used within chip->legacy.cmdfunc() to
- * perform different NAND_CMD_* commands
- */
-
-/* sets up descriptors for NAND_CMD_PARAM */
-static int nandc_param(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- /*
- * NAND_CMD_PARAM is called before we know much about the FLASH chip
- * in use. we configure the controller to perform a raw read of 512
- * bytes to read onfi params
- */
- if (nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
- PAGE_ACC | LAST_PAGE);
- else
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
- PAGE_ACC | LAST_PAGE);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-
- /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
- }
-
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
- }
-
- nandc_set_read_loc(chip, 0, 0, 0, 512, 1);
-
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- }
-
- nandc->buf_count = 512;
- memset(nandc->data_buffer, 0xff, nandc->buf_count);
-
- config_nand_single_cw_page_read(chip, false, 0);
-
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
-
- /* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
- }
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_ERASE1 */
-static int erase_block(struct qcom_nand_host *host, int page_addr)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD,
- OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
- nandc_set_reg(chip, NAND_ADDR0, page_addr);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_READID */
-static int read_id(struct qcom_nand_host *host, int column)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (column == -1)
- return 0;
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
- nandc_set_reg(chip, NAND_ADDR0, column);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_RESET */
-static int reset(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc;
- dma_cookie_t cookie = 0;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- int r;
-
- if (nandc->props->is_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (r)
- return r;
- }
-
- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (r)
- return r;
- }
-
- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
- if (r)
- return r;
- }
- }
-
- list_for_each_entry(desc, &nandc->desc_list, node)
- cookie = dmaengine_submit(desc->dma_desc);
-
- if (nandc->props->is_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
- if (bam_txn->last_data_desc) {
- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
- bam_txn->last_data_desc->callback_param = bam_txn;
- bam_txn->wait_second_completion = true;
- }
-
- dma_async_issue_pending(nandc->tx_chan);
- dma_async_issue_pending(nandc->rx_chan);
- dma_async_issue_pending(nandc->cmd_chan);
-
- if (!wait_for_completion_timeout(&bam_txn->txn_done,
- QPIC_NAND_COMPLETION_TIMEOUT))
- return -ETIMEDOUT;
- } else {
- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static void free_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
-
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
-
- if (nandc->props->is_bam)
- dma_unmap_sg(nandc->dev, desc->bam_sgl,
- desc->sgl_cnt, desc->dir);
- else
- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
- desc->dir);
-
- kfree(desc);
- }
-}
-
-/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
-{
- nandc->reg_read_pos = 0;
- nandc_read_buffer_sync(nandc, false);
-}
-
-static void pre_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc->buf_count = 0;
- nandc->buf_start = 0;
- host->use_ecc = false;
- host->last_command = command;
-
- clear_read_regs(nandc);
-
- if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
- command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
- clear_bam_transaction(nandc);
-}
-
-/*
- * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
- * privately maintained status byte, this status byte can be read after
- * NAND_CMD_STATUS is called
- */
-static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int num_cw;
- int i;
-
- num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
- nandc_read_buffer_sync(nandc, true);
-
- for (i = 0; i < num_cw; i++) {
- u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-
- if (flash_status & FS_MPU_ERR)
- host->status &= ~NAND_STATUS_WP;
-
- if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
- (flash_status &
- FS_DEVICE_STS_ERR)))
- host->status |= NAND_STATUS_FAIL;
- }
-}
-
-static void post_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- switch (command) {
- case NAND_CMD_READID:
- nandc_read_buffer_sync(nandc, true);
- memcpy(nandc->data_buffer, nandc->reg_read_buf,
- nandc->buf_count);
- break;
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- parse_erase_write_errors(host, command);
- break;
- default:
- break;
- }
-}
-
-/*
- * Implements chip->legacy.cmdfunc. It's only used for a limited set of
- * commands. The rest of the commands wouldn't be called by upper layers.
- * For example, NAND_CMD_READOOB would never be called because we have our own
- * versions of read_oob ops for nand_ecc_ctrl.
- */
-static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
- int column, int page_addr)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- bool wait = false;
- int ret = 0;
-
- pre_command(host, command);
-
- switch (command) {
- case NAND_CMD_RESET:
- ret = reset(host);
- wait = true;
- break;
-
- case NAND_CMD_READID:
- nandc->buf_count = 4;
- ret = read_id(host, column);
- wait = true;
- break;
-
- case NAND_CMD_PARAM:
- ret = nandc_param(host);
- wait = true;
- break;
-
- case NAND_CMD_ERASE1:
- ret = erase_block(host, page_addr);
- wait = true;
- break;
-
- case NAND_CMD_READ0:
- /* we read the entire page for now */
- WARN_ON(column != 0);
-
- host->use_ecc = true;
- set_address(host, 0, page_addr);
- update_rw_regs(host, ecc->steps, true, 0);
- break;
-
- case NAND_CMD_SEQIN:
- WARN_ON(column != 0);
- set_address(host, 0, page_addr);
- break;
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_STATUS:
- case NAND_CMD_NONE:
- default:
- break;
- }
-
- if (ret) {
- dev_err(nandc->dev, "failure executing command %d\n",
- command);
- free_descs(nandc);
- return;
- }
-
- if (wait) {
- ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev,
- "failure submitting descs for command %d\n",
- command);
- }
-
- free_descs(nandc);
-
- post_command(host, command);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1711,7 +455,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1736,12 +480,15 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
int raw_cw = cw;
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ qcom_clear_read_regs(nandc);
host->use_ecc = false;
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
raw_cw = ecc->steps - 1;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
@@ -1759,7 +506,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
read_loc += data_size1;
@@ -1774,19 +521,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
config_nand_cw_read(chip, false, raw_cw);
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- ret = submit_descs(nandc);
- free_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1819,7 +565,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *cw_data_buf, *cw_oob_buf;
- int cw, data_size, oob_size, ret = 0;
+ int cw, data_size, oob_size, ret;
if (!data_buf)
data_buf = nand_get_data_buf(chip);
@@ -1884,7 +630,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
@@ -1997,7 +743,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_size = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
if (data_buf && oob_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
nandc_set_read_loc(chip, i, 1, data_size,
@@ -2013,8 +759,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
config_nand_cw_read(chip, true, i);
if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
@@ -2029,8 +775,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
if (data_buf)
@@ -2039,9 +785,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
- free_descs(nandc);
-
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -2062,7 +806,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
int size;
int ret;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
@@ -2074,14 +818,12 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
- free_descs(nandc);
-
return ret;
}
@@ -2140,36 +882,44 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
- host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
- host->cw_data << UD_SIZE_BYTES;
+ host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
- host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
- host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+ host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data);
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1);
}
/* implements ecc->read_page() */
-static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf = NULL;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = true;
+ qcom_clear_read_regs(nandc);
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
-static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2204,8 +954,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
@@ -2215,7 +965,7 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
}
/* implements ecc->write_page() */
-static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
+static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2229,8 +979,11 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ set_address(host, 0, page);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2251,9 +1004,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
oob_size = ecc->bytes;
}
-
- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
@@ -2265,8 +1017,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
@@ -2275,21 +1027,18 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
- if (ret)
+ ret = qcom_submit_descs(nandc);
+ if (ret) {
dev_err(nandc->dev, "failure to write page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
+ const u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2303,8 +1052,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2330,37 +1079,34 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
- ret = submit_descs(nandc);
- if (ret)
+ ret = qcom_submit_descs(nandc);
+ if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2384,7 +1130,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2399,17 +1145,14 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
-
- free_descs(nandc);
-
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
- return -EIO;
+ return ret;
}
return nand_prog_page_end_op(chip);
@@ -2433,7 +1176,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
*/
host->use_ecc = false;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
@@ -2460,8 +1203,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
/*
* to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2478,78 +1221,17 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
-
- free_descs(nandc);
-
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
- return -EIO;
- }
-
- return nand_prog_page_end_op(chip);
-}
-
-/*
- * the three functions below implement chip->legacy.read_byte(),
- * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
- * aren't used for reading/writing page data, they are used for smaller data
- * like reading id, status etc
- */
-static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u8 *buf = nandc->data_buffer;
- u8 ret = 0x0;
-
- if (host->last_command == NAND_CMD_STATUS) {
- ret = host->status;
-
- host->status = NAND_STATUS_READY | NAND_STATUS_WP;
-
return ret;
}
- if (nandc->buf_start < nandc->buf_count)
- ret = buf[nandc->buf_start++];
-
- return ret;
-}
-
-static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
- nandc->buf_start += real_len;
-}
-
-static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
-
- nandc->buf_start += real_len;
-}
-
-/* we support only one external chip for now */
-static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (chipnr <= 0)
- return;
-
- dev_warn(nandc->dev, "invalid chip select\n");
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2660,7 +1342,7 @@ static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
}
static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+ struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2685,6 +1367,7 @@ qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
return strength == 4 ? 12 : 16;
}
+
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
NANDC_STEP_SIZE, 4, 8);
@@ -2696,7 +1379,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int cwperpage, bad_block_byte, ret;
bool wide_bus;
- int ecc_mode = 1;
+ int ecc_mode = ECC_MODE_8BIT;
/* controller only supports 512 bytes data steps */
ecc->size = NANDC_STEP_SIZE;
@@ -2717,7 +1400,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
if (ecc->strength >= 8) {
/* 8 bit ECC defaults to BCH ECC on all platforms */
host->bch_enabled = true;
- ecc_mode = 1;
+ ecc_mode = ECC_MODE_8BIT;
if (wide_bus) {
host->ecc_bytes_hw = 14;
@@ -2737,7 +1420,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
/* BCH */
host->bch_enabled = true;
- ecc_mode = 0;
+ ecc_mode = ECC_MODE_4BIT;
if (wide_bus) {
host->ecc_bytes_hw = 8;
@@ -2781,15 +1464,15 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
- if (nandc->props->is_bam)
- free_bam_transaction(nandc);
+ if (nandc->props->supports_bam)
+ qcom_free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
- if (nandc->props->is_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (nandc->props->supports_bam) {
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
@@ -2811,45 +1494,44 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_data << UD_SIZE_BYTES
- | 0 << DISABLE_STATUS_AFTER_WRITE
- | 5 << NUM_ADDR_CYCLES
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
- | 0 << STATUS_BFR_READ
- | 1 << SET_RD_MODE_AFTER_STATUS
- | host->spare_bytes << SPARE_SIZE_BYTES;
-
- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | bad_block_byte << BAD_BLOCK_BYTE_NUM
- | 0 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | host->bch_enabled << ENABLE_BCH_ECC;
-
- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_size << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES;
-
- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE;
-
- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
- | 0 << ECC_SW_RESET
- | host->cw_data << ECC_NUM_DATA_BYTES
- | 1 << ECC_FORCE_CLK_OPEN
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
- if (!nandc->props->qpic_v2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
+
+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+
+ if (!nandc->props->qpic_version2)
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -2867,157 +1549,504 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
return 0;
}
-static const struct nand_controller_ops qcom_nandc_ops = {
- .attach_chip = qcom_nand_attach_chip,
-};
+static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
+ struct qcom_op *q_op)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ int cmd;
+
+ switch (opcode) {
+ case NAND_CMD_RESET:
+ cmd = OP_RESET_DEVICE;
+ break;
+ case NAND_CMD_READID:
+ cmd = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+ if (nandc->props->qpic_version2)
+ cmd = OP_PAGE_READ_ONFI_READ;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ cmd = OP_BLOCK_ERASE;
+ break;
+ case NAND_CMD_STATUS:
+ cmd = OP_CHECK_STATUS;
+ break;
+ case NAND_CMD_PAGEPROG:
+ cmd = OP_PROGRAM_PAGE;
+ q_op->flag = OP_PROGRAM_PAGE;
+ nandc->exec_opwrite = true;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READSTART:
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ default:
+ dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ return cmd;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int qcom_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct qcom_op *q_op)
{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, ret;
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
+ instr = &subop->instrs[op_id];
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
+ if (ret < 0)
+ return ret;
+
+ q_op->cmd_reg = cpu_to_le32(ret);
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
+
+ if (naddrs > 4)
+ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
+
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ q_op->data_instr = instr;
+ q_op->data_instr_idx = op_id;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_delay_ns(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+ qcom_nandc_dev_to_mem(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+ if (flash & FS_READY_BSY_N)
+ return 0;
+ cpu_relax();
+ } while (time_after(start, jiffies));
+
+ dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
+
+ return -ETIMEDOUT;
+}
+
+static int qcom_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret, num_cw, i;
+ u32 flash_status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ num_cw = nandc->exec_opwrite ? ecc->steps : 1;
+ nandc->exec_opwrite = false;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ goto err_out;
}
+
+ qcom_nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR ||
+ (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+
+ flash_status = host->status;
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ memcpy(instr->ctx.data.buf.in, &flash_status, len);
+
+err_out:
+ return ret;
}
-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
int ret;
- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ nandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(nandc);
if (ret) {
- dev_err(nandc->dev, "failed to set DMA mask\n");
+ dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ goto err_out;
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ qcom_nandc_dev_to_mem(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ int ret;
+ int instrs = 1;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
return ret;
+
+ if (q_op.flag == OP_PROGRAM_PAGE) {
+ goto wait_rdy;
+ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK);
+ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
+ instrs = 3;
+ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
+ return 0;
}
- /*
- * we use the internal buffer for reading ONFI params, reading small
- * data like ID and status, and performing read-copy-write operations
- * when writing to a codeword partially. 532 is the maximum possible
- * size of a codeword for our nand controller
- */
- nandc->buf_size = 532;
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
- GFP_KERNEL);
- if (!nandc->data_buffer)
- return -ENOMEM;
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
- GFP_KERNEL);
- if (!nandc->regs)
- return -ENOMEM;
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- nandc->reg_read_buf = devm_kcalloc(nandc->dev,
- MAX_REG_RD, sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
- if (!nandc->reg_read_buf)
- return -ENOMEM;
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- if (nandc->props->is_bam) {
- nandc->reg_read_dma =
- dma_map_single(nandc->dev, nandc->reg_read_buf,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
- return -EIO;
- }
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
- if (IS_ERR(nandc->tx_chan)) {
- ret = PTR_ERR(nandc->tx_chan);
- nandc->tx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "tx DMA channel request failed\n");
- goto unalloc;
- }
+ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ goto err_out;
+ }
- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
- if (IS_ERR(nandc->rx_chan)) {
- ret = PTR_ERR(nandc->rx_chan);
- nandc->rx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rx DMA channel request failed\n");
- goto unalloc;
- }
+wait_rdy:
+ qcom_delay_ns(q_op.rdy_delay_ns);
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
- if (IS_ERR(nandc->cmd_chan)) {
- ret = PTR_ERR(nandc->cmd_chan);
- nandc->cmd_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "cmd DMA channel request failed\n");
- goto unalloc;
- }
+err_out:
+ return ret;
+}
- /*
- * Initially allocate BAM transaction to read ONFI param page.
- * After detecting all the devices, this BAM transaction will
- * be freed and the next BAM transaction will be allocated with
- * maximum codeword size
- */
- nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- ret = -ENOMEM;
- goto unalloc;
- }
- } else {
- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
- if (IS_ERR(nandc->chan)) {
- ret = PTR_ERR(nandc->chan);
- nandc->chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rxtx DMA channel request failed\n");
- return ret;
- }
+static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret, reg_base;
+
+ reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_version2)
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = 0;
+ nandc->regs->addr1 = 0;
+
+ nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+ nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) |
+ FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM));
}
- INIT_LIST_HEAD(&nandc->desc_list);
- INIT_LIST_HEAD(&nandc->host_list);
+ nandc->regs->exec = cpu_to_le32(1);
- nand_controller_init(&nandc->controller);
- nandc->controller.ops = &qcom_nandc_ops;
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
+ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
+ }
- return 0;
-unalloc:
- qcom_nandc_unalloc(nandc);
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ if (nandc->props->qpic_version2)
+ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
+ else
+ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
+
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = 512;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_nand_single_cw_page_read(chip, false, 0);
+
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
+ }
+
+ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ goto err_out;
+ }
+
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+ if (ret)
+ goto err_out;
+
+ memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
+
+err_out:
return ret;
}
+static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_param_page_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_misc_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int qcom_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
+ instr->ctx.cmd.opcode != NAND_CMD_READID &&
+ instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
+ instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
+ instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
+ instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
+ instr->ctx.cmd.opcode != NAND_CMD_READSTART)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return qcom_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
+}
+
+static const struct nand_controller_ops qcom_nandc_ops = {
+ .attach_chip = qcom_nand_attach_chip,
+ .exec_op = qcom_nand_exec_op,
+};
+
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
u32 nand_ctrl;
+ nand_controller_init(nandc->controller);
+ nandc->controller->ops = &qcom_nandc_ops;
+
/* kill onenand */
- if (!nandc->props->is_qpic)
+ if (!nandc->props->nandc_part_of_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- if (!nandc->props->qpic_v2)
+ if (!nandc->props->qpic_version2)
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nand_ctrl = nandc_read(nandc, NAND_CTRL);
/*
@@ -3034,7 +2063,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
}
/* save the original values of these registers */
- if (!nandc->props->qpic_v2) {
+ if (!nandc->props->qpic_version2) {
nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
nandc->vld = NAND_DEV_CMD_VLD_VAL;
}
@@ -3135,14 +2164,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmdfunc = qcom_nandc_command;
- chip->legacy.select_chip = qcom_nandc_select_chip;
- chip->legacy.read_byte = qcom_nandc_read_byte;
- chip->legacy.read_buf = qcom_nandc_read_buf;
- chip->legacy.write_buf = qcom_nandc_write_buf;
- chip->legacy.set_features = nand_get_set_features_notsupp;
- chip->legacy.get_features = nand_get_set_features_notsupp;
-
/*
* the bad block marker is readable only when we read the last codeword
* of a page with ECC disabled. currently, the nand_base and nand_bbt
@@ -3154,7 +2175,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
chip->legacy.block_bad = qcom_nandc_block_bad;
chip->legacy.block_markbad = qcom_nandc_block_markbad;
- chip->controller = &nandc->controller;
+ chip->controller = nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
@@ -3215,7 +2236,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
struct device_node *np = nandc->dev->of_node;
int ret;
- if (!nandc->props->is_bam) {
+ if (!nandc->props->supports_bam) {
ret = of_property_read_u32(np, "qcom,cmd-crci",
&nandc->cmd_crci);
if (ret) {
@@ -3237,17 +2258,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
static int qcom_nandc_probe(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc;
+ struct nand_controller *controller;
const void *dev_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
+ GFP_KERNEL);
if (!nandc)
return -ENOMEM;
+ controller = (struct nand_controller *)&nandc[1];
platform_set_drvdata(pdev, nandc);
nandc->dev = dev;
+ nandc->controller = controller;
dev_data = of_device_get_match_data(dev);
if (!dev_data) {
@@ -3309,7 +2334,7 @@ err_nandc_alloc:
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- dma_unmap_resource(dev, res->start, resource_size(res),
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
@@ -3340,31 +2365,35 @@ static void qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
- .is_bam = false,
+ .supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
- .qpic_v2 = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
+ .qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
/*
@@ -3401,8 +2430,8 @@ static struct platform_driver qcom_nandc_driver = {
.name = "qcom-nandc",
.of_match_table = qcom_nandc_of_match,
},
- .probe = qcom_nandc_probe,
- .remove_new = qcom_nandc_remove,
+ .probe = qcom_nandc_probe,
+ .remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);
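
The largest change in the qcom hunks above is mechanical: open-coded shift-and-OR register assembly is replaced by FIELD_PREP() against named *_MASK definitions, and the legacy cmdfunc/read_byte/read_buf hooks give way to an exec_op() parser. A minimal sketch of the FIELD_PREP idiom, with stand-in macros so it builds outside the kernel (the real helpers live in <linux/bits.h> and <linux/bitfield.h>, and the mask layouts below are illustrative, not the controller's actual register map):

#include <stdio.h>

/* illustrative stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK(h, l)       (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

#define UD_SIZE_BYTES_MASK  GENMASK(18, 9)  /* hypothetical bit layout */
#define CW_PER_PAGE_MASK    GENMASK(8, 6)

int main(void)
{
        /* pack "codewords per page - 1" and the user-data size into one
         * config word, mirroring the style of qcom_nand_attach_chip() */
        unsigned int cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 4 - 1) |
                            FIELD_PREP(UD_SIZE_BYTES_MASK, 516);

        printf("cfg0 = 0x%08x\n", cfg0);
        return 0;
}

The win over the old "value << SHIFT" form is that the shift amount is derived from the mask itself, so a field cannot silently be placed at the wrong offset or overflow its width.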
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index ed0cf732d20e..918974d088cf 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -335,7 +335,7 @@ static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
else
dev->ctlreg &= ~R852_CTL_WRITE;
- /* when write is stareted, enable write access */
+ /* when write is started, enable write access */
if (dat == NAND_CMD_ERASE1)
dev->ctlreg |= R852_CTL_WRITE;
@@ -372,7 +372,7 @@ static int r852_wait(struct nand_chip *chip)
nand_status_op(chip, &status);
- /* Unfortunelly, no way to send detailed error status... */
+ /* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
status |= NAND_STATUS_FAIL;
dev->dma_error = 0;
@@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip)
static int r852_ready(struct nand_chip *chip)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ if (dev->card_unstable)
+ return 0;
+
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
index 589021ea9eb2..201dd62b9990 100644
--- a/drivers/mtd/nand/raw/renesas-nand-controller.c
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -210,7 +210,7 @@ struct rnand_chip {
u32 tim_gen_seq1;
u32 tim_gen_seq2;
u32 tim_gen_seq3;
- struct rnand_chip_sel sels[];
+ struct rnand_chip_sel sels[] __counted_by(nsels);
};
struct rnandc {
@@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(rnandc->dev, dma_addr))
+ return -ENOMEM;
+
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
@@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
DMA_TO_DEVICE);
+ if (dma_mapping_error(rnandc->dev, dma_addr))
+ return -ENOMEM;
+
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
@@ -1297,23 +1303,17 @@ static void rnandc_chips_cleanup(struct rnandc *rnandc)
static int rnandc_chips_init(struct rnandc *rnandc)
{
- struct device_node *np;
int ret;
- for_each_child_of_node(rnandc->dev->of_node, np) {
+ for_each_child_of_node_scoped(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
- of_node_put(np);
- goto cleanup_chips;
+ rnandc_chips_cleanup(rnandc);
+ return ret;
}
}
return 0;
-
-cleanup_chips:
- rnandc_chips_cleanup(rnandc);
-
- return ret;
}
static int rnandc_probe(struct platform_device *pdev)
@@ -1336,7 +1336,10 @@ static int rnandc_probe(struct platform_device *pdev)
if (IS_ERR(rnandc->regs))
return PTR_ERR(rnandc->regs);
- devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
@@ -1408,7 +1411,7 @@ static struct platform_driver rnandc_driver = {
.of_match_table = rnandc_id_table,
},
.probe = rnandc_probe,
- .remove_new = rnandc_remove,
+ .remove = rnandc_remove,
};
module_platform_driver(rnandc_driver);
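
Two patterns recur in the renesas hunks (and in the rockchip ones below): every dma_map_single() result is now vetted with dma_mapping_error() before being programmed into the controller, and the child-node walk switches to for_each_child_of_node_scoped(), which drops the of_node reference automatically on every exit path. A minimal sketch of the mapping check, assuming placeholder toy_* names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* sketch: map a buffer for a device-to-memory transfer and refuse to
 * program the controller if no usable DMA address could be produced */
static int toy_map_for_read(struct device *dev, void *buf, size_t len,
                            dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}

/* the caller owns the matching unmap once the transfer finishes:
 *      dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
 */

Skipping the dma_mapping_error() check, as the old code did, means a swiotlb or IOMMU mapping failure hands the controller a bogus bus address and the DMA scribbles over whatever happens to live there.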
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 5a04680342c3..9444ba02696d 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -15,7 +15,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -99,7 +98,7 @@ enum nfc_type {
* @high: ECC count high bit index at register.
* @high_mask: mask bit
*/
-struct ecc_cnt_status {
+struct rk_ecc_cnt_status {
u8 err_flag_bit;
u8 low;
u8 low_mask;
@@ -109,6 +108,7 @@ struct ecc_cnt_status {
};
/**
+ * struct nfc_cfg: Rockchip NAND controller configuration
* @type: NFC version
* @ecc_strengths: ECC strengths
* @ecc_cfgs: ECC config values
@@ -145,8 +145,8 @@ struct nfc_cfg {
u32 int_st_off;
u32 oob0_off;
u32 oob1_off;
- struct ecc_cnt_status ecc0;
- struct ecc_cnt_status ecc1;
+ struct rk_ecc_cnt_status ecc0;
+ struct rk_ecc_cnt_status ecc1;
};
struct rk_nfc_nand_chip {
@@ -159,8 +159,7 @@ struct rk_nfc_nand_chip {
u32 timing;
u8 nsels;
- u8 sels[];
- /* Nothing after this field. */
+ u8 sels[] __counted_by(nsels);
};
struct rk_nfc {
@@ -421,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
u32 rate, tc2rw, trwpw, trw2c;
u32 temp;
- if (target < 0)
- return 0;
-
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -EOPNOTSUPP;
+ if (target < 0)
+ return 0;
+
if (IS_ERR(nfc->nfc_clk))
rate = clk_get_rate(nfc->ahb_clk);
else
@@ -657,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
mtd->writesize, DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_data))
+ return -ENOMEM;
+
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
reinit_completion(&nfc->done);
writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
@@ -773,9 +779,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
dma_data = dma_map_single(nfc->dev, nfc->page_buf,
mtd->writesize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_data))
+ return -ENOMEM;
+
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
/*
* The first blocks (4, 8 or 16 depending on the device)
@@ -1120,7 +1134,7 @@ static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
return -EINVAL;
}
- rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
+ rknand = devm_kzalloc(dev, struct_size(rknand, sels, nsels),
GFP_KERNEL);
if (!rknand)
return -ENOMEM;
@@ -1212,7 +1226,7 @@ static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
{
- struct device_node *np = dev->of_node, *nand_np;
+ struct device_node *np = dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1222,10 +1236,9 @@ static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
rk_nfc_chips_cleanup(nfc);
return ret;
}
@@ -1479,7 +1492,7 @@ static const struct dev_pm_ops rk_nfc_pm_ops = {
static struct platform_driver rk_nfc_driver = {
.probe = rk_nfc_probe,
- .remove_new = rk_nfc_remove,
+ .remove = rk_nfc_remove,
.driver = {
.name = "rockchip-nfc",
.of_match_table = rk_nfc_id_table,
@@ -1492,4 +1505,3 @@ module_platform_driver(rk_nfc_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver");
-MODULE_ALIAS("platform:rockchip-nand-controller");
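
The rockchip hunks also pair the __counted_by() annotation on the trailing chip-select array with struct_size() at allocation time, so fortified builds can bounds-check sels[] against nsels. A condensed sketch of that pairing (toy_chip is a placeholder, not the driver's struct):

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_chip {
        u32 timing;
        u8 nsels;
        u8 sels[] __counted_by(nsels);  /* bounds are tied to nsels */
};

static struct toy_chip *toy_chip_alloc(struct device *dev, u8 nsels)
{
        struct toy_chip *chip;

        /* struct_size() is sizeof(*chip) + nsels * sizeof(chip->sels[0]),
         * computed with overflow checking, replacing the open-coded
         * "sizeof(*chip) + nsels * sizeof(u8)" multiply */
        chip = devm_kzalloc(dev, struct_size(chip, sels, nsels), GFP_KERNEL);
        if (chip)
                chip->nsels = nsels;    /* assign before touching sels[] */

        return chip;
}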
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
deleted file mode 100644
index ac80aaf5b4e3..000000000000
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ /dev/null
@@ -1,1233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright © 2004-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C2410/S3C2440/S3C2412 NAND driver
-*/
-
-#define pr_fmt(fmt) "nand-s3c2410: " fmt
-
-#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/platform_data/mtd-nand-s3c2410.h>
-
-#define S3C2410_NFREG(x) (x)
-
-#define S3C2410_NFCONF S3C2410_NFREG(0x00)
-#define S3C2410_NFCMD S3C2410_NFREG(0x04)
-#define S3C2410_NFADDR S3C2410_NFREG(0x08)
-#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
-#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
-#define S3C2410_NFECC S3C2410_NFREG(0x14)
-#define S3C2440_NFCONT S3C2410_NFREG(0x04)
-#define S3C2440_NFCMD S3C2410_NFREG(0x08)
-#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
-#define S3C2440_NFDATA S3C2410_NFREG(0x10)
-#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
-#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
-#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
-#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
-#define S3C2410_NFCONF_EN (1<<15)
-#define S3C2410_NFCONF_INITECC (1<<12)
-#define S3C2410_NFCONF_nFCE (1<<11)
-#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
-#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
-#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
-#define S3C2410_NFSTAT_BUSY (1<<0)
-#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
-#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
-#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
-#define S3C2440_NFCONT_INITECC (1<<4)
-#define S3C2440_NFCONT_nFCE (1<<1)
-#define S3C2440_NFCONT_ENABLE (1<<0)
-#define S3C2440_NFSTAT_READY (1<<0)
-#define S3C2412_NFCONF_NANDBOOT (1<<31)
-#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
-#define S3C2412_NFCONT_nFCE0 (1<<1)
-#define S3C2412_NFSTAT_READY (1<<0)
-
-/* new oob placement block for use with hardware ecc generation
- */
-static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 0;
- oobregion->length = 3;
-
- return 0;
-}
-
-static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 8;
- oobregion->length = 8;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
- .ecc = s3c2410_ooblayout_ecc,
- .free = s3c2410_ooblayout_free,
-};
-
-/* controller and mtd information */
-
-struct s3c2410_nand_info;
-
-/**
- * struct s3c2410_nand_mtd - driver MTD structure
- * @mtd: The MTD instance to pass to the MTD layer.
- * @chip: The NAND chip information.
- * @set: The platform information supplied for this set of NAND chips.
- * @info: Link back to the hardware information.
-*/
-struct s3c2410_nand_mtd {
- struct nand_chip chip;
- struct s3c2410_nand_set *set;
- struct s3c2410_nand_info *info;
-};
-
-enum s3c_cpu_type {
- TYPE_S3C2410,
- TYPE_S3C2412,
- TYPE_S3C2440,
-};
-
-enum s3c_nand_clk_state {
- CLOCK_DISABLE = 0,
- CLOCK_ENABLE,
- CLOCK_SUSPEND,
-};
-
-/* overview of the s3c2410 nand state */
-
-/**
- * struct s3c2410_nand_info - NAND controller state.
- * @controller: Base controller structure.
- * @mtds: An array of MTD instances on this controller.
- * @platform: The platform data for this board.
- * @device: The platform device we bound to.
- * @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers.
- * @sel_reg: Pointer to the register controlling the NAND selection.
- * @sel_bit: The bit in @sel_reg to select the NAND chip.
- * @mtd_count: The number of MTDs created from this controller.
- * @save_sel: The contents of @sel_reg to be saved over suspend.
- * @clk_rate: The clock rate from @clk.
- * @clk_state: The current clock state.
- * @cpu_type: The exact type of this controller.
- * @freq_transition: CPUFreq notifier block
- */
-struct s3c2410_nand_info {
- /* mtd info */
- struct nand_controller controller;
- struct s3c2410_nand_mtd *mtds;
- struct s3c2410_platform_nand *platform;
-
- /* device info */
- struct device *device;
- struct clk *clk;
- void __iomem *regs;
- void __iomem *sel_reg;
- int sel_bit;
- int mtd_count;
- unsigned long save_sel;
- unsigned long clk_rate;
- enum s3c_nand_clk_state clk_state;
-
- enum s3c_cpu_type cpu_type;
-};
-
-struct s3c24XX_nand_devtype_data {
- enum s3c_cpu_type type;
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
- .type = TYPE_S3C2410,
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
- .type = TYPE_S3C2412,
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
- .type = TYPE_S3C2440,
-};
-
-/* conversion functions */
-
-static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
- chip);
-}
-
-static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
-{
- return s3c2410_nand_mtd_toours(mtd)->info;
-}
-
-static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
-{
- return platform_get_drvdata(dev);
-}
-
-static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
-{
- return dev_get_platdata(&dev->dev);
-}
-
-static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
-{
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
- return 1;
-#else
- return 0;
-#endif
-}
-
-/**
- * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
- * @info: The controller instance.
- * @new_state: State to which clock should be set.
- */
-static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
- enum s3c_nand_clk_state new_state)
-{
- if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
- return;
-
- if (info->clk_state == CLOCK_ENABLE) {
- if (new_state != CLOCK_ENABLE)
- clk_disable_unprepare(info->clk);
- } else {
- if (new_state == CLOCK_ENABLE)
- clk_prepare_enable(info->clk);
- }
-
- info->clk_state = new_state;
-}
-
-/* timing calculations */
-
-#define NS_IN_KHZ 1000000
-
-/**
- * s3c_nand_calc_rate - calculate timing data.
- * @wanted: The cycle time in nanoseconds.
- * @clk: The clock rate in kHz.
- * @max: The maximum divider value.
- *
- * Calculate the timing value from the given parameters.
- */
-static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
-{
- int result;
-
- result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
-
- pr_debug("result %d from %ld, %d\n", result, clk, wanted);
-
- if (result > max) {
- pr_err("%d ns is too big for current clock rate %ld\n",
- wanted, clk);
- return -1;
- }
-
- if (result < 1)
- result = 1;
-
- return result;
-}
-
-#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
-
-/* controller setup */
-
-/**
- * s3c2410_nand_setrate - setup controller timing information.
- * @info: The controller instance.
- *
- * Given the information supplied by the platform, calculate and set
- * the necessary timing registers in the hardware to generate the
- * necessary timing cycles to the hardware.
- */
-static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
-{
- struct s3c2410_platform_nand *plat = info->platform;
- int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
- int tacls, twrph0, twrph1;
- unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
- unsigned long flags;
-
- /* calculate the timing information for the controller */
-
- info->clk_rate = clkrate;
- clkrate /= 1000; /* turn clock into kHz for ease of use */
-
- if (plat != NULL) {
- tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
- twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
- twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
- } else {
- /* default timings */
- tacls = tacls_max;
- twrph0 = 8;
- twrph1 = 8;
- }
-
- if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
- dev_err(info->device, "cannot get suitable timings\n");
- return -EINVAL;
- }
-
- dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
- tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
- twrph1, to_ns(twrph1, clkrate));
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- mask = (S3C2410_NFCONF_TACLS(3) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
- set = S3C2410_NFCONF_EN;
- set |= S3C2410_NFCONF_TACLS(tacls - 1);
- set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
- set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
- break;
-
- case TYPE_S3C2440:
- case TYPE_S3C2412:
- mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
- S3C2440_NFCONF_TWRPH0(7) |
- S3C2440_NFCONF_TWRPH1(7));
-
- set = S3C2440_NFCONF_TACLS(tacls - 1);
- set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
- set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
- break;
-
- default:
- BUG();
- }
-
- local_irq_save(flags);
-
- cfg = readl(info->regs + S3C2410_NFCONF);
- cfg &= ~mask;
- cfg |= set;
- writel(cfg, info->regs + S3C2410_NFCONF);
-
- local_irq_restore(flags);
-
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
- return 0;
-}
-
-/**
- * s3c2410_nand_inithw - basic hardware initialisation
- * @info: The hardware state.
- *
- * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
- * to setup the hardware access speeds and set the controller to be enabled.
-*/
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
-{
- int ret;
-
- ret = s3c2410_nand_setrate(info);
- if (ret < 0)
- return ret;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- default:
- break;
-
- case TYPE_S3C2440:
- case TYPE_S3C2412:
- /* enable the controller and de-assert nFCE */
-
- writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
- }
-
- return 0;
-}
-
-/**
- * s3c2410_nand_select_chip - select the given nand chip
- * @this: NAND chip object.
- * @chip: The chip number.
- *
- * This is called by the MTD layer to either select a given chip for the
- * @mtd instance, or to indicate that the access has finished and the
- * chip can be de-selected.
- *
- * The routine ensures that the nFCE line is correctly setup, and any
- * platform specific selection code is called to route nFCE to the specific
- * chip.
- */
-static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
-{
- struct s3c2410_nand_info *info;
- struct s3c2410_nand_mtd *nmtd;
- unsigned long cur;
-
- nmtd = nand_get_controller_data(this);
- info = nmtd->info;
-
- if (chip != -1)
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
- cur = readl(info->sel_reg);
-
- if (chip == -1) {
- cur |= info->sel_bit;
- } else {
- if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
- dev_err(info->device, "invalid chip %d\n", chip);
- return;
- }
-
- if (info->platform != NULL) {
- if (info->platform->select_chip != NULL)
- (info->platform->select_chip) (nmtd->set, chip);
- }
-
- cur &= ~info->sel_bit;
- }
-
- writel(cur, info->sel_reg);
-
- if (chip == -1)
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
-}
-
-/* s3c2410_nand_hwcontrol
- *
- * Issue command and address cycles to the chip
-*/
-
-static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, info->regs + S3C2410_NFCMD);
- else
- writeb(cmd, info->regs + S3C2410_NFADDR);
-}
-
-/* command and control functions */
-
-static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, info->regs + S3C2440_NFCMD);
- else
- writeb(cmd, info->regs + S3C2440_NFADDR);
-}
-
-/* s3c2410_nand_devready()
- *
- * returns 0 if the nand is busy, 1 if it is ready
-*/
-
-static int s3c2410_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
-}
-
-static int s3c2440_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
-}
-
-static int s3c2412_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
-}
-
-/* ECC handling functions */
-
-static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned int diff0, diff1, diff2;
- unsigned int bit, byte;
-
- pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
-
- diff0 = read_ecc[0] ^ calc_ecc[0];
- diff1 = read_ecc[1] ^ calc_ecc[1];
- diff2 = read_ecc[2] ^ calc_ecc[2];
-
- pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
- __func__, 3, read_ecc, 3, calc_ecc,
- diff0, diff1, diff2);
-
- if (diff0 == 0 && diff1 == 0 && diff2 == 0)
- return 0; /* ECC is ok */
-
- /* sometimes people do not think about using the ECC, so check
- * to see if we have an 0xff,0xff,0xff read ECC and then ignore
- * the error, on the assumption that this is an un-eccd page.
- */
- if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
- && info->platform->ignore_unset_ecc)
- return 0;
-
- /* Can we correct this ECC (ie, one row and column change).
- * Note, this is similar to the 256 error code on smartmedia */
-
- if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
- ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
- ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
- /* calculate the bit position of the error */
-
- bit = ((diff2 >> 3) & 1) |
- ((diff2 >> 4) & 2) |
- ((diff2 >> 5) & 4);
-
- /* calculate the byte position of the error */
-
- byte = ((diff2 << 7) & 0x100) |
- ((diff1 << 0) & 0x80) |
- ((diff1 << 1) & 0x40) |
- ((diff1 << 2) & 0x20) |
- ((diff1 << 3) & 0x10) |
- ((diff0 >> 4) & 0x08) |
- ((diff0 >> 3) & 0x04) |
- ((diff0 >> 2) & 0x02) |
- ((diff0 >> 1) & 0x01);
-
- dev_dbg(info->device, "correcting error bit %d, byte %d\n",
- bit, byte);
-
- dat[byte] ^= (1 << bit);
- return 1;
- }
-
- /* if there is only one bit difference in the ECC, then
- * one of only a row or column parity has changed, which
- * means the error is most probably in the ECC itself */
-
- diff0 |= (diff1 << 8);
- diff0 |= (diff2 << 16);
-
- /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
- if ((diff0 & (diff0 - 1)) == 0)
- return 1;
-
- return -1;
-}
-
-/* ECC functions
- *
- * These allow the s3c2410 and s3c2440 to use the controller's ECC
- * generator block to ECC the data as it passes through]
-*/
-
-static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2410_NFCONF);
- ctrl |= S3C2410_NFCONF_INITECC;
- writel(ctrl, info->regs + S3C2410_NFCONF);
-}
-
-static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
- info->regs + S3C2440_NFCONT);
-}
-
-static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
-}
-
-static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
- ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
- ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
-
- pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
- return 0;
-}
-
-static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
-
- ecc_code[0] = ecc;
- ecc_code[1] = ecc >> 8;
- ecc_code[2] = ecc >> 16;
-
- pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
- return 0;
-}
-
-static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
-
- ecc_code[0] = ecc;
- ecc_code[1] = ecc >> 8;
- ecc_code[2] = ecc >> 16;
-
- pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
-
- return 0;
-}
-
-/* over-ride the standard functions for a little more speed. We can
- * use read/write block to move the data buffers to/from the controller
-*/
-
-static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
- readsb(this->legacy.IO_ADDR_R, buf, len);
-}
-
-static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
- struct mtd_info *mtd = nand_to_mtd(this);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
- /* cleanup if we've got less than a word to do */
- if (len & 3) {
- buf += len & ~3;
-
- for (; len & 3; len--)
- *buf++ = readb(info->regs + S3C2440_NFDATA);
- }
-}
-
-static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
- int len)
-{
- writesb(this->legacy.IO_ADDR_W, buf, len);
-}
-
-static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
- int len)
-{
- struct mtd_info *mtd = nand_to_mtd(this);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
- /* cleanup any fractional write */
- if (len & 3) {
- buf += len & ~3;
-
- for (; len & 3; len--, buf++)
- writeb(*buf, info->regs + S3C2440_NFDATA);
- }
-}
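
Both s3c2440 buffer helpers use the same split: move whole 32-bit words with readsl()/writesl() (len >> 2 of them), then finish the remaining len & 3 bytes one at a time. A standalone sketch of the idiom over plain memory rather than a FIFO register:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy @len bytes: bulk 32-bit words first, then the 0-3 byte tail. */
static void copy_words_then_tail(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t i;

	for (i = 0; i + 4 <= len; i += 4)	/* mirrors the len >> 2 words */
		memcpy(dst + i, src + i, 4);

	for (; i < len; i++)			/* mirrors the len & 3 loop */
		dst[i] = src[i];
}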
-
-/* device management functions */
-
-static void s3c24xx_nand_remove(struct platform_device *pdev)
-{
- struct s3c2410_nand_info *info = to_nand_info(pdev);
-
- if (info == NULL)
- return;
-
- /* Release all our mtds and their partitions, then go through
- * freeing the resources used
- */
-
- if (info->mtds != NULL) {
- struct s3c2410_nand_mtd *ptr = info->mtds;
- int mtdno;
-
- for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
- pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
- WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
- nand_cleanup(&ptr->chip);
- }
- }
-
- /* free the common resources */
-
- if (!IS_ERR(info->clk))
- s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
-}
-
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *mtd,
- struct s3c2410_nand_set *set)
-{
- if (set) {
- struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
-
- mtdinfo->name = set->name;
-
- return mtd_device_register(mtdinfo, set->partitions,
- set->nr_partitions);
- }
-
- return -ENODEV;
-}
-
-static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
- const struct nand_interface_config *conf)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- struct s3c2410_platform_nand *pdata = info->platform;
- const struct nand_sdr_timings *timings;
- int tacls;
-
- timings = nand_get_sdr_timings(conf);
- if (IS_ERR(timings))
- return -ENOTSUPP;
-
- tacls = timings->tCLS_min - timings->tWP_min;
- if (tacls < 0)
- tacls = 0;
-
- pdata->tacls = DIV_ROUND_UP(tacls, 1000);
- pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
- pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
-
- return s3c2410_nand_setrate(info);
-}
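
nand_sdr_timings carries its minimums in picoseconds, so the code above derives TACLS as tCLS - tWP (clamped at zero) and rounds everything up to whole nanoseconds before s3c2410_nand_setrate() turns those into clock cycles. A worked example with illustrative values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int tcls_ps = 35000, twp_ps = 25000;	/* illustrative values only */
	int tacls_ps = tcls_ps - twp_ps;	/* 10000 ps */

	if (tacls_ps < 0)
		tacls_ps = 0;			/* clamp, as the driver does */

	printf("tacls=%d ns twrph0=%d ns\n",
	       DIV_ROUND_UP(tacls_ps, 1000),	/* -> 10 */
	       DIV_ROUND_UP(twp_ps, 1000));	/* -> 25 */
	return 0;
}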
-
-/**
- * s3c2410_nand_init_chip - initialise a single instance of a chip
- * @info: The base NAND controller the chip is on.
- * @nmtd: The new controller MTD instance to fill in.
- * @set: The information passed from the board specific platform data.
- *
- * Initialise the given @nmtd from the information in @info and @set. This
- * readies the structure for use with the MTD layer functions by ensuring
- * all pointers are set up and the necessary control routines selected.
- */
-static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd,
- struct s3c2410_nand_set *set)
-{
- struct device_node *np = info->device->of_node;
- struct nand_chip *chip = &nmtd->chip;
- void __iomem *regs = info->regs;
-
- nand_set_flash_node(chip, set->of_node);
-
- chip->legacy.write_buf = s3c2410_nand_write_buf;
- chip->legacy.read_buf = s3c2410_nand_read_buf;
- chip->legacy.select_chip = s3c2410_nand_select_chip;
- chip->legacy.chip_delay = 50;
- nand_set_controller_data(chip, nmtd);
- chip->options = set->options;
- chip->controller = &info->controller;
-
- /*
- * let's keep behavior unchanged for legacy boards booting via pdata and
- * auto-detect timings only when booting with a device tree.
- */
- if (!np)
- chip->options |= NAND_KEEP_TIMINGS;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
- info->sel_reg = regs + S3C2410_NFCONF;
- info->sel_bit = S3C2410_NFCONF_nFCE;
- chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2410_nand_devready;
- break;
-
- case TYPE_S3C2440:
- chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
- info->sel_reg = regs + S3C2440_NFCONT;
- info->sel_bit = S3C2440_NFCONT_nFCE;
- chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2440_nand_devready;
- chip->legacy.read_buf = s3c2440_nand_read_buf;
- chip->legacy.write_buf = s3c2440_nand_write_buf;
- break;
-
- case TYPE_S3C2412:
- chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
- info->sel_reg = regs + S3C2440_NFCONT;
- info->sel_bit = S3C2412_NFCONT_nFCE0;
- chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2412_nand_devready;
-
- if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
- dev_info(info->device, "System booted from NAND\n");
-
- break;
- }
-
- chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
-
- nmtd->info = info;
- nmtd->set = set;
-
- chip->ecc.engine_type = info->platform->engine_type;
-
- /*
- * If you use u-boot BBT creation code, specifying this flag will
- * let the kernel fish out the BBT from the NAND.
- */
- if (set->flash_bbt)
- chip->bbt_options |= NAND_BBT_USE_FLASH;
-}
-
-/**
- * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
- * @chip: The NAND chip
- *
- * This hook is called by the core after the identification of the NAND chip,
- * once the relevant per-chip information is up to date. This call ensures that
- * we update the internal state accordingly.
- *
- * The internal state is currently limited to the ECC state information.
-*/
-static int s3c2410_nand_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- switch (chip->ecc.engine_type) {
-
- case NAND_ECC_ENGINE_TYPE_NONE:
- dev_info(info->device, "ECC disabled\n");
- break;
-
- case NAND_ECC_ENGINE_TYPE_SOFT:
- /*
- * This driver expects Hamming based ECC when engine_type is set
- * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
- * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
- * to s3c2410_platform_nand.
- */
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
- dev_info(info->device, "soft ECC\n");
- break;
-
- case NAND_ECC_ENGINE_TYPE_ON_HOST:
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.strength = 1;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
- }
-
- dev_dbg(info->device, "chip %p => page shift %d\n",
- chip, chip->page_shift);
-
- /* change the behaviour depending on whether we are using
- * the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- mtd_set_ooblayout(nand_to_mtd(chip),
- &s3c2410_ooblayout_ops);
- }
-
- dev_info(info->device, "hardware ECC\n");
- break;
-
- default:
- dev_err(info->device, "invalid ECC mode!\n");
- return -EINVAL;
- }
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->options |= NAND_SKIP_BBTSCAN;
-
- return 0;
-}
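
page_shift is log2 of the page size, so the `> 10` test above separates pages larger than 1 KiB: a 2048-byte page has page_shift = 11 and gets 256-byte ECC steps, while a 512-byte page has page_shift = 9 and gets the 512-byte step plus the custom OOB layout. A quick check of that arithmetic (the real value comes from the NAND core; this just recomputes it):

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048;		/* example page size */
	unsigned int page_shift = 0;

	while ((1u << page_shift) < writesize)	/* log2(writesize) */
		page_shift++;

	printf("page_shift=%u ecc_size=%u\n", page_shift,
	       page_shift > 10 ? 256 : 512);	/* -> 11, 256 */
	return 0;
}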
-
-static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
- .attach_chip = s3c2410_nand_attach_chip,
- .setup_interface = s3c2410_nand_setup_interface,
-};
-
-static const struct of_device_id s3c24xx_nand_dt_ids[] = {
- {
- .compatible = "samsung,s3c2410-nand",
- .data = &s3c2410_nand_devtype_data,
- }, {
- /* also compatible with s3c6400 */
- .compatible = "samsung,s3c2412-nand",
- .data = &s3c2412_nand_devtype_data,
- }, {
- .compatible = "samsung,s3c2440-nand",
- .data = &s3c2440_nand_devtype_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
-
-static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
-{
- const struct s3c24XX_nand_devtype_data *devtype_data;
- struct s3c2410_platform_nand *pdata;
- struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node, *child;
- struct s3c2410_nand_set *sets;
-
- devtype_data = of_device_get_match_data(&pdev->dev);
- if (!devtype_data)
- return -ENODEV;
-
- info->cpu_type = devtype_data->type;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- pdev->dev.platform_data = pdata;
-
- pdata->nr_sets = of_get_child_count(np);
- if (!pdata->nr_sets)
- return 0;
-
- sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets),
- GFP_KERNEL);
- if (!sets)
- return -ENOMEM;
-
- pdata->sets = sets;
-
- for_each_available_child_of_node(np, child) {
- sets->name = (char *)child->name;
- sets->of_node = child;
- sets->nr_chips = 1;
-
- of_node_get(child);
-
- sets++;
- }
-
- return 0;
-}
-
-static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
-
- info->cpu_type = platform_get_device_id(pdev)->driver_data;
-
- return 0;
-}
-
-/* s3c24xx_nand_probe
- *
- * called by the device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources then calls the
- * nand layer to look for devices
-*/
-static int s3c24xx_nand_probe(struct platform_device *pdev)
-{
- struct s3c2410_platform_nand *plat;
- struct s3c2410_nand_info *info;
- struct s3c2410_nand_mtd *nmtd;
- struct s3c2410_nand_set *sets;
- struct resource *res;
- int err = 0;
- int size;
- int nr_sets;
- int setno;
-
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (info == NULL) {
- err = -ENOMEM;
- goto exit_error;
- }
-
- platform_set_drvdata(pdev, info);
-
- nand_controller_init(&info->controller);
- info->controller.ops = &s3c24xx_nand_controller_ops;
-
- /* get the clock source and enable it */
-
- info->clk = devm_clk_get(&pdev->dev, "nand");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
- if (pdev->dev.of_node)
- err = s3c24xx_nand_probe_dt(pdev);
- else
- err = s3c24xx_nand_probe_pdata(pdev);
-
- if (err)
- goto exit_error;
-
- plat = to_nand_plat(pdev);
-
- /* allocate and map the resource */
-
- /* currently we assume we have the one resource */
- res = pdev->resource;
- size = resource_size(res);
-
- info->device = &pdev->dev;
- info->platform = plat;
-
- info->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(info->regs)) {
- err = PTR_ERR(info->regs);
- goto exit_error;
- }
-
- dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
-
- if (!plat->sets || plat->nr_sets < 1) {
- err = -EINVAL;
- goto exit_error;
- }
-
- sets = plat->sets;
- nr_sets = plat->nr_sets;
-
- info->mtd_count = nr_sets;
-
- /* allocate our information */
-
- size = nr_sets * sizeof(*info->mtds);
- info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (info->mtds == NULL) {
- err = -ENOMEM;
- goto exit_error;
- }
-
- /* initialise all possible chips */
-
- nmtd = info->mtds;
-
- for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
- struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
-
- pr_debug("initialising set %d (%p, info %p)\n",
- setno, nmtd, info);
-
- mtd->dev.parent = &pdev->dev;
- s3c2410_nand_init_chip(info, nmtd, sets);
-
- err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
- if (err)
- goto exit_error;
-
- s3c2410_nand_add_partition(info, nmtd, sets);
- }
-
- /* initialise the hardware */
- err = s3c2410_nand_inithw(info);
- if (err != 0)
- goto exit_error;
-
- if (allow_clk_suspend(info)) {
- dev_info(&pdev->dev, "clock idle support enabled\n");
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
- }
-
- return 0;
-
- exit_error:
- s3c24xx_nand_remove(pdev);
-
- if (err == 0)
- err = -EINVAL;
- return err;
-}
-
-/* PM Support */
-#ifdef CONFIG_PM
-
-static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(dev);
-
- if (info) {
- info->save_sel = readl(info->sel_reg);
-
- /* For the moment, we must ensure nFCE is high during
- * the time we are suspended. This really should be
- * handled by suspending the MTDs we are using, but
- * that is currently not the case. */
-
- writel(info->save_sel | info->sel_bit, info->sel_reg);
-
- s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
- }
-
- return 0;
-}
-
-static int s3c24xx_nand_resume(struct platform_device *dev)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(dev);
- unsigned long sel;
-
- if (info) {
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
- s3c2410_nand_inithw(info);
-
- /* Restore the state of the nFCE line. */
-
- sel = readl(info->sel_reg);
- sel &= ~info->sel_bit;
- sel |= info->save_sel & info->sel_bit;
- writel(sel, info->sel_reg);
-
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
- }
-
- return 0;
-}
-
-#else
-#define s3c24xx_nand_suspend NULL
-#define s3c24xx_nand_resume NULL
-#endif
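
The resume path restores only the chip-select bit from the register image saved at suspend, keeping every other bit at its freshly re-initialised value: clear the bit, then OR in the saved copy masked to that bit. The read-modify-write in isolation (hypothetical helper name):

#include <stdint.h>

/* Restore only the bits covered by @mask from @saved into @cur. */
static uint32_t restore_masked_bits(uint32_t cur, uint32_t saved,
				    uint32_t mask)
{
	cur &= ~mask;		/* drop the current nFCE state */
	cur |= saved & mask;	/* re-apply the state saved at suspend */
	return cur;
}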
-
-/* driver device registration */
-
-static const struct platform_device_id s3c24xx_driver_ids[] = {
- {
- .name = "s3c2410-nand",
- .driver_data = TYPE_S3C2410,
- }, {
- .name = "s3c2440-nand",
- .driver_data = TYPE_S3C2440,
- }, {
- .name = "s3c2412-nand",
- .driver_data = TYPE_S3C2412,
- }, {
- .name = "s3c6400-nand",
- .driver_data = TYPE_S3C2412, /* compatible with 2412 */
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-static struct platform_driver s3c24xx_nand_driver = {
- .probe = s3c24xx_nand_probe,
- .remove_new = s3c24xx_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .id_table = s3c24xx_driver_ids,
- .driver = {
- .name = "s3c24xx-nand",
- .of_match_table = s3c24xx_nand_dt_ids,
- },
-};
-
-module_platform_driver(s3c24xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 63bf20c41719..97f733e481ff 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -1124,8 +1123,7 @@ static int flctl_probe(struct platform_device *pdev)
if (!flctl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(flctl->reg))
return PTR_ERR(flctl->reg);
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
@@ -1217,14 +1215,15 @@ static void flctl_remove(struct platform_device *pdev)
}
static struct platform_driver flctl_driver = {
- .remove_new = flctl_remove,
+ .probe = flctl_probe,
+ .remove = flctl_remove,
.driver = {
.name = "sh_flctl",
.of_match_table = of_flctl_match,
},
};
-module_platform_driver_probe(flctl_driver, flctl_probe);
+module_platform_driver(flctl_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 2402dc5465d5..142e93b200a3 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -234,7 +234,7 @@ static struct platform_driver sharpsl_nand_driver = {
.name = "sharpsl-nand",
},
.probe = sharpsl_nand_probe,
- .remove_new = sharpsl_nand_remove,
+ .remove = sharpsl_nand_remove,
};
module_platform_driver(sharpsl_nand_driver);
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 24f52a30fb13..e238784c8c3e 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -52,8 +52,8 @@ static const struct mtd_ooblayout_ops oob_sm_ops = {
.free = oob_sm_ooblayout_free,
};
-/* NOTE: This layout is not compatabable with SmartMedia, */
-/* because the 256 byte devices have page depenent oob layout */
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
/* If you do not, then you break SmartMedia compliance anyway */
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index a8b720ffe9e8..668584683ce5 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -8,8 +8,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#define FPGA_NAND_CMD_MASK (0x7 << 28)
@@ -230,7 +231,7 @@ static struct platform_driver socrates_nand_driver = {
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
- .remove_new = socrates_nand_remove,
+ .remove = socrates_nand_remove,
};
module_platform_driver(socrates_nand_driver);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 10c11cecac08..c08d6b176372 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -37,7 +38,7 @@
#define FMC2_MAX_SG 16
/* Max chip enable */
-#define FMC2_MAX_CE 2
+#define FMC2_MAX_CE 4
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
@@ -243,6 +244,13 @@ static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
return container_of(chip, struct stm32_fmc2_nand, chip);
}
+struct stm32_fmc2_nfc;
+
+struct stm32_fmc2_nfc_data {
+ int max_ncs;
+ int (*set_cdev)(struct stm32_fmc2_nfc *nfc);
+};
+
struct stm32_fmc2_nfc {
struct nand_controller base;
struct stm32_fmc2_nand nand;
@@ -256,6 +264,7 @@ struct stm32_fmc2_nfc {
phys_addr_t data_phys_addr[FMC2_MAX_CE];
struct clk *clk;
u8 irq_state;
+ const struct stm32_fmc2_nfc_data *data;
struct dma_chan *dma_tx_ch;
struct dma_chan *dma_rx_ch;
@@ -263,7 +272,10 @@ struct stm32_fmc2_nfc {
struct sg_table dma_data_sg;
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
+ dma_addr_t dma_ecc_addr;
int dma_ecc_len;
+ u32 tx_dma_max_burst;
+ u32 rx_dma_max_burst;
struct completion complete;
struct completion dma_data_complete;
@@ -347,20 +359,26 @@ static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
stm32_fmc2_nfc_setup(chip);
stm32_fmc2_nfc_timings_init(chip);
- if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
+ if (nfc->dma_tx_ch) {
memset(&dma_cfg, 0, sizeof(dma_cfg));
- dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
- dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_cfg.src_maxburst = 32;
- dma_cfg.dst_maxburst = 32;
+ dma_cfg.dst_maxburst = nfc->tx_dma_max_burst /
+ dma_cfg.dst_addr_width;
ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "tx DMA engine slave config failed\n");
return ret;
}
+ }
+
+ if (nfc->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = nfc->rx_dma_max_burst /
+ dma_cfg.src_addr_width;
ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
if (ret) {
@@ -885,17 +903,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw) {
/* Configure DMA ECC status */
- p = nfc->ecc_buf;
for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
- sg_set_buf(sg, p, nfc->dma_ecc_len);
- p += nfc->dma_ecc_len;
- }
-
- ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
- eccsteps, dma_data_dir);
- if (!ret) {
- ret = -EIO;
- goto err_unmap_data;
+ sg_dma_address(sg) = nfc->dma_ecc_addr +
+ s * nfc->dma_ecc_len;
+ sg_dma_len(sg) = nfc->dma_ecc_len;
}
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
@@ -904,7 +915,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
DMA_PREP_INTERRUPT);
if (!desc_ecc) {
ret = -ENOMEM;
- goto err_unmap_ecc;
+ goto err_unmap_data;
}
reinit_completion(&nfc->dma_ecc_complete);
@@ -912,7 +923,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
desc_ecc->callback_param = &nfc->dma_ecc_complete;
ret = dma_submit_error(dmaengine_submit(desc_ecc));
if (ret)
- goto err_unmap_ecc;
+ goto err_unmap_data;
dma_async_issue_pending(nfc->dma_ecc_ch);
}
@@ -932,7 +943,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw)
dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
- goto err_unmap_ecc;
+ goto err_unmap_data;
}
/* Wait DMA data transfer completion */
@@ -952,11 +963,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
}
}
-err_unmap_ecc:
- if (!write_data && !raw)
- dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
- eccsteps, dma_data_dir);
-
err_unmap_data:
dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
@@ -979,9 +985,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
/* Write oob */
if (oob_required) {
- ret = nand_change_write_column_op(chip, mtd->writesize,
- chip->oob_poi, mtd->oobsize,
- false);
+ unsigned int offset_in_page = mtd->writesize;
+ const void *buf = chip->oob_poi;
+ unsigned int len = mtd->oobsize;
+
+ if (!raw) {
+ struct mtd_oob_region oob_free;
+
+ mtd_ooblayout_free(mtd, 0, &oob_free);
+ offset_in_page += oob_free.offset;
+ buf += oob_free.offset;
+ len = oob_free.length;
+ }
+
+ ret = nand_change_write_column_op(chip, offset_in_page,
+ buf, len, false);
if (ret)
return ret;
}
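
In non-raw mode the sequencer must not touch the ECC bytes the controller itself generates, so the write is narrowed to the free OOB window reported by the layout. How that window is looked up, sketched with a hypothetical helper:

#include <linux/mtd/mtd.h>

static int example_oob_write_window(struct mtd_info *mtd,
				    unsigned int *col, unsigned int *len)
{
	struct mtd_oob_region free0;
	int ret;

	/* section 0 of the layout's free list: where user bytes live */
	ret = mtd_ooblayout_free(mtd, 0, &free0);
	if (ret)
		return ret;

	*col = mtd->writesize + free0.offset;	/* column inside the page */
	*len = free0.length;			/* bytes safe to write */
	return 0;
}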
@@ -1545,6 +1563,7 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
{
+ struct dma_slave_caps caps;
int ret = 0;
nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
@@ -1557,6 +1576,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_tx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->tx_dma_max_burst = caps.max_burst;
+
nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
if (IS_ERR(nfc->dma_rx_ch)) {
ret = PTR_ERR(nfc->dma_rx_ch);
@@ -1567,6 +1591,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_rx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->rx_dma_max_burst = caps.max_burst;
+
nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
if (IS_ERR(nfc->dma_ecc_ch)) {
ret = PTR_ERR(nfc->dma_ecc_ch);
@@ -1582,7 +1611,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
return ret;
/* Allocate a buffer to store ECC status registers */
- nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+ nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
+ &nfc->dma_ecc_addr, GFP_KERNEL);
if (!nfc->ecc_buf)
return -ENOMEM;
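
Switching the ECC status buffer from devm_kzalloc() to dmam_alloc_coherent() yields a CPU pointer and a device-visible DMA handle in one managed allocation, which is what lets the earlier hunk fill the ECC scatterlist directly instead of calling dma_map_sg() per transfer. The minimal shape of the call (hypothetical wrapper):

#include <linux/dma-mapping.h>

static void *example_alloc_ecc_buf(struct device *dev, size_t len,
				   dma_addr_t *dma)
{
	/* coherent and devm-managed: freed automatically on detach,
	 * *dma is the address to program into the DMA engine */
	return dmam_alloc_coherent(dev, len, dma, GFP_KERNEL);
}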
@@ -1790,7 +1820,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
return ret;
}
- if (cs >= FMC2_MAX_CE) {
+ if (cs >= nfc->data->max_ncs) {
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
@@ -1823,7 +1853,6 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
struct device_node *dn = nfc->dev->of_node;
- struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
@@ -1837,12 +1866,10 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(dn, child) {
+ for_each_child_of_node_scoped(dn, child) {
ret = stm32_fmc2_nfc_parse_child(nfc, child);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
}
return ret;
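
for_each_child_of_node_scoped() attaches an automatic of_node_put() to the iteration variable, so an early return from the loop body no longer leaks a reference and the explicit of_node_put() on the error path goes away. The resulting pattern, sketched around a hypothetical parse_child():

#include <linux/of.h>

static int example_parse_children(struct device_node *parent)
{
	int ret;

	for_each_child_of_node_scoped(parent, child) {
		ret = parse_child(child);	/* hypothetical handler */
		if (ret < 0)
			return ret;	/* reference dropped automatically */
	}

	return 0;
}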
@@ -1896,9 +1923,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->base);
nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
- ret = stm32_fmc2_nfc_set_cdev(nfc);
- if (ret)
- return ret;
+ nfc->data = of_device_get_match_data(dev);
+ if (!nfc->data)
+ return -EINVAL;
+
+ if (nfc->data->set_cdev) {
+ ret = nfc->data->set_cdev(nfc);
+ if (ret)
+ return ret;
+ } else {
+ nfc->cdev = dev->parent;
+ }
ret = stm32_fmc2_nfc_parse_dt(nfc);
if (ret)
@@ -1917,13 +1952,13 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
if (nfc->dev == nfc->cdev)
start_region = 1;
- for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ for (chip_cs = 0, mem_region = start_region; chip_cs < nfc->data->max_ncs;
chip_cs++, mem_region += 3) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
- res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
- nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev,
+ mem_region, &res);
if (IS_ERR(nfc->data_base[chip_cs]))
return PTR_ERR(nfc->data_base[chip_cs]);
@@ -1951,21 +1986,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
init_completion(&nfc->complete);
- nfc->clk = devm_clk_get(nfc->cdev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(dev, "can not get and enable the clock\n");
return PTR_ERR(nfc->clk);
-
- ret = clk_prepare_enable(nfc->clk);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- return ret;
}
rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
- goto err_clk_disable;
+ return ret;
} else {
reset_control_assert(rstc);
reset_control_deassert(rstc);
@@ -2018,9 +2049,6 @@ err_release_dma:
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
-err_clk_disable:
- clk_disable_unprepare(nfc->clk);
-
return ret;
}
@@ -2045,8 +2073,6 @@ static void stm32_fmc2_nfc_remove(struct platform_device *pdev)
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
- clk_disable_unprepare(nfc->clk);
-
stm32_fmc2_nfc_wp_enable(nand);
}
@@ -2082,7 +2108,7 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
stm32_fmc2_nfc_wp_disable(nand);
- for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ for (chip_cs = 0; chip_cs < nfc->data->max_ncs; chip_cs++) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
@@ -2095,16 +2121,35 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
stm32_fmc2_nfc_resume);
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp1_data = {
+ .max_ncs = 2,
+ .set_cdev = stm32_fmc2_nfc_set_cdev,
+};
+
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp25_data = {
+ .max_ncs = 4,
+};
+
static const struct of_device_id stm32_fmc2_nfc_match[] = {
- {.compatible = "st,stm32mp15-fmc2"},
- {.compatible = "st,stm32mp1-fmc2-nfc"},
+ {
+ .compatible = "st,stm32mp15-fmc2",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp1-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp25-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp25_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
static struct platform_driver stm32_fmc2_nfc_driver = {
.probe = stm32_fmc2_nfc_probe,
- .remove_new = stm32_fmc2_nfc_remove,
+ .remove = stm32_fmc2_nfc_remove,
.driver = {
.name = "stm32_fmc2_nfc",
.of_match_table = stm32_fmc2_nfc_match,
@@ -2113,7 +2158,6 @@ static struct platform_driver stm32_fmc2_nfc_driver = {
};
module_platform_driver(stm32_fmc2_nfc_driver);
-MODULE_ALIAS("platform:stm32_fmc2_nfc");
MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 9884304634f6..9dcdc93734cb 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -30,6 +29,12 @@
#include <linux/iopoll.h>
#include <linux/reset.h>
+/* non compile-time field get/prep */
+#undef field_get
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#undef field_prep
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
#define NFC_REG_INT 0x0008
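
The local field_get()/field_prep() macros mirror FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, but accept masks that are not compile-time constants, since the masks now come from per-SoC capability data; ffs() finds the field's shift at run time. A worked userspace example:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define field_get(mask, reg) (((reg) & (mask)) >> (ffs(mask) - 1))
#define field_prep(mask, val) (((val) << (ffs(mask) - 1)) & (mask))

int main(void)
{
	unsigned int mask = 0xf000;	/* e.g. an ECC_MODE mask; ffs -> 13 */

	printf("%#x\n", field_prep(mask, 0x5));		/* -> 0x5000 */
	printf("%#x\n", field_get(mask, 0x5a00));	/* -> 0x5 */
	return 0;
}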
@@ -46,13 +51,40 @@
#define NFC_REG_A23_IO_DATA 0x0300
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
-#define NFC_REG_DEBUG 0x003C
-#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
-#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
-#define NFC_REG_SPARE_AREA 0x00A0
-#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_H6_PAT_FOUND 0x003C
+#define NFC_REG_A10_ECC_ERR_CNT 0x0040
+#define NFC_REG_H6_ECC_ERR_CNT 0x0050
+#define NFC_REG_ECC_ERR_CNT(nfc, x) ((nfc->caps->reg_ecc_err_cnt + (x)) & ~0x3)
+#define NFC_REG_H6_RDATA_CTL 0x0044
+#define NFC_REG_H6_RDATA_0 0x0048
+#define NFC_REG_H6_RDATA_1 0x004C
+#define NFC_REG_A10_USER_DATA 0x0050
+#define NFC_REG_H6_USER_DATA 0x0080
+#define NFC_REG_USER_DATA(nfc, x) (nfc->caps->reg_user_data + ((x) * 4))
+#define NFC_REG_H6_USER_DATA_LEN 0x0070
+/* A USER_DATA_LEN register can hold the lengths for 8 USER_DATA registers */
+#define NFC_REG_USER_DATA_LEN_CAPACITY 8
+#define NFC_REG_USER_DATA_LEN(nfc, step) \
+ (nfc->caps->reg_user_data_len + \
+ ((step) / NFC_REG_USER_DATA_LEN_CAPACITY) * 4)
+#define NFC_REG_SPARE_AREA(nfc) (nfc->caps->reg_spare_area)
+#define NFC_REG_A10_SPARE_AREA 0x00A0
+#define NFC_REG_PAT_ID(nfc) (nfc->caps->reg_pat_id)
+#define NFC_REG_A10_PAT_ID 0x00A4
#define NFC_REG_MDMA_ADDR 0x00C0
#define NFC_REG_MDMA_CNT 0x00C4
+#define NFC_REG_H6_EFNAND_STATUS 0x0110
+#define NFC_REG_H6_SPARE_AREA 0x0114
+#define NFC_REG_H6_PAT_ID 0x0118
+#define NFC_REG_H6_DDR2_SPEC_CTL 0x011C
+#define NFC_REG_H6_NDMA_MODE_CTL 0x0120
+#define NFC_REG_H6_MDMA_DLBA_REG 0x0200
+#define NFC_REG_H6_MDMA_STA 0x0204
+#define NFC_REG_H6_MDMA_INT_MAS 0x0208
+#define NFC_REG_H6_MDMA_DESC_ADDR 0x020C
+#define NFC_REG_H6_MDMA_BUF_ADDR 0x0210
+#define NFC_REG_H6_MDMA_CNT 0x0214
+
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -64,6 +96,7 @@
#define NFC_BUS_WIDTH_16 (1 << 2)
#define NFC_RB_SEL_MSK BIT(3)
#define NFC_RB_SEL(x) ((x) << 3)
+/* CE_SEL BIT 27 is meant to be used for GPIO chipselect */
#define NFC_CE_SEL_MSK GENMASK(26, 24)
#define NFC_CE_SEL(x) ((x) << 24)
#define NFC_CE_CTL BIT(6)
@@ -82,6 +115,9 @@
#define NFC_STA BIT(4)
#define NFC_NATCH_INT_FLAG BIT(5)
#define NFC_RB_STATE(x) BIT(x + 8)
+#define NFC_RB_STATE_MSK GENMASK(11, 8)
+#define NDFC_RDATA_STA_1 BIT(12)
+#define NDFC_RDATA_STA_0 BIT(13)
/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE BIT(0)
@@ -93,6 +129,7 @@
/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO BIT(8)
+#define NFC_TIMING_CTL_E_EDO BIT(9)
/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
@@ -100,9 +137,15 @@
(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
(((tCAD) & 0x7) << 8))
+#define NFC_TIMING_CFG2(tCDQSS, tSC, tCLHZ, tCSS, tWC) \
+ ((((tCDQSS) & 0x1) << 11) | (((tSC) & 0x3) << 12) | \
+ (((tCLHZ) & 0x3) << 14) | (((tCSS) & 0x3) << 16) | \
+ (((tWC) & 0x3) << 18))
+
/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
-#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
+#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8) /* 15-10 reserved on H6 */
+#define NFC_CMD_ADR_NUM_MSK GENMASK(9, 8)
#define NFC_CMD(x) (x)
#define NFC_ADR_NUM_MSK GENMASK(18, 16)
#define NFC_ADR_NUM(x) (((x) - 1) << 16)
@@ -115,6 +158,7 @@
#define NFC_SEQ BIT(25)
#define NFC_DATA_SWAP_METHOD BIT(26)
#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_H6_SEND_RND_CMD2 BIT(27)
#define NFC_SEND_CMD3 BIT(28)
#define NFC_SEND_CMD4 BIT(29)
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
@@ -126,6 +170,7 @@
#define NFC_READ_CMD_MSK GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
+#define NFC_RND_READ_CMD2_MSK GENMASK(31, 24)
/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
@@ -139,25 +184,46 @@
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
#define NFC_ECC_BLOCK_512 BIT(5)
-#define NFC_RANDOM_EN BIT(9)
-#define NFC_RANDOM_DIRECTION BIT(10)
-#define NFC_ECC_MODE_MSK GENMASK(15, 12)
-#define NFC_ECC_MODE(x) ((x) << 12)
+#define NFC_RANDOM_EN(nfc) (nfc->caps->random_en_mask)
+#define NFC_RANDOM_DIRECTION(nfc) (nfc->caps->random_dir_mask)
+#define NFC_ECC_MODE_MSK(nfc) (nfc->caps->ecc_mode_mask)
+#define NFC_ECC_MODE(nfc, x) field_prep(NFC_ECC_MODE_MSK(nfc), (x))
+/* RANDOM_PAGE_SIZE: 0: ECC block size 1: page size */
+#define NFC_A23_RANDOM_PAGE_SIZE BIT(11)
+#define NFC_H6_RANDOM_PAGE_SIZE BIT(7)
#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
#define NFC_RANDOM_SEED(x) ((x) << 16)
/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
-#define NFC_ECC_ERR_MSK GENMASK(15, 0)
-#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
+#define NFC_ECC_ERR_MSK(nfc) (nfc->caps->ecc_err_mask)
+
+/*
+ * define bit use in NFC_REG_PAT_FOUND
+ * For A10/A23, NFC_REG_PAT_FOUND == NFC_ECC_ST register
+ */
+#define NFC_ECC_PAT_FOUND_MSK(nfc) (nfc->caps->pat_found_mask)
+
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
-#define NFC_DEFAULT_TIMEOUT_MS 1000
+#define NFC_USER_DATA_LEN_MSK(step) \
+ (0xf << (((step) % NFC_REG_USER_DATA_LEN_CAPACITY) * 4))
-#define NFC_SRAM_SIZE 1024
+#define NFC_DEFAULT_TIMEOUT_MS 1000
#define NFC_MAX_CS 7
+/*
+ * On A10/A23, this is the size of the NDFC User Data Register, containing the
+ * mandatory user data bytes following the ECC for each ECC step.
+ * Thus, for each ECC step, we need the ECC bytes + USER_DATA_SZ.
+ * Those bits are currently unused, and kept as default value 0xffffffff.
+ *
+ * On H6/H616, this size became configurable, from 0 bytes to 32, via the
+ * USER_DATA_LEN registers.
+ */
+#define USER_DATA_SZ 4
+
/**
* struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
*
@@ -198,7 +264,7 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int nsels;
- struct sunxi_nand_chip_sel sels[];
+ struct sunxi_nand_chip_sel sels[] __counted_by(nsels);
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
@@ -212,13 +278,57 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
*
* @has_mdma: Use mbus dma mode, otherwise general dma
* through MBUS on A23/A33 needs extra configuration.
+ * @has_ecc_block_512: If the ECC can handle 512B or only 1024B chunks
+ * @has_ecc_clk: If the controller needs an ECC clock.
+ * @has_mbus_clk: If the controller needs a mbus clock.
* @reg_io_data: I/O data register
+ * @reg_ecc_err_cnt: ECC error counter register
+ * @reg_user_data: User data register
+ * @reg_user_data_len: User data length register
+ * @reg_spare_area: Spare Area Register
+ * @reg_pat_id: Pattern ID Register
+ * @reg_pat_found: Data Pattern Status Register
+ * @random_en_mask: RANDOM_EN mask in NFC_ECC_CTL register
+ * @random_dir_mask: RANDOM_DIRECTION mask in NFC_ECC_CTL register
+ * @ecc_mode_mask: ECC_MODE mask in NFC_ECC_CTL register
+ * @ecc_err_mask: NFC_ECC_ERR mask in NFC_ECC_ST register
+ * @pat_found_mask: ECC_PAT_FOUND mask in NFC_REG_PAT_FOUND register
* @dma_maxburst: DMA maxburst
+ * @ecc_strengths: Available ECC strengths array
+ * @nstrengths: Size of @ecc_strengths
+ * @max_ecc_steps: Maximum supported steps for ECC, this is also the
+ * number of user data registers
+ * @user_data_len_tab: Table of lengths supported by USER_DATA_LEN register
+ * The table index is the value to set in NFC_USER_DATA_LEN
+ * registers, and the corresponding value is the number of
+ * bytes to write
+ * @nuser_data_tab: Size of @user_data_len_tab
+ * @sram_size: Size of the NAND controller SRAM
*/
struct sunxi_nfc_caps {
bool has_mdma;
+ bool has_ecc_block_512;
+ bool has_ecc_clk;
+ bool has_mbus_clk;
unsigned int reg_io_data;
+ unsigned int reg_ecc_err_cnt;
+ unsigned int reg_user_data;
+ unsigned int reg_user_data_len;
+ unsigned int reg_spare_area;
+ unsigned int reg_pat_id;
+ unsigned int reg_pat_found;
+ unsigned int random_en_mask;
+ unsigned int random_dir_mask;
+ unsigned int ecc_mode_mask;
+ unsigned int ecc_err_mask;
+ unsigned int pat_found_mask;
unsigned int dma_maxburst;
+ const u8 *ecc_strengths;
+ unsigned int nstrengths;
+ const u8 *user_data_len_tab;
+ unsigned int nuser_data_tab;
+ unsigned int max_ecc_steps;
+ int sram_size;
};
/**
@@ -229,6 +339,8 @@ struct sunxi_nfc_caps {
* @regs: NAND controller registers
* @ahb_clk: NAND controller AHB clock
* @mod_clk: NAND controller mod clock
+ * @ecc_clk: NAND controller ECC clock
+ * @mbus_clk: NAND controller MBUS clock
* @reset: NAND controller reset line
* @assigned_cs: bitmask describing already assigned CS lines
* @clk_rate: NAND controller current clock rate
@@ -244,6 +356,8 @@ struct sunxi_nfc {
void __iomem *regs;
struct clk *ahb_clk;
struct clk *mod_clk;
+ struct clk *ecc_clk;
+ struct clk *mbus_clk;
struct reset_control *reset;
unsigned long assigned_cs;
unsigned long clk_rate;
@@ -432,7 +546,7 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
if (sel->rb >= 0)
ctl |= NFC_RB_SEL(sel->rb);
- writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA(nfc));
if (nfc->clk_rate != sunxi_nand->clk_rate) {
clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
@@ -456,7 +570,7 @@ static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
while (len > offs) {
bool poll = false;
- cnt = min(len - offs, NFC_SRAM_SIZE);
+ cnt = min(len - offs, nfc->caps->sram_size);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -494,7 +608,7 @@ static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
while (len > offs) {
bool poll = false;
- cnt = min(len - offs, NFC_SRAM_SIZE);
+ cnt = min(len - offs, nfc->caps->sram_size);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -624,13 +738,12 @@ static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
bool ecc)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
- u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+ u32 ecc_ctl;
u16 state;
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
- ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
state = sunxi_nfc_randomizer_state(nand, page, ecc);
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
@@ -643,7 +756,7 @@ static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
- writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN(nfc),
nfc->regs + NFC_REG_ECC_CTL);
}
@@ -654,7 +767,7 @@ static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
- writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN(nfc),
nfc->regs + NFC_REG_ECC_CTL);
}
@@ -718,20 +831,66 @@ static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
- sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
- oob);
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(nfc, step)), oob);
/* De-randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
sunxi_nfc_randomize_bbm(nand, page, oob);
}
+/*
+ * On H6/H616 the user_data length has to be set in specific registers
+ * before writing.
+ */
+static void sunxi_nfc_reset_user_data_len(struct sunxi_nfc *nfc)
+{
+ int loop_step = NFC_REG_USER_DATA_LEN_CAPACITY;
+
+ /* not all SoCs have this register */
+ if (!nfc->caps->reg_user_data_len)
+ return;
+
+ for (int i = 0; i < nfc->caps->max_ecc_steps; i += loop_step)
+ writel(0, nfc->regs + NFC_REG_USER_DATA_LEN(nfc, i));
+}
+
+static void sunxi_nfc_set_user_data_len(struct sunxi_nfc *nfc,
+ int len, int step)
+{
+ bool found = false;
+ u32 val;
+ int i;
+
+ /* not all SoCs have this register */
+ if (!nfc->caps->reg_user_data_len)
+ return;
+
+ for (i = 0; i < nfc->caps->nuser_data_tab; i++) {
+ if (len == nfc->caps->user_data_len_tab[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(nfc->dev,
+ "Unsupported length for user data reg: %d\n", len);
+ return;
+ }
+
+ val = readl(nfc->regs + NFC_REG_USER_DATA_LEN(nfc, step));
+
+ val &= ~NFC_USER_DATA_LEN_MSK(step);
+ val |= field_prep(NFC_USER_DATA_LEN_MSK(step), i);
+ writel(val, nfc->regs + NFC_REG_USER_DATA_LEN(nfc, step));
+}
+
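
The H6-style USER_DATA_LEN layout packs one 4-bit length index per ECC step, eight steps per 32-bit register, which is exactly what NFC_REG_USER_DATA_LEN() and NFC_USER_DATA_LEN_MSK() compute above. The indexing, worked through for an example step:

#include <stdio.h>

#define CAP 8	/* 4-bit fields per USER_DATA_LEN register */

int main(void)
{
	unsigned int base = 0x0070;	/* H6 USER_DATA_LEN base */
	unsigned int step = 11;		/* example ECC step */

	unsigned int reg  = base + (step / CAP) * 4;	/* -> 0x74 */
	unsigned int mask = 0xf << ((step % CAP) * 4);	/* -> 0xf000 */

	printf("reg=%#x mask=%#x\n", reg, mask);
	return 0;
}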
static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
const u8 *oob, int step,
bool bbm, int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
- u8 user_data[4];
+ u8 user_data[USER_DATA_SZ];
/* Randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
@@ -741,7 +900,7 @@ static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
}
writel(sunxi_nfc_buf_to_user_data(oob),
- nfc->regs + NFC_REG_USER_DATA(step));
+ nfc->regs + NFC_REG_USER_DATA(nfc, step));
}
static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
@@ -758,7 +917,8 @@ static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
}
static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
- int step, u32 status, bool *erased)
+ int step, u32 status, u32 pattern_found,
+ bool *erased)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -769,10 +929,10 @@ static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
if (status & NFC_ECC_ERR(step))
return -EBADMSG;
- if (status & NFC_ECC_PAT_FOUND(step)) {
+ if (pattern_found & BIT(step)) {
u8 pattern;
- if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID(nfc)) & 0x1))) {
pattern = 0x0;
} else {
pattern = 0xff;
@@ -783,12 +943,12 @@ static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
memset(data, pattern, ecc->size);
if (oob)
- memset(oob, pattern, ecc->bytes + 4);
+ memset(oob, pattern, ecc->bytes + USER_DATA_SZ);
return 0;
}
- tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+ tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(nfc, step));
return NFC_ECC_ERR_CNT(step, tmp);
}
@@ -803,6 +963,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int raw_mode = 0;
+ u32 pattern_found;
bool erased;
int ret;
@@ -818,6 +979,9 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_reset_user_data_len(nfc);
+ sunxi_nfc_set_user_data_len(nfc, USER_DATA_SZ, 0);
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -827,10 +991,14 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
- *cur_off = oob_off + ecc->bytes + 4;
+ *cur_off = oob_off + ecc->bytes + USER_DATA_SZ;
+
+ pattern_found = readl(nfc->regs + nfc->caps->reg_pat_found);
+ pattern_found = field_get(NFC_ECC_PAT_FOUND_MSK(nfc), pattern_found);
ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
readl(nfc->regs + NFC_REG_ECC_ST),
+ pattern_found,
&erased);
if (erased)
return 1;
@@ -847,11 +1015,11 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
- false);
+ nand_change_read_column_op(nand, oob_off, oob,
+ ecc->bytes + USER_DATA_SZ, false);
- ret = nand_check_erased_ecc_chunk(data, ecc->size,
- oob, ecc->bytes + 4,
+ ret = nand_check_erased_ecc_chunk(data, ecc->size, oob,
+ ecc->bytes + USER_DATA_SZ,
NULL, 0, ecc->strength);
if (ret >= 0)
raw_mode = 1;
@@ -861,7 +1029,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (oob_required) {
nand_change_read_column_op(nand, oob_off, NULL, 0,
false);
- sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
+ sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + USER_DATA_SZ,
true, page);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
@@ -911,7 +1079,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
unsigned int max_bitflips = 0;
int ret, i, raw_mode = 0;
struct scatterlist sg;
- u32 status, wait;
+ u32 status, pattern_found, wait;
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -923,6 +1091,8 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
return ret;
sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_reset_user_data_len(nfc);
+ sunxi_nfc_set_user_data_len(nfc, USER_DATA_SZ, 0);
sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
@@ -952,17 +1122,20 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
return ret;
status = readl(nfc->regs + NFC_REG_ECC_ST);
+ pattern_found = readl(nfc->regs + nfc->caps->reg_pat_found);
+ pattern_found = field_get(NFC_ECC_PAT_FOUND_MSK(nfc), pattern_found);
for (i = 0; i < nchunks; i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
bool erased;
ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
oob_required ? oob : NULL,
- i, status, &erased);
+ i, status, pattern_found,
+ &erased);
/* ECC errors are handled in the second loop. */
if (ret < 0)
@@ -972,7 +1145,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
/* TODO: use DMA to retrieve OOB */
nand_change_read_column_op(nand,
mtd->writesize + oob_off,
- oob, ecc->bytes + 4, false);
+ oob, ecc->bytes + USER_DATA_SZ, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
!i, page);
@@ -984,10 +1157,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
- if (status & NFC_ECC_ERR_MSK) {
+ if (status & NFC_ECC_ERR_MSK(nfc)) {
for (i = 0; i < nchunks; i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
@@ -1007,10 +1180,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
/* TODO: use DMA to retrieve OOB */
nand_change_read_column_op(nand,
mtd->writesize + oob_off,
- oob, ecc->bytes + 4, false);
+ oob, ecc->bytes + USER_DATA_SZ, false);
- ret = nand_check_erased_ecc_chunk(data, ecc->size,
- oob, ecc->bytes + 4,
+ ret = nand_check_erased_ecc_chunk(data, ecc->size, oob,
+ ecc->bytes + USER_DATA_SZ,
NULL, 0,
ecc->strength);
if (ret >= 0)
@@ -1050,7 +1223,10 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_reset_user_data_len(nfc);
+ sunxi_nfc_set_user_data_len(nfc, USER_DATA_SZ, 0);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
@@ -1062,7 +1238,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
- *cur_off = oob_off + ecc->bytes + 4;
+ *cur_off = oob_off + ecc->bytes + USER_DATA_SZ;
return 0;
}
@@ -1073,7 +1249,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
- int offset = ((ecc->bytes + 4) * ecc->steps);
+ int offset = ((ecc->bytes + USER_DATA_SZ) * ecc->steps);
int len = mtd->oobsize - offset;
if (len <= 0)
@@ -1106,7 +1282,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
@@ -1165,7 +1341,7 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
u8 *data = bufpoi + data_off;
u8 *oob = nand->oob_poi + oob_off;
@@ -1219,7 +1395,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
const u8 *data = buf + data_off;
const u8 *oob = nand->oob_poi + oob_off;
@@ -1257,7 +1433,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
int data_off = i * ecc->size;
- int oob_off = i * (ecc->bytes + 4);
+ int oob_off = i * (ecc->bytes + USER_DATA_SZ);
const u8 *data = buf + data_off;
const u8 *oob = nand->oob_poi + oob_off;
@@ -1295,10 +1471,12 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
if (ret)
goto pio_fallback;
+ sunxi_nfc_reset_user_data_len(nfc);
for (i = 0; i < ecc->steps; i++) {
- const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+ const u8 *oob = nand->oob_poi + (i * (ecc->bytes + USER_DATA_SZ));
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
+ sunxi_nfc_set_user_data_len(nfc, USER_DATA_SZ, i);
}
nand_prog_page_begin_op(nand, page, 0, NULL, 0);
@@ -1566,7 +1744,7 @@ static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
if (section >= ecc->steps)
return -ERANGE;
- oobregion->offset = section * (ecc->bytes + 4) + 4;
+ oobregion->offset = section * (ecc->bytes + USER_DATA_SZ) + 4;
oobregion->length = ecc->bytes;
return 0;
@@ -1600,10 +1778,10 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
if (section == ecc->steps && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
return -ERANGE;
- oobregion->offset = section * (ecc->bytes + 4);
+ oobregion->offset = section * (ecc->bytes + USER_DATA_SZ);
if (section < ecc->steps)
- oobregion->length = 4;
+ oobregion->length = USER_DATA_SZ;
else
oobregion->length = mtd->oobsize - oobregion->offset;
@@ -1619,9 +1797,9 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
- static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ const u8 *strengths = nfc->caps->ecc_strengths;
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
int nsectors;
@@ -1637,7 +1815,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
bytes = (mtd->oobsize - 2) / nsectors;
/* 4 non-ECC bytes are added before each ECC bytes section */
- bytes -= 4;
+ bytes -= USER_DATA_SZ;
/* and bytes has to be even. */
if (bytes % 2)
@@ -1645,7 +1823,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
ecc->strength = bytes * 8 / fls(8 * ecc->size);
- for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ for (i = 0; i < nfc->caps->nstrengths; i++) {
if (strengths[i] > ecc->strength)
break;
}
@@ -1666,7 +1844,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
}
/* Add ECC info retrieval from DT */
- for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ for (i = 0; i < nfc->caps->nstrengths; i++) {
if (ecc->strength <= strengths[i]) {
/*
* Update ecc->strength value with the actual strength
@@ -1677,7 +1855,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
}
}
- if (i >= ARRAY_SIZE(strengths)) {
+ if (i >= nfc->caps->nstrengths) {
dev_err(nfc->dev, "unsupported strength\n");
return -ENOTSUPP;
}
@@ -1690,7 +1868,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
nsectors = mtd->writesize / ecc->size;
- if (mtd->oobsize < ((ecc->bytes + 4) * nsectors))
+ if (mtd->oobsize < ((ecc->bytes + USER_DATA_SZ) * nsectors))
return -EINVAL;
ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
@@ -1713,11 +1891,17 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
ecc->read_oob_raw = nand_read_oob_std;
ecc->write_oob_raw = nand_write_oob_std;
- sunxi_nand->ecc.ecc_ctl = NFC_ECC_MODE(i) | NFC_ECC_EXCEPTION |
+ sunxi_nand->ecc.ecc_ctl = NFC_ECC_MODE(nfc, i) | NFC_ECC_EXCEPTION |
NFC_ECC_PIPELINE | NFC_ECC_EN;
- if (ecc->size == 512)
- sunxi_nand->ecc.ecc_ctl |= NFC_ECC_BLOCK_512;
+ if (ecc->size == 512) {
+ if (nfc->caps->has_ecc_block_512) {
+ sunxi_nand->ecc.ecc_ctl |= NFC_ECC_BLOCK_512;
+ } else {
+ dev_err(nfc->dev, "512B ECC block not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
@@ -1806,7 +1990,7 @@ static int sunxi_nfc_exec_subop(struct nand_chip *nand,
case NAND_OP_DATA_OUT_INSTR:
start = nand_subop_get_data_start_off(subop, i);
remaining = nand_subop_get_data_len(subop, i);
- cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
+ cnt = min_t(u32, remaining, nfc->caps->sram_size);
cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
if (instr->type == NAND_OP_DATA_OUT_INSTR) {
@@ -2026,13 +2210,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = sunxi_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
sunxi_nand_chips_cleanup(nfc);
return ret;
}
@@ -2087,8 +2269,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->regs = devm_ioremap_resource(dev, r);
+ nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
@@ -2096,43 +2277,46 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ nfc->caps = of_device_get_match_data(dev);
+ if (!nfc->caps)
+ return -EINVAL;
+
+ nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
dev_err(dev, "failed to retrieve ahb clk\n");
return PTR_ERR(nfc->ahb_clk);
}
- ret = clk_prepare_enable(nfc->ahb_clk);
- if (ret)
- return ret;
-
- nfc->mod_clk = devm_clk_get(dev, "mod");
+ nfc->mod_clk = devm_clk_get_enabled(dev, "mod");
if (IS_ERR(nfc->mod_clk)) {
dev_err(dev, "failed to retrieve mod clk\n");
- ret = PTR_ERR(nfc->mod_clk);
- goto out_ahb_clk_unprepare;
+ return PTR_ERR(nfc->mod_clk);
}
- ret = clk_prepare_enable(nfc->mod_clk);
- if (ret)
- goto out_ahb_clk_unprepare;
+ if (nfc->caps->has_ecc_clk) {
+ nfc->ecc_clk = devm_clk_get_enabled(dev, "ecc");
+ if (IS_ERR(nfc->ecc_clk)) {
+ dev_err(dev, "failed to retrieve ecc clk\n");
+ return PTR_ERR(nfc->ecc_clk);
+ }
+ }
- nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
- if (IS_ERR(nfc->reset)) {
- ret = PTR_ERR(nfc->reset);
- goto out_mod_clk_unprepare;
+ if (nfc->caps->has_mbus_clk) {
+ nfc->mbus_clk = devm_clk_get_enabled(dev, "mbus");
+ if (IS_ERR(nfc->mbus_clk)) {
+ dev_err(dev, "failed to retrieve mbus clk\n");
+ return PTR_ERR(nfc->mbus_clk);
+ }
}
+ nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
+ if (IS_ERR(nfc->reset))
+ return PTR_ERR(nfc->reset);
+
ret = reset_control_deassert(nfc->reset);
if (ret) {
dev_err(dev, "reset err %d\n", ret);
- goto out_mod_clk_unprepare;
- }
-
- nfc->caps = of_device_get_match_data(&pdev->dev);
- if (!nfc->caps) {
- ret = -EINVAL;
- goto out_ahb_reset_reassert;
+ return ret;
}
ret = sunxi_nfc_rst(nfc);
@@ -2165,10 +2349,6 @@ out_release_dmac:
dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
reset_control_assert(nfc->reset);
-out_mod_clk_unprepare:
- clk_disable_unprepare(nfc->mod_clk);
-out_ahb_clk_unprepare:
- clk_disable_unprepare(nfc->ahb_clk);
return ret;
}
@@ -2183,19 +2363,83 @@ static void sunxi_nfc_remove(struct platform_device *pdev)
if (nfc->dmac)
dma_release_channel(nfc->dmac);
- clk_disable_unprepare(nfc->mod_clk);
- clk_disable_unprepare(nfc->ahb_clk);
}
+static const u8 sunxi_ecc_strengths_a10[] = {
+ 16, 24, 28, 32, 40, 48, 56, 60, 64
+};
+
+static const u8 sunxi_ecc_strengths_h6[] = {
+ 16, 24, 28, 32, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80
+};
+
+static const u8 sunxi_user_data_len_h6[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28, 32
+};
+
static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
+ .has_ecc_block_512 = true,
.reg_io_data = NFC_REG_A10_IO_DATA,
+ .reg_ecc_err_cnt = NFC_REG_A10_ECC_ERR_CNT,
+ .reg_user_data = NFC_REG_A10_USER_DATA,
+ .reg_spare_area = NFC_REG_A10_SPARE_AREA,
+ .reg_pat_id = NFC_REG_A10_PAT_ID,
+ .reg_pat_found = NFC_REG_ECC_ST,
+ .random_en_mask = BIT(9),
+ .random_dir_mask = BIT(10),
+ .ecc_mode_mask = GENMASK(15, 12),
+ .ecc_err_mask = GENMASK(15, 0),
+ .pat_found_mask = GENMASK(31, 16),
.dma_maxburst = 4,
+ .ecc_strengths = sunxi_ecc_strengths_a10,
+ .nstrengths = ARRAY_SIZE(sunxi_ecc_strengths_a10),
+ .max_ecc_steps = 16,
+ .sram_size = 1024,
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
.has_mdma = true,
+ .has_ecc_block_512 = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
+ .reg_ecc_err_cnt = NFC_REG_A10_ECC_ERR_CNT,
+ .reg_user_data = NFC_REG_A10_USER_DATA,
+ .reg_spare_area = NFC_REG_A10_SPARE_AREA,
+ .reg_pat_id = NFC_REG_A10_PAT_ID,
+ .reg_pat_found = NFC_REG_ECC_ST,
+ .random_en_mask = BIT(9),
+ .random_dir_mask = BIT(10),
+ .ecc_mode_mask = GENMASK(15, 12),
+ .ecc_err_mask = GENMASK(15, 0),
+ .pat_found_mask = GENMASK(31, 16),
.dma_maxburst = 8,
+ .ecc_strengths = sunxi_ecc_strengths_a10,
+ .nstrengths = ARRAY_SIZE(sunxi_ecc_strengths_a10),
+ .max_ecc_steps = 16,
+ .sram_size = 1024,
+};
+
+static const struct sunxi_nfc_caps sunxi_nfc_h616_caps = {
+ .has_ecc_clk = true,
+ .has_mbus_clk = true,
+ .reg_io_data = NFC_REG_A23_IO_DATA,
+ .reg_ecc_err_cnt = NFC_REG_H6_ECC_ERR_CNT,
+ .reg_user_data = NFC_REG_H6_USER_DATA,
+ .reg_user_data_len = NFC_REG_H6_USER_DATA_LEN,
+ .reg_spare_area = NFC_REG_H6_SPARE_AREA,
+ .reg_pat_id = NFC_REG_H6_PAT_ID,
+ .reg_pat_found = NFC_REG_H6_PAT_FOUND,
+ .random_en_mask = BIT(5),
+ .random_dir_mask = BIT(6),
+ .ecc_mode_mask = GENMASK(15, 8),
+ .ecc_err_mask = GENMASK(31, 0),
+ .pat_found_mask = GENMASK(31, 0),
+ .dma_maxburst = 8,
+ .ecc_strengths = sunxi_ecc_strengths_h6,
+ .nstrengths = ARRAY_SIZE(sunxi_ecc_strengths_h6),
+ .user_data_len_tab = sunxi_user_data_len_h6,
+ .nuser_data_tab = ARRAY_SIZE(sunxi_user_data_len_h6),
+ .max_ecc_steps = 32,
+ .sram_size = 8192,
};
static const struct of_device_id sunxi_nfc_ids[] = {
@@ -2207,6 +2451,10 @@ static const struct of_device_id sunxi_nfc_ids[] = {
.compatible = "allwinner,sun8i-a23-nand-controller",
.data = &sunxi_nfc_a23_caps,
},
+ {
+ .compatible = "allwinner,sun50i-h616-nand-controller",
+ .data = &sunxi_nfc_h616_caps,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
@@ -2217,11 +2465,10 @@ static struct platform_driver sunxi_nfc_driver = {
.of_match_table = sunxi_nfc_ids,
},
.probe = sunxi_nfc_probe,
- .remove_new = sunxi_nfc_remove,
+ .remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
-MODULE_ALIAS("platform:sunxi_nand");
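
The probe-path rework above leans on devm_clk_get_enabled(), which folds clk_get(), clk_prepare_enable() and the matching teardown into a single device-managed call. A minimal sketch of the pattern with a hypothetical driver name (only the "ahb" clock id is taken from the diff):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	/*
	 * Sketch: devm_clk_get_enabled() acquires and enables the clock and
	 * registers a devres action that disables and puts it again on probe
	 * failure or unbind, so no error-unwind labels are needed.
	 */
	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = devm_clk_get_enabled(&pdev->dev, "ahb");
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to retrieve ahb clk\n");

		/* ... no clk_disable_unprepare() in any error path ... */
		return 0;
	}
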
diff --git a/drivers/mtd/nand/raw/technologic-nand-controller.c b/drivers/mtd/nand/raw/technologic-nand-controller.c
new file mode 100644
index 000000000000..a3294aaf43bd
--- /dev/null
+++ b/drivers/mtd/nand/raw/technologic-nand-controller.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Technologic Systems TS72xx NAND controller driver
+ *
+ * Copyright (C) 2023 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Derived from: plat_nand.c
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+
+#define TS72XX_NAND_CONTROL_ADDR_LINE BIT(22) /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE BIT(23) /* 0xN0800000 */
+
+#define TS72XX_NAND_ALE BIT(0)
+#define TS72XX_NAND_CLE BIT(1)
+#define TS72XX_NAND_NCE BIT(2)
+
+#define TS72XX_NAND_CTRL_CLE (TS72XX_NAND_NCE | TS72XX_NAND_CLE)
+#define TS72XX_NAND_CTRL_ALE (TS72XX_NAND_NCE | TS72XX_NAND_ALE)
+
+struct ts72xx_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ void __iomem *base;
+ void __iomem *ctrl;
+ void __iomem *busy;
+};
+
+static inline struct ts72xx_nand_data *chip_to_ts72xx(struct nand_chip *chip)
+{
+ return container_of(chip, struct ts72xx_nand_data, chip);
+}
+
+static int ts72xx_nand_attach_chip(struct nand_chip *chip)
+{
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ return -EINVAL;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+
+static void ts72xx_nand_ctrl(struct nand_chip *chip, u8 value)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned char bits = ioread8(data->ctrl) & ~GENMASK(2, 0);
+
+ iowrite8(bits | value, data->ctrl);
+}
+
+static int ts72xx_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned int timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_CLE);
+ iowrite8(instr->ctx.cmd.opcode, data->base);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_ALE);
+ iowrite8_rep(data->base, instr->ctx.addr.addrs, instr->ctx.addr.naddrs);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ioread8_rep(data->base, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+		iowrite8_rep(data->base, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = readb_poll_timeout(data->busy, status, status & BIT(5), 0, timeout_us);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int ts72xx_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ts72xx_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops ts72xx_nand_ops = {
+ .attach_chip = ts72xx_nand_attach_chip,
+ .exec_op = ts72xx_nand_exec_op,
+};
+
+static int ts72xx_nand_probe(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data;
+ struct fwnode_handle *child;
+ struct mtd_info *mtd;
+ int err;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ nand_controller_init(&data->controller);
+ data->controller.ops = &ts72xx_nand_ops;
+ data->chip.controller = &data->controller;
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+ data->ctrl = data->base + TS72XX_NAND_CONTROL_ADDR_LINE;
+ data->busy = data->base + TS72XX_NAND_BUSY_ADDR_LINE;
+
+ child = fwnode_get_next_child_node(dev_fwnode(&pdev->dev), NULL);
+ if (!child)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "ts72xx controller node should have exactly one child\n");
+
+ nand_set_flash_node(&data->chip, to_of_node(child));
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, 1);
+ if (err)
+ goto err_handle_put;
+
+ err = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ if (err)
+ goto err_clean_nand;
+
+ return 0;
+
+err_clean_nand:
+ nand_cleanup(&data->chip);
+err_handle_put:
+ fwnode_handle_put(child);
+ return err;
+}
+
+static void ts72xx_nand_remove(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data = platform_get_drvdata(pdev);
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ fwnode_handle_put(fwnode);
+}
+
+static const struct of_device_id ts72xx_id_table[] = {
+ { .compatible = "technologic,ts7200-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ts72xx_id_table);
+
+static struct platform_driver ts72xx_nand_driver = {
+ .driver = {
+ .name = "ts72xx-nand",
+ .of_match_table = ts72xx_id_table,
+ },
+ .probe = ts72xx_nand_probe,
+ .remove = ts72xx_nand_remove,
+};
+module_platform_driver(ts72xx_nand_driver);
+
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Technologic Systems TS72xx NAND controller driver");
+MODULE_LICENSE("GPL");
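
The controller above implements only the ->exec_op() hook; the raw NAND core expresses every chip access as an array of generic instructions, which ts72xx_nand_exec_instr() replays one at a time. A hedged sketch of such an operation for a READ ID sequence, built from the generic helpers in <linux/mtd/rawnand.h> (the surrounding chip pointer is assumed):

	/* Sketch: a READ ID (0x90) operation as the core would build it. */
	u8 addr = 0;
	u8 id[2];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READID, 0),	 /* CLE strobe, opcode 0x90 */
		NAND_OP_ADDR(1, &addr, 0),		 /* ALE strobe, one address byte */
		NAND_OP_8BIT_DATA_IN(sizeof(id), id, 0), /* two ID bytes off the bus */
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);
	int ret = nand_exec_op(chip, &op);
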
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index eb0b9d16e8da..7f9eb5f042a7 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
init_completion(&ctrl->dma_complete);
ctrl->irq = platform_get_irq(pdev, 0);
+ if (ctrl->irq < 0) {
+ err = ctrl->irq;
+ goto err_put_pm;
+ }
err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
dev_name(&pdev->dev), ctrl);
if (err) {
@@ -1275,7 +1279,7 @@ static struct platform_driver tegra_nand_driver = {
.pm = &tegra_nand_pm,
},
.probe = tegra_nand_probe,
- .remove_new = tegra_nand_remove,
+ .remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index eddcc0728a67..907fb5de4269 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -276,7 +276,7 @@ static const struct nand_controller_ops txx9ndfmc_controller_ops = {
.attach_chip = txx9ndfmc_attach_chip,
};
-static int __init txx9ndfmc_probe(struct platform_device *dev)
+static int txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
@@ -369,13 +369,11 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
return 0;
}
-static int __exit txx9ndfmc_remove(struct platform_device *dev)
+static void txx9ndfmc_remove(struct platform_device *dev)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int ret, i;
- if (!drvdata)
- return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
struct mtd_info *mtd = drvdata->mtds[i];
struct nand_chip *chip;
@@ -392,7 +390,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
- return 0;
}
#ifdef CONFIG_PM
@@ -407,14 +404,14 @@ static int txx9ndfmc_resume(struct platform_device *dev)
#endif
static struct platform_driver txx9ndfmc_driver = {
- .remove = __exit_p(txx9ndfmc_remove),
+ .probe = txx9ndfmc_probe,
+ .remove = txx9ndfmc_remove,
.resume = txx9ndfmc_resume,
.driver = {
.name = "txx9ndfmc",
},
};
-
-module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+module_platform_driver(txx9ndfmc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 86522048e271..4b5ba3187853 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -29,8 +29,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -810,7 +811,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *chip;
struct device_node *child;
- const struct of_device_id *of_id;
int err;
int irq;
@@ -827,30 +827,22 @@ static int vf610_nfc_probe(struct platform_device *pdev)
mtd->name = DRV_NAME;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
- nfc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(nfc->dev, "Unable to get and enable clock!\n");
return PTR_ERR(nfc->clk);
-
- err = clk_prepare_enable(nfc->clk);
- if (err) {
- dev_err(nfc->dev, "Unable to enable clock!\n");
- return err;
}
- of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
- if (!of_id) {
- err = -ENODEV;
- goto err_disable_clk;
- }
-
- nfc->variant = (enum vf610_nfc_variant)of_id->data;
+ nfc->variant = (enum vf610_nfc_variant)device_get_match_data(&pdev->dev);
+ if (!nfc->variant)
+ return -ENODEV;
for_each_available_child_of_node(nfc->dev->of_node, child) {
if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
@@ -858,9 +850,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (nand_get_flash_node(chip)) {
dev_err(nfc->dev,
"Only one NAND chip supported!\n");
- err = -EINVAL;
of_node_put(child);
- goto err_disable_clk;
+ return -EINVAL;
}
nand_set_flash_node(chip, child);
@@ -869,8 +860,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (!nand_get_flash_node(chip)) {
dev_err(nfc->dev, "NAND chip sub-node missing!\n");
- err = -ENODEV;
- goto err_disable_clk;
+ return -ENODEV;
}
chip->options |= NAND_NO_SUBPAGE_WRITE;
@@ -880,7 +870,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
- goto err_disable_clk;
+ return err;
}
vf610_nfc_preinit_controller(nfc);
@@ -892,7 +882,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
/* Scan the NAND chip */
err = nand_scan(chip, 1);
if (err)
- goto err_disable_clk;
+ return err;
platform_set_drvdata(pdev, nfc);
@@ -904,8 +894,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err_cleanup_nand:
nand_cleanup(chip);
-err_disable_clk:
- clk_disable_unprepare(nfc->clk);
return err;
}
@@ -918,7 +906,6 @@ static void vf610_nfc_remove(struct platform_device *pdev)
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
- clk_disable_unprepare(nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -954,7 +941,7 @@ static struct platform_driver vf610_nfc_driver = {
.pm = &vf610_nfc_pm_ops,
},
.probe = vf610_nfc_probe,
- .remove_new = vf610_nfc_remove,
+ .remove = vf610_nfc_remove,
};
module_platform_driver(vf610_nfc_driver);
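
The conversion above works because device_get_match_data() returns the .data pointer of the matched entry directly, regardless of how the device was matched. A minimal sketch with a hypothetical table; note that a variant value of 0 is indistinguishable from a failed match, which is why the first real variant in such tables must be non-zero (as vf610's is):

	#include <linux/property.h>

	enum foo_variant { FOO_VARIANT_A = 1, FOO_VARIANT_B };

	static const struct of_device_id foo_dt_ids[] = {
		{ .compatible = "vendor,foo-a", .data = (void *)FOO_VARIANT_A },
		{ .compatible = "vendor,foo-b", .data = (void *)FOO_VARIANT_B },
		{ /* sentinel */ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		enum foo_variant variant;

		/* device_get_match_data() returns NULL (== 0) on no match. */
		variant = (enum foo_variant)(uintptr_t)device_get_match_data(&pdev->dev);
		if (!variant)
			return -ENODEV;
		/* ... */
		return 0;
	}
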
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 6b1e2a2bba15..af84395dc66e 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -6,8 +6,8 @@
*/
#include <linux/mtd/rawnand.h>
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
@@ -256,7 +256,7 @@ static const struct of_device_id xway_nand_match[] = {
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
- .remove_new = xway_nand_remove,
+ .remove = xway_nand_remove,
.driver = {
.name = "lantiq,nand-xway",
.of_match_table = xway_nand_match,
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index cd8b66bf7740..6d3d203df048 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
-spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o otp.o
+spinand-objs += alliancememory.o ato.o esmt.o fmsh.o foresee.o gigadevice.o macronix.o
+spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 7936ea546b03..9e97c40955c9 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,20 +17,20 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index 82b377c06812..45d38ce0736c 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,17 +14,17 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 393ff37f0d23..d207286572d8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -20,9 +20,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -34,9 +34,9 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
-static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -288,6 +294,9 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
/* Only enable or disable the engine */
@@ -311,15 +320,27 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
/* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG) {
mtd->ecc_stats.failed++;
- else if (ret > 0)
- mtd->ecc_stats.corrected += ret;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+			 * Continuous reads don't expose the per-page detail,
+			 * so we may exaggerate the actual number of corrected
+			 * bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
+ mtd->ecc_stats.corrected += ret * pages;
+ }
return ret;
}
-static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
.init_ctx = spinand_ondie_ecc_init_ctx,
.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
@@ -339,9 +360,9 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
engine_conf->status = status;
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -351,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -369,7 +390,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
if (req->datalen) {
buf = spinand->databuf;
- nbytes = nanddev_page_size(nand);
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
column = 0;
}
@@ -386,6 +411,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
else
rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+ if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
+
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
if (ret < 0)
@@ -397,6 +425,21 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
nbytes -= ret;
column += ret;
buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous) {
+ /*
+			 * An SPI controller with broken continuous read
+			 * support was detected. Disable future use of
+			 * continuous reads and return -EAGAIN so the read
+			 * is retried in regular mode.
+ */
+ spinand->cont_read_possible = false;
+ return -EAGAIN;
+ }
}
if (req->datalen)
@@ -460,6 +503,9 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
+ if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
+
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
if (ret < 0)
@@ -481,7 +527,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -491,18 +537,28 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand,
- unsigned long initial_delay_us,
- unsigned long poll_delay_us,
- u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * the STATUS_READY bit is 0 or when the timeout has expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
@@ -535,7 +591,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -548,7 +604,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -566,8 +622,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -597,8 +661,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -624,31 +696,33 @@ static int spinand_write_page(struct spinand_device *spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
- if (!ret && (status & STATUS_PROG_FAILED))
+ if (ret)
+ return ret;
+
+ if (status & STATUS_PROG_FAILED)
return -EIO;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mtd_ecc_stats old_stats;
- unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
- int ret = 0;
-
- if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
- disable_ecc = true;
-
- mutex_lock(&spinand->lock);
+ unsigned int retry_mode = 0;
+ int ret;
old_stats = mtd->ecc_stats;
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
+
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
@@ -657,18 +731,194 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
+read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- else
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ } else {
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret;
+}
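
The retry loop above is only entered when the matched chip supplies a ->set_read_retry() hook. What that hook does is vendor specific; a hedged sketch, with a made-up feature-register address (no chip in this diff is implied):

	/*
	 * Sketch: step the on-die read-retry mode through a vendor feature
	 * register; mode 0 restores normal reads. Register 0x89 is purely
	 * illustrative.
	 */
	#define FOO_REG_READ_RETRY	0x89

	static int foo_set_read_retry(struct spinand_device *spinand,
				      u32 retry_mode)
	{
		return spinand_write_reg_op(spinand, FOO_REG_READ_RETRY,
					    retry_mode);
	}
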
+
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+	 * Each data read must be a multiple of 4 bytes, and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = nand_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = nand_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee the SPI controller drivers will effectively deassert the CS
+ * when we expect them to, so take the register based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
+
+ /* OOBs cannot be retrieved so external/on-host ECC engine won't work */
+ if (spinand->set_cont_read &&
+ (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
+ engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
+ spinand->cont_read_possible = true;
+ }
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+ * Continuous reads never cross LUN boundaries. Some devices don't
+ * support crossing planes boundaries. Some devices don't even support
+ * crossing blocks boundaries. The common case being to read through UBI,
+ * we will very rarely read two consequent blocks or more, so it is safer
+ * and easier (can be improved) to only enable continuous reads when
+ * reading within the same erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
+ return start_pos.page < end_pos.page;
+}
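
A worked instance of the eligibility check above, assuming 2 KiB pages and 64 pages per 128 KiB eraseblock (illustrative geometry only):

	/*
	 * from = 0x20000 (block 1, page 0), ops->len = 8192 (4 pages):
	 *   end_pos = block 1, page 3 -> same block, start page < end page,
	 *   so the continuous read path is taken.
	 *
	 * from = 0x3F000 (block 1, page 62), ops->len = 8192:
	 *   end_pos lands in block 2 -> fall back to regular page reads.
	 */
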
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct mtd_ecc_stats old_stats;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ mutex_lock(&spinand->lock);
+
+ old_stats = mtd->ecc_stats;
+
+ if (spinand_use_cont_read(mtd, from, ops)) {
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ if (ret == -EAGAIN && !spinand->cont_read_possible) {
+ /*
+			 * An SPI controller with broken continuous read support
+			 * was detected (see spinand_read_from_cache_op());
+			 * repeat the read in regular mode.
+ */
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+ } else {
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
}
if (ops->stats) {
@@ -680,9 +930,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_unlock(&spinand->lock);
- if (ecc_failed && !ret)
- ret = -EBADMSG;
-
return ret ? ret : max_bitflips;
}
@@ -732,9 +979,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -773,11 +1028,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -852,19 +1110,50 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static struct spi_mem_dirmap_desc *spinand_create_rdesc(
+ struct spinand_device *spinand,
+ struct spi_mem_dirmap_info *info)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_desc *desc = NULL;
+
+ if (spinand->cont_read_possible) {
+ /*
+		 * The SPI controller may return an error if info->length
+		 * is too large.
+ */
+ info->length = nanddev_eraseblock_size(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ if (IS_ERR_OR_NULL(desc)) {
+ /*
+		 * Continuous reads are not supported by the flash or its
+		 * SPI controller; fall back to regular page-sized reads.
+ */
+ spinand->cont_read_possible = false;
+
+ info->length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ return desc;
+}
+
static int spinand_create_dirmap(struct spinand_device *spinand,
unsigned int plane)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct spi_mem_dirmap_info info = {
- .length = nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- };
+ struct spi_mem_dirmap_info info = { 0 };
struct spi_mem_dirmap_desc *desc;
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
info.op_tmpl = *spinand->op_templates.update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
@@ -874,8 +1163,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
spinand->dirmaps[plane].wdesc = desc;
info.op_tmpl = *spinand->op_templates.read_cache;
- desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
- spinand->spimem, &info);
+ desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -888,6 +1176,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
return 0;
}
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
info.op_tmpl = *spinand->op_templates.update_cache;
info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
@@ -899,8 +1188,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
info.op_tmpl = *spinand->op_templates.read_cache;
info.op_tmpl.data.ecc = true;
- desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
- spinand->spimem, &info);
+ desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -939,11 +1227,15 @@ static const struct nand_ops spinand_ops = {
static const struct spinand_manufacturer *spinand_manufacturers[] = {
&alliancememory_spinand_manufacturer,
&ato_spinand_manufacturer,
+ &esmt_8c_spinand_manufacturer,
&esmt_c8_spinand_manufacturer,
+ &fmsh_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
&xtx_spinand_manufacturer,
@@ -973,7 +1265,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
@@ -1007,8 +1299,19 @@ static int spinand_id_detect(struct spinand_device *spinand)
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
- if (spinand->manufacturer->ops->init)
- return spinand->manufacturer->ops->init(spinand);
+ int ret;
+
+ if (spinand->manufacturer->ops->init) {
+ ret = spinand->manufacturer->ops->init(spinand);
+ if (ret)
+ return ret;
+ }
+
+ if (spinand->configure_chip) {
+ ret = spinand->configure_chip(spinand);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1025,10 +1328,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -1041,17 +1347,23 @@ spinand_select_op_variant(struct spinand_device *spinand,
if (ret)
break;
+ spi_mem_adjust_op_freq(spinand->spimem, &op);
+
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
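
Instead of taking the first supported variant, the loop above now adjusts each candidate to the frequency the controller can actually drive and keeps the one with the shortest total duration. A worked comparison with illustrative numbers (the controller limits are assumptions, not from the diff):

	/*
	 * Assume the controller caps 1S-4S-4S reads at 20 MHz but can run
	 * 1S-1S-1S reads at 104 MHz. For a 2112-byte data phase:
	 *
	 *   quad   @  20 MHz: 2112 * 8 / 4 cycles / 20 MHz  ~= 211 us
	 *   single @ 104 MHz: 2112 * 8 / 1 cycles / 104 MHz ~= 162 us
	 *
	 * so the single-line variant is selected even though it uses fewer
	 * data lines.
	 */
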
/**
@@ -1094,6 +1406,12 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->configure_chip = table[i].configure_chip;
+ spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1235,9 +1553,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
@@ -1266,6 +1583,12 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_cleanup_nanddev;
+ /*
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
+ */
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1275,6 +1598,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
+
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
@@ -1286,6 +1615,7 @@ static int spinand_init(struct spinand_device *spinand)
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
ret = spinand_create_dirmaps(spinand);
if (ret) {
@@ -1316,6 +1646,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
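
The new bitflip_threshold default above sits at three quarters of the ECC strength, rather than MTD's usual fallback of the full strength. A worked instance:

	/*
	 * For an 8-bit/512-byte on-die ECC:
	 *   bitflip_threshold = DIV_ROUND_UP(8 * 3, 4) = 6
	 * so mtd_read() starts returning -EUCLEAN (prompting UBI to scrub)
	 * at 6 bitflips, before the 8-bit correction limit is exhausted.
	 */
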
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 1a3ffb982335..e60e4ac1fd6f 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -8,23 +8,29 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
/* ESMT uses the GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
+#define SPINAND_MFR_ESMT_8C 0x8c
+
+#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
+#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -102,30 +108,146 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.free = f50l1g41lb_ooblayout_free,
};
+static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen, bool user)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
+ ESMT_F50L1G41LB_CFG_OTP_LOCK);
+	if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+	if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+	if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
+ .info = f50l1g41lb_user_otp_info,
+ .lock = f50l1g41lb_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
+ .info = f50l1g41lb_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
+static const struct spinand_info esmt_8c_spinand_table[] = {
+ SPINAND_INFO("F50L1G41LC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x2C),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
+};
+
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
+ 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
+ SPINAND_INFO("F50D2G41KA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
+ 0x7f, 0x7f),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
};
static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = {
};
+const struct spinand_manufacturer esmt_8c_spinand_manufacturer = {
+ .id = SPINAND_MFR_ESMT_8C,
+ .name = "ESMT",
+ .chips = esmt_8c_spinand_table,
+ .nchips = ARRAY_SIZE(esmt_8c_spinand_table),
+ .ops = &esmt_spinand_manuf_ops,
+};
+
const struct spinand_manufacturer esmt_c8_spinand_manufacturer = {
.id = SPINAND_MFR_ESMT_C8,
.name = "ESMT",
diff --git a/drivers/mtd/nand/spi/fmsh.c b/drivers/mtd/nand/spi/fmsh.c
new file mode 100644
index 000000000000..f417955f7d1c
--- /dev/null
+++ b/drivers/mtd/nand/spi/fmsh.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Dingqiang Lin <jon.lin@rock-chips.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define FM25S01BI3_STATUS_ECC_MASK (7 << 4)
+ #define FM25S01BI3_STATUS_ECC_NO_BITFLIPS (0 << 4)
+ #define FM25S01BI3_STATUS_ECC_1_3_BITFLIPS (1 << 4)
+ #define FM25S01BI3_STATUS_ECC_UNCOR_ERROR (2 << 4)
+ #define FM25S01BI3_STATUS_ECC_4_6_BITFLIPS (3 << 4)
+ #define FM25S01BI3_STATUS_ECC_7_8_BITFLIPS (5 << 4)
+
+#define SPINAND_MFR_FMSH 0xA1
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int fm25s01a_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int fm25s01a_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static int fm25s01bi3_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & FM25S01BI3_STATUS_ECC_MASK) {
+ case FM25S01BI3_STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case FM25S01BI3_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case FM25S01BI3_STATUS_ECC_1_3_BITFLIPS:
+ return 3;
+
+ case FM25S01BI3_STATUS_ECC_4_6_BITFLIPS:
+ return 6;
+
+ case FM25S01BI3_STATUS_ECC_7_8_BITFLIPS:
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int fm25s01bi3_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int fm25s01bi3_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 4;
+ region->length = 12;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fm25s01a_ooblayout = {
+ .ecc = fm25s01a_ooblayout_ecc,
+ .free = fm25s01a_ooblayout_free,
+};
+
+static const struct mtd_ooblayout_ops fm25s01bi3_ooblayout = {
+ .ecc = fm25s01bi3_ooblayout_ecc,
+ .free = fm25s01bi3_ooblayout_free,
+};
+
+static const struct spinand_info fmsh_spinand_table[] = {
+ SPINAND_INFO("FM25S01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)),
+ SPINAND_INFO("FM25S01BI3",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xd4),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&fm25s01bi3_ooblayout,
+ fm25s01bi3_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer fmsh_spinand_manufacturer = {
+ .id = SPINAND_MFR_FMSH,
+ .name = "Fudan Micro",
+ .chips = fmsh_spinand_table,
+ .nchips = ARRAY_SIZE(fmsh_spinand_table),
+ .ops = &fmsh_spinand_manuf_ops,
+};
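
The ECC field decoding above reports the pessimistic end of each bitflip range. A worked decode (the status value is illustrative):

	/*
	 * status = 0x34:
	 *   status & FM25S01BI3_STATUS_ECC_MASK = 0x30 = (3 << 4)
	 *   -> FM25S01BI3_STATUS_ECC_4_6_BITFLIPS, so 6 is returned and
	 *      compared by the core against mtd->bitflip_threshold.
	 */
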
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
new file mode 100644
index 000000000000..c521dd6abc4b
--- /dev/null
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FORESEE 0xCD
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
+ .ecc = f35sqa002g_ooblayout_ecc,
+ .free = f35sqa002g_ooblayout_free,
+};
+
+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ default:
+ break;
+ }
+
+ /* More than 1-bit error was detected in one or more sectors and
+ * cannot be corrected.
+ */
+ return -EBADMSG;
+}
+
+static const struct spinand_info foresee_spinand_table[] = {
+ SPINAND_INFO("F35SQA002G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer foresee_spinand_manufacturer = {
+ .id = SPINAND_MFR_FORESEE,
+ .name = "FORESEE",
+ .chips = foresee_spinand_table,
+ .nchips = ARRAY_SIZE(foresee_spinand_table),
+ .ops = &foresee_spinand_manuf_ops,
+};
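
Both FORESEE parts are probed with SPINAND_READID_METHOD_OPCODE_DUMMY, i.e. one dummy byte sits between the Read ID opcode and the returned bytes. How detection would see it, with the ID bytes taken from the table above:

	/*
	 * spinand_read_id_op(spinand, 0, 1, buf) fills the ID buffer with
	 * 0xCD 0x72 0x72 ... for the F35SQA002G: byte 0 selects the FORESEE
	 * manufacturer table (SPINAND_MFR_FORESEE), and the following bytes
	 * must equal the SPINAND_ID() device bytes (0x72, 0x72).
	 */
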
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index cfd7c3b26dc4..72ad36c9a126 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -4,6 +4,7 @@
* Chuanhong Guo <gch981213@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
@@ -23,45 +24,125 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
+/* Feature bit definitions */
+#define GD_FEATURE_NR BIT(3) /* Normal Read (1 = normal, 0 = continuous) */
+#define GD_FEATURE_CRDC BIT(2) /* Continuous Read Dummy */
+
+/* ECC status extraction helpers */
+#define GD_ECCSR_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
+#define GD_ECCSR_ACCUMULATED(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
+
+struct gigadevice_priv {
+ bool continuous_read;
+};
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int gd5fxgm9_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct gigadevice_priv *priv = spinand->priv;
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (priv->continuous_read)
+ *eccsr = GD_ECCSR_ACCUMULATED(*eccsr);
+ else
+ *eccsr = GD_ECCSR_LAST_PAGE(*eccsr);
+
+ return 0;
+}
+
+static int gd5fxgm9_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ ret = gd5fxgm9_get_eccsr(spinand, spinand->scratchbuf);
+ if (ret)
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ eccsr = *spinand->scratchbuf;
+ if (WARN_ON(!eccsr || eccsr > nanddev_get_ecc_conf(nand)->strength))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ return eccsr;
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int gd5fxgm9_set_continuous_read(struct spinand_device *spinand, bool enable)
+{
+ struct gigadevice_priv *priv = spinand->priv;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, GD_FEATURE_NR,
+ enable ? 0 : GD_FEATURE_NR);
+ if (ret)
+ return ret;
+
+ priv->continuous_read = enable;
+
+ return 0;
+}
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -185,8 +266,8 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -207,6 +288,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -227,8 +309,8 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -248,6 +330,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
@@ -511,9 +594,71 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM9UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgm9_ecc_get_status),
+ SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)),
+ SPINAND_INFO("GD5F1GM9RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgm9_ecc_get_status),
+ SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)),
};
+static int gd5fxgm9_spinand_init(struct spinand_device *spinand)
+{
+ struct gigadevice_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spinand->priv = priv;
+
+ return 0;
+}
+
+static void gd5fxgm9_spinand_cleanup(struct spinand_device *spinand)
+{
+ kfree(spinand->priv);
+}
+
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+ .init = gd5fxgm9_spinand_init,
+ .cleanup = gd5fxgm9_spinand_cleanup,
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
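
The 0x7C read in gd5fxgm9_get_eccsr() returns a single byte carrying two counters: bits [3:0] hold the bitflip count of the last page read, while bits [7:4] hold the maximum observed across a continuous read. A hedged decode with a made-up register value (0x53 is an example, not from hardware):

/* Illustrative decode of a raw ECCSR byte (example value only) */
static void example_eccsr_decode(void)
{
	u8 eccsr = 0x53;
	u8 last_page = GD_ECCSR_LAST_PAGE(eccsr);	/* bits [3:0] -> 3 bitflips */
	u8 accumulated = GD_ECCSR_ACCUMULATED(eccsr);	/* bits [7:4] -> 5 bitflips */
}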
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3dfc7e1e5241..edf63b9996cf 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -5,26 +5,41 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
-#define MACRONIX_ECCSR_MASK 0x0F
+#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
+#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
+#define MACRONIX_CFG_CONT_READ BIT(2)
+#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
+#define MACRONIX_NUM_READ_RETRY_MODES 5
+
+#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
+
+/* Bitflip threshold configuration register */
+#define REG_CFG_BFT 0x10
+#define CFG_BFT(x) FIELD_PREP(GENMASK(7, 4), (x))
+
+struct macronix_priv {
+ bool cont_read;
+};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -49,8 +64,9 @@ static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.free = mx35lfxge4ab_ooblayout_free,
};
-static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+static int macronix_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
+ struct macronix_priv *priv = spinand->priv;
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
@@ -60,12 +76,21 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
if (ret)
return ret;
- *eccsr &= MACRONIX_ECCSR_MASK;
+ /*
+ * ECCSR exposes the number of bitflips for the last read page in bits [3:0].
+ * Continuous read compatible chips also expose the maximum number of
+ * bitflips for the whole (continuous) read operation in bits [7:4].
+ */
+ if (!priv->cont_read)
+ *eccsr = MACRONIX_ECCSR_BF_LAST_PAGE(*eccsr);
+ else
+ *eccsr = MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(*eccsr);
+
return 0;
}
-static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int macronix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
@@ -83,16 +108,14 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
- if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
+ if (macronix_get_eccsr(spinand, spinand->scratchbuf))
return nanddev_get_ecc_conf(nand)->strength;
eccsr = *spinand->scratchbuf;
- if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
- !eccsr))
+ if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
-
default:
break;
}
@@ -100,6 +123,38 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
+{
+ struct macronix_priv *priv = spinand->priv;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, MACRONIX_CFG_CONT_READ,
+ enable ? MACRONIX_CFG_CONT_READ : 0);
+ if (ret)
+ return ret;
+
+ priv->cont_read = enable;
+
+ return 0;
+}
+
+/**
+ * macronix_set_read_retry - Set the retry mode
+ * @spinand: SPI NAND device
+ * @retry_mode: Specify which retry mode to set
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int macronix_set_read_retry(struct spinand_device *spinand,
+ unsigned int retry_mode)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = retry_mode;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -110,7 +165,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -118,10 +173,12 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -129,9 +186,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -139,34 +199,67 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35LF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35LF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -176,7 +269,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -186,7 +279,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
@@ -195,21 +288,38 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35UF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -217,7 +327,10 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -225,21 +338,38 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35UF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -247,9 +377,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -257,7 +390,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -267,9 +401,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -277,9 +411,11 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -287,9 +423,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -297,8 +436,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
-
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX31LF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -308,7 +447,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX3UF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -318,10 +457,30 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
};
+static int macronix_spinand_init(struct spinand_device *spinand)
+{
+ struct macronix_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spinand->priv = priv;
+
+ return 0;
+}
+
+static void macronix_spinand_cleanup(struct spinand_device *spinand)
+{
+ kfree(spinand->priv);
+}
+
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .init = macronix_spinand_init,
+ .cleanup = macronix_spinand_cleanup,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
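
macronix_set_read_retry() above writes the requested mode into feature register 0x70; mode 0 restores normal reads. As a hedged sketch, a caller could drive the retry sequence on an uncorrectable page roughly as below, where read_page_once() is a placeholder for the actual re-read (hypothetical helper, not a kernel API), assuming higher modes shift the read voltages:

/* Hypothetical recovery loop over the Macronix read-retry modes */
static int example_read_retry(struct spinand_device *spinand)
{
	unsigned int mode;
	int ret = -EBADMSG;

	for (mode = 1; mode < MACRONIX_NUM_READ_RETRY_MODES && ret == -EBADMSG;
	     mode++) {
		if (macronix_set_read_retry(spinand, mode))
			break;

		ret = read_page_once(spinand);	/* placeholder re-read */
	}

	/* Always restore mode 0 so subsequent reads are unaffected. */
	macronix_set_read_retry(spinand, 0);
	return ret;
}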
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 50b7295bc922..a49d7cb6a96d 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -9,10 +9,12 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
@@ -28,34 +30,38 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
+
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -131,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
+{
+ size_t bufsize = spinand_otp_page_size(spinand);
+ size_t retlen;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
+ if (ret)
+ goto free_buf;
+
+ ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
+
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ if (ret)
+ goto free_buf;
+
+ /* If all zeros, then the OTP area is locked. */
+ if (mem_is_zero(buf, bufsize))
+ ret = 1;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen,
+ bool user)
+{
+ int locked;
+
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ locked = mt29f2g01abagd_otp_is_locked(spinand);
+ if (locked < 0)
+ return locked;
+
+ buf->locked = locked;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
+ .info = mt29f2g01abagd_user_otp_info,
+ .lock = mt29f2g01abagd_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
+ .info = mt29f2g01abagd_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
- micron_8_ecc_get_status)),
+ micron_8_ecc_get_status),
+ SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
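
mt29f2g01abagd_otp_is_locked() returns a tri-state: 1 when the user OTP area reads back all zeros in OTP-state mode (locked), 0 when still writable, and a negative error code otherwise. A hedged caller sketch; example_check_otp() and the -EROFS mapping are illustrative choices, not part of the patch:

/* Hedged sketch: consuming the tri-state lock query (illustrative) */
static int example_check_otp(struct spinand_device *spinand)
{
	int locked = mt29f2g01abagd_otp_is_locked(spinand);

	if (locked < 0)
		return locked;	/* I/O or allocation failure */

	if (locked)
		return -EROFS;	/* area permanently locked */

	return 0;		/* still writable */
}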
diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c
new file mode 100644
index 000000000000..ce41bca86ea9
--- /dev/null
+++ b/drivers/mtd/nand/spi/otp.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spinand.h>
+
+/**
+ * spinand_otp_page_size() - Get SPI-NAND OTP page size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP page size.
+ */
+size_t spinand_otp_page_size(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+}
+
+static size_t spinand_otp_size(struct spinand_device *spinand,
+ const struct spinand_otp_layout *layout)
+{
+ return layout->npages * spinand_otp_page_size(spinand);
+}
+
+/**
+ * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_fact_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_size() - Get SPI-NAND user OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_user_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->user_otp->layout);
+}
+
+static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
+ size_t len,
+ const struct spinand_otp_layout *layout)
+{
+ if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
+ loff_t ofs, size_t len)
+{
+ return spinand_otp_check_bounds(spinand, ofs, len,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf, bool is_write,
+ const struct spinand_otp_layout *layout)
+{
+ struct nand_page_io_req req = {};
+ unsigned long long page;
+ size_t copied = 0;
+ size_t otp_pagesize = spinand_otp_page_size(spinand);
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
+ if (ret)
+ return ret;
+
+ page = ofs;
+ req.dataoffs = do_div(page, otp_pagesize);
+ req.pos.page = page + layout->start_page;
+ req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
+ req.mode = MTD_OPS_RAW;
+ req.databuf.in = buf;
+
+ while (copied < len) {
+ req.datalen = min_t(unsigned int,
+ otp_pagesize - req.dataoffs,
+ len - copied);
+
+ if (is_write)
+ ret = spinand_write_page(spinand, &req);
+ else
+ ret = spinand_read_page(spinand, &req);
+
+ if (ret < 0)
+ break;
+
+ req.databuf.in += req.datalen;
+ req.pos.page++;
+ req.dataoffs = 0;
+ copied += req.datalen;
+ }
+
+ *retlen = copied;
+
+ if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * spinand_fact_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->user_otp->layout);
+}
+
+/**
+ * spinand_user_otp_write() - Write to OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the pointer to variable to store the number of written bytes
+ * @buf: the buffer with data to write
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf,
+ bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
+ else
+ ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
+}
+
+static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u8 *buf, bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len,
+ is_fact ? &spinand->fact_otp->layout :
+ &spinand->user_otp->layout);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+ else
+ ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
+}
+
+static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->write(spinand, ofs, len, retlen, buf);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->erase(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->lock(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+/**
+ * spinand_set_mtd_otp_ops() - Setup OTP methods
+ * @spinand: the spinand device
+ *
+ * Setup OTP methods.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops;
+ const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops;
+
+ if (!user_ops && !fact_ops)
+ return -EINVAL;
+
+ if (user_ops) {
+ if (user_ops->info)
+ mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
+
+ if (user_ops->read)
+ mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
+
+ if (user_ops->write)
+ mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
+
+ if (user_ops->lock)
+ mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
+
+ if (user_ops->erase)
+ mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
+ }
+
+ if (fact_ops) {
+ if (fact_ops->info)
+ mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
+
+ if (fact_ops->read)
+ mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
+ }
+
+ return 0;
+}
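
Once spinand_set_mtd_otp_ops() has populated the mtd hooks, the standard mtdchar OTP interface works unchanged. A hedged userspace sketch follows; the device path is an example, while OTPSELECT and MTD_OTP_USER come from the existing mtd-abi UAPI:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>

int main(void)
{
	int fd = open("/dev/mtd0", O_RDONLY);	/* example device node */
	int mode = MTD_OTP_USER;
	char buf[64];

	if (fd < 0)
		return 1;

	/*
	 * Switch the fd into user-OTP mode; read() is then routed to
	 * mtd->_read_user_prot_reg, i.e. spinand_mtd_user_otp_read().
	 */
	if (!ioctl(fd, OTPSELECT, &mode))
		read(fd, buf, sizeof(buf));

	close(fd);
	return 0;
}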
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 519ade513c1f..73bd124273a5 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,20 +22,20 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
new file mode 100644
index 000000000000..bf9ce163e6a7
--- /dev/null
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 SkyHigh Memory Limited
+ *
+ * Author: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+ * Co-Author: KR Kim <kr.kim@skyhighmemory.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_SKYHIGH 0x01
+#define SKYHIGH_STATUS_ECC_1TO2_BITFLIPS (1 << 4)
+#define SKYHIGH_STATUS_ECC_3TO6_BITFLIPS (2 << 4)
+#define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4)
+#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ /* ECC bytes are stored in hidden area. */
+ return -ERANGE;
+}
+
+static int skyhigh_spinand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* ECC bytes are stored in hidden area. Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops skyhigh_spinand_ooblayout = {
+ .ecc = skyhigh_spinand_ooblayout_ecc,
+ .free = skyhigh_spinand_ooblayout_free,
+};
+
+static int skyhigh_spinand_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case SKYHIGH_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case SKYHIGH_STATUS_ECC_1TO2_BITFLIPS:
+ return 2;
+
+ case SKYHIGH_STATUS_ECC_3TO6_BITFLIPS:
+ return 6;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info skyhigh_spinand_table[] = {
+ SPINAND_INFO("S35ML01G301",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML01G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML02G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML04G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+};
+
+static int skyhigh_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * Config_Protect_En (bit 1 in the Block Lock register) must be set to 1
+ * before any other bits are written. Do it here, before the core unlocks
+ * all blocks by writing the block protection bits.
+ */
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK,
+ SKYHIGH_CONFIG_PROTECT_EN);
+}
+
+static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = {
+ .init = skyhigh_spinand_init,
+};
+
+const struct spinand_manufacturer skyhigh_spinand_manufacturer = {
+ .id = SPINAND_MFR_SKYHIGH,
+ .name = "SkyHigh",
+ .chips = skyhigh_spinand_table,
+ .nchips = ARRAY_SIZE(skyhigh_spinand_table),
+ .ops = &skyhigh_spinand_manuf_ops,
+};
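
S35ML parts report ECC status as a range rather than an exact count, so the hook returns the upper bound of each range (2 for 1..2 bitflips, 6 for 3..6); the core compares that value against the bitflip threshold, which biases it toward migrating marginal blocks. A small hedged illustration, with example_skyhigh_status() being a hypothetical name:

/* Illustrative only: the 3..6 range maps to its worst case, 6 */
static void example_skyhigh_status(struct spinand_device *spinand)
{
	int ret = skyhigh_spinand_ecc_get_status(spinand,
						 SKYHIGH_STATUS_ECC_3TO6_BITFLIPS);

	WARN_ON(ret != 6);
}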
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index a80427c13121..6530257ac0be 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,28 +15,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -266,6 +266,39 @@ static const struct spinand_info toshiba_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58NYG0S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TH58NYG2S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (1st generation) */
+ SPINAND_INFO("TH58NYG3S0HBAI6",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
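
For the TOSH_STATUS_ECC_HAS_BITFLIPS_T case, tx58cxgxsxraix_ecc_get_status() fetches the maximum bitflip count from feature register 0x30, using the GET_FEATURE naming introduced above. A hedged sketch of that decode in isolation, with example_toshiba_mbf() as an illustrative name; on these parts the upper nibble carries the count:

/* Hedged sketch: reading the maximum bitflip count from register 0x30 */
static int example_toshiba_mbf(struct spinand_device *spinand)
{
	struct spi_mem_op op =
		SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
	int ret = spi_mem_exec_op(spinand->spimem, &op);

	if (ret)
		return ret;

	return *spinand->scratchbuf >> 4;	/* bits [7:4] hold the count */
}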
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index f507e3759301..4870b2d5edb2 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -10,26 +10,82 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/units.h>
+#include <linux/delay.h>
#define SPINAND_MFR_WINBOND 0xEF
#define WINBOND_CFG_BUF_READ BIT(3)
+#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+
+#define W25N0XJW_SR4 0xD0
+#define W25N0XJW_SR4_HS BIT(2)
+
+#define W35N01JW_VCR_IO_MODE 0x00
+#define W35N01JW_VCR_IO_MODE_SINGLE_SDR 0xFF
+#define W35N01JW_VCR_IO_MODE_OCTAL_SDR 0xDF
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR_DS 0xE7
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR 0xC7
+#define W35N01JW_VCR_DUMMY_CLOCK_REG 0x01
+
+/*
+ * "X2" in the core is equivalent to "dual output" in the datasheets,
+ * "X4" in the core is equivalent to "quad output" in the datasheets.
+ * Quad- and octal-capable chips have an absolute maximum frequency of 166 MHz.
+ */
+
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 3, NULL, 0, 120 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 20, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 12, NULL, 0, 124 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 8, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -74,6 +130,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
+static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 64 + (8 * section);
+ region->length = 7;
+
+ return 0;
+}
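w25n01kv_ooblayout_ecc() carves four 7-byte ECC regions out of the upper half of the W25N01KV's 96-byte OOB area, at byte offsets 64, 72, 80 and 88. A stand-alone check of that section arithmetic (mirroring the math only, not the mtd_ooblayout API):

#include <stdio.h>

/* Same math as w25n01kv_ooblayout_ecc(): four sections max. */
static int ecc_region(int section, int *offset, int *length)
{
	if (section > 3)
		return -1;	/* -ERANGE in the kernel */

	*offset = 64 + 8 * section;
	*length = 7;
	return 0;
}

int main(void)
{
	for (int s = 0; s < 4; s++) {
		int off, len;

		if (!ecc_region(s, &off, &len))
			printf("section %d: bytes %d..%d\n",
			       s, off, off + len - 1);
	}
	return 0;
}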
+
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -98,17 +166,92 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
+ .ecc = w25n01kv_ooblayout_ecc,
+ .free = w25n02kv_ooblayout_free,
+};
+
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
};
+static int w25n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w25n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section);
+ region->length = 12;
+
+ /* Reserve the first two bytes for the bad block marker (BBM) */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 16 * section;
+ region->length = 12;
+
+ /* Reserve the first two bytes for the bad block marker (BBM) */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25n01jw_ooblayout = {
+ .ecc = w25n01jw_ooblayout_ecc,
+ .free = w25n01jw_ooblayout_free,
+};
+
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .free = w35n01jw_ooblayout_free,
+};
+
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -118,6 +261,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
+ case W25N04KV_STATUS_ECC_5_8_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
@@ -140,18 +284,126 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
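STATUS_ECC_MASK covers status bits [5:4], and the W25N04KV reports 5-8 corrected bitflips with the otherwise-reserved value 3; the hunk above folds that case into the same path as the generic "has bitflips" case, which then reads the maximum-bitflip (MBF) register for the exact count. A stand-alone sketch of the decode (the constants mirror the driver; the MBF read is elided):

#include <stdio.h>

#define STATUS_ECC_MASK			 (3 << 4)
#define STATUS_ECC_NO_BITFLIPS		 (0 << 4)
#define STATUS_ECC_HAS_BITFLIPS		 (1 << 4)
#define STATUS_ECC_UNCOR_ERROR		 (2 << 4)
#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)

static const char *decode(unsigned int status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return "clean";
	case STATUS_ECC_UNCOR_ERROR:
		return "uncorrectable (-EBADMSG)";
	case STATUS_ECC_HAS_BITFLIPS:
	case W25N04KV_STATUS_ECC_5_8_BITFLIPS:
		return "corrected; read MBF for the exact count";
	default:
		return "invalid";
	}
}

int main(void)
{
	for (unsigned int s = 0; s < 4; s++)
		printf("ECC status %u: %s\n", s, decode(s << 4));
	return 0;
}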
+static int w25n0xjw_hs_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ bool hs;
+ u8 sr4;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ hs = false;
+ else if (op->cmd.buswidth == 1 && op->addr.buswidth == 1 &&
+ op->dummy.buswidth == 1 && op->data.buswidth == 1)
+ hs = false;
+ else if (!op->max_freq)
+ hs = true;
+ else
+ hs = false;
+
+ ret = spinand_read_reg_op(spinand, W25N0XJW_SR4, &sr4);
+ if (ret)
+ return ret;
+
+ if (hs)
+ sr4 |= W25N0XJW_SR4_HS;
+ else
+ sr4 &= ~W25N0XJW_SR4_HS;
+
+ ret = spinand_write_reg_op(spinand, W25N0XJW_SR4, sr4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
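w25n0xjw_hs_cfg() sets the SR4 high-speed (HS) bit only when the chosen read variant is non-DTR, not plain single-I/O, and carries no chip-side frequency cap; a capped variant means the part must stay in the slower mode, so HS is cleared. A condensed rendering of that decision (the struct and field names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct read_op {
	bool dtr;		/* any phase is double transfer rate */
	bool single;		/* all phases are 1 bit wide */
	unsigned int max_freq;	/* 0 = no chip-side cap */
};

/* HS only pays off for uncapped, non-DTR, multi-I/O reads. */
static bool want_hs(const struct read_op *op)
{
	if (op->dtr || op->single)
		return false;
	return op->max_freq == 0;
}

int main(void)
{
	struct read_op quad_uncapped = { false, false, 0 };
	struct read_op quad_capped   = { false, false, 104000000 };
	struct read_op dtr_read      = { true,  false, 80000000 };

	printf("quad uncapped: HS=%d\n", want_hs(&quad_uncapped));
	printf("quad capped:   HS=%d\n", want_hs(&quad_capped));
	printf("dtr:           HS=%d\n", want_hs(&dtr_read));
	return 0;
}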
+
+static int w35n0xjw_write_vcr(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x81, 1),
+ SPI_MEM_OP_ADDR(3, reg, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, spinand->scratchbuf, 1));
+ int ret;
+
+ *spinand->scratchbuf = val;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * The Write VCR operation doesn't set the busy bit in SR, which means
+ * we cannot perform a status poll. A minimum of 50ns is needed for the
+ * write to complete.
+ */
+ ndelay(50);
+
+ return 0;
+}
+
+static int w35n0xjw_vcr_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ unsigned int dummy_cycles;
+ bool dtr, single;
+ u8 io_mode;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+
+ single = (op->cmd.buswidth == 1 && op->addr.buswidth == 1 && op->data.buswidth == 1);
+ dtr = (op->cmd.dtr || op->addr.dtr || op->data.dtr);
+ if (single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_SINGLE_SDR;
+ else if (!single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_SDR;
+ else if (!single && dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_DDR;
+ else
+ return -EINVAL;
+
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_IO_MODE, io_mode);
+ if (ret)
+ return ret;
+
+ dummy_cycles = ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+ switch (dummy_cycles) {
+ case 8:
+ case 12:
+ case 16:
+ case 20:
+ case 24:
+ case 28:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_DUMMY_CLOCK_REG, dummy_cycles);
+ if (ret)
+ return ret;
+
+ return 0;
+}
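The dummy-cycle count programmed into the VCR is derived from the spi-mem dummy phase: dummy.nbytes is a byte count, so cycles = nbytes * 8 / buswidth, halved again for DTR because two bits move per wire per cycle; the switch above then accepts only 8, 12, 16, 20, 24 or 28 cycles. A quick stand-alone check of the formula:

#include <stdio.h>

static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth,
				 int dtr)
{
	return ((nbytes * 8) / buswidth) / (dtr ? 2 : 1);
}

int main(void)
{
	/* 16 dummy bytes on an 8-wire SDR bus -> 16 cycles */
	printf("%u\n", dummy_cycles(16, 8, 0));
	/* 2 dummy bytes on a 1-wire SDR bus -> 16 cycles */
	printf("%u\n", dummy_cycles(2, 1, 0));
	return 0;
}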
+
static const struct spinand_info winbond_spinand_table[] = {
- SPINAND_INFO("W25M02GV",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
- NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
+ /* 512M-bit densities */
+ SPINAND_INFO("W25N512GW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20),
+ NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
- SPINAND_SELECT_TARGET(w25m02gv_select_target)),
- SPINAND_INFO("W25N01GV",
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ /* 1G-bit densities */
+ SPINAND_INFO("W25N01GV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
@@ -160,7 +412,86 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
- SPINAND_INFO("W25N02KV",
+ SPINAND_INFO("W25N01GW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W25N01JW", /* high-speed 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
+ SPINAND_INFO("W25N01KV", /* 3.3V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
+ NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W35N01JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ SPINAND_INFO("W35N02JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ SPINAND_INFO("W35N04JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ /* 2G-bit densities */
+ SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N02JW", /* high-speed 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
+ SPINAND_INFO("W25N02KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
@@ -169,6 +500,34 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N02KW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ /* 4G-bit densities */
+ SPINAND_INFO("W25N04KV", /* 3.3V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N04KW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x23),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3911520f718c..5915b37b47f5 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -4,6 +4,7 @@
* Felix Matouschek <felix@matouschek.org>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
@@ -15,21 +16,27 @@
#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4)
+#define XT26XXXD_STATUS_ECC3_ECC2_MASK GENMASK(7, 6)
+#define XT26XXXD_STATUS_ECC_NO_DETECTED (0)
+#define XT26XXXD_STATUS_ECC_1_7_CORRECTED (1)
+#define XT26XXXD_STATUS_ECC_8_CORRECTED (3)
+#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -84,6 +91,53 @@ static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
return status >> 2;
}
+static int xt26xxxd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int xt26xxxd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize / 2 - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops xt26xxxd_ooblayout = {
+ .ecc = xt26xxxd_ooblayout_ecc,
+ .free = xt26xxxd_ooblayout_free,
+};
+
+static int xt26xxxd_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (FIELD_GET(STATUS_ECC_MASK, status)) {
+ case XT26XXXD_STATUS_ECC_NO_DETECTED:
+ return 0;
+ case XT26XXXD_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+ case XT26XXXD_STATUS_ECC_1_7_CORRECTED:
+ return 4 + FIELD_GET(XT26XXXD_STATUS_ECC3_ECC2_MASK, status);
+ case XT26XXXD_STATUS_ECC_8_CORRECTED:
+ return 8;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
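For the XT26xxxD parts, status bits [5:4] distinguish clean, 1-7 corrected, uncorrectable and exactly-8 corrected, while bits [7:6] (ECC3:ECC2) refine the 1-7 case; the driver reports 4 plus that refinement, i.e. a conservative 4-7 bitflips. The same decode, stand-alone, with the FIELD_GET() extractions open-coded as shifts and masks:

#include <stdio.h>

#define ECC_STATUS_MASK	0x30	/* bits [5:4], STATUS_ECC_MASK */
#define ECC_REFINE_MASK	0xc0	/* bits [7:6], ECC3:ECC2 */

static int decode_bitflips(unsigned int status)
{
	switch ((status & ECC_STATUS_MASK) >> 4) {
	case 0:			/* no error detected */
		return 0;
	case 2:			/* uncorrectable */
		return -1;	/* -EBADMSG in the kernel */
	case 1:			/* 1-7 corrected, refined by ECC3:ECC2 */
		return 4 + ((status & ECC_REFINE_MASK) >> 6);
	case 3:			/* exactly 8 corrected */
		return 8;
	}
	return -2;		/* not reachable for 2-bit values */
}

int main(void)
{
	/* 0x50: ECC=01 (1-7 corrected), ECC3:ECC2=01 -> reports 5 */
	printf("%d\n", decode_bitflips(0x50));
	return 0;
}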
static const struct spinand_info xtx_spinand_table[] = {
SPINAND_INFO("XT26G01A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1),
@@ -115,6 +169,86 @@ static const struct spinand_info xtx_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&xt26g0xa_ooblayout,
xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G01D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x31),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G11D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x34),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q01D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G02D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x32),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G12D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q02D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x52),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G04D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x33),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q04D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x53),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
};
static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 64d319e959b2..868aa3d35d09 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -228,6 +228,25 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
return BLOCK_NIL;
}
+static noinline_for_stack void NFTL_move_block(struct mtd_info *mtd, loff_t src, loff_t dst)
+{
+ unsigned char movebuf[512];
+ struct nftl_oob oob;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+ if (ret != -EIO)
+ printk("Error went away on retry.\n");
+ }
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(mtd, dst, 512, &retlen, movebuf, (char *)&oob);
+}
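Hoisting the 512-byte bounce buffer out of NFTL_foldchain() into NFTL_move_block(), marked noinline_for_stack, confines the buffer's stack cost to the short-lived helper frame instead of keeping it live across the whole fold loop. The generic shape of that pattern, sketched stand-alone (noinline_for_stack boils down to a noinline attribute):

#include <stdio.h>
#include <string.h>

#define noinline_for_stack __attribute__((noinline))

/* The 512-byte bounce buffer lives only in this frame. */
static noinline_for_stack void copy_sector(const unsigned char *src,
					   unsigned char *dst)
{
	unsigned char bounce[512];

	memcpy(bounce, src, sizeof(bounce));
	memcpy(dst, bounce, sizeof(bounce));
}

int main(void)
{
	static unsigned char from[512] = { 0xa5 };
	static unsigned char to[512];

	copy_sector(from, to);
	printf("%#x\n", (unsigned int)to[0]);
	return 0;
}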
+
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
{
struct mtd_info *mtd = nftl->mbd.mtd;
@@ -389,9 +408,6 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
*/
pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
- unsigned char movebuf[512];
- int ret;
-
/* If it's in the target EUN already, or if it's pending write, do nothing */
if (BlockMap[block] == targetEUN ||
(pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
@@ -403,25 +419,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd_read(mtd,
- (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512,
- &retlen,
- movebuf);
- if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd_read(mtd,
- (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512,
- &retlen,
- movebuf);
- if (ret != -EIO)
- printk("Error went away on retry.\n");
- }
- memset(&oob, 0xff, sizeof(struct nftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
-
- nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
- (block * 512), 512, &retlen, movebuf, (char *)&oob);
+ NFTL_move_block(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ (nftl->EraseSize * targetEUN) + (block * 512));
}
/* add the header so that it is now a valid chain */
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 60738edcd5d5..da03ab6efe04 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -1,9 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MTD_AR7_PARTS
- tristate "TI AR7 partitioning parser"
- help
- TI AR7 partitioning parser support
-
config MTD_BCM47XX_PARTS
tristate "BCM47XX partitioning parser"
depends on BCM47XX || ARCH_BCM_5301X
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 0e70b621a1d8..9b00c62b837a 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o
diff --git a/drivers/mtd/parsers/ar7part.c b/drivers/mtd/parsers/ar7part.c
deleted file mode 100644
index 8cd683711ac6..000000000000
--- a/drivers/mtd/parsers/ar7part.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
- *
- * TI AR7 flash partition table.
- * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/memblock.h>
-#include <linux/module.h>
-
-#include <uapi/linux/magic.h>
-
-#define AR7_PARTS 4
-#define ROOT_OFFSET 0xe0000
-
-#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
-#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-
-struct ar7_bin_rec {
- unsigned int checksum;
- unsigned int length;
- unsigned int address;
-};
-
-static int create_mtd_partitions(struct mtd_info *master,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
-{
- struct ar7_bin_rec header;
- unsigned int offset;
- size_t len;
- unsigned int pre_size = master->erasesize, post_size = 0;
- unsigned int root_offset = ROOT_OFFSET;
-
- int retries = 10;
- struct mtd_partition *ar7_parts;
-
- ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
- if (!ar7_parts)
- return -ENOMEM;
- ar7_parts[0].name = "loader";
- ar7_parts[0].offset = 0;
- ar7_parts[0].size = master->erasesize;
- ar7_parts[0].mask_flags = MTD_WRITEABLE;
-
- ar7_parts[1].name = "config";
- ar7_parts[1].offset = 0;
- ar7_parts[1].size = master->erasesize;
- ar7_parts[1].mask_flags = 0;
-
- do { /* Try 10 blocks starting from master->erasesize */
- offset = pre_size;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- if (!strncmp((char *)&header, "TIENV0.8", 8))
- ar7_parts[1].offset = pre_size;
- if (header.checksum == LOADER_MAGIC1)
- break;
- if (header.checksum == LOADER_MAGIC2)
- break;
- pre_size += master->erasesize;
- } while (retries--);
-
- pre_size = offset;
-
- if (!ar7_parts[1].offset) {
- ar7_parts[1].offset = master->size - master->erasesize;
- post_size = master->erasesize;
- }
-
- switch (header.checksum) {
- case LOADER_MAGIC1:
- while (header.length) {
- offset += sizeof(header) + header.length;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- }
- root_offset = offset + sizeof(header) + 4;
- break;
- case LOADER_MAGIC2:
- while (header.length) {
- offset += sizeof(header) + header.length;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- }
- root_offset = offset + sizeof(header) + 4 + 0xff;
- root_offset &= ~(uint32_t)0xff;
- break;
- default:
- printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
- break;
- }
-
- mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
- if (header.checksum != SQUASHFS_MAGIC) {
- root_offset += master->erasesize - 1;
- root_offset &= ~(master->erasesize - 1);
- }
-
- ar7_parts[2].name = "linux";
- ar7_parts[2].offset = pre_size;
- ar7_parts[2].size = master->size - pre_size - post_size;
- ar7_parts[2].mask_flags = 0;
-
- ar7_parts[3].name = "rootfs";
- ar7_parts[3].offset = root_offset;
- ar7_parts[3].size = master->size - root_offset - post_size;
- ar7_parts[3].mask_flags = 0;
-
- *pparts = ar7_parts;
- return AR7_PARTS;
-}
-
-static struct mtd_part_parser ar7_parser = {
- .parse_fn = create_mtd_partitions,
- .name = "ar7part",
-};
-module_mtd_part_parser(ar7_parser);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
- "Eugene Konev <ejka@openwrt.org>");
-MODULE_DESCRIPTION("MTD partitioning for TI AR7");
diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c
index 13daf9bffd08..49c8e7f27f21 100644
--- a/drivers/mtd/parsers/bcm47xxpart.c
+++ b/drivers/mtd/parsers/bcm47xxpart.c
@@ -95,7 +95,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t blocksize = master->erasesize;
int trx_parts[2]; /* Array with indexes of TRX partitions */
int trx_num = 0; /* Number of found TRX partitions */
- int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ static const int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
int err;
/*
diff --git a/drivers/mtd/parsers/brcm_u-boot.c b/drivers/mtd/parsers/brcm_u-boot.c
index 7c338dc7b8f3..984f98923446 100644
--- a/drivers/mtd/parsers/brcm_u-boot.c
+++ b/drivers/mtd/parsers/brcm_u-boot.c
@@ -81,4 +81,5 @@ static struct mtd_part_parser brcm_u_boot_mtd_parser = {
};
module_mtd_part_parser(brcm_u_boot_mtd_parser);
+MODULE_DESCRIPTION("Broadcom's U-Boot partition parser");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index b34856def816..504e5fa2b45b 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -44,14 +44,6 @@
#include <linux/module.h>
#include <linux/err.h>
-/* debug macro */
-#if 0
-#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
-#else
-#define dbg(x)
-#endif
-
-
/* special size referring to all the remaining space in a partition */
#define SIZE_REMAINING ULLONG_MAX
#define OFFSET_CONTINUOUS ULLONG_MAX
@@ -199,9 +191,9 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
+ pr_debug("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part, parts[this_part].name, parts[this_part].offset,
- parts[this_part].size, parts[this_part].mask_flags));
+ parts[this_part].size, parts[this_part].mask_flags);
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
@@ -267,7 +259,7 @@ static int mtdpart_setup_real(char *s)
}
mtd_id_len = p - mtd_id;
- dbg(("parsing <%s>\n", p+1));
+ pr_debug("parsing <%s>\n", p+1);
/*
* parse one mtd. have it reserve memory for the
@@ -304,8 +296,8 @@ static int mtdpart_setup_real(char *s)
this_mtd->next = partitions;
partitions = this_mtd;
- dbg(("mtdid=<%s> num_parts=<%d>\n",
- this_mtd->mtd_id, this_mtd->num_parts));
+ pr_debug("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts);
/* EOS - we're done */
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index e7b8e9d0a910..abfa68798918 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -157,10 +157,10 @@ static int parse_fixed_partitions(struct mtd_info *master,
partname = of_get_property(pp, "name", &len);
parts[i].name = partname;
- if (of_get_property(pp, "read-only", &len))
+ if (of_property_read_bool(pp, "read-only"))
parts[i].mask_flags |= MTD_WRITEABLE;
- if (of_get_property(pp, "lock", &len))
+ if (of_property_read_bool(pp, "lock"))
parts[i].mask_flags |= MTD_POWERUP_LOCK;
if (of_property_read_bool(pp, "slc-mode"))
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index a16b42a88581..3b55b676ca6b 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -102,7 +102,7 @@ nogood:
offset -= master->erasesize;
}
} else {
- offset = directory * master->erasesize;
+ offset = (unsigned long) directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
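Without the cast, directory (an int) multiplied by master->erasesize is evaluated in 32-bit arithmetic and can wrap before being widened into the offset; casting one operand to unsigned long makes the multiply full-width on 64-bit builds. A minimal demonstration of the wrap (the values are purely illustrative):

#include <stdio.h>

int main(void)
{
	int directory = 65536;			/* illustrative block index */
	unsigned int erasesize = 0x10000;	/* 64 KiB */

	/* 32-bit multiply wraps: 65536 * 65536 == 2^32 -> 0 */
	unsigned long long bad = directory * erasesize;
	/* widening one operand first keeps the full product (LP64) */
	unsigned long long good = (unsigned long)directory * erasesize;

	printf("bad=%llu good=%llu\n", bad, good);
	return 0;
}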
diff --git a/drivers/mtd/parsers/tplink_safeloader.c b/drivers/mtd/parsers/tplink_safeloader.c
index 1c689dafca2a..e358a029dc70 100644
--- a/drivers/mtd/parsers/tplink_safeloader.c
+++ b/drivers/mtd/parsers/tplink_safeloader.c
@@ -149,4 +149,5 @@ static struct mtd_part_parser mtd_parser_tplink_safeloader = {
};
module_mtd_part_parser(mtd_parser_tplink_safeloader);
+MODULE_DESCRIPTION("TP-Link Safeloader partitions parser");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index c546f8c5f24d..be26cc67a1c4 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -190,8 +190,8 @@ static int scan_header(struct partition *part)
if (!part->blocks)
goto err;
- part->sector_map = vmalloc(array_size(sizeof(u_long),
- part->sector_count));
+ part->sector_map = vmalloc_array(part->sector_count,
+ sizeof(u_long));
if (!part->sector_map)
goto err;
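vmalloc_array(n, size) folds the overflow check that array_size() used to provide into the allocator itself: if n * size would overflow, the allocation fails outright instead of silently truncating. The user-space analogue is preferring calloc() over malloc(n * size), as this sketch shows (note malloc(0) may legally return a small non-NULL allocation):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)1 << (sizeof(size_t) * 8 - 2);	/* huge count */
	size_t sz = 8;

	/* n * sz wraps to 0: malloc() may "succeed" with 0 bytes */
	void *bad = malloc(n * sz);
	/* calloc() detects the overflow and returns NULL */
	void *good = calloc(n, sz);

	printf("malloc(n*sz)=%p calloc(n,sz)=%p\n", bad, good);
	free(bad);
	free(good);
	return 0;
}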
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b5b3c4c44a94..5988cba30eb3 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -44,8 +44,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
struct sm_sysfs_attribute *sm_attr =
container_of(attr, struct sm_sysfs_attribute, dev_attr);
- strncpy(buf, sm_attr->data, sm_attr->len);
- return sm_attr->len;
+ return sysfs_emit(buf, "%.*s", sm_attr->len, sm_attr->data);
}
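The old strncpy() neither guaranteed NUL termination nor respected the PAGE_SIZE bound of a sysfs buffer; sysfs_emit() is, roughly, a scnprintf() pinned to PAGE_SIZE with extra sanity checks, and the "%.*s" precision caps the copy at sm_attr->len even though the source is not NUL-terminated. A user-space approximation of that contract (sysfs_emit_like() and the sample data are illustrative; snprintf() stands in for the kernel's scnprintf()):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Bounded, always NUL-terminated, returns the byte count. */
static int sysfs_emit_like(char *buf, int len, const char *data)
{
	return snprintf(buf, PAGE_SIZE, "%.*s", len, data);
}

int main(void)
{
	char page[PAGE_SIZE];
	/* deliberately not NUL-terminated, like sm_attr->data */
	const char raw[9] = { 'c', 'f', 'i', '_', 'f', 'l', 'a', 's', 'h' };

	int n = sysfs_emit_like(page, (int)sizeof(raw), raw);

	printf("%d: %s\n", n, page);
	return 0;
}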
@@ -157,7 +156,7 @@ static int sm_read_lba(struct sm_oob *oob)
if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
return -1;
- /* Now check is both copies of the LBA differ too much */
+ /* Now check if both copies of the LBA differ too much */
lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
if (lba_test && !is_power_of_2(lba_test))
return -2;
@@ -993,7 +992,7 @@ restart:
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = from_timer(ftl, t, timer);
+ struct sm_ftl *ftl = timer_container_of(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
@@ -1067,7 +1066,7 @@ static int sm_write(struct mtd_blktrans_dev *dev,
sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
/* No need in flush thread running now */
- del_timer(&ftl->timer);
+ timer_delete(&ftl->timer);
mutex_lock(&ftl->mutex);
zone = sm_get_zone(ftl, zone_num);
@@ -1111,7 +1110,7 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- del_timer_sync(&ftl->timer);
+ timer_delete_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index e347b435a038..5dd9c35f6b6f 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -2,11 +2,9 @@
spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o
spi-nor-objs += atmel.o
-spi-nor-objs += catalyst.o
spi-nor-objs += eon.o
spi-nor-objs += esmt.o
spi-nor-objs += everspin.o
-spi-nor-objs += fujitsu.o
spi-nor-objs += gigadevice.o
spi-nor-objs += intel.o
spi-nor-objs += issi.o
@@ -15,7 +13,6 @@ spi-nor-objs += micron-st.o
spi-nor-objs += spansion.o
spi-nor-objs += sst.o
spi-nor-objs += winbond.o
-spi-nor-objs += xilinx.o
spi-nor-objs += xmc.o
spi-nor-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 656dd80a0be7..82c592f0a1e1 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -16,12 +16,12 @@
* is to unlock the whole flash array on startup. Therefore, we have to support
* exactly this operation.
*/
-static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -37,7 +37,7 @@ static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return ret;
}
-static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
@@ -48,9 +48,11 @@ static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
.is_locked = at25fs_nor_is_locked,
};
-static void at25fs_nor_late_init(struct spi_nor *nor)
+static int at25fs_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &at25fs_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups at25fs_nor_fixups = {
@@ -67,7 +69,7 @@ static const struct spi_nor_fixups at25fs_nor_fixups = {
* Return: 0 on success, -error otherwise.
*/
static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
- uint64_t len, bool is_protect)
+ u64 len, bool is_protect)
{
int ret;
u8 sr;
@@ -116,20 +118,18 @@ static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}
-static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, true);
}
-static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, false);
}
static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+ u64 len)
{
int ret;
@@ -149,9 +149,11 @@ static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
.is_locked = atmel_nor_is_global_protected,
};
-static void atmel_nor_global_protection_late_init(struct spi_nor *nor)
+static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_nor_global_protection_ops;
+
+ return 0;
}
static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
@@ -159,49 +161,88 @@ static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
};
static const struct flash_info atmel_nor_parts[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &at25fs_nor_fixups },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &at25fs_nor_fixups },
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x1f, 0x04, 0x00),
+ .name = "at26f004",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1f, 0x25, 0x00),
+ .name = "at45db081d",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1f, 0x42, 0x16),
+ .name = "at25sl321",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x1f, 0x44, 0x01),
+ .name = "at25df041a",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups,
+ }, {
+ .id = SNOR_ID(0x1f, 0x45, 0x01),
+ .name = "at26df081a",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x46, 0x01),
+ .name = "at26df161a",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x00),
+ .name = "at25df321",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x01),
+ .name = "at25df321a",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x08),
+ .name = "at25ff321a",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x48, 0x00),
+ .name = "at25df641",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x66, 0x01),
+ .name = "at25fs010",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x66, 0x04),
+ .name = "at25fs040",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x87, 0x01),
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
deleted file mode 100644
index 6d310815fb12..000000000000
--- a/drivers/mtd/spi-nor/catalyst.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info catalyst_nor_parts[] = {
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
- { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
- { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
- { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
-};
-
-const struct spi_nor_manufacturer spi_nor_catalyst = {
- .name = "catalyst",
- .parts = catalyst_nor_parts,
- .nparts = ARRAY_SIZE(catalyst_nor_parts),
-};
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 5070d72835ec..db948da2c4c5 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -468,13 +468,12 @@ static int hisi_spi_nor_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_spi_nor_remove(struct platform_device *pdev)
+static void hisi_spi_nor_remove(struct platform_device *pdev)
{
struct hifmc_host *host = platform_get_drvdata(pdev);
hisi_spi_nor_unregister_all(host);
mutex_destroy(&host->lock);
- return 0;
}
static const struct of_device_id hisi_spi_nor_dt_ids[] = {
@@ -489,7 +488,7 @@ static struct platform_driver hisi_spi_nor_driver = {
.of_match_table = hisi_spi_nor_dt_ids,
},
.probe = hisi_spi_nor_probe,
- .remove = hisi_spi_nor_remove,
+ .remove = hisi_spi_nor_remove,
};
module_platform_driver(hisi_spi_nor_driver);
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 794c7b7d5c92..1a92d71755db 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -17,7 +17,6 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -395,30 +394,18 @@ static int nxp_spifi_probe(struct platform_device *pdev)
if (IS_ERR(spifi->flash_base))
return PTR_ERR(spifi->flash_base);
- spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi");
if (IS_ERR(spifi->clk_spifi)) {
- dev_err(&pdev->dev, "spifi clock not found\n");
+ dev_err(&pdev->dev, "spifi clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_spifi);
}
- spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
if (IS_ERR(spifi->clk_reg)) {
- dev_err(&pdev->dev, "reg clock not found\n");
+ dev_err(&pdev->dev, "reg clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_reg);
}
- ret = clk_prepare_enable(spifi->clk_reg);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable reg clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(spifi->clk_spifi);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable spifi clock\n");
- goto dis_clk_reg;
- }
-
spifi->dev = &pdev->dev;
platform_set_drvdata(pdev, spifi);
@@ -431,35 +418,24 @@ static int nxp_spifi_probe(struct platform_device *pdev)
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
dev_err(&pdev->dev, "no SPI flash device to configure\n");
- ret = -ENODEV;
- goto dis_clks;
+ return -ENODEV;
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
- goto dis_clks;
+ return ret;
}
return 0;
-
-dis_clks:
- clk_disable_unprepare(spifi->clk_spifi);
-dis_clk_reg:
- clk_disable_unprepare(spifi->clk_reg);
- return ret;
}
-static int nxp_spifi_remove(struct platform_device *pdev)
+static void nxp_spifi_remove(struct platform_device *pdev)
{
struct nxp_spifi *spifi = platform_get_drvdata(pdev);
mtd_device_unregister(&spifi->nor.mtd);
- clk_disable_unprepare(spifi->clk_spifi);
- clk_disable_unprepare(spifi->clk_reg);
-
- return 0;
}
static const struct of_device_id nxp_spifi_match[] = {
@@ -470,7 +446,7 @@ MODULE_DEVICE_TABLE(of, nxp_spifi_match);
static struct platform_driver nxp_spifi_driver = {
.probe = nxp_spifi_probe,
- .remove = nxp_spifi_remove,
+ .remove = nxp_spifi_remove,
.driver = {
.name = "nxp-spifi",
.of_match_table = nxp_spifi_match,
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 5f29fac8669a..d3f8a78efd3b 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -7,16 +7,18 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
-#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -113,6 +115,9 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->cmd.opcode = (op->cmd.opcode << 8) | ext;
op->cmd.nbytes = 2;
}
+
+ if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16)
+ op->data.swap16 = true;
}
/**
@@ -635,32 +640,26 @@ static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- int ret = -EAGAIN;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return -EAGAIN;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- ret = 0;
-busy:
- mutex_unlock(&nor->lock);
- return ret;
+ return 0;
}
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
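guard(mutex)(&nor->lock) acquires the mutex and, through the compiler's cleanup attribute, releases it automatically on every exit path, which is what lets the busy/unlock gotos above collapse into plain returns. The underlying mechanism, sketched with pthreads (GUARD_MUTEX() is an illustrative stand-in for the kernel's guard() machinery):

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock now; unlock automatically when guard_ leaves scope. */
#define GUARD_MUTEX(m) \
	pthread_mutex_t *guard_ __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int busy;

static int try_start(void)
{
	GUARD_MUTEX(&lock);

	if (busy)
		return -1;	/* the early return still unlocks */

	busy = 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", try_start(), try_start());
	return 0;
}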
static int spi_nor_lock_rdst(struct spi_nor *nor)
@@ -870,21 +869,22 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
- } else if (nor->params->quad_enable) {
+ } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
+ spi_nor_get_protocol_width(nor->write_proto) == 4 &&
+ nor->params->quad_enable) {
/*
* If the Status Register 2 Read command (35h) is not
* supported, we should at least be sure we don't
* change the value of the SR2 Quad Enable bit.
*
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params->quad_enable() call.
+ * When the Quad Enable method is set and the buswidth is 4, we
+ * can safely assume that the value of the QE bit is one, as a
+ * consequence of the nor->params->quad_enable() call.
*
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
+ * According to the JESD216 revB standard, BFPT DWORDS[15],
+ * bits 22:20, the 16-bit Write Status (01h) command is
+ * available just for the cases in which the QE bit is
+ * described in SR2 at BIT(1).
*/
sr_cr[1] = SR2_QUAD_EN_BIT1;
} else {
@@ -1059,24 +1059,32 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
}
/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
+ * spi_nor_erase_die() - Erase the entire die.
* @nor: pointer to 'struct spi_nor'.
+ * @addr: address of the die.
+ * @die_size: size of the die.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
+ bool multi_die = nor->mtd.size != die_size;
int ret;
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));
if (nor->spimem) {
- struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
+ struct spi_mem_op op =
+ SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
+ nor->addr_nbytes, addr, multi_die);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
+ if (multi_die)
+ return -EOPNOTSUPP;
+
ret = spi_nor_controller_ops_write_reg(nor,
SPINOR_OP_CHIP_ERASE,
NULL, 0);
@@ -1149,7 +1157,7 @@ static u8 spi_nor_convert_3to4_erase(u8 opcode)
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
- return !!nor->params->erase_map.uniform_erase_type;
+ return !!nor->params->erase_map.uniform_region.erase_mask;
}
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
@@ -1199,26 +1207,21 @@ static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io)
- goto busy;
+ return false;
rww->ongoing_io = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
nor->rww.ongoing_io = false;
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_device(struct spi_nor *nor)
@@ -1241,32 +1244,27 @@ static void spi_nor_unlock_device(struct spi_nor *nor)
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
mutex_lock(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
rww->ongoing_io = true;
rww->ongoing_rd = true;
rww->ongoing_pe = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
rww->ongoing_pe = false;
- mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
@@ -1303,30 +1301,26 @@ static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_pe = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1335,15 +1329,13 @@ static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
rww->used_banks &= ~BIT(bank);
rww->ongoing_pe = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1380,19 +1372,18 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
@@ -1400,11 +1391,8 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->used_banks |= used_banks;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1413,7 +1401,7 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
@@ -1421,8 +1409,6 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1454,14 +1440,6 @@ static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size
spi_nor_unprep(nor);
}
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
- if (!nor->params->convert_addr)
- return addr;
-
- return nor->params->convert_addr(nor, addr);
-}
-
/*
* Initiate the erasure of a single sector
*/
@@ -1469,8 +1447,6 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
int i;
- addr = spi_nor_convert_addr(nor, addr);
-
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
@@ -1533,7 +1509,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
const struct spi_nor_erase_type *erase;
u32 rem;
int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
* Erase types are ordered by size, with the smallest erase type at
@@ -1541,7 +1516,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
/* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
+ if (!(region->erase_mask & BIT(i)))
continue;
erase = &map->erase_type[i];
@@ -1549,8 +1524,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
continue;
/* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION &&
- region->size <= len)
+ if (region->overlaid && region->size <= len)
return erase;
/* Don't erase more than what the user has asked for. */
@@ -1565,59 +1539,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
return NULL;
}
-static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
/**
* spi_nor_init_erase_cmd() - initialize an erase command
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -1640,7 +1561,7 @@ spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
cmd->opcode = erase->opcode;
cmd->count = 1;
- if (region->offset & SNOR_OVERLAID_REGION)
+ if (region->overlaid)
cmd->size = region->size;
else
cmd->size = erase->size;
@@ -1684,44 +1605,36 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
struct spi_nor_erase_region *region;
struct spi_nor_erase_command *cmd = NULL;
u64 region_end;
+ unsigned int i;
int ret = -EINVAL;
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
+ for (i = 0; i < map->n_regions && len; i++) {
+ region = &map->regions[i];
+ region_end = region->offset + region->size;
- region_end = spi_nor_region_end(region);
-
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- erase->size != cmd->size ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
+ while (len && addr >= region->offset && addr < region_end) {
+ erase = spi_nor_find_best_erase_type(map, region, addr,
+ len);
+ if (!erase)
goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
- cmd->count++;
- }
- addr += cmd->size;
- len -= cmd->size;
+ if (prev_erase != erase || erase->size != cmd->size ||
+ region->overlaid) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
+ len -= cmd->size;
+ addr += cmd->size;
+ prev_erase = erase;
}
-
- prev_erase = erase;
}
return 0;
@@ -1791,6 +1704,51 @@ destroy_erase_cmd_list:
return ret;
}
+static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
+ size_t len, size_t die_size)
+{
+ unsigned long timeout;
+ int ret;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(nor->mtd.size / SZ_2M));
+
+ do {
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ return ret;
+ }
+
+ ret = spi_nor_erase_die(nor, addr, die_size);
+
+ spi_nor_unlock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ return ret;
+
+ addr += die_size;
+ len -= die_size;
+
+ } while (len);
+
+ return 0;
+}
+
/*
 * Erase an address range on the nor chip. The address range may span
 * one or more erase sectors. Return an error if there is a problem erasing.
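The erase timeout in spi_nor_erase_dice() scales linearly with the device size and never drops below the baseline calibrated for a 2 MiB part. A worked example under that formula (illustrative helper mirroring the code above): a 64 MiB flash waits 32 times the baseline, while anything at or below 2 MiB waits exactly the baseline thanks to the max().

static unsigned long erase_timeout_example(u64 flash_size)
{
	/* E.g. 64 MiB / SZ_2M = 32, so the wait is 32x the 2 MiB baseline. */
	return max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		   CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		   (unsigned long)(flash_size / SZ_2M));
}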
@@ -1798,8 +1756,10 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
+ u8 n_dice = nor->params->n_dice;
+ bool multi_die_erase = false;
+ u32 addr, len, rem;
+ size_t die_size;
int ret;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
@@ -1814,39 +1774,22 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr = instr->addr;
len = instr->len;
+ if (n_dice) {
+ die_size = div_u64(mtd->size, n_dice);
+ if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
+ multi_die_erase = true;
+ } else {
+ die_size = mtd->size;
+ }
+
ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
if (ret)
return ret;
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_lock_device(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_write_enable(nor);
- if (ret) {
- spi_nor_unlock_device(nor);
- goto erase_err;
- }
-
- ret = spi_nor_erase_chip(nor);
- spi_nor_unlock_device(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ /* chip (die) erase? */
+ if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
+ multi_die_erase) {
+ ret = spi_nor_erase_dice(nor, addr, len, die_size);
if (ret)
goto erase_err;
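The die-erase fast path above is taken only when both the start address and the length are die-aligned. Note that the mask test is a valid alignment check only if die_size is a power of two, which the code implicitly assumes (and which holds for the multi-die parts in question). A distilled, hypothetical sketch of the check:

static bool die_aligned(u64 addr, u64 len, u64 die_size)
{
	/*
	 * E.g. 2 dice in a 64 MiB part: die_size = 32 MiB, so an erase
	 * at addr = 0 with len = 32 MiB qualifies for the die path.
	 */
	return !(addr & (die_size - 1)) && !(len & (die_size - 1));
}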
@@ -1998,11 +1941,9 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
static const struct spi_nor_manufacturer *manufacturers[] = {
&spi_nor_atmel,
- &spi_nor_catalyst,
&spi_nor_eon,
&spi_nor_esmt,
&spi_nor_everspin,
- &spi_nor_fujitsu,
&spi_nor_gigadevice,
&spi_nor_intel,
&spi_nor_issi,
@@ -2012,19 +1953,11 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
&spi_nor_spansion,
&spi_nor_sst,
&spi_nor_winbond,
- &spi_nor_xilinx,
&spi_nor_xmc,
};
static const struct flash_info spi_nor_generic_flash = {
.name = "spi-nor-generic",
- .n_banks = 1,
- /*
- * JESD216 rev A doesn't specify the page size, therefore we need a
- * sane default.
- */
- .page_size = 256,
- .parse_sfdp = true,
};
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
@@ -2036,8 +1969,8 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
part = &manufacturers[i]->parts[j];
- if (part->id_len &&
- !memcmp(part->id, id, part->id_len)) {
+ if (part->id &&
+ !memcmp(part->id->bytes, id, part->id->len)) {
nor->manufacturer = manufacturers[i];
return part;
}
@@ -2081,6 +2014,76 @@ static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
return info;
}
+/*
+ * On Octal DTR capable flashes, reads cannot start or end at an odd
+ * address in Octal DTR mode. Extra bytes need to be read at the start
+ * or end to make sure both the start address and length remain even.
+ */
+static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ u_char *tmp_buf;
+ size_t tmp_len;
+ loff_t start, end;
+ int ret, bytes_read;
+
+ if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_read_data(nor, from, len, buf);
+ else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
+ return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
+ buf);
+
+ tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ start = round_down(from, 2);
+ end = round_up(from + len, 2);
+
+ /*
+ * Avoid allocating too much memory. The requested read length might be
+ * quite large. Allocating a buffer just as large (slightly bigger, in
+ * fact) would put unnecessary memory pressure on the system.
+ *
+ * For example if the read is from 3 to 1M, then this will read from 2
+ * to 4098. The reads from 4098 to 1M will then not need a temporary
+ * buffer so they can proceed as normal.
+ */
+ tmp_len = min_t(size_t, end - start, PAGE_SIZE);
+
+ ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are read than actually requested, but that number can't be
+ * reported to the calling function or it will confuse its calculations.
+ * Calculate how many of the _requested_ bytes were read.
+ */
+ bytes_read = ret;
+
+ if (from != start)
+ ret -= from - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually read.
+ * For example, if the total length was truncated because of temporary
+ * buffer size limit then the adjustment for the extra bytes at the end
+ * is not needed.
+ */
+ if (start + bytes_read == end)
+ ret -= end - (from + len);
+
+ memcpy(buf, tmp_buf + (from - start), ret);
+out:
+ kfree(tmp_buf);
+ return ret;
+}
+
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -2098,9 +2101,11 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
while (len) {
loff_t addr = from;
- addr = spi_nor_convert_addr(nor, addr);
+ if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
+ else
+ ret = spi_nor_read_data(nor, addr, len, buf);
- ret = spi_nor_read_data(nor, addr, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2124,6 +2129,68 @@ read_err:
}
/*
+ * On Octal DTR capable flashes, writes cannot start or end at an odd address
+ * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
+ * make sure the start address and end address are even. 0xff is used because
+ * on NOR flashes a program operation can only flip bits from 1 to 0, not the
+ * other way round. 0 to 1 flip needs to happen via erases.
+ */
+static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ u8 *tmp_buf;
+ size_t bytes_written;
+ loff_t start, end;
+ int ret;
+
+ if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_write_data(nor, to, len, buf);
+
+ tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ memset(tmp_buf, 0xff, nor->params->page_size);
+
+ start = round_down(to, 2);
+ end = round_up(to + len, 2);
+
+ memcpy(tmp_buf + (to - start), buf, len);
+
+ ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are written than actually requested, but that number can't
+ * be reported to the calling function or it will confuse its
+ * calculations. Calculate how many of the _requested_ bytes were
+ * written.
+ */
+ bytes_written = ret;
+
+ if (to != start)
+ ret -= to - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually
+ * written. For example, if for some reason the controller could only
+ * complete a partial write then the adjustment for the extra bytes at
+ * the end is not needed.
+ */
+ if (start + bytes_written == end)
+ ret -= end - (to + len);
+
+out:
+ kfree(tmp_buf);
+ return ret;
+}
+
+/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
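The 0xff padding in spi_nor_octal_dtr_write() works because NOR programming can only flip bits from 1 to 0: programming 0xff over a byte leaves it untouched. A sketch of the buffer layout built above (illustration only; page_size stands in for nor->params->page_size):

/*
 *   start        to                      to + len    end
 *     | 0xff pad | caller data ......... | 0xff pad |
 */
memset(tmp_buf, 0xff, page_size);		/* fill both pads */
memcpy(tmp_buf + (to - start), buf, len);	/* overlay the payload */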
@@ -2132,7 +2199,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t page_offset, page_remain, i;
+ size_t i;
ssize_t ret;
u32 page_size = nor->params->page_size;
@@ -2145,23 +2212,9 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
-
- /*
- * If page_size is a power of two, the offset can be quickly
- * calculated with an AND operation. On the other cases we
- * need to do a modulus operation (more expensive).
- */
- if (is_power_of_2(page_size)) {
- page_offset = addr & (page_size - 1);
- } else {
- uint64_t aux = addr;
-
- page_offset = do_div(aux, page_size);
- }
+ size_t page_offset = addr & (page_size - 1);
/* the size of data remaining on the first page */
- page_remain = min_t(size_t, page_size - page_offset, len - i);
-
- addr = spi_nor_convert_addr(nor, addr);
+ size_t page_remain = min_t(size_t, page_size - page_offset, len - i);
ret = spi_nor_lock_device(nor);
if (ret)
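Dropping the do_div() fallback above is safe only because page sizes are now guaranteed to be powers of two; the WARN_ON(!is_power_of_2(...)) added to spi_nor_init_params() later in this patch enforces that. For such sizes the mask and the modulus agree, as this minimal sketch shows:

static size_t page_offset(loff_t addr, u32 page_size)
{
	/*
	 * Equivalent to addr % page_size iff page_size is a power of
	 * two, e.g. page_size = 256: addr & 0xff == addr % 256.
	 */
	return addr & (page_size - 1);
}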
@@ -2173,7 +2226,12 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
goto write_err;
}
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
+ buf + i);
+ else
+ ret = spi_nor_write_data(nor, addr, page_remain,
+ buf + i);
spi_nor_unlock_device(nor);
if (ret < 0)
goto write_err;
@@ -2401,6 +2459,16 @@ spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
&params->page_programs[ppidx]))
*hwcaps &= ~BIT(cap);
}
+
+ /* Some SPI controllers might not support the CR read opcode. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ struct spi_mem_op op = SPI_NOR_RDCR_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ if (spi_nor_spimem_check_op(nor, &op))
+ nor->flags |= SNOR_F_NO_READ_CR;
+ }
}
/**
@@ -2438,12 +2506,11 @@ void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size)
{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
+ map->uniform_region.offset = 0;
map->uniform_region.size = flash_size;
+ map->uniform_region.erase_mask = erase_mask;
map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
+ map->n_regions = 1;
}
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
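With the flag bits gone, a uniform map is just one region with a plain offset and an explicit erase mask. For example (hypothetical values), a uniform 16 MiB flash whose erase types 0 (4 KiB) and 2 (64 KiB) both span the whole device would be set up as:

spi_nor_init_uniform_erase_map(map, BIT(0) | BIT(2), SZ_16M);
/* -> one region: .offset = 0, .size = SZ_16M, .erase_mask = 0x5 */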
@@ -2519,13 +2586,6 @@ static int spi_nor_select_pp(struct spi_nor *nor,
/**
* spi_nor_select_uniform_erase() - select optimum uniform erase type
* @map: the erase map of the SPI NOR
- * @wanted_size: the erase type size to search for. Contains the value of
- * info->sector_size, the "small sector" size in case
- * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined or 0 if
- * there is no information about the sector size. The
- * latter is the case if the flash parameters are parsed
- * solely by SFDP, then the largest supported erase type
- * is selected.
*
 * Once the optimum uniform sector erase command is found, disable all the
 * others.
@@ -2533,13 +2593,16 @@ static int spi_nor_select_pp(struct spi_nor *nor,
* Return: pointer to erase type on success, NULL otherwise.
*/
static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
- const u32 wanted_size)
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
{
const struct spi_nor_erase_type *tested_erase, *erase = NULL;
int i;
- u8 uniform_erase_type = map->uniform_erase_type;
+ u8 uniform_erase_type = map->uniform_region.erase_mask;
+ /*
+ * Search for the biggest erase size, except when the kernel is
+ * configured (CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) to prefer 4K erases.
+ */
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
if (!(uniform_erase_type & BIT(i)))
continue;
@@ -2551,10 +2614,11 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
continue;
/*
- * If the current erase size is the one, stop here:
+ * If the current erase size is the 4k one, stop here,
* we have found the right uniform Sector Erase command.
*/
- if (tested_erase->size == wanted_size) {
+ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
+ tested_erase->size == SZ_4K) {
erase = tested_erase;
break;
}
@@ -2572,8 +2636,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
return NULL;
/* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
+ map->uniform_region.erase_mask = BIT(erase - map->erase_type);
return erase;
}
@@ -2582,7 +2645,6 @@ static int spi_nor_select_erase(struct spi_nor *nor)
struct spi_nor_erase_map *map = &nor->params->erase_map;
const struct spi_nor_erase_type *erase = NULL;
struct mtd_info *mtd = &nor->mtd;
- u32 wanted_size = nor->info->sector_size;
int i;
/*
@@ -2593,13 +2655,8 @@ static int spi_nor_select_erase(struct spi_nor *nor)
* manage the SPI flash memory as uniform with a single erase sector
* size, when possible.
*/
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- wanted_size = 4096u;
-#endif
-
if (spi_nor_has_uniform_erase(nor)) {
- erase = spi_nor_select_uniform_erase(map, wanted_size);
+ erase = spi_nor_select_uniform_erase(map);
if (!erase)
return -EINVAL;
nor->erase_opcode = erase->opcode;
@@ -2625,8 +2682,51 @@ static int spi_nor_select_erase(struct spi_nor *nor)
return 0;
}
-static int spi_nor_default_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
+{
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd addr_nbytes cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and would end the transaction with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
+ * avoid this situation.
+ */
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
+ } else {
+ nor->addr_nbytes = 3;
+ }
+
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_nbytes = 4;
+ }
+
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
{
struct spi_nor_flash_parameter *params = nor->params;
u32 ignored_mask, shared_mask;
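The selection in spi_nor_set_addr_nbytes() walks a simple precedence chain: explicit params, then the 8D-8D-8D evenness constraint, then flash_info, then the 3-byte default, finally bumped to 4 bytes for devices larger than 16 MiB. A condensed, hypothetical helper that omits the params/info override sources:

static u8 addr_nbytes_example(u64 size, bool octal_dtr)
{
	u8 n = octal_dtr ? 4 : 3;	/* keep the address phase even */

	if (n == 3 && size > SZ_16M)
		n = 4;			/* > 16 MiB needs 4-byte addressing */
	return n;
}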
@@ -2683,64 +2783,6 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return err;
}
- return 0;
-}
-
-static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
-{
- if (nor->params->addr_nbytes) {
- nor->addr_nbytes = nor->params->addr_nbytes;
- } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
- /*
- * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd addr_nbytes cannot be used because
- * then the address phase would only span a cycle and a half.
- * Half a cycle would be left over. We would then have to start
- * the dummy phase in the middle of a cycle and so too the data
- * phase, and we will end the transaction with half a cycle left
- * over.
- *
- * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
- * avoid this situation.
- */
- nor->addr_nbytes = 4;
- } else if (nor->info->addr_nbytes) {
- nor->addr_nbytes = nor->info->addr_nbytes;
- } else {
- nor->addr_nbytes = 3;
- }
-
- if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_nbytes = 4;
- }
-
- if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
- dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
- nor->addr_nbytes);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static int spi_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- int ret;
-
- if (nor->params->setup)
- ret = nor->params->setup(nor, hwcaps);
- else
- ret = spi_nor_default_setup(nor, hwcaps);
- if (ret)
- return ret;
-
return spi_nor_set_addr_nbytes(nor);
}
@@ -2772,7 +2814,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
- const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
+ const struct flash_info *info = nor->info;
+ const u8 no_sfdp_flags = info->no_sfdp_flags;
u8 i, erase_mask;
if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
@@ -2826,7 +2869,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
i++;
}
erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
+ spi_nor_set_erase_type(&map->erase_type[i],
+ info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
@@ -2844,6 +2888,9 @@ static void spi_nor_init_flags(struct spi_nor *nor)
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
+ if (of_property_read_bool(np, "no-wp"))
+ nor->flags |= SNOR_F_NO_WP;
+
if (flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
@@ -2862,10 +2909,7 @@ static void spi_nor_init_flags(struct spi_nor *nor)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
- if (flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
- if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
+ if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
!nor->controller_ops)
nor->flags |= SNOR_F_RWW;
}
@@ -2897,22 +2941,34 @@ static void spi_nor_init_fixup_flags(struct spi_nor *nor)
* SFDP standard, or where SFDP tables are not defined at all.
* Will replace the spi_nor_manufacturer_init_params() method.
*/
-static void spi_nor_late_init_params(struct spi_nor *nor)
+static int spi_nor_late_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
- nor->manufacturer->fixups->late_init)
- nor->manufacturer->fixups->late_init(nor);
+ nor->manufacturer->fixups->late_init) {
+ ret = nor->manufacturer->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
- if (nor->info->fixups && nor->info->fixups->late_init)
- nor->info->fixups->late_init(nor);
+ /* Needed by some flashes' late_init hooks. */
+ spi_nor_init_flags(nor);
+
+ if (nor->info->fixups && nor->info->fixups->late_init) {
+ ret = nor->info->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
+
+ if (!nor->params->die_erase_opcode)
+ nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
- spi_nor_init_flags(nor);
spi_nor_init_fixup_flags(nor);
/*
@@ -2922,8 +2978,10 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
spi_nor_init_default_locking_ops(nor);
- if (nor->info->n_banks > 1)
- params->bank_size = div64_u64(params->size, nor->info->n_banks);
+ if (params->n_banks > 1)
+ params->bank_size = div_u64(params->size, params->n_banks);
+
+ return 0;
}
/**
@@ -2981,26 +3039,22 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
struct device_node *np = spi_nor_get_flash_node(nor);
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->otp.org = &info->otp_org;
+ params->otp.org = info->otp;
/* Default to 16-bit Write Status (01h) Command */
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->writesize = 1;
- params->size = (u64)info->sector_size * info->n_sectors;
+ params->size = info->size;
params->bank_size = params->size;
- params->page_size = info->page_size;
+ params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
+ params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
+ /* Default to Fast Read for non-DT and enable it if requested by DT. */
+ if (!np || of_property_read_bool(np, "m25p,fast-read"))
params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
/* (Fast) Read settings. */
params->hwcaps.mask |= SNOR_HWCAPS_READ;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
@@ -3070,7 +3124,7 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_default_params(nor);
- if (nor->info->parse_sfdp) {
+ if (spi_nor_needs_sfdp(nor)) {
ret = spi_nor_parse_sfdp(nor);
if (ret) {
dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
@@ -3082,22 +3136,27 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_params_deprecated(nor);
}
- spi_nor_late_init_params(nor);
+ ret = spi_nor_late_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!is_power_of_2(nor->params->page_size)))
+ return -EINVAL;
return 0;
}
-/** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+/** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
int ret;
- if (!nor->params->octal_dtr_enable)
+ if (!nor->params->set_octal_dtr)
return 0;
if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
@@ -3107,7 +3166,7 @@ static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
return 0;
- ret = nor->params->octal_dtr_enable(nor, enable);
+ ret = nor->params->set_octal_dtr(nor, enable);
if (ret)
return ret;
@@ -3149,8 +3208,20 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
struct spi_nor_flash_parameter *params = nor->params;
int ret;
+ if (enable) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ }
+
ret = params->set_4byte_addr_mode(nor, enable);
- if (ret && ret != -ENOTSUPP)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
if (enable) {
@@ -3168,7 +3239,7 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
- err = spi_nor_octal_dtr_enable(nor, true);
+ err = spi_nor_set_octal_dtr(nor, true);
if (err) {
dev_dbg(nor->dev, "octal mode not supported\n");
return err;
@@ -3197,20 +3268,8 @@ static int spi_nor_init(struct spi_nor *nor)
if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
- !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- err = spi_nor_set_4byte_addr_mode(nor, true);
- if (err)
- return err;
- }
+ !(nor->flags & SNOR_F_4B_OPCODES))
+ return spi_nor_set_4byte_addr_mode(nor, true);
return 0;
}
@@ -3241,7 +3300,8 @@ static void spi_nor_soft_reset(struct spi_nor *nor)
ret = spi_mem_exec_op(nor->spimem, &op);
if (ret) {
- dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ if (ret != -EOPNOTSUPP)
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
return;
}
@@ -3270,7 +3330,7 @@ static int spi_nor_suspend(struct mtd_info *mtd)
int ret;
/* Disable octal DTR mode if we enabled it. */
- ret = spi_nor_octal_dtr_enable(nor, false);
+ ret = spi_nor_set_octal_dtr(nor, false);
if (ret)
dev_err(nor->dev, "suspend() failed\n");
@@ -3349,7 +3409,8 @@ static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
- if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ if (manufacturers[i]->parts[j].name &&
+ !strcmp(name, manufacturers[i]->parts[j].name)) {
nor->manufacturer = manufacturers[i];
return &manufacturers[i]->parts[j];
}
@@ -3366,38 +3427,81 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
if (name)
info = spi_nor_match_name(nor, name);
- /* Try to auto-detect if chip name wasn't specified or not found */
- if (!info)
- return spi_nor_detect(nor);
-
/*
- * If caller has specified name of flash model that can normally be
- * detected using JEDEC, let's verify it.
+ * Auto-detect if the chip name wasn't specified or wasn't found, or if
+ * the matched entry has an ID. If the entry supposedly has an ID, run
+ * the auto-detection anyway so the two can be compared later.
*/
- if (name && info->id_len) {
+ if (!info || info->id) {
const struct flash_info *jinfo;
jinfo = spi_nor_detect(nor);
- if (IS_ERR(jinfo)) {
+ if (IS_ERR(jinfo))
return jinfo;
- } else if (jinfo != info) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to loose that
- * information, even if it's not 100% accurate.
- */
+
+ /*
+ * If caller has specified name of flash model that can normally
+ * be detected using JEDEC, let's verify it.
+ */
+ if (info && jinfo != info)
dev_warn(nor->dev, "found %s, expected %s\n",
jinfo->name, info->name);
- info = jinfo;
- }
+
+ /* If info was set before, JEDEC knows better. */
+ info = jinfo;
}
return info;
}
-static void spi_nor_set_mtd_info(struct spi_nor *nor)
+static u32
+spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase_type)
+{
+ int i;
+
+ if (region->overlaid)
+ return region->size;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (region->erase_mask & BIT(i))
+ return erase_type[i].size;
+ }
+
+ return 0;
+}
+
+static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_region *region = map->regions;
+ struct mtd_erase_region_info *mtd_region;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 erasesize, i;
+
+ mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
+ GFP_KERNEL);
+ if (!mtd_region)
+ return -ENOMEM;
+
+ for (i = 0; i < map->n_regions; i++) {
+ erasesize = spi_nor_get_region_erasesize(&region[i],
+ map->erase_type);
+ if (!erasesize)
+ return -EINVAL;
+
+ mtd_region[i].erasesize = erasesize;
+ mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
+ mtd_region[i].offset = region[i].offset;
+ }
+
+ mtd->numeraseregions = map->n_regions;
+ mtd->eraseregions = mtd_region;
+
+ return 0;
+}
+
+static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
struct device *dev = nor->dev;
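Each region of a non-uniform map becomes one mtd_erase_region_info, with erasesize taken from the largest enabled erase type (or the region size for overlaid regions). As a hypothetical example, a 16 MiB flash with a 64 KiB-uniform body and a 64 KiB tail split into 4 KiB blocks would end up as:

/*
 * eraseregions[0] = { .offset = 0x000000, .erasesize = 0x10000,
 *                     .numblocks = 255 };
 * eraseregions[1] = { .offset = 0xff0000, .erasesize = 0x1000,
 *                     .numblocks = 16 };
 */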
@@ -3428,6 +3532,11 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
+
+ if (!spi_nor_has_uniform_erase(nor))
+ return spi_nor_set_mtd_eraseregions(nor);
+
+ return 0;
}
static int spi_nor_hw_reset(struct spi_nor *nor)
@@ -3456,9 +3565,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
{
const struct flash_info *info;
struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
int ret;
- int i;
ret = spi_nor_check(nor);
if (ret)
@@ -3520,27 +3627,13 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
/* No mtd_info fields should be used up to this point. */
- spi_nor_set_mtd_info(nor);
-
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
+ ret = spi_nor_set_mtd_info(nor);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
+ SPI_NOR_MAX_ID_LEN, nor->id);
+
return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
@@ -3608,7 +3701,8 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
static int spi_nor_probe(struct spi_mem *spimem)
{
struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct device *dev = &spi->dev;
+ struct flash_platform_data *data = dev_get_platdata(dev);
struct spi_nor *nor;
/*
* Enable all caps by default. The core will mask them after
@@ -3618,13 +3712,17 @@ static int spi_nor_probe(struct spi_mem *spimem)
char *flash_name;
int ret;
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, dev->of_node);
spi_mem_set_drvdata(spimem, nor);
@@ -3660,9 +3758,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
*/
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
+ devm_kfree(dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 4fb5ff09c63a..16b382d4f04f 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -10,6 +10,13 @@
#include "sfdp.h"
#define SPI_NOR_MAX_ID_LEN 6
+/*
+ * 256 bytes is a sane default for most older flashes. Newer flashes will
+ * have the page size defined within their SFDP tables.
+ */
+#define SPI_NOR_DEFAULT_PAGE_SIZE 256
+#define SPI_NOR_DEFAULT_N_BANKS 1
+#define SPI_NOR_DEFAULT_SECTOR_SIZE SZ_64K
/* Standard SPI NOR flash operations. */
#define SPI_NOR_READID_OP(naddr, ndummy, buf, len) \
@@ -78,9 +85,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_CHIP_ERASE_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \
- SPI_MEM_OP_NO_ADDR, \
+#define SPI_NOR_DIE_ERASE_OP(opcode, addr_nbytes, addr, dice) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(dice ? addr_nbytes : 0, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -132,6 +139,8 @@ enum spi_nor_option_flags {
SNOR_F_SWP_IS_VOLATILE = BIT(13),
SNOR_F_RWW = BIT(14),
SNOR_F_ECC = BIT(15),
+ SNOR_F_NO_WP = BIT(16),
+ SNOR_F_SWAP16 = BIT(17),
};
struct spi_nor_read_command {
@@ -232,27 +241,21 @@ struct spi_nor_erase_command {
/**
* struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
* @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
* @size: the size of the region in bytes.
+ * @erase_mask: bitmask to indicate all the supported erase commands
+ * inside this region. The erase types are sorted in
+ * ascending order with the smallest Erase Type size being
+ * at BIT(0).
+ * @overlaid: whether this region is overlaid.
*/
struct spi_nor_erase_region {
u64 offset;
u64 size;
+ u8 erase_mask;
+ bool overlaid;
};
#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
/**
* struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
@@ -265,17 +268,13 @@ struct spi_nor_erase_region {
* The erase types are sorted in ascending order, with the
* smallest Erase Type size being the first member in the
* erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
+ * @n_regions: number of erase regions.
*/
struct spi_nor_erase_map {
struct spi_nor_erase_region *regions;
struct spi_nor_erase_region uniform_region;
struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
+ unsigned int n_regions;
};
/**
@@ -285,9 +284,9 @@ struct spi_nor_erase_map {
* @is_locked: check if a region of the SPI NOR is completely locked
*/
struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*lock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, u64 len);
};
/**
@@ -352,7 +351,9 @@ struct spi_nor_otp {
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
* command in octal DTR mode.
+ * @n_banks: number of banks.
* @n_dice: number of dice in the flash memory.
+ * @die_erase_opcode: die erase opcode. Defaults to SPINOR_OP_CHIP_ERASE.
* @vreg_offset: volatile register offset for each die.
* @hwcaps: describes the read and page program hardware
* capabilities.
@@ -363,20 +364,14 @@ struct spi_nor_otp {
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
* @otp: SPI NOR OTP info.
- * @octal_dtr_enable: enables SPI NOR octal DTR mode.
+ * @set_octal_dtr: enables or disables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
- * @convert_addr: converts an absolute address into something the flash
- * will understand. Particularly useful when pagesize is
- * not a power-of-2.
- * @setup: (optional) configures the SPI NOR memory. Useful for
- * SPI NOR flashes that have peculiarities to the SPI NOR
- * standard e.g. different opcodes, specific address
- * calculation, page size, etc.
* @ready: (optional) flashes might use a different mechanism
* than reading the status register to indicate they
* are ready for a new command
* @locking_ops: SPI NOR locking methods.
+ * @priv: flash's private data.
*/
struct spi_nor_flash_parameter {
u64 bank_size;
@@ -387,7 +382,9 @@ struct spi_nor_flash_parameter {
u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
+ u8 n_banks;
u8 n_dice;
+ u8 die_erase_opcode;
u32 *vreg_offset;
struct spi_nor_hwcaps hwcaps;
@@ -397,14 +394,13 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
struct spi_nor_otp otp;
- int (*octal_dtr_enable)(struct spi_nor *nor, bool enable);
+ int (*set_octal_dtr)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
- u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
- int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
int (*ready)(struct spi_nor *nor);
const struct spi_nor_locking_ops *locking_ops;
+ void *priv;
};
/**
@@ -413,6 +409,10 @@ struct spi_nor_flash_parameter {
* flash parameters when information provided by the flash_info
* table is incomplete or wrong.
* @post_bfpt: called after the BFPT table has been parsed
+ * @smpt_read_dummy: called while the SMPT table is being parsed. Used to fix
+ * the number of dummy cycles in read register ops.
+ * @smpt_map_id: called after the map ID in the SMPT table has been determined,
+ * in case the map ID is wrong and needs to be fixed.
* @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
* that do not support RDSFDP). Typically used to tweak various
* parameters that could not be extracted by other means (i.e.
@@ -430,26 +430,42 @@ struct spi_nor_fixups {
int (*post_bfpt)(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
+ void (*smpt_read_dummy)(const struct spi_nor *nor, u8 *read_dummy);
+ void (*smpt_map_id)(const struct spi_nor *nor, u8 *map_id);
int (*post_sfdp)(struct spi_nor *nor);
- void (*late_init)(struct spi_nor *nor);
+ int (*late_init)(struct spi_nor *nor);
+};
+
+/**
+ * struct spi_nor_id - SPI NOR flash ID.
+ *
+ * @bytes: the bytes returned by the flash when issuing command 9F. Typically,
+ * the first byte is the manufacturer ID code (see JEP106) and the next
+ * two bytes are a flash part specific ID.
+ * @len: the number of bytes of ID.
+ */
+struct spi_nor_id {
+ const u8 *bytes;
+ u8 len;
};
/**
* struct flash_info - SPI NOR flash_info entry.
- * @name: the name of the flash.
- * @id: the flash's ID bytes. The first three bytes are the
- * JEDIC ID. JEDEC ID zero means "no ID" (mostly older chips).
- * @id_len: the number of bytes of ID.
- * @sector_size: the size listed here is what works with SPINOR_OP_SE, which
- * isn't necessarily called a "sector" by the vendor.
- * @n_sectors: the number of sectors.
- * @n_banks: the number of banks.
- * @page_size: the flash's page size.
+ * @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
+ * older chips).
+ * @name: (obsolete) the name of the flash. Do not set it for new additions.
+ * @size: the size of the flash in bytes. The flash size is one
+ * property parsed by the SFDP. We use it as an indicator
+ * whether we need SFDP parsing for a particular flash.
+ * I.e. non-legacy flash entries in flash_info will have
+ * a size of zero iff SFDP should be used.
+ * @sector_size: (optional) the size listed here is what works with
+ * SPINOR_OP_SE, which isn't necessarily called a "sector" by
+ * the vendor. Defaults to 64k.
+ * @n_banks: (optional) the number of banks. Defaults to 1.
+ * @page_size: (optional) the flash's page size. Defaults to 256.
* @addr_nbytes: number of address bytes to send.
*
- * @parse_sfdp: true when flash supports SFDP tables. The false value has no
- * meaning. If one wants to skip the SFDP tables, one should
- * instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
* @flags: flags that indicate support that is not defined by the
* JESD216 standard in its SFDP tables. Flag meanings:
* SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
@@ -465,8 +481,6 @@ struct spi_nor_fixups {
* Usually these will power-up in a write-protected
* state.
* SPI_NOR_NO_ERASE: no erase command needed.
- * NO_CHIP_ERASE: chip does not support chip erase.
- * SPI_NOR_NO_FR: can't do fastread.
* SPI_NOR_QUAD_PP: flash supports Quad Input Page Program.
* SPI_NOR_RWW: flash supports reads while write.
*
@@ -500,15 +514,13 @@ struct spi_nor_fixups {
*/
struct flash_info {
char *name;
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
+ const struct spi_nor_id *id;
+ size_t size;
unsigned sector_size;
- u16 n_sectors;
u16 page_size;
u8 n_banks;
u8 addr_nbytes;
- bool parse_sfdp;
u16 flags;
#define SPI_NOR_HAS_LOCK BIT(0)
#define SPI_NOR_HAS_TB BIT(1)
@@ -517,10 +529,8 @@ struct flash_info {
#define SPI_NOR_BP3_SR_BIT6 BIT(4)
#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
#define SPI_NOR_NO_ERASE BIT(6)
-#define NO_CHIP_ERASE BIT(7)
-#define SPI_NOR_NO_FR BIT(8)
-#define SPI_NOR_QUAD_PP BIT(9)
-#define SPI_NOR_RWW BIT(10)
+#define SPI_NOR_QUAD_PP BIT(8)
+#define SPI_NOR_RWW BIT(9)
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
@@ -537,70 +547,23 @@ struct flash_info {
u8 mfr_flags;
- const struct spi_nor_otp_organization otp_org;
+ const struct spi_nor_otp_organization *otp;
const struct spi_nor_fixups *fixups;
};
-#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
-#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)
-
-#define SPI_NOR_ID(_jedec_id, _ext_id) \
- .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \
- .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))
-
-#define SPI_NOR_ID6(_jedec_id, _ext_id) \
- .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \
- .id_len = 6
-
-#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors, _n_banks) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .n_banks = (_n_banks)
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
- SPI_NOR_ID((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
+#define SNOR_ID(...) \
+ (&(const struct spi_nor_id){ \
+ .bytes = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ })
-#define INFOB(_jedec_id, _ext_id, _sector_size, _n_sectors, _n_banks) \
- SPI_NOR_ID((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), (_n_banks)),
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
- SPI_NOR_ID6((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .n_banks = 1, \
- .addr_nbytes = (_addr_nbytes), \
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
-
-#define OTP_INFO(_len, _n_regions, _base, _offset) \
- .otp_org = { \
- .len = (_len), \
- .base = (_base), \
- .offset = (_offset), \
- .n_regions = (_n_regions), \
- },
-
-#define PARSE_SFDP \
- .parse_sfdp = true, \
-
-#define FLAGS(_flags) \
- .flags = (_flags), \
-
-#define NO_SFDP_FLAGS(_no_sfdp_flags) \
- .no_sfdp_flags = (_no_sfdp_flags), \
-
-#define FIXUP_FLAGS(_fixup_flags) \
- .fixup_flags = (_fixup_flags), \
-
-#define MFR_FLAGS(_mfr_flags) \
- .mfr_flags = (_mfr_flags), \
+#define SNOR_OTP(_len, _n_regions, _base, _offset) \
+ (&(const struct spi_nor_otp_organization){ \
+ .len = (_len), \
+ .base = (_base), \
+ .offset = (_offset), \
+ .n_regions = (_n_regions), \
+ })
/**
* struct spi_nor_manufacturer - SPI NOR manufacturer object
@@ -628,11 +591,9 @@ struct sfdp {
/* Manufacturer drivers. */
extern const struct spi_nor_manufacturer spi_nor_atmel;
-extern const struct spi_nor_manufacturer spi_nor_catalyst;
extern const struct spi_nor_manufacturer spi_nor_eon;
extern const struct spi_nor_manufacturer spi_nor_esmt;
extern const struct spi_nor_manufacturer spi_nor_everspin;
-extern const struct spi_nor_manufacturer spi_nor_fujitsu;
extern const struct spi_nor_manufacturer spi_nor_gigadevice;
extern const struct spi_nor_manufacturer spi_nor_intel;
extern const struct spi_nor_manufacturer spi_nor_issi;
@@ -642,7 +603,6 @@ extern const struct spi_nor_manufacturer spi_nor_st;
extern const struct spi_nor_manufacturer spi_nor_spansion;
extern const struct spi_nor_manufacturer spi_nor_sst;
extern const struct spi_nor_manufacturer spi_nor_winbond;
-extern const struct spi_nor_manufacturer spi_nor_xilinx;
extern const struct spi_nor_manufacturer spi_nor_xmc;
extern const struct attribute_group *spi_nor_sysfs_groups[];
@@ -704,8 +664,6 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
u8 opcode);
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region);
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size);
@@ -731,6 +689,22 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
return container_of(mtd, struct spi_nor, mtd);
}
+/**
+ * spi_nor_needs_sfdp() - check if SFDP parsing is used for this flash.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: true if SFDP parsing is needed.
+ */
+static inline bool spi_nor_needs_sfdp(const struct spi_nor *nor)
+{
+ /*
+ * The flash size is one property parsed by the SFDP. We use it as an
+ * indicator whether we need SFDP parsing for a particular flash. I.e.
+ * non-legacy flash entries in flash_info will have a size of zero iff
+ * SFDP should be used.
+ */
+ return !nor->info->size;
+}
+
#ifdef CONFIG_DEBUG_FS
void spi_nor_debugfs_register(struct spi_nor *nor);
void spi_nor_debugfs_shutdown(void);
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index e11536fffe0f..fa6956144d2e 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -27,6 +27,7 @@ static const char *const snor_f_names[] = {
SNOR_F_NAME(SWP_IS_VOLATILE),
SNOR_F_NAME(RWW),
SNOR_F_NAME(ECC),
+ SNOR_F_NAME(NO_WP),
};
#undef SNOR_F_NAME
@@ -77,10 +78,10 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
struct spi_nor *nor = s->private;
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *erase_map = &params->erase_map;
- struct spi_nor_erase_region *region;
+ struct spi_nor_erase_region *region = erase_map->regions;
const struct flash_info *info = nor->info;
char buf[16], *str;
- int i;
+ unsigned int i;
seq_printf(s, "name\t\t%s\n", info->name);
seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id);
@@ -137,26 +138,24 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf);
+ seq_printf(s, " %02x (%s)\n", nor->params->die_erase_opcode, buf);
}
seq_puts(s, "\nsector map\n");
- seq_puts(s, " region (in hex) | erase mask | flags\n");
+ seq_puts(s, " region (in hex) | erase mask | overlaid\n");
seq_puts(s, " ------------------+------------+----------\n");
- for (region = erase_map->regions;
- region;
- region = spi_nor_region_next(region)) {
- u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK;
- u64 end = start + region->size - 1;
+ for (i = 0; i < erase_map->n_regions; i++) {
+ u64 start = region[i].offset;
+ u64 end = start + region[i].size - 1;
+ u8 erase_mask = region[i].erase_mask;
seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n",
start, end,
- flags & BIT(0) ? '0' : ' ',
- flags & BIT(1) ? '1' : ' ',
- flags & BIT(2) ? '2' : ' ',
- flags & BIT(3) ? '3' : ' ',
- flags & SNOR_OVERLAID_REGION ? "overlaid" : "");
+ erase_mask & BIT(0) ? '0' : ' ',
+ erase_mask & BIT(1) ? '1' : ' ',
+ erase_mask & BIT(2) ? '2' : ' ',
+ erase_mask & BIT(3) ? '3' : ' ',
+ region[i].overlaid ? "yes" : "no");
}
return 0;
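With the flag bits replaced by an explicit mask and an overlaid flag, a two-region map would render along these lines (values illustrative, following the format strings above):

 region (in hex) | erase mask | overlaid
 ------------------+------------+----------
 00000000-00feffff | [ 12 ]     | no
 00ff0000-00ffffff | [0   ]     | yes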
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
index 50a11053711f..c1ddf662f782 100644
--- a/drivers/mtd/spi-nor/eon.c
+++ b/drivers/mtd/spi-nor/eon.c
@@ -9,26 +9,60 @@
#include "core.h"
static const struct flash_info eon_nor_parts[] = {
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512)
- PARSE_SFDP },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x1c, 0x20, 0x16),
+ .name = "en25p32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x20, 0x17),
+ .name = "en25p64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x14),
+ .name = "en25q80a",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x16),
+ .name = "en25q32b",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x17),
+ .name = "en25q64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1c, 0x31, 0x16),
+ .name = "en25f32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .name = "en25s64",
+ .id = SNOR_ID(0x1c, 0x38, 0x17),
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x15),
+ .name = "en25qh16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x16),
+ .name = "en25qh32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x17),
+ .name = "en25qh64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x18),
+ .name = "en25qh128",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x19),
+ .name = "en25qh256",
+ },
};
const struct spi_nor_manufacturer spi_nor_eon = {
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index fcc3b0e7cda9..089fcd1aa794 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -9,16 +9,25 @@
#include "core.h"
static const struct flash_info esmt_nor_parts[] = {
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x8c, 0x20, 0x16),
+ .name = "f25l32pa",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x8c, 0x41, 0x16),
+ .name = "f25l32qa-2s",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x8c, 0x41, 0x17),
+ .name = "f25l64qa",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }
};
const struct spi_nor_manufacturer spi_nor_esmt = {
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
index 84a07c2e0536..add37104d673 100644
--- a/drivers/mtd/spi-nor/everspin.c
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -9,15 +9,44 @@
#include "core.h"
static const struct flash_info everspin_nor_parts[] = {
- /* Everspin */
- { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
- { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
+ {
+ .name = "mr25h128",
+ .size = SZ_16K,
+ .sector_size = SZ_16K,
+ .addr_nbytes = 2,
+ .flags = SPI_NOR_NO_ERASE,
+ }, {
+ .name = "mr25h256",
+ .size = SZ_32K,
+ .sector_size = SZ_32K,
+ .addr_nbytes = 2,
+ .flags = SPI_NOR_NO_ERASE,
+ }, {
+ .name = "mr25h10",
+ .size = SZ_128K,
+ .sector_size = SZ_128K,
+ .flags = SPI_NOR_NO_ERASE,
+ }, {
+ .name = "mr25h40",
+ .size = SZ_512K,
+ .sector_size = SZ_512K,
+ .flags = SPI_NOR_NO_ERASE,
+ }
+};
+
+static void everspin_nor_default_init(struct spi_nor *nor)
+{
+ /* Everspin FRAMs don't support the fast read opcode. */
+ nor->params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+}
+
+static const struct spi_nor_fixups everspin_nor_fixups = {
+ .default_init = everspin_nor_default_init,
};
const struct spi_nor_manufacturer spi_nor_everspin = {
.name = "everspin",
.parts = everspin_nor_parts,
.nparts = ARRAY_SIZE(everspin_nor_parts),
+ .fixups = &everspin_nor_fixups,
};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
deleted file mode 100644
index 69cffc5c73ef..000000000000
--- a/drivers/mtd/spi-nor/fujitsu.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info fujitsu_nor_parts[] = {
- /* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
- FLAGS(SPI_NOR_NO_ERASE) },
-};
-
-const struct spi_nor_manufacturer spi_nor_fujitsu = {
- .name = "fujitsu",
- .parts = fujitsu_nor_parts,
- .nparts = ARRAY_SIZE(fujitsu_nor_parts),
-};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
index d57ddaf1525b..ef1edd0add70 100644
--- a/drivers/mtd/spi-nor/gigadevice.c
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -34,39 +34,55 @@ static const struct spi_nor_fixups gd25q256_fixups = {
};
static const struct flash_info gigadevice_nor_parts[] = {
- { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
- PARSE_SFDP
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- .fixups = &gd25q256_fixups },
+ {
+ .id = SNOR_ID(0xc8, 0x40, 0x15),
+ .name = "gd25q16",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x16),
+ .name = "gd25q32",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x17),
+ .name = "gd25q64",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x18),
+ .name = "gd25q128",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x19),
+ .name = "gd25q256",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6,
+ .fixups = &gd25q256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x16),
+ .name = "gd25lq32",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x17),
+ .name = "gd25lq64c",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x18),
+ .name = "gd25lq128d",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ },
};
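As a worked example of the conversion pattern in this hunk: the old positional INFO(jedec_id, ext_id, sector_size, n_sectors) macro packs the same data the designated initializers now spell out. For the first GigaDevice entry, the packed JEDEC ID 0xc84015 becomes per-byte SNOR_ID() arguments, 32 sectors of 64 KiB become .size = SZ_2M, and .sector_size is omitted because 64 KiB is the default:

	/* Old style, removed above. */
	{ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* New style, added above: 32 * 64 KiB = 2 MiB = SZ_2M. */
	{
		.id = SNOR_ID(0xc8, 0x40, 0x15),
		.name = "gd25q16",
		.size = SZ_2M,
		.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
	},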
const struct spi_nor_manufacturer spi_nor_gigadevice = {
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
index 9179f2d09cba..f647359fee7a 100644
--- a/drivers/mtd/spi-nor/intel.c
+++ b/drivers/mtd/spi-nor/intel.c
@@ -9,13 +9,22 @@
#include "core.h"
static const struct flash_info intel_nor_parts[] = {
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ {
+ .id = SNOR_ID(0x89, 0x89, 0x11),
+ .name = "160s33b",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }, {
+ .id = SNOR_ID(0x89, 0x89, 0x12),
+ .name = "320s33b",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }, {
+ .id = SNOR_ID(0x89, 0x89, 0x13),
+ .name = "640s33b",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }
};
const struct spi_nor_manufacturer spi_nor_intel = {
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index 400e2b42f45a..18d9a00aa22e 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -29,7 +29,7 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
-static void pm25lv_nor_late_init(struct spi_nor *nor)
+static int pm25lv_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
int i;
@@ -38,6 +38,8 @@ static void pm25lv_nor_late_init(struct spi_nor *nor)
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (map->erase_type[i].size == 4096)
map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+
+ return 0;
}
static const struct spi_nor_fixups pm25lv_nor_fixups = {
@@ -45,48 +47,86 @@ static const struct spi_nor_fixups pm25lv_nor_fixups = {
};
static const struct flash_info issi_nor_parts[] = {
- /* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
- PARSE_SFDP
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 0, 0)
- PARSE_SFDP
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- FLAGS(SPI_NOR_QUAD_PP)
- .fixups = &is25lp256_fixups },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K)
+ {
+ .name = "pm25lv512",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
.fixups = &pm25lv_nor_fixups
- },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K)
+ }, {
+ .name = "pm25lv010",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ .no_sfdp_flags = SECT_4K,
.fixups = &pm25lv_nor_fixups
- },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
+ }, {
+ .id = SNOR_ID(0x7f, 0x9d, 0x20),
+ .name = "is25cd512",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x7f, 0x9d, 0x46),
+ .name = "pm25lq032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x9d, 0x40, 0x13),
+ .name = "is25lq040b",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x14),
+ .name = "is25lp080d",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x15),
+ .name = "is25lp016d",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x16),
+ .name = "is25lp032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x17),
+ .name = "is25lp064",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x18),
+ .name = "is25lp128",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x19),
+ .name = "is25lp256",
+ .fixups = &is25lp256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x16),
+ .name = "is25wp032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x17),
+		.name = "is25wp064",
+		.size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x18),
+ .name = "is25wp128",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x19),
+ .name = "is25wp256",
+ .flags = SPI_NOR_QUAD_PP,
+ .fixups = &is25lp256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }
};
static void issi_nor_default_init(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 04888258e891..e97f5cbd9aad 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -8,6 +8,23 @@
#include "core.h"
+#define MXIC_NOR_OP_RD_CR2 0x71 /* Read configuration register 2 opcode */
+#define MXIC_NOR_OP_WR_CR2 0x72 /* Write configuration register 2 opcode */
+#define MXIC_NOR_ADDR_CR2_MODE 0x00000000 /* CR2 address for setting spi/sopi/dopi mode */
+#define MXIC_NOR_ADDR_CR2_DC 0x00000300 /* CR2 address for setting dummy cycles */
+#define MXIC_NOR_REG_DOPI_EN 0x2 /* Enable Octal DTR */
+#define MXIC_NOR_REG_SPI_EN 0x0 /* Enable SPI */
+
+/* Convert dummy cycles to bit pattern */
+#define MXIC_NOR_REG_DC(p) \
+ ((20 - (p)) >> 1)
+
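A quick user-space sanity check of the dummy-cycle encoding above: 20 cycles map to pattern 0, and every two fewer cycles increment the pattern by one (the cycle counts here are chosen purely for illustration):

	#include <assert.h>

	#define MXIC_NOR_REG_DC(p)	((20 - (p)) >> 1)

	int main(void)
	{
		assert(MXIC_NOR_REG_DC(20) == 0);	/* 20 dummy cycles */
		assert(MXIC_NOR_REG_DC(14) == 3);	/* 14 dummy cycles */
		assert(MXIC_NOR_REG_DC(10) == 5);	/* 10 dummy cycles */
		return 0;
	}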
+#define MXIC_NOR_WR_CR2(addr, ndata, buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(MXIC_NOR_OP_WR_CR2, 0), \
+ SPI_MEM_OP_ADDR(4, addr, 0), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
+
static int
mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -28,92 +45,285 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
+static int
+macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /* PP_1_1_4_4B is supported but missing in 4BAIT. */
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4);
+
+ return 0;
+}
+
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /*
+	 * The SFDP of the MX25L3255E is JESD216, whose BFPT does not include
+	 * the Quad Enable bit Requirement. As a result, BFPT parsing does not
+	 * set the quad_enable method to spi_nor_sr1_bit6_quad_enable, so
+	 * correct the setting here in late_init.
+ */
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+ /*
+	 * The MX25L3255E also supports 1-4-4 page program in 3-byte address
+	 * mode. Since the 3-byte address 1-4-4 page program is not defined in
+	 * SFDP, it too must be configured in late_init.
+ */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups macronix_qpp4b_fixups = {
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+ .late_init = mx25l3255e_late_init_fixups,
};
static const struct flash_info macronix_nor_parts[] = {
- /* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
- { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4)
- PARSE_SFDP
- FLAGS(SPI_NOR_RWW) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
- { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ {
+ .id = SNOR_ID(0xc2, 0x20, 0x10),
+ .name = "mx25l512e",
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x12),
+ .name = "mx25l2005a",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x13),
+ .name = "mx25l4005a",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x14),
+ .name = "mx25l8005",
+ .size = SZ_1M,
+ }, {
+ /* MX25L1606E */
+ .id = SNOR_ID(0xc2, 0x20, 0x15),
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x16),
+ .name = "mx25l3205d",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x17),
+ .name = "mx25l6405d",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ /* MX25L12805D */
+ .id = SNOR_ID(0xc2, 0x20, 0x18),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
+ }, {
+ /* MX25L25635E, MX25L25645G */
+ .id = SNOR_ID(0xc2, 0x20, 0x19),
+		.fixups = &mx25l25635_fixups,
+ }, {
+ /* MX66L51235F */
+ .id = SNOR_ID(0xc2, 0x20, 0x1a),
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66L1G45G */
+ .id = SNOR_ID(0xc2, 0x20, 0x1b),
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66L2G45G */
+ .id = SNOR_ID(0xc2, 0x20, 0x1c),
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ .id = SNOR_ID(0xc2, 0x23, 0x14),
+ .name = "mx25v8035f",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x32),
+ .name = "mx25u2033e",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x33),
+ .name = "mx25u4035",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x34),
+ .name = "mx25u8035",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x36),
+ .name = "mx25u3235f",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x37),
+ .name = "mx25u6435f",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x38),
+ .name = "mx25u12835f",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ /* MX25U51245G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3a),
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66U1G45G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3b),
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66U2G45G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3c),
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x18),
+ .name = "mx25l12855e",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x19),
+ .name = "mx25l25655e",
+ .size = SZ_32M,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x1b),
+ .name = "mx66l1g55g",
+ .size = SZ_128M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x28, 0x15),
+ .name = "mx25r1635f",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x28, 0x16),
+ .name = "mx25r3235f",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ /* MX25UW51245G */
+ .id = SNOR_ID(0xc2, 0x81, 0x3a),
+ .n_banks = 4,
+ .flags = SPI_NOR_RWW,
+ }, {
+ /* MX25L3255E */
+ .id = SNOR_ID(0xc2, 0x9e, 0x16),
+ .fixups = &mx25l3255e_fixups,
+ },
+ /*
+	 * This spares us from adding new flash entries for flashes that can be
+ * initialized solely based on the SFDP data, but still need the
+ * manufacturer hooks to set parameters that can't be discovered at SFDP
+ * parsing time.
+ */
+ { .id = SNOR_ID(0xc2) }
};
+static int macronix_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf, i;
+ int ret;
+
+	/* Convert the SFDP-parsed dummy cycle count to the register bit pattern. */
+ buf[0] = MXIC_NOR_REG_DC(nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].num_wait_states);
+ op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_DC, 1, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ /* Set the octal and DTR enable bits. */
+ buf[0] = MXIC_NOR_REG_DOPI_EN;
+ op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_MODE, 1, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, nor->addr_nbytes, 4, buf,
+ SNOR_PROTO_8_8_8_DTR);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+	/* In 8D-8D-8D mode a Macronix Read ID returns each byte twice: A-A-B-B-C-C. */
+ for (i = 0; i < nor->info->id->len; i++)
+ if (buf[i * 2] != buf[(i * 2) + 1] || buf[i * 2] != nor->info->id->bytes[i])
+ return -EINVAL;
+
+ return 0;
+}
+
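To make the doubled-byte check above concrete: for the 3-byte ID c2 81 3a, an 8D-8D-8D Read ID returns c2 c2 81 81 3a 3a, so each pair must match itself and the expected byte. A standalone sketch of the same comparison:

	#include <stdbool.h>
	#include <stddef.h>

	static bool id_matches_dtr_readback(const unsigned char *buf,
					    const unsigned char *id, size_t len)
	{
		/* In 8D-8D-8D mode every ID byte arrives twice: A-A-B-B-C-C. */
		for (size_t i = 0; i < len; i++)
			if (buf[i * 2] != buf[i * 2 + 1] || buf[i * 2] != id[i])
				return false;
		return true;
	}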
+static int macronix_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. Since there is no register at the
+ * next location, just initialize the value to 0 and let the
+ * transaction go on.
+ */
+ buf[0] = MXIC_NOR_REG_SPI_EN;
+ buf[1] = 0x0;
+ op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_MODE, 2, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int macronix_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+ return enable ? macronix_nor_octal_dtr_en(nor) : macronix_nor_octal_dtr_dis(nor);
+}
+
static void macronix_nor_default_init(struct spi_nor *nor)
{
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
-static void macronix_nor_late_init(struct spi_nor *nor)
+static int macronix_nor_late_init(struct spi_nor *nor)
{
if (!nor->params->set_4byte_addr_mode)
nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+ nor->params->set_octal_dtr = macronix_nor_set_octal_dtr;
+
+ return 0;
}
static const struct spi_nor_fixups macronix_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 4b919756a205..88033384a71e 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -11,6 +11,7 @@
/* flash_info mfr_flag. Used to read proprietary FSR register. */
#define USE_FSR BIT(0)
+#define SPINOR_OP_MT_DIE_ERASE 0xc4 /* Chip (die) erase opcode */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
@@ -78,7 +79,7 @@ static int micron_st_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -114,21 +115,48 @@ static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
}
-static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? micron_st_nor_octal_dtr_en(nor) :
micron_st_nor_octal_dtr_dis(nor);
}
-static void mt35xu512aba_default_init(struct spi_nor *nor)
+static int micron_st_nor_four_die_late_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = micron_st_nor_octal_dtr_enable;
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 4;
+
+ /*
+	 * Unfortunately the die erase opcode has no 4-byte counterpart for
+	 * these flashes, and the SFDP 4BAIT table does not cover die erase
+	 * either. We are forced to enter 4-byte address mode in order to
+	 * benefit from die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
+}
+
+static int micron_st_nor_two_die_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 2;
+
+ /*
+	 * Unfortunately the die erase opcode has no 4-byte counterpart for
+	 * these flashes, and the SFDP 4BAIT table does not cover die erase
+	 * either. We are forced to enter 4-byte address mode in order to
+	 * benefit from die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
}
static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
@@ -154,153 +182,332 @@ static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
}
static const struct spi_nor_fixups mt35xu512aba_fixups = {
- .default_init = mt35xu512aba_default_init,
.post_sfdp = mt35xu512aba_post_sfdp_fixup,
};
+static const struct spi_nor_fixups mt35xu01gbba_fixups = {
+ .post_sfdp = mt35xu512aba_post_sfdp_fixup,
+ .late_init = micron_st_nor_two_die_late_init,
+};
+
static const struct flash_info micron_nor_parts[] = {
- { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
- SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
- MFR_FLAGS(USE_FSR)
- .fixups = &mt35xu512aba_fixups
- },
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
+ {
+ /* MT35XU512ABA */
+ .id = SNOR_ID(0x2c, 0x5b, 0x1a),
+ .mfr_flags = USE_FSR,
+ .fixup_flags = SPI_NOR_IO_MODE_EN_VOLATILE,
+ .fixups = &mt35xu512aba_fixups,
+ }, {
+ /* MT35XU01GBBA */
+ .id = SNOR_ID(0x2c, 0x5b, 0x1b),
+ .mfr_flags = USE_FSR,
+ .fixup_flags = SPI_NOR_IO_MODE_EN_VOLATILE,
+ .fixups = &mt35xu01gbba_fixups,
+ }, {
+ /*
+ * The MT35XU02GCBA flash device does not support chip erase,
+ * according to its datasheet. It supports die erase, which
+ * means the current driver implementation will likely need to
+ * be converted to use die erase. Furthermore, similar to the
+ * MT35XU01GBBA, the SPI_NOR_IO_MODE_EN_VOLATILE flag probably
+ * needs to be enabled.
+ *
+ * TODO: Fix these and test on real hardware.
+ */
+ .id = SNOR_ID(0x2c, 0x5b, 0x1c),
+ .name = "mt35xu02g",
+ .sector_size = SZ_128K,
+ .size = SZ_256M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_OCTAL_READ,
+ .mfr_flags = USE_FSR,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
},
};
-static const struct flash_info st_nor_parts[] = {
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
+static int mt25qu512a_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ return 0;
+}
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
+static const struct spi_nor_fixups mt25qu512a_fixups = {
+ .post_bfpt = mt25qu512a_post_bfpt_fixup,
+};
+
+static const struct spi_nor_fixups n25q00_fixups = {
+ .late_init = micron_st_nor_four_die_late_init,
+};
+
+static const struct spi_nor_fixups mt25q01_fixups = {
+ .late_init = micron_st_nor_two_die_late_init,
+};
+
+static const struct spi_nor_fixups mt25q02_fixups = {
+ .late_init = micron_st_nor_four_die_late_init,
+};
+
+static const struct flash_info st_nor_parts[] = {
+ {
+ .name = "m25p05-nonjedec",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ }, {
+ .name = "m25p10-nonjedec",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ }, {
+ .name = "m25p20-nonjedec",
+ .size = SZ_256K,
+ }, {
+ .name = "m25p40-nonjedec",
+ .size = SZ_512K,
+ }, {
+ .name = "m25p80-nonjedec",
+ .size = SZ_1M,
+ }, {
+ .name = "m25p16-nonjedec",
+ .size = SZ_2M,
+ }, {
+ .name = "m25p32-nonjedec",
+ .size = SZ_4M,
+ }, {
+ .name = "m25p64-nonjedec",
+ .size = SZ_8M,
+ }, {
+ .name = "m25p128-nonjedec",
+ .sector_size = SZ_256K,
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x10),
+ .name = "m25p05",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x11),
+ .name = "m25p10",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x12),
+ .name = "m25p20",
+ .size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x13),
+ .name = "m25p40",
+ .size = SZ_512K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x14),
+ .name = "m25p80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x15),
+ .name = "m25p16",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x16),
+ .name = "m25p32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x17),
+ .name = "m25p64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x18),
+ .name = "m25p128",
+ .sector_size = SZ_256K,
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x11),
+ .name = "m45pe10",
+ .size = SZ_128K,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x14),
+ .name = "m45pe80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x15),
+ .name = "m45pe16",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x20, 0x63, 0x16),
+ .name = "m25px32-s1",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x14),
+ .name = "m25px80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x15),
+ .name = "m25px16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x16),
+ .name = "m25px32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x17),
+ .name = "m25px64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x20, 0x73, 0x16),
+ .name = "m25px32-s0",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x12),
+ .name = "m25pe20",
+ .size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x14),
+ .name = "m25pe80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x15),
+ .name = "m25pe16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x16),
+ .name = "n25q032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x17),
+ .name = "n25q064",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x18),
+ .name = "n25q128a13",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x19, 0x10, 0x44, 0x00),
+ .name = "mt25ql256a",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x19),
+ .name = "n25q256a",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x20, 0x10, 0x44, 0x00),
+ .name = "mt25ql512a",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x20),
+ .name = "n25q512ax3",
+ .size = SZ_64M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x21),
+ .name = "n25q00",
+ .size = SZ_128M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x22),
+ .name = "mt25ql02g",
+ .size = SZ_256M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x15),
+ .name = "n25q016a",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x16),
+ .name = "n25q032a",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x17),
+ .name = "n25q064a",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x18),
+ .name = "n25q128a11",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x19, 0x10, 0x44, 0x00),
+ .name = "mt25qu256a",
+ .size = SZ_32M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x19),
+ .name = "n25q256ax1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x20, 0x10, 0x44, 0x00),
+ .name = "mt25qu512a",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25qu512a_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x20),
+ .name = "n25q512a",
+ .size = SZ_64M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x21, 0x10, 0x44, 0x00),
+ .name = "mt25qu01g",
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25q01_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x21),
+ .name = "n25q00a",
+ .size = SZ_128M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x22),
+ .name = "mt25qu02g",
+ .size = SZ_256M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
+ }
};
/**
@@ -429,7 +636,7 @@ static void micron_st_nor_default_init(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
-static void micron_st_nor_late_init(struct spi_nor *nor)
+static int micron_st_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
@@ -438,6 +645,10 @@ static void micron_st_nor_late_init(struct spi_nor *nor)
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
+
+ params->set_octal_dtr = micron_st_nor_set_octal_dtr;
+
+ return 0;
}
static const struct spi_nor_fixups micron_st_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 9a729aa3452d..7d0b145d78d8 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -6,6 +6,7 @@
*/
#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index b3b11dfed789..a8324c2da0ac 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -389,19 +389,15 @@ static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
+ u8 sorted_erase_mask;
+ unsigned int i;
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
+ for (i = 0; i < map->n_regions; i++) {
+ sorted_erase_mask =
+ spi_nor_sort_erase_mask(map, region[i].erase_mask);
/* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
+ region[i].erase_mask = sorted_erase_mask;
}
}
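The rewritten loop reflects the new erase-region layout: the erase-type bits and the last/overlaid markers used to live in the low bits of region->offset (behind SNOR_ERASE_TYPE_MASK), while regions now carry explicit fields and the map stores an explicit region count. A sketch of the shape this code assumes, with field names taken from the hunks here (the authoritative definition lives in the driver's core.h):

	struct spi_nor_erase_region {
		u64	offset;		/* byte offset, no flag bits */
		u64	size;
		u8	erase_mask;	/* bitmask of supported erase types */
		bool	overlaid;
	};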
@@ -446,6 +442,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
u32 dword;
u16 half;
u8 erase_mask;
+ u8 wait_states, mode_clocks, opcode;
/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
@@ -553,8 +550,6 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
* selecting the uniform erase.
*/
spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
@@ -631,6 +626,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+ /* Parse 1-1-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_1_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_1_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Parse 1-8-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_8_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_8_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_8_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_8_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_8_8);
+ }
+
/* 8D-8D-8D command extension. */
switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
case BFPT_DWORD18_CMD_EXT_REP:
@@ -650,6 +671,10 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
return -EOPNOTSUPP;
}
+ /* Byte order in 8D-8D-8D mode */
+ if (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_BYTE_ORDER_SWAPPED)
+ nor->flags |= SNOR_F_SWAP16;
+
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
}
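SNOR_F_SWAP16 records BFPT DWORD18 bit 31: when set, data in 8D-8D-8D mode is transferred with the two bytes of each 16-bit word exchanged, and consumers must undo the swap. A user-space illustration of the transform (the kernel itself would use its swab16()-based helpers):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	static void swap16_buf(uint8_t *buf, size_t len)
	{
		/* Exchange each byte pair: B0 B1 B2 B3 -> B1 B0 B3 B2. */
		for (size_t i = 0; i + 1 < len; i += 2) {
			uint8_t tmp = buf[i];

			buf[i] = buf[i + 1];
			buf[i + 1] = tmp;
		}
	}

	int main(void)
	{
		uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };

		swap16_buf(buf, sizeof(buf));
		assert(buf[0] == 0x22 && buf[1] == 0x11);
		assert(buf[2] == 0x44 && buf[3] == 0x33);
		return 0;
	}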
@@ -674,6 +699,17 @@ static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings
}
}
+static void spi_nor_smpt_read_dummy_fixups(const struct spi_nor *nor,
+ u8 *read_dummy)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->smpt_read_dummy)
+ nor->manufacturer->fixups->smpt_read_dummy(nor, read_dummy);
+
+ if (nor->info->fixups && nor->info->fixups->smpt_read_dummy)
+ nor->info->fixups->smpt_read_dummy(nor, read_dummy);
+}
+
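The dispatcher gives first the manufacturer-wide and then the part-specific fixups a chance to adjust the SMPT read latency. A hypothetical hook implementation would plug in like this (the names and the forced value are purely illustrative):

	/* Hypothetical: force a fixed SMPT read latency for a quirky part. */
	static void example_nor_smpt_read_dummy(const struct spi_nor *nor,
						u8 *read_dummy)
	{
		*read_dummy = 8;
	}

	static const struct spi_nor_fixups example_nor_fixups = {
		.smpt_read_dummy = example_nor_smpt_read_dummy,
	};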
/**
* spi_nor_smpt_read_dummy() - return the configuration detection command read
* latency, in clock cycles.
@@ -686,11 +722,24 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
{
u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
- if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
- return nor->read_dummy;
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE) {
+ read_dummy = nor->read_dummy;
+ spi_nor_smpt_read_dummy_fixups(nor, &read_dummy);
+ }
+
return read_dummy;
}
+static void spi_nor_smpt_map_id_fixups(const struct spi_nor *nor, u8 *map_id)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->smpt_map_id)
+ nor->manufacturer->fixups->smpt_map_id(nor, map_id);
+
+ if (nor->info->fixups && nor->info->fixups->smpt_map_id)
+ nor->info->fixups->smpt_map_id(nor, map_id);
+}
+
/**
* spi_nor_get_map_in_use() - get the configuration map in use
* @nor: pointer to a 'struct spi_nor'
@@ -744,6 +793,8 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
map_id = map_id << 1 | !!(*buf & read_data_mask);
}
+ spi_nor_smpt_map_id_fixups(nor, &map_id);
+
/*
* If command descriptors are provided, they always precede map
* descriptors in the table. There is no need to start the iteration
@@ -779,16 +830,6 @@ out:
return ret;
}
-static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
/**
* spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -806,7 +847,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
continue;
if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
+ region->overlaid = true;
return;
}
}
@@ -841,6 +882,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
if (!region)
return -ENOMEM;
map->regions = region;
+ map->n_regions = region_count;
uniform_erase_type = 0xff;
regions_erase_type = 0;
@@ -848,9 +890,10 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
/* Populate regions. */
for (i = 0; i < region_count; i++) {
j = i + 1; /* index for the region dword */
+ region[i].offset = offset;
region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
- region[i].offset = offset | erase_type;
+ region[i].erase_mask = erase_type;
spi_nor_region_check_overlay(&region[i], erase, erase_type);
@@ -866,21 +909,20 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
*/
regions_erase_type |= erase_type;
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
+ offset = region[i].offset + region[i].size;
}
- spi_nor_region_mark_end(&region[i - 1]);
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
+ save_uniform_erase_type = map->uniform_region.erase_mask;
+ map->uniform_region.erase_mask =
+ spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
if (!regions_erase_type) {
/*
* Roll back to the previous uniform_erase_type mask, SMPT is
* broken.
*/
- map->uniform_erase_type = save_uniform_erase_type;
+ map->uniform_region.erase_mask = save_uniform_erase_type;
return -EINVAL;
}
@@ -968,6 +1010,8 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
{ SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
{ SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
{ SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ { SNOR_HWCAPS_READ_1_1_8, BIT(20) },
+ { SNOR_HWCAPS_READ_1_8_8, BIT(21) },
};
static const struct sfdp_4bait programs[] = {
{ SNOR_HWCAPS_PP, BIT(6) },
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index 6eb99e1cdd61..f74a0eb339ea 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -118,11 +118,19 @@ struct sfdp_bfpt {
(BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B)
#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+#define BFPT_DWORD17_RD_1_1_8_CMD GENMASK(31, 24)
+#define BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS GENMASK(23, 21)
+#define BFPT_DWORD17_RD_1_1_8_WAIT_STATES GENMASK(20, 16)
+#define BFPT_DWORD17_RD_1_8_8_CMD GENMASK(15, 8)
+#define BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS GENMASK(7, 5)
+#define BFPT_DWORD17_RD_1_8_8_WAIT_STATES GENMASK(4, 0)
+
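To see the masks in action, here is a user-space decode of an illustrative DWORD17 value (0x8b is a common 1-1-8 Fast Read opcode; the mode-clock and wait-state values are made up), with minimal stand-ins for the kernel's GENMASK()/FIELD_GET():

	#include <assert.h>
	#include <stdint.h>

	#define GENMASK_U32(h, l) \
		((0xffffffffu >> (31 - (h))) & ~((1u << (l)) - 1u))
	#define FIELD_GET_U32(mask, val) \
		(((val) & (mask)) / ((mask) & (0u - (mask))))

	int main(void)
	{
		uint32_t dword17 = 0x8b080000;	/* illustrative value */

		assert(FIELD_GET_U32(GENMASK_U32(31, 24), dword17) == 0x8b); /* opcode */
		assert(FIELD_GET_U32(GENMASK_U32(23, 21), dword17) == 0);    /* mode clocks */
		assert(FIELD_GET_U32(GENMASK_U32(20, 16), dword17) == 8);    /* wait states */
		return 0;
	}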
#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
#define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */
#define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */
+#define BFPT_DWORD18_BYTE_ORDER_SWAPPED BIT(31) /* Byte order swapped in 8D-8D-8D mode */
struct sfdp_parameter_header {
u8 id_lsb;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 15f9a80c10b9..8498c7003d88 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -4,14 +4,21 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
 /* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
#define USE_CLSR BIT(0)
+#define USE_CLPEF BIT(1)
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
+#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_EX4B 0xB8 /* Exit 4-byte address mode */
+#define SPINOR_OP_CYPRESS_DIE_ERASE 0x61 /* Chip (die) erase */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
@@ -19,21 +26,16 @@
#define SPINOR_REG_CYPRESS_STR1V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1)
#define SPINOR_REG_CYPRESS_CFR1 0x2
-#define SPINOR_REG_CYPRESS_CFR1V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR1)
#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2 0x3
#define SPINOR_REG_CYPRESS_CFR2V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
+#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0)
#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7)
#define SPINOR_REG_CYPRESS_CFR3 0x4
-#define SPINOR_REG_CYPRESS_CFR3V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR3)
#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */
#define SPINOR_REG_CYPRESS_CFR5 0x6
-#define SPINOR_REG_CYPRESS_CFR5V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR5)
#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6)
#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1)
#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0)
@@ -57,22 +59,39 @@
SPI_MEM_OP_DUMMY(ndummy, 0), \
SPI_MEM_OP_DATA_IN(1, buf, 0))
-#define SPANSION_CLSR_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), \
+#define CYPRESS_NOR_EN4B_EX4B_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B : \
+ SPINOR_OP_CYPRESS_EX4B, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPANSION_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
/**
+ * struct spansion_nor_params - Spansion private parameters.
+ * @clsr: Clear Status Register or Clear Program and Erase Failure Flag
+ * opcode.
+ */
+struct spansion_nor_params {
+ u8 clsr;
+};
+
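A sketch of how the private parameters might be populated, assuming the selection happens in a late_init hook keyed off the USE_CLSR/USE_CLPEF mfr_flags defined above (the actual wiring is in a part of this patch not shown here):

	struct spansion_nor_params *priv;

	priv = devm_kzalloc(nor->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Newer parts clear P/E failure flags instead of the legacy CLSR. */
	priv->clsr = (nor->info->mfr_flags & USE_CLPEF) ?
		     SPINOR_OP_CLPEF : SPINOR_OP_CLSR;
	nor->params->priv = priv;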
+/**
* spansion_nor_clear_sr() - Clear the Status Register.
* @nor: pointer to 'struct spi_nor'.
*/
static void spansion_nor_clear_sr(struct spi_nor *nor)
{
+ const struct spansion_nor_params *priv_params = nor->params->priv;
int ret;
if (nor->spimem) {
- struct spi_mem_op op = SPANSION_CLSR_OP;
+ struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
@@ -88,11 +107,18 @@ static void spansion_nor_clear_sr(struct spi_nor *nor)
static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
{
+ struct spi_nor_flash_parameter *params = nor->params;
struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes, addr,
+ CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr,
0, nor->bouncebuf);
int ret;
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->addr_nbytes;
+ op.dummy.nbytes = params->rdsr_dummy;
+ op.data.nbytes = 2;
+ }
+
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
@@ -141,18 +167,26 @@ static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor)
return 1;
}
-static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
/* Use 24 dummy cycles for memory array reads. */
- *buf = SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24;
+ *buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
+ *buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
+ SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR2V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
@@ -160,15 +194,41 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
nor->read_dummy = 24;
+ return 0;
+}
+
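Numerically, the read-modify-write above only touches the 4-bit memory-latency field and preserves the rest of CFR2 (for example the ADRBYT bit). With MEMLAT in bits 3:0, FIELD_PREP reduces to a plain OR:

	#include <assert.h>
	#include <stdint.h>

	#define CFR2_MEMLAT_MASK	0x0f	/* GENMASK(3, 0) */
	#define CFR2_MEMLAT_11_24	0x0b	/* 24 dummy cycles */

	int main(void)
	{
		uint8_t cfr2 = 0x8f;	/* illustrative: ADRBYT set, old latency 0xf */

		cfr2 &= (uint8_t)~CFR2_MEMLAT_MASK;
		cfr2 |= CFR2_MEMLAT_11_24;
		assert(cfr2 == 0x8b);
		return 0;
	}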
+static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+
/* Set the octal and DTR enable bits. */
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ addr, 1, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
+ return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+}
+
+static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2;
+ ret = cypress_nor_set_memlat(nor, addr);
+ if (ret)
+ return ret;
+
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_octal_dtr_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf,
@@ -178,17 +238,16 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
}
-static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
- int ret;
/*
* The register is 1-byte wide, but 1-byte transactions are not allowed
@@ -198,11 +257,23 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS;
buf[1] = 0;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 2, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
- if (ret)
- return ret;
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf);
+ return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+}
+
+static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_single_spi_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
@@ -211,7 +282,7 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -283,10 +354,6 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
u8 i;
int ret;
- if (!params->n_dice)
- return cypress_nor_quad_enable_volatile_reg(nor,
- SPINOR_REG_CYPRESS_CFR1V);
-
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
@@ -297,6 +364,20 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
return 0;
}
+static int cypress_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+ struct spi_mem_op op = CYPRESS_NOR_EN4B_EX4B_OP(enable);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
/**
* cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
* (3 or 4-byte) by querying status
@@ -408,28 +489,17 @@ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
return 0;
}
-static int cypress_nor_get_page_size_single_chip(struct spi_nor *nor)
-{
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR3V, 0,
- nor->bouncebuf);
- int ret;
-
- ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
-}
-
-
-static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
+/**
+ * cypress_nor_get_page_size() - Get flash page size configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_get_page_size(struct spi_nor *nor)
{
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
@@ -459,23 +529,6 @@ static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
return 0;
}
-/**
- * cypress_nor_get_page_size() - Get flash page size configuration.
- * @nor: pointer to a 'struct spi_nor'
- *
- * The BFPT table advertises a 512B or 256B page size depending on part but the
- * page size is actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int cypress_nor_get_page_size(struct spi_nor *nor)
-{
- if (nor->params->n_dice)
- return cypress_nor_get_page_size_mcp(nor);
- return cypress_nor_get_page_size_single_chip(nor);
-}
-
static void cypress_nor_ecc_init(struct spi_nor *nor)
{
/*
@@ -495,6 +548,9 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
struct spi_mem_op op;
int ret;
+	/* Assign the 4-byte address mode method, which is not determined by BFPT */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
@@ -512,28 +568,42 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
if (nor->bouncebuf[0])
return -ENODEV;
- return cypress_nor_get_page_size(nor);
+ return 0;
}
static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ /*
+	 * S25FS256T does not define the SCCR map, but we would like to use
+	 * the same code base for both single- and multi-chip package devices,
+	 * so set vreg_offset and n_dice here to make that possible.
+ */
+ params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL);
+ if (!params->vreg_offset)
+ return -ENOMEM;
+
+ params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG;
+ params->n_dice = 1;
+
/* PP_1_1_4_4B is supported but missing in 4BAIT. */
params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
SNOR_PROTO_1_1_4);
- return 0;
+ return cypress_nor_get_page_size(nor);
}
-static void s25fs256t_late_init(struct spi_nor *nor)
+static int s25fs256t_late_init(struct spi_nor *nor)
{
cypress_nor_ecc_init(nor);
+
+ return 0;
}
-static struct spi_nor_fixups s25fs256t_fixups = {
+static const struct spi_nor_fixups s25fs256t_fixups = {
.post_bfpt = s25fs256t_post_bfpt_fixup,
.post_sfdp = s25fs256t_post_sfdp_fixup,
.late_init = s25fs256t_late_init,
@@ -546,6 +616,9 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
{
int ret;
+	/* Assign the 4-byte address mode method, which is not determined by BFPT */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
@@ -558,10 +631,20 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
- struct spi_nor_erase_type *erase_type =
- nor->params->erase_map.erase_type;
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_type *erase_type = params->erase_map.erase_type;
unsigned int i;
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* In some parts, 3byte erase opcodes are advertised by 4BAIT.
* Convert them to 4byte erase opcodes.
@@ -579,35 +662,30 @@ static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
}
}
- /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
- if (nor->params->size == SZ_256M)
- nor->params->n_dice = 2;
-
return cypress_nor_get_page_size(nor);
}
-static void s25hx_t_late_init(struct spi_nor *nor)
+static int s25hx_t_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
/* Fast Read 4B requires mode cycles */
params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
-
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
- /* Replace ready() with multi die version */
- if (params->n_dice)
- params->ready = cypress_nor_sr_ready_and_clear;
+ params->die_erase_opcode = SPINOR_OP_CYPRESS_DIE_ERASE;
+ return 0;
}
-static struct spi_nor_fixups s25hx_t_fixups = {
+static const struct spi_nor_fixups s25hx_t_fixups = {
.post_bfpt = s25hx_t_post_bfpt_fixup,
.post_sfdp = s25hx_t_post_sfdp_fixup,
.late_init = s25hx_t_late_init,
};
/**
- * cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
@@ -616,7 +694,7 @@ static struct spi_nor_fixups s25hx_t_fixups = {
*
* Return: 0 on success, -errno otherwise.
*/
-static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? cypress_nor_octal_dtr_en(nor) :
cypress_nor_octal_dtr_dis(nor);
@@ -624,22 +702,34 @@ static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* On older versions of the flash the xSPI Profile 1.0 table has the
* 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
*/
- if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
- nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
SPINOR_OP_CYPRESS_RD_FAST;
/* This flash is also missing the 4-byte Page Program opcode bit. */
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
/*
@@ -647,7 +737,7 @@ static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
* address bytes needed for Read Status Register command as 0 but the
* actual value for that is 4.
*/
- nor->params->rdsr_addr_nbytes = 4;
+ params->rdsr_addr_nbytes = 4;
return cypress_nor_get_page_size(nor);
}
@@ -656,19 +746,21 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- int ret;
+ /* Assign the 4-byte address mode method, which BFPT does not determine */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
- ret = cypress_nor_set_addr_mode_nbytes(nor);
- if (ret)
- return ret;
-
- return 0;
+ return cypress_nor_set_addr_mode_nbytes(nor);
}
-static void s28hx_t_late_init(struct spi_nor *nor)
+static int s28hx_t_late_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->set_octal_dtr = cypress_nor_set_octal_dtr;
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
+
+ return 0;
}
static const struct spi_nor_fixups s28hx_t_fixups = {
@@ -693,148 +785,307 @@ s25fs_s_nor_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
+static void s25fs_s_nor_smpt_read_dummy(const struct spi_nor *nor,
+ u8 *read_dummy)
+{
+ /*
+ * The configuration detection dwords in the S25FS-S SMPT have 65h as
+ * the command instruction and 'variable' as the configuration detection
+ * command latency. Set 8 dummy cycles, as that is the factory default
+ * for the 65h (Read Any Register) op.
+ */
+ *read_dummy = 8;
+}
+
+static void s25fs_s_nor_smpt_map_id_dummy(const struct spi_nor *nor, u8 *map_id)
+{
+ /*
+ * The S25FS512S chip supports:
+ * - a hybrid sector option, with a physical set of eight 4-KB sectors
+ *   and one 224-KB sector at the top or bottom of the address space,
+ *   all remaining sectors being 256-KB
+ * - a uniform sector option, with uniform 256-KB sectors
+ *
+ * On the other hand, the datasheet rev. O (Table 71 on page 153, JEDEC
+ * Sector Map Parameter Dword-6, Config. Detect-3) does use CR3NV[1] to
+ * discern the 64-KB (CR3NV[1] = 0) and 256-KB (CR3NV[1] = 1) uniform
+ * sector device configurations. Yet section 7.5.5.1, Configuration
+ * Register 3 Non-volatile (CR3NV), page 61, marks CR3NV[1] as RFU
+ * (Reserved for Future Use) and fixed to 0, which would mean 64-KB
+ * uniform sectors. Since the device does not support 64-KB uniform
+ * sectors in any configuration, SMPT parsing cannot find a valid sector
+ * map entry and fails. Fix this up by overwriting the CR3NV[1] value
+ * with 1, as the table expects.
+ */
+ if (nor->params->size == SZ_64M)
+ *map_id |= BIT(0);
+}
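Concretely, per the comment above: on the 64 MiB (SZ_64M) S25FS512S, forcing bit 0 of the map_id steers the SMPT parser toward the 256-KB uniform sector map instead of failing on the unsupported 64-KB one.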
+
static const struct spi_nor_fixups s25fs_s_nor_fixups = {
.post_bfpt = s25fs_s_nor_post_bfpt_fixups,
+ .smpt_read_dummy = s25fs_s_nor_smpt_read_dummy,
+ .smpt_map_id = s25fs_s_nor_smpt_map_id_dummy,
};
static const struct flash_info spansion_nor_parts[] = {
- /* Spansion/Cypress -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25fs_s_nor_fixups, },
- { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25fs_s_nor_fixups, },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
- PARSE_SFDP
- .fixups = &s25fs256t_fixups },
- { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
- PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25hx_t_fixups },
- { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
- PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25hx_t_fixups },
- { "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
- PARSE_SFDP
- FLAGS(NO_CHIP_ERASE)
- .fixups = &s25hx_t_fixups },
- { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
- PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25hx_t_fixups },
- { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
- PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25hx_t_fixups },
- { "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
- PARSE_SFDP
- FLAGS(NO_CHIP_ERASE)
- .fixups = &s25hx_t_fixups },
- { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
- FLAGS(SPI_NOR_NO_ERASE) },
- { "s28hl512t", INFO(0x345a1a, 0, 256 * 1024, 256)
- PARSE_SFDP
+ {
+ .id = SNOR_ID(0x01, 0x02, 0x12),
+ .name = "s25sl004a",
+ .size = SZ_512K,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x13),
+ .name = "s25sl008a",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x14),
+ .name = "s25sl016a",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x15, 0x4d, 0x00),
+ .name = "s25sl032p",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x15),
+ .name = "s25sl032a",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x16, 0x4d, 0x00),
+ .name = "s25sl064p",
+ .size = SZ_8M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x16),
+ .name = "s25sl064a",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x80),
+ .name = "s25fl256s0",
+ .size = SZ_32M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x81),
+ .name = "s25fs256s0",
+ .size = SZ_32M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x80),
+ .name = "s25fl256s1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x81),
+ .name = "s25fs256s1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x80),
+ .name = "s25fl512s",
+ .size = SZ_64M,
+ .sector_size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x81),
+ .name = "s25fs512s",
+ .size = SZ_64M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ .fixups = &s25fs_s_nor_fixups,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x00),
+ .name = "s25sl12800",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x01),
+ .name = "s25sl12801",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00, 0x80),
+ .name = "s25fl128s0",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00),
+ .name = "s25fl129p0",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x80),
+ .name = "s25fl128s1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x81),
+ .name = "s25fs128s1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ .fixups = &s25fs_s_nor_fixups,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01),
+ .name = "s25fl129p1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x13),
+ .name = "s25fl204k",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x14),
+ .name = "s25fl208k",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x15),
+ .name = "s25fl116k",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x16),
+ .name = "s25fl132k",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x17),
+ .name = "s25fl164k",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x17),
+ .name = "s25fl064l",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x18),
+ .name = "s25fl128l",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x19),
+ .name = "s25fl256l",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x04, 0x2c, 0xc2, 0x7f, 0x7f, 0x7f),
+ .name = "cy15x104q",
+ .size = SZ_512K,
+ .sector_size = SZ_512K,
+ .flags = SPI_NOR_NO_ERASE,
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1a, 0x0f, 0x03, 0x90),
+ .name = "s25hl512t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1b, 0x0f, 0x03, 0x90),
+ .name = "s25hl01gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90),
+ .name = "s25hl02gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f, 0x08, 0x90),
+ .name = "s25fs256t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25fs256t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1a, 0x0f, 0x03, 0x90),
+ .name = "s25hs512t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1b, 0x0f, 0x03, 0x90),
+ .name = "s25hs01gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90),
+ .name = "s25hs02gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ /* S28HL256T */
+ .id = SNOR_ID(0x34, 0x5a, 0x19),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
+ .id = SNOR_ID(0x34, 0x5a, 0x1a),
+ .name = "s28hl512t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
+ .id = SNOR_ID(0x34, 0x5a, 0x1b),
+ .name = "s28hl01gt",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hl01gt", INFO(0x345a1b, 0, 256 * 1024, 512)
- PARSE_SFDP
+ }, {
+ /* S28HL02GT */
+ .id = SNOR_ID(0x34, 0x5a, 0x1c),
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
- PARSE_SFDP
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x19),
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hs01gt", INFO(0x345b1b, 0, 256 * 1024, 512)
- PARSE_SFDP
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1a),
+ .name = "s28hs512t",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1b),
+ .name = "s28hs01gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1c),
+ .name = "s28hs02gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x13),
+ .name = "s25fl004k",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x14),
+ .name = "s25fl008k",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x15),
+ .name = "s25fl016k",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x17),
+ .name = "s25fl064k",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }
};
/**
@@ -876,17 +1127,36 @@ static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
return !(nor->bouncebuf[0] & SR_WIP);
}
-static void spansion_nor_late_init(struct spi_nor *nor)
+static int spansion_nor_late_init(struct spi_nor *nor)
{
- if (nor->params->size > SZ_16M) {
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spansion_nor_params *priv_params;
+ u8 mfr_flags = nor->info->mfr_flags;
+
+ if (params->size > SZ_16M) {
nor->flags |= SNOR_F_4B_OPCODES;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
+ nor->mtd.erasesize = nor->info->sector_size ?:
+ SPI_NOR_DEFAULT_SECTOR_SIZE;
}
- if (nor->info->mfr_flags & USE_CLSR)
- nor->params->ready = spansion_nor_sr_ready_and_clear;
+ if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
+ priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
+ GFP_KERNEL);
+ if (!priv_params)
+ return -ENOMEM;
+
+ if (mfr_flags & USE_CLSR)
+ priv_params->clsr = SPINOR_OP_CLSR;
+ else if (mfr_flags & USE_CLPEF)
+ priv_params->clsr = SPINOR_OP_CLPEF;
+
+ params->priv = priv_params;
+ params->ready = spansion_nor_sr_ready_and_clear;
+ }
+
+ return 0;
}
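A minimal sketch of how the opcode stashed in params->priv can later be consumed when a program/erase error has to be cleared (illustrative only; the function name, its placement, and the exact spi_mem plumbing are assumptions, not part of this hunk):

	static void spansion_nor_clear_sr(struct spi_nor *nor)
	{
		const struct spansion_nor_params *priv_params = nor->params->priv;
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(priv_params->clsr, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		/* Best effort: an error while clearing the SR cannot be reported. */
		spi_mem_exec_op(nor->spimem, &op);
	}

Whether the part uses CLSR or CLPEF is thus decided once, in late_init, and the ready() path stays opcode-agnostic.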
static const struct spi_nor_fixups spansion_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 688eb20c763e..175211fe6a5e 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -13,12 +13,12 @@
#define SST26VF_CR_BPNV BIT(3)
-static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -38,7 +38,7 @@ static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return spi_nor_global_block_unlock(nor);
}
-static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
@@ -49,9 +49,11 @@ static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
.is_locked = sst26vf_nor_is_locked,
};
-static void sst26vf_nor_late_init(struct spi_nor *nor)
+static int sst26vf_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups sst26vf_nor_fixups = {
@@ -59,64 +61,127 @@ static const struct spi_nor_fixups sst26vf_nor_fixups = {
};
static const struct flash_info sst_nor_parts[] = {
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
- SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &sst26vf_nor_fixups },
+ {
+ .id = SNOR_ID(0x62, 0x16, 0x12),
+ .name = "sst25wf020a",
+ .size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x62, 0x16, 0x13),
+ .name = "sst25wf040b",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x01),
+ .name = "sst25wf512",
+ .size = SZ_64K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x02),
+ .name = "sst25wf010",
+ .size = SZ_128K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x03),
+ .name = "sst25wf020",
+ .size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x04),
+ .name = "sst25wf040",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x05),
+ .name = "sst25wf080",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x41),
+ .name = "sst25vf016b",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x4a),
+ .name = "sst25vf032b",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x4b),
+ .name = "sst25vf064c",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x8d),
+ .name = "sst25vf040b",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x8e),
+ .name = "sst25vf080b",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x41),
+ .name = "sst26vf016b",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x42),
+ .name = "sst26vf032b",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .fixups = &sst26vf_nor_fixups,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x43),
+ .name = "sst26vf064b",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &sst26vf_nor_fixups,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x51),
+ .name = "sst26wf016b",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }
};
+static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ u8 op = (len == 1) ? SPINOR_OP_BP : SPINOR_OP_AAI_WP;
+ int ret;
+
+ nor->program_opcode = op;
+ ret = spi_nor_write_data(nor, to, len, buf);
+ if (ret < 0)
+ return ret;
+ WARN(ret != len, "While writing %zu bytes, wrote %i bytes\n", len, ret);
+
+ return spi_nor_wait_till_ready(nor);
+}
+
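For illustration (the offsets are invented, not from the patch): a 5-byte write starting at the odd offset 0x1001 now collapses into three calls to the helper above,

	sst_nor_write_data(nor, 0x1001, 1, buf);     /* SPINOR_OP_BP, odd head */
	sst_nor_write_data(nor, 0x1002, 2, buf + 1); /* SPINOR_OP_AAI_WP */
	sst_nor_write_data(nor, 0x1004, 2, buf + 3); /* SPINOR_OP_AAI_WP */

each call picking the opcode from the length and waiting for the flash to become ready before returning, which is what lets the open-coded WARN/wait sequences below be deleted.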
static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
@@ -138,16 +203,10 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (to % 2) {
- nor->program_opcode = SPINOR_OP_BP;
-
/* write one byte. */
- ret = spi_nor_write_data(nor, to, 1, buf);
+ ret = sst_nor_write_data(nor, to, 1, buf);
if (ret < 0)
goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
to++;
actual++;
@@ -155,16 +214,11 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
- nor->program_opcode = SPINOR_OP_AAI_WP;
-
/* write two bytes. */
- ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ ret = sst_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
goto out;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
+
to += 2;
nor->sst_write_second = true;
}
@@ -184,14 +238,9 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
goto out;
- nor->program_opcode = SPINOR_OP_BP;
- ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ ret = sst_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
actual += 1;
@@ -203,10 +252,12 @@ out:
return ret;
}
-static void sst_nor_late_init(struct spi_nor *nor)
+static int sst_nor_late_init(struct spi_nor *nor)
{
if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_nor_write;
+
+ return 0;
}
static const struct spi_nor_fixups sst_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 0ba716e84377..9b07f83aeac7 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
@@ -34,23 +35,27 @@ static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
{
unsigned int bp_slots, bp_slots_needed;
+ /*
+ * sector_size will eventually be replaced with the max erase size of
+ * the flash. For now, we need to have that ugly default.
+ */
+ unsigned int sector_size = nor->info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE;
+ u64 n_sectors = div_u64(nor->params->size, sector_size);
u8 mask = spi_nor_get_sr_bp_mask(nor);
/* Reserved one for "protect none" and one for "protect all". */
bp_slots = (1 << hweight8(mask)) - 2;
- bp_slots_needed = ilog2(nor->info->n_sectors);
+ bp_slots_needed = ilog2(n_sectors);
if (bp_slots_needed > bp_slots)
- return nor->info->sector_size <<
- (bp_slots_needed - bp_slots);
+ return sector_size << (bp_slots_needed - bp_slots);
else
- return nor->info->sector_size;
+ return sector_size;
}
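A worked example may help (illustrative numbers): for a 16 MiB flash with 64 KiB sectors and a 3-bit BP field,

	n_sectors       = 16 MiB / 64 KiB = 256
	bp_slots        = (1 << 3) - 2    = 6
	bp_slots_needed = ilog2(256)      = 8

so the minimum protectable length becomes 64 KiB << (8 - 6) = 256 KiB.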
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
+ u64 *len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
u8 mask = spi_nor_get_sr_bp_mask(nor);
u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
@@ -71,13 +76,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
*len = min_prot_len << (bp - 1);
- if (*len > mtd->size)
- *len = mtd->size;
+ if (*len > nor->params->size)
+ *len = nor->params->size;
if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
*ofs = 0;
else
- *ofs = mtd->size - *len;
+ *ofs = nor->params->size - *len;
}
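Continuing the example above: with min_prot_len = 256 KiB and BP = 3, the computed window is 256 KiB << 2 = 1 MiB, clamped to the flash size and anchored at the top of the array unless the TB bit selects bottom protection.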
/*
@@ -85,10 +90,10 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
* (if @locked is false); false otherwise.
*/
static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr, bool locked)
+ u64 len, u8 sr, bool locked)
{
loff_t lock_offs, lock_offs_max, offs_max;
- uint64_t lock_len;
+ u64 lock_len;
if (!len)
return true;
@@ -106,14 +111,13 @@ static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
return (ofs >= lock_offs_max) || (offs_max <= lock_offs);
}
-static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
+static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, u64 len, u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
-static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr)
+static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
+ u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
@@ -151,9 +155,8 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -178,7 +181,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
can_be_bottom = false;
/* If anything above us is unlocked, we can't use 'top' protection */
- if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len),
status_old))
can_be_top = false;
@@ -190,11 +193,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
/* lock_len: length of region that should end up locked */
if (use_top)
- lock_len = mtd->size - ofs;
+ lock_len = nor->params->size - ofs;
else
lock_len = ofs + len;
- if (lock_len == mtd->size) {
+ if (lock_len == nor->params->size) {
val = mask;
} else {
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
@@ -214,8 +217,13 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
status_new = (status_old & ~mask & ~tb_mask) | val;
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
+ /*
+ * Disallow further writes if WP# pin is neither left floating nor
+ * wrongly tied to GND (that includes internal pull-downs).
+ * WP# pin hard strapped to GND can be a valid use case.
+ */
+ if (!(nor->flags & SNOR_F_NO_WP))
+ status_new |= SR_SRWD;
if (!use_top)
status_new |= tb_mask;
@@ -236,9 +244,8 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -263,7 +270,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
can_be_top = false;
/* If anything above us is locked, we can't use 'bottom' protection */
- if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len),
status_old))
can_be_bottom = false;
@@ -275,7 +282,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
/* lock_len: length of region that should remain locked */
if (use_top)
- lock_len = mtd->size - (ofs + len);
+ lock_len = nor->params->size - (ofs + len);
else
lock_len = ofs;
@@ -321,7 +328,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
* Returns 1 if entire region is locked, 0 if any portion is unlocked, and
* negative on errors.
*/
-static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -343,7 +350,7 @@ void spi_nor_init_default_locking_ops(struct spi_nor *nor)
nor->params->locking_ops = &spi_nor_sr_locking_ops;
}
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -358,7 +365,7 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -373,7 +380,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index c09bb832b3b9..643513ee891b 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -35,8 +35,8 @@ static ssize_t jedec_id_show(struct device *dev,
struct spi_device *spi = to_spi_device(dev);
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
- const u8 *id = nor->info->id_len ? nor->info->id : nor->id;
- u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN;
+ const u8 *id = nor->info->id ? nor->info->id->bytes : nor->id;
+ u8 id_len = nor->info->id ? nor->info->id->len : SPI_NOR_MAX_ID_LEN;
return sysfs_emit(buf, "%*phN\n", id_len, id);
}
@@ -50,7 +50,7 @@ static struct attribute *spi_nor_sysfs_entries[] = {
};
static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
@@ -62,9 +62,9 @@ static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
sfdp_size);
}
-static BIN_ATTR_RO(sfdp, 0);
+static const BIN_ATTR_RO(sfdp, 0);
-static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+static const struct bin_attribute *const spi_nor_sysfs_bin_entries[] = {
&bin_attr_sfdp,
NULL
};
@@ -78,14 +78,16 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
return 0;
- if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id)
+ if (attr == &dev_attr_partname.attr && !nor->info->name)
+ return 0;
+ if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id)
return 0;
return 0444;
}
static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
- struct bin_attribute *attr, int n)
+ const struct bin_attribute *attr, int n)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
struct spi_mem *spimem = spi_get_drvdata(spi);
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 834d6ba5ce70..fb855fe44733 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -10,6 +10,7 @@
#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define WINBOND_NOR_OP_SELDIE 0xc2 /* Select active die */
#define WINBOND_NOR_WREAR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
@@ -17,6 +18,37 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
+#define WINBOND_NOR_SELDIE_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
+static int
+w25q128_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * The Zetta ZD25Q128C is a clone of this Winbond device, but its
+ * encoded size is wrong: Mbit was apparently confused with MiB, so
+ * the flash is discovered as a 2 MiB device.
+ */
+ if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+ bfpt_header->minor == SFDP_JESD216_MINOR &&
+ nor->params->size == SZ_2M &&
+ nor->params->erase_map.regions[0].size == SZ_2M) {
+ nor->params->size = SZ_16M;
+ nor->params->erase_map.regions[0].size = SZ_16M;
+ }
+
+ return 0;
+}
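For concreteness: a 128 Mbit part is 16 MiB, but 16 read as Mbit instead gives 2 MiB — hence the SZ_2M signature (in both the size and the first erase region) that the check above looks for before bumping both values to SZ_16M.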
+
+static const struct spi_nor_fixups w25q128_fixups = {
+ .post_bfpt = w25q128_post_bfpt_fixups,
+};
+
static int
w25q256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -41,107 +73,301 @@ static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
+/**
+ * winbond_nor_select_die() - Set active die.
+ * @nor: pointer to 'struct spi_nor'.
+ * @die: die to set active.
+ *
+ * Certain Winbond chips feature more than a single die. This is mostly hidden
+ * from the user, except that some chips may experience timing deviations when
+ * modifying the status bits between dies, which in some corner cases may
+ * produce problematic races. Being able to explicitly select a die to check
+ * its state in such cases may be useful.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_select_die(struct spi_nor *nor, u8 die)
+{
+ int ret;
+
+ nor->bouncebuf[0] = die;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ WINBOND_NOR_OP_SELDIE,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die);
+
+ return ret;
+}
+
+static int winbond_nor_multi_die_ready(struct spi_nor *nor)
+{
+ int ret, i;
+
+ for (i = 0; i < nor->params->n_dice; i++) {
+ ret = winbond_nor_select_die(nor, i);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_sr_ready(nor);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
+static int
+winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /*
+ * SFDP can report the number of dice, but only in optional additional
+ * tables which these chips do not provide. The die count matters,
+ * though, because these devices need extra care when polling the busy
+ * bit.
+ */
+ nor->params->n_dice = nor->params->size / SZ_64M;
+ nor->params->ready = winbond_nor_multi_die_ready;
+
+ return 0;
+}
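Under the 512 Mbit (64 MiB) per-die assumption encoded above, n_dice = size / SZ_64M gives 2 for the 1 Gbit W25Q01JV and 4 for the 2 Gbit W25Q02JV; the ready() override then polls each die in turn via winbond_nor_multi_die_ready().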
+
+static const struct spi_nor_fixups winbond_nor_multi_die_fixups = {
+ .post_sfdp = winbond_nor_multi_die_post_sfdp_fixups,
+};
+
static const struct flash_info winbond_nor_parts[] = {
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &w25q256_fixups },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
- PARSE_SFDP },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
- SPI_NOR_DUAL_READ) },
- { "w25q512nwq", INFO(0xef6020, 0, 0, 0)
- PARSE_SFDP
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024)
- PARSE_SFDP
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ {
+ .id = SNOR_ID(0xef, 0x30, 0x10),
+ .name = "w25x05",
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x11),
+ .name = "w25x10",
+ .size = SZ_128K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x12),
+ .name = "w25x20",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x13),
+ .name = "w25x40",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x14),
+ .name = "w25x80",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x15),
+ .name = "w25x16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x16),
+ .name = "w25x32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x17),
+ .name = "w25x64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x12),
+ .name = "w25q20cl",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x14),
+ .name = "w25q80bl",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x16),
+ .name = "w25q32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x17),
+ .name = "w25q64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x18),
+ /* Flavors w/ and w/o SFDP. */
+ .name = "w25q128",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &w25q128_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x19),
+ .name = "w25q256",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &w25q256_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x20),
+ .name = "w25q512jvq",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ /* W25Q01JV */
+ .id = SNOR_ID(0xef, 0x40, 0x21),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x50, 0x12),
+ .name = "w25q20bw",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x50, 0x14),
+ .name = "w25q80",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x12),
+ .name = "w25q20ew",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x15),
+ .name = "w25q16dw",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x16),
+ .name = "w25q32dw",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x17),
+ .name = "w25q64dw",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x18),
+ .name = "w25q128fw",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x19),
+ .name = "w25q256jw",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x20),
+ .name = "w25q512nwq",
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x15),
+ .name = "w25q16jv-im/jm",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x16),
+ .name = "w25q32jv",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x17),
+ .name = "w25q64jvm",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x18),
+ .name = "w25q128jv",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x19),
+ .name = "w25q256jvm",
+ }, {
+ /* W25Q02JV */
+ .id = SNOR_ID(0xef, 0x70, 0x22),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x71, 0x19),
+ .name = "w25m512jv",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x16),
+ .name = "w25q32jwm",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x17),
+ .name = "w25q64jwm",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x18),
+ .name = "w25q128jwm",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x19),
+ .name = "w25q256jwm",
+ .size = SZ_32M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x20),
+ .name = "w25q512nwm",
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ /* W25Q01NWxxIQ */
+ .id = SNOR_ID(0xef, 0x60, 0x21),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ }, {
+ /* W25Q01NWxxIM */
+ .id = SNOR_ID(0xef, 0x80, 0x21),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ }, {
+ /* W25Q02NWxxIM */
+ .id = SNOR_ID(0xef, 0x80, 0x22),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ }, {
+ /* W25H512NWxxAM */
+ .id = SNOR_ID(0xef, 0xa0, 0x20),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ }, {
+ /* W25H01NWxxAM */
+ .id = SNOR_ID(0xef, 0xa0, 0x21),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ }, {
+ /* W25H02NWxxAM */
+ .id = SNOR_ID(0xef, 0xa0, 0x22),
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP,
+ },
};
/**
@@ -216,11 +442,11 @@ static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
.is_locked = spi_nor_otp_is_locked_sr2,
};
-static void winbond_nor_late_init(struct spi_nor *nor)
+static int winbond_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
- if (params->otp.org->n_regions)
+ if (params->otp.org)
params->otp.ops = &winbond_nor_otp_ops;
/*
@@ -232,6 +458,8 @@ static void winbond_nor_late_init(struct spi_nor *nor)
* from BFPT, if any.
*/
params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
+
+ return 0;
}
static const struct spi_nor_fixups winbond_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
deleted file mode 100644
index 7175de8aa336..000000000000
--- a/drivers/mtd/spi-nor/xilinx.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-#define XILINX_OP_SE 0x50 /* Sector erase */
-#define XILINX_OP_PP 0x82 /* Page program */
-#define XILINX_OP_RDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-#define XILINX_RDSR_OP(buf) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0), \
- SPI_MEM_OP_NO_ADDR, \
- SPI_MEM_OP_NO_DUMMY, \
- SPI_MEM_OP_DATA_IN(1, buf, 0))
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8 * (_page_size)), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .n_banks = 1, \
- .addr_nbytes = 3, \
- .flags = SPI_NOR_NO_FR
-
-/* Xilinx S3AN share MFR with Atmel SPI NOR */
-static const struct flash_info xilinx_nor_parts[] = {
- /* Xilinx S3AN Internal Flash */
- { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
- { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
- { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
-};
-
-/*
- * This code converts an address to the Default Address Mode, that has non
- * power of two page sizes. We must support this mode because it is the default
- * mode supported by Xilinx tools, it can access the whole flash area and
- * changing over to the Power-of-two mode is irreversible and corrupts the
- * original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
- */
-static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr)
-{
- u32 page_size = nor->params->page_size;
- u32 offset, page;
-
- offset = addr % page_size;
- page = addr / page_size;
- page <<= (page_size > 512) ? 10 : 9;
-
- return page | offset;
-}
-
-/**
- * xilinx_nor_read_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op = XILINX_RDSR_OP(sr);
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr,
- 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR\n", ret);
-
- return ret;
-}
-
-/**
- * xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see
- * if the flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int xilinx_nor_sr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-static int xilinx_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- u32 page_size;
- int ret;
-
- ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- nor->erase_opcode = XILINX_OP_SE;
- nor->program_opcode = XILINX_OP_PP;
- nor->read_opcode = SPINOR_OP_READ;
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
- /*
- * This flashes have a page size of 264 or 528 bytes (known as
- * Default addressing mode). It can be changed to a more standard
- * Power of two mode where the page size is 256/512. This comes
- * with a price: there is 3% less of space, the data is corrupted
- * and the page size cannot be changed back to default addressing
- * mode.
- *
- * The current addressing mode can be read from the XRDSR register
- * and should not be changed, because is a destructive operation.
- */
- if (nor->bouncebuf[0] & XSR_PAGESIZE) {
- /* Flash in Power of 2 mode */
- page_size = (nor->params->page_size == 264) ? 256 : 512;
- nor->params->page_size = page_size;
- nor->mtd.writebufsize = page_size;
- nor->params->size = 8 * page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * page_size;
- } else {
- /* Flash in Default addressing mode */
- nor->params->convert_addr = s3an_nor_convert_addr;
- nor->mtd.erasesize = nor->info->sector_size;
- }
-
- return 0;
-}
-
-static void xilinx_nor_late_init(struct spi_nor *nor)
-{
- nor->params->setup = xilinx_nor_setup;
- nor->params->ready = xilinx_nor_sr_ready;
-}
-
-static const struct spi_nor_fixups xilinx_nor_fixups = {
- .late_init = xilinx_nor_late_init,
-};
-
-const struct spi_nor_manufacturer spi_nor_xilinx = {
- .name = "xilinx",
- .parts = xilinx_nor_parts,
- .nparts = ARRAY_SIZE(xilinx_nor_parts),
- .fixups = &xilinx_nor_fixups,
-};
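The removed s3an_nor_convert_addr() packs a linear address into the S3AN Default Address Mode layout: each 264-byte page occupies a 512-byte slot (528-byte pages occupy 1024-byte slots), so the page index moves into the high bits and the in-page offset stays in the low bits. A minimal user-space sketch of the same arithmetic, for illustration only (not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Same packing as s3an_nor_convert_addr(): page index shifted into
	 * a 512- or 1024-byte slot, byte offset kept in the low bits. */
	static uint32_t s3an_convert(uint32_t addr, uint32_t page_size)
	{
		uint32_t offset = addr % page_size;
		uint32_t page = addr / page_size;

		page <<= (page_size > 512) ? 10 : 9;
		return page | offset;
	}

	int main(void)
	{
		/* Linear address 600 on a 264-byte-page part: page 2,
		 * offset 72, so the device sees (2 << 9) | 72 = 0x448. */
		printf("0x%x\n", s3an_convert(600, 264));
		return 0;
	}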
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
index 051411e86339..d5a06054b0dd 100644
--- a/drivers/mtd/spi-nor/xmc.c
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -9,15 +9,20 @@
#include "core.h"
static const struct flash_info xmc_nor_parts[] = {
- /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ {
+ .id = SNOR_ID(0x20, 0x70, 0x17),
+ .name = "XM25QH64A",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0x70, 0x18),
+ .name = "XM25QH128A",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ },
};
+/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
const struct spi_nor_manufacturer spi_nor_xmc = {
.name = "xmc",
.parts = xmc_nor_parts,
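The switch from the legacy INFO() macro to explicit fields is size-preserving: 128 sectors of 64 KiB equal 8 MiB and 256 sectors of 64 KiB equal 16 MiB, which is exactly what the new SZ_8M/SZ_16M values encode. A stand-alone sanity check (illustrative only):

	#include <assert.h>

	#define SZ_8M	(8u << 20)
	#define SZ_16M	(16u << 20)

	int main(void)
	{
		assert(128u * 64 * 1024 == SZ_8M);	/* XM25QH64A */
		assert(256u * 64 * 1024 == SZ_16M);	/* XM25QH128A */
		return 0;
	}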
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 04da685c36be..46c01fa2ec46 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -18,7 +18,6 @@
struct ssfdcr_record {
struct mtd_blktrans_dev mbd;
- int usecount;
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
@@ -296,7 +295,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (cis_sector == -1)
return;
- ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ ssfdc = kzalloc(sizeof(*ssfdc), GFP_KERNEL);
if (!ssfdc)
return;
@@ -333,7 +332,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
kmalloc_array(ssfdc->map_len,
sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
if (!ssfdc->logic_block_map)
- goto out_err;
+ goto out_free_ssfdc;
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
@@ -351,7 +350,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
out_err:
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
+out_free_ssfdc:
+ kfree(ssfdc);
}
static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
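The new out_free_ssfdc label restores the usual layered-unwind convention: a failed allocation jumps past the kfree() of the object that was never allocated, so each label frees exactly what exists at that point. A user-space analogue of the pattern (names hypothetical):

	#include <stdlib.h>

	struct rec { int *map; };

	static struct rec *make_rec(size_t n)
	{
		struct rec *r = calloc(1, sizeof(*r));

		if (!r)
			return NULL;
		r->map = calloc(n, sizeof(*r->map));
		if (!r->map)
			goto out_free_rec;	/* map was never allocated */
		return r;

	out_free_rec:
		free(r);
		return NULL;
	}

	int main(void)
	{
		struct rec *r = make_rec(16);

		if (r) {
			free(r->map);
			free(r);
		}
		return 0;
	}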
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index 5de0378f90db..7dae831ee8b6 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -1,19 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o
-mtd_oobtest-objs := oobtest.o mtd_test.o
-mtd_pagetest-objs := pagetest.o mtd_test.o
-mtd_readtest-objs := readtest.o mtd_test.o
-mtd_speedtest-objs := speedtest.o mtd_test.o
-mtd_stresstest-objs := stresstest.o mtd_test.o
-mtd_subpagetest-objs := subpagetest.o mtd_test.o
-mtd_torturetest-objs := torturetest.o mtd_test.o
-mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
+mtd_oobtest-objs := oobtest.o
+mtd_pagetest-objs := pagetest.o
+mtd_readtest-objs := readtest.o
+mtd_speedtest-objs := speedtest.o
+mtd_stresstest-objs := stresstest.o
+mtd_subpagetest-objs := subpagetest.o
+mtd_torturetest-objs := torturetest.o
+mtd_nandbiterrs-objs := nandbiterrs.o
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
index c84250beffdc..f391e0300cdc 100644
--- a/drivers/mtd/tests/mtd_test.c
+++ b/drivers/mtd/tests/mtd_test.c
@@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock);
static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
{
@@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks);
int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
unsigned int eb, int ebcnt)
@@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks);
int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
{
@@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_read);
int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
const void *buf)
@@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_write);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD function test helpers");
+MODULE_AUTHOR("Akinobu Mita");
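Together with the Makefile change above, mtd_test.o is now built as a module of its own instead of being linked into every test module, so its helpers must be exported and the object needs its own module metadata. A minimal sketch of that pattern (hypothetical helper, not part of this patch):

	#include <linux/module.h>

	int example_helper(int x)
	{
		return x + 1;
	}
	EXPORT_SYMBOL_GPL(example_helper);

	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("Helper object built as a standalone module");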
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 13fed398937e..e1ee68f8b8f8 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -17,7 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 8eb40b6e6dfa..6878700d2fc0 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -17,7 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index 05250a080139..f34bbf033c4d 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -15,7 +15,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 2ed77b7b3fcb..e28a3af83c0e 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -104,4 +104,26 @@ config MTD_UBI_BLOCK
If in doubt, say "N".
+config MTD_UBI_FAULT_INJECTION
+ bool "Fault injection capability of UBI device"
+ default n
+ depends on FAULT_INJECTION_DEBUG_FS
+ help
+ This option enables fault-injection support for UBI devices for
+ testing purposes.
+
+ If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+	  This option enables an additional driver exposing UBI volumes as NVMEM
+	  providers, intended for platforms where UBI is part of the firmware
+	  specification and is also used to store e.g. MAC addresses or
+	  board-specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605ca7..4b51aaf00d1a 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index ae5abe492b52..884171871d0e 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1447,7 +1447,7 @@ out_ech:
return err;
}
-static struct ubi_attach_info *alloc_ai(void)
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
struct ubi_attach_info *ai;
@@ -1461,7 +1461,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->alien);
INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
- ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
@@ -1491,7 +1491,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
err = -ENOMEM;
- scan_ai = alloc_ai();
+ scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
if (!scan_ai)
goto out;
@@ -1557,7 +1557,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
int err;
struct ubi_attach_info *ai;
- ai = alloc_ai();
+ ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
@@ -1575,7 +1575,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
- ai = alloc_ai();
+ ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
@@ -1600,7 +1600,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
err = ubi_read_volume_table(ubi, ai);
if (err)
- goto out_ai;
+ goto out_fm;
err = ubi_wl_init(ubi, ai);
if (err)
@@ -1614,7 +1614,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai;
- scan_ai = alloc_ai();
+ scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
if (!scan_ai) {
err = -ENOMEM;
goto out_wl;
@@ -1642,6 +1642,8 @@ out_wl:
out_vtbl:
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
+out_fm:
+ ubi_free_fastmap(ubi);
out_ai:
destroy_ai(ai);
return err;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 437c5b83ffe5..b53fd147fa65 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ blk_rq_map_sg(req, pdu->usgl.sg);
while (bytes_left) {
/*
@@ -282,12 +282,12 @@ static void ubiblock_release(struct gendisk *gd)
mutex_unlock(&dev->dev_mutex);
}
-static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int ubiblock_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
- geo->sectors = get_capacity(bdev->bd_disk);
+ geo->sectors = get_capacity(disk);
geo->start = 0;
return 0;
}
@@ -348,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
int ubiblock_create(struct ubi_volume_info *vi)
{
+ struct queue_limits lim = {
+ .max_segments = UBI_MAX_SG_COUNT,
+ };
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
@@ -380,20 +383,21 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
- dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
+ pr_err("ubiblock%d_%d: blk_mq_alloc_tag_set failed\n",
+ dev->ubi_num, dev->vol_id);
goto out_free_dev;
}
/* Initialize the gendisk of this ubiblock device */
- gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
@@ -404,8 +408,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
gd->minors = 1;
gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
if (gd->first_minor < 0) {
- dev_err(disk_to_dev(gd),
- "block: dynamic minor allocation failed");
+ pr_err("ubiblock%d_%d: block: dynamic minor allocation failed\n",
+ dev->ubi_num, dev->vol_id);
ret = -ENODEV;
goto out_cleanup_disk;
}
@@ -416,7 +420,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->gd = gd;
dev->rq = gd->queue;
- blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
list_add_tail(&dev->list, &ubiblock_devices);
@@ -434,7 +437,7 @@ out_remove_minor:
list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
- put_disk(dev->gd);
+ put_disk(gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
@@ -447,13 +450,15 @@ out_unlock:
static void ubiblock_cleanup(struct ubiblock *dev)
{
+ int id = dev->gd->first_minor;
+
/* Stop new requests to arrive */
del_gendisk(dev->gd);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
- idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
+ idr_remove(&ubiblock_minor_idr, id);
}
int ubiblock_remove(struct ubi_volume_info *vi)
@@ -532,6 +537,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over ubiblock cmdline parameters. If a parameter matches the
+	 * newly added volume, create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -539,10 +608,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -568,56 +634,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -643,18 +659,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
@@ -665,7 +670,7 @@ err_unreg:
return ret;
}
-void __exit ubiblock_exit(void)
+void ubiblock_exit(void)
{
ubi_unregister_volume_notifier(&ubiblock_notifier);
ubiblock_remove_all();
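Two notes on the block.c changes above: the segment limit is now handed to blk_mq_alloc_disk() at allocation time via struct queue_limits instead of being applied to the live queue with blk_queue_max_segments(), and the three branches of match_volume_desc() correspond to the three accepted parameter spellings (volume device path, ubi number plus volume name, ubi number plus volume id). The allocation-time pattern in isolation:

	struct queue_limits lim = {
		.max_segments = UBI_MAX_SG_COUNT,
	};
	struct gendisk *gd;

	/* Limits are fixed when the disk is allocated; no later tweaking. */
	gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
	if (IS_ERR(gd))
		return PTR_ERR(gd);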
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 8b91a55ec0d2..ef6a22f372f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -35,7 +36,7 @@
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 5
+#define MTD_PARAM_MAX_COUNT 6
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -54,6 +55,7 @@
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
* @enable_fm: enable fastmap when value is non-zero
+ * @need_resv_pool: reserve pool->max_size pebs when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
@@ -61,6 +63,7 @@ struct mtd_dev_param {
int vid_hdr_offs;
int max_beb_per1024;
int enable_fm;
+ int need_resv_pool;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -90,7 +93,7 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -109,7 +112,7 @@ static struct attribute *ubi_class_attrs[] = {
ATTRIBUTE_GROUPS(ubi_class);
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class ubi_class = {
+const struct class ubi_class = {
.name = UBI_NAME_STR,
.class_groups = ubi_class_groups,
};
@@ -258,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -295,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -324,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -511,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -825,6 +831,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @disable_fm: whether disable fastmap
+ * @need_resv_pool: whether to reserve PEBs to fill fm_pool
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -840,7 +847,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm,
+ bool need_resv_pool)
{
struct ubi_device *ubi;
int i, err;
@@ -894,6 +902,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
@@ -944,6 +959,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+ ubi->fm_pool_rsv_cnt = need_resv_pool ? ubi->fm_pool.max_size : 0;
ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
@@ -1086,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1097,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1207,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * we are already holding &mtd_table_mutex, but still need
+ * to bump refcount
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1266,7 +1288,8 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p->vid_hdr_offs, p->max_beb_per1024,
- p->enable_fm == 0);
+ p->enable_fm == 0,
+ p->need_resv_pool != 0);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
@@ -1291,24 +1314,81 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_debugfs;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
+ ubiblock_exit();
+out_debugfs:
ubi_debugfs_exit();
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
@@ -1319,13 +1399,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
@@ -1455,7 +1537,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
if (token) {
int err = kstrtoint(token, 10, &p->ubi_num);
- if (err) {
+ if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
@@ -1475,6 +1557,18 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
} else
p->enable_fm = 0;
+ token = tokens[5];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->need_resv_pool);
+
+ if (err) {
+ pr_err("UBI error: bad value for need_resv_pool parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->need_resv_pool = 0;
+
mtd_devs += 1;
return 0;
}
@@ -1488,6 +1582,7 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
"Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
+ "Optional \"need_resv_pool\" parameter determines whether to reserve pool->max_size pebs during attach. If the value is non-zero, peb reservation is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f43430b9c1e6..b700a0efaa93 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -828,6 +828,70 @@ out_free:
return err;
}
+static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq)
+{
+ struct ubi_ecinfo_req req;
+ struct ubi_wl_entry *wl;
+ int read_cnt;
+ int peb;
+ int end_peb;
+
+ /* Copy the input arguments */
+ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ /* Check input arguments */
+ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count)
+ return -EINVAL;
+
+ if (check_add_overflow(req.start, req.length, &end_peb))
+ return -EINVAL;
+
+ if (end_peb > ubi->peb_count)
+ end_peb = ubi->peb_count;
+
+ /* Check access rights before filling erase_counters array */
+ if (!access_ok((void __user *)ureq->erase_counters,
+ (end_peb-req.start) * sizeof(int32_t)))
+ return -EFAULT;
+
+ /* Fill erase counter array */
+ read_cnt = 0;
+ for (peb = req.start; peb < end_peb; read_cnt++, peb++) {
+ int ec;
+
+ if (ubi_io_is_bad(ubi, peb)) {
+ if (__put_user(UBI_UNKNOWN, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ continue;
+ }
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[peb];
+ if (wl)
+ ec = wl->ec;
+ else
+ ec = UBI_UNKNOWN;
+
+ spin_unlock(&ubi->wl_lock);
+
+ if (__put_user(ec, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ }
+
+ /* Return actual read length */
+ req.read_length = read_cnt;
+
+ /* Copy everything except erase counter array */
+ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -991,6 +1055,12 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ case UBI_IOCECNFO:
+ {
+ err = ubi_get_ec_info(ubi, argp);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
@@ -1041,7 +1111,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
- req.max_beb_per1024, !!req.disable_fm);
+ req.max_beb_per1024, !!req.disable_fm,
+ !!req.need_resv_pool);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
@@ -1094,7 +1165,6 @@ const struct file_operations ubi_vol_cdev_operations = {
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1104,5 +1174,4 @@ const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
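The new UBI_IOCECNFO ioctl fills a caller-supplied, variable-length erase-counter array. A hedged user-space sketch; the field names and the trailing flexible array mirror the kernel-side accessors above, but check <mtd/ubi-user.h> for the authoritative definition:

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/ubi-user.h>	/* UBI_IOCECNFO, struct ubi_ecinfo_req */

	int main(void)
	{
		int n = 32, fd = open("/dev/ubi0", O_RDONLY);
		struct ubi_ecinfo_req *req;

		if (fd < 0)
			return 1;
		req = calloc(1, sizeof(*req) + n * sizeof(req->erase_counters[0]));
		if (!req)
			return 1;
		req->start = 0;		/* first PEB to query */
		req->length = n;	/* number of PEBs requested */
		if (ioctl(fd, UBI_IOCECNFO, req) == 0) {
			for (int i = 0; i < req->read_length; i++)
				printf("PEB %d: EC %d\n", req->start + i,
				       req->erase_counters[i]);
		}
		free(req);
		close(fd);
		return 0;
	}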
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 27168f511d6d..d2a53961d8e2 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -10,7 +10,37 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/fault-inject.h>
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(fault_eccerr_attr);
+static DECLARE_FAULT_ATTR(fault_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_read_failure_attr);
+static DECLARE_FAULT_ATTR(fault_write_failure_attr);
+static DECLARE_FAULT_ATTR(fault_erase_failure_attr);
+static DECLARE_FAULT_ATTR(fault_power_cut_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_ebadmsg_attr);
+
+#define FAIL_ACTION(name, fault_attr) \
+bool should_fail_##name(void) \
+{ \
+ return should_fail(&fault_attr, 1); \
+}
+FAIL_ACTION(eccerr, fault_eccerr_attr)
+FAIL_ACTION(bitflips, fault_bitflips_attr)
+FAIL_ACTION(read_failure, fault_read_failure_attr)
+FAIL_ACTION(write_failure, fault_write_failure_attr)
+FAIL_ACTION(erase_failure, fault_erase_failure_attr)
+FAIL_ACTION(power_cut, fault_power_cut_attr)
+FAIL_ACTION(io_ff, fault_io_ff_attr)
+FAIL_ACTION(io_ff_bitflips, fault_io_ff_bitflips_attr)
+FAIL_ACTION(bad_hdr, fault_bad_hdr_attr)
+FAIL_ACTION(bad_hdr_ebadmsg, fault_bad_hdr_ebadmsg_attr)
+#endif
/**
* ubi_dump_flash - dump a region of flash.
@@ -212,6 +242,52 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
*/
static struct dentry *dfs_rootdir;
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static void dfs_create_fault_entry(struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("fault_inject", parent);
+ if (IS_ERR_OR_NULL(dir)) {
+ int err = dir ? PTR_ERR(dir) : -ENODEV;
+
+ pr_warn("UBI error: cannot create \"fault_inject\" debugfs directory, error %d\n",
+ err);
+ return;
+ }
+
+ fault_create_debugfs_attr("emulate_eccerr", dir,
+ &fault_eccerr_attr);
+
+ fault_create_debugfs_attr("emulate_read_failure", dir,
+ &fault_read_failure_attr);
+
+ fault_create_debugfs_attr("emulate_bitflips", dir,
+ &fault_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_write_failure", dir,
+ &fault_write_failure_attr);
+
+ fault_create_debugfs_attr("emulate_erase_failure", dir,
+ &fault_erase_failure_attr);
+
+ fault_create_debugfs_attr("emulate_power_cut", dir,
+ &fault_power_cut_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff", dir,
+ &fault_io_ff_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff_bitflips", dir,
+ &fault_io_ff_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr", dir,
+ &fault_bad_hdr_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr_ebadmsg", dir,
+ &fault_bad_hdr_ebadmsg_attr);
+}
+#endif
+
/**
* ubi_debugfs_init - create UBI debugfs directory.
*
@@ -232,6 +308,10 @@ int ubi_debugfs_init(void)
return err;
}
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ dfs_create_fault_entry(dfs_rootdir);
+#endif
+
return 0;
}
@@ -252,7 +332,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
- char buf[8];
+ char buf[16];
int val;
ubi = ubi_get_device(ubi_num);
@@ -272,7 +352,12 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures;
- else if (dent == d->dfs_emulate_power_cut) {
+ else if (dent == d->dfs_emulate_failures) {
+ snprintf(buf, sizeof(buf), "0x%04x\n", d->emulate_failures);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
@@ -287,8 +372,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
- }
- else {
+ } else {
count = -EINVAL;
goto out;
}
@@ -316,7 +400,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
struct ubi_device *ubi;
struct ubi_debug_info *d;
size_t buf_size;
- char buf[8] = {0};
+ char buf[16] = {0};
int val;
ubi = ubi_get_device(ubi_num);
@@ -330,7 +414,11 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
goto out;
}
- if (dent == d->dfs_power_cut_min) {
+ if (dent == d->dfs_emulate_failures) {
+ if (kstrtouint(buf, 0, &d->emulate_failures) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
count = -EINVAL;
goto out;
@@ -382,7 +470,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.open = simple_open,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -510,9 +597,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN, UBI_DFS_DIR_NAME,
ubi->ubi_num);
- if (n > UBI_DFS_DIR_LEN) {
+ if (n >= UBI_DFS_DIR_LEN) {
/* The array size is too small */
return -EINVAL;
}
@@ -559,6 +646,12 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
(void *)ubi_num, &eraseblk_count_fops);
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ d->dfs_emulate_failures = debugfs_create_file("emulate_failures",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+#endif
return 0;
}
@@ -600,7 +693,5 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_counter)
return 0;
- ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
- ubi_ro_mode(ubi);
return 1;
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 23676f32b681..b2fd97548808 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -53,56 +53,315 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/**
- * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * The following function is a legacy implementation of the UBI fault-injection
+ * hook. It is retained so that the legacy interface keeps working alongside
+ * the more powerful fault-injection framework added below.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+
+static inline int ubi_dbg_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !get_random_u32_below(200);
+ return 0;
+}
+
+static inline int ubi_dbg_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(500);
+ return 0;
+}
+
+static inline int ubi_dbg_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(400);
+ return 0;
+}
+
+/**
+ * MASK_XXX: Mask for emulate_failures in ubi_debug_info. The mask is used to
+ * precisely control the type and process of fault injection.
+ */
+/* Emulate a power cut when writing EC/VID header */
+#define MASK_POWER_CUT_EC (1 << 0)
+#define MASK_POWER_CUT_VID (1 << 1)
+/* Emulate a power cut when writing data */
+#define MASK_POWER_CUT_DATA (1 << 2)
+/* Emulate bit-flips */
+#define MASK_BITFLIPS (1 << 3)
+/* Emulate an ECC error */
+#define MASK_ECCERR (1 << 4)
+/* Emulates -EIO during data read */
+#define MASK_READ_FAILURE (1 << 5)
+#define MASK_READ_FAILURE_EC (1 << 6)
+#define MASK_READ_FAILURE_VID (1 << 7)
+/* Emulates -EIO during data write */
+#define MASK_WRITE_FAILURE (1 << 8)
+/* Emulate -EIO while erasing a PEB */
+#define MASK_ERASE_FAILURE (1 << 9)
+/* Return UBI_IO_FF when reading EC/VID header */
+#define MASK_IO_FF_EC (1 << 10)
+#define MASK_IO_FF_VID (1 << 11)
+/* Return UBI_IO_FF_BITFLIPS when reading EC/VID header */
+#define MASK_IO_FF_BITFLIPS_EC (1 << 12)
+#define MASK_IO_FF_BITFLIPS_VID (1 << 13)
+/* Return UBI_IO_BAD_HDR when reading EC/VID header */
+#define MASK_BAD_HDR_EC (1 << 14)
+#define MASK_BAD_HDR_VID (1 << 15)
+/* Return UBI_IO_BAD_HDR_EBADMSG when reading EC/VID header */
+#define MASK_BAD_HDR_EBADMSG_EC (1 << 16)
+#define MASK_BAD_HDR_EBADMSG_VID (1 << 17)
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+
+extern bool should_fail_eccerr(void);
+extern bool should_fail_bitflips(void);
+extern bool should_fail_read_failure(void);
+extern bool should_fail_write_failure(void);
+extern bool should_fail_erase_failure(void);
+extern bool should_fail_power_cut(void);
+extern bool should_fail_io_ff(void);
+extern bool should_fail_io_ff_bitflips(void);
+extern bool should_fail_bad_hdr(void);
+extern bool should_fail_bad_hdr_ebadmsg(void);
+
+static inline bool ubi_dbg_fail_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_BITFLIPS)
+ return should_fail_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_write(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_WRITE_FAILURE)
+ return should_fail_write_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_erase(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ERASE_FAILURE)
+ return should_fail_erase_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_power_cut(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_power_cut();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_read(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_read_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_eccerr(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ECCERR)
+ return should_fail_eccerr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr_ebadmsg();
+ return false;
+}
+#else /* CONFIG_MTD_UBI_FAULT_INJECTION */
+
+#define ubi_dbg_fail_bitflip(u) false
+#define ubi_dbg_fail_write(u) false
+#define ubi_dbg_fail_erase(u) false
+#define ubi_dbg_fail_power_cut(u, c) false
+#define ubi_dbg_fail_read(u, c) false
+#define ubi_dbg_fail_eccerr(u) false
+#define ubi_dbg_fail_ff(u, c) false
+#define ubi_dbg_fail_ff_bitflips(u, v) false
+#define ubi_dbg_fail_bad_hdr(u, c) false
+#define ubi_dbg_fail_bad_hdr_ebadmsg(u, c) false
+
+#endif
+
+/**
+ * ubi_dbg_is_power_cut - if it is time to emulate power cut.
* @ubi: UBI device description object
*
- * Returns non-zero if the UBI background thread is disabled for testing
- * purposes.
+ * Returns true if power cut should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_power_cut(struct ubi_device *ubi,
+ unsigned int caller)
{
- return ubi->dbg.disable_bgt;
+ if (ubi_dbg_power_cut(ubi, caller))
+ return true;
+ return ubi_dbg_fail_power_cut(ubi, caller);
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
* @ubi: UBI device description object
*
- * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ * Returns true if a bit-flip should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_bitflips)
- return !get_random_u32_below(200);
- return 0;
+ if (ubi_dbg_bitflip(ubi))
+ return true;
+ return ubi_dbg_fail_bitflip(ubi);
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
* @ubi: UBI device description object
*
- * Returns non-zero if a write failure should be emulated, otherwise returns
- * zero.
+ * Returns true if a write failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !get_random_u32_below(500);
- return 0;
+ if (ubi_dbg_write_failure(ubi))
+ return true;
+ return ubi_dbg_fail_write(ubi);
}
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
* @ubi: UBI device description object
*
- * Returns non-zero if an erase failure should be emulated, otherwise returns
- * zero.
+ * Returns true if an erase failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !get_random_u32_below(400);
- return 0;
+ if (ubi_dbg_erase_failure(ubi))
+ return true;
+ return ubi_dbg_fail_erase(ubi);
+}
+
+/**
+ * ubi_dbg_is_eccerr - if it is time to emulate an ECC error.
+ * @ubi: UBI device description object
+ *
+ * Returns true if an ECC error should be emulated, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_eccerr(const struct ubi_device *ubi)
+{
+ return ubi_dbg_fail_eccerr(ubi);
+}
+
+/**
+ * ubi_dbg_is_read_failure - if it is time to emulate a read failure.
+ * @ubi: UBI device description object
+ *
+ * Returns true if a read failure should be emulated, otherwise returns
+ * false.
+ */
+static inline bool ubi_dbg_is_read_failure(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_read(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff - if it is time to emulate a read region containing only 0xFF.
+ * @ubi: UBI device description object
+ *
+ * Returns true if the read region should be emulated as all 0xFF, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff_bitflips - if it is time to emulate a read region containing
+ * only 0xFF, with an error reported by the MTD driver
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if the read region should be emulated as all 0xFF with an
+ * error reported by the MTD driver, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff_bitflips(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr - if it is time to emulate a bad header
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr_ebadmsg - if it is time to emulate a bad header with
+ * an ECC error.
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header with an ECC error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr_ebadmsg(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
}
static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
@@ -125,5 +384,4 @@ static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
ubi->dbg.chk_fastmap = 1;
}
-int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */
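The emulate_failures bitmask composes freely, so one write to the per-device "emulate_failures" debugfs file (added in debug.c above) can arm several failure types at once. For example, random bit-flips plus read failures on data and EC headers:

	/* MASK_BITFLIPS (1 << 3) | MASK_READ_FAILURE (1 << 5) |
	 * MASK_READ_FAILURE_EC (1 << 6) == 0x68 */
	unsigned int mask = MASK_BITFLIPS | MASK_READ_FAILURE | MASK_READ_FAILURE_EC;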
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 655ff41863e2..c7ba7a15c9f7 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -33,9 +33,6 @@
#include <linux/err.h>
#include "ubi.h"
-/* Number of physical eraseblocks reserved for atomic LEB change operation */
-#define EBA_RESERVED_PEBS 1
-
/**
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
* @pnum: the physical eraseblock number attached to the LEB
@@ -1459,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+ /**
+ * The volumes_lock lock is needed here to prevent the expired old eba_tbl
+ * being updated when the eba_tbl is copied in the ubi_resize_volume() process.
+ */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1560,6 +1564,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
GFP_KERNEL);
if (!fm_eba[i]) {
ret = -ENOMEM;
+ kfree(scan_eba[i]);
goto out_free;
}
@@ -1595,7 +1600,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
}
out_free:
- for (i = 0; i < num_volumes; i++) {
+ while (--i >= 0) {
if (!ubi->volumes[i])
continue;
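The self_check_eba() unwind fix replaces a full forward sweep with a partial one: on allocation failure at index i, only entries 0..i-1 were fully set up, and the failing iteration frees its own scan_eba[i] before jumping out. A user-space analogue of the pattern (hypothetical names):

	#include <stdlib.h>

	static int alloc_pairs(int **a, int **b, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			a[i] = malloc(64);
			if (!a[i])
				goto out_free;
			b[i] = malloc(64);
			if (!b[i]) {
				free(a[i]);	/* a[i] exists, b[i] does not */
				goto out_free;
			}
		}
		return 0;

	out_free:
		while (--i >= 0) {	/* unwind completed iterations only */
			free(a[i]);
			free(b[i]);
		}
		return -1;
	}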
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 863f571f1adb..e2bc1122bfd3 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ if (!ubi->free.rb_node)
goto out;
if (anchor)
@@ -98,43 +98,104 @@ out:
}
/*
- * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * wait_free_pebs_for_pool - wait until there are enough free PEBs
+ * @ubi: UBI device description object
+ *
+ * Wait and execute do_work() until there are enough free PEBs, then fill the
+ * pool as much as we can. This reduces how often the pool must be refilled,
+ * which in turn reduces the fastmap update frequency.
+ */
+static void wait_free_pebs_for_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int free, expect_free, executed;
+ /*
+	 * At least the following free PEBs are reserved by UBI:
+	 * 1. WL_RESERVED_PEBS[1]
+	 * 2. EBA_RESERVED_PEBS[1]
+	 * 3. fm pebs - 1: the fastmap data PEBs, not counting the fm_anchor
+	 * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
+ */
+ int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
+ ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;
+
+ do {
+ spin_lock(&ubi->wl_lock);
+ free = ubi->free_count;
+ free += pool->size - pool->used + wl_pool->size - wl_pool->used;
+ expect_free = reserved + ubi->beb_rsvd_pebs;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+		 * Break out if there is no work left or executing a work
+		 * failed, given that erase_worker will schedule itself when
+		 * the MTD layer returns -EBUSY during system shutdown.
+ */
+ if (do_work(ubi, &executed) || !executed)
+ break;
+ } while (free < expect_free);
+}
+
+/*
+ * left_free_count - returns the number of free pebs to fill fm pools
* @ubi: UBI device description object
- * @is_wl_pool: whether UBI is filling wear leveling pool
*
- * This helper function checks whether there are enough free pebs (deducted
- * by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
- * there is at least one of free pebs is filled into fm_wl_pool.
- * For wear leveling pool, UBI should also reserve free pebs for bad pebs
- * handling, because there maybe no enough free pebs for user volumes after
- * producing new bad pebs.
+ * This helper function returns the number of free PEBs (with fastmap
+ * PEBs deducted) available to fill fm_pool and fm_wl_pool.
*/
-static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+static int left_free_count(struct ubi_device *ubi)
{
int fm_used = 0; // fastmap non anchor pebs.
- int beb_rsvd_pebs;
if (!ubi->free.rb_node)
- return false;
+ return 0;
- beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
- if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ if (!ubi->ro_mode && !ubi->fm_disabled)
fm_used = ubi->fm_size / ubi->leb_size - 1;
- return ubi->free_count - beb_rsvd_pebs > fm_used;
+ return ubi->free_count - fm_used;
+}
+
+/*
+ * can_fill_pools - whether free PEBs will be left after filling pools
+ * @ubi: UBI device description object
+ * @free: current number of free PEBs
+ *
+ * Return %1 if free PEBs will still be left after filling the pools,
+ * otherwise %0 is returned.
+ */
+static int can_fill_pools(struct ubi_device *ubi, int free)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int pool_need = pool->max_size - pool->size +
+ wl_pool->max_size - wl_pool->size;
+
+ if (free - pool_need < 1)
+ return 0;
+
+ return 1;
}
/**
- * ubi_refill_pools - refills all fastmap PEB pools.
+ * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
* @ubi: UBI device description object
*/
-void ubi_refill_pools(struct ubi_device *ubi)
+void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
int enough;
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ wait_free_pebs_for_pool(ubi);
+
+ down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+
spin_lock(&ubi->wl_lock);
return_unused_pool_pebs(ubi, wl_pool);
@@ -159,7 +220,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!has_enough_free_count(ubi, false))
+ if (left_free_count(ubi) <= 0)
break;
e = wl_get_wle(ubi);
@@ -172,10 +233,13 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!has_enough_free_count(ubi, true))
+ int left_free = left_free_count(ubi);
+
+ if (left_free <= 0)
break;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
+ !can_fill_pools(ubi, left_free));
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
@@ -210,7 +274,7 @@ static int produce_free_peb(struct ubi_device *ubi)
while (!ubi->free.rb_node && ubi->works_count) {
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err)
return err;
@@ -282,14 +346,27 @@ out:
* WL sub-system.
*
* @ubi: UBI device description object
+ * @need_fill: whether to fill wear-leveling pool when no PEBs are found
*/
-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+ bool need_fill)
{
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
- if (pool->used == pool->size)
+ if (pool->used == pool->size) {
+ if (need_fill && !ubi->fm_work_scheduled) {
+ /*
+ * We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as
+ * possible.
+ */
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
return NULL;
+ }
pnum = pool->pebs[pool->used];
return ubi->lookuptbl[pnum];
@@ -311,16 +388,16 @@ static bool need_wear_leveling(struct ubi_device *ubi)
if (!ubi->used.rb_node)
return false;
- e = next_peb_for_wl(ubi);
+ e = next_peb_for_wl(ubi, false);
if (!e) {
if (!ubi->free.rb_node)
return false;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
ec = e->ec;
} else {
ec = e->ec;
if (ubi->free.rb_node) {
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
ec = max(ec, e->ec);
}
}
@@ -453,8 +530,6 @@ int ubi_is_erase_work(struct ubi_work *wrk)
static void ubi_fastmap_close(struct ubi_device *ubi)
{
- int i;
-
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
@@ -463,11 +538,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
- if (ubi->fm) {
- for (i = 0; i < ubi->fm->used_blocks; i++)
- kfree(ubi->fm->e[i]);
- }
- kfree(ubi->fm);
+ ubi_free_fastmap(ubi);
}
/**
@@ -481,7 +552,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
- if (e && !ubi->fm_disabled && !ubi->fm &&
+ if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
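The refill path above now proceeds only when topping up both pools would still leave at least one free PEB: wait_free_pebs_for_pool() blocks until that holds, left_free_count() supplies the budget, and the result of can_fill_pools() feeds find_wl_entry()'s new pick_max argument. A minimal userspace sketch of that accounting, with made-up pool sizes (the kernel derives the free count with reservation bookkeeping not shown in these hunks):

#include <stdio.h>

struct pool { int size, max_size; };

/* Mirrors can_fill_pools(): refilling both pools must leave >= 1 free PEB. */
static int can_fill_pools(int free_pebs, const struct pool *p,
			  const struct pool *wl)
{
	int pool_need = (p->max_size - p->size) +
			(wl->max_size - wl->size);

	return free_pebs - pool_need >= 1;
}

int main(void)
{
	struct pool p  = { .size = 2, .max_size = 8 };
	struct pool wl = { .size = 1, .max_size = 4 };

	/* Both pools need 9 PEBs in total, so 9 free is not enough. */
	printf("free=9  -> %d\n", can_fill_pools(9, &p, &wl));  /* 0 */
	printf("free=10 -> %d\n", can_fill_pools(10, &p, &wl)); /* 1 */
	return 0;
}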
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 28c8151a0725..9a4940874be5 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -20,7 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
+ ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
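The resizing of ubi_calc_fm_size() above fixes the worst-case layout: the old expression reserved a single ubi_fm_eba record plus volume headers for UBI_MAX_VOLUMES only, while the new one reserves an (eba + volhdr) pair for every possible volume, internal ones included, plus one __be32 mapping entry per PEB. A rough recomputation with invented sizes (none of these constants are the real on-flash struct sizes, and the leading header terms of the formula, elided in the hunk, are folded into hdr_terms):

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Hypothetical stand-ins, NOT kernel ABI values: */
	const long fm_scan_pool = 8, fm_ec = 8, fm_eba = 8, fm_volhdr = 32;
	const long be32 = 4, hdr_terms = 64;
	const long peb_count = 1024, leb_size = 126976;
	const long MAX_VOLUMES = 128, INT_VOL_COUNT = 1;

	long size = hdr_terms + 2 * fm_scan_pool +
		    peb_count * fm_ec +
		    (fm_eba + fm_volhdr) * (MAX_VOLUMES + INT_VOL_COUNT) +
		    peb_count * be32;

	printf("raw=%ld bytes -> reserve %ld bytes (%ld LEB)\n",
	       size, ROUNDUP(size, leb_size),
	       ROUNDUP(size, leb_size) / leb_size);
	return 0;
}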
@@ -105,7 +106,7 @@ static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
- new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!new)
goto out;
@@ -1390,53 +1391,6 @@ out:
}
/**
- * erase_block - Manually erase a PEB.
- * @ubi: UBI device object
- * @pnum: PEB to be erased
- *
- * Returns the new EC value on success, < 0 indicates an internal error.
- */
-static int erase_block(struct ubi_device *ubi, int pnum)
-{
- int ret;
- struct ubi_ec_hdr *ec_hdr;
- long long ec;
-
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ec_hdr)
- return -ENOMEM;
-
- ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
- if (ret < 0)
- goto out;
- else if (ret && ret != UBI_IO_BITFLIPS) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = ubi_io_sync_erase(ubi, pnum, 0);
- if (ret < 0)
- goto out;
-
- ec = be64_to_cpu(ec_hdr->ec);
- ec += ret;
- if (ec > UBI_MAX_ERASECOUNTER) {
- ret = -EINVAL;
- goto out;
- }
-
- ec_hdr->ec = cpu_to_be64(ec);
- ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
- if (ret < 0)
- goto out;
-
- ret = ec;
-out:
- kfree(ec_hdr);
- return ret;
-}
-
-/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
*
@@ -1462,7 +1416,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
ubi->fm = NULL;
ret = -ENOMEM;
- fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ fm = kzalloc(sizeof(*fm), GFP_NOFS);
if (!fm)
goto out;
@@ -1538,11 +1492,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
- down_write(&ubi->fm_protect);
- down_write(&ubi->work_sem);
- down_write(&ubi->fm_eba_sem);
-
- ubi_refill_pools(ubi);
+ ubi_refill_pools_and_lock(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
@@ -1551,7 +1501,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
@@ -1576,7 +1526,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (!tmp_e) {
if (old_fm && old_fm->e[i]) {
- ret = erase_block(ubi, old_fm->e[i]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
@@ -1628,7 +1578,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
- ret = erase_block(ubi, old_fm->e[0]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
@@ -1640,7 +1590,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
goto err;
}
new_fm->e[0] = old_fm->e[0];
- new_fm->e[0]->ec = ret;
old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 01b644861253..915eb64cb001 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -195,7 +195,19 @@ retry:
if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
- err = UBI_IO_BITFLIPS;
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) {
+ ubi_warn(ubi, "cannot read %d bytes from PEB %d:%d (emulated)",
+ len, pnum, offset);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_eccerr(ubi)) {
+ ubi_warn(ubi, "ECC error (emulated) while reading %d bytes from PEB %d:%d, read %zd bytes",
+ len, pnum, offset, read);
+ return -EBADMSG;
}
}
@@ -782,7 +794,36 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* If there was %-EBADMSG, but the header CRC is still OK, report about
* a bit-flip to force scrubbing on this PEB.
*/
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_EC)) {
+ ubi_warn(ubi, "cannot read EC header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_EC)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_EC)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_EC)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_EC)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -821,8 +862,13 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_EC)) {
+ ubi_warn(ubi, "emulating a power cut when writing EC header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
+
+ memset((char *)ec_hdr + UBI_EC_HDR_SIZE, 0xFF, ubi->ec_hdr_alsize - UBI_EC_HDR_SIZE);
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
@@ -1029,7 +1075,36 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
return -EINVAL;
}
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_VID)) {
+ ubi_warn(ubi, "cannot read VID header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_VID)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_VID)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_VID)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_VID)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -1071,8 +1146,19 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_VID)) {
+ ubi_warn(ubi, "emulating a power cut when writing VID header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
+
+ if (ubi->vid_hdr_shift) {
+ memset((char *)p, 0xFF, ubi->vid_hdr_shift);
+ memset((char *)p + ubi->vid_hdr_shift + UBI_VID_HDR_SIZE, 0xFF,
+ ubi->vid_hdr_alsize - (ubi->vid_hdr_shift + UBI_VID_HDR_SIZE));
+ } else {
+ memset((char *)p + UBI_VID_HDR_SIZE, 0xFF, ubi->vid_hdr_alsize - UBI_VID_HDR_SIZE);
+ }
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
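All of the io.c additions above follow one pattern: each emulated failure fires only when its bit is set in a single user-controlled mask, exposed later in this patch as the dfs_emulate_failures debugfs knob in ubi_debug_info. A self-contained sketch of that gating, with an invented bit layout and an always-fire policy as a simplification of the kernel's behaviour:

#include <stdio.h>

/* Hypothetical bit assignments; the kernel defines its own MASK_* set. */
#define MASK_READ_FAILURE_EC	0x01
#define MASK_IO_FF_EC		0x02

static unsigned int emulate_failures;	/* written via debugfs in the kernel */

static int dbg_should_fail(unsigned int mask)
{
	/* Simplified: a set bit always fires here. */
	return (emulate_failures & mask) != 0;
}

static int read_ec_hdr(void)
{
	if (dbg_should_fail(MASK_READ_FAILURE_EC))
		return -5;	/* stands in for -EIO */
	if (dbg_should_fail(MASK_IO_FF_EC))
		return 1;	/* stands in for UBI_IO_FF */
	return 0;		/* header read fine */
}

int main(void)
{
	emulate_failures = MASK_IO_FF_EC;
	printf("read_ec_hdr() -> %d\n", read_ec_hdr());	/* 1 */
	return 0;
}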
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5db653eacbd4..df0a5a57b072 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets @ubi_num and @vol_id; returns an error code otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
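The new ubi_get_num_by_path() factors the path-to-(ubi_num, vol_id) lookup out of ubi_open_volume_path() so other in-kernel callers can reuse it. Note the MINOR() - 1: minor 0 of a UBI character-device major is the device node itself, and volume nodes start at minor 1. A userspace analogue using stat(2), with ubi_major2num() (a kernel-internal lookup) simply omitted:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
	struct stat st;
	const char *path = argc > 1 ? argv[1] : "/dev/ubi0_0";

	if (stat(path, &st) != 0 || !S_ISCHR(st.st_mode)) {
		/* the kernel returns -EINVAL for non-char-device paths */
		fprintf(stderr, "%s: not a character device\n", path);
		return 1;
	}
	/* Volume IDs are shifted by one relative to the minor number. */
	printf("major=%u vol_id=%d\n",
	       major(st.st_rdev), (int)minor(st.st_rdev) - 1);
	return 0;
}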
@@ -771,33 +791,6 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
-/**
- * ubi_flush - flush UBI work queue.
- * @ubi_num: UBI device to flush work queue
- * @vol_id: volume id to flush for
- * @lnum: logical eraseblock number to flush for
- *
- * This function executes all pending works for a particular volume id / logical
- * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
- * a wildcard for all of the corresponding volume numbers or logical
- * eraseblock numbers. It returns zero in case of success and a negative error
- * code in case of failure.
- */
-int ubi_flush(int ubi_num, int vol_id, int lnum)
-{
- struct ubi_device *ubi;
- int err = 0;
-
- ubi = ubi_get_device(ubi_num);
- if (!ubi)
- return -ENODEV;
-
- err = ubi_wl_flush(ubi, vol_id, lnum);
- ubi_put_device(ubi);
- return err;
-}
-EXPORT_SYMBOL_GPL(ubi_flush);
-
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 000000000000..34f8c1d3cdee
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint32_t lnum;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = from % unv->usable_leb_size;
+ lnum = from / unv->usable_leb_size;
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c8f1bd4fa100..44803d3329f4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -82,6 +82,9 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
#define UBI_DFS_DIR_NAME "ubi%d"
#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
/*
* Error codes returned by the I/O sub-system.
*
@@ -142,17 +145,6 @@ enum {
UBI_BAD_FASTMAP,
};
-/*
- * Flags for emulate_power_cut in ubi_debug_info
- *
- * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
- * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
- */
-enum {
- POWER_CUT_EC_WRITE = 0x01,
- POWER_CUT_VID_WRITE = 0x02,
-};
-
/**
* struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
* flash.
@@ -345,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -401,6 +394,7 @@ struct ubi_volume_desc {
* @power_cut_counter: count down for writes left until emulated power cut
* @power_cut_min: minimum number of writes before emulating a power cut
* @power_cut_max: maximum number of writes until emulating a power cut
+ * @emulate_failures: emulate failures for testing purposes
* @dfs_dir_name: name of debugfs directory containing files of this UBI device
* @dfs_dir: direntry object of the UBI device debugfs directory
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks
@@ -412,6 +406,7 @@ struct ubi_volume_desc {
* @dfs_emulate_power_cut: debugfs knob to emulate power cuts
* @dfs_power_cut_min: debugfs knob for minimum writes before power cut
* @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ * @dfs_emulate_failures: debugfs entry to control the fault injection type
*/
struct ubi_debug_info {
unsigned int chk_gen:1;
@@ -424,7 +419,8 @@ struct ubi_debug_info {
unsigned int power_cut_counter;
unsigned int power_cut_min;
unsigned int power_cut_max;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ unsigned int emulate_failures;
+ char dfs_dir_name[UBI_DFS_DIR_LEN];
struct dentry *dfs_dir;
struct dentry *dfs_chk_gen;
struct dentry *dfs_chk_io;
@@ -435,6 +431,7 @@ struct ubi_debug_info {
struct dentry *dfs_emulate_power_cut;
struct dentry *dfs_power_cut_min;
struct dentry *dfs_power_cut_max;
+ struct dentry *dfs_emulate_failures;
};
/**
@@ -491,6 +488,7 @@ struct ubi_debug_info {
* @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ * @fm_pool_rsv_cnt: Number of reserved PEBs for filling pool/wl_pool
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -564,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -601,6 +600,7 @@ struct ubi_device {
int fast_attach;
struct ubi_wl_entry *fm_anchor;
int fm_do_produce_anchor;
+ int fm_pool_rsv_cnt;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -814,7 +814,7 @@ extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
-extern struct class ubi_class;
+extern const struct class ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
@@ -831,7 +831,6 @@ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai);
int ubi_attach(struct ubi_device *ubi, int force_scan);
-void ubi_destroy_ai(struct ubi_attach_info *ai);
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -902,6 +901,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan);
/* wl.c */
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture);
int ubi_wl_get_peb(struct ubi_device *ubi);
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
int pnum, int torture);
@@ -914,7 +914,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int lnum, int torture);
int ubi_is_erase_work(struct ubi_work *wrk);
-void ubi_refill_pools(struct ubi_device *ubi);
+void ubi_refill_pools_and_lock(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
@@ -938,7 +938,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
int vid_hdr_offset, int max_beb_per1024,
- bool disable_fm);
+ bool disable_fm, bool need_resv_pool);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
@@ -956,6 +956,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
@@ -968,10 +969,22 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai);
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
+static inline void ubi_free_fastmap(struct ubi_device *ubi)
+{
+ if (ubi->fm) {
+ int i;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kmem_cache_free(ubi_wl_entry_slab, ubi->fm->e[i]);
+ kfree(ubi->fm);
+ ubi->fm = NULL;
+ }
+}
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
+static inline void ubi_free_fastmap(struct ubi_device *ubi) { }
#endif
/* block.c */
@@ -1124,6 +1137,19 @@ static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
return vidb->hdr;
}
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
/*
* This function is equivalent to 'ubi_io_read()', but @offset is relative to
* the beginning of the logical eraseblock, not to the beginning of the
@@ -1145,20 +1171,13 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
int pnum, int offset, int len)
{
ubi_assert(offset >= 0);
- return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
-}
-/**
- * ubi_ro_mode - switch to read-only mode.
- * @ubi: UBI device description object
- */
-static inline void ubi_ro_mode(struct ubi_device *ubi)
-{
- if (!ubi->ro_mode) {
- ubi->ro_mode = 1;
- ubi_warn(ubi, "switch to read-only mode");
- dump_stack();
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_DATA)) {
+ ubi_warn(ubi, "XXXXX emulating a power cut when writing data XXXXX");
+ ubi_ro_mode(ubi);
+ return -EROFS;
}
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
}
/**
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 2c867d16f89f..e5cf3bdca3b0 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct device *dev,
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -124,6 +124,33 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ fwnode_handle_put(fw_vols);
+ return fw_vol;
+ }
+ fwnode_handle_put(fw_vols);
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -189,7 +216,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -223,6 +250,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -352,6 +380,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark the volume as dead at this point to prevent anyone
+ * from taking a reference to it from now on.
+ * This is necessary as we have to release the spinlock before
+ * calling ubi_volume_notify.
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -408,6 +449,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -453,10 +495,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,7 +516,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -493,7 +540,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -501,19 +547,23 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
- return err;
-
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
@@ -592,6 +642,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2ec..6e5489e233dd 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 26a214f016c1..fbd399cf6503 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -181,11 +181,13 @@ static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* do_work - do one pending work.
* @ubi: UBI device description object
+ * @executed: whether a work was executed (may be %NULL)
*
* This function returns zero in case of success and a negative error code in
- * case of failure.
+ * case of failure. If @executed is not NULL and there is one work executed,
+ * @executed is set as %1, otherwise @executed is set as %0.
*/
-static int do_work(struct ubi_device *ubi)
+static int do_work(struct ubi_device *ubi, int *executed)
{
int err;
struct ubi_work *wrk;
@@ -203,9 +205,13 @@ static int do_work(struct ubi_device *ubi)
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
+ if (executed)
+ *executed = 0;
return 0;
}
+ if (executed)
+ *executed = 1;
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
@@ -311,12 +317,14 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
+ * @pick_max: pick the PEB even if its erase counter is beyond 'min_ec + @diff'
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
- struct rb_root *root, int diff)
+ struct rb_root *root, int diff,
+ int pick_max)
{
struct rb_node *p;
struct ubi_wl_entry *e;
@@ -330,9 +338,11 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct ubi_wl_entry *e1;
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
- if (e1->ec >= max)
+ if (e1->ec >= max) {
+ if (pick_max)
+ e = e1;
p = p->rb_left;
- else {
+ } else {
p = p->rb_right;
e = e1;
}
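The new pick_max argument changes what the descent above returns. With pick_max clear, the walk ends on the entry with the largest erase counter strictly below max (that is, min_ec + diff); with pick_max set, an entry at or above that bound can be returned when the search path ends on one, which the refill path uses (via !can_fill_pools()) once the cheap PEBs run out. A userspace sketch of the same walk over a plain binary search tree:

#include <stdio.h>
#include <stdlib.h>

struct node { int ec; struct node *l, *r; };

static struct node *mknode(int ec, struct node *l, struct node *r)
{
	struct node *n = malloc(sizeof(*n));

	n->ec = ec;
	n->l = l;
	n->r = r;
	return n;
}

/* Same walk as the patched find_wl_entry(); 'max' is min_ec + diff. */
static const struct node *find_entry(const struct node *p, int max,
				     int pick_max)
{
	const struct node *e = NULL;

	while (p) {
		if (p->ec >= max) {
			if (pick_max)
				e = p;	/* may land at/above the bound */
			p = p->l;
		} else {
			e = p;		/* best candidate below the bound */
			p = p->r;
		}
	}
	return e;
}

int main(void)
{
	/* Erase counters 100, 105, 112; min_ec = 100, diff = 10. */
	struct node *root = mknode(105, mknode(100, NULL, NULL),
					mknode(112, NULL, NULL));

	printf("pick_max=0 -> ec=%d\n", find_entry(root, 110, 0)->ec); /* 105 */
	printf("pick_max=1 -> ec=%d\n", find_entry(root, 110, 1)->ec); /* 112 */
	return 0;
}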
@@ -361,12 +371,15 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best
- * WL entry such that fastmap can use the anchor PEB later. */
+ /*
+ * If no fastmap has been written and fm_anchor is not
+ * reserved, and this WL entry can be used as anchor PEB,
+ * hold it back and return the second best WL entry such
+ * that fastmap can use the anchor PEB later.
+ */
e = may_reserve_for_fm(ubi, e, root);
} else
- e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
return e;
}
@@ -427,7 +440,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
}
/**
- * sync_erase - synchronously erase a physical eraseblock.
+ * ubi_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
@@ -435,8 +448,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -671,7 +683,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_assert(!ubi->move_to_put);
#ifdef CONFIG_MTD_UBI_FASTMAP
- if (!next_peb_for_wl(ubi) ||
+ if (!next_peb_for_wl(ubi, true) ||
#else
if (!ubi->free.rb_node ||
#endif
@@ -834,7 +846,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_not_moved;
}
if (err == MOVE_RETRY) {
- scrubbing = 1;
+ /*
+ * For the source PEB:
+ * 1. A scrub-type PEB (scrubbing is set) will be put back
+ *    into the ubi->scrub list.
+ * 2. A non-scrub-type PEB will be put back into the
+ *    ubi->used list.
+ */
+ keep = 1;
dst_leb_clean = 1;
goto out_not_moved;
}
@@ -1040,7 +1059,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -1094,7 +1113,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
- err = sync_erase(ubi, e, wl_wrk->torture);
+ err = ubi_sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
@@ -1686,7 +1705,7 @@ int ubi_thread(void *u)
}
spin_unlock(&ubi->wl_lock);
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err) {
ubi_err(ubi, "%s: work failed with error code %d",
ubi->bgt_name, err);
@@ -1749,7 +1768,7 @@ static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync
ubi->lookuptbl[e->pnum] = e;
if (sync) {
- err = sync_erase(ubi, e, false);
+ err = ubi_sync_erase(ubi, e, false);
if (err)
goto out_free;
@@ -2071,7 +2090,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
self_check_in_wl_tree(ubi, e, &ubi->free);
ubi->free_count--;
ubi_assert(ubi->free_count >= 0);
@@ -2097,7 +2116,7 @@ static int produce_free_peb(struct ubi_device *ubi)
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
spin_lock(&ubi->wl_lock);
if (err)
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 5ebe374a08ae..a69169c35e31 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,13 +5,16 @@
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+ bool need_fill);
static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
- /* Reserve enough LEBs to store two fastmaps. */
- *count += (ubi->fm_size / ubi->leb_size) * 2;
+ if (ubi->fm_disabled)
+ ubi->fm_pool_rsv_cnt = 0;
+ /* Reserve enough LEBs to store two fastmaps and to fill pools. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2 + ubi->fm_pool_rsv_cnt;
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
}
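With fastmap enabled, the reservation in ubi_fastmap_init() above now adds the attach-time pool reserve on top of the two fastmap copies. Back-of-envelope with illustrative numbers (fm_size, leb_size and the reserve count all depend on the device and the attach request):

#include <stdio.h>

int main(void)
{
	long fm_size = 2 * 126976;	/* hypothetical fastmap size */
	long leb_size = 126976;		/* hypothetical LEB size */
	long fm_pool_rsv_cnt = 64;	/* hypothetical attach-time reserve */

	/* Two fastmap copies plus the pool reserve. */
	printf("reserved PEBs: %ld\n",
	       (fm_size / leb_size) * 2 + fm_pool_rsv_cnt);	/* 68 */
	return 0;
}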
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,