Diffstat (limited to 'drivers/mtd')
36 files changed, 1335 insertions, 323 deletions
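[Editor's illustration, not part of the commit] The new mtd-intel-dg driver added below never maps the flash linearly; every access goes through a two-register indirect window: the flash offset is latched in NVM_ADDRESS_REG, then the data moves on a read or write of NVM_TRIGGER_REG. A minimal sketch of that access pattern, using the register names the driver defines:

	static u32 sketch_indirect_read32(void __iomem *base, u32 offset)
	{
		iowrite32(offset, base + NVM_ADDRESS_REG);	/* latch flash offset */
		return ioread32(base + NVM_TRIGGER_REG);	/* data returned on trigger read */
	}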
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index aed653ce8fa2..46cebde79f34 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -183,6 +183,17 @@ config MTD_POWERNV_FLASH platforms from Linux. This device abstracts away the firmware interface for flash access. +config MTD_INTEL_DG + tristate "Intel Discrete Graphics non-volatile memory driver" + depends on AUXILIARY_BUS + depends on MTD + help + This provides an MTD device to access Intel Discrete Graphics + non-volatile memory. + + To compile this driver as a module, choose M here: the module + will be called mtd-intel-dg. + comment "Disk-On-Chip Device Drivers" config MTD_DOCG3 diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index d11eb2b8b6f8..9fe4ce9cffde 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_SST25L) += sst25l.o obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o +obj-$(CONFIG_MTD_INTEL_DG) += mtd_intel_dg.o CFLAGS_docg3.o += -I$(src) diff --git a/drivers/mtd/devices/mtd_intel_dg.c b/drivers/mtd/devices/mtd_intel_dg.c new file mode 100644 index 000000000000..b438ee5aacc3 --- /dev/null +++ b/drivers/mtd/devices/mtd_intel_dg.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2019-2025, Intel Corporation. All rights reserved. + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/intel_dg_nvm_aux.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/sizes.h> +#include <linux/types.h> + +struct intel_dg_nvm { + struct kref refcnt; + struct mtd_info mtd; + struct mutex lock; /* region access lock */ + void __iomem *base; + void __iomem *base2; + bool non_posted_erase; + + size_t size; + unsigned int nregions; + struct { + const char *name; + u8 id; + u64 offset; + u64 size; + unsigned int is_readable:1; + unsigned int is_writable:1; + } regions[] __counted_by(nregions); +}; + +#define NVM_TRIGGER_REG 0x00000000 +#define NVM_VALSIG_REG 0x00000010 +#define NVM_ADDRESS_REG 0x00000040 +#define NVM_REGION_ID_REG 0x00000044 +#define NVM_DEBUG_REG 0x00000000 +/* + * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K + * [23:16]-Reserved + * [31:24]-Erase MEM RegionID + */ +#define NVM_ERASE_REG 0x00000048 +#define NVM_ACCESS_ERROR_REG 0x00000070 +#define NVM_ADDRESS_ERROR_REG 0x00000074 + +/* Flash Valid Signature */ +#define NVM_FLVALSIG 0x0FF0A55A + +#define NVM_MAP_ADDR_MASK GENMASK(7, 0) +#define NVM_MAP_ADDR_SHIFT 0x00000004 + +#define NVM_REGION_ID_DESCRIPTOR 0 +/* Flash Region Base Address */ +#define NVM_FRBA 0x40 +/* Flash Region __n - Flash Descriptor Record */ +#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4)) +/* Flash Map 1 Register */ +#define NVM_FLMAP1_REG 0x18 +#define NVM_FLMSTR4_OFFSET 0x00C + +#define NVM_ACCESS_ERROR_PCIE_MASK 0x7 + +#define NVM_FREG_BASE_MASK GENMASK(15, 0) +#define NVM_FREG_ADDR_MASK GENMASK(31, 16) +#define NVM_FREG_ADDR_SHIFT 12 +#define NVM_FREG_MIN_REGION_SIZE 0xFFF + +#define NVM_NON_POSTED_ERASE_DONE BIT(23) +#define NVM_NON_POSTED_ERASE_DONE_ITER 3000 + +static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region) +{ 
+ iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG); +} + +static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm) +{ + void __iomem *base = nvm->base; + + u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK; + + /* reset error bits */ + if (reg) + iowrite32(reg, base + NVM_ACCESS_ERROR_REG); + + return reg; +} + +static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + return ioread32(base + NVM_TRIGGER_REG); +} + +static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + return readq(base + NVM_TRIGGER_REG); +} + +static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + iowrite32(data, base + NVM_TRIGGER_REG); +} + +static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + writeq(data, base + NVM_TRIGGER_REG); +} + +static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map) +{ + u32 fmstr4_addr; + u32 fmstr4; + u32 flmap1; + u32 fmba; + + idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); + + flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG); + if (idg_nvm_error(nvm)) + return -EIO; + /* Get Flash Master Base Address (FMBA) */ + fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT); + fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET; + + fmstr4 = idg_nvm_read32(nvm, fmstr4_addr); + if (idg_nvm_error(nvm)) + return -EIO; + + *access_map = fmstr4; + return 0; +} + +/* + * Region read/write access encoded in the access map + * in the following order from the lower bit: + * [3:0] regions 12-15 read state + * [7:4] regions 12-15 write state + * [19:8] regions 0-11 read state + * [31:20] regions 0-11 write state + */ +static bool idg_nvm_region_readable(u32 access_map, u8 region) +{ + if (region < 12) + return access_map & BIT(region + 8); /* [19:8] */ + else + return access_map & BIT(region - 12); /* [3:0] */ +} + +static bool idg_nvm_region_writable(u32 access_map, u8 region) +{ + if (region < 12) + return access_map & BIT(region + 20); /* [31:20] */ + else + return access_map & BIT(region - 8); /* [7:4] */ +} + +static int idg_nvm_is_valid(struct intel_dg_nvm *nvm) +{ + u32 is_valid; + + idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); + + is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG); + if (idg_nvm_error(nvm)) + return -EIO; + + if (is_valid != NVM_FLVALSIG) + return -ENODEV; + + return 0; +} + +static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from) +{ + unsigned int i; + + for (i = 0; i < nvm->nregions; i++) { + if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from && + nvm->regions[i].offset <= from && + nvm->regions[i].size != 0) + break; + } + + return i; +} + +static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to, + loff_t offset, size_t len, const u32 *newdata) +{ + u32 data = idg_nvm_read32(nvm, to); + + if (idg_nvm_error(nvm)) + return -EIO; + + memcpy((u8 *)&data + offset, newdata, len); + + idg_nvm_write32(nvm, to, data); + if (idg_nvm_error(nvm)) + return -EIO; + + return len; +} + +static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region, + loff_t to, size_t len, const unsigned char *buf) +{ + size_t len_s = len; + size_t to_shift; + size_t
len8; + size_t len4; + ssize_t ret; + size_t to4; + size_t i; + + idg_nvm_set_region_id(nvm, region); + + to4 = ALIGN_DOWN(to, sizeof(u32)); + to_shift = min(sizeof(u32) - ((size_t)to - to4), len); + if (to - to4) { + ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]); + if (ret < 0) + return ret; + + buf += to_shift; + to += to_shift; + len_s -= to_shift; + } + + if (!IS_ALIGNED(to, sizeof(u64)) && + ((to ^ (to + len_s)) & GENMASK(31, 10))) { + /* + * Workaround reads/writes across 1k-aligned addresses + * (start u32 before 1k, end u32 after) + * as this fails on hardware. + */ + u32 data; + + memcpy(&data, &buf[0], sizeof(u32)); + idg_nvm_write32(nvm, to, data); + if (idg_nvm_error(nvm)) + return -EIO; + buf += sizeof(u32); + to += sizeof(u32); + len_s -= sizeof(u32); + } + + len8 = ALIGN_DOWN(len_s, sizeof(u64)); + for (i = 0; i < len8; i += sizeof(u64)) { + u64 data; + + memcpy(&data, &buf[i], sizeof(u64)); + idg_nvm_write64(nvm, to + i, data); + if (idg_nvm_error(nvm)) + return -EIO; + } + + len4 = len_s - len8; + if (len4 >= sizeof(u32)) { + u32 data; + + memcpy(&data, &buf[i], sizeof(u32)); + idg_nvm_write32(nvm, to + i, data); + if (idg_nvm_error(nvm)) + return -EIO; + i += sizeof(u32); + len4 -= sizeof(u32); + } + + if (len4 > 0) { + ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]); + if (ret < 0) + return ret; + } + + return len; +} + +static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region, + loff_t from, size_t len, unsigned char *buf) +{ + size_t len_s = len; + size_t from_shift; + size_t from4; + size_t len8; + size_t len4; + size_t i; + + idg_nvm_set_region_id(nvm, region); + + from4 = ALIGN_DOWN(from, sizeof(u32)); + from_shift = min(sizeof(u32) - ((size_t)from - from4), len); + + if (from - from4) { + u32 data = idg_nvm_read32(nvm, from4); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift); + len_s -= from_shift; + buf += from_shift; + from += from_shift; + } + + if (!IS_ALIGNED(from, sizeof(u64)) && + ((from ^ (from + len_s)) & GENMASK(31, 10))) { + /* + * Workaround reads/writes across 1k-aligned addresses + * (start u32 before 1k, end u32 after) + * as this fails on hardware. 
+ */ + u32 data = idg_nvm_read32(nvm, from); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[0], &data, sizeof(data)); + len_s -= sizeof(u32); + buf += sizeof(u32); + from += sizeof(u32); + } + + len8 = ALIGN_DOWN(len_s, sizeof(u64)); + for (i = 0; i < len8; i += sizeof(u64)) { + u64 data = idg_nvm_read64(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + + memcpy(&buf[i], &data, sizeof(data)); + } + + len4 = len_s - len8; + if (len4 >= sizeof(u32)) { + u32 data = idg_nvm_read32(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[i], &data, sizeof(data)); + i += sizeof(u32); + len4 -= sizeof(u32); + } + + if (len4 > 0) { + u32 data = idg_nvm_read32(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[i], &data, len4); + } + + return len; +} + +static ssize_t +idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr) +{ + void __iomem *base2 = nvm->base2; + void __iomem *base = nvm->base; + const u32 block = 0x10; + u32 iter = 0; + u32 reg; + u64 i; + + for (i = 0; i < len; i += SZ_4K) { + iowrite32(from + i, base + NVM_ADDRESS_REG); + iowrite32(region << 24 | block, base + NVM_ERASE_REG); + if (nvm->non_posted_erase) { + /* Wait for Erase Done */ + reg = ioread32(base2 + NVM_DEBUG_REG); + while (!(reg & NVM_NON_POSTED_ERASE_DONE) && + ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) { + msleep(10); + reg = ioread32(base2 + NVM_DEBUG_REG); + } + if (reg & NVM_NON_POSTED_ERASE_DONE) { + /* Clear Erase Done */ + iowrite32(reg, base2 + NVM_DEBUG_REG); + } else { + *fail_addr = from + i; + return -ETIME; + } + } + /* Since the writes are via sgunit + * we cannot do back to back erases. + */ + msleep(50); + } + return len; +} + +static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device, + bool non_posted_erase) +{ + u32 access_map = 0; + unsigned int i, n; + int ret; + + /* clean error register, previous errors are ignored */ + idg_nvm_error(nvm); + + ret = idg_nvm_is_valid(nvm); + if (ret) { + dev_err(device, "The MEM is not valid %d\n", ret); + return ret; + } + + if (idg_nvm_get_access_map(nvm, &access_map)) + return -EIO; + + for (i = 0, n = 0; i < nvm->nregions; i++) { + u32 address, base, limit, region; + u8 id = nvm->regions[i].id; + + address = NVM_FLREG(id); + region = idg_nvm_read32(nvm, address); + + base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT; + limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) | + NVM_FREG_MIN_REGION_SIZE; + + dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n", + id, nvm->regions[i].name, region, base, limit); + + if (base >= limit || (i > 0 && limit == 0)) { + dev_dbg(device, "[%d] %s: disabled\n", + id, nvm->regions[i].name); + nvm->regions[i].is_readable = 0; + continue; + } + + if (nvm->size < limit) + nvm->size = limit; + + nvm->regions[i].offset = base; + nvm->regions[i].size = limit - base + 1; + /* No write access to descriptor; mask it out*/ + nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id); + + nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id); + dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n", + nvm->regions[i].name, + nvm->regions[i].id, + nvm->regions[i].offset, + nvm->regions[i].size, + nvm->regions[i].is_readable, + nvm->regions[i].is_writable); + + if (nvm->regions[i].is_readable) + n++; + } + + nvm->non_posted_erase = non_posted_erase; + + dev_dbg(device, "Registered %d regions\n", n); + dev_dbg(device, "Non posted erase 
%d\n", nvm->non_posted_erase); + + /* Need to add 1 to the amount of memory + * so it is reported as an even block + */ + nvm->size += 1; + + return n; +} + +static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info) +{ + struct intel_dg_nvm *nvm = mtd->priv; + size_t total_len; + unsigned int idx; + ssize_t bytes; + loff_t from; + size_t len; + u8 region; + u64 addr; + + if (WARN_ON(!nvm)) + return -EINVAL; + + if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) { + dev_err(&mtd->dev, "unaligned erase %llx %llx\n", + info->addr, info->len); + info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EINVAL; + } + + total_len = info->len; + addr = info->addr; + + guard(mutex)(&nvm->lock); + + while (total_len > 0) { + if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) { + dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len); + info->fail_addr = addr; + return -ERANGE; + } + + idx = idg_nvm_get_region(nvm, addr); + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -ERANGE; + } + + from = addr - nvm->regions[idx].offset; + region = nvm->regions[idx].id; + len = total_len; + if (len > nvm->regions[idx].size - from) + len = nvm->regions[idx].size - from; + + dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n", + region, nvm->regions[idx].name, from, len); + + bytes = idg_erase(nvm, region, from, len, &info->fail_addr); + if (bytes < 0) { + dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes); + info->fail_addr += nvm->regions[idx].offset; + return bytes; + } + + addr += len; + total_len -= len; + } + + return 0; +} + +static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct intel_dg_nvm *nvm = mtd->priv; + unsigned int idx; + ssize_t ret; + u8 region; + + if (WARN_ON(!nvm)) + return -EINVAL; + + idx = idg_nvm_get_region(nvm, from); + + dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n", + nvm->regions[idx].id, nvm->regions[idx].name, from, len); + + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + return -ERANGE; + } + + from -= nvm->regions[idx].offset; + region = nvm->regions[idx].id; + if (len > nvm->regions[idx].size - from) + len = nvm->regions[idx].size - from; + + guard(mutex)(&nvm->lock); + + ret = idg_read(nvm, region, from, len, buf); + if (ret < 0) { + dev_dbg(&mtd->dev, "read failed with %zd\n", ret); + return ret; + } + + *retlen = ret; + + return 0; +} + +static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct intel_dg_nvm *nvm = mtd->priv; + unsigned int idx; + ssize_t ret; + u8 region; + + if (WARN_ON(!nvm)) + return -EINVAL; + + idx = idg_nvm_get_region(nvm, to); + + dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n", + nvm->regions[idx].id, nvm->regions[idx].name, to, len); + + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + return -ERANGE; + } + + to -= nvm->regions[idx].offset; + region = nvm->regions[idx].id; + if (len > nvm->regions[idx].size - to) + len = nvm->regions[idx].size - to; + + guard(mutex)(&nvm->lock); + + ret = idg_write(nvm, region, to, len, buf); + if (ret < 0) { + dev_dbg(&mtd->dev, "write failed with %zd\n", ret); + return ret; + } + + *retlen = ret; + + return 0; +} + +static void intel_dg_nvm_release(struct kref *kref) +{ + struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt); + int i; + + pr_debug("freeing 
intel_dg nvm\n"); + for (i = 0; i < nvm->nregions; i++) + kfree(nvm->regions[i].name); + mutex_destroy(&nvm->lock); + kfree(nvm); +} + +static int intel_dg_mtd_get_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct intel_dg_nvm *nvm = master->priv; + + if (WARN_ON(!nvm)) + return -EINVAL; + pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); + kref_get(&nvm->refcnt); + + return 0; +} + +static void intel_dg_mtd_put_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct intel_dg_nvm *nvm = master->priv; + + if (WARN_ON(!nvm)) + return; + pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); + kref_put(&nvm->refcnt, intel_dg_nvm_release); +} + +static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device, + unsigned int nparts, bool writable_override) +{ + struct mtd_partition *parts = NULL; + unsigned int i, n; + int ret; + + dev_dbg(device, "registering with mtd\n"); + + nvm->mtd.owner = THIS_MODULE; + nvm->mtd.dev.parent = device; + nvm->mtd.flags = MTD_CAP_NORFLASH; + nvm->mtd.type = MTD_DATAFLASH; + nvm->mtd.priv = nvm; + nvm->mtd._write = intel_dg_mtd_write; + nvm->mtd._read = intel_dg_mtd_read; + nvm->mtd._erase = intel_dg_mtd_erase; + nvm->mtd._get_device = intel_dg_mtd_get_device; + nvm->mtd._put_device = intel_dg_mtd_put_device; + nvm->mtd.writesize = SZ_1; /* 1 byte granularity */ + nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */ + nvm->mtd.size = nvm->size; + + parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) { + if (!nvm->regions[i].is_readable) + continue; + parts[n].name = nvm->regions[i].name; + parts[n].offset = nvm->regions[i].offset; + parts[n].size = nvm->regions[i].size; + if (!nvm->regions[i].is_writable && !writable_override) + parts[n].mask_flags = MTD_WRITEABLE; + n++; + } + + ret = mtd_device_register(&nvm->mtd, parts, n); + + kfree(parts); + return ret; +} + +static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *aux_dev_id) +{ + struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev); + struct intel_dg_nvm *nvm; + struct device *device; + unsigned int nregions; + unsigned int i, n; + int ret; + + device = &aux_dev->dev; + + /* count available regions */ + for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { + if (invm->regions[i].name) + nregions++; + } + + if (!nregions) { + dev_err(device, "no regions defined\n"); + return -ENODEV; + } + + nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL); + if (!nvm) + return -ENOMEM; + + kref_init(&nvm->refcnt); + mutex_init(&nvm->lock); + + for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { + if (!invm->regions[i].name) + continue; + + char *name = kasprintf(GFP_KERNEL, "%s.%s", + dev_name(&aux_dev->dev), invm->regions[i].name); + if (!name) + continue; + nvm->regions[n].name = name; + nvm->regions[n].id = i; + n++; + } + nvm->nregions = n; /* in case kasprintf failed */ + + nvm->base = devm_ioremap_resource(device, &invm->bar); + if (IS_ERR(nvm->base)) { + ret = PTR_ERR(nvm->base); + goto err; + } + + if (invm->non_posted_erase) { + nvm->base2 = devm_ioremap_resource(device, &invm->bar2); + if (IS_ERR(nvm->base2)) { + ret = PTR_ERR(nvm->base2); + goto err; + } + } + + ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase); + if (ret < 0) { + dev_err(device, "cannot initialize nvm %d\n", ret); + goto err; + }
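/*
 * [Editor's aside, illustrative only, not part of the patch] At this point
 * in probe, intel_dg_nvm_init() has returned the number of readable
 * regions, which is passed below as the partition count. Readability and
 * writability come from the FLMSTR4 access map decoded earlier in this
 * file; for example, a hypothetical access_map of 0x00100100 makes
 * region 0 both readable (bit 8, the [19:8] read field) and writable
 * (bit 20, the [31:20] write field):
 *
 *	bool r = access_map & BIT(0 + 8);	// idg_nvm_region_readable()
 *	bool w = access_map & BIT(0 + 20);	// idg_nvm_region_writable()
 */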
+ + ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override); + if (ret) { + dev_err(device, "failed init mtd %d\n", ret); + goto err; + } + + dev_set_drvdata(&aux_dev->dev, nvm); + + return 0; + +err: + kref_put(&nvm->refcnt, intel_dg_nvm_release); + return ret; +} + +static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev) +{ + struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev); + + if (!nvm) + return; + + mtd_device_unregister(&nvm->mtd); + + dev_set_drvdata(&aux_dev->dev, NULL); + + kref_put(&nvm->refcnt, intel_dg_nvm_release); +} + +static const struct auxiliary_device_id intel_dg_mtd_id_table[] = { + { + .name = "i915.nvm", + }, + { + .name = "xe.nvm", + }, + { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table); + +static struct auxiliary_driver intel_dg_mtd_driver = { + .probe = intel_dg_mtd_probe, + .remove = intel_dg_mtd_remove, + .driver = { + /* auxiliary_driver_register() sets .name to be the modname */ + }, + .id_table = intel_dg_mtd_id_table +}; +module_auxiliary_driver(intel_dg_mtd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel DGFX MTD driver"); diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 8c22064ead38..f2bd1984609c 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part, return -ENOMEM; erase->addr = xfer->Offset; - erase->len = 1 << part->header.EraseUnitSize; + erase->len = 1ULL << part->header.EraseUnitSize; ret = mtd_erase(part->mbd.mtd, erase); if (!ret) { diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 391d81ad960c..8dc4f5c493fc 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, /* Sanitize user input */ p.devname[BLKPG_DEVNAMELTH - 1] = '\0'; - return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL); + return mtd_add_partition(mtd, p.devname, p.start, p.length); case BLKPG_DEL_PARTITION: diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 429d8c16baf0..5ba9a741f5ac 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -68,13 +68,7 @@ static struct class mtd_class = { .pm = MTD_CLS_PM_OPS, }; -static struct class mtd_master_class = { - .name = "mtd_master", - .pm = MTD_CLS_PM_OPS, -}; - static DEFINE_IDR(mtd_idr); -static DEFINE_IDR(mtd_master_idr); /* These are exported solely for the purpose of mtd_blkdevs.c. You should not use them for _anything_ else */ @@ -89,9 +83,8 @@ EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); -#define MTD_MASTER_DEVS 255 + #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) -static dev_t mtd_master_devt; /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... 
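[Editor's aside, a hedged summary of the mtdcore.c and mtdpart.c hunks that follow] These hunks back out the separate "mtd_master" device class: the dual IDR bookkeeping, the mtd_master character device region, and the extra class all go away, and add_mtd_device() loses its 'partitioned' flag. The recurring call-site change has this shape:

	/* before */
	ret = add_mtd_device(child, true);
	/* after */
	ret = add_mtd_device(child);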
@@ -111,17 +104,6 @@ static void mtd_release(struct device *dev) device_destroy(&mtd_class, index + 1); } -static void mtd_master_release(struct device *dev) -{ - struct mtd_info *mtd = dev_get_drvdata(dev); - - idr_remove(&mtd_master_idr, mtd->index); - of_node_put(mtd_get_of_node(mtd)); - - if (mtd_is_partition(mtd)) - release_mtd_partition(mtd); -} - static void mtd_device_release(struct kref *kref) { struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt); @@ -385,11 +367,6 @@ static const struct device_type mtd_devtype = { .release = mtd_release, }; -static const struct device_type mtd_master_devtype = { - .name = "mtd_master", - .release = mtd_master_release, -}; - static bool mtd_expert_analysis_mode; #ifdef CONFIG_DEBUG_FS @@ -657,13 +634,13 @@ exit_parent: /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure - * @partitioned: create partitioned device * * Add a device to the list of MTD devices present in the system, and * notify each currently active MTD 'user' of its arrival. Returns * zero on success or non-zero on failure. */ -int add_mtd_device(struct mtd_info *mtd, bool partitioned) + +int add_mtd_device(struct mtd_info *mtd) { struct device_node *np = mtd_get_of_node(mtd); struct mtd_info *master = mtd_get_master(mtd); @@ -710,17 +687,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) ofidx = -1; if (np) ofidx = of_alias_get_id(np, "mtd"); - if (partitioned) { - if (ofidx >= 0) - i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); - else - i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); - } else { - if (ofidx >= 0) - i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); - else - i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL); - } + if (ofidx >= 0) + i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); + else + i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); if (i < 0) { error = i; goto fail_locked; @@ -768,18 +738,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) /* Caller should have set dev.parent to match the * physical device, if appropriate. */ - if (partitioned) { - mtd->dev.type = &mtd_devtype; - mtd->dev.class = &mtd_class; - mtd->dev.devt = MTD_DEVT(i); - dev_set_name(&mtd->dev, "mtd%d", i); - error = dev_set_name(&mtd->dev, "mtd%d", i); - } else { - mtd->dev.type = &mtd_master_devtype; - mtd->dev.class = &mtd_master_class; - mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i); - error = dev_set_name(&mtd->dev, "mtd_master%d", i); - } + mtd->dev.type = &mtd_devtype; + mtd->dev.class = &mtd_class; + mtd->dev.devt = MTD_DEVT(i); + error = dev_set_name(&mtd->dev, "mtd%d", i); if (error) goto fail_devname; dev_set_drvdata(&mtd->dev, mtd); @@ -787,7 +749,6 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); if (error) { - pr_err("mtd: %s device_register fail %d\n", mtd->name, error); put_device(&mtd->dev); goto fail_added; } @@ -799,13 +760,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) mtd_debugfs_populate(mtd); - if (partitioned) { - device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, - "mtd%dro", i); - } + device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, + "mtd%dro", i); - pr_debug("mtd: Giving out %spartitioned device %d to %s\n", - partitioned ? 
"" : "un-", i, mtd->name); + pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ list_for_each_entry(not, &mtd_notifiers, list) @@ -813,16 +771,13 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) mutex_unlock(&mtd_table_mutex); - if (partitioned) { - if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { - if (IS_BUILTIN(CONFIG_MTD)) { - pr_info("mtd: setting mtd%d (%s) as root device\n", - mtd->index, mtd->name); - ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); - } else { - pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", - mtd->index, mtd->name); - } + if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { + if (IS_BUILTIN(CONFIG_MTD)) { + pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name); + ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); + } else { + pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", + mtd->index, mtd->name); } } @@ -838,10 +793,7 @@ fail_nvmem_add: fail_added: of_node_put(mtd_get_of_node(mtd)); fail_devname: - if (partitioned) - idr_remove(&mtd_idr, i); - else - idr_remove(&mtd_master_idr, i); + idr_remove(&mtd_idr, i); fail_locked: mutex_unlock(&mtd_table_mutex); return error; @@ -859,14 +811,12 @@ fail_locked: int del_mtd_device(struct mtd_info *mtd) { - struct mtd_notifier *not; - struct idr *idr; int ret; + struct mtd_notifier *not; mutex_lock(&mtd_table_mutex); - idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr; - if (idr_find(idr, mtd->index) != mtd) { + if (idr_find(&mtd_idr, mtd->index) != mtd) { ret = -ENODEV; goto out_error; } @@ -1106,7 +1056,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, const struct mtd_partition *parts, int nr_parts) { - struct mtd_info *parent; int ret, err; mtd_set_dev_defaults(mtd); @@ -1115,30 +1064,25 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, if (ret) goto out; - ret = add_mtd_device(mtd, false); - if (ret) - goto out; - if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) { - ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent); + ret = add_mtd_device(mtd); if (ret) goto out; - - } else { - parent = mtd; } /* Prefer parsed partitions over driver-provided fallback */ - ret = parse_mtd_partitions(parent, types, parser_data); + ret = parse_mtd_partitions(mtd, types, parser_data); if (ret == -EPROBE_DEFER) goto out; if (ret > 0) ret = 0; else if (nr_parts) - ret = add_mtd_partitions(parent, parts, nr_parts); - else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) - ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL); + ret = add_mtd_partitions(mtd, parts, nr_parts); + else if (!device_is_registered(&mtd->dev)) + ret = add_mtd_device(mtd); + else + ret = 0; if (ret) goto out; @@ -1158,14 +1102,13 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, register_reboot_notifier(&mtd->reboot_notifier); } - return 0; out: - nvmem_unregister(mtd->otp_user_nvmem); - nvmem_unregister(mtd->otp_factory_nvmem); - - del_mtd_partitions(mtd); + if (ret) { + nvmem_unregister(mtd->otp_user_nvmem); + nvmem_unregister(mtd->otp_factory_nvmem); + } - if (device_is_registered(&mtd->dev)) { + if (ret && device_is_registered(&mtd->dev)) { err = del_mtd_device(mtd); if (err) pr_err("Error when deleting MTD device (%d)\n", err); @@ -1324,7 +1267,8 @@ int __get_mtd_device(struct mtd_info *mtd) 
mtd = mtd->parent; } - kref_get(&master->refcnt); + if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + kref_get(&master->refcnt); return 0; } @@ -1418,7 +1362,8 @@ void __put_mtd_device(struct mtd_info *mtd) mtd = parent; } - kref_put(&master->refcnt, mtd_device_release); + if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + kref_put(&master->refcnt, mtd_device_release); module_put(master->owner); @@ -2585,16 +2530,6 @@ static int __init init_mtd(void) if (ret) goto err_reg; - ret = class_register(&mtd_master_class); - if (ret) - goto err_reg2; - - ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master"); - if (ret < 0) { - pr_err("unable to allocate char dev region\n"); - goto err_chrdev; - } - mtd_bdi = mtd_bdi_init("mtd"); if (IS_ERR(mtd_bdi)) { ret = PTR_ERR(mtd_bdi); @@ -2619,10 +2554,6 @@ out_procfs: bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); err_bdi: - unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); -err_chrdev: - class_unregister(&mtd_master_class); -err_reg2: class_unregister(&mtd_class); err_reg: pr_err("Error registering mtd class or bdi: %d\n", ret); @@ -2636,12 +2567,9 @@ static void __exit cleanup_mtd(void) if (proc_mtd) remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); - class_unregister(&mtd_master_class); - unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); idr_destroy(&mtd_idr); - idr_destroy(&mtd_master_idr); } module_init(init_mtd); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 2258d31c5aa6..b014861a06a6 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex; extern struct backing_dev_info *mtd_bdi; struct mtd_info *__mtd_next_device(int i); -int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned); +int __must_check add_mtd_device(struct mtd_info *mtd); int del_mtd_device(struct mtd_info *mtd); int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5a3db36d734e..994e8c51e674 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -86,7 +86,8 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, * parent conditional on that option. Note, this is a way to * distinguish between the parent and its partitions in sysfs. */ - child->dev.parent = &parent->dev; + child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? + &parent->dev : parent->dev.parent; child->dev.of_node = part->of_node; child->parent = parent; child->part.offset = part->offset; @@ -242,7 +243,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new) } int mtd_add_partition(struct mtd_info *parent, const char *name, - long long offset, long long length, struct mtd_info **out) + long long offset, long long length) { struct mtd_info *master = mtd_get_master(parent); u64 parent_size = mtd_is_partition(parent) ? 
@@ -275,15 +276,12 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child, true); + ret = add_mtd_device(child); if (ret) goto err_remove_part; mtd_add_partition_attrs(child); - if (out) - *out = child; - return 0; err_remove_part: @@ -415,7 +413,7 @@ int add_mtd_partitions(struct mtd_info *parent, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child, true); + ret = add_mtd_device(child); if (ret) { mutex_lock(&master->master.partitions_lock); list_del(&child->part.node); @@ -592,6 +590,9 @@ static int mtd_part_of_parse(struct mtd_info *master, int ret, err = 0; dev = &master->dev; + /* Use parent device (controller) if the top level MTD is not registered */ + if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master)) + dev = master->dev.parent; np = mtd_get_of_node(master); if (mtd_is_partition(master)) @@ -710,7 +711,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types, if (ret < 0 && !err) err = ret; } - return err; } diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c index 4dc4d65e7d32..8e604cc22ca3 100644 --- a/drivers/mtd/nand/qpic_common.c +++ b/drivers/mtd/nand/qpic_common.c @@ -57,14 +57,15 @@ qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) bam_txn_buf += sizeof(*bam_txn); bam_txn->bam_ce = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems; bam_txn->cmd_sgl = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems; bam_txn->data_sgl = bam_txn_buf; + bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw; init_completion(&bam_txn->txn_done); @@ -238,6 +239,11 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, struct bam_transaction *bam_txn = nandc->bam_txn; u32 offset; + if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "CE"); + return -EINVAL; + } + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; /* fill the command desc */ @@ -258,6 +264,12 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, /* use the separate sgl after this command */ if (flags & NAND_BAM_NEXT_SGL) { + if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", + "CMD sgl"); + return -EINVAL; + } + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; bam_ce_size = (bam_txn->bam_ce_pos - bam_txn->bam_ce_start) * @@ -297,10 +309,20 @@ int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, struct bam_transaction *bam_txn = nandc->bam_txn; if (read) { + if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], vaddr, size); bam_txn->rx_sgl_pos++; } else { + if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], vaddr, size); 
bam_txn->tx_sgl_pos++; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index dedcca87defc..84ab4a83cbd6 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc, dma_cookie_t cookie; buf_dma = dma_map_single(nc->dev, buf, len, dir); - if (dma_mapping_error(nc->dev, dev_dma)) { + if (dma_mapping_error(nc->dev, buf_dma)) { dev_err(nc->dev, "Failed to prepare a buffer for DMA access\n"); goto err; diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 3c7dee1be21d..0b402823b619 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -143,6 +143,7 @@ struct atmel_pmecc_caps { int nstrengths; int el_offset; bool correct_erased_chunks; + bool clk_ctrl; }; struct atmel_pmecc { @@ -843,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev, if (IS_ERR(pmecc->regs.errloc)) return ERR_CAST(pmecc->regs.errloc); + /* pmecc data setup time */ + if (caps->clk_ctrl) + writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK); + /* Disable all interrupts before registering the PMECC handler. */ writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); atmel_pmecc_reset(pmecc); @@ -896,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = { .strengths = atmel_pmecc_strengths, .nstrengths = 5, .el_offset = 0x8c, + .clk_ctrl = true, }; static struct atmel_pmecc_caps sama5d4_caps = { diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 62bdda3be92f..835653bdd5ab 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -29,6 +29,7 @@ #include <linux/static_key.h> #include <linux/list.h> #include <linux/log2.h> +#include <linux/string_choices.h> #include "brcmnand.h" @@ -359,6 +360,7 @@ enum brcmnand_reg { BRCMNAND_CORR_THRESHOLD_EXT, BRCMNAND_UNCORR_COUNT, BRCMNAND_CORR_COUNT, + BRCMNAND_READ_ERROR_COUNT, BRCMNAND_CORR_EXT_ADDR, BRCMNAND_CORR_ADDR, BRCMNAND_UNCORR_EXT_ADDR, @@ -389,6 +391,7 @@ static const u16 brcmnand_regs_v21[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0, [BRCMNAND_UNCORR_COUNT] = 0, [BRCMNAND_CORR_COUNT] = 0, + [BRCMNAND_READ_ERROR_COUNT] = 0, [BRCMNAND_CORR_EXT_ADDR] = 0x60, [BRCMNAND_CORR_ADDR] = 0x64, [BRCMNAND_UNCORR_EXT_ADDR] = 0x68, @@ -419,6 +422,7 @@ static const u16 brcmnand_regs_v33[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0, [BRCMNAND_UNCORR_COUNT] = 0, [BRCMNAND_CORR_COUNT] = 0, + [BRCMNAND_READ_ERROR_COUNT] = 0x80, [BRCMNAND_CORR_EXT_ADDR] = 0x70, [BRCMNAND_CORR_ADDR] = 0x74, [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, @@ -449,6 +453,7 @@ static const u16 brcmnand_regs_v50[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0, [BRCMNAND_UNCORR_COUNT] = 0, [BRCMNAND_CORR_COUNT] = 0, + [BRCMNAND_READ_ERROR_COUNT] = 0x80, [BRCMNAND_CORR_EXT_ADDR] = 0x70, [BRCMNAND_CORR_ADDR] = 0x74, [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, @@ -479,6 +484,7 @@ static const u16 brcmnand_regs_v60[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4, [BRCMNAND_UNCORR_COUNT] = 0xfc, [BRCMNAND_CORR_COUNT] = 0x100, + [BRCMNAND_READ_ERROR_COUNT] = 0x104, [BRCMNAND_CORR_EXT_ADDR] = 0x10c, [BRCMNAND_CORR_ADDR] = 0x110, [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, @@ -509,6 +515,7 @@ static const u16 brcmnand_regs_v71[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, [BRCMNAND_UNCORR_COUNT] = 0xfc, [BRCMNAND_CORR_COUNT] = 0x100, + [BRCMNAND_READ_ERROR_COUNT] = 0x104, 
[BRCMNAND_CORR_EXT_ADDR] = 0x10c, [BRCMNAND_CORR_ADDR] = 0x110, [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, @@ -539,6 +546,7 @@ static const u16 brcmnand_regs_v72[] = { [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, [BRCMNAND_UNCORR_COUNT] = 0xfc, [BRCMNAND_CORR_COUNT] = 0x100, + [BRCMNAND_READ_ERROR_COUNT] = 0x104, [BRCMNAND_CORR_EXT_ADDR] = 0x10c, [BRCMNAND_CORR_ADDR] = 0x110, [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, @@ -959,11 +967,11 @@ static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs, return offs_cs0 + cs * ctrl->reg_spacing + cs_offs; } -static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl) +static inline u32 brcmnand_corr_total(struct brcmnand_controller *ctrl) { - if (ctrl->nand_version < 0x0600) - return 1; - return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT); + if (ctrl->nand_version < 0x400) + return 0; + return brcmnand_read_reg(ctrl, BRCMNAND_READ_ERROR_COUNT); } static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) @@ -1462,7 +1470,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) int ret; if (old_wp != wp) { - dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); + dev_dbg(ctrl->dev, "WP %s\n", str_on_off(wp)); old_wp = wp; } @@ -1492,7 +1500,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) if (ret) dev_err_ratelimited(&host->pdev->dev, "nand #WP expected %s\n", - wp ? "on" : "off"); + str_on_off(wp)); } } @@ -1869,8 +1877,8 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, unsigned int trans = len >> FC_SHIFT; dma_addr_t pa; - dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ? - "read" : "write"), buf, oob); + dev_dbg(ctrl->dev, "EDU %s %p:%p\n", + str_read_write(edu_cmd == EDU_CMD_READ), buf, oob); pa = dma_map_single(ctrl->dev, buf, len, dir); if (dma_mapping_error(ctrl->dev, pa)) { @@ -2066,15 +2074,20 @@ static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf, */ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, u64 addr, unsigned int trans, u32 *buf, - u8 *oob, u64 *err_addr) + u8 *oob, u64 *err_addr, unsigned int *corr) { struct brcmnand_host *host = nand_get_controller_data(chip); struct brcmnand_controller *ctrl = host->ctrl; int i, ret = 0; + unsigned int prev_corr; + + if (corr) + *corr = 0; brcmnand_clear_ecc_addr(ctrl); for (i = 0; i < trans; i++, addr += FC_BYTES) { + prev_corr = brcmnand_corr_total(ctrl); brcmnand_set_cmd_addr(mtd, addr); /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ brcmnand_send_cmd(host, CMD_PAGE_READ); @@ -2099,13 +2112,16 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, if (*err_addr) ret = -EBADMSG; - } + else { + *err_addr = brcmnand_get_correcc_addr(ctrl); - if (!ret) { - *err_addr = brcmnand_get_correcc_addr(ctrl); + if (*err_addr) { + ret = -EUCLEAN; - if (*err_addr) - ret = -EUCLEAN; + if (corr && (brcmnand_corr_total(ctrl) - prev_corr) > *corr) + *corr = brcmnand_corr_total(ctrl) - prev_corr; + } + } } } @@ -2173,6 +2189,8 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, int err; bool retry = true; bool edu_err = false; + unsigned int corrected = 0; /* max corrected bits per subpage */ + unsigned int prev_tot = brcmnand_corr_total(ctrl); dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); @@ -2200,9 +2218,11 @@ try_dmaread: memset(oob, 0x99, mtd->oobsize); err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf, - oob, &err_addr); + oob, &err_addr, &corrected); } + 
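/*
 * [Editor's aside, illustrative only, not part of the patch] The pattern
 * introduced around the statement that follows: snapshot the controller's
 * accumulating corrected-bit counter before the transfer, then attribute
 * the delta to this read:
 *
 *	unsigned int prev_tot = brcmnand_corr_total(ctrl);
 *	...DMA or PIO read...
 *	mtd->ecc_stats.corrected += brcmnand_corr_total(ctrl) - prev_tot;
 *
 * Controllers older than v4.0 lack the READ_ERROR_COUNT register, so
 * brcmnand_corr_total() returns 0 there, the delta is always 0, and the
 * "increase stat by 1" fallback further down preserves the old behaviour.
 */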
mtd->ecc_stats.corrected += brcmnand_corr_total(ctrl) - prev_tot; + if (mtd_is_eccerr(err)) { /* * On controller version and 7.0, 7.1 , DMA read after a @@ -2240,16 +2260,20 @@ try_dmaread: } if (mtd_is_bitflip(err)) { - unsigned int corrected = brcmnand_count_corrected(ctrl); - /* in case of EDU correctable error we read again using PIO */ if (edu_err) err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf, - oob, &err_addr); + oob, &err_addr, &corrected); dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", (unsigned long long)err_addr); - mtd->ecc_stats.corrected += corrected; + /* + * if flipped bits accumulator is not supported but we detected + * a correction, increase stat by 1 to match previous behavior. + */ + if (brcmnand_corr_total(ctrl) == prev_tot) + mtd->ecc_stats.corrected++; + /* Always exceed the software-imposed threshold */ return max(mtd->bitflip_threshold, corrected); } diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index d579d5dd60d6..df61db8ce466 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); + if (dma_mapping_error(dma_dev->dev, dma_addr)) + return -EINVAL; if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index c02e50608816..b663659b2f49 100644 --- a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -377,9 +377,9 @@ static int hynix_nand_rr_init(struct nand_chip *chip) /* * We only support read-retry for 1xnm NANDs, and those NANDs all - * expose a valid JEDEC ID. + * expose a valid JEDEC ID. SLC NANDs don't require read-retry. 
*/ - if (valid_jedecid) { + if (valid_jedecid && nanddev_bits_per_cell(&chip->base) > 1) { u8 nand_tech = chip->id.data[5] >> 4; /* 1xnm technology */ diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 1003cf118c01..4dd6f1a4e797 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -1379,7 +1379,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int cwperpage, bad_block_byte, ret; bool wide_bus; - int ecc_mode = 1; + int ecc_mode = ECC_MODE_8BIT; /* controller only supports 512 bytes data steps */ ecc->size = NANDC_STEP_SIZE; @@ -1400,7 +1400,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (ecc->strength >= 8) { /* 8 bit ECC defaults to BCH ECC on all platforms */ host->bch_enabled = true; - ecc_mode = 1; + ecc_mode = ECC_MODE_8BIT; if (wide_bus) { host->ecc_bytes_hw = 14; @@ -1420,7 +1420,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (nandc->props->ecc_modes & ECC_BCH_4BIT) { /* BCH */ host->bch_enabled = true; - ecc_mode = 0; + ecc_mode = ECC_MODE_4BIT; if (wide_bus) { host->ecc_bytes_hw = 8; diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c index 44f6603736d1..ac8c1b80d7be 100644 --- a/drivers/mtd/nand/raw/renesas-nand-controller.c +++ b/drivers/mtd/nand/raw/renesas-nand-controller.c @@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, /* Configure DMA */ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize, DMA_FROM_DEVICE); + if (dma_mapping_error(rnandc->dev, dma_addr)) + return -ENOMEM; + writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG); writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); @@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, /* Configure DMA */ dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize, DMA_TO_DEVICE); + if (dma_mapping_error(rnandc->dev, dma_addr)) + return -ENOMEM; + writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG); writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 63e7b9e39a5a..c5d7cd8a6cab 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -656,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, mtd->writesize, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_data)) + return -ENOMEM; + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, ecc->steps * oob_step, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_oob)) { + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE); + return -ENOMEM; + } reinit_completion(&nfc->done); writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); @@ -772,9 +779,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, dma_data = dma_map_single(nfc->dev, nfc->page_buf, mtd->writesize, DMA_FROM_DEVICE); + if (dma_mapping_error(nfc->dev, dma_data)) + return -ENOMEM; + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, ecc->steps * oob_step, DMA_FROM_DEVICE); + if (dma_mapping_error(nfc->dev, dma_oob)) { + dma_unmap_single(nfc->dev, dma_data, 
mtd->writesize, + DMA_FROM_DEVICE); + return -ENOMEM; + } /* * The first blocks (4, 8 or 16 depending on the device) diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c index 2ee498230ec1..9e97c40955c9 100644 --- a/drivers/mtd/nand/spi/alliancememory.c +++ b/drivers/mtd/nand/spi/alliancememory.c @@ -17,12 +17,12 @@ #define AM_STATUS_ECC_MAX_CORRECTED (3 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c index 2b4df1d917ac..45d38ce0736c 100644 --- a/drivers/mtd/nand/spi/ato.c +++ b/drivers/mtd/nand/spi/ato.c @@ -14,9 +14,9 @@ static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 7099db7a62be..b0898990b2a5 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -20,7 +20,7 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> -static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) +int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) { struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, spinand->scratchbuf); @@ -360,7 +360,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) engine_conf->status = status; } -static int spinand_write_enable_op(struct spinand_device *spinand) +int spinand_write_enable_op(struct spinand_device *spinand) { struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); @@ -688,7 +688,10 @@ int spinand_write_page(struct spinand_device *spinand, SPINAND_WRITE_INITIAL_DELAY_US, SPINAND_WRITE_POLL_DELAY_US, &status); - if (!ret && (status & STATUS_PROG_FAILED)) + if (ret) + return ret; + + if (status & STATUS_PROG_FAILED) return -EIO; return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); @@ -1250,8 +1253,19 @@ static int spinand_id_detect(struct spinand_device *spinand) static int spinand_manufacturer_init(struct spinand_device *spinand) { - if (spinand->manufacturer->ops->init) - return spinand->manufacturer->ops->init(spinand); + int ret; + + if (spinand->manufacturer->ops->init) { + ret = spinand->manufacturer->ops->init(spinand); + if (ret) + return ret; + } + + if 
(spinand->configure_chip) { + ret = spinand->configure_chip(spinand); + if (ret) + return ret; + } return 0; } @@ -1294,7 +1308,7 @@ spinand_select_op_variant(struct spinand_device *spinand, nbytes -= op.data.nbytes; - op_duration_ns += spi_mem_calc_op_duration(&op); + op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op); } if (!nbytes && op_duration_ns < best_op_duration_ns) { @@ -1346,6 +1360,7 @@ int spinand_match_and_init(struct spinand_device *spinand, spinand->flags = table[i].flags; spinand->id.len = 1 + table[i].devid.len; spinand->select_target = table[i].select_target; + spinand->configure_chip = table[i].configure_chip; spinand->set_cont_read = table[i].set_cont_read; spinand->fact_otp = &table[i].fact_otp; spinand->user_otp = &table[i].user_otp; @@ -1585,6 +1600,7 @@ static void spinand_cleanup(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); + nanddev_ecc_engine_cleanup(nand); nanddev_cleanup(nand); spinand_manufacturer_cleanup(spinand); kfree(spinand->databuf); diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index 9e286612a296..9a9325c0bc49 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -18,10 +18,10 @@ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c index 7c61644bfb10..c521dd6abc4b 100644 --- a/drivers/mtd/nand/spi/foresee.c +++ b/drivers/mtd/nand/spi/foresee.c @@ -12,10 +12,10 @@ #define SPINAND_MFR_FORESEE 0xCD static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index cb1d316fc4d8..93e40431dbe2 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -24,36 +24,36 @@ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, 
NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_f, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_1gq5, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), @@ -533,6 +533,26 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GM9UExxG", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91, 0x01), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GM9RExxG", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + 
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index eeaf5bf9f082..edf63b9996cf 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,10 +28,10 @@ struct macronix_priv {
 };
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 8281c9d3f4f7..a49d7cb6a96d 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -35,12 +35,12 @@
 	(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
 
 static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(x4_write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
@@ -52,10 +52,10 @@ static SPINAND_OP_VARIANTS(x4_update_cache_variants,
 
 /* Micron MT29F2G01AAAED Device */
 static SPINAND_OP_VARIANTS(x4_read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(x1_write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 4670bac41245..73bd124273a5 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,12 +22,12 @@
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
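For readers decoding the macro names: the x_y_z suffix gives the bus width of the command, address and data phases, with S for single data rate and D for DDR, and the 3A infix marking the 3-byte-address variants seen in gigadevice's read_cache_variants_f table. A standalone sketch of that naming, assuming the phase layout the names suggest; the struct below only mirrors the shape of a spi-mem operation and is not the kernel's struct spi_mem_op:

    #include <stdio.h>

    struct phase { unsigned char buswidth; unsigned char dtr; };

    struct op {
            const char *name;
            struct phase cmd, addr, data;
    };

    int main(void)
    {
            /* 1S-1S-4S: opcode and address on one line, data on four. */
            struct op quad_out = { "1S_1S_4S", {1, 0}, {1, 0}, {4, 0} };
            /* 1S-4D-4D: DDR address and data phases on four lines. */
            struct op quad_ddr = { "1S_4D_4D", {1, 0}, {4, 1}, {4, 1} };

            printf("%s: data buswidth %u, ddr %u\n", quad_out.name,
                   quad_out.data.buswidth, quad_out.data.dtr);
            printf("%s: data buswidth %u, ddr %u\n", quad_ddr.name,
                   quad_ddr.data.buswidth, quad_ddr.data.dtr);
            return 0;
    }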
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
index 51d61785df61..bf9ce163e6a7 100644
--- a/drivers/mtd/nand/spi/skyhigh.c
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -17,12 +17,12 @@
 #define SKYHIGH_CONFIG_PROTECT_EN		BIT(1)
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 4c6923047aeb..6530257ac0be 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,10 +15,10 @@
 #define TOSH_STATUS_ECC_HAS_BITFLIPS_T	(3 << 4)
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_x4_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
"quad output" in the datasheets. + * Quad and octal capable chips feature an absolute maximum frequency of 166MHz. */ static SPINAND_OP_VARIANTS(read_cache_octal_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 3, NULL, 0, 120 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 20, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 12, NULL, 0, 124 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 8, NULL, 0, 86 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 2, NULL, 0, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_octal_variants, SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0), @@ -42,23 +59,25 @@ static SPINAND_OP_VARIANTS(update_cache_octal_variants, static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants, SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ)); static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), @@ -230,6 +249,113 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } +static int 
@@ -230,6 +249,113 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
 	return -EINVAL;
 }
 
+static int w25n0xjw_hs_cfg(struct spinand_device *spinand)
+{
+	const struct spi_mem_op *op;
+	bool hs;
+	u8 sr4;
+	int ret;
+
+	op = spinand->op_templates.read_cache;
+	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+		hs = false;
+	else if (op->cmd.buswidth == 1 && op->addr.buswidth == 1 &&
+		 op->dummy.buswidth == 1 && op->data.buswidth == 1)
+		hs = false;
+	else if (!op->max_freq)
+		hs = true;
+	else
+		hs = false;
+
+	ret = spinand_read_reg_op(spinand, W25N0XJW_SR4, &sr4);
+	if (ret)
+		return ret;
+
+	if (hs)
+		sr4 |= W25N0XJW_SR4_HS;
+	else
+		sr4 &= ~W25N0XJW_SR4_HS;
+
+	ret = spinand_write_reg_op(spinand, W25N0XJW_SR4, sr4);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int w35n0xjw_write_vcr(struct spinand_device *spinand, u8 reg, u8 val)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(0x81, 1),
+			   SPI_MEM_OP_ADDR(3, reg, 1),
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_OUT(1, spinand->scratchbuf, 1));
+	int ret;
+
+	*spinand->scratchbuf = val;
+
+	ret = spinand_write_enable_op(spinand);
+	if (ret)
+		return ret;
+
+	ret = spi_mem_exec_op(spinand->spimem, &op);
+	if (ret)
+		return ret;
+
+	/*
+	 * Write VCR operation doesn't set the busy bit in SR, which means we
+	 * cannot perform a status poll. Minimum time of 50ns is needed to
+	 * complete the write.
+	 */
+	ndelay(50);
+
+	return 0;
+}
+
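w25n0xjw_hs_cfg() above only sets the SR4 HS bit when the selected read template is SDR, uses more than one data line, and carries no chip-imposed frequency cap; that rationale (HS buying nothing on plain SPI and being incompatible with DTR reads) is inferred from the code, not stated by it. A standalone restatement of the decision with kernel types replaced:

    #include <stdbool.h>
    #include <stdio.h>

    struct tmpl {
            bool any_dtr;           /* any phase is DDR */
            bool all_single;        /* every phase on one line */
            unsigned long max_freq; /* 0 == uncapped */
    };

    static bool wants_hs(const struct tmpl *t)
    {
            if (t->any_dtr)
                    return false;   /* DTR templates do not use HS */
            if (t->all_single)
                    return false;   /* plain SPI gains nothing from HS */
            return t->max_freq == 0; /* only uncapped ops run at HS */
    }

    int main(void)
    {
            struct tmpl quad_uncapped = { false, false, 0 };
            struct tmpl quad_capped = { false, false, 104000000UL };

            printf("quad uncapped -> HS=%d\n", wants_hs(&quad_uncapped));
            printf("quad capped   -> HS=%d\n", wants_hs(&quad_capped));
            return 0;
    }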
+static int w35n0xjw_vcr_cfg(struct spinand_device *spinand)
+{
+	const struct spi_mem_op *op;
+	unsigned int dummy_cycles;
+	bool dtr, single;
+	u8 io_mode;
+	int ret;
+
+	op = spinand->op_templates.read_cache;
+
+	single = (op->cmd.buswidth == 1 && op->addr.buswidth == 1 && op->data.buswidth == 1);
+	dtr = (op->cmd.dtr || op->addr.dtr || op->data.dtr);
+	if (single && !dtr)
+		io_mode = W35N01JW_VCR_IO_MODE_SINGLE_SDR;
+	else if (!single && !dtr)
+		io_mode = W35N01JW_VCR_IO_MODE_OCTAL_SDR;
+	else if (!single && dtr)
+		io_mode = W35N01JW_VCR_IO_MODE_OCTAL_DDR;
+	else
+		return -EINVAL;
+
+	ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_IO_MODE, io_mode);
+	if (ret)
+		return ret;
+
+	dummy_cycles = ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+	switch (dummy_cycles) {
+	case 8:
+	case 12:
+	case 16:
+	case 20:
+	case 24:
+	case 28:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_DUMMY_CLOCK_REG, dummy_cycles);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static const struct spinand_info winbond_spinand_table[] = {
 	/* 512M-bit densities */
 	SPINAND_INFO("W25N512GW", /* 1.8V */
@@ -268,7 +394,8 @@ static const struct spinand_info winbond_spinand_table[] = {
 				   &write_cache_variants,
 				   &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+		     SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
 	SPINAND_INFO("W25N01KV", /* 3.3V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
 		     NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
@@ -286,25 +413,28 @@ static const struct spinand_info winbond_spinand_table[] = {
 				   &write_cache_octal_variants,
 				   &update_cache_octal_variants),
 		     0,
-		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+		     SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
 	SPINAND_INFO("W35N02JW", /* 1.8V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
-		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1),
+		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
 		     NAND_ECCREQ(1, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
 					      &write_cache_octal_variants,
 					      &update_cache_octal_variants),
 		     0,
-		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+		     SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
 	SPINAND_INFO("W35N04JW", /* 1.8V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
-		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
 		     NAND_ECCREQ(1, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
 					      &write_cache_octal_variants,
 					      &update_cache_octal_variants),
 		     0,
-		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+		     SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
 	/* 2G-bit densities */
 	SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -324,7 +454,8 @@ static const struct spinand_info winbond_spinand_table[] = {
 				   &write_cache_variants,
 				   &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+		     SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
 	SPINAND_INFO("W25N02KV", /* 3.3V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
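The W35N02JW/W35N04JW NAND_MEMORG() change above swaps the planes-per-LUN and LUNs-per-target fields: planes do not multiply capacity in the core's geometry model, so describing a two-LUN die as a two-plane, single-LUN one halved the reported size. A standalone check of that arithmetic, with the field order assumed from include/linux/mtd/nand.h (verify against the tree you build):

    #include <stdio.h>

    /* Geometry fields in NAND_MEMORG() argument order. */
    struct memorg {
            unsigned int bits_per_cell, pagesize, oobsize, pages_per_eraseblock;
            unsigned int eraseblocks_per_lun, max_bad_eraseblocks_per_lun;
            unsigned int planes_per_lun, luns_per_target, ntargets;
    };

    /* Capacity counts LUNs and targets, never planes. */
    static unsigned long long total_bytes(const struct memorg *m)
    {
            return (unsigned long long)m->pagesize * m->pages_per_eraseblock *
                   m->eraseblocks_per_lun * m->luns_per_target * m->ntargets;
    }

    int main(void)
    {
            /* W35N02JW before and after the fix. */
            struct memorg before = { 1, 4096, 128, 64, 512, 10, 2, 1, 1 };
            struct memorg after = { 1, 4096, 128, 64, 512, 10, 1, 2, 1 };

            printf("before: %llu MiB\n", total_bytes(&before) >> 20); /* 128 */
            printf("after:  %llu MiB\n", total_bytes(&after) >> 20);  /* 256 */
            return 0;
    }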
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 37336d5958a9..5915b37b47f5 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,12 +23,12 @@
 #define XT26XXXD_STATUS_ECC_UNCOR_ERROR	(2)
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
-	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+	SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_variants,
 	SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 64d319e959b2..868aa3d35d09 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -228,6 +228,25 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
 	return BLOCK_NIL;
 }
 
+static noinline_for_stack void NFTL_move_block(struct mtd_info *mtd, loff_t src, loff_t dst)
+{
+	unsigned char movebuf[512];
+	struct nftl_oob oob;
+	size_t retlen;
+	int ret;
+
+	ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+	if (ret < 0 && !mtd_is_bitflip(ret)) {
+		ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+		if (ret != -EIO)
+			printk("Error went away on retry.\n");
+	}
+	memset(&oob, 0xff, sizeof(struct nftl_oob));
+	oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+	nftl_write(mtd, dst, 512, &retlen, movebuf, (char *)&oob);
+}
+
 static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
 {
 	struct mtd_info *mtd = nftl->mbd.mtd;
@@ -389,9 +408,6 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
 	 */
 	pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
 	for (block = 0; block < nftl->EraseSize / 512 ; block++) {
-		unsigned char movebuf[512];
-		int ret;
-
 		/* If it's in the target EUN already, or if it's pending write, do nothing */
 		if (BlockMap[block] == targetEUN ||
 		    (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
@@ -403,25 +419,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
 		if (BlockMap[block] == BLOCK_NIL)
 			continue;
 
-		ret = mtd_read(mtd,
-			       (nftl->EraseSize * BlockMap[block]) + (block * 512),
-			       512,
-			       &retlen,
-			       movebuf);
-		if (ret < 0 && !mtd_is_bitflip(ret)) {
-			ret = mtd_read(mtd,
-				       (nftl->EraseSize * BlockMap[block]) + (block * 512),
-				       512,
-				       &retlen,
-				       movebuf);
-			if (ret != -EIO)
-				printk("Error went away on retry.\n");
-		}
-		memset(&oob, 0xff, sizeof(struct nftl_oob));
-		oob.b.Status = oob.b.Status1 = SECTOR_USED;
-
-		nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
-			   (block * 512), 512, &retlen, movebuf, (char *)&oob);
+		NFTL_move_block(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
+				(nftl->EraseSize * targetEUN) + (block * 512));
 	}
 
 	/* add the header so that it is now a valid chain */
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index e6bab2d00c92..187239ccd549 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -189,7 +189,7 @@ static int mt25qu512a_post_bfpt_fixup(struct spi_nor *nor,
 	return 0;
 }
 
-static struct spi_nor_fixups mt25qu512a_fixups = {
+static const struct spi_nor_fixups mt25qu512a_fixups = {
 	.post_bfpt = mt25qu512a_post_bfpt_fixup,
 };
 
@@ -225,15 +225,15 @@ static int st_nor_two_die_late_init(struct spi_nor *nor)
 	return spi_nor_set_4byte_addr_mode(nor, true);
 }
 
-static struct spi_nor_fixups n25q00_fixups = {
+static const struct spi_nor_fixups n25q00_fixups = {
 	.late_init = st_nor_four_die_late_init,
 };
 
-static struct spi_nor_fixups mt25q01_fixups = {
+static const struct spi_nor_fixups mt25q01_fixups = {
 	.late_init = st_nor_two_die_late_init,
 };
 
-static struct spi_nor_fixups mt25q02_fixups = {
+static const struct spi_nor_fixups mt25q02_fixups = {
 	.late_init = st_nor_four_die_late_init,
 };
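The nftlcore.c hunks above hoist a 512-byte sector buffer out of NFTL_foldchain() into NFTL_move_block(), marked noinline_for_stack so the buffer only occupies a frame for the duration of the call instead of being inlined back into the already large caller. The same pattern in plain userspace C, using the GCC noinline attribute in place of the kernel macro:

    #include <stdio.h>
    #include <string.h>

    /* Large on-stack buffer confined to a non-inlined helper. */
    static __attribute__((noinline)) void move_block(const char *src, char *dst)
    {
            char buf[512];  /* lives only for the duration of this call */

            memcpy(buf, src, sizeof(buf));
            memcpy(dst, buf, sizeof(buf));
    }

    int main(void)
    {
            static char src[512] = "payload";
            static char dst[512];

            /* The caller's frame does not grow by 512 bytes. */
            for (int i = 0; i < 4; i++)
                    move_block(src, dst);

            printf("%s\n", dst);
            return 0;
    }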
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index bf08dbf5e742..a0296c871634 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -17,6 +17,7 @@
 #define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
 #define SPINOR_OP_CLPEF		0x82	/* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_EX4B	0xB8	/* Exit 4-byte address mode */
 #define SPINOR_OP_CYPRESS_DIE_ERASE	0x61	/* Chip (die) erase */
 #define SPINOR_OP_RD_ANY_REG		0x65	/* Read any register */
 #define SPINOR_OP_WR_ANY_REG		0x71	/* Write any register */
@@ -58,6 +59,13 @@
 		   SPI_MEM_OP_DUMMY(ndummy, 0),				\
 		   SPI_MEM_OP_DATA_IN(1, buf, 0))
 
+#define CYPRESS_NOR_EN4B_EX4B_OP(enable)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B :		\
+				  SPINOR_OP_CYPRESS_EX4B, 0),		\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
 #define SPANSION_OP(opcode)						\
 	SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0),				\
 		   SPI_MEM_OP_NO_ADDR,					\
@@ -356,6 +364,20 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
 	return 0;
 }
 
+static int cypress_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+	struct spi_mem_op op = CYPRESS_NOR_EN4B_EX4B_OP(enable);
+
+	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+	ret = spi_mem_exec_op(nor->spimem, &op);
+	if (ret)
+		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+	return ret;
+}
+
 /**
  * cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
  *                                            (3 or 4-byte) by querying status
@@ -526,6 +548,9 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
 	struct spi_mem_op op;
 	int ret;
 
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	ret = cypress_nor_set_addr_mode_nbytes(nor);
 	if (ret)
 		return ret;
@@ -578,7 +603,7 @@ static int s25fs256t_late_init(struct spi_nor *nor)
 	return 0;
 }
 
-static struct spi_nor_fixups s25fs256t_fixups = {
+static const struct spi_nor_fixups s25fs256t_fixups = {
 	.post_bfpt = s25fs256t_post_bfpt_fixup,
 	.post_sfdp = s25fs256t_post_sfdp_fixup,
 	.late_init = s25fs256t_late_init,
@@ -591,6 +616,9 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
 {
 	int ret;
 
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	ret = cypress_nor_set_addr_mode_nbytes(nor);
 	if (ret)
 		return ret;
@@ -650,7 +678,7 @@ static int s25hx_t_late_init(struct spi_nor *nor)
 	return 0;
 }
 
-static struct spi_nor_fixups s25hx_t_fixups = {
+static const struct spi_nor_fixups s25hx_t_fixups = {
 	.post_bfpt = s25hx_t_post_bfpt_fixup,
 	.post_sfdp = s25hx_t_post_sfdp_fixup,
 	.late_init = s25hx_t_late_init,
@@ -718,6 +746,9 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
 				   const struct sfdp_parameter_header *bfpt_header,
 				   const struct sfdp_bfpt *bfpt)
 {
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	return cypress_nor_set_addr_mode_nbytes(nor);
 }
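The Cypress parts above get a dedicated set_4byte_addr_mode hook because, while they enter 4-byte mode with the common EN4B opcode (0xB7), they exit it with 0xB8 rather than the generic EX4B (0xE9) the core would otherwise issue; the hook is assigned in the post-BFPT fixups since SFDP does not settle it for these chips. A trivial standalone restatement of the opcode pair:

    #include <stdio.h>

    /* Opcode pair used by the Cypress hook above. */
    #define SPINOR_OP_EN4B		0xB7	/* Enter 4-byte address mode */
    #define SPINOR_OP_CYPRESS_EX4B	0xB8	/* Exit 4-byte address mode */

    static unsigned char cypress_4byte_opcode(int enable)
    {
            return enable ? SPINOR_OP_EN4B : SPINOR_OP_CYPRESS_EX4B;
    }

    int main(void)
    {
            printf("enter: 0x%02X\n", cypress_4byte_opcode(1));
            printf("exit:  0x%02X\n", cypress_4byte_opcode(0));
            return 0;
    }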
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 9c9328478d8a..9b07f83aeac7 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -56,7 +56,6 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
 static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
 					u64 *len)
 {
-	struct mtd_info *mtd = &nor->mtd;
 	u64 min_prot_len;
 	u8 mask = spi_nor_get_sr_bp_mask(nor);
 	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
@@ -77,13 +76,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
 	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
 	*len = min_prot_len << (bp - 1);
 
-	if (*len > mtd->size)
-		*len = mtd->size;
+	if (*len > nor->params->size)
+		*len = nor->params->size;
 
 	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
 		*ofs = 0;
 	else
-		*ofs = mtd->size - *len;
+		*ofs = nor->params->size - *len;
 }
 
 /*
@@ -158,7 +157,6 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
 */
 static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
 {
-	struct mtd_info *mtd = &nor->mtd;
 	u64 min_prot_len;
 	int ret, status_old, status_new;
 	u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -183,7 +181,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
 		can_be_bottom = false;
 
 	/* If anything above us is unlocked, we can't use 'top' protection */
-	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+	if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len),
 				  status_old))
 		can_be_top = false;
 
@@ -195,11 +193,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
 
 	/* lock_len: length of region that should end up locked */
 	if (use_top)
-		lock_len = mtd->size - ofs;
+		lock_len = nor->params->size - ofs;
 	else
 		lock_len = ofs + len;
 
-	if (lock_len == mtd->size) {
+	if (lock_len == nor->params->size) {
 		val = mask;
 	} else {
 		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
@@ -248,7 +246,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
 */
 static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
 {
-	struct mtd_info *mtd = &nor->mtd;
 	u64 min_prot_len;
 	int ret, status_old, status_new;
 	u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -273,7 +270,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
 		can_be_top = false;
 
 	/* If anything above us is locked, we can't use 'bottom' protection */
-	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+	if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len),
 				    status_old))
 		can_be_bottom = false;
 
@@ -285,7 +282,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
 
 	/* lock_len: length of region that should remain locked */
 	if (use_top)
-		lock_len = mtd->size - (ofs + len);
+		lock_len = nor->params->size - (ofs + len);
 	else
 		lock_len = ofs;
 
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 4f12ff755df0..643513ee891b 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -104,7 +104,7 @@ static const struct attribute_group spi_nor_sysfs_group = {
 	.is_visible = spi_nor_sysfs_is_visible,
 	.is_bin_visible = spi_nor_sysfs_is_bin_visible,
 	.attrs = spi_nor_sysfs_entries,
-	.bin_attrs_new = spi_nor_sysfs_bin_entries,
+	.bin_attrs = spi_nor_sysfs_bin_entries,
 };
 
 const struct attribute_group *spi_nor_sysfs_groups[] = {
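The swp.c hunks above replace mtd->size with nor->params->size, the flash size the core itself maintains, when computing block-protection ranges; the BP arithmetic is unchanged: the protected length is the minimum protection granularity shifted left by BP-1, clamped to the flash size, and anchored at the top of the flash unless the TB bit selects the bottom. A worked example for a hypothetical 16 MiB part with 64 KiB granularity:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long size = 16ULL << 20;     /* flash size */
            unsigned long long min_prot = 64ULL << 10; /* min protected length */

            for (unsigned int bp = 1; bp <= 8; bp++) {
                    unsigned long long len = min_prot << (bp - 1);

                    if (len > size)
                            len = size; /* clamp, as the driver does */
                    /* top protection: the locked region ends at the flash top */
                    printf("BP=%u locks %4llu KiB at offset %5llu KiB\n",
                           bp, len >> 10, (size - len) >> 10);
            }
            return 0;
    }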
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index f1ea8677467f..df0a5a57b072 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -791,33 +791,6 @@ int ubi_sync(int ubi_num)
 }
 EXPORT_SYMBOL_GPL(ubi_sync);
 
-/**
- * ubi_flush - flush UBI work queue.
- * @ubi_num: UBI device to flush work queue
- * @vol_id: volume id to flush for
- * @lnum: logical eraseblock number to flush for
- *
- * This function executes all pending works for a particular volume id / logical
- * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
- * a wildcard for all of the corresponding volume numbers or logical
- * eraseblock numbers. It returns zero in case of success and a negative error
- * code in case of failure.
- */
-int ubi_flush(int ubi_num, int vol_id, int lnum)
-{
-	struct ubi_device *ubi;
-	int err = 0;
-
-	ubi = ubi_get_device(ubi_num);
-	if (!ubi)
-		return -ENODEV;
-
-	err = ubi_wl_flush(ubi, vol_id, lnum);
-	ubi_put_device(ubi);
-	return err;
-}
-EXPORT_SYMBOL_GPL(ubi_flush);
-
 BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
 
 /**