Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/devices/Kconfig        |  11
-rw-r--r--  drivers/mtd/devices/Makefile       |   1
-rw-r--r--  drivers/mtd/devices/mtd_intel_dg.c | 830
-rw-r--r--  drivers/mtd/mtdchar.c              |   2
-rw-r--r--  drivers/mtd/mtdcore.c              | 152
-rw-r--r--  drivers/mtd/mtdcore.h              |   2
-rw-r--r--  drivers/mtd/mtdpart.c              |  16
-rw-r--r--  drivers/mtd/nand/qpic_common.c     |  30
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c  |   6
-rw-r--r--  drivers/mtd/nand/spi/core.c        |   1
-rw-r--r--  drivers/mtd/nand/spi/winbond.c     |  10
-rw-r--r--  drivers/mtd/spi-nor/sysfs.c        |   2
12 files changed, 928 insertions(+), 135 deletions(-)
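
Note: most of the insertions are the new mtd_intel_dg driver, which exposes the non-volatile memory of Intel discrete graphics cards as an MTD device. It does no PCI probing of its own: it binds over the auxiliary bus to a child device named "i915.nvm" or "xe.nvm" that the graphics driver is expected to create, and takes the register BARs and region table from struct intel_dg_nvm_dev. The producer side is not part of this diff; the following is only a sketch of what it could look like. It assumes struct intel_dg_nvm_dev embeds its struct auxiliary_device in a field named aux_dev — only regions[], bar, bar2, non_posted_erase and writable_override are actually visible in the patch below.

	/* Hypothetical producer-side helper in the xe/i915 driver; a sketch,
	 * not the actual graphics-driver code.
	 */
	#include <linux/auxiliary_bus.h>
	#include <linux/intel_dg_nvm_aux.h>
	#include <linux/slab.h>

	static void dg_nvm_release(struct device *dev)
	{
		struct auxiliary_device *aux = to_auxiliary_dev(dev);
		struct intel_dg_nvm_dev *invm =
			auxiliary_dev_to_intel_dg_nvm_dev(aux);

		kfree(invm);
	}

	static int dg_register_nvm_child(struct device *parent)
	{
		struct intel_dg_nvm_dev *invm;
		int ret;

		invm = kzalloc(sizeof(*invm), GFP_KERNEL);
		if (!invm)
			return -ENOMEM;

		/* Region id 0 is the flash descriptor; slots the producer
		 * leaves with name == NULL are skipped by the MTD probe.
		 */
		invm->regions[0].name = "DESCRIPTOR";
		invm->writable_override = false;
		/* invm->bar (and bar2, if non_posted_erase) would be filled
		 * in from the GPU's PCI BAR here.
		 */

		/* The id_table below matches "xe.nvm" / "i915.nvm", so the
		 * name must be "nvm" and registration must happen from the
		 * xe or i915 module: auxiliary_device_add() prefixes
		 * KBUILD_MODNAME.
		 */
		invm->aux_dev.name = "nvm";			/* assumed field */
		invm->aux_dev.dev.parent = parent;
		invm->aux_dev.dev.release = dg_nvm_release;

		ret = auxiliary_device_init(&invm->aux_dev);
		if (ret) {
			kfree(invm);
			return ret;
		}

		ret = auxiliary_device_add(&invm->aux_dev);
		if (ret)
			auxiliary_device_uninit(&invm->aux_dev);

		return ret;
	}
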
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index aed653ce8fa2..46cebde79f34 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -183,6 +183,17 @@ config MTD_POWERNV_FLASH platforms from Linux. This device abstracts away the firmware interface for flash access. +config MTD_INTEL_DG + tristate "Intel Discrete Graphics non-volatile memory driver" + depends on AUXILIARY_BUS + depends on MTD + help + This provides an MTD device to access Intel Discrete Graphics + non-volatile memory. + + To compile this driver as a module, choose M here: the module + will be called mtd-intel-dg. + comment "Disk-On-Chip Device Drivers" config MTD_DOCG3 diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index d11eb2b8b6f8..9fe4ce9cffde 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_SST25L) += sst25l.o obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o +obj-$(CONFIG_MTD_INTEL_DG) += mtd_intel_dg.o CFLAGS_docg3.o += -I$(src) diff --git a/drivers/mtd/devices/mtd_intel_dg.c b/drivers/mtd/devices/mtd_intel_dg.c new file mode 100644 index 000000000000..b438ee5aacc3 --- /dev/null +++ b/drivers/mtd/devices/mtd_intel_dg.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2019-2025, Intel Corporation. All rights reserved. + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/intel_dg_nvm_aux.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/sizes.h> +#include <linux/types.h> + +struct intel_dg_nvm { + struct kref refcnt; + struct mtd_info mtd; + struct mutex lock; /* region access lock */ + void __iomem *base; + void __iomem *base2; + bool non_posted_erase; + + size_t size; + unsigned int nregions; + struct { + const char *name; + u8 id; + u64 offset; + u64 size; + unsigned int is_readable:1; + unsigned int is_writable:1; + } regions[] __counted_by(nregions); +}; + +#define NVM_TRIGGER_REG 0x00000000 +#define NVM_VALSIG_REG 0x00000010 +#define NVM_ADDRESS_REG 0x00000040 +#define NVM_REGION_ID_REG 0x00000044 +#define NVM_DEBUG_REG 0x00000000 +/* + * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K + * [23:16]-Reserved + * [31:24]-Erase MEM RegionID + */ +#define NVM_ERASE_REG 0x00000048 +#define NVM_ACCESS_ERROR_REG 0x00000070 +#define NVM_ADDRESS_ERROR_REG 0x00000074 + +/* Flash Valid Signature */ +#define NVM_FLVALSIG 0x0FF0A55A + +#define NVM_MAP_ADDR_MASK GENMASK(7, 0) +#define NVM_MAP_ADDR_SHIFT 0x00000004 + +#define NVM_REGION_ID_DESCRIPTOR 0 +/* Flash Region Base Address */ +#define NVM_FRBA 0x40 +/* Flash Region __n - Flash Descriptor Record */ +#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4)) +/* Flash Map 1 Register */ +#define NVM_FLMAP1_REG 0x18 +#define NVM_FLMSTR4_OFFSET 0x00C + +#define NVM_ACCESS_ERROR_PCIE_MASK 0x7 + +#define NVM_FREG_BASE_MASK GENMASK(15, 0) +#define NVM_FREG_ADDR_MASK GENMASK(31, 16) +#define NVM_FREG_ADDR_SHIFT 12 +#define NVM_FREG_MIN_REGION_SIZE 0xFFF + +#define NVM_NON_POSTED_ERASE_DONE BIT(23) +#define NVM_NON_POSTED_ERASE_DONE_ITER 3000 + +static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region) +{ 
+ iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG); +} + +static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm) +{ + void __iomem *base = nvm->base; + + u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK; + + /* reset error bits */ + if (reg) + iowrite32(reg, base + NVM_ACCESS_ERROR_REG); + + return reg; +} + +static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + return ioread32(base + NVM_TRIGGER_REG); +} + +static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + return readq(base + NVM_TRIGGER_REG); +} + +static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + iowrite32(data, base + NVM_TRIGGER_REG); +} + +static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data) +{ + void __iomem *base = nvm->base; + + iowrite32(address, base + NVM_ADDRESS_REG); + + writeq(data, base + NVM_TRIGGER_REG); +} + +static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map) +{ + u32 fmstr4_addr; + u32 fmstr4; + u32 flmap1; + u32 fmba; + + idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); + + flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG); + if (idg_nvm_error(nvm)) + return -EIO; + /* Get Flash Master Baser Address (FMBA) */ + fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT); + fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET; + + fmstr4 = idg_nvm_read32(nvm, fmstr4_addr); + if (idg_nvm_error(nvm)) + return -EIO; + + *access_map = fmstr4; + return 0; +} + +/* + * Region read/write access encoded in the access map + * in the following order from the lower bit: + * [3:0] regions 12-15 read state + * [7:4] regions 12-15 write state + * [19:8] regions 0-11 read state + * [31:20] regions 0-11 write state + */ +static bool idg_nvm_region_readable(u32 access_map, u8 region) +{ + if (region < 12) + return access_map & BIT(region + 8); /* [19:8] */ + else + return access_map & BIT(region - 12); /* [3:0] */ +} + +static bool idg_nvm_region_writable(u32 access_map, u8 region) +{ + if (region < 12) + return access_map & BIT(region + 20); /* [31:20] */ + else + return access_map & BIT(region - 8); /* [7:4] */ +} + +static int idg_nvm_is_valid(struct intel_dg_nvm *nvm) +{ + u32 is_valid; + + idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); + + is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG); + if (idg_nvm_error(nvm)) + return -EIO; + + if (is_valid != NVM_FLVALSIG) + return -ENODEV; + + return 0; +} + +static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from) +{ + unsigned int i; + + for (i = 0; i < nvm->nregions; i++) { + if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from && + nvm->regions[i].offset <= from && + nvm->regions[i].size != 0) + break; + } + + return i; +} + +static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to, + loff_t offset, size_t len, const u32 *newdata) +{ + u32 data = idg_nvm_read32(nvm, to); + + if (idg_nvm_error(nvm)) + return -EIO; + + memcpy((u8 *)&data + offset, newdata, len); + + idg_nvm_write32(nvm, to, data); + if (idg_nvm_error(nvm)) + return -EIO; + + return len; +} + +static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region, + loff_t to, size_t len, const unsigned char *buf) +{ + size_t len_s = len; + size_t to_shift; + size_t 
len8; + size_t len4; + ssize_t ret; + size_t to4; + size_t i; + + idg_nvm_set_region_id(nvm, region); + + to4 = ALIGN_DOWN(to, sizeof(u32)); + to_shift = min(sizeof(u32) - ((size_t)to - to4), len); + if (to - to4) { + ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]); + if (ret < 0) + return ret; + + buf += to_shift; + to += to_shift; + len_s -= to_shift; + } + + if (!IS_ALIGNED(to, sizeof(u64)) && + ((to ^ (to + len_s)) & GENMASK(31, 10))) { + /* + * Workaround reads/writes across 1k-aligned addresses + * (start u32 before 1k, end u32 after) + * as this fails on hardware. + */ + u32 data; + + memcpy(&data, &buf[0], sizeof(u32)); + idg_nvm_write32(nvm, to, data); + if (idg_nvm_error(nvm)) + return -EIO; + buf += sizeof(u32); + to += sizeof(u32); + len_s -= sizeof(u32); + } + + len8 = ALIGN_DOWN(len_s, sizeof(u64)); + for (i = 0; i < len8; i += sizeof(u64)) { + u64 data; + + memcpy(&data, &buf[i], sizeof(u64)); + idg_nvm_write64(nvm, to + i, data); + if (idg_nvm_error(nvm)) + return -EIO; + } + + len4 = len_s - len8; + if (len4 >= sizeof(u32)) { + u32 data; + + memcpy(&data, &buf[i], sizeof(u32)); + idg_nvm_write32(nvm, to + i, data); + if (idg_nvm_error(nvm)) + return -EIO; + i += sizeof(u32); + len4 -= sizeof(u32); + } + + if (len4 > 0) { + ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]); + if (ret < 0) + return ret; + } + + return len; +} + +static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region, + loff_t from, size_t len, unsigned char *buf) +{ + size_t len_s = len; + size_t from_shift; + size_t from4; + size_t len8; + size_t len4; + size_t i; + + idg_nvm_set_region_id(nvm, region); + + from4 = ALIGN_DOWN(from, sizeof(u32)); + from_shift = min(sizeof(u32) - ((size_t)from - from4), len); + + if (from - from4) { + u32 data = idg_nvm_read32(nvm, from4); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift); + len_s -= from_shift; + buf += from_shift; + from += from_shift; + } + + if (!IS_ALIGNED(from, sizeof(u64)) && + ((from ^ (from + len_s)) & GENMASK(31, 10))) { + /* + * Workaround reads/writes across 1k-aligned addresses + * (start u32 before 1k, end u32 after) + * as this fails on hardware. 
+ */ + u32 data = idg_nvm_read32(nvm, from); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[0], &data, sizeof(data)); + len_s -= sizeof(u32); + buf += sizeof(u32); + from += sizeof(u32); + } + + len8 = ALIGN_DOWN(len_s, sizeof(u64)); + for (i = 0; i < len8; i += sizeof(u64)) { + u64 data = idg_nvm_read64(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + + memcpy(&buf[i], &data, sizeof(data)); + } + + len4 = len_s - len8; + if (len4 >= sizeof(u32)) { + u32 data = idg_nvm_read32(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[i], &data, sizeof(data)); + i += sizeof(u32); + len4 -= sizeof(u32); + } + + if (len4 > 0) { + u32 data = idg_nvm_read32(nvm, from + i); + + if (idg_nvm_error(nvm)) + return -EIO; + memcpy(&buf[i], &data, len4); + } + + return len; +} + +static ssize_t +idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr) +{ + void __iomem *base2 = nvm->base2; + void __iomem *base = nvm->base; + const u32 block = 0x10; + u32 iter = 0; + u32 reg; + u64 i; + + for (i = 0; i < len; i += SZ_4K) { + iowrite32(from + i, base + NVM_ADDRESS_REG); + iowrite32(region << 24 | block, base + NVM_ERASE_REG); + if (nvm->non_posted_erase) { + /* Wait for Erase Done */ + reg = ioread32(base2 + NVM_DEBUG_REG); + while (!(reg & NVM_NON_POSTED_ERASE_DONE) && + ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) { + msleep(10); + reg = ioread32(base2 + NVM_DEBUG_REG); + } + if (reg & NVM_NON_POSTED_ERASE_DONE) { + /* Clear Erase Done */ + iowrite32(reg, base2 + NVM_DEBUG_REG); + } else { + *fail_addr = from + i; + return -ETIME; + } + } + /* Since the writes are via sgunit + * we cannot do back to back erases. + */ + msleep(50); + } + return len; +} + +static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device, + bool non_posted_erase) +{ + u32 access_map = 0; + unsigned int i, n; + int ret; + + /* clean error register, previous errors are ignored */ + idg_nvm_error(nvm); + + ret = idg_nvm_is_valid(nvm); + if (ret) { + dev_err(device, "The MEM is not valid %d\n", ret); + return ret; + } + + if (idg_nvm_get_access_map(nvm, &access_map)) + return -EIO; + + for (i = 0, n = 0; i < nvm->nregions; i++) { + u32 address, base, limit, region; + u8 id = nvm->regions[i].id; + + address = NVM_FLREG(id); + region = idg_nvm_read32(nvm, address); + + base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT; + limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) | + NVM_FREG_MIN_REGION_SIZE; + + dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n", + id, nvm->regions[i].name, region, base, limit); + + if (base >= limit || (i > 0 && limit == 0)) { + dev_dbg(device, "[%d] %s: disabled\n", + id, nvm->regions[i].name); + nvm->regions[i].is_readable = 0; + continue; + } + + if (nvm->size < limit) + nvm->size = limit; + + nvm->regions[i].offset = base; + nvm->regions[i].size = limit - base + 1; + /* No write access to descriptor; mask it out*/ + nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id); + + nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id); + dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n", + nvm->regions[i].name, + nvm->regions[i].id, + nvm->regions[i].offset, + nvm->regions[i].size, + nvm->regions[i].is_readable, + nvm->regions[i].is_writable); + + if (nvm->regions[i].is_readable) + n++; + } + + nvm->non_posted_erase = non_posted_erase; + + dev_dbg(device, "Registered %d regions\n", n); + dev_dbg(device, "Non posted erase 
%d\n", nvm->non_posted_erase); + + /* Need to add 1 to the amount of memory + * so it is reported as an even block + */ + nvm->size += 1; + + return n; +} + +static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info) +{ + struct intel_dg_nvm *nvm = mtd->priv; + size_t total_len; + unsigned int idx; + ssize_t bytes; + loff_t from; + size_t len; + u8 region; + u64 addr; + + if (WARN_ON(!nvm)) + return -EINVAL; + + if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) { + dev_err(&mtd->dev, "unaligned erase %llx %llx\n", + info->addr, info->len); + info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EINVAL; + } + + total_len = info->len; + addr = info->addr; + + guard(mutex)(&nvm->lock); + + while (total_len > 0) { + if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) { + dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len); + info->fail_addr = addr; + return -ERANGE; + } + + idx = idg_nvm_get_region(nvm, addr); + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -ERANGE; + } + + from = addr - nvm->regions[idx].offset; + region = nvm->regions[idx].id; + len = total_len; + if (len > nvm->regions[idx].size - from) + len = nvm->regions[idx].size - from; + + dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n", + region, nvm->regions[idx].name, from, len); + + bytes = idg_erase(nvm, region, from, len, &info->fail_addr); + if (bytes < 0) { + dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes); + info->fail_addr += nvm->regions[idx].offset; + return bytes; + } + + addr += len; + total_len -= len; + } + + return 0; +} + +static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct intel_dg_nvm *nvm = mtd->priv; + unsigned int idx; + ssize_t ret; + u8 region; + + if (WARN_ON(!nvm)) + return -EINVAL; + + idx = idg_nvm_get_region(nvm, from); + + dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n", + nvm->regions[idx].id, nvm->regions[idx].name, from, len); + + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + return -ERANGE; + } + + from -= nvm->regions[idx].offset; + region = nvm->regions[idx].id; + if (len > nvm->regions[idx].size - from) + len = nvm->regions[idx].size - from; + + guard(mutex)(&nvm->lock); + + ret = idg_read(nvm, region, from, len, buf); + if (ret < 0) { + dev_dbg(&mtd->dev, "read failed with %zd\n", ret); + return ret; + } + + *retlen = ret; + + return 0; +} + +static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct intel_dg_nvm *nvm = mtd->priv; + unsigned int idx; + ssize_t ret; + u8 region; + + if (WARN_ON(!nvm)) + return -EINVAL; + + idx = idg_nvm_get_region(nvm, to); + + dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n", + nvm->regions[idx].id, nvm->regions[idx].name, to, len); + + if (idx >= nvm->nregions) { + dev_err(&mtd->dev, "out of range"); + return -ERANGE; + } + + to -= nvm->regions[idx].offset; + region = nvm->regions[idx].id; + if (len > nvm->regions[idx].size - to) + len = nvm->regions[idx].size - to; + + guard(mutex)(&nvm->lock); + + ret = idg_write(nvm, region, to, len, buf); + if (ret < 0) { + dev_dbg(&mtd->dev, "write failed with %zd\n", ret); + return ret; + } + + *retlen = ret; + + return 0; +} + +static void intel_dg_nvm_release(struct kref *kref) +{ + struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt); + int i; + + pr_debug("freeing 
intel_dg nvm\n"); + for (i = 0; i < nvm->nregions; i++) + kfree(nvm->regions[i].name); + mutex_destroy(&nvm->lock); + kfree(nvm); +} + +static int intel_dg_mtd_get_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct intel_dg_nvm *nvm = master->priv; + + if (WARN_ON(!nvm)) + return -EINVAL; + pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); + kref_get(&nvm->refcnt); + + return 0; +} + +static void intel_dg_mtd_put_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct intel_dg_nvm *nvm = master->priv; + + if (WARN_ON(!nvm)) + return; + pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); + kref_put(&nvm->refcnt, intel_dg_nvm_release); +} + +static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device, + unsigned int nparts, bool writable_override) +{ + struct mtd_partition *parts = NULL; + unsigned int i, n; + int ret; + + dev_dbg(device, "registering with mtd\n"); + + nvm->mtd.owner = THIS_MODULE; + nvm->mtd.dev.parent = device; + nvm->mtd.flags = MTD_CAP_NORFLASH; + nvm->mtd.type = MTD_DATAFLASH; + nvm->mtd.priv = nvm; + nvm->mtd._write = intel_dg_mtd_write; + nvm->mtd._read = intel_dg_mtd_read; + nvm->mtd._erase = intel_dg_mtd_erase; + nvm->mtd._get_device = intel_dg_mtd_get_device; + nvm->mtd._put_device = intel_dg_mtd_put_device; + nvm->mtd.writesize = SZ_1; /* 1 byte granularity */ + nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */ + nvm->mtd.size = nvm->size; + + parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) { + if (!nvm->regions[i].is_readable) + continue; + parts[n].name = nvm->regions[i].name; + parts[n].offset = nvm->regions[i].offset; + parts[n].size = nvm->regions[i].size; + if (!nvm->regions[i].is_writable && !writable_override) + parts[n].mask_flags = MTD_WRITEABLE; + n++; + } + + ret = mtd_device_register(&nvm->mtd, parts, n); + + kfree(parts); + return ret; +} + +static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *aux_dev_id) +{ + struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev); + struct intel_dg_nvm *nvm; + struct device *device; + unsigned int nregions; + unsigned int i, n; + int ret; + + device = &aux_dev->dev; + + /* count available regions */ + for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { + if (invm->regions[i].name) + nregions++; + } + + if (!nregions) { + dev_err(device, "no regions defined\n"); + return -ENODEV; + } + + nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL); + if (!nvm) + return -ENOMEM; + + kref_init(&nvm->refcnt); + mutex_init(&nvm->lock); + + for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { + if (!invm->regions[i].name) + continue; + + char *name = kasprintf(GFP_KERNEL, "%s.%s", + dev_name(&aux_dev->dev), invm->regions[i].name); + if (!name) + continue; + nvm->regions[n].name = name; + nvm->regions[n].id = i; + n++; + } + nvm->nregions = n; /* in case where kasprintf fail */ + + nvm->base = devm_ioremap_resource(device, &invm->bar); + if (IS_ERR(nvm->base)) { + ret = PTR_ERR(nvm->base); + goto err; + } + + if (invm->non_posted_erase) { + nvm->base2 = devm_ioremap_resource(device, &invm->bar2); + if (IS_ERR(nvm->base2)) { + ret = PTR_ERR(nvm->base2); + goto err; + } + } + + ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase); + if (ret < 0) { + dev_err(device, "cannot initialize nvm %d\n", ret); + goto err; + } 
+ + ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override); + if (ret) { + dev_err(device, "failed init mtd %d\n", ret); + goto err; + } + + dev_set_drvdata(&aux_dev->dev, nvm); + + return 0; + +err: + kref_put(&nvm->refcnt, intel_dg_nvm_release); + return ret; +} + +static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev) +{ + struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev); + + if (!nvm) + return; + + mtd_device_unregister(&nvm->mtd); + + dev_set_drvdata(&aux_dev->dev, NULL); + + kref_put(&nvm->refcnt, intel_dg_nvm_release); +} + +static const struct auxiliary_device_id intel_dg_mtd_id_table[] = { + { + .name = "i915.nvm", + }, + { + .name = "xe.nvm", + }, + { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table); + +static struct auxiliary_driver intel_dg_mtd_driver = { + .probe = intel_dg_mtd_probe, + .remove = intel_dg_mtd_remove, + .driver = { + /* auxiliary_driver_register() sets .name to be the modname */ + }, + .id_table = intel_dg_mtd_id_table +}; +module_auxiliary_driver(intel_dg_mtd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel DGFX MTD driver"); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 391d81ad960c..8dc4f5c493fc 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, /* Sanitize user input */ p.devname[BLKPG_DEVNAMELTH - 1] = '\0'; - return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL); + return mtd_add_partition(mtd, p.devname, p.start, p.length); case BLKPG_DEL_PARTITION: diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 429d8c16baf0..5ba9a741f5ac 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -68,13 +68,7 @@ static struct class mtd_class = { .pm = MTD_CLS_PM_OPS, }; -static struct class mtd_master_class = { - .name = "mtd_master", - .pm = MTD_CLS_PM_OPS, -}; - static DEFINE_IDR(mtd_idr); -static DEFINE_IDR(mtd_master_idr); /* These are exported solely for the purpose of mtd_blkdevs.c. You should not use them for _anything_ else */ @@ -89,9 +83,8 @@ EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); -#define MTD_MASTER_DEVS 255 + #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) -static dev_t mtd_master_devt; /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... @@ -111,17 +104,6 @@ static void mtd_release(struct device *dev) device_destroy(&mtd_class, index + 1); } -static void mtd_master_release(struct device *dev) -{ - struct mtd_info *mtd = dev_get_drvdata(dev); - - idr_remove(&mtd_master_idr, mtd->index); - of_node_put(mtd_get_of_node(mtd)); - - if (mtd_is_partition(mtd)) - release_mtd_partition(mtd); -} - static void mtd_device_release(struct kref *kref) { struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt); @@ -385,11 +367,6 @@ static const struct device_type mtd_devtype = { .release = mtd_release, }; -static const struct device_type mtd_master_devtype = { - .name = "mtd_master", - .release = mtd_master_release, -}; - static bool mtd_expert_analysis_mode; #ifdef CONFIG_DEBUG_FS @@ -657,13 +634,13 @@ exit_parent: /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure - * @partitioned: create partitioned device * * Add a device to the list of MTD devices present in the system, and * notify each currently active MTD 'user' of its arrival. 
Returns * zero on success or non-zero on failure. */ -int add_mtd_device(struct mtd_info *mtd, bool partitioned) + +int add_mtd_device(struct mtd_info *mtd) { struct device_node *np = mtd_get_of_node(mtd); struct mtd_info *master = mtd_get_master(mtd); @@ -710,17 +687,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) ofidx = -1; if (np) ofidx = of_alias_get_id(np, "mtd"); - if (partitioned) { - if (ofidx >= 0) - i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); - else - i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); - } else { - if (ofidx >= 0) - i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); - else - i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL); - } + if (ofidx >= 0) + i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); + else + i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); if (i < 0) { error = i; goto fail_locked; @@ -768,18 +738,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) /* Caller should have set dev.parent to match the * physical device, if appropriate. */ - if (partitioned) { - mtd->dev.type = &mtd_devtype; - mtd->dev.class = &mtd_class; - mtd->dev.devt = MTD_DEVT(i); - dev_set_name(&mtd->dev, "mtd%d", i); - error = dev_set_name(&mtd->dev, "mtd%d", i); - } else { - mtd->dev.type = &mtd_master_devtype; - mtd->dev.class = &mtd_master_class; - mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i); - error = dev_set_name(&mtd->dev, "mtd_master%d", i); - } + mtd->dev.type = &mtd_devtype; + mtd->dev.class = &mtd_class; + mtd->dev.devt = MTD_DEVT(i); + error = dev_set_name(&mtd->dev, "mtd%d", i); if (error) goto fail_devname; dev_set_drvdata(&mtd->dev, mtd); @@ -787,7 +749,6 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); if (error) { - pr_err("mtd: %s device_register fail %d\n", mtd->name, error); put_device(&mtd->dev); goto fail_added; } @@ -799,13 +760,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) mtd_debugfs_populate(mtd); - if (partitioned) { - device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, - "mtd%dro", i); - } + device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, + "mtd%dro", i); - pr_debug("mtd: Giving out %spartitioned device %d to %s\n", - partitioned ? 
"" : "un-", i, mtd->name); + pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ list_for_each_entry(not, &mtd_notifiers, list) @@ -813,16 +771,13 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned) mutex_unlock(&mtd_table_mutex); - if (partitioned) { - if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { - if (IS_BUILTIN(CONFIG_MTD)) { - pr_info("mtd: setting mtd%d (%s) as root device\n", - mtd->index, mtd->name); - ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); - } else { - pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", - mtd->index, mtd->name); - } + if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { + if (IS_BUILTIN(CONFIG_MTD)) { + pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name); + ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); + } else { + pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", + mtd->index, mtd->name); } } @@ -838,10 +793,7 @@ fail_nvmem_add: fail_added: of_node_put(mtd_get_of_node(mtd)); fail_devname: - if (partitioned) - idr_remove(&mtd_idr, i); - else - idr_remove(&mtd_master_idr, i); + idr_remove(&mtd_idr, i); fail_locked: mutex_unlock(&mtd_table_mutex); return error; @@ -859,14 +811,12 @@ fail_locked: int del_mtd_device(struct mtd_info *mtd) { - struct mtd_notifier *not; - struct idr *idr; int ret; + struct mtd_notifier *not; mutex_lock(&mtd_table_mutex); - idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr; - if (idr_find(idr, mtd->index) != mtd) { + if (idr_find(&mtd_idr, mtd->index) != mtd) { ret = -ENODEV; goto out_error; } @@ -1106,7 +1056,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, const struct mtd_partition *parts, int nr_parts) { - struct mtd_info *parent; int ret, err; mtd_set_dev_defaults(mtd); @@ -1115,30 +1064,25 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, if (ret) goto out; - ret = add_mtd_device(mtd, false); - if (ret) - goto out; - if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) { - ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent); + ret = add_mtd_device(mtd); if (ret) goto out; - - } else { - parent = mtd; } /* Prefer parsed partitions over driver-provided fallback */ - ret = parse_mtd_partitions(parent, types, parser_data); + ret = parse_mtd_partitions(mtd, types, parser_data); if (ret == -EPROBE_DEFER) goto out; if (ret > 0) ret = 0; else if (nr_parts) - ret = add_mtd_partitions(parent, parts, nr_parts); - else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) - ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL); + ret = add_mtd_partitions(mtd, parts, nr_parts); + else if (!device_is_registered(&mtd->dev)) + ret = add_mtd_device(mtd); + else + ret = 0; if (ret) goto out; @@ -1158,14 +1102,13 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, register_reboot_notifier(&mtd->reboot_notifier); } - return 0; out: - nvmem_unregister(mtd->otp_user_nvmem); - nvmem_unregister(mtd->otp_factory_nvmem); - - del_mtd_partitions(mtd); + if (ret) { + nvmem_unregister(mtd->otp_user_nvmem); + nvmem_unregister(mtd->otp_factory_nvmem); + } - if (device_is_registered(&mtd->dev)) { + if (ret && device_is_registered(&mtd->dev)) { err = del_mtd_device(mtd); if (err) pr_err("Error when deleting MTD device (%d)\n", err); @@ -1324,7 +1267,8 @@ int __get_mtd_device(struct mtd_info *mtd) 
mtd = mtd->parent; } - kref_get(&master->refcnt); + if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + kref_get(&master->refcnt); return 0; } @@ -1418,7 +1362,8 @@ void __put_mtd_device(struct mtd_info *mtd) mtd = parent; } - kref_put(&master->refcnt, mtd_device_release); + if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + kref_put(&master->refcnt, mtd_device_release); module_put(master->owner); @@ -2585,16 +2530,6 @@ static int __init init_mtd(void) if (ret) goto err_reg; - ret = class_register(&mtd_master_class); - if (ret) - goto err_reg2; - - ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master"); - if (ret < 0) { - pr_err("unable to allocate char dev region\n"); - goto err_chrdev; - } - mtd_bdi = mtd_bdi_init("mtd"); if (IS_ERR(mtd_bdi)) { ret = PTR_ERR(mtd_bdi); @@ -2619,10 +2554,6 @@ out_procfs: bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); err_bdi: - unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); -err_chrdev: - class_unregister(&mtd_master_class); -err_reg2: class_unregister(&mtd_class); err_reg: pr_err("Error registering mtd class or bdi: %d\n", ret); @@ -2636,12 +2567,9 @@ static void __exit cleanup_mtd(void) if (proc_mtd) remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); - class_unregister(&mtd_master_class); - unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); idr_destroy(&mtd_idr); - idr_destroy(&mtd_master_idr); } module_init(init_mtd); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 2258d31c5aa6..b014861a06a6 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex; extern struct backing_dev_info *mtd_bdi; struct mtd_info *__mtd_next_device(int i); -int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned); +int __must_check add_mtd_device(struct mtd_info *mtd); int del_mtd_device(struct mtd_info *mtd); int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5a3db36d734e..994e8c51e674 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -86,7 +86,8 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, * parent conditional on that option. Note, this is a way to * distinguish between the parent and its partitions in sysfs. */ - child->dev.parent = &parent->dev; + child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? + &parent->dev : parent->dev.parent; child->dev.of_node = part->of_node; child->parent = parent; child->part.offset = part->offset; @@ -242,7 +243,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new) } int mtd_add_partition(struct mtd_info *parent, const char *name, - long long offset, long long length, struct mtd_info **out) + long long offset, long long length) { struct mtd_info *master = mtd_get_master(parent); u64 parent_size = mtd_is_partition(parent) ? 
@@ -275,15 +276,12 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child, true); + ret = add_mtd_device(child); if (ret) goto err_remove_part; mtd_add_partition_attrs(child); - if (out) - *out = child; - return 0; err_remove_part: @@ -415,7 +413,7 @@ int add_mtd_partitions(struct mtd_info *parent, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child, true); + ret = add_mtd_device(child); if (ret) { mutex_lock(&master->master.partitions_lock); list_del(&child->part.node); @@ -592,6 +590,9 @@ static int mtd_part_of_parse(struct mtd_info *master, int ret, err = 0; dev = &master->dev; + /* Use parent device (controller) if the top level MTD is not registered */ + if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master)) + dev = master->dev.parent; np = mtd_get_of_node(master); if (mtd_is_partition(master)) @@ -710,7 +711,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types, if (ret < 0 && !err) err = ret; } - return err; } diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c index 4dc4d65e7d32..8e604cc22ca3 100644 --- a/drivers/mtd/nand/qpic_common.c +++ b/drivers/mtd/nand/qpic_common.c @@ -57,14 +57,15 @@ qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) bam_txn_buf += sizeof(*bam_txn); bam_txn->bam_ce = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems; bam_txn->cmd_sgl = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems; bam_txn->data_sgl = bam_txn_buf; + bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw; init_completion(&bam_txn->txn_done); @@ -238,6 +239,11 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, struct bam_transaction *bam_txn = nandc->bam_txn; u32 offset; + if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "CE"); + return -EINVAL; + } + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; /* fill the command desc */ @@ -258,6 +264,12 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, /* use the separate sgl after this command */ if (flags & NAND_BAM_NEXT_SGL) { + if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", + "CMD sgl"); + return -EINVAL; + } + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; bam_ce_size = (bam_txn->bam_ce_pos - bam_txn->bam_ce_start) * @@ -297,10 +309,20 @@ int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, struct bam_transaction *bam_txn = nandc->bam_txn; if (read) { + if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], vaddr, size); bam_txn->rx_sgl_pos++; } else { + if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], vaddr, size); 
bam_txn->tx_sgl_pos++; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 1003cf118c01..4dd6f1a4e797 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -1379,7 +1379,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int cwperpage, bad_block_byte, ret; bool wide_bus; - int ecc_mode = 1; + int ecc_mode = ECC_MODE_8BIT; /* controller only supports 512 bytes data steps */ ecc->size = NANDC_STEP_SIZE; @@ -1400,7 +1400,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (ecc->strength >= 8) { /* 8 bit ECC defaults to BCH ECC on all platforms */ host->bch_enabled = true; - ecc_mode = 1; + ecc_mode = ECC_MODE_8BIT; if (wide_bus) { host->ecc_bytes_hw = 14; @@ -1420,7 +1420,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (nandc->props->ecc_modes & ECC_BCH_4BIT) { /* BCH */ host->bch_enabled = true; - ecc_mode = 0; + ecc_mode = ECC_MODE_4BIT; if (wide_bus) { host->ecc_bytes_hw = 8; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 7099db7a62be..c411fe9be3ef 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -1585,6 +1585,7 @@ static void spinand_cleanup(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); + nanddev_ecc_engine_cleanup(nand); nanddev_cleanup(nand); spinand_manufacturer_cleanup(spinand); kfree(spinand->databuf); diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index 19f8dd4a6370..b7a28f001a38 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -25,7 +25,7 @@ static SPINAND_OP_VARIANTS(read_cache_octal_variants, SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); @@ -42,11 +42,11 @@ static SPINAND_OP_VARIANTS(update_cache_octal_variants, static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants, SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), @@ -289,7 +289,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), SPINAND_INFO("W35N02JW", /* 1.8V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22), - NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, 
 						      &write_cache_octal_variants,
@@ -298,7 +298,7 @@ static const struct spinand_info winbond_spinand_table[] = {
 		     SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
 	SPINAND_INFO("W35N04JW", /* 1.8V */
 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
-		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+		     NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
 		     NAND_ECCREQ(1, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
 					      &write_cache_octal_variants,
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 4f12ff755df0..643513ee891b 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -104,7 +104,7 @@ static const struct attribute_group spi_nor_sysfs_group = {
 	.is_visible = spi_nor_sysfs_is_visible,
 	.is_bin_visible = spi_nor_sysfs_is_bin_visible,
 	.attrs = spi_nor_sysfs_entries,
-	.bin_attrs_new = spi_nor_sysfs_bin_entries,
+	.bin_attrs = spi_nor_sysfs_bin_entries,
 };
 
 const struct attribute_group *spi_nor_sysfs_groups[] = {
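
Note: once bound, intel_dg_mtd_probe() registers one MTD partition per readable region, named "<device>.<region>" by the kasprintf() above, on top of a master device whose size intel_dg_nvm_init() derives from the highest region limit. A quick userspace sanity check against one of the resulting /dev/mtdN nodes; the device number is a placeholder (look it up in /proc/mtd), while writesize 1, erasesize 4K and type MTD_DATAFLASH come straight from intel_dg_nvm_init_mtd():

	/* Build with: cc -O2 -o dg_nvm_info dg_nvm_info.c */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/mtd-user.h>

	int main(void)
	{
		struct mtd_info_user info;
		int fd = open("/dev/mtd0", O_RDONLY);	/* placeholder node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, MEMGETINFO, &info) == 0)
			/* expect type MTD_DATAFLASH, writesize 1, erasesize 4096 */
			printf("size=%u erasesize=%u writesize=%u type=%u\n",
			       info.size, info.erasesize, info.writesize,
			       info.type);
		close(fd);
		return 0;
	}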