Diffstat (limited to 'drivers/mtd/nand/spi/core.c')
-rw-r--r--  drivers/mtd/nand/spi/core.c | 516 ++++++++++++++++++++++++-------
1 file changed, 447 insertions(+), 69 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 446ba8d43fbc..d207286572d8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -20,9 +20,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -34,9 +34,9 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
-static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -288,6 +294,11 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
/* Only enable or disable the engine */
return spinand_ecc_enable(spinand, enable);
}
@@ -307,17 +318,29 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
if (req->type == NAND_PAGE_WRITE)
return 0;
- /* Finish a page write: check the status, report errors/bitflips */
+ /* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG) {
mtd->ecc_stats.failed++;
- else if (ret > 0)
- mtd->ecc_stats.corrected += ret;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+ * Continuous reads don't allow us to get the per-page detail,
+ * so we may exaggerate the actual number of corrected bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
+ mtd->ecc_stats.corrected += ret * pages;
+ }
return ret;
}
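
In continuous mode the chip reports a single worst-case bitflip count for the whole burst, so the accounting above scales that count by the number of pages covered by the request. A minimal standalone sketch of that arithmetic, with hypothetical page size and request length (plain C, not driver code):

#include <stdio.h>

/* Hypothetical illustration of the ecc_stats scaling above. */
static unsigned int corrected_increment(unsigned int reported_bitflips,
					size_t datalen, size_t page_size,
					int continuous)
{
	/*
	 * Regular reads report per page; continuous reads report once
	 * for the whole burst, so scale by the number of pages read.
	 */
	unsigned int pages = continuous ? datalen / page_size : 1;

	return reported_bitflips * pages;
}

int main(void)
{
	/* e.g. a 4-page continuous read with 3 bitflips reported */
	printf("%u\n", corrected_increment(3, 4 * 2048, 2048, 1)); /* 12 */
	printf("%u\n", corrected_increment(3, 2048, 2048, 0));     /* 3 */
	return 0;
}
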
-static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
.init_ctx = spinand_ondie_ecc_init_ctx,
.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
@@ -337,9 +360,9 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
engine_conf->status = status;
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -349,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -367,7 +390,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
if (req->datalen) {
buf = spinand->databuf;
- nbytes = nanddev_page_size(nand);
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
column = 0;
}
@@ -379,7 +406,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ if (req->mode == MTD_OPS_RAW)
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ else
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+
+ if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
@@ -392,6 +425,21 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
nbytes -= ret;
column += ret;
buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous) {
+ /*
+ * An SPI controller with broken continuous read
+ * support was detected. Disable future use of
+ * continuous reads and return -EAGAIN so the read
+ * is retried in regular mode.
+ */
+ spinand->cont_read_possible = false;
+ return -EAGAIN;
+ }
}
if (req->datalen)
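
The length and column arithmetic above differs between the two modes: a regular read fetches one page worth of cache data, while a continuous read rounds the requested span up to full pages, and devices with a plane-select bit fold the plane number in just above the column address. A standalone sketch with hypothetical values (the ROUND_UP and fls_ helpers below stand in for the kernel macros):

#include <stdio.h>
#include <stddef.h>

#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

/* fls(): index of the highest set bit, 1-based, as in the kernel helper */
static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	size_t page_size = 2048, dataoffs = 512, datalen = 3000;
	unsigned int plane = 1, column = 0;

	/* Regular read: one page worth of cache data */
	size_t nbytes_regular = page_size;
	/* Continuous read: round the span up to full pages */
	size_t nbytes_cont = ROUND_UP(dataoffs + datalen, page_size);

	/* Plane-select bit just above the column address */
	column |= plane << fls_(page_size);

	printf("regular=%zu continuous=%zu column=0x%x\n",
	       nbytes_regular, nbytes_cont, column);
	return 0;
}
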
@@ -450,7 +498,13 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
req->ooblen);
}
- wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ if (req->mode == MTD_OPS_RAW)
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ else
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
+
+ if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
@@ -473,7 +527,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -483,18 +537,28 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand,
- unsigned long initial_delay_us,
- unsigned long poll_delay_us,
- u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * the STATUS_BUSY bit is cleared or when the timeout has expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
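
The helper documented above follows the usual poll-until-ready pattern: wait an initial delay, then re-read the status register until the busy bit clears or the timeout expires, optionally returning the last status value. A hypothetical standalone model of that pattern (fake_read_status(), the poll budget and the bit mask are illustrative, not the driver's API):

#include <stdio.h>

#define STATUS_BUSY	0x01

/* Hypothetical device model: busy for the first few polls. */
static unsigned char fake_read_status(void)
{
	static int polls;

	return ++polls < 3 ? STATUS_BUSY : 0x00;
}

/* Sketch of the poll-until-ready pattern. */
static int wait_ready(unsigned long max_polls, unsigned char *status_out)
{
	unsigned char status = STATUS_BUSY;
	unsigned long i;

	for (i = 0; i < max_polls && (status & STATUS_BUSY); i++)
		status = fake_read_status();	/* + sleep poll_delay_us */

	if (status_out)
		*status_out = status;

	return (status & STATUS_BUSY) ? -1 : 0;	/* -1 ~ timeout */
}

int main(void)
{
	unsigned char status;

	printf("ret=%d status=0x%02x\n", wait_ready(10, &status), status);
	return 0;
}
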
@@ -527,7 +591,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -540,7 +604,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -558,8 +622,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -589,8 +661,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -616,27 +696,32 @@ static int spinand_write_page(struct spinand_device *spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
- if (!ret && (status & STATUS_PROG_FAILED))
+ if (ret)
+ return ret;
+
+ if (status & STATUS_PROG_FAILED)
return -EIO;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
- unsigned int max_bitflips = 0;
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
- int ret = 0;
+ unsigned int retry_mode = 0;
+ int ret;
- if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
- disable_ecc = true;
+ old_stats = mtd->ecc_stats;
- mutex_lock(&spinand->lock);
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
@@ -646,25 +731,205 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
+read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- else
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ } else {
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
- }
- mutex_unlock(&spinand->lock);
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
+ }
if (ecc_failed && !ret)
ret = -EBADMSG;
+ return ret;
+}
+
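
The retry handling added to the regular read path is easier to see without the MTD plumbing: on -EBADMSG, step through the chip's read-retry modes until one succeeds or the modes are exhausted, then restore mode 0 (the real code also resets mtd->ecc_stats before each retry). A minimal standalone model with a hypothetical page_read() that only succeeds in retry mode 2:

#include <stdio.h>

#define EBADMSG		74
#define MAX_RETRY_MODES	4

/* Hypothetical device: the page only reads cleanly in retry mode 2. */
static int page_read(unsigned int retry_mode)
{
	return retry_mode >= 2 ? 0 : -EBADMSG;
}

static int read_with_retries(void)
{
	unsigned int retry_mode = 0;
	int ret;

	do {
		ret = page_read(retry_mode);
		if (ret != -EBADMSG)
			break;		/* success or hard error */
	} while (++retry_mode <= MAX_RETRY_MODES);

	/* Always return the chip to the default mode afterwards. */
	if (retry_mode)
		printf("reset to retry mode 0\n");

	return ret;
}

int main(void)
{
	printf("ret=%d\n", read_with_retries());
	return 0;
}
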
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+ * Each data read must be a multiple of 4 bytes, and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = nand_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = nand_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee the SPI controller drivers will effectively deassert the CS
+ * when we expect them to, so take the register-based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
+
+ /* OOBs cannot be retrieved, so an external/on-host ECC engine won't work */
+ if (spinand->set_cont_read &&
+ (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
+ engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
+ spinand->cont_read_possible = true;
+ }
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+ * Continuous reads never cross LUN boundaries. Some devices don't
+ * support crossing plane boundaries. Some devices don't even support
+ * crossing block boundaries. The common case being reads through UBI,
+ * we will very rarely read two consecutive blocks or more, so it is safer
+ * and easier (can be improved) to only enable continuous reads when
+ * reading within the same erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
+ return start_pos.page < end_pos.page;
+}
+
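
The eligibility check above boils down to translating the start and end offsets into NAND coordinates and requiring that only the page number differs. A standalone sketch with a hypothetical geometry of 2 KiB pages and 64 pages per block (target and plane checks omitted; this is not the nanddev_offs_to_pos() helper):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical geometry: 2 KiB pages, 64 pages per erase block. */
#define PAGE_SIZE	2048u
#define PAGES_PER_BLOCK	64u

struct pos {
	uint64_t eraseblock;
	uint64_t page;
};

static struct pos offs_to_pos(uint64_t offs)
{
	uint64_t page = offs / PAGE_SIZE;

	return (struct pos){ page / PAGES_PER_BLOCK, page % PAGES_PER_BLOCK };
}

/*
 * Continuous read only when the span stays in one erase block and
 * actually covers more than one page.
 */
static bool use_cont_read(uint64_t from, uint64_t len)
{
	struct pos start = offs_to_pos(from);
	struct pos end = offs_to_pos(from + len - 1);

	if (start.eraseblock != end.eraseblock)
		return false;

	return start.page < end.page;
}

int main(void)
{
	printf("%d\n", use_cont_read(0, 4 * PAGE_SIZE));		/* 1: same block  */
	printf("%d\n", use_cont_read(0, PAGE_SIZE));			/* 0: single page */
	printf("%d\n", use_cont_read(63 * PAGE_SIZE, 2 * PAGE_SIZE));	/* 0: crosses     */
	return 0;
}
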
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct mtd_ecc_stats old_stats;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ mutex_lock(&spinand->lock);
+
+ old_stats = mtd->ecc_stats;
+
+ if (spinand_use_cont_read(mtd, from, ops)) {
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ if (ret == -EAGAIN && !spinand->cont_read_possible) {
+ /*
+ * Spi controller with broken support of continuous
+ * reading was detected (see spinand_read_from_cache_op()),
+ * repeat reading in regular mode.
+ */
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+ } else {
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
+ mutex_unlock(&spinand->lock);
+
return ret ? ret : max_bitflips;
}
@@ -714,9 +979,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -755,11 +1028,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -834,19 +1110,50 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static struct spi_mem_dirmap_desc *spinand_create_rdesc(
+ struct spinand_device *spinand,
+ struct spi_mem_dirmap_info *info)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_desc *desc = NULL;
+
+ if (spinand->cont_read_possible) {
+ /*
+ * The SPI controller may return an error if info->length is
+ * too large.
+ */
+ info->length = nanddev_eraseblock_size(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ if (IS_ERR_OR_NULL(desc)) {
+ /*
+ * Continuous reads are not supported by the flash or
+ * its SPI controller, so fall back to regular reads.
+ */
+ spinand->cont_read_possible = false;
+
+ info->length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ return desc;
+}
+
static int spinand_create_dirmap(struct spinand_device *spinand,
unsigned int plane)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct spi_mem_dirmap_info info = {
- .length = nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- };
+ struct spi_mem_dirmap_info info = { 0 };
struct spi_mem_dirmap_desc *desc;
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
info.op_tmpl = *spinand->op_templates.update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
@@ -856,12 +1163,36 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
spinand->dirmaps[plane].wdesc = desc;
info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = spinand_create_rdesc(spinand, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
+ spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
+ spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
+
+ return 0;
+ }
+
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
- spinand->dirmaps[plane].rdesc = desc;
+ spinand->dirmaps[plane].wdesc_ecc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl.data.ecc = true;
+ desc = spinand_create_rdesc(spinand, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc_ecc = desc;
return 0;
}
@@ -894,12 +1225,20 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &alliancememory_spinand_manufacturer,
+ &ato_spinand_manufacturer,
+ &esmt_8c_spinand_manufacturer,
+ &esmt_c8_spinand_manufacturer,
+ &fmsh_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
+ &xtx_spinand_manufacturer,
};
static int spinand_manufacturer_match(struct spinand_device *spinand,
@@ -926,7 +1265,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
@@ -960,8 +1299,19 @@ static int spinand_id_detect(struct spinand_device *spinand)
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
- if (spinand->manufacturer->ops->init)
- return spinand->manufacturer->ops->init(spinand);
+ int ret;
+
+ if (spinand->manufacturer->ops->init) {
+ ret = spinand->manufacturer->ops->init(spinand);
+ if (ret)
+ return ret;
+ }
+
+ if (spinand->configure_chip) {
+ ret = spinand->configure_chip(spinand);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -978,10 +1328,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -994,17 +1347,23 @@ spinand_select_op_variant(struct spinand_device *spinand,
if (ret)
break;
+ spi_mem_adjust_op_freq(spinand->spimem, &op);
+
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
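
The selection loop above changes from "first supported variant wins" to "fastest supported variant wins", using the per-op duration estimate as the tie-breaker. A standalone sketch of that selection with hypothetical variants and made-up durations (the supported flag and duration_ns field stand in for spi_mem_supports_op() and spi_mem_calc_op_duration()):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct variant {
	const char *name;
	bool supported;		/* would come from spi_mem_supports_op()     */
	uint64_t duration_ns;	/* would come from spi_mem_calc_op_duration() */
};

static const struct variant *select_variant(const struct variant *v, int n)
{
	const struct variant *best = NULL;
	uint64_t best_ns = UINT64_MAX;
	int i;

	for (i = 0; i < n; i++) {
		if (!v[i].supported)
			continue;
		if (v[i].duration_ns < best_ns) {
			best_ns = v[i].duration_ns;
			best = &v[i];
		}
	}

	return best;	/* NULL if nothing is supported */
}

int main(void)
{
	const struct variant variants[] = {
		{ "1S-1S-4S", false, 1000 },
		{ "1S-1S-2S", true,  1800 },
		{ "1S-1S-1S", true,  3500 },
	};
	const struct variant *best = select_variant(variants, 3);

	printf("%s\n", best ? best->name : "none");	/* 1S-1S-2S */
	return 0;
}
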
/**
@@ -1047,6 +1406,12 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->configure_chip = table[i].configure_chip;
+ spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1188,9 +1553,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
@@ -1206,14 +1570,6 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_create_dirmaps(spinand);
- if (ret) {
- dev_err(dev,
- "Failed to create direct mappings for read/write operations (err = %d)\n",
- ret);
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
@@ -1227,6 +1583,12 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_cleanup_nanddev;
+ /*
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
+ */
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1236,6 +1598,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
+
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
@@ -1247,6 +1615,15 @@ static int spinand_init(struct spinand_device *spinand)
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_cleanup_ecc_engine;
+ }
return 0;
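
The new bitflip_threshold default is three quarters of the ECC strength, rounded up. A one-line worked example with hypothetical strengths:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* strength 8 -> threshold 6, strength 1 -> threshold 1 */
	printf("%d %d\n", DIV_ROUND_UP(8 * 3, 4), DIV_ROUND_UP(1 * 3, 4));
	return 0;
}
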
@@ -1269,6 +1646,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);