Diffstat (limited to 'drivers/mtd/nand/raw/nand_base.c')
 drivers/mtd/nand/raw/nand_base.c | 97 ++++++++++++++++++++++++++++++--------
 1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 9e24bedffd89..3b3ce2926f5d 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -366,6 +366,10 @@ static int nand_check_wp(struct nand_chip *chip)
if (chip->options & NAND_BROKEN_XD)
return 0;
+ /* controller responsible for NAND write protect */
+ if (chip->controller->controller_wp)
+ return 0;
+
/* Check the WP bit */
ret = nand_status_op(chip, &status);
if (ret)
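
[Annotation: controller_wp is the flag this series adds for controllers that own the WP pin; when it is set, the core trusts the controller and skips the READ STATUS based probe. A stand-alone model of the resulting early-out ordering follows; the struct layout and the NAND_BROKEN_XD value are illustrative, not kernel code, while 0x80 matches the kernel's NAND_STATUS_WP meaning (bit set = not protected).]

#include <stdbool.h>
#include <stdio.h>

#define NAND_BROKEN_XD	0x1	/* illustrative value, not the kernel's */
#define NAND_STATUS_WP	0x80	/* status bit set means *not* protected */

struct controller { bool controller_wp; };
struct chip { struct controller *controller; unsigned int options; };

/* Mirrors the check order above: broken-xD quirk first, then the new
 * controller-owned WP short-circuit, then the status register bit. */
static int check_wp(const struct chip *chip, unsigned char status)
{
	if (chip->options & NAND_BROKEN_XD)
		return 0;
	if (chip->controller->controller_wp)
		return 0;
	return (status & NAND_STATUS_WP) ? 0 : 1;
}

int main(void)
{
	struct controller wp_owner = { .controller_wp = true };
	struct chip chip = { .controller = &wp_owner };

	/* Status says "protected" (WP bit clear), but the controller flag wins. */
	printf("write protected: %d\n", check_wp(&chip, 0x00));
	return 0;
}
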
@@ -1207,6 +1211,23 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static void rawnand_cap_cont_reads(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ unsigned int pages_per_lun, first_lun, last_lun;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / pages_per_lun;
+ last_lun = chip->cont_read.last_page / pages_per_lun;
+
+ /* Prevent sequential cache reads across LUN boundaries */
+ if (first_lun != last_lun)
+ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ else
+ chip->cont_read.pause_page = chip->cont_read.last_page;
+}
+
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len, bool check_only)
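
[Annotation: rawnand_cap_cont_reads() above clamps the sequential cache read window so a READ CACHE SEQUENTIAL sequence never crosses a LUN boundary, pausing at the last page of the first LUN instead. A stand-alone sketch of that arithmetic; the geometry (64 pages per erase block, 1024 blocks per LUN) is hypothetical and chosen only for illustration.]

#include <stdio.h>

/* Hypothetical geometry, for illustration only. */
#define PAGES_PER_ERASEBLOCK	64
#define ERASEBLOCKS_PER_LUN	1024
#define PAGES_PER_LUN		(PAGES_PER_ERASEBLOCK * ERASEBLOCKS_PER_LUN)

/* Same math as rawnand_cap_cont_reads(): if the first and last pages live
 * in different LUNs, pause at the last page of the first LUN. */
static unsigned int cap_pause_page(unsigned int first_page, unsigned int last_page)
{
	unsigned int first_lun = first_page / PAGES_PER_LUN;
	unsigned int last_lun = last_page / PAGES_PER_LUN;

	if (first_lun != last_lun)
		return first_lun * PAGES_PER_LUN + PAGES_PER_LUN - 1;
	return last_page;
}

int main(void)
{
	/* 65536 pages per LUN here, so a 65000..66000 read spans two LUNs
	 * and must pause at page 65535, the last page of LUN 0. */
	printf("pause at %u\n", cap_pause_page(65000, 66000));
	return 0;
}
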
@@ -1225,7 +1246,7 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_op_instr cont_instrs[] = {
- NAND_OP_CMD(page == chip->cont_read.last_page ?
+ NAND_OP_CMD(page == chip->cont_read.pause_page ?
NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
@@ -1262,16 +1283,29 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
}
if (page == chip->cont_read.first_page)
- return nand_exec_op(chip, &start_op);
+ ret = nand_exec_op(chip, &start_op);
else
- return nand_exec_op(chip, &cont_op);
+ ret = nand_exec_op(chip, &cont_op);
+ if (ret)
+ return ret;
+
+ if (!chip->cont_read.ongoing)
+ return 0;
+
+ if (page == chip->cont_read.pause_page &&
+ page != chip->cont_read.last_page) {
+ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
+ rawnand_cap_cont_reads(chip);
+ } else if (page == chip->cont_read.last_page) {
+ chip->cont_read.ongoing = false;
+ }
+
+ return 0;
}
static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
{
- return chip->cont_read.ongoing &&
- page >= chip->cont_read.first_page &&
- page <= chip->cont_read.last_page;
+ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
}
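
[Annotation: the pause/resume handling above acts as a small state machine: reaching pause_page ends the current cache sequence, moves the window start just past the LUN boundary and re-caps it; reaching last_page ends the whole read. A stand-alone walk-through, reusing the illustrative 65536-pages-per-LUN geometry from the previous sketch; none of this is kernel code.]

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_LUN	65536	/* illustrative geometry, as above */

struct cont_read { unsigned int first_page, pause_page, last_page; bool ongoing; };

static void cap(struct cont_read *cr)
{
	unsigned int first_lun = cr->first_page / PAGES_PER_LUN;
	unsigned int last_lun = cr->last_page / PAGES_PER_LUN;

	cr->pause_page = (first_lun != last_lun) ?
			 (first_lun + 1) * PAGES_PER_LUN - 1 : cr->last_page;
}

/* Mirrors the bookkeeping run after each successful page operation. */
static void after_page(struct cont_read *cr, unsigned int page)
{
	if (!cr->ongoing)
		return;

	if (page == cr->pause_page && page != cr->last_page) {
		cr->first_page = cr->pause_page + 1;	/* resume in the next LUN */
		cap(cr);
	} else if (page == cr->last_page) {
		cr->ongoing = false;			/* whole read done */
	}
}

int main(void)
{
	struct cont_read cr = { .first_page = 65000, .last_page = 66000, .ongoing = true };

	cap(&cr);				/* pause_page = 65535 */
	after_page(&cr, 65535);
	printf("resumed at %u, pause %u\n", cr.first_page, cr.pause_page);
	after_page(&cr, 66000);
	printf("ongoing: %d\n", cr.ongoing);
	return 0;
}
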
/**
@@ -1493,7 +1527,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (naddrs < 0)
@@ -1916,7 +1951,8 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
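
[Annotation: the two hunks above switch the program and erase sequences from NAND_OPERATION to NAND_DESTRUCTIVE_OPERATION; the companion rawnand.h change is not part of this diff. The apparent intent pairs with the controller_wp check: a controller that drives the WP pin itself only needs to release it around array-modifying operations. A stand-alone model of that policy, with purely illustrative names and fields:]

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: a WP-owning controller releases the pin only for
 * operations flagged as destructive, keeping the array protected for reads. */
struct operation { const char *name; bool destructive; };

static void exec_op(const struct operation *op)
{
	if (op->destructive)
		printf("WP pin released for %s\n", op->name);
	else
		printf("WP pin kept asserted for %s\n", op->name);
}

int main(void)
{
	const struct operation read  = { "READ PAGE", false };
	const struct operation prog  = { "PROG PAGE", true };
	const struct operation erase = { "ERASE BLOCK", true };

	exec_op(&read);
	exec_op(&prog);
	exec_op(&erase);
	return 0;
}
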
@@ -3430,21 +3466,42 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int end_page, end_col;
+
+ chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- if ((col && col + readlen < (3 * mtd->writesize)) ||
- (!col && readlen < (2 * mtd->writesize))) {
- chip->cont_read.ongoing = false;
+ end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
+ end_col = (col + readlen) % mtd->writesize;
+
+ if (col)
+ page++;
+
+ if (end_col && end_page)
+ end_page--;
+
+ if (page + 1 > end_page)
return;
- }
- chip->cont_read.ongoing = true;
chip->cont_read.first_page = page;
- if (col)
+ chip->cont_read.last_page = end_page;
+ chip->cont_read.ongoing = true;
+
+ rawnand_cap_cont_reads(chip);
+}
+
+static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
+{
+ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
+ return;
+
+ chip->cont_read.first_page++;
+ if (chip->cont_read.first_page == chip->cont_read.pause_page)
chip->cont_read.first_page++;
- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
}
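
[Annotation: rawnand_enable_cont_reads() now derives the window from byte positions: a partial first page stays with conventional page reads, and windows too short to span at least two pages are dropped entirely. A stand-alone rerun of that derivation with an illustrative 4096-byte page size; the helper below simply mirrors the hunk above.]

#include <stdio.h>

#define WRITESIZE	4096	/* illustrative page size */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void window(unsigned int page, unsigned int col, unsigned int readlen)
{
	unsigned int end_page = DIV_ROUND_UP(col + readlen, WRITESIZE);	/* pages touched, rounded up */
	unsigned int end_col = (col + readlen) % WRITESIZE;	/* bytes into the final page */

	if (col)
		page++;		/* a partial first page is read conventionally */

	if (end_col && end_page)
		end_page--;	/* step back one page when the read ends mid-page */

	/* Keep the window only if it still spans at least two pages. */
	if (page + 1 > end_page)
		printf("too short, stay with regular page reads\n");
	else
		printf("sequential window: first_page=%u last_page=%u\n", page, end_page);
}

int main(void)
{
	window(0, 512, 6 * WRITESIZE);	/* unaligned: first_page=1 last_page=6 */
	window(0, 512, WRITESIZE);	/* too small to be worth it */
	return 0;
}
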
/**
@@ -3621,6 +3678,8 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagecache.bitflips);
+
+ rawnand_cont_read_skip_first_page(chip, page);
}
readlen -= bytes;
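
[Annotation: rawnand_cont_read_skip_first_page(), called from the read loop above, keeps the window consistent when the first page of a run is served from the page cache instead of the chip: the start is advanced past the cached page (and past pause_page if it lands exactly there), and the mechanism is abandoned once fewer than two pages remain. A stand-alone model with illustrative page numbers:]

#include <stdbool.h>
#include <stdio.h>

struct cont_read { unsigned int first_page, pause_page, last_page; bool ongoing; };

/* Mirrors rawnand_cont_read_skip_first_page(): drop pages that never
 * reach the chip from the front of the window. */
static void skip_first_page(struct cont_read *cr, unsigned int page)
{
	if (!cr->ongoing || page != cr->first_page)
		return;

	cr->first_page++;
	if (cr->first_page == cr->pause_page)
		cr->first_page++;
	if (cr->first_page >= cr->last_page)
		cr->ongoing = false;
}

int main(void)
{
	struct cont_read cr = { .first_page = 10, .pause_page = 14, .last_page = 14, .ongoing = true };

	skip_first_page(&cr, 10);	/* page 10 was cached: window is now 11..14 */
	printf("window %u..%u, ongoing %d\n", cr.first_page, cr.last_page, cr.ongoing);
	return 0;
}
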
@@ -5125,6 +5184,14 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
/* The supported_op fields should not be set by individual drivers */
WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+ /*
+ * Too many devices do not support sequential cached reads with on-die
+ * ECC correction enabled, so in this case refuse to perform the
+ * automation.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
+ return;
+
if (!nand_has_exec_op(chip))
return;