path: root/drivers/mtd/ubi
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--  drivers/mtd/ubi/Kconfig        22
-rw-r--r--  drivers/mtd/ubi/Makefile        1
-rw-r--r--  drivers/mtd/ubi/attach.c       16
-rw-r--r--  drivers/mtd/ubi/block.c       281
-rw-r--r--  drivers/mtd/ubi/build.c       250
-rw-r--r--  drivers/mtd/ubi/cdev.c         77
-rw-r--r--  drivers/mtd/ubi/debug.c       136
-rw-r--r--  drivers/mtd/ubi/debug.h       304
-rw-r--r--  drivers/mtd/ubi/eba.c          36
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c  226
-rw-r--r--  drivers/mtd/ubi/fastmap.c     122
-rw-r--r--  drivers/mtd/ubi/io.c           98
-rw-r--r--  drivers/mtd/ubi/kapi.c         84
-rw-r--r--  drivers/mtd/ubi/misc.c          2
-rw-r--r--  drivers/mtd/ubi/nvmem.c       191
-rw-r--r--  drivers/mtd/ubi/ubi-media.h     2
-rw-r--r--  drivers/mtd/ubi/ubi.h          86
-rw-r--r--  drivers/mtd/ubi/vmt.c         104
-rw-r--r--  drivers/mtd/ubi/vtbl.c          6
-rw-r--r--  drivers/mtd/ubi/wl.c          129
-rw-r--r--  drivers/mtd/ubi/wl.h            9
21 files changed, 1618 insertions, 564 deletions
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 2ed77b7b3fcb..e28a3af83c0e 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -104,4 +104,26 @@ config MTD_UBI_BLOCK
If in doubt, say "N".
+config MTD_UBI_FAULT_INJECTION
+ bool "Fault injection capability of UBI device"
+ default n
+ depends on FAULT_INJECTION_DEBUG_FS
+ help
+ This option enables fault-injection support for UBI devices for
+ testing purposes.
+
+ If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+ This option enables an additional driver exposing UBI volumes as NVMEM
+ providers, intended for platforms where UBI is part of the firmware
+ specification and also used to store e.g. MAC addresses or board-
+ specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
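
The MTD_UBI_NVMEM help text above describes exposing UBI volumes as NVMEM providers. For orientation, here is a minimal consumer-side sketch in C, assuming a hypothetical NVMEM cell named "mac-address" has been bound to the consuming device; the cell name, device wiring, and example_get_mac() are illustrative, not part of this patch:

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Hedged sketch: read a MAC address from an NVMEM cell that a
 * MTD_UBI_NVMEM provider could back. The "mac-address" cell name
 * and this helper are assumptions for illustration only.
 */
static int example_get_mac(struct device *dev, u8 *mac)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len != ETH_ALEN || !is_valid_ether_addr(buf)) {
		kfree(buf);
		return -EINVAL;
	}

	ether_addr_copy(mac, buf);
	kfree(buf);
	return 0;
}
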
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605ca7..4b51aaf00d1a 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index ae5abe492b52..884171871d0e 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1447,7 +1447,7 @@ out_ech:
return err;
}
-static struct ubi_attach_info *alloc_ai(void)
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
struct ubi_attach_info *ai;
@@ -1461,7 +1461,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->alien);
INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
- ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
@@ -1491,7 +1491,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
err = -ENOMEM;
- scan_ai = alloc_ai();
+ scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
if (!scan_ai)
goto out;
@@ -1557,7 +1557,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
int err;
struct ubi_attach_info *ai;
- ai = alloc_ai();
+ ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
@@ -1575,7 +1575,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
- ai = alloc_ai();
+ ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
@@ -1600,7 +1600,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
err = ubi_read_volume_table(ubi, ai);
if (err)
- goto out_ai;
+ goto out_fm;
err = ubi_wl_init(ubi, ai);
if (err)
@@ -1614,7 +1614,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai;
- scan_ai = alloc_ai();
+ scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
if (!scan_ai) {
err = -ENOMEM;
goto out_wl;
@@ -1642,6 +1642,8 @@ out_wl:
out_vtbl:
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
+out_fm:
+ ubi_free_fastmap(ubi);
out_ai:
destroy_ai(ai);
return err;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index e003b4b44ffa..b53fd147fa65 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -35,7 +35,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
-#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
@@ -62,15 +61,14 @@ struct ubiblock_param {
};
struct ubiblock_pdu {
- struct work_struct work;
struct ubi_sgl usgl;
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -82,8 +80,6 @@ struct ubiblock {
struct gendisk *gd;
struct request_queue *rq;
- struct workqueue_struct *wq;
-
struct mutex dev_mutex;
struct list_head list;
struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
return NULL;
}
-static int ubiblock_read(struct ubiblock_pdu *pdu)
+static blk_status_t ubiblock_read(struct request *req)
{
- int ret, leb, offset, bytes_left, to_read;
- u64 pos;
- struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
struct ubiblock *dev = req->q->queuedata;
+ u64 pos = blk_rq_pos(req) << 9;
+ int to_read = blk_rq_bytes(req);
+ int bytes_left = to_read;
+ /* Get LEB:offset address to read from */
+ int offset = do_div(pos, dev->leb_size);
+ int leb = pos;
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ int ret;
- to_read = blk_rq_bytes(req);
- pos = blk_rq_pos(req) << 9;
+ blk_mq_start_request(req);
- /* Get LEB:offset address to read from */
- offset = do_div(pos, dev->leb_size);
- leb = pos;
- bytes_left = to_read;
+ /*
+ * It is safe to ignore the return value of blk_rq_map_sg() because
+ * the number of sg entries is limited to UBI_MAX_SG_COUNT
+ * and ubi_read_sg() will check that limit.
+ */
+ ubi_sgl_init(&pdu->usgl);
+ blk_rq_map_sg(req, pdu->usgl.sg);
while (bytes_left) {
/*
@@ -206,19 +211,25 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
if (ret < 0)
- return ret;
+ break;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
offset = 0;
}
- return 0;
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
+ blk_mq_end_request(req, errno_to_blk_status(ret));
+
+ return BLK_STS_OK;
}
-static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+static int ubiblock_open(struct gendisk *disk, blk_mode_t mode)
{
- struct ubiblock *dev = bdev->bd_disk->private_data;
+ struct ubiblock *dev = disk->private_data;
int ret;
mutex_lock(&dev->dev_mutex);
@@ -235,11 +246,10 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* It's just a paranoid check, as write requests will get rejected
* in any case.
*/
- if (mode & FMODE_WRITE) {
+ if (mode & BLK_OPEN_WRITE) {
ret = -EROFS;
goto out_unlock;
}
-
dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
if (IS_ERR(dev->desc)) {
dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
@@ -259,7 +269,7 @@ out_unlock:
return ret;
}
-static void ubiblock_release(struct gendisk *gd, fmode_t mode)
+static void ubiblock_release(struct gendisk *gd)
{
struct ubiblock *dev = gd->private_data;
@@ -272,12 +282,12 @@ static void ubiblock_release(struct gendisk *gd, fmode_t mode)
mutex_unlock(&dev->dev_mutex);
}
-static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int ubiblock_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
- geo->sectors = get_capacity(bdev->bd_disk);
+ geo->sectors = get_capacity(disk);
geo->start = 0;
return 0;
}
@@ -289,43 +299,15 @@ static const struct block_device_operations ubiblock_ops = {
.getgeo = ubiblock_getgeo,
};
-static void ubiblock_do_work(struct work_struct *work)
-{
- int ret;
- struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
- struct request *req = blk_mq_rq_from_pdu(pdu);
-
- blk_mq_start_request(req);
-
- /*
- * It is safe to ignore the return value of blk_rq_map_sg() because
- * the number of sg entries is limited to UBI_MAX_SG_COUNT
- * and ubi_read_sg() will check that limit.
- */
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
- ret = ubiblock_read(pdu);
- rq_flush_dcache_pages(req);
-
- blk_mq_end_request(req, errno_to_blk_status(ret));
-}
-
static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct request *req = bd->rq;
- struct ubiblock *dev = hctx->queue->queuedata;
- struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
-
- switch (req_op(req)) {
+ switch (req_op(bd->rq)) {
case REQ_OP_READ:
- ubi_sgl_init(&pdu->usgl);
- queue_work(dev->wq, &pdu->work);
- return BLK_STS_OK;
+ return ubiblock_read(bd->rq);
default:
return BLK_STS_IOERR;
}
-
}
static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -335,8 +317,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
- INIT_WORK(&pdu->work, ubiblock_do_work);
-
return 0;
}
@@ -350,9 +330,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
u64 size = vi->used_bytes >> 9;
if (vi->used_bytes % 512) {
- pr_warn("UBI: block: volume size is not a multiple of 512, "
- "last %llu bytes are ignored!\n",
- vi->used_bytes - (size << 9));
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
+ else
+ pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
}
if ((sector_t)size != size)
@@ -365,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
int ubiblock_create(struct ubi_volume_info *vi)
{
+ struct queue_limits lim = {
+ .max_segments = UBI_MAX_SG_COUNT,
+ };
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
@@ -397,20 +383,21 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
- dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
- goto out_free_dev;;
+ pr_err("ubiblock%d_%d: blk_mq_alloc_tag_set failed\n",
+ dev->ubi_num, dev->vol_id);
+ goto out_free_dev;
}
/* Initialize the gendisk of this ubiblock device */
- gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
@@ -421,42 +408,36 @@ int ubiblock_create(struct ubi_volume_info *vi)
gd->minors = 1;
gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
if (gd->first_minor < 0) {
- dev_err(disk_to_dev(gd),
- "block: dynamic minor allocation failed");
+ pr_err("ubiblock%d_%d: block: dynamic minor allocation failed\n",
+ dev->ubi_num, dev->vol_id);
ret = -ENODEV;
goto out_cleanup_disk;
}
+ gd->flags |= GENHD_FL_NO_PART;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
dev->gd = gd;
dev->rq = gd->queue;
- blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
-
- /*
- * Create one workqueue per volume (per registered block device).
- * Rembember workqueues are cheap, they're not threads.
- */
- dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
- if (!dev->wq) {
- ret = -ENOMEM;
- goto out_remove_minor;
- }
list_add_tail(&dev->list, &ubiblock_devices);
/* Must be the last step: anyone can call file ops from now on */
- add_disk(dev->gd);
+ ret = device_add_disk(vi->dev, dev->gd, NULL);
+ if (ret)
+ goto out_remove_minor;
+
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
mutex_unlock(&devices_mutex);
return 0;
out_remove_minor:
+ list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
- blk_cleanup_disk(dev->gd);
+ put_disk(gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
@@ -469,15 +450,15 @@ out_unlock:
static void ubiblock_cleanup(struct ubiblock *dev)
{
+ int id = dev->gd->first_minor;
+
/* Stop new requests to arrive */
del_gendisk(dev->gd);
- /* Flush pending work */
- destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
- idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
+ idr_remove(&ubiblock_minor_idr, id);
}
int ubiblock_remove(struct ubi_volume_info *vi)
@@ -556,6 +537,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over ubiblock cmdline parameters. If a parameter matches the
+ * newly added volume create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -563,10 +608,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -592,56 +634,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -667,18 +659,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
@@ -689,7 +670,7 @@ err_unreg:
return ret;
}
-void __exit ubiblock_exit(void)
+void ubiblock_exit(void)
{
ubi_unregister_volume_notifier(&ubiblock_notifier);
ubiblock_remove_all();
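
Two points in the block.c changes above are easy to miss. First, the tag_set now carries BLK_MQ_F_BLOCKING instead of BLK_MQ_F_SHOULD_MERGE, which tells blk-mq that ->queue_rq() may sleep; that is what lets ubiblock_read() run synchronously and the per-volume workqueue go away. A condensed sketch of the resulting pattern, with example_do_sync_io() standing in for the UBI read loop (illustrative names, not the driver's actual helpers):

#include <linux/blk-mq.h>

/* Hedged sketch: with BLK_MQ_F_BLOCKING in tag_set.flags, blk-mq
 * invokes ->queue_rq() from a context that may sleep, so the
 * request can be completed synchronously without a workqueue.
 */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	int err;

	blk_mq_start_request(req);
	err = example_do_sync_io(req);	/* may sleep, e.g. ubi_read_sg() */
	blk_mq_end_request(req, errno_to_blk_status(err));
	return BLK_STS_OK;
}

Second, device creation now happens from the UBI_VOLUME_ADDED notifier via ubiblock_create_from_param(), so 'block=' parameters take effect whenever a matching volume appears, not only once at module init.
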
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a7e3eb9befb6..ef6a22f372f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -35,7 +36,7 @@
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 4
+#define MTD_PARAM_MAX_COUNT 6
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,12 +54,16 @@
* @ubi_num: UBI number
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @enable_fm: enable fastmap when value is non-zero
+ * @need_resv_pool: reserve pool->max_size pebs when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int ubi_num;
int vid_hdr_offs;
int max_beb_per1024;
+ int enable_fm;
+ int need_resv_pool;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -88,12 +93,12 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
-static ssize_t version_show(struct class *class, struct class_attribute *attr,
+static ssize_t version_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", UBI_VERSION);
@@ -107,9 +112,8 @@ static struct attribute *ubi_class_attrs[] = {
ATTRIBUTE_GROUPS(ubi_class);
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class ubi_class = {
+const struct class ubi_class = {
.name = UBI_NAME_STR,
- .owner = THIS_MODULE,
.class_groups = ubi_class_groups,
};
@@ -257,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -294,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -323,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -351,9 +358,6 @@ static ssize_t dev_attribute_show(struct device *dev,
* we still can use 'ubi->ubi_num'.
*/
ubi = container_of(dev, struct ubi_device, dev);
- ubi = ubi_get_device(ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
if (attr == &dev_eraseblock_size)
ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -382,7 +386,6 @@ static ssize_t dev_attribute_show(struct device *dev,
else
ret = -EINVAL;
- ubi_put_device(ubi);
return ret;
}
@@ -472,6 +475,7 @@ static int uif_init(struct ubi_device *ubi)
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
+ ubi->volumes[i] = NULL;
goto out_volumes;
}
}
@@ -513,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -684,6 +688,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
+ /*
+ * The VID header is allocated with ubi->vid_hdr_alsize bytes,
+ * as described in the comments in io.c.
+ * Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
+ * ubi->vid_hdr_alsize, so that VID header operations
+ * won't access memory out of bounds.
+ */
+ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+ " + VID header size(%zu) > VID header aligned size(%d).",
+ ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+ return -EINVAL;
+ }
+
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
@@ -811,6 +830,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ * @disable_fm: whether to disable fastmap
+ * @need_resv_pool: whether to reserve pebs to fill fm_pool
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -818,11 +839,16 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* automatically. Returns the new UBI device number in case of success and a
* negative error code in case of failure.
*
+ * If @disable_fm is true, ubi doesn't create a new fastmap even if the module
+ * param 'fm_autoconvert' is set, and any existing fastmap will be destroyed
+ * after full scanning.
+ *
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024)
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm,
+ bool need_resv_pool)
{
struct ubi_device *ubi;
int i, err;
@@ -876,6 +902,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
@@ -905,6 +938,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->dev.release = dev_release;
ubi->dev.class = &ubi_class;
ubi->dev.groups = ubi_dev_groups;
+ ubi->dev.parent = &mtd->dev;
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
@@ -925,7 +959,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
- ubi->fm_disabled = !fm_autoconvert;
+ ubi->fm_pool_rsv_cnt = need_resv_pool ? ubi->fm_pool.max_size : 0;
+ ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
@@ -966,7 +1001,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_attach(ubi, 0);
+ err = ubi_attach(ubi, disable_fm ? 1 : 0);
if (err) {
ubi_err(ubi, "failed to attach mtd%d, error %d",
mtd->index, err);
@@ -979,9 +1014,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
goto out_detach;
}
- /* Make device "available" before it becomes accessible via sysfs */
- ubi_devices[ubi_num] = ubi;
-
err = uif_init(ubi);
if (err)
goto out_detach;
@@ -1026,6 +1058,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
+ ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1034,7 +1067,6 @@ out_debugfs:
out_uif:
uif_close(ubi);
out_detach:
- ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
@@ -1070,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1081,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1191,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * we are already holding &mtd_table_mutex, but still need
+ * to bump refcount
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1249,7 +1287,9 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
- p->vid_hdr_offs, p->max_beb_per1024);
+ p->vid_hdr_offs, p->max_beb_per1024,
+ p->enable_fm == 0,
+ p->need_resv_pool != 0);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
@@ -1274,24 +1314,81 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_debugfs;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
+ ubiblock_exit();
+out_debugfs:
ubi_debugfs_exit();
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
@@ -1302,13 +1399,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
@@ -1428,7 +1527,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->max_beb_per1024);
if (err) {
- pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
token);
return -EINVAL;
}
@@ -1438,14 +1537,38 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
if (token) {
int err = kstrtoint(token, 10, &p->ubi_num);
- if (err) {
- pr_err("UBI error: bad value for ubi_num parameter: %s",
+ if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
+ pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
}
} else
p->ubi_num = UBI_DEV_NUM_AUTO;
+ token = tokens[4];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->enable_fm);
+
+ if (err) {
+ pr_err("UBI error: bad value for enable_fm parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->enable_fm = 0;
+
+ token = tokens[5];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->need_resv_pool);
+
+ if (err) {
+ pr_err("UBI error: bad value for need_resv_pool parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->need_resv_pool = 0;
+
mtd_devs += 1;
return 0;
}
@@ -1458,11 +1581,14 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
"Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+ "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
+ "Optional \"need_resv_pool\" parameter determines whether to reserve pool->max_size pebs during attach. If the value is non-zero, peb reservation is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+ "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
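
The new ubi_mtd_notifier above auto-attaches any MTD whose device-tree node carries the "linux,ubi" compatible, as checked by of_device_is_compatible() in ubi_notify_add(). A hedged illustration of such a node, shown as a C comment; the partition label, offset, and size are made up:

/*
 * Hedged sketch of a device-tree partition that ubi_notify_add()
 * would auto-attach (label, reg offset and size are illustrative):
 *
 *	partition@400000 {
 *		label = "ubi";
 *		reg = <0x400000 0x3c00000>;
 *		compatible = "linux,ubi";
 *	};
 */
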
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index cc9a28cf9d82..b700a0efaa93 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -672,7 +672,7 @@ static int verify_rsvol_req(const struct ubi_device *ubi,
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
- * the request, opens the volume and calls corresponding volumes management
+ * request, opens the volume and calls corresponding volumes management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
@@ -828,6 +828,70 @@ out_free:
return err;
}
+static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq)
+{
+ struct ubi_ecinfo_req req;
+ struct ubi_wl_entry *wl;
+ int read_cnt;
+ int peb;
+ int end_peb;
+
+ /* Copy the input arguments */
+ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ /* Check input arguments */
+ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count)
+ return -EINVAL;
+
+ if (check_add_overflow(req.start, req.length, &end_peb))
+ return -EINVAL;
+
+ if (end_peb > ubi->peb_count)
+ end_peb = ubi->peb_count;
+
+ /* Check access rights before filling erase_counters array */
+ if (!access_ok((void __user *)ureq->erase_counters,
+ (end_peb-req.start) * sizeof(int32_t)))
+ return -EFAULT;
+
+ /* Fill erase counter array */
+ read_cnt = 0;
+ for (peb = req.start; peb < end_peb; read_cnt++, peb++) {
+ int ec;
+
+ if (ubi_io_is_bad(ubi, peb)) {
+ if (__put_user(UBI_UNKNOWN, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ continue;
+ }
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[peb];
+ if (wl)
+ ec = wl->ec;
+ else
+ ec = UBI_UNKNOWN;
+
+ spin_unlock(&ubi->wl_lock);
+
+ if (__put_user(ec, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ }
+
+ /* Return actual read length */
+ req.read_length = read_cnt;
+
+ /* Copy everything except erase counter array */
+ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -991,6 +1055,12 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ case UBI_IOCECNFO:
+ {
+ err = ubi_get_ec_info(ubi, argp);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
@@ -1041,7 +1111,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
- req.max_beb_per1024);
+ req.max_beb_per1024, !!req.disable_fm,
+ !!req.need_resv_pool);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
@@ -1094,7 +1165,6 @@ const struct file_operations ubi_vol_cdev_operations = {
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1104,5 +1174,4 @@ const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
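
The new UBI_IOCECNFO ioctl handled by ubi_get_ec_info() above returns per-PEB erase counters. Below is a hedged userspace sketch, assuming struct ubi_ecinfo_req in ubi-user.h carries start, length, read_length, and a trailing erase_counters[] array; the field names are inferred from the kernel side above, so verify against your ubi-user.h:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Hedged sketch: query erase counters for PEBs [0, 64) of ubi0. */
int main(void)
{
	int n = 64, fd, i;
	struct ubi_ecinfo_req *req;

	req = calloc(1, sizeof(*req) + n * sizeof(int32_t));
	if (!req)
		return 1;

	req->start = 0;		/* first PEB to query */
	req->length = n;	/* number of PEBs */

	fd = open("/dev/ubi0", O_RDONLY);
	if (fd < 0 || ioctl(fd, UBI_IOCECNFO, req) < 0) {
		perror("UBI_IOCECNFO");
		free(req);
		return 1;
	}

	/* read_length reports how many counters were filled;
	 * UBI_UNKNOWN marks bad or untracked PEBs. */
	for (i = 0; i < req->read_length; i++)
		printf("PEB %d: EC %d\n", req->start + i, req->erase_counters[i]);

	close(fd);
	free(req);
	return 0;
}
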
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 3c0c8eca4d51..d2a53961d8e2 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -10,7 +10,37 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/fault-inject.h>
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(fault_eccerr_attr);
+static DECLARE_FAULT_ATTR(fault_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_read_failure_attr);
+static DECLARE_FAULT_ATTR(fault_write_failure_attr);
+static DECLARE_FAULT_ATTR(fault_erase_failure_attr);
+static DECLARE_FAULT_ATTR(fault_power_cut_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_ebadmsg_attr);
+
+#define FAIL_ACTION(name, fault_attr) \
+bool should_fail_##name(void) \
+{ \
+ return should_fail(&fault_attr, 1); \
+}
+FAIL_ACTION(eccerr, fault_eccerr_attr)
+FAIL_ACTION(bitflips, fault_bitflips_attr)
+FAIL_ACTION(read_failure, fault_read_failure_attr)
+FAIL_ACTION(write_failure, fault_write_failure_attr)
+FAIL_ACTION(erase_failure, fault_erase_failure_attr)
+FAIL_ACTION(power_cut, fault_power_cut_attr)
+FAIL_ACTION(io_ff, fault_io_ff_attr)
+FAIL_ACTION(io_ff_bitflips, fault_io_ff_bitflips_attr)
+FAIL_ACTION(bad_hdr, fault_bad_hdr_attr)
+FAIL_ACTION(bad_hdr_ebadmsg, fault_bad_hdr_ebadmsg_attr)
+#endif
/**
* ubi_dump_flash - dump a region of flash.
@@ -212,6 +242,52 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
*/
static struct dentry *dfs_rootdir;
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static void dfs_create_fault_entry(struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("fault_inject", parent);
+ if (IS_ERR_OR_NULL(dir)) {
+ int err = dir ? PTR_ERR(dir) : -ENODEV;
+
+ pr_warn("UBI error: cannot create \"fault_inject\" debugfs directory, error %d\n",
+ err);
+ return;
+ }
+
+ fault_create_debugfs_attr("emulate_eccerr", dir,
+ &fault_eccerr_attr);
+
+ fault_create_debugfs_attr("emulate_read_failure", dir,
+ &fault_read_failure_attr);
+
+ fault_create_debugfs_attr("emulate_bitflips", dir,
+ &fault_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_write_failure", dir,
+ &fault_write_failure_attr);
+
+ fault_create_debugfs_attr("emulate_erase_failure", dir,
+ &fault_erase_failure_attr);
+
+ fault_create_debugfs_attr("emulate_power_cut", dir,
+ &fault_power_cut_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff", dir,
+ &fault_io_ff_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff_bitflips", dir,
+ &fault_io_ff_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr", dir,
+ &fault_bad_hdr_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr_ebadmsg", dir,
+ &fault_bad_hdr_ebadmsg_attr);
+}
+#endif
+
/**
* ubi_debugfs_init - create UBI debugfs directory.
*
@@ -232,6 +308,10 @@ int ubi_debugfs_init(void)
return err;
}
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ dfs_create_fault_entry(dfs_rootdir);
+#endif
+
return 0;
}
@@ -252,7 +332,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
- char buf[8];
+ char buf[16];
int val;
ubi = ubi_get_device(ubi_num);
@@ -272,7 +352,12 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures;
- else if (dent == d->dfs_emulate_power_cut) {
+ else if (dent == d->dfs_emulate_failures) {
+ snprintf(buf, sizeof(buf), "0x%04x\n", d->emulate_failures);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
@@ -287,8 +372,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
- }
- else {
+ } else {
count = -EINVAL;
goto out;
}
@@ -316,7 +400,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
struct ubi_device *ubi;
struct ubi_debug_info *d;
size_t buf_size;
- char buf[8] = {0};
+ char buf[16] = {0};
int val;
ubi = ubi_get_device(ubi_num);
@@ -330,7 +414,11 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
goto out;
}
- if (dent == d->dfs_power_cut_min) {
+ if (dent == d->dfs_emulate_failures) {
+ if (kstrtouint(buf, 0, &d->emulate_failures) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
count = -EINVAL;
goto out;
@@ -382,7 +470,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.open = simple_open,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -504,65 +591,72 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
unsigned long ubi_num = ubi->ubi_num;
struct ubi_debug_info *d = &ubi->dbg;
+ umode_t mode = S_IRUSR | S_IWUSR;
int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN, UBI_DFS_DIR_NAME,
ubi->ubi_num);
- if (n > UBI_DFS_DIR_LEN) {
+ if (n >= UBI_DFS_DIR_LEN) {
/* The array size is too small */
return -EINVAL;
}
d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
- d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+ d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
- d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
- d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
(void *)ubi_num, &eraseblk_count_fops);
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ d->dfs_emulate_failures = debugfs_create_file("emulate_failures",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+#endif
return 0;
}
/**
- * dbg_debug_exit_dev - free all debugfs files corresponding to device @ubi
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
* @ubi: UBI device description object
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
@@ -590,7 +684,7 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
- ubi->dbg.power_cut_counter += prandom_u32() % range;
+ ubi->dbg.power_cut_counter += get_random_u32_below(range);
}
return 0;
}
@@ -599,7 +693,5 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_counter)
return 0;
- ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
- ubi_ro_mode(ubi);
return 1;
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 118248a5d7d4..b2fd97548808 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -53,56 +53,315 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/**
- * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * The following function is a legacy implementation of the UBI fault-injection
+ * hook. Even when the more powerful fault-injection capabilities are in use,
+ * the legacy fault-injection interface is retained.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+
+static inline int ubi_dbg_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !get_random_u32_below(200);
+ return 0;
+}
+
+static inline int ubi_dbg_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(500);
+ return 0;
+}
+
+static inline int ubi_dbg_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(400);
+ return 0;
+}
+
+/**
+ * MASK_XXX: Mask for emulate_failures in ubi_debug_info. The mask is used to
+ * precisely control the type and process of fault injection.
+ */
+/* Emulate a power cut when writing EC/VID header */
+#define MASK_POWER_CUT_EC (1 << 0)
+#define MASK_POWER_CUT_VID (1 << 1)
+/* Emulate a power cut when writing data */
+#define MASK_POWER_CUT_DATA (1 << 2)
+/* Emulate bit-flips */
+#define MASK_BITFLIPS (1 << 3)
+/* Emulate an ECC error */
+#define MASK_ECCERR (1 << 4)
+/* Emulates -EIO during data read */
+#define MASK_READ_FAILURE (1 << 5)
+#define MASK_READ_FAILURE_EC (1 << 6)
+#define MASK_READ_FAILURE_VID (1 << 7)
+/* Emulates -EIO during data write */
+#define MASK_WRITE_FAILURE (1 << 8)
+/* Emulates -EIO when erasing a PEB */
+#define MASK_ERASE_FAILURE (1 << 9)
+/* Return UBI_IO_FF when reading EC/VID header */
+#define MASK_IO_FF_EC (1 << 10)
+#define MASK_IO_FF_VID (1 << 11)
+/* Return UBI_IO_FF_BITFLIPS when reading EC/VID header */
+#define MASK_IO_FF_BITFLIPS_EC (1 << 12)
+#define MASK_IO_FF_BITFLIPS_VID (1 << 13)
+/* Return UBI_IO_BAD_HDR when reading EC/VID header */
+#define MASK_BAD_HDR_EC (1 << 14)
+#define MASK_BAD_HDR_VID (1 << 15)
+/* Return UBI_IO_BAD_HDR_EBADMSG when reading EC/VID header */
+#define MASK_BAD_HDR_EBADMSG_EC (1 << 16)
+#define MASK_BAD_HDR_EBADMSG_VID (1 << 17)
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+
+extern bool should_fail_eccerr(void);
+extern bool should_fail_bitflips(void);
+extern bool should_fail_read_failure(void);
+extern bool should_fail_write_failure(void);
+extern bool should_fail_erase_failure(void);
+extern bool should_fail_power_cut(void);
+extern bool should_fail_io_ff(void);
+extern bool should_fail_io_ff_bitflips(void);
+extern bool should_fail_bad_hdr(void);
+extern bool should_fail_bad_hdr_ebadmsg(void);
+
+static inline bool ubi_dbg_fail_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_BITFLIPS)
+ return should_fail_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_write(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_WRITE_FAILURE)
+ return should_fail_write_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_erase(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ERASE_FAILURE)
+ return should_fail_erase_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_power_cut(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_power_cut();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_read(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_read_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_eccerr(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ECCERR)
+ return should_fail_eccerr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr_ebadmsg();
+ return false;
+}
+#else /* CONFIG_MTD_UBI_FAULT_INJECTION */
+
+#define ubi_dbg_fail_bitflip(u) false
+#define ubi_dbg_fail_write(u) false
+#define ubi_dbg_fail_erase(u) false
+#define ubi_dbg_fail_power_cut(u, c) false
+#define ubi_dbg_fail_read(u, c) false
+#define ubi_dbg_fail_eccerr(u) false
+#define ubi_dbg_fail_ff(u, c) false
+#define ubi_dbg_fail_ff_bitflips(u, v) false
+#define ubi_dbg_fail_bad_hdr(u, c) false
+#define ubi_dbg_fail_bad_hdr_ebadmsg(u, c) false
+
+#endif
+
+/**
+ * ubi_dbg_is_power_cut - if it is time to emulate power cut.
* @ubi: UBI device description object
*
- * Returns non-zero if the UBI background thread is disabled for testing
- * purposes.
+ * Returns true if power cut should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_power_cut(struct ubi_device *ubi,
+ unsigned int caller)
{
- return ubi->dbg.disable_bgt;
+ if (ubi_dbg_power_cut(ubi, caller))
+ return true;
+ return ubi_dbg_fail_power_cut(ubi, caller);
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
* @ubi: UBI device description object
*
- * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ * Returns true if a bit-flip should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_bitflips)
- return !(prandom_u32() % 200);
- return 0;
+ if (ubi_dbg_bitflip(ubi))
+ return true;
+ return ubi_dbg_fail_bitflip(ubi);
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
* @ubi: UBI device description object
*
- * Returns non-zero if a write failure should be emulated, otherwise returns
- * zero.
+ * Returns true if a write failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 500);
- return 0;
+ if (ubi_dbg_write_failure(ubi))
+ return true;
+ return ubi_dbg_fail_write(ubi);
}
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
* @ubi: UBI device description object
*
- * Returns non-zero if an erase failure should be emulated, otherwise returns
- * zero.
+ * Returns true if an erase failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 400);
- return 0;
+ if (ubi_dbg_erase_failure(ubi))
+ return true;
+ return ubi_dbg_fail_erase(ubi);
+}
+
+/**
+ * ubi_dbg_is_eccerr - if it is time to emulate an ECC error.
+ * @ubi: UBI device description object
+ *
+ * Returns true if an ECC error should be emulated, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_eccerr(const struct ubi_device *ubi)
+{
+ return ubi_dbg_fail_eccerr(ubi);
+}
+
+/**
+ * ubi_dbg_is_read_failure - if it is time to emulate a read failure.
+ * @ubi: UBI device description object
+ *
+ * Returns true if a read failure should be emulated, otherwise returns
+ * false.
+ */
+static inline bool ubi_dbg_is_read_failure(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_read(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff - if it is time to emulate that the read region is all 0xFF.
+ * @ubi: UBI device description object
+ *
+ * Returns true if the read region should be emulated as all 0xFF, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff_bitflips - if it is time to emulate that the read region is
+ * all 0xFF, with an error reported by the MTD driver
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if the read region should be emulated as all 0xFF with an
+ * error reported by the MTD driver, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff_bitflips(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr - if it is time to emulate a bad header
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr_ebadmsg - if it is time to emulate a bad header with
+ * ECC error.
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header with ECC error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr_ebadmsg(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
}
static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
@@ -125,5 +384,4 @@ static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
ubi->dbg.chk_fastmap = 1;
}
-int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */
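
The predicates defined above are meant to be consulted from the I/O paths (the io.c hunks belong to this series but are not shown in this section). A hedged sketch of the expected call pattern at a read site, with example_read_hook() as an illustrative stand-in for the real io.c hook:

/* Hedged sketch: a read path consults the fault-injection predicate
 * with the mask describing this call site; on a hit it fails the
 * read exactly as a real -EIO from the MTD driver would.
 */
static int example_read_hook(struct ubi_device *ubi, int pnum)
{
	if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) {
		ubi_warn(ubi, "emulating a read failure on PEB %d", pnum);
		return -EIO;
	}
	return 0;
}
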
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ccc5979642b7..c7ba7a15c9f7 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -33,9 +33,6 @@
#include <linux/err.h>
#include "ubi.h"
-/* Number of physical eraseblocks reserved for atomic LEB change operation */
-#define EBA_RESERVED_PEBS 1
-
/**
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
* @pnum: the physical eraseblock number attached to the LEB
@@ -61,7 +58,7 @@ struct ubi_eba_table {
};
/**
- * next_sqnum - get next sequence number.
+ * ubi_next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
* This function returns next sequence number to use, which is just the current
@@ -377,7 +374,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
*
* This function locks a logical eraseblock for writing if there is no
* contention and does nothing if there is contention. Returns %0 in case of
- * success, %1 in case of contention, and and a negative error code in case of
+ * success, %1 in case of contention, and a negative error code in case of
* failure.
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
@@ -946,7 +943,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
- int pnum, opnum, err, vol_id = vol->vol_id;
+ int pnum, opnum, err, err2, vol_id = vol->vol_id;
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
@@ -981,10 +978,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
out_put:
up_read(&ubi->fm_eba_sem);
- if (err && pnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- else if (!err && opnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err && pnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ pnum, err2);
+ }
+ } else if (!err && opnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ opnum, err2);
+ }
+ }
return err;
}
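
The err/err2 split above fixes a subtle bug: the old code assigned the result of
the cleanup call ubi_wl_put_peb() to err, so a successful cleanup could hide the
primary write failure (and a failed cleanup could replace it). A minimal
standalone sketch of the pattern, with hypothetical error values:

    #include <stdio.h>

    static int cleanup(void)
    {
    	return -5;		/* assume the cleanup step fails too */
    }

    int main(void)
    {
    	int err = -28;		/* primary failure, e.g. -ENOSPC */
    	int err2;

    	err2 = cleanup();
    	if (err2)
    		fprintf(stderr, "cleanup failed: %d\n", err2);

    	printf("caller sees %d, not %d\n", err, err2);
    	return 0;
    }
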
@@ -1450,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+ /*
+ * The volumes_lock is needed here to prevent the stale old eba_tbl
+ * from being updated while it is copied in the ubi_resize_volume()
+ * process.
+ */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1551,6 +1564,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
GFP_KERNEL);
if (!fm_eba[i]) {
ret = -ENOMEM;
+ kfree(scan_eba[i]);
goto out_free;
}
@@ -1586,7 +1600,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
}
out_free:
- for (i = 0; i < num_volumes; i++) {
+ while (--i >= 0) {
if (!ubi->volumes[i])
continue;
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..e2bc1122bfd3 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ if (!ubi->free.rb_node)
goto out;
if (anchor)
@@ -97,17 +97,105 @@ out:
return e;
}
+/*
+ * wait_free_pebs_for_pool - wait until there are enough free PEBs
+ * @ubi: UBI device description object
+ *
+ * Wait and execute do_work until there are enough free PEBs, filling the
+ * pools as much as we can. This reduces how often the pools must be
+ * refilled, which in turn reduces the fastmap update frequency.
+ */
+static void wait_free_pebs_for_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int free, expect_free, executed;
+ /*
+ * There are at least the following free PEBs reserved by UBI:
+ * 1. WL_RESERVED_PEBS[1]
+ * 2. EBA_RESERVED_PEBS[1]
+ * 3. fm pebs - 1: twice the fastmap size, minus one fastmap copy
+ *    and the fm_anchor
+ * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
+ */
+ int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
+ ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;
+
+ do {
+ spin_lock(&ubi->wl_lock);
+ free = ubi->free_count;
+ free += pool->size - pool->used + wl_pool->size - wl_pool->used;
+ expect_free = reserved + ubi->beb_rsvd_pebs;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Break out if there is no work left or a work execution failed,
+ * given that erase_worker reschedules itself when the MTD layer
+ * returns -EBUSY during system shutdown.
+ */
+ if (do_work(ubi, &executed) || !executed)
+ break;
+ } while (free < expect_free);
+}
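
A standalone sketch of the reservation arithmetic above, using hypothetical
geometry (a fastmap of 3 LEBs, 64 PEBs reserved for the pools, 20 PEBs reserved
for bad-block handling); it only mirrors the computation of 'reserved' and
'expect_free':

    #include <stdio.h>

    #define WL_RESERVED_PEBS	1
    #define EBA_RESERVED_PEBS	1

    int main(void)
    {
    	int fm_lebs = 3;		/* assumed ubi->fm_size / ubi->leb_size */
    	int fm_pool_rsv_cnt = 64;	/* assumed pool reservation */
    	int beb_rsvd_pebs = 20;		/* assumed bad-PEB reserve */

    	int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
    		       fm_lebs - 1 + fm_pool_rsv_cnt;
    	int expect_free = reserved + beb_rsvd_pebs;

    	/* the refill loop keeps doing work until free >= expect_free */
    	printf("reserved=%d expect_free=%d\n", reserved, expect_free);
    	return 0;
    }
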
+
+/*
+ * left_free_count - returns the number of free PEBs to fill fm pools
+ * @ubi: UBI device description object
+ *
+ * This helper function returns the number of free PEBs (minus the PEBs
+ * reserved for fastmap) available to fill fm_pool and fm_wl_pool.
+ */
+static int left_free_count(struct ubi_device *ubi)
+{
+ int fm_used = 0; /* fastmap non-anchor PEBs */
+
+ if (!ubi->free.rb_node)
+ return 0;
+
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - fm_used;
+}
+
+/*
+ * can_fill_pools - whether free PEBs will be left after filling pools
+ * @ubi: UBI device description object
+ * @free: current number of free PEBs
+ *
+ * Return %1 if free PEBs will still be left after filling the pools,
+ * otherwise return %0.
+ */
+static int can_fill_pools(struct ubi_device *ubi, int free)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int pool_need = pool->max_size - pool->size +
+ wl_pool->max_size - wl_pool->size;
+
+ if (free - pool_need < 1)
+ return 0;
+
+ return 1;
+}
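
A minimal sketch tying the two helpers above together, with hypothetical pool
sizes: left_free_count() withholds the non-anchor fastmap PEBs, and
can_fill_pools() reports whether a free PEB would remain after topping up both
pools:

    #include <stdio.h>

    int main(void)
    {
    	int free_count = 40;	/* assumed ubi->free_count */
    	int fm_lebs = 3;	/* assumed ubi->fm_size / ubi->leb_size */
    	int pool_room = 25;	/* assumed fm_pool max_size - size */
    	int wl_pool_room = 25;	/* assumed fm_wl_pool max_size - size */

    	int left_free = free_count - (fm_lebs - 1);	/* left_free_count() */
    	int pool_need = pool_room + wl_pool_room;
    	int can_fill = (left_free - pool_need >= 1);	/* can_fill_pools() */

    	printf("left_free=%d can_fill=%d\n", left_free, can_fill);
    	return 0;
    }
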
+
/**
- * ubi_refill_pools - refills all fastmap PEB pools.
+ * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
* @ubi: UBI device description object
*/
-void ubi_refill_pools(struct ubi_device *ubi)
+void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
int enough;
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ wait_free_pebs_for_pool(ubi);
+
+ down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+
spin_lock(&ubi->wl_lock);
return_unused_pool_pebs(ubi, wl_pool);
@@ -119,22 +207,20 @@ void ubi_refill_pools(struct ubi_device *ubi)
if (ubi->fm_anchor) {
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
- }
- if (ubi->fm_next_anchor) {
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->free_count++;
+ ubi->fm_anchor = NULL;
}
- /* All available PEBs are in ubi->free, now is the time to get
- * the best anchor PEBs.
- */
- ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (!ubi->fm_disabled)
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
+ * the best anchor PEBs.
+ */
+ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!ubi->free.rb_node)
+ if (left_free_count(ubi) <= 0)
break;
e = wl_get_wle(ubi);
@@ -147,11 +233,13 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ int left_free = left_free_count(ubi);
+
+ if (left_free <= 0)
break;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
+ !can_fill_pools(ubi, left_free));
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
@@ -186,7 +274,7 @@ static int produce_free_peb(struct ubi_device *ubi)
while (!ubi->free.rb_node && ubi->works_count) {
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err)
return err;
@@ -253,6 +341,71 @@ out:
return ret;
}
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ * @need_fill: whether to fill wear-leveling pool when no PEBs are found
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+ bool need_fill)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size) {
+ if (need_fill && !ubi->fm_work_scheduled) {
+ /*
+ * We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as
+ * possible.
+ */
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
+ return NULL;
+ }
+
+ pnum = pool->pebs[pool->used];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether to trigger a wear leveling work.
+ * UBI fetches free PEBs from wl_pool, but we check free PEBs from both
+ * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
+ * may be moved into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+ int ec;
+ struct ubi_wl_entry *e;
+
+ if (!ubi->used.rb_node)
+ return false;
+
+ e = next_peb_for_wl(ubi, false);
+ if (!e) {
+ if (!ubi->free.rb_node)
+ return false;
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
+ ec = e->ec;
+ } else {
+ ec = e->ec;
+ if (ubi->free.rb_node) {
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
+ ec = max(ec, e->ec);
+ }
+ }
+ e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+ return ec - e->ec >= UBI_WL_THRESHOLD;
+}
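
Reduced to plain integers, the trigger above boils down to an erase-counter
spread check against UBI_WL_THRESHOLD (4096, the usual Kconfig default, is
assumed here); a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    #define UBI_WL_THRESHOLD	4096	/* assumed default */

    static bool need_wl(int candidate_ec, int lowest_used_ec)
    {
    	return candidate_ec - lowest_used_ec >= UBI_WL_THRESHOLD;
    }

    int main(void)
    {
    	printf("%d\n", need_wl(5000, 500));	/* 1: schedule wear leveling */
    	printf("%d\n", need_wl(900, 500));	/* 0: spread is acceptable */
    	return 0;
    }
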
+
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
@@ -286,20 +439,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we have a next anchor? */
- if (!ubi->fm_next_anchor) {
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (!ubi->fm_next_anchor)
- /* Tell wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
}
- /* Do wear leveling to get a new anchor PEB or check the
- * existing next anchor candidate.
- */
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -371,8 +530,6 @@ int ubi_is_erase_work(struct ubi_work *wrk)
static void ubi_fastmap_close(struct ubi_device *ubi)
{
- int i;
-
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
@@ -381,16 +538,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
- if (ubi->fm_next_anchor) {
- return_unused_peb(ubi, ubi->fm_next_anchor);
- ubi->fm_next_anchor = NULL;
- }
-
- if (ubi->fm) {
- for (i = 0; i < ubi->fm->used_blocks; i++)
- kfree(ubi->fm->e[i]);
- }
- kfree(ubi->fm);
+ ubi_free_fastmap(ubi);
}
/**
@@ -404,7 +552,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
- if (e && !ubi->fm_disabled && !ubi->fm &&
+ if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 022af59906aa..9a4940874be5 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -20,8 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -34,7 +33,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
*/
static inline void free_seen(unsigned long *seen)
{
- kfree(seen);
+ bitmap_free(seen);
}
/**
@@ -86,15 +85,16 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
/**
- * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * new_fm_vbuf() - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*
@@ -106,7 +106,7 @@ static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
- new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!new)
goto out;
@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err == UBI_IO_FF_BITFLIPS)
scrub = 1;
- add_aeb(ai, free, pnum, ec, scrub);
+ ret = add_aeb(ai, free, pnum, ec, scrub);
+ if (ret)
+ goto out;
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 0);
+ ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
}
/* read EC values from used list */
@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 0);
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
}
/* read EC values from scrub list */
@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 1);
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
}
/* read EC values from erase list */
@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 1);
+ ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
}
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
@@ -1098,8 +1108,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
if (!ubi->fast_attach)
return 0;
- vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
if (!vol->checkmap)
return -ENOMEM;
@@ -1108,7 +1117,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
- kfree(vol->checkmap);
+ bitmap_free(vol->checkmap);
}
/**
@@ -1220,17 +1229,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
- if (ubi->fm_next_anchor) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
- fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
- set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
- fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
- free_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
@@ -1393,53 +1391,6 @@ out:
}
/**
- * erase_block - Manually erase a PEB.
- * @ubi: UBI device object
- * @pnum: PEB to be erased
- *
- * Returns the new EC value on success, < 0 indicates an internal error.
- */
-static int erase_block(struct ubi_device *ubi, int pnum)
-{
- int ret;
- struct ubi_ec_hdr *ec_hdr;
- long long ec;
-
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ec_hdr)
- return -ENOMEM;
-
- ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
- if (ret < 0)
- goto out;
- else if (ret && ret != UBI_IO_BITFLIPS) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = ubi_io_sync_erase(ubi, pnum, 0);
- if (ret < 0)
- goto out;
-
- ec = be64_to_cpu(ec_hdr->ec);
- ec += ret;
- if (ec > UBI_MAX_ERASECOUNTER) {
- ret = -EINVAL;
- goto out;
- }
-
- ec_hdr->ec = cpu_to_be64(ec);
- ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
- if (ret < 0)
- goto out;
-
- ret = ec;
-out:
- kfree(ec_hdr);
- return ret;
-}
-
-/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
*
@@ -1465,7 +1416,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
ubi->fm = NULL;
ret = -ENOMEM;
- fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ fm = kzalloc(sizeof(*fm), GFP_NOFS);
if (!fm)
goto out;
@@ -1541,11 +1492,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
- down_write(&ubi->fm_protect);
- down_write(&ubi->work_sem);
- down_write(&ubi->fm_eba_sem);
-
- ubi_refill_pools(ubi);
+ ubi_refill_pools_and_lock(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
@@ -1554,7 +1501,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
@@ -1579,7 +1526,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (!tmp_e) {
if (old_fm && old_fm->e[i]) {
- ret = erase_block(ubi, old_fm->e[i]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
@@ -1631,7 +1578,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
- ret = erase_block(ubi, old_fm->e[0]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
@@ -1643,7 +1590,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
goto err;
}
new_fm->e[0] = old_fm->e[0];
- new_fm->e[0]->ec = ret;
old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8a7306cc1947..915eb64cb001 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -195,7 +195,19 @@ retry:
if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
- err = UBI_IO_BITFLIPS;
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) {
+ ubi_warn(ubi, "cannot read %d bytes from PEB %d:%d (emulated)",
+ len, pnum, offset);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_eccerr(ubi)) {
+ ubi_warn(ubi, "ECC error (emulated) while reading %d bytes from PEB %d:%d, read %zd bytes",
+ len, pnum, offset, read);
+ return -EBADMSG;
}
}
@@ -782,7 +794,36 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* If there was %-EBADMSG, but the header CRC is still OK, report about
* a bit-flip to force scrubbing on this PEB.
*/
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_EC)) {
+ ubi_warn(ubi, "cannot read EC header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_EC)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_EC)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_EC)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_EC)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -821,8 +862,13 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_EC)) {
+ ubi_warn(ubi, "emulating a power cut when writing EC header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
+
+ memset((char *)ec_hdr + UBI_EC_HDR_SIZE, 0xFF, ubi->ec_hdr_alsize - UBI_EC_HDR_SIZE);
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
@@ -1029,7 +1075,36 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
return -EINVAL;
}
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_VID)) {
+ ubi_warn(ubi, "cannot read VID header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_VID)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_VID)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_VID)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_VID)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -1071,8 +1146,19 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_VID)) {
+ ubi_warn(ubi, "emulating a power cut when writing VID header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
+
+ if (ubi->vid_hdr_shift) {
+ memset((char *)p, 0xFF, ubi->vid_hdr_shift);
+ memset((char *)p + ubi->vid_hdr_shift + UBI_VID_HDR_SIZE, 0xFF,
+ ubi->vid_hdr_alsize - (ubi->vid_hdr_shift + UBI_VID_HDR_SIZE));
+ } else {
+ memset((char *)p + UBI_VID_HDR_SIZE, 0xFF, ubi->vid_hdr_alsize - UBI_VID_HDR_SIZE);
+ }
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
@@ -1147,7 +1233,7 @@ fail:
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
- * This function returns zero if the erase counter header is all right and and
+ * This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 0fce99ff29b5..df0a5a57b072 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -79,6 +79,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
vi->name_len = vol->name_len;
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
+ vi->dev = &vol->dev;
}
/**
@@ -151,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -279,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 and sets @ubi_num and @vol_id on success, or a negative
+ * error code otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
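
The helper relies on UBI's character-device numbering, where a volume's minor
number is its volume ID plus one (minor 0 is the UBI device itself). A
userspace sketch of the same decoding, assuming /dev/ubi0_0 exists:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>

    int main(void)
    {
    	struct stat st;

    	if (stat("/dev/ubi0_0", &st) != 0 || !S_ISCHR(st.st_mode))
    		return 1;

    	/* in the kernel, the major is mapped to ubi_num via ubi_major2num() */
    	printf("major=%u vol_id=%u\n", major(st.st_rdev),
    	       minor(st.st_rdev) - 1);
    	return 0;
    }
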
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -289,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
@@ -770,33 +791,6 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
-/**
- * ubi_flush - flush UBI work queue.
- * @ubi_num: UBI device to flush work queue
- * @vol_id: volume id to flush for
- * @lnum: logical eraseblock number to flush for
- *
- * This function executes all pending works for a particular volume id / logical
- * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
- * a wildcard for all of the corresponding volume numbers or logical
- * eraseblock numbers. It returns zero in case of success and a negative error
- * code in case of failure.
- */
-int ubi_flush(int ubi_num, int vol_id, int lnum)
-{
- struct ubi_device *ubi;
- int err = 0;
-
- ubi = ubi_get_device(ubi_num);
- if (!ubi)
- return -ENODEV;
-
- err = ubi_wl_flush(ubi, vol_id, lnum);
- ubi_put_device(ubi);
- return err;
-}
-EXPORT_SYMBOL_GPL(ubi_flush);
-
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 7b30c8ee3e82..1794d66b6eb7 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -10,7 +10,7 @@
#include "ubi.h"
/**
- * calc_data_len - calculate how much real data is stored in a buffer.
+ * ubi_calc_data_len - calculate how much real data is stored in a buffer.
* @ubi: UBI device description object
* @buf: a buffer with the contents of the physical eraseblock
* @length: the buffer length
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 000000000000..34f8c1d3cdee
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint32_t lnum;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = from % unv->usable_leb_size;
+ lnum = from / unv->usable_leb_size;
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return 0;
+}
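
The read loop above walks consecutive LEBs, clamping each read to what is left
in the current LEB. A standalone sketch of the offset arithmetic, with a
hypothetical 126976-byte usable LEB size:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int leb_size = 126976;	/* assumed usable_leb_size */
    	unsigned int from = 200000, bytes = 80000;
    	unsigned int lnum = from / leb_size;
    	unsigned int offs = from % leb_size;

    	while (bytes) {
    		unsigned int chunk = leb_size - offs;

    		if (chunk > bytes)
    			chunk = bytes;
    		printf("ubi_read LEB %u, offset %u, %u bytes\n",
    		       lnum, offs, chunk);
    		lnum++;		/* continue at the start of the next LEB */
    		offs = 0;
    		bytes -= chunk;
    	}
    	return 0;
    }
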
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 386db0598e95..2c9cd3b6434f 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -131,7 +131,7 @@ enum {
* is changed radically. This field is duplicated in the volume identifier
* header.
*
- * The @vid_hdr_offset and @data_offset fields contain the offset of the the
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
* volume identifier header and user data, relative to the beginning of the
* physical eraseblock. These values have to be the same for all physical
* eraseblocks.
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..44803d3329f4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -82,11 +82,14 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
#define UBI_DFS_DIR_NAME "ubi%d"
#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
/*
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
- * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also also there was a data
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
* integrity error reported by the MTD driver
* (uncorrectable ECC error in case of NAND)
* UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
@@ -142,17 +145,6 @@ enum {
UBI_BAD_FASTMAP,
};
-/*
- * Flags for emulate_power_cut in ubi_debug_info
- *
- * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
- * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
- */
-enum {
- POWER_CUT_EC_WRITE = 0x01,
- POWER_CUT_VID_WRITE = 0x02,
-};
-
/**
* struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
* flash.
@@ -281,7 +273,7 @@ struct ubi_eba_leb_desc {
/**
* struct ubi_volume - UBI volume description data structure.
- * @dev: device object to make use of the the Linux device model
+ * @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
@@ -345,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -401,6 +394,7 @@ struct ubi_volume_desc {
* @power_cut_counter: count down for writes left until emulated power cut
* @power_cut_min: minimum number of writes before emulating a power cut
* @power_cut_max: maximum number of writes until emulating a power cut
+ * @emulate_failures: emulate failures for testing purposes
* @dfs_dir_name: name of debugfs directory containing files of this UBI device
* @dfs_dir: direntry object of the UBI device debugfs directory
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks
@@ -412,6 +406,7 @@ struct ubi_volume_desc {
* @dfs_emulate_power_cut: debugfs knob to emulate power cuts
* @dfs_power_cut_min: debugfs knob for minimum writes before power cut
* @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ * @dfs_emulate_failures: debugfs entry to control the fault injection type
*/
struct ubi_debug_info {
unsigned int chk_gen:1;
@@ -424,7 +419,8 @@ struct ubi_debug_info {
unsigned int power_cut_counter;
unsigned int power_cut_min;
unsigned int power_cut_max;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ unsigned int emulate_failures;
+ char dfs_dir_name[UBI_DFS_DIR_LEN];
struct dentry *dfs_dir;
struct dentry *dfs_chk_gen;
struct dentry *dfs_chk_io;
@@ -435,11 +431,12 @@ struct ubi_debug_info {
struct dentry *dfs_emulate_power_cut;
struct dentry *dfs_power_cut_min;
struct dentry *dfs_power_cut_max;
+ struct dentry *dfs_emulate_failures;
};
/**
* struct ubi_device - UBI device description structure
- * @dev: UBI device object to use the the Linux device model
+ * @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
@@ -489,9 +486,9 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ * @fm_pool_rsv_cnt: Number of reserved PEBs for filling pool/wl_pool
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -565,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -601,8 +599,8 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
- struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
+ int fm_pool_rsv_cnt;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -816,7 +814,7 @@ extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
-extern struct class ubi_class;
+extern const struct class ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
@@ -833,7 +831,6 @@ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai);
int ubi_attach(struct ubi_device *ubi, int force_scan);
-void ubi_destroy_ai(struct ubi_attach_info *ai);
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -904,6 +901,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan);
/* wl.c */
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture);
int ubi_wl_get_peb(struct ubi_device *ubi);
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
int pnum, int torture);
@@ -916,7 +914,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int lnum, int torture);
int ubi_is_erase_work(struct ubi_work *wrk);
-void ubi_refill_pools(struct ubi_device *ubi);
+void ubi_refill_pools_and_lock(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
@@ -939,7 +937,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024);
+ int vid_hdr_offset, int max_beb_per1024,
+ bool disable_fm, bool need_resv_pool);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
@@ -957,6 +956,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
@@ -969,10 +969,22 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai);
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
+static inline void ubi_free_fastmap(struct ubi_device *ubi)
+{
+ if (ubi->fm) {
+ int i;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kmem_cache_free(ubi_wl_entry_slab, ubi->fm->e[i]);
+ kfree(ubi->fm);
+ ubi->fm = NULL;
+ }
+}
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
+static inline void ubi_free_fastmap(struct ubi_device *ubi) { }
#endif
/* block.c */
@@ -1125,6 +1137,19 @@ static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
return vidb->hdr;
}
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
/*
* This function is equivalent to 'ubi_io_read()', but @offset is relative to
* the beginning of the logical eraseblock, not to the beginning of the
@@ -1146,20 +1171,13 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
int pnum, int offset, int len)
{
ubi_assert(offset >= 0);
- return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
-}
-/**
- * ubi_ro_mode - switch to read-only mode.
- * @ubi: UBI device description object
- */
-static inline void ubi_ro_mode(struct ubi_device *ubi)
-{
- if (!ubi->ro_mode) {
- ubi->ro_mode = 1;
- ubi_warn(ubi, "switch to read-only mode");
- dump_stack();
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_DATA)) {
+ ubi_warn(ubi, "emulating a power cut when writing data");
+ ubi_ro_mode(ubi);
+ return -EROFS;
}
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
}
/**
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 139ee132bfbc..e5cf3bdca3b0 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- struct ubi_device *ubi;
-
- ubi = ubi_get_device(vol->ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
+ struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
return -ENODEV;
}
/* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
vol->ref_count -= 1;
ubi_assert(vol->ref_count >= 0);
spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
return ret;
}
@@ -130,6 +124,33 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ fwnode_handle_put(fw_vols);
+ return fw_vol;
+ }
+ fwnode_handle_put(fw_vols);
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -195,7 +216,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -229,6 +250,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -315,7 +337,6 @@ out_mapping:
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
@@ -359,6 +380,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark the volume as dead at this point to prevent anyone from
+ * taking a reference to the volume from now on. This is necessary
+ * because we have to release the spinlock before calling
+ * ubi_volume_notify.
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -415,6 +449,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -460,10 +495,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,14 +509,16 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
- goto out_acc;
+ goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -500,7 +540,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -508,19 +547,25 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
- kfree(new_eba_tbl);
+ ubi_eba_destroy_table(new_eba_tbl);
return err;
}
@@ -587,6 +632,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
+ vol_release(&vol->dev);
return err;
}
@@ -596,16 +642,16 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
- if (err)
- goto out_cdev;
+ if (err) {
+ cdev_del(&vol->cdev);
+ put_device(&vol->dev);
+ return err;
+ }
self_check_volumes(ubi);
return err;
-
-out_cdev:
- cdev_del(&vol->cdev);
- return err;
}
/**
@@ -630,7 +676,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
* @ubi: UBI device description object
* @vol_id: volume ID
*
- * Returns zero if volume is all right and a a negative error code if not.
+ * Returns zero if volume is all right and a negative error code if not.
*/
static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
@@ -783,7 +829,7 @@ fail:
* self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
- * Returns zero if volumes are all right and a a negative error code if not.
+ * Returns zero if volumes are all right and a negative error code if not.
*/
static int self_check_volumes(struct ubi_device *ubi)
{
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2ec..6e5489e233dd 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
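
Worked example of the slot arithmetic above, assuming the usual 172-byte
on-flash volume-table record and a 126976-byte LEB: 126976 / 172 = 738 records
fit, so vtbl_slots is capped at UBI_MAX_VOLUMES (128); the new check only
rejects the degenerate case of a LEB smaller than a single record:

    #include <stdio.h>

    #define UBI_VTBL_RECORD_SIZE	172	/* assumed sizeof(struct ubi_vtbl_record) */
    #define UBI_MAX_VOLUMES		128

    int main(void)
    {
    	int leb_size = 126976;	/* assumed LEB size */
    	int slots;

    	if (leb_size < UBI_VTBL_RECORD_SIZE)
    		return 1;	/* the new -EINVAL case */

    	slots = leb_size / UBI_VTBL_RECORD_SIZE;
    	if (slots > UBI_MAX_VOLUMES)
    		slots = UBI_MAX_VOLUMES;
    	printf("vtbl_slots=%d\n", slots);	/* 128 */
    	return 0;
    }
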
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..fbd399cf6503 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -165,7 +165,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
- * wl_tree_destroy - destroy a wear-leveling entry.
+ * wl_entry_destroy - destroy a wear-leveling entry.
* @ubi: UBI device description object
* @e: the wear-leveling entry to destroy
*
@@ -181,11 +181,13 @@ static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* do_work - do one pending work.
* @ubi: UBI device description object
+ * @executed: whether a work was executed
*
* This function returns zero in case of success and a negative error code in
- * case of failure.
+ * case of failure. If @executed is not NULL, it is set to %1 when a work
+ * was executed, otherwise to %0.
*/
-static int do_work(struct ubi_device *ubi)
+static int do_work(struct ubi_device *ubi, int *executed)
{
int err;
struct ubi_work *wrk;
@@ -203,9 +205,13 @@ static int do_work(struct ubi_device *ubi)
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
+ if (executed)
+ *executed = 0;
return 0;
}
+ if (executed)
+ *executed = 1;
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
@@ -311,12 +317,14 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
+ * @pick_max: pick the PEB even if its erase counter is beyond 'min_ec + @diff'
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
- struct rb_root *root, int diff)
+ struct rb_root *root, int diff,
+ int pick_max)
{
struct rb_node *p;
struct ubi_wl_entry *e;
@@ -330,9 +338,11 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct ubi_wl_entry *e1;
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
- if (e1->ec >= max)
+ if (e1->ec >= max) {
+ if (pick_max)
+ e = e1;
p = p->rb_left;
- else {
+ } else {
p = p->rb_right;
e = e1;
}
@@ -361,12 +371,15 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best
- * WL entry such that fastmap can use the anchor PEB later. */
+ /*
+ * If no fastmap has been written and fm_anchor is not
+ * reserved and this WL entry can be used as anchor PEB
+ * hold it back and return the second best WL entry such
+ * that fastmap can use the anchor PEB later.
+ */
e = may_reserve_for_fm(ubi, e, root);
} else
- e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
return e;
}
@@ -376,7 +389,7 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
* refill_wl_user_pool().
* @ubi: UBI device description object
*
- * This function returns a a wear leveling entry in case of success and
+ * This function returns a wear leveling entry in case of success and
* NULL in case of failure.
*/
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
@@ -427,16 +440,15 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
}
/**
- * sync_erase - synchronously erase a physical eraseblock.
+ * ubi_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
- * @e: the the physical eraseblock to erase
+ * @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -575,7 +587,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
- * @nested: denotes whether the work_sem is already held in read mode
+ * @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -670,7 +682,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!next_peb_for_wl(ubi, true) ||
+#else
if (!ubi->free.rb_node ||
+#endif
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +705,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
- if (e1 && ubi->fm_next_anchor &&
- (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
- /* fm_next_anchor is no longer considered a good anchor
- * candidate.
+ /*
+ * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->fm_next_anchor = NULL;
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -830,7 +846,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_not_moved;
}
if (err == MOVE_RETRY) {
- scrubbing = 1;
+ /*
+ * For the source PEB:
+ * 1. A scrub type PEB, for which scrubbing is set, is put
+ *    back into the ubi->scrub list.
+ * 2. A non-scrub type PEB is put back into the ubi->used
+ *    list.
+ */
+ keep = 1;
dst_leb_clean = 1;
goto out_not_moved;
}
@@ -886,8 +909,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
- if (e2)
+ if (e2) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+ }
goto out_ro;
}
@@ -969,11 +995,11 @@ out_error:
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
- wl_entry_destroy(ubi, e1);
- wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1003,8 +1029,6 @@ out_cancel:
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
- struct ubi_wl_entry *e1;
- struct ubi_wl_entry *e2;
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
@@ -1014,9 +1038,16 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
/*
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
- * the WL worker has to be scheduled anyway.
+ * WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!need_wear_leveling(ubi))
+ goto out_unlock;
+#else
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+
if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
@@ -1028,10 +1059,11 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
+#endif
dbg_wl("schedule wear-leveling");
} else
dbg_wl("schedule scrubbing");
@@ -1081,16 +1113,17 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
- err = sync_erase(ubi, e, wl_wrk->torture);
+ err = ubi_sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
- /* Abort anchor production, if needed it will be
+ /*
+ * Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
- ubi->fm_next_anchor = e;
+ ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
@@ -1117,16 +1150,20 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1242,6 +1279,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
+ if (!e) {
+ /*
+ * This wl entry has been removed due to an error by another
+ * process (e.g. the wear-leveling worker). That process
+ * (except __erase_worker, which cannot run concurrently with
+ * ubi_wl_put_peb) will have switched UBI to read-only mode
+ * at the same time, so just ignore this wl entry.
+ */
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ }
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
@@ -1453,7 +1502,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
* ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
- * @force: dont't read the block, assume bitflips happened and take action.
+ * @force: don't read the block, assume bitflips happened and take action.
*
* This function reads the given eraseblock and checks if bitflips occured.
* In case of bitflips, the eraseblock is scheduled for scrubbing.
@@ -1656,7 +1705,7 @@ int ubi_thread(void *u)
}
spin_unlock(&ubi->wl_lock);
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err) {
ubi_err(ubi, "%s: work failed with error code %d",
ubi->bgt_name, err);
@@ -1719,7 +1768,7 @@ static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync
ubi->lookuptbl[e->pnum] = e;
if (sync) {
- err = sync_erase(ubi, e, false);
+ err = ubi_sync_erase(ubi, e, false);
if (err)
goto out_free;
@@ -2041,7 +2090,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
self_check_in_wl_tree(ubi, e, &ubi->free);
ubi->free_count--;
ubi_assert(ubi->free_count >= 0);
@@ -2067,7 +2116,7 @@ static int produce_free_peb(struct ubi_device *ubi)
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
spin_lock(&ubi->wl_lock);
if (err)
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index c93a53293786..a69169c35e31 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,11 +5,16 @@
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+ bool need_fill);
+static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
- /* Reserve enough LEBs to store two fastmaps. */
- *count += (ubi->fm_size / ubi->leb_size) * 2;
+ if (ubi->fm_disabled)
+ ubi->fm_pool_rsv_cnt = 0;
+ /* Reserve enough LEBs to store two fastmaps and to fill pools. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2 + ubi->fm_pool_rsv_cnt;
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
}
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,