author		Zhihao Cheng <chengzhihao1@huawei.com>	2023-08-28 14:38:38 +0800
committer	Richard Weinberger <richard@nod.at>	2023-10-28 22:41:01 +0200
commit		8ff4e620ac93a5d332735e4f5a4ff31d80682b9a
tree		c5ec2b47c06be628c7e8e549c757e6d4a321bafc /drivers/mtd/ubi
parent		c19286d70aaa361cdb073a68a1f66232c359e2fd
ubi: fastmap: Use free pebs reserved for bad block handling
If new bad PEBs occur, UBI first consumes ubi->beb_rsvd_pebs, then ubi->avail_pebs, and finally becomes read-only once both counts reach 0, which means that the number of PEBs available for user volumes is not affected. Besides, while filling the wl pool or getting free PEBs, UBI reserves ubi->beb_rsvd_pebs free PEBs but does not reserve ubi->avail_pebs. So ubi->beb_rsvd_pebs and ubi->avail_pebs have nothing to do with the usage of free PEBs; UBI can use all free PEBs.

Commit 78d6d497a648 ("UBI: Move fastmap specific functions out of wl.c") already removed the beb_rsvd_pebs check while filling the pool. Now, don't reserve ubi->beb_rsvd_pebs while filling the wl_pool either. This fills more PEBs into the pool and also reduces the fastmap update frequency. Also remove the beb_rsvd_pebs check in ubi_wl_get_fm_peb.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=217787
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
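For readers less familiar with UBI's bookkeeping, the cascade described in the message can be sketched roughly as below. This is an illustrative fragment only, with a hypothetical helper name, using struct ubi_device fields (beb_rsvd_pebs, avail_pebs, ro_mode); the real accounting lives in the erase/bad-block path in drivers/mtd/ubi/wl.c and differs in detail (locking, error reporting, actually marking the block bad).

	/* Illustrative sketch, not the kernel's code. */
	static void account_new_bad_peb(struct ubi_device *ubi)
	{
		if (ubi->beb_rsvd_pebs > 0) {
			/* First spend a PEB from the bad-block-handling reserve. */
			ubi->beb_rsvd_pebs -= 1;
		} else if (ubi->avail_pebs > 0) {
			/* Then fall back to PEBs not yet claimed by user volumes. */
			ubi->avail_pebs -= 1;
		} else {
			/* Nothing left to spend: the device goes read-only. */
			ubi->ro_mode = 1;
		}
	}

Either way, the free PEBs handed to the fastmap pools are not governed by these two counters, which is the observation the patch relies on.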
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--	drivers/mtd/ubi/fastmap-wl.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 863f571f1adb..4611a75f1241 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ if (!ubi->free.rb_node)
goto out;
if (anchor)
@@ -100,28 +100,22 @@ out:
/*
* has_enough_free_count - whether ubi has enough free pebs to fill fm pools
* @ubi: UBI device description object
- * @is_wl_pool: whether UBI is filling wear leveling pool
*
* This helper function checks whether there are enough free pebs (deducted
* by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
* there is at least one of free pebs is filled into fm_wl_pool.
- * For wear leveling pool, UBI should also reserve free pebs for bad pebs
- * handling, because there maybe no enough free pebs for user volumes after
- * producing new bad pebs.
*/
-static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+static bool has_enough_free_count(struct ubi_device *ubi)
{
int fm_used = 0; // fastmap non anchor pebs.
- int beb_rsvd_pebs;
if (!ubi->free.rb_node)
return false;
- beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
fm_used = ubi->fm_size / ubi->leb_size - 1;
- return ubi->free_count - beb_rsvd_pebs > fm_used;
+ return ubi->free_count > fm_used;
}
/**
@@ -159,7 +153,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!has_enough_free_count(ubi, false))
+ if (!has_enough_free_count(ubi))
break;
e = wl_get_wle(ubi);
@@ -172,7 +166,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!has_enough_free_count(ubi, true))
+ if (!has_enough_free_count(ubi))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
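For reference, putting the hunks above together, the simplified helper after this patch reads approximately as follows (reconstructed from the diff; indentation and the surrounding comment block may differ slightly from the tree):

	static bool has_enough_free_count(struct ubi_device *ubi)
	{
		int fm_used = 0;	// fastmap non anchor pebs.

		if (!ubi->free.rb_node)
			return false;

		/* Deduct the PEBs a fastmap itself occupies (non-anchor part). */
		if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
			fm_used = ubi->fm_size / ubi->leb_size - 1;

		return ubi->free_count > fm_used;
	}

Both refill paths in ubi_refill_pools() now rely on this single check, with no ubi->beb_rsvd_pebs deduction for the wl pool.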