Skip to content

Commit a2ea69d

Browse files
Zhihao Cheng authored and Richard Weinberger committed
ubi: fastmap: Wait until there are enough free PEBs before filling pools
Wait until there are enough free PEBs before filling pool/wl_pool; sometimes erase_worker is not scheduled in time, which causes two situations: A. Few PEBs are filled into the pool, which makes ubi_update_fastmap be called frequently and leads to the first 64 PEBs being erased more times than other PEBs. So waiting for free PEBs before filling the pool reduces the fastmap updating frequency and prolongs flash service life. B. When space is nearly running out, ubi_refill_pools() cannot make sure pool and wl_pool are filled with free PEBs, because of the delay of erase_worker. After this patch is applied, there must exist free PEBs in the pool after one call of ubi_update_fastmap. Besides, this patch is a preparation for fixing the large erase counter in the fastmap data block and fixing lapsed wear leveling for the first 64 PEBs. Link: https://bugzilla.kernel.org/show_bug.cgi?id=217787 Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com> Signed-off-by: Richard Weinberger <richard@nod.at>
1 parent 8ff4e62 commit a2ea69d

File tree

5 files changed

+65
-16
lines changed

5 files changed

+65
-16
lines changed

drivers/mtd/ubi/eba.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,6 @@
3333
#include <linux/err.h>
3434
#include "ubi.h"
3535

36-
/* Number of physical eraseblocks reserved for atomic LEB change operation */
37-
#define EBA_RESERVED_PEBS 1
38-
3936
/**
4037
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
4138
* @pnum: the physical eraseblock number attached to the LEB

drivers/mtd/ubi/fastmap-wl.c

Lines changed: 50 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,46 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
9797
return e;
9898
}
9999

/*
 * wait_free_pebs_for_pool - wait until there are enough free PEBs
 * @ubi: UBI device description object
 *
 * Wait and execute do_work until there are enough free PEBs, filling the
 * pool as much as we can. This reduces the number of pool refills, which
 * in turn reduces the fastmap updating frequency.
 */
static void wait_free_pebs_for_pool(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	int free, expect_free, executed;
	/*
	 * At least the following free PEBs are reserved by UBI:
	 * 1. WL_RESERVED_PEBS[1]
	 * 2. EBA_RESERVED_PEBS[1]
	 * 3. fm pebs - 1: twice the fastmap size minus fastmap and fm_anchor
	 * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
	 */
	int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
		       ubi->fm_size / ubi->leb_size - 1;

	do {
		spin_lock(&ubi->wl_lock);
		free = ubi->free_count;
		/* PEBs still sitting unused in the pools also count as free */
		free += pool->size - pool->used + wl_pool->size - wl_pool->used;
		expect_free = reserved + ubi->beb_rsvd_pebs;
		spin_unlock(&ubi->wl_lock);

		/*
		 * Break out if there are no more works or a work failed to
		 * execute, given the fact that erase_worker will reschedule
		 * itself when -EBUSY is returned from the mtd layer caused by
		 * system shutdown.
		 */
		if (do_work(ubi, &executed) || !executed)
			break;
	} while (free < expect_free);
}
139+
100140
/*
101141
* has_enough_free_count - whether ubi has enough free pebs to fill fm pools
102142
* @ubi: UBI device description object
@@ -119,16 +159,23 @@ static bool has_enough_free_count(struct ubi_device *ubi)
119159
}
120160

121161
/**
122-
* ubi_refill_pools - refills all fastmap PEB pools.
162+
* ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
123163
* @ubi: UBI device description object
124164
*/
125-
void ubi_refill_pools(struct ubi_device *ubi)
165+
void ubi_refill_pools_and_lock(struct ubi_device *ubi)
126166
{
127167
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
128168
struct ubi_fm_pool *pool = &ubi->fm_pool;
129169
struct ubi_wl_entry *e;
130170
int enough;
131171

172+
if (!ubi->ro_mode && !ubi->fm_disabled)
173+
wait_free_pebs_for_pool(ubi);
174+
175+
down_write(&ubi->fm_protect);
176+
down_write(&ubi->work_sem);
177+
down_write(&ubi->fm_eba_sem);
178+
132179
spin_lock(&ubi->wl_lock);
133180

134181
return_unused_pool_pebs(ubi, wl_pool);
@@ -204,7 +251,7 @@ static int produce_free_peb(struct ubi_device *ubi)
204251

205252
while (!ubi->free.rb_node && ubi->works_count) {
206253
dbg_wl("do one work synchronously");
207-
err = do_work(ubi);
254+
err = do_work(ubi, NULL);
208255

209256
if (err)
210257
return err;

drivers/mtd/ubi/fastmap.c

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1491,11 +1491,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
14911491
struct ubi_fastmap_layout *new_fm, *old_fm;
14921492
struct ubi_wl_entry *tmp_e;
14931493

1494-
down_write(&ubi->fm_protect);
1495-
down_write(&ubi->work_sem);
1496-
down_write(&ubi->fm_eba_sem);
1497-
1498-
ubi_refill_pools(ubi);
1494+
ubi_refill_pools_and_lock(ubi);
14991495

15001496
if (ubi->ro_mode || ubi->fm_disabled) {
15011497
up_write(&ubi->fm_eba_sem);

drivers/mtd/ubi/ubi.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,9 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
8282
#define UBI_DFS_DIR_NAME "ubi%d"
8383
#define UBI_DFS_DIR_LEN (3 + 2 + 1)
8484

85+
/* Number of physical eraseblocks reserved for atomic LEB change operation */
86+
#define EBA_RESERVED_PEBS 1
87+
8588
/*
8689
* Error codes returned by the I/O sub-system.
8790
*
@@ -915,7 +918,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
915918
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
916919
int lnum, int torture);
917920
int ubi_is_erase_work(struct ubi_work *wrk);
918-
void ubi_refill_pools(struct ubi_device *ubi);
921+
void ubi_refill_pools_and_lock(struct ubi_device *ubi);
919922
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
920923
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
921924

drivers/mtd/ubi/wl.c

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -181,11 +181,13 @@ static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
181181
/**
182182
* do_work - do one pending work.
183183
* @ubi: UBI device description object
184+
* @executed: whether there is one work is executed
184185
*
185186
* This function returns zero in case of success and a negative error code in
186-
* case of failure.
187+
* case of failure. If @executed is not NULL and there is one work executed,
188+
* @executed is set as %1, otherwise @executed is set as %0.
187189
*/
188-
static int do_work(struct ubi_device *ubi)
190+
static int do_work(struct ubi_device *ubi, int *executed)
189191
{
190192
int err;
191193
struct ubi_work *wrk;
@@ -203,9 +205,13 @@ static int do_work(struct ubi_device *ubi)
203205
if (list_empty(&ubi->works)) {
204206
spin_unlock(&ubi->wl_lock);
205207
up_read(&ubi->work_sem);
208+
if (executed)
209+
*executed = 0;
206210
return 0;
207211
}
208212

213+
if (executed)
214+
*executed = 1;
209215
wrk = list_entry(ubi->works.next, struct ubi_work, list);
210216
list_del(&wrk->list);
211217
ubi->works_count -= 1;
@@ -1685,7 +1691,7 @@ int ubi_thread(void *u)
16851691
}
16861692
spin_unlock(&ubi->wl_lock);
16871693

1688-
err = do_work(ubi);
1694+
err = do_work(ubi, NULL);
16891695
if (err) {
16901696
ubi_err(ubi, "%s: work failed with error code %d",
16911697
ubi->bgt_name, err);
@@ -2096,7 +2102,7 @@ static int produce_free_peb(struct ubi_device *ubi)
20962102
spin_unlock(&ubi->wl_lock);
20972103

20982104
dbg_wl("do one work synchronously");
2099-
err = do_work(ubi);
2105+
err = do_work(ubi, NULL);
21002106

21012107
spin_lock(&ubi->wl_lock);
21022108
if (err)

0 commit comments

Comments
 (0)