mtd: ubi: wl: avoid erasing a PEB which is empty
wear_leveling_worker() currently puts a PEB up for erasure unconditionally in the error case, even if it has just been taken from the free list and was never used. If the PEB was never used, it can be put back on the free list, saving a precious erase cycle.

v1…v2:
- to_leb_clean -> dst_leb_clean
- use the nested option for ensure_wear_leveling()
- do_sync_erase() can't return -ENOMEM, so we can just go into RO mode now

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
commit 34b89df903
parent 168309855a
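As context for the hunks below, here is a condensed, standalone sketch of the decision the patch introduces: when a LEB move fails, the destination PEB is only scheduled for erasure if something was actually written to it; otherwise it goes straight back to the free list. The struct peb type and the helpers free_list_add(), schedule_erase() and ensure_wear_leveling_nested() are simplified stand-ins invented for illustration, not the real UBI API; only the control flow mirrors wear_leveling_worker().

/*
 * Standalone model of the error-path decision added by this commit.
 * All types and helpers here are illustrative stand-ins; the real code
 * operates on struct ubi_device / struct ubi_wl_entry in wl.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct peb {
	int pnum;	/* physical eraseblock number */
	bool written;	/* has any data been copied into it? */
};

/* Stand-in for wl_tree_add(e2, &ubi->free) + ubi->free_count++ */
static void free_list_add(struct peb *e2)
{
	printf("PEB %d: still empty, back on the free list (no erase)\n",
	       e2->pnum);
}

/* Stand-in for do_sync_erase(): costs one erase cycle */
static int schedule_erase(struct peb *e2)
{
	printf("PEB %d: was written to, scheduling erase\n", e2->pnum);
	return 0;
}

/* Stand-in for ensure_wear_leveling(ubi, 1) */
static void ensure_wear_leveling_nested(void)
{
	printf("re-checking whether more wear-leveling work is needed\n");
}

/*
 * Error path of a failed LEB move: if the destination eraseblock is
 * still clean (dst_leb_clean in the patch), reuse it directly instead
 * of burning an erase cycle on it.
 */
static int handle_failed_move(struct peb *e2)
{
	bool dst_leb_clean = !e2->written;

	if (dst_leb_clean) {
		free_list_add(e2);
		ensure_wear_leveling_nested();
		return 0;
	}
	return schedule_erase(e2);
}

int main(void)
{
	struct peb clean = { .pnum = 42, .written = false };
	struct peb dirty = { .pnum = 43, .written = true };

	handle_failed_move(&clean);
	handle_failed_move(&dirty);
	return 0;
}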
@@ -628,6 +628,7 @@ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	return __erase_worker(ubi, &wl_wrk);
 }
 
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
 /**
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
@@ -649,6 +650,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 #endif
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
+	int dst_leb_clean = 0;
 
 	kfree(wrk);
 	if (shutdown)
@@ -753,6 +755,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
 	if (err && err != UBI_IO_BITFLIPS) {
+		dst_leb_clean = 1;
 		if (err == UBI_IO_FF) {
 			/*
 			 * We are trying to move PEB without a VID header. UBI
@@ -798,10 +801,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			 * protection queue.
 			 */
 			protect = 1;
+			dst_leb_clean = 1;
 			goto out_not_moved;
 		}
 		if (err == MOVE_RETRY) {
 			scrubbing = 1;
+			dst_leb_clean = 1;
 			goto out_not_moved;
 		}
 		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
@@ -827,6 +832,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 					ubi->erroneous_peb_count);
 				goto out_error;
 			}
+			dst_leb_clean = 1;
 			erroneous = 1;
 			goto out_not_moved;
 		}
@@ -897,15 +903,24 @@ out_not_moved:
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
+	if (dst_leb_clean) {
+		wl_tree_add(e2, &ubi->free);
+		ubi->free_count++;
+	}
+
 	ubi_assert(!ubi->move_to_put);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
-	if (err)
-		goto out_ro;
+	if (dst_leb_clean) {
+		ensure_wear_leveling(ubi, 1);
+	} else {
+		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+		if (err)
+			goto out_ro;
+	}
+
 	mutex_unlock(&ubi->move_mutex);
 	return 0;