ubi,ubifs: sync with linux v4.2
sync with linux v4.2 commit 64291f7db5bd8150a74ad2036f1037e6a0428df2 Author: Linus Torvalds <torvalds@linux-foundation.org> Date: Sun Aug 30 11:34:09 2015 -0700 Linux 4.2 This update is needed, as it turned out, that fastmap was in experimental/broken state in kernel v3.15, which was the last base for U-Boot. Signed-off-by: Heiko Schocher <hs@denx.de> Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
This commit is contained in:
parent
5219db8ae8
commit
0195a7bb36
4
README
4
README
@ -3494,6 +3494,10 @@ FIT uImage format:
|
||||
without a fastmap.
|
||||
default: 0
|
||||
|
||||
CONFIG_MTD_UBI_FM_DEBUG
|
||||
Enable UBI fastmap debug
|
||||
default: 0
|
||||
|
||||
- UBIFS support
|
||||
CONFIG_CMD_UBIFS
|
||||
|
||||
|
@ -255,7 +255,7 @@ static int ubi_remove_vol(char *volume)
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
ubi_err("cannot remove volume %s, error %d", volume, err);
|
||||
ubi_err(ubi, "cannot remove volume %s, error %d", volume, err);
|
||||
if (err < 0)
|
||||
err = -err;
|
||||
return err;
|
||||
@ -284,7 +284,7 @@ static int ubi_volume_continue_write(char *volume, void *buf, size_t size)
|
||||
return -err;
|
||||
|
||||
if (err) {
|
||||
ubi_warn("volume %d on UBI device %d is corrupted",
|
||||
ubi_warn(ubi, "volume %d on UBI device %d is corrupt",
|
||||
vol->vol_id, ubi->ubi_num);
|
||||
vol->corrupted = 1;
|
||||
}
|
||||
|
@ -172,6 +172,7 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
|
||||
|
||||
/**
|
||||
* validate_vid_hdr - check volume identifier header.
|
||||
* @ubi: UBI device description object
|
||||
* @vid_hdr: the volume identifier header to check
|
||||
* @av: information about the volume this logical eraseblock belongs to
|
||||
* @pnum: physical eraseblock number the VID header came from
|
||||
@ -184,7 +185,8 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
|
||||
* information in the VID header is consistent to the information in other VID
|
||||
* headers of the same volume.
|
||||
*/
|
||||
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
|
||||
static int validate_vid_hdr(const struct ubi_device *ubi,
|
||||
const struct ubi_vid_hdr *vid_hdr,
|
||||
const struct ubi_ainf_volume *av, int pnum)
|
||||
{
|
||||
int vol_type = vid_hdr->vol_type;
|
||||
@ -202,7 +204,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
|
||||
*/
|
||||
|
||||
if (vol_id != av->vol_id) {
|
||||
ubi_err("inconsistent vol_id");
|
||||
ubi_err(ubi, "inconsistent vol_id");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
@ -212,17 +214,17 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
|
||||
av_vol_type = UBI_VID_DYNAMIC;
|
||||
|
||||
if (vol_type != av_vol_type) {
|
||||
ubi_err("inconsistent vol_type");
|
||||
ubi_err(ubi, "inconsistent vol_type");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (used_ebs != av->used_ebs) {
|
||||
ubi_err("inconsistent used_ebs");
|
||||
ubi_err(ubi, "inconsistent used_ebs");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (data_pad != av->data_pad) {
|
||||
ubi_err("inconsistent data_pad");
|
||||
ubi_err(ubi, "inconsistent data_pad");
|
||||
goto bad;
|
||||
}
|
||||
}
|
||||
@ -230,7 +232,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
ubi_err("inconsistent VID header at PEB %d", pnum);
|
||||
ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
ubi_dump_av(av);
|
||||
return -EINVAL;
|
||||
@ -332,7 +334,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
|
||||
* support these images anymore. Well, those images still work,
|
||||
* but only if no unclean reboots happened.
|
||||
*/
|
||||
ubi_err("unsupported on-flash UBI format");
|
||||
ubi_err(ubi, "unsupported on-flash UBI format");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -373,7 +375,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
|
||||
if (err == UBI_IO_BITFLIPS)
|
||||
bitflips = 1;
|
||||
else {
|
||||
ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
|
||||
ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
|
||||
pnum, err);
|
||||
if (err > 0)
|
||||
err = -EIO;
|
||||
@ -404,7 +406,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
|
||||
second_is_newer = !second_is_newer;
|
||||
} else {
|
||||
dbg_bld("PEB %d CRC is OK", pnum);
|
||||
bitflips = !!err;
|
||||
bitflips |= !!err;
|
||||
}
|
||||
mutex_unlock(&ubi->buf_mutex);
|
||||
|
||||
@ -503,7 +505,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
|
||||
* logical eraseblocks because there was an unclean reboot.
|
||||
*/
|
||||
if (aeb->sqnum == sqnum && sqnum != 0) {
|
||||
ubi_err("two LEBs with same sequence number %llu",
|
||||
ubi_err(ubi, "two LEBs with same sequence number %llu",
|
||||
sqnum);
|
||||
ubi_dump_aeb(aeb, 0);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
@ -523,7 +525,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
|
||||
* This logical eraseblock is newer than the one
|
||||
* found earlier.
|
||||
*/
|
||||
err = validate_vid_hdr(vid_hdr, av, pnum);
|
||||
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -561,7 +563,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
|
||||
* attaching information.
|
||||
*/
|
||||
|
||||
err = validate_vid_hdr(vid_hdr, av, pnum);
|
||||
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -664,7 +666,8 @@ static int early_erase_peb(struct ubi_device *ubi,
|
||||
* Erase counter overflow. Upgrade UBI and use 64-bit
|
||||
* erase counters internally.
|
||||
*/
|
||||
ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
|
||||
ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
|
||||
pnum, ec);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -732,7 +735,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
|
||||
return aeb;
|
||||
}
|
||||
|
||||
ubi_err("no free eraseblocks");
|
||||
ubi_err(ubi, "no free eraseblocks");
|
||||
return ERR_PTR(-ENOSPC);
|
||||
}
|
||||
|
||||
@ -781,9 +784,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
|
||||
if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
|
||||
goto out_unlock;
|
||||
|
||||
ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
|
||||
ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
|
||||
pnum);
|
||||
ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
|
||||
ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
pr_err("hexdump of PEB %d offset %d, length %d",
|
||||
pnum, ubi->leb_start, ubi->leb_size);
|
||||
@ -855,7 +858,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
bitflips = 1;
|
||||
break;
|
||||
default:
|
||||
ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
|
||||
ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
|
||||
err);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -864,7 +868,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
|
||||
/* Make sure UBI version is OK */
|
||||
if (ech->version != UBI_VERSION) {
|
||||
ubi_err("this UBI version is %d, image version is %d",
|
||||
ubi_err(ubi, "this UBI version is %d, image version is %d",
|
||||
UBI_VERSION, (int)ech->version);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -878,7 +882,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
* flash. Upgrade UBI and use 64-bit erase counters
|
||||
* internally.
|
||||
*/
|
||||
ubi_err("erase counter overflow, max is %d",
|
||||
ubi_err(ubi, "erase counter overflow, max is %d",
|
||||
UBI_MAX_ERASECOUNTER);
|
||||
ubi_dump_ec_hdr(ech);
|
||||
return -EINVAL;
|
||||
@ -899,7 +903,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
if (!ubi->image_seq)
|
||||
ubi->image_seq = image_seq;
|
||||
if (image_seq && ubi->image_seq != image_seq) {
|
||||
ubi_err("bad image sequence number %d in PEB %d, expected %d",
|
||||
ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
|
||||
image_seq, pnum, ubi->image_seq);
|
||||
ubi_dump_ec_hdr(ech);
|
||||
return -EINVAL;
|
||||
@ -977,7 +981,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
return err;
|
||||
goto adjust_mean_ec;
|
||||
default:
|
||||
ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
|
||||
ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
|
||||
err);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -995,7 +999,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
case UBI_COMPAT_DELETE:
|
||||
if (vol_id != UBI_FM_SB_VOLUME_ID
|
||||
&& vol_id != UBI_FM_DATA_VOLUME_ID) {
|
||||
ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
|
||||
ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
|
||||
vol_id, lnum);
|
||||
}
|
||||
err = add_to_list(ai, pnum, vol_id, lnum,
|
||||
@ -1005,13 +1009,13 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
return 0;
|
||||
|
||||
case UBI_COMPAT_RO:
|
||||
ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
|
||||
ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
|
||||
vol_id, lnum);
|
||||
ubi->ro_mode = 1;
|
||||
break;
|
||||
|
||||
case UBI_COMPAT_PRESERVE:
|
||||
ubi_msg("\"preserve\" compatible internal volume %d:%d found",
|
||||
ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
|
||||
vol_id, lnum);
|
||||
err = add_to_list(ai, pnum, vol_id, lnum,
|
||||
ec, 0, &ai->alien);
|
||||
@ -1020,14 +1024,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
return 0;
|
||||
|
||||
case UBI_COMPAT_REJECT:
|
||||
ubi_err("incompatible internal volume %d:%d found",
|
||||
ubi_err(ubi, "incompatible internal volume %d:%d found",
|
||||
vol_id, lnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (ec_err)
|
||||
ubi_warn("valid VID header but corrupted EC header at PEB %d",
|
||||
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
|
||||
pnum);
|
||||
err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
|
||||
if (err)
|
||||
@ -1071,7 +1075,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
* with the flash HW or driver.
|
||||
*/
|
||||
if (ai->corr_peb_count) {
|
||||
ubi_err("%d PEBs are corrupted and preserved",
|
||||
ubi_err(ubi, "%d PEBs are corrupted and preserved",
|
||||
ai->corr_peb_count);
|
||||
pr_err("Corrupted PEBs are:");
|
||||
list_for_each_entry(aeb, &ai->corr, u.list)
|
||||
@ -1083,7 +1087,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
* otherwise, only print a warning.
|
||||
*/
|
||||
if (ai->corr_peb_count >= max_corr) {
|
||||
ubi_err("too many corrupted PEBs, refusing");
|
||||
ubi_err(ubi, "too many corrupted PEBs, refusing");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -1106,11 +1110,11 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
*/
|
||||
if (ai->maybe_bad_peb_count <= 2) {
|
||||
ai->is_empty = 1;
|
||||
ubi_msg("empty MTD device detected");
|
||||
ubi_msg(ubi, "empty MTD device detected");
|
||||
get_random_bytes(&ubi->image_seq,
|
||||
sizeof(ubi->image_seq));
|
||||
} else {
|
||||
ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
|
||||
ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1244,7 +1248,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
goto out_vidh;
|
||||
}
|
||||
|
||||
ubi_msg("scanning is finished");
|
||||
ubi_msg(ubi, "scanning is finished");
|
||||
|
||||
/* Calculate mean erase counter */
|
||||
if (ai->ec_count)
|
||||
@ -1293,6 +1297,30 @@ out_ech:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct ubi_attach_info *alloc_ai(void)
|
||||
{
|
||||
struct ubi_attach_info *ai;
|
||||
|
||||
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
|
||||
if (!ai)
|
||||
return ai;
|
||||
|
||||
INIT_LIST_HEAD(&ai->corr);
|
||||
INIT_LIST_HEAD(&ai->free);
|
||||
INIT_LIST_HEAD(&ai->erase);
|
||||
INIT_LIST_HEAD(&ai->alien);
|
||||
ai->volumes = RB_ROOT;
|
||||
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
|
||||
sizeof(struct ubi_ainf_peb),
|
||||
0, 0, NULL);
|
||||
if (!ai->aeb_slab_cache) {
|
||||
kfree(ai);
|
||||
ai = NULL;
|
||||
}
|
||||
|
||||
return ai;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
|
||||
/**
|
||||
@ -1305,7 +1333,7 @@ out_ech:
|
||||
* UBI_NO_FASTMAP denotes that no fastmap was found.
|
||||
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
|
||||
*/
|
||||
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
|
||||
{
|
||||
int err, pnum, fm_anchor = -1;
|
||||
unsigned long long max_sqnum = 0;
|
||||
@ -1326,7 +1354,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
cond_resched();
|
||||
|
||||
dbg_gen("process PEB %d", pnum);
|
||||
err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
|
||||
err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
|
||||
if (err < 0)
|
||||
goto out_vidh;
|
||||
|
||||
@ -1342,7 +1370,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
if (fm_anchor < 0)
|
||||
return UBI_NO_FASTMAP;
|
||||
|
||||
return ubi_scan_fastmap(ubi, ai, fm_anchor);
|
||||
destroy_ai(*ai);
|
||||
*ai = alloc_ai();
|
||||
if (!*ai)
|
||||
return -ENOMEM;
|
||||
|
||||
return ubi_scan_fastmap(ubi, *ai, fm_anchor);
|
||||
|
||||
out_vidh:
|
||||
ubi_free_vid_hdr(ubi, vidh);
|
||||
@ -1354,30 +1387,6 @@ out:
|
||||
|
||||
#endif
|
||||
|
||||
static struct ubi_attach_info *alloc_ai(const char *slab_name)
|
||||
{
|
||||
struct ubi_attach_info *ai;
|
||||
|
||||
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
|
||||
if (!ai)
|
||||
return ai;
|
||||
|
||||
INIT_LIST_HEAD(&ai->corr);
|
||||
INIT_LIST_HEAD(&ai->free);
|
||||
INIT_LIST_HEAD(&ai->erase);
|
||||
INIT_LIST_HEAD(&ai->alien);
|
||||
ai->volumes = RB_ROOT;
|
||||
ai->aeb_slab_cache = kmem_cache_create(slab_name,
|
||||
sizeof(struct ubi_ainf_peb),
|
||||
0, 0, NULL);
|
||||
if (!ai->aeb_slab_cache) {
|
||||
kfree(ai);
|
||||
ai = NULL;
|
||||
}
|
||||
|
||||
return ai;
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_attach - attach an MTD device.
|
||||
* @ubi: UBI device descriptor
|
||||
@ -1391,7 +1400,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
|
||||
int err;
|
||||
struct ubi_attach_info *ai;
|
||||
|
||||
ai = alloc_ai("ubi_aeb_slab_cache");
|
||||
ai = alloc_ai();
|
||||
if (!ai)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1405,11 +1414,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
|
||||
if (force_scan)
|
||||
err = scan_all(ubi, ai, 0);
|
||||
else {
|
||||
err = scan_fast(ubi, ai);
|
||||
if (err > 0) {
|
||||
err = scan_fast(ubi, &ai);
|
||||
if (err > 0 || mtd_is_eccerr(err)) {
|
||||
if (err != UBI_NO_FASTMAP) {
|
||||
destroy_ai(ai);
|
||||
ai = alloc_ai("ubi_aeb_slab_cache2");
|
||||
ai = alloc_ai();
|
||||
if (!ai)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1445,10 +1454,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
|
||||
goto out_wl;
|
||||
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
|
||||
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
|
||||
struct ubi_attach_info *scan_ai;
|
||||
|
||||
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
|
||||
scan_ai = alloc_ai();
|
||||
if (!scan_ai) {
|
||||
err = -ENOMEM;
|
||||
goto out_wl;
|
||||
@ -1511,37 +1520,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
vols_found += 1;
|
||||
|
||||
if (ai->is_empty) {
|
||||
ubi_err("bad is_empty flag");
|
||||
ubi_err(ubi, "bad is_empty flag");
|
||||
goto bad_av;
|
||||
}
|
||||
|
||||
if (av->vol_id < 0 || av->highest_lnum < 0 ||
|
||||
av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
|
||||
av->data_pad < 0 || av->last_data_size < 0) {
|
||||
ubi_err("negative values");
|
||||
ubi_err(ubi, "negative values");
|
||||
goto bad_av;
|
||||
}
|
||||
|
||||
if (av->vol_id >= UBI_MAX_VOLUMES &&
|
||||
av->vol_id < UBI_INTERNAL_VOL_START) {
|
||||
ubi_err("bad vol_id");
|
||||
ubi_err(ubi, "bad vol_id");
|
||||
goto bad_av;
|
||||
}
|
||||
|
||||
if (av->vol_id > ai->highest_vol_id) {
|
||||
ubi_err("highest_vol_id is %d, but vol_id %d is there",
|
||||
ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
|
||||
ai->highest_vol_id, av->vol_id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (av->vol_type != UBI_DYNAMIC_VOLUME &&
|
||||
av->vol_type != UBI_STATIC_VOLUME) {
|
||||
ubi_err("bad vol_type");
|
||||
ubi_err(ubi, "bad vol_type");
|
||||
goto bad_av;
|
||||
}
|
||||
|
||||
if (av->data_pad > ubi->leb_size / 2) {
|
||||
ubi_err("bad data_pad");
|
||||
ubi_err(ubi, "bad data_pad");
|
||||
goto bad_av;
|
||||
}
|
||||
|
||||
@ -1553,48 +1562,48 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
leb_count += 1;
|
||||
|
||||
if (aeb->pnum < 0 || aeb->ec < 0) {
|
||||
ubi_err("negative values");
|
||||
ubi_err(ubi, "negative values");
|
||||
goto bad_aeb;
|
||||
}
|
||||
|
||||
if (aeb->ec < ai->min_ec) {
|
||||
ubi_err("bad ai->min_ec (%d), %d found",
|
||||
ubi_err(ubi, "bad ai->min_ec (%d), %d found",
|
||||
ai->min_ec, aeb->ec);
|
||||
goto bad_aeb;
|
||||
}
|
||||
|
||||
if (aeb->ec > ai->max_ec) {
|
||||
ubi_err("bad ai->max_ec (%d), %d found",
|
||||
ubi_err(ubi, "bad ai->max_ec (%d), %d found",
|
||||
ai->max_ec, aeb->ec);
|
||||
goto bad_aeb;
|
||||
}
|
||||
|
||||
if (aeb->pnum >= ubi->peb_count) {
|
||||
ubi_err("too high PEB number %d, total PEBs %d",
|
||||
ubi_err(ubi, "too high PEB number %d, total PEBs %d",
|
||||
aeb->pnum, ubi->peb_count);
|
||||
goto bad_aeb;
|
||||
}
|
||||
|
||||
if (av->vol_type == UBI_STATIC_VOLUME) {
|
||||
if (aeb->lnum >= av->used_ebs) {
|
||||
ubi_err("bad lnum or used_ebs");
|
||||
ubi_err(ubi, "bad lnum or used_ebs");
|
||||
goto bad_aeb;
|
||||
}
|
||||
} else {
|
||||
if (av->used_ebs != 0) {
|
||||
ubi_err("non-zero used_ebs");
|
||||
ubi_err(ubi, "non-zero used_ebs");
|
||||
goto bad_aeb;
|
||||
}
|
||||
}
|
||||
|
||||
if (aeb->lnum > av->highest_lnum) {
|
||||
ubi_err("incorrect highest_lnum or lnum");
|
||||
ubi_err(ubi, "incorrect highest_lnum or lnum");
|
||||
goto bad_aeb;
|
||||
}
|
||||
}
|
||||
|
||||
if (av->leb_count != leb_count) {
|
||||
ubi_err("bad leb_count, %d objects in the tree",
|
||||
ubi_err(ubi, "bad leb_count, %d objects in the tree",
|
||||
leb_count);
|
||||
goto bad_av;
|
||||
}
|
||||
@ -1605,13 +1614,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
aeb = last_aeb;
|
||||
|
||||
if (aeb->lnum != av->highest_lnum) {
|
||||
ubi_err("bad highest_lnum");
|
||||
ubi_err(ubi, "bad highest_lnum");
|
||||
goto bad_aeb;
|
||||
}
|
||||
}
|
||||
|
||||
if (vols_found != ai->vols_found) {
|
||||
ubi_err("bad ai->vols_found %d, should be %d",
|
||||
ubi_err(ubi, "bad ai->vols_found %d, should be %d",
|
||||
ai->vols_found, vols_found);
|
||||
goto out;
|
||||
}
|
||||
@ -1628,7 +1637,8 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
|
||||
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
|
||||
if (err && err != UBI_IO_BITFLIPS) {
|
||||
ubi_err("VID header is not OK (%d)", err);
|
||||
ubi_err(ubi, "VID header is not OK (%d)",
|
||||
err);
|
||||
if (err > 0)
|
||||
err = -EIO;
|
||||
return err;
|
||||
@ -1637,37 +1647,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
|
||||
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
|
||||
if (av->vol_type != vol_type) {
|
||||
ubi_err("bad vol_type");
|
||||
ubi_err(ubi, "bad vol_type");
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
|
||||
ubi_err("bad sqnum %llu", aeb->sqnum);
|
||||
ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
|
||||
ubi_err("bad vol_id %d", av->vol_id);
|
||||
ubi_err(ubi, "bad vol_id %d", av->vol_id);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (av->compat != vidh->compat) {
|
||||
ubi_err("bad compat %d", vidh->compat);
|
||||
ubi_err(ubi, "bad compat %d", vidh->compat);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
|
||||
ubi_err("bad lnum %d", aeb->lnum);
|
||||
ubi_err(ubi, "bad lnum %d", aeb->lnum);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
|
||||
ubi_err("bad used_ebs %d", av->used_ebs);
|
||||
ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
|
||||
ubi_err("bad data_pad %d", av->data_pad);
|
||||
ubi_err(ubi, "bad data_pad %d", av->data_pad);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
}
|
||||
@ -1676,12 +1686,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
continue;
|
||||
|
||||
if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
|
||||
ubi_err("bad highest_lnum %d", av->highest_lnum);
|
||||
ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
|
||||
if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
|
||||
ubi_err("bad last_data_size %d", av->last_data_size);
|
||||
ubi_err(ubi, "bad last_data_size %d",
|
||||
av->last_data_size);
|
||||
goto bad_vid_hdr;
|
||||
}
|
||||
}
|
||||
@ -1722,7 +1733,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
err = 0;
|
||||
for (pnum = 0; pnum < ubi->peb_count; pnum++)
|
||||
if (!buf[pnum]) {
|
||||
ubi_err("PEB %d is not referred", pnum);
|
||||
ubi_err(ubi, "PEB %d is not referred", pnum);
|
||||
err = 1;
|
||||
}
|
||||
|
||||
@ -1732,18 +1743,18 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
return 0;
|
||||
|
||||
bad_aeb:
|
||||
ubi_err("bad attaching information about LEB %d", aeb->lnum);
|
||||
ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
|
||||
ubi_dump_aeb(aeb, 0);
|
||||
ubi_dump_av(av);
|
||||
goto out;
|
||||
|
||||
bad_av:
|
||||
ubi_err("bad attaching information about volume %d", av->vol_id);
|
||||
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
|
||||
ubi_dump_av(av);
|
||||
goto out;
|
||||
|
||||
bad_vid_hdr:
|
||||
ubi_err("bad attaching information about volume %d", av->vol_id);
|
||||
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
|
||||
ubi_dump_av(av);
|
||||
ubi_dump_vid_hdr(vidh);
|
||||
|
||||
|
@ -80,6 +80,7 @@ static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
|
||||
static bool fm_autoconvert;
|
||||
static bool fm_debug;
|
||||
#endif
|
||||
#else
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
@ -87,10 +88,12 @@ static bool fm_autoconvert;
|
||||
#define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 0
|
||||
#endif
|
||||
static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
|
||||
#if !defined(CONFIG_MTD_UBI_FM_DEBUG)
|
||||
#define CONFIG_MTD_UBI_FM_DEBUG 0
|
||||
#endif
|
||||
static bool fm_debug = CONFIG_MTD_UBI_FM_DEBUG;
|
||||
#endif
|
||||
#endif
|
||||
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
|
||||
struct class *ubi_class;
|
||||
|
||||
/* Slab cache for wear-leveling entries */
|
||||
struct kmem_cache *ubi_wl_entry_slab;
|
||||
@ -126,8 +129,17 @@ static ssize_t ubi_version_show(struct class *class,
|
||||
}
|
||||
|
||||
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
|
||||
static struct class_attribute ubi_version =
|
||||
__ATTR(version, S_IRUGO, ubi_version_show, NULL);
|
||||
static struct class_attribute ubi_class_attrs[] = {
|
||||
__ATTR(version, S_IRUGO, ubi_version_show, NULL),
|
||||
__ATTR_NULL
|
||||
};
|
||||
|
||||
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
|
||||
struct class ubi_class = {
|
||||
.name = UBI_NAME_STR,
|
||||
.owner = THIS_MODULE,
|
||||
.class_attrs = ubi_class_attrs,
|
||||
};
|
||||
|
||||
static ssize_t dev_attribute_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
@ -169,23 +181,22 @@ static struct device_attribute dev_mtd_num =
|
||||
*/
|
||||
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
|
||||
{
|
||||
int ret;
|
||||
struct ubi_notification nt;
|
||||
|
||||
ubi_do_get_device_info(ubi, &nt.di);
|
||||
ubi_do_get_volume_info(ubi, vol, &nt.vi);
|
||||
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
switch (ntype) {
|
||||
case UBI_VOLUME_ADDED:
|
||||
case UBI_VOLUME_REMOVED:
|
||||
case UBI_VOLUME_RESIZED:
|
||||
case UBI_VOLUME_RENAMED:
|
||||
if (ubi_update_fastmap(ubi)) {
|
||||
ubi_err("Unable to update fastmap!");
|
||||
ubi_ro_mode(ubi);
|
||||
ret = ubi_update_fastmap(ubi);
|
||||
if (ret)
|
||||
ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
|
||||
}
|
||||
|
||||
@ -406,6 +417,22 @@ static ssize_t dev_attribute_show(struct device *dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct attribute *ubi_dev_attrs[] = {
|
||||
&dev_eraseblock_size.attr,
|
||||
&dev_avail_eraseblocks.attr,
|
||||
&dev_total_eraseblocks.attr,
|
||||
&dev_volumes_count.attr,
|
||||
&dev_max_ec.attr,
|
||||
&dev_reserved_for_bad.attr,
|
||||
&dev_bad_peb_count.attr,
|
||||
&dev_max_vol_count.attr,
|
||||
&dev_min_io_size.attr,
|
||||
&dev_bgt_enabled.attr,
|
||||
&dev_mtd_num.attr,
|
||||
NULL
|
||||
};
|
||||
ATTRIBUTE_GROUPS(ubi_dev);
|
||||
|
||||
static void dev_release(struct device *dev)
|
||||
{
|
||||
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
|
||||
@ -428,45 +455,15 @@ static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
|
||||
|
||||
ubi->dev.release = dev_release;
|
||||
ubi->dev.devt = ubi->cdev.dev;
|
||||
ubi->dev.class = ubi_class;
|
||||
ubi->dev.class = &ubi_class;
|
||||
ubi->dev.groups = ubi_dev_groups;
|
||||
dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
|
||||
err = device_register(&ubi->dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*ref = 1;
|
||||
err = device_create_file(&ubi->dev, &dev_eraseblock_size);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_volumes_count);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_max_ec);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_bad_peb_count);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_max_vol_count);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_min_io_size);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_bgt_enabled);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&ubi->dev, &dev_mtd_num);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -475,17 +472,6 @@ static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
|
||||
*/
|
||||
static void ubi_sysfs_close(struct ubi_device *ubi)
|
||||
{
|
||||
device_remove_file(&ubi->dev, &dev_mtd_num);
|
||||
device_remove_file(&ubi->dev, &dev_bgt_enabled);
|
||||
device_remove_file(&ubi->dev, &dev_min_io_size);
|
||||
device_remove_file(&ubi->dev, &dev_max_vol_count);
|
||||
device_remove_file(&ubi->dev, &dev_bad_peb_count);
|
||||
device_remove_file(&ubi->dev, &dev_reserved_for_bad);
|
||||
device_remove_file(&ubi->dev, &dev_max_ec);
|
||||
device_remove_file(&ubi->dev, &dev_volumes_count);
|
||||
device_remove_file(&ubi->dev, &dev_total_eraseblocks);
|
||||
device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
|
||||
device_remove_file(&ubi->dev, &dev_eraseblock_size);
|
||||
device_unregister(&ubi->dev);
|
||||
}
|
||||
#endif
|
||||
@ -541,7 +527,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
|
||||
*/
|
||||
err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
|
||||
if (err) {
|
||||
ubi_err("cannot register UBI character devices");
|
||||
ubi_err(ubi, "cannot register UBI character devices");
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -552,7 +538,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
|
||||
|
||||
err = cdev_add(&ubi->cdev, dev, 1);
|
||||
if (err) {
|
||||
ubi_err("cannot add character device");
|
||||
ubi_err(ubi, "cannot add character device");
|
||||
goto out_unreg;
|
||||
}
|
||||
|
||||
@ -564,7 +550,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
|
||||
if (ubi->volumes[i]) {
|
||||
err = ubi_add_volume(ubi, ubi->volumes[i]);
|
||||
if (err) {
|
||||
ubi_err("cannot add volume %d", i);
|
||||
ubi_err(ubi, "cannot add volume %d", i);
|
||||
goto out_volumes;
|
||||
}
|
||||
}
|
||||
@ -580,7 +566,8 @@ out_sysfs:
|
||||
cdev_del(&ubi->cdev);
|
||||
out_unreg:
|
||||
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
|
||||
ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
|
||||
ubi_err(ubi, "cannot initialize UBI %s, error %d",
|
||||
ubi->ubi_name, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -674,7 +661,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
* guess we should just pick the largest region. But this is
|
||||
* not implemented.
|
||||
*/
|
||||
ubi_err("multiple regions, not implemented");
|
||||
ubi_err(ubi, "multiple regions, not implemented");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -709,7 +696,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
* which allows us to avoid costly division operations.
|
||||
*/
|
||||
if (!is_power_of_2(ubi->min_io_size)) {
|
||||
ubi_err("min. I/O unit (%d) is not power of 2",
|
||||
ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
|
||||
ubi->min_io_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -726,7 +713,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
if (ubi->max_write_size < ubi->min_io_size ||
|
||||
ubi->max_write_size % ubi->min_io_size ||
|
||||
!is_power_of_2(ubi->max_write_size)) {
|
||||
ubi_err("bad write buffer size %d for %d min. I/O unit",
|
||||
ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
|
||||
ubi->max_write_size, ubi->min_io_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -763,7 +750,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
|
||||
/* The shift must be aligned to 32-bit boundary */
|
||||
if (ubi->vid_hdr_shift % 4) {
|
||||
ubi_err("unaligned VID header shift %d",
|
||||
ubi_err(ubi, "unaligned VID header shift %d",
|
||||
ubi->vid_hdr_shift);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -773,7 +760,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
|
||||
ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
|
||||
ubi->leb_start & (ubi->min_io_size - 1)) {
|
||||
ubi_err("bad VID header (%d) or data offsets (%d)",
|
||||
ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
|
||||
ubi->vid_hdr_offset, ubi->leb_start);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -793,14 +780,14 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
|
||||
* read-only mode.
|
||||
*/
|
||||
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
|
||||
ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
|
||||
ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
|
||||
ubi->ro_mode = 1;
|
||||
}
|
||||
|
||||
ubi->leb_size = ubi->peb_size - ubi->leb_start;
|
||||
|
||||
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
|
||||
ubi_msg("MTD device %d is write-protected, attach in read-only mode",
|
||||
ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
|
||||
ubi->mtd->index);
|
||||
ubi->ro_mode = 1;
|
||||
}
|
||||
@ -833,7 +820,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
|
||||
int err, old_reserved_pebs = vol->reserved_pebs;
|
||||
|
||||
if (ubi->ro_mode) {
|
||||
ubi_warn("skip auto-resize because of R/O mode");
|
||||
ubi_warn(ubi, "skip auto-resize because of R/O mode");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -854,21 +841,22 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
|
||||
vtbl_rec = ubi->vtbl[vol_id];
|
||||
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
|
||||
if (err)
|
||||
ubi_err("cannot clean auto-resize flag for volume %d",
|
||||
ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
|
||||
vol_id);
|
||||
} else {
|
||||
desc.vol = vol;
|
||||
err = ubi_resize_volume(&desc,
|
||||
old_reserved_pebs + ubi->avail_pebs);
|
||||
if (err)
|
||||
ubi_err("cannot auto-resize volume %d", vol_id);
|
||||
ubi_err(ubi, "cannot auto-resize volume %d",
|
||||
vol_id);
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
|
||||
vol->name, old_reserved_pebs, vol->reserved_pebs);
|
||||
ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
|
||||
vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -909,7 +897,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
for (i = 0; i < UBI_MAX_DEVICES; i++) {
|
||||
ubi = ubi_devices[i];
|
||||
if (ubi && mtd->index == ubi->mtd->index) {
|
||||
ubi_err("mtd%d is already attached to ubi%d",
|
||||
ubi_err(ubi, "mtd%d is already attached to ubi%d",
|
||||
mtd->index, i);
|
||||
return -EEXIST;
|
||||
}
|
||||
@ -924,7 +912,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
* no sense to attach emulated MTD devices, so we prohibit this.
|
||||
*/
|
||||
if (mtd->type == MTD_UBIVOLUME) {
|
||||
ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
|
||||
ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
|
||||
mtd->index);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -935,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
if (!ubi_devices[ubi_num])
|
||||
break;
|
||||
if (ubi_num == UBI_MAX_DEVICES) {
|
||||
ubi_err("only %d UBI devices may be created",
|
||||
ubi_err(ubi, "only %d UBI devices may be created",
|
||||
UBI_MAX_DEVICES);
|
||||
return -ENFILE;
|
||||
}
|
||||
@ -945,7 +933,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
|
||||
/* Make sure ubi_num is not busy */
|
||||
if (ubi_devices[ubi_num]) {
|
||||
ubi_err("ubi%d already exists", ubi_num);
|
||||
ubi_err(ubi, "already exists");
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
@ -969,21 +957,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
*/
|
||||
ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
|
||||
ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
|
||||
if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
|
||||
ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
|
||||
ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
|
||||
UBI_FM_MIN_POOL_SIZE);
|
||||
|
||||
ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
|
||||
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
|
||||
ubi->fm_disabled = !fm_autoconvert;
|
||||
if (fm_debug)
|
||||
ubi_enable_dbg_chk_fastmap(ubi);
|
||||
|
||||
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
|
||||
<= UBI_FM_MAX_START) {
|
||||
ubi_err("More than %i PEBs are needed for fastmap, sorry.",
|
||||
ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
|
||||
UBI_FM_MAX_START);
|
||||
ubi->fm_disabled = 1;
|
||||
}
|
||||
|
||||
ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
|
||||
ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
|
||||
ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
|
||||
ubi_msg(ubi, "default fastmap WL pool size: %d",
|
||||
ubi->fm_wl_pool.max_size);
|
||||
#else
|
||||
ubi->fm_disabled = 1;
|
||||
#endif
|
||||
@ -991,10 +982,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
mutex_init(&ubi->ckvol_mutex);
|
||||
mutex_init(&ubi->device_mutex);
|
||||
spin_lock_init(&ubi->volumes_lock);
|
||||
mutex_init(&ubi->fm_mutex);
|
||||
init_rwsem(&ubi->fm_sem);
|
||||
init_rwsem(&ubi->fm_protect);
|
||||
init_rwsem(&ubi->fm_eba_sem);
|
||||
|
||||
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
|
||||
ubi_msg(ubi, "attaching mtd%d", mtd->index);
|
||||
|
||||
err = io_init(ubi, max_beb_per1024);
|
||||
if (err)
|
||||
@ -1013,7 +1004,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
#endif
|
||||
err = ubi_attach(ubi, 0);
|
||||
if (err) {
|
||||
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
|
||||
ubi_err(ubi, "failed to attach mtd%d, error %d",
|
||||
mtd->index, err);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
@ -1034,28 +1026,28 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
|
||||
if (IS_ERR(ubi->bgt_thread)) {
|
||||
err = PTR_ERR(ubi->bgt_thread);
|
||||
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
|
||||
err);
|
||||
ubi_err(ubi, "cannot spawn \"%s\", error %d",
|
||||
ubi->bgt_name, err);
|
||||
goto out_debugfs;
|
||||
}
|
||||
|
||||
ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
|
||||
mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
|
||||
ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
|
||||
ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
|
||||
mtd->index, mtd->name, ubi->flash_size >> 20);
|
||||
ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
|
||||
ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
|
||||
ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
|
||||
ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
|
||||
ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
|
||||
ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
|
||||
ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
|
||||
ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
|
||||
ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
|
||||
ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
|
||||
ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
|
||||
ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
|
||||
ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
|
||||
ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
|
||||
ubi->vtbl_slots);
|
||||
ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
|
||||
ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
|
||||
ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
|
||||
ubi->image_seq);
|
||||
ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
|
||||
ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
|
||||
ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
|
||||
|
||||
/*
|
||||
@ -1064,7 +1056,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
|
||||
*/
|
||||
spin_lock(&ubi->wl_lock);
|
||||
ubi->thread_enabled = 1;
|
||||
#ifndef __UBOOT__
|
||||
wake_up_process(ubi->bgt_thread);
|
||||
#else
|
||||
/*
|
||||
* U-Boot special: We have no bgt_thread in U-Boot!
|
||||
* So just call do_work() here directly.
|
||||
*/
|
||||
err = do_work(ubi);
|
||||
if (err) {
|
||||
ubi_err(ubi, "%s: work failed with error code %d",
|
||||
ubi->bgt_name, err);
|
||||
}
|
||||
#endif
|
||||
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
|
||||
ubi_devices[ubi_num] = ubi;
|
||||
@ -1124,7 +1129,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
|
||||
return -EBUSY;
|
||||
}
|
||||
/* This may only happen if there is a bug */
|
||||
ubi_err("%s reference count %d, destroy anyway",
|
||||
ubi_err(ubi, "%s reference count %d, destroy anyway",
|
||||
ubi->ubi_name, ubi->ref_count);
|
||||
}
|
||||
ubi_devices[ubi_num] = NULL;
|
||||
@ -1132,10 +1137,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
|
||||
|
||||
ubi_assert(ubi_num == ubi->ubi_num);
|
||||
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
|
||||
ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
|
||||
ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
/* If we don't write a new fastmap at detach time we lose all
|
||||
* EC updates that have been made since the last written fastmap. */
|
||||
* EC updates that have been made since the last written fastmap.
|
||||
* In case of fastmap debugging we omit the update to simulate an
|
||||
* unclean shutdown. */
|
||||
if (!ubi_dbg_chk_fastmap(ubi))
|
||||
ubi_update_fastmap(ubi);
|
||||
#endif
|
||||
/*
|
||||
@ -1160,7 +1168,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
|
||||
put_mtd_device(ubi->mtd);
|
||||
vfree(ubi->peb_buf);
|
||||
vfree(ubi->fm_buf);
|
||||
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
|
||||
ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
|
||||
put_device(&ubi->dev);
|
||||
return 0;
|
||||
}
|
||||
@ -1185,9 +1193,9 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
|
||||
return ERR_PTR(err);
|
||||
|
||||
/* MTD device number is defined by the major / minor numbers */
|
||||
major = imajor(path.dentry->d_inode);
|
||||
minor = iminor(path.dentry->d_inode);
|
||||
mode = path.dentry->d_inode->i_mode;
|
||||
major = imajor(d_backing_inode(path.dentry));
|
||||
minor = iminor(d_backing_inode(path.dentry));
|
||||
mode = d_backing_inode(path.dentry)->i_mode;
|
||||
path_put(&path);
|
||||
if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
|
||||
return ERR_PTR(-EINVAL);
|
||||
@ -1250,28 +1258,20 @@ int ubi_init(void)
|
||||
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
|
||||
|
||||
if (mtd_devs > UBI_MAX_DEVICES) {
|
||||
ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
|
||||
pr_err("UBI error: too many MTD devices, maximum is %d",
|
||||
UBI_MAX_DEVICES);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Create base sysfs directory and sysfs files */
|
||||
ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
|
||||
if (IS_ERR(ubi_class)) {
|
||||
err = PTR_ERR(ubi_class);
|
||||
ubi_err("cannot create UBI class");
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = class_create_file(ubi_class, &ubi_version);
|
||||
if (err) {
|
||||
ubi_err("cannot create sysfs file");
|
||||
goto out_class;
|
||||
}
|
||||
err = class_register(&ubi_class);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
err = misc_register(&ubi_ctrl_cdev);
|
||||
if (err) {
|
||||
ubi_err("cannot register device");
|
||||
goto out_version;
|
||||
pr_err("UBI error: cannot register device");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
|
||||
@ -1297,7 +1297,8 @@ int ubi_init(void)
|
||||
mtd = open_mtd_device(p->name);
|
||||
if (IS_ERR(mtd)) {
|
||||
err = PTR_ERR(mtd);
|
||||
ubi_err("cannot open mtd %s, error %d", p->name, err);
|
||||
pr_err("UBI error: cannot open mtd %s, error %d",
|
||||
p->name, err);
|
||||
/* See comment below re-ubi_is_module(). */
|
||||
if (ubi_is_module())
|
||||
goto out_detach;
|
||||
@ -1309,7 +1310,8 @@ int ubi_init(void)
|
||||
p->vid_hdr_offs, p->max_beb_per1024);
|
||||
mutex_unlock(&ubi_devices_mutex);
|
||||
if (err < 0) {
|
||||
ubi_err("cannot attach mtd%d", mtd->index);
|
||||
pr_err("UBI error: cannot attach mtd%d",
|
||||
mtd->index);
|
||||
put_mtd_device(mtd);
|
||||
|
||||
/*
|
||||
@ -1332,7 +1334,7 @@ int ubi_init(void)
|
||||
|
||||
err = ubiblock_init();
|
||||
if (err) {
|
||||
ubi_err("block: cannot initialize, error %d", err);
|
||||
pr_err("UBI error: block: cannot initialize, error %d", err);
|
||||
|
||||
/* See comment above re-ubi_is_module(). */
|
||||
if (ubi_is_module())
|
||||
@ -1353,16 +1355,13 @@ out_slab:
|
||||
kmem_cache_destroy(ubi_wl_entry_slab);
|
||||
out_dev_unreg:
|
||||
misc_deregister(&ubi_ctrl_cdev);
|
||||
out_version:
|
||||
class_remove_file(ubi_class, &ubi_version);
|
||||
out_class:
|
||||
class_destroy(ubi_class);
|
||||
out:
|
||||
#ifdef __UBOOT__
|
||||
/* Reset any globals that the driver depends on being zeroed */
|
||||
mtd_devs = 0;
|
||||
#endif
|
||||
ubi_err("cannot initialize UBI, error %d", err);
|
||||
class_unregister(&ubi_class);
|
||||
pr_err("UBI error: cannot initialize UBI, error %d", err);
|
||||
return err;
|
||||
}
|
||||
late_initcall(ubi_init);
|
||||
@ -1386,8 +1385,7 @@ void ubi_exit(void)
|
||||
ubi_debugfs_exit();
|
||||
kmem_cache_destroy(ubi_wl_entry_slab);
|
||||
misc_deregister(&ubi_ctrl_cdev);
|
||||
class_remove_file(ubi_class, &ubi_version);
|
||||
class_destroy(ubi_class);
|
||||
class_unregister(&ubi_class);
|
||||
#ifdef __UBOOT__
|
||||
/* Reset any globals that the driver depends on being zeroed */
|
||||
mtd_devs = 0;
|
||||
@ -1409,7 +1407,7 @@ static int __init bytes_str_to_int(const char *str)
|
||||
|
||||
result = simple_strtoul(str, &endp, 0);
|
||||
if (str == endp || result >= INT_MAX) {
|
||||
ubi_err("incorrect bytes count: \"%s\"\n", str);
|
||||
pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1425,7 +1423,7 @@ static int __init bytes_str_to_int(const char *str)
|
||||
case '\0':
|
||||
break;
|
||||
default:
|
||||
ubi_err("incorrect bytes count: \"%s\"\n", str);
|
||||
pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1467,14 +1465,14 @@ int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
|
||||
return -EINVAL;
|
||||
|
||||
if (mtd_devs == UBI_MAX_DEVICES) {
|
||||
ubi_err("too many parameters, max. is %d\n",
|
||||
pr_err("UBI error: too many parameters, max. is %d\n",
|
||||
UBI_MAX_DEVICES);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
len = strnlen(val, MTD_PARAM_LEN_MAX);
|
||||
if (len == MTD_PARAM_LEN_MAX) {
|
||||
ubi_err("parameter \"%s\" is too long, max. is %d\n",
|
||||
pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
|
||||
val, MTD_PARAM_LEN_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1494,7 +1492,7 @@ int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
|
||||
tokens[i] = strsep(&pbuf, ",");
|
||||
|
||||
if (pbuf) {
|
||||
ubi_err("too many arguments at \"%s\"\n", val);
|
||||
pr_err("UBI error: too many arguments at \"%s\"\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1514,7 +1512,7 @@ int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
|
||||
int err = kstrtoint(token, 10, &p->max_beb_per1024);
|
||||
|
||||
if (err) {
|
||||
ubi_err("bad value for max_beb_per1024 parameter: %s",
|
||||
pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
|
||||
token);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1525,7 +1523,8 @@ int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
|
||||
int err = kstrtoint(token, 10, &p->ubi_num);
|
||||
|
||||
if (err) {
|
||||
ubi_err("bad value for ubi_num parameter: %s", token);
|
||||
pr_err("UBI error: bad value for ubi_num parameter: %s",
|
||||
token);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else
|
||||
@ -1552,6 +1551,8 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
module_param(fm_autoconvert, bool, 0644);
|
||||
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
|
||||
module_param(fm_debug, bool, 0);
|
||||
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
|
||||
#endif
|
||||
MODULE_VERSION(__stringify(UBI_VERSION));
|
||||
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
|
||||
|
@ -33,12 +33,12 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
|
||||
return;
|
||||
err = mtd_read(ubi->mtd, addr, len, &read, buf);
|
||||
if (err && err != -EUCLEAN) {
|
||||
ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
err, len, pnum, offset, read);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
|
||||
ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
|
||||
len, pnum, offset);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
|
||||
out:
|
||||
@ -229,7 +229,7 @@ int ubi_debugfs_init(void)
|
||||
if (IS_ERR_OR_NULL(dfs_rootdir)) {
|
||||
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
|
||||
|
||||
ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
|
||||
pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
@ -254,7 +254,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
|
||||
struct dentry *dent = file->f_path.dentry;
|
||||
struct ubi_device *ubi;
|
||||
struct ubi_debug_info *d;
|
||||
char buf[3];
|
||||
char buf[8];
|
||||
int val;
|
||||
|
||||
ubi = ubi_get_device(ubi_num);
|
||||
@ -266,12 +266,30 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
|
||||
val = d->chk_gen;
|
||||
else if (dent == d->dfs_chk_io)
|
||||
val = d->chk_io;
|
||||
else if (dent == d->dfs_chk_fastmap)
|
||||
val = d->chk_fastmap;
|
||||
else if (dent == d->dfs_disable_bgt)
|
||||
val = d->disable_bgt;
|
||||
else if (dent == d->dfs_emulate_bitflips)
|
||||
val = d->emulate_bitflips;
|
||||
else if (dent == d->dfs_emulate_io_failures)
|
||||
val = d->emulate_io_failures;
|
||||
else if (dent == d->dfs_emulate_power_cut) {
|
||||
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
|
||||
count = simple_read_from_buffer(user_buf, count, ppos,
|
||||
buf, strlen(buf));
|
||||
goto out;
|
||||
} else if (dent == d->dfs_power_cut_min) {
|
||||
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
|
||||
count = simple_read_from_buffer(user_buf, count, ppos,
|
||||
buf, strlen(buf));
|
||||
goto out;
|
||||
} else if (dent == d->dfs_power_cut_max) {
|
||||
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
|
||||
count = simple_read_from_buffer(user_buf, count, ppos,
|
||||
buf, strlen(buf));
|
||||
goto out;
|
||||
}
|
||||
else {
|
||||
count = -EINVAL;
|
||||
goto out;
|
||||
@ -300,7 +318,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
|
||||
struct ubi_device *ubi;
|
||||
struct ubi_debug_info *d;
|
||||
size_t buf_size;
|
||||
char buf[8];
|
||||
char buf[8] = {0};
|
||||
int val;
|
||||
|
||||
ubi = ubi_get_device(ubi_num);
|
||||
@ -314,6 +332,21 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dent == d->dfs_power_cut_min) {
|
||||
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
|
||||
count = -EINVAL;
|
||||
goto out;
|
||||
} else if (dent == d->dfs_power_cut_max) {
|
||||
if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
|
||||
count = -EINVAL;
|
||||
goto out;
|
||||
} else if (dent == d->dfs_emulate_power_cut) {
|
||||
if (kstrtoint(buf, 0, &val) != 0)
|
||||
count = -EINVAL;
|
||||
d->emulate_power_cut = val;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (buf[0] == '1')
|
||||
val = 1;
|
||||
else if (buf[0] == '0')
|
||||
@ -327,6 +360,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
|
||||
d->chk_gen = val;
|
||||
else if (dent == d->dfs_chk_io)
|
||||
d->chk_io = val;
|
||||
else if (dent == d->dfs_chk_fastmap)
|
||||
d->chk_fastmap = val;
|
||||
else if (dent == d->dfs_disable_bgt)
|
||||
d->disable_bgt = val;
|
||||
else if (dent == d->dfs_emulate_bitflips)
|
||||
@ -397,6 +432,13 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
|
||||
goto out_remove;
|
||||
d->dfs_chk_io = dent;
|
||||
|
||||
fname = "chk_fastmap";
|
||||
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
|
||||
&dfs_fops);
|
||||
if (IS_ERR_OR_NULL(dent))
|
||||
goto out_remove;
|
||||
d->dfs_chk_fastmap = dent;
|
||||
|
||||
fname = "tst_disable_bgt";
|
||||
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
|
||||
&dfs_fops);
|
||||
@ -418,13 +460,34 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
|
||||
goto out_remove;
|
||||
d->dfs_emulate_io_failures = dent;
|
||||
|
||||
fname = "tst_emulate_power_cut";
|
||||
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
|
||||
&dfs_fops);
|
||||
if (IS_ERR_OR_NULL(dent))
|
||||
goto out_remove;
|
||||
d->dfs_emulate_power_cut = dent;
|
||||
|
||||
fname = "tst_emulate_power_cut_min";
|
||||
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
|
||||
&dfs_fops);
|
||||
if (IS_ERR_OR_NULL(dent))
|
||||
goto out_remove;
|
||||
d->dfs_power_cut_min = dent;
|
||||
|
||||
fname = "tst_emulate_power_cut_max";
|
||||
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
|
||||
&dfs_fops);
|
||||
if (IS_ERR_OR_NULL(dent))
|
||||
goto out_remove;
|
||||
d->dfs_power_cut_max = dent;
|
||||
|
||||
return 0;
|
||||
|
||||
out_remove:
|
||||
debugfs_remove_recursive(d->dfs_dir);
|
||||
out:
|
||||
err = dent ? PTR_ERR(dent) : -ENODEV;
|
||||
ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
fname, err);
|
||||
return err;
|
||||
}
|
||||
@ -438,6 +501,39 @@ void ubi_debugfs_exit_dev(struct ubi_device *ubi)
|
||||
if (IS_ENABLED(CONFIG_DEBUG_FS))
|
||||
debugfs_remove_recursive(ubi->dbg.dfs_dir);
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_dbg_power_cut - emulate a power cut if it is time to do so
|
||||
* @ubi: UBI device description object
|
||||
* @caller: Flags set to indicate from where the function is being called
|
||||
*
|
||||
* Returns non-zero if a power cut was emulated, zero if not.
|
||||
*/
|
||||
int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
|
||||
{
|
||||
unsigned int range;
|
||||
|
||||
if ((ubi->dbg.emulate_power_cut & caller) == 0)
|
||||
return 0;
|
||||
|
||||
if (ubi->dbg.power_cut_counter == 0) {
|
||||
ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
|
||||
|
||||
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
|
||||
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
|
||||
ubi->dbg.power_cut_counter += prandom_u32() % range;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ubi->dbg.power_cut_counter--;
|
||||
if (ubi->dbg.power_cut_counter)
|
||||
return 0;
|
||||
|
||||
ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
|
||||
ubi_ro_mode(ubi);
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
int ubi_debugfs_init(void)
|
||||
{
|
||||
@ -456,4 +552,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
|
||||
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
|
||||
{
|
||||
}
|
||||
|
||||
int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
@ -117,4 +117,16 @@ static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
{
	return ubi->dbg.chk_gen;
}

static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
{
	return ubi->dbg.chk_fastmap;
}

static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
{
	ubi->dbg.chk_fastmap = 1;
}

int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */
@ -333,9 +333,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
|
||||
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
|
||||
|
||||
down_read(&ubi->fm_sem);
|
||||
down_read(&ubi->fm_eba_sem);
|
||||
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
|
||||
up_read(&ubi->fm_sem);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
|
||||
|
||||
out_unlock:
|
||||
@ -415,12 +415,14 @@ retry:
|
||||
*/
|
||||
if (err == UBI_IO_BAD_HDR_EBADMSG ||
|
||||
err == UBI_IO_BAD_HDR) {
|
||||
ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
|
||||
ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
|
||||
pnum, vol_id, lnum);
|
||||
err = -EBADMSG;
|
||||
} else
|
||||
} else {
|
||||
err = -EINVAL;
|
||||
ubi_ro_mode(ubi);
|
||||
}
|
||||
}
|
||||
goto out_free;
|
||||
} else if (err == UBI_IO_BITFLIPS)
|
||||
scrub = 1;
|
||||
@ -434,15 +436,14 @@ retry:
|
||||
|
||||
err = ubi_io_read_data(ubi, buf, pnum, offset, len);
|
||||
if (err) {
|
||||
if (err == UBI_IO_BITFLIPS) {
|
||||
if (err == UBI_IO_BITFLIPS)
|
||||
scrub = 1;
|
||||
err = 0;
|
||||
} else if (mtd_is_eccerr(err)) {
|
||||
else if (mtd_is_eccerr(err)) {
|
||||
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
|
||||
goto out_unlock;
|
||||
scrub = 1;
|
||||
if (!check) {
|
||||
ubi_msg("force data checking");
|
||||
ubi_msg(ubi, "force data checking");
|
||||
check = 1;
|
||||
goto retry;
|
||||
}
|
||||
@ -453,7 +454,7 @@ retry:
|
||||
if (check) {
|
||||
uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
|
||||
if (crc1 != crc) {
|
||||
ubi_warn("CRC error: calculated %#08x, must be %#08x",
|
||||
ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
|
||||
crc1, crc);
|
||||
err = -EBADMSG;
|
||||
goto out_unlock;
|
||||
@ -473,6 +474,63 @@ out_unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifndef __UBOOT__
|
||||
/**
|
||||
* ubi_eba_read_leb_sg - read data into a scatter gather list.
|
||||
* @ubi: UBI device description object
|
||||
* @vol: volume description object
|
||||
* @lnum: logical eraseblock number
|
||||
* @sgl: UBI scatter gather list to store the read data
|
||||
* @offset: offset from where to read
|
||||
* @len: how many bytes to read
|
||||
* @check: data CRC check flag
|
||||
*
|
||||
* This function works exactly like ubi_eba_read_leb(). But instead of
|
||||
* storing the read data into a buffer it writes to an UBI scatter gather
|
||||
* list.
|
||||
*/
|
||||
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
struct ubi_sgl *sgl, int lnum, int offset, int len,
|
||||
int check)
|
||||
{
|
||||
int to_read;
|
||||
int ret;
|
||||
struct scatterlist *sg;
|
||||
|
||||
for (;;) {
|
||||
ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
|
||||
sg = &sgl->sg[sgl->list_pos];
|
||||
if (len < sg->length - sgl->page_pos)
|
||||
to_read = len;
|
||||
else
|
||||
to_read = sg->length - sgl->page_pos;
|
||||
|
||||
ret = ubi_eba_read_leb(ubi, vol, lnum,
|
||||
sg_virt(sg) + sgl->page_pos, offset,
|
||||
to_read, check);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
offset += to_read;
|
||||
len -= to_read;
|
||||
if (!len) {
|
||||
sgl->page_pos += to_read;
|
||||
if (sgl->page_pos == sg->length) {
|
||||
sgl->list_pos++;
|
||||
sgl->page_pos = 0;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
sgl->list_pos++;
|
||||
sgl->page_pos = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
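
As a rough stand-alone model of how the loop in ubi_eba_read_leb_sg() splits one read across a scatter-gather list: buffer lengths and the requested length below are invented, and the real code fills the buffers from flash via ubi_eba_read_leb().

#include <stdio.h>

struct sg_buf { int length; };

int main(void)
{
	struct sg_buf sg[3] = { {64}, {64}, {128} };
	int list_pos = 0, page_pos = 0;
	int offset = 0, len = 150;	/* bytes requested from the LEB */

	while (len) {
		int to_read = sg[list_pos].length - page_pos;

		if (len < to_read)
			to_read = len;

		printf("read %3d bytes at LEB offset %3d into sg[%d]+%d\n",
		       to_read, offset, list_pos, page_pos);

		offset += to_read;
		len -= to_read;
		if (!len) {
			page_pos += to_read;
			if (page_pos == sg[list_pos].length) {
				list_pos++;
				page_pos = 0;
			}
			break;
		}
		list_pos++;
		page_pos = 0;
	}
	return 0;
}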
|
||||
|
||||
/**
|
||||
* recover_peb - recover from write failure.
|
||||
* @ubi: UBI device description object
|
||||
@ -504,22 +562,27 @@ retry:
|
||||
new_pnum = ubi_wl_get_peb(ubi);
|
||||
if (new_pnum < 0) {
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
return new_pnum;
|
||||
}
|
||||
|
||||
ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
|
||||
ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
|
||||
pnum, new_pnum);
|
||||
|
||||
err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
|
||||
if (err && err != UBI_IO_BITFLIPS) {
|
||||
if (err > 0)
|
||||
err = -EIO;
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
|
||||
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
|
||||
if (err)
|
||||
if (err) {
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
data_size = offset + len;
|
||||
mutex_lock(&ubi->buf_mutex);
|
||||
@ -528,27 +591,29 @@ retry:
|
||||
/* Read everything before the area where the write failure happened */
|
||||
if (offset > 0) {
|
||||
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
|
||||
if (err && err != UBI_IO_BITFLIPS)
|
||||
if (err && err != UBI_IO_BITFLIPS) {
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(ubi->peb_buf + offset, buf, len);
|
||||
|
||||
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
|
||||
if (err) {
|
||||
mutex_unlock(&ubi->buf_mutex);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
mutex_unlock(&ubi->buf_mutex);
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
|
||||
down_read(&ubi->fm_sem);
|
||||
vol->eba_tbl[lnum] = new_pnum;
|
||||
up_read(&ubi->fm_sem);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
|
||||
|
||||
ubi_msg("data was successfully recovered");
|
||||
ubi_msg(ubi, "data was successfully recovered");
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
@ -563,13 +628,13 @@ write_error:
|
||||
* Bad luck? This physical eraseblock is bad too? Crud. Let's try to
|
||||
* get another one.
|
||||
*/
|
||||
ubi_warn("failed to write to PEB %d", new_pnum);
|
||||
ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
|
||||
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
|
||||
if (++tries > UBI_IO_RETRIES) {
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
return err;
|
||||
}
|
||||
ubi_msg("try again");
|
||||
ubi_msg(ubi, "try again");
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@ -607,7 +672,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
|
||||
|
||||
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
|
||||
if (err) {
|
||||
ubi_warn("failed to write data to PEB %d", pnum);
|
||||
ubi_warn(ubi, "failed to write data to PEB %d", pnum);
|
||||
if (err == -EIO && ubi->bad_allowed)
|
||||
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
|
||||
offset, len);
|
||||
@ -640,6 +705,7 @@ retry:
|
||||
if (pnum < 0) {
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
leb_write_unlock(ubi, vol_id, lnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
return pnum;
|
||||
}
|
||||
|
||||
@ -648,23 +714,24 @@ retry:
|
||||
|
||||
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
|
||||
if (err) {
|
||||
ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
|
||||
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
|
||||
vol_id, lnum, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
if (len) {
|
||||
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
|
||||
if (err) {
|
||||
ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
|
||||
ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
|
||||
len, offset, vol_id, lnum, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
}
|
||||
|
||||
down_read(&ubi->fm_sem);
|
||||
vol->eba_tbl[lnum] = pnum;
|
||||
up_read(&ubi->fm_sem);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
|
||||
leb_write_unlock(ubi, vol_id, lnum);
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
@ -692,7 +759,7 @@ write_error:
|
||||
}
|
||||
|
||||
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
|
||||
ubi_msg("try another PEB");
|
||||
ubi_msg(ubi, "try another PEB");
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@ -761,6 +828,7 @@ retry:
|
||||
if (pnum < 0) {
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
leb_write_unlock(ubi, vol_id, lnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
return pnum;
|
||||
}
|
||||
|
||||
@ -769,22 +837,23 @@ retry:
|
||||
|
||||
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
|
||||
if (err) {
|
||||
ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
|
||||
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
|
||||
vol_id, lnum, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
|
||||
if (err) {
|
||||
ubi_warn("failed to write %d bytes of data to PEB %d",
|
||||
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
|
||||
len, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
ubi_assert(vol->eba_tbl[lnum] < 0);
|
||||
down_read(&ubi->fm_sem);
|
||||
vol->eba_tbl[lnum] = pnum;
|
||||
up_read(&ubi->fm_sem);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
|
||||
leb_write_unlock(ubi, vol_id, lnum);
|
||||
ubi_free_vid_hdr(ubi, vid_hdr);
|
||||
@ -812,7 +881,7 @@ write_error:
|
||||
}
|
||||
|
||||
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
|
||||
ubi_msg("try another PEB");
|
||||
ubi_msg(ubi, "try another PEB");
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@ -836,7 +905,7 @@ write_error:
|
||||
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
int lnum, const void *buf, int len)
|
||||
{
|
||||
int err, pnum, tries = 0, vol_id = vol->vol_id;
|
||||
int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
|
||||
struct ubi_vid_hdr *vid_hdr;
|
||||
uint32_t crc;
|
||||
|
||||
@ -879,6 +948,7 @@ retry:
|
||||
pnum = ubi_wl_get_peb(ubi);
|
||||
if (pnum < 0) {
|
||||
err = pnum;
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto out_leb_unlock;
|
||||
}
|
||||
|
||||
@ -887,28 +957,30 @@ retry:
|
||||
|
||||
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
|
||||
if (err) {
|
||||
ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
|
||||
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
|
||||
vol_id, lnum, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
|
||||
if (err) {
|
||||
ubi_warn("failed to write %d bytes of data to PEB %d",
|
||||
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
|
||||
len, pnum);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto write_error;
|
||||
}
|
||||
|
||||
if (vol->eba_tbl[lnum] >= 0) {
|
||||
err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
|
||||
old_pnum = vol->eba_tbl[lnum];
|
||||
vol->eba_tbl[lnum] = pnum;
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
|
||||
if (old_pnum >= 0) {
|
||||
err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
|
||||
if (err)
|
||||
goto out_leb_unlock;
|
||||
}
|
||||
|
||||
down_read(&ubi->fm_sem);
|
||||
vol->eba_tbl[lnum] = pnum;
|
||||
up_read(&ubi->fm_sem);
|
||||
|
||||
out_leb_unlock:
|
||||
leb_write_unlock(ubi, vol_id, lnum);
|
||||
out_mutex:
|
||||
@ -934,7 +1006,7 @@ write_error:
|
||||
}
|
||||
|
||||
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
|
||||
ubi_msg("try another PEB");
|
||||
ubi_msg(ubi, "try another PEB");
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@ -1057,7 +1129,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
|
||||
dbg_wl("read %d bytes of data", aldata_size);
|
||||
err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
|
||||
if (err && err != UBI_IO_BITFLIPS) {
|
||||
ubi_warn("error %d while reading data from PEB %d",
|
||||
ubi_warn(ubi, "error %d while reading data from PEB %d",
|
||||
err, from);
|
||||
err = MOVE_SOURCE_RD_ERR;
|
||||
goto out_unlock_buf;
|
||||
@ -1107,7 +1179,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
|
||||
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
|
||||
if (err) {
|
||||
if (err != UBI_IO_BITFLIPS) {
|
||||
ubi_warn("error %d while reading VID header back from PEB %d",
|
||||
ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
|
||||
err, to);
|
||||
if (is_error_sane(err))
|
||||
err = MOVE_TARGET_RD_ERR;
|
||||
@ -1134,7 +1206,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
|
||||
err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
|
||||
if (err) {
|
||||
if (err != UBI_IO_BITFLIPS) {
|
||||
ubi_warn("error %d while reading data back from PEB %d",
|
||||
ubi_warn(ubi, "error %d while reading data back from PEB %d",
|
||||
err, to);
|
||||
if (is_error_sane(err))
|
||||
err = MOVE_TARGET_RD_ERR;
|
||||
@ -1146,7 +1218,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
|
||||
cond_resched();
|
||||
|
||||
if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
|
||||
ubi_warn("read data back from PEB %d and it is different",
|
||||
ubi_warn(ubi, "read data back from PEB %d and it is different",
|
||||
to);
|
||||
err = -EINVAL;
|
||||
goto out_unlock_buf;
|
||||
@ -1154,9 +1226,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
|
||||
}
|
||||
|
||||
ubi_assert(vol->eba_tbl[lnum] == from);
|
||||
down_read(&ubi->fm_sem);
|
||||
down_read(&ubi->fm_eba_sem);
|
||||
vol->eba_tbl[lnum] = to;
|
||||
up_read(&ubi->fm_sem);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
|
||||
out_unlock_buf:
|
||||
mutex_unlock(&ubi->buf_mutex);
|
||||
@ -1199,10 +1271,10 @@ static void print_rsvd_warning(struct ubi_device *ubi,
|
||||
return;
|
||||
}
|
||||
|
||||
ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
|
||||
ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
|
||||
ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
|
||||
if (ubi->corr_peb_count)
|
||||
ubi_warn("%d PEBs are corrupted and not used",
|
||||
ubi_warn(ubi, "%d PEBs are corrupted and not used",
|
||||
ubi->corr_peb_count);
|
||||
}
|
||||
|
||||
@ -1280,7 +1352,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
|
||||
fm_eba[i][j] == UBI_LEB_UNMAPPED)
|
||||
continue;
|
||||
|
||||
ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
|
||||
ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
|
||||
vol->vol_id, i, fm_eba[i][j],
|
||||
scan_eba[i][j]);
|
||||
ubi_assert(0);
|
||||
@ -1355,15 +1427,16 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
* during re-size.
|
||||
*/
|
||||
ubi_move_aeb_to_list(av, aeb, &ai->erase);
|
||||
else
|
||||
vol->eba_tbl[aeb->lnum] = aeb->pnum;
|
||||
}
|
||||
}
|
||||
|
||||
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
|
||||
ubi_err("no enough physical eraseblocks (%d, need %d)",
|
||||
ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
|
||||
ubi->avail_pebs, EBA_RESERVED_PEBS);
|
||||
if (ubi->corr_peb_count)
|
||||
ubi_err("%d PEBs are corrupted and not used",
|
||||
ubi_err(ubi, "%d PEBs are corrupted and not used",
|
||||
ubi->corr_peb_count);
|
||||
err = -ENOSPC;
|
||||
goto out_free;
|
||||
|
372 drivers/mtd/ubi/fastmap-wl.c Normal file
@ -0,0 +1,372 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Linutronix GmbH
|
||||
* Copyright (c) 2014 sigma star gmbh
|
||||
* Author: Richard Weinberger <richard@nod.at>
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0+
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
|
||||
* @wrk: the work description object
|
||||
*/
|
||||
#ifndef __UBOOT__
|
||||
static void update_fastmap_work_fn(struct work_struct *wrk)
|
||||
#else
|
||||
void update_fastmap_work_fn(struct ubi_device *ubi)
|
||||
#endif
|
||||
{
|
||||
#ifndef __UBOOT__
|
||||
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
|
||||
#endif
|
||||
|
||||
ubi_update_fastmap(ubi);
|
||||
spin_lock(&ubi->wl_lock);
|
||||
ubi->fm_work_scheduled = 0;
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
|
||||
* @root: the RB-tree where to look for
|
||||
*/
|
||||
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
|
||||
{
|
||||
struct rb_node *p;
|
||||
struct ubi_wl_entry *e, *victim = NULL;
|
||||
int max_ec = UBI_MAX_ERASECOUNTER;
|
||||
|
||||
ubi_rb_for_each_entry(p, e, root, u.rb) {
|
||||
if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
|
||||
victim = e;
|
||||
max_ec = e->ec;
|
||||
}
|
||||
}
|
||||
|
||||
return victim;
|
||||
}
|
||||
|
||||
/**
|
||||
* return_unused_pool_pebs - returns unused PEB to the free tree.
|
||||
* @ubi: UBI device description object
|
||||
* @pool: fastmap pool description object
|
||||
*/
|
||||
static void return_unused_pool_pebs(struct ubi_device *ubi,
|
||||
struct ubi_fm_pool *pool)
|
||||
{
|
||||
int i;
|
||||
struct ubi_wl_entry *e;
|
||||
|
||||
for (i = pool->used; i < pool->size; i++) {
|
||||
e = ubi->lookuptbl[pool->pebs[i]];
|
||||
wl_tree_add(e, &ubi->free);
|
||||
ubi->free_count++;
|
||||
}
|
||||
}
|
||||
|
||||
static int anchor_pebs_avalible(struct rb_root *root)
|
||||
{
|
||||
struct rb_node *p;
|
||||
struct ubi_wl_entry *e;
|
||||
|
||||
ubi_rb_for_each_entry(p, e, root, u.rb)
|
||||
if (e->pnum < UBI_FM_MAX_START)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
|
||||
* @ubi: UBI device description object
|
||||
* @anchor: This PEB will be used as anchor PEB by fastmap
|
||||
*
|
||||
* The function returns a physical erase block with a given maximal number
|
||||
* and removes it from the wl subsystem.
|
||||
* Must be called with wl_lock held!
|
||||
*/
|
||||
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
|
||||
{
|
||||
struct ubi_wl_entry *e = NULL;
|
||||
|
||||
if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
|
||||
goto out;
|
||||
|
||||
if (anchor)
|
||||
e = find_anchor_wl_entry(&ubi->free);
|
||||
else
|
||||
e = find_mean_wl_entry(ubi, &ubi->free);
|
||||
|
||||
if (!e)
|
||||
goto out;
|
||||
|
||||
self_check_in_wl_tree(ubi, e, &ubi->free);
|
||||
|
||||
/* remove it from the free list,
|
||||
* the wl subsystem does no longer know this erase block */
|
||||
rb_erase(&e->u.rb, &ubi->free);
|
||||
ubi->free_count--;
|
||||
out:
|
||||
return e;
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_refill_pools - refills all fastmap PEB pools.
|
||||
* @ubi: UBI device description object
|
||||
*/
|
||||
void ubi_refill_pools(struct ubi_device *ubi)
|
||||
{
|
||||
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
|
||||
struct ubi_fm_pool *pool = &ubi->fm_pool;
|
||||
struct ubi_wl_entry *e;
|
||||
int enough;
|
||||
|
||||
spin_lock(&ubi->wl_lock);
|
||||
|
||||
return_unused_pool_pebs(ubi, wl_pool);
|
||||
return_unused_pool_pebs(ubi, pool);
|
||||
|
||||
wl_pool->size = 0;
|
||||
pool->size = 0;
|
||||
|
||||
for (;;) {
|
||||
enough = 0;
|
||||
if (pool->size < pool->max_size) {
|
||||
if (!ubi->free.rb_node)
|
||||
break;
|
||||
|
||||
e = wl_get_wle(ubi);
|
||||
if (!e)
|
||||
break;
|
||||
|
||||
pool->pebs[pool->size] = e->pnum;
|
||||
pool->size++;
|
||||
} else
|
||||
enough++;
|
||||
|
||||
if (wl_pool->size < wl_pool->max_size) {
|
||||
if (!ubi->free.rb_node ||
|
||||
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
|
||||
break;
|
||||
|
||||
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
|
||||
self_check_in_wl_tree(ubi, e, &ubi->free);
|
||||
rb_erase(&e->u.rb, &ubi->free);
|
||||
ubi->free_count--;
|
||||
|
||||
wl_pool->pebs[wl_pool->size] = e->pnum;
|
||||
wl_pool->size++;
|
||||
} else
|
||||
enough++;
|
||||
|
||||
if (enough == 2)
|
||||
break;
|
||||
}
|
||||
|
||||
wl_pool->used = 0;
|
||||
pool->used = 0;
|
||||
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
}
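
Schematically, ubi_refill_pools() tops up both pools in lock-step until each is full or the free space drops below its threshold. A toy stand-alone version follows; all sizes are invented and the free RB-tree is reduced to a counter.

#include <stdio.h>

#define POOL_MAX	8	/* stands in for fm_pool.max_size */
#define WL_POOL_MAX	4	/* stands in for fm_wl_pool.max_size */
#define BEB_RSVD	2	/* stands in for beb_rsvd_pebs */

int main(void)
{
	int free_count = 10;		/* pretend size of the free RB-tree */
	int pool = 0, wl_pool = 0;	/* current fill levels */

	for (;;) {
		int enough = 0;

		if (pool < POOL_MAX) {
			if (!free_count)
				break;
			free_count--;	/* take a PEB for the user pool */
			pool++;
		} else
			enough++;

		if (wl_pool < WL_POOL_MAX) {
			if (free_count - BEB_RSVD < 5)
				break;
			free_count--;	/* take a PEB for the WL pool */
			wl_pool++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	printf("user pool %d/%d, wl pool %d/%d, %d free PEBs left\n",
	       pool, POOL_MAX, wl_pool, WL_POOL_MAX, free_count);
	return 0;
}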
|
||||
|
||||
/**
|
||||
* ubi_wl_get_peb - get a physical eraseblock.
|
||||
* @ubi: UBI device description object
|
||||
*
|
||||
* This function returns a physical eraseblock in case of success and a
|
||||
* negative error code in case of failure.
|
||||
* Returns with ubi->fm_eba_sem held in read mode!
|
||||
*/
|
||||
int ubi_wl_get_peb(struct ubi_device *ubi)
|
||||
{
|
||||
int ret, retried = 0;
|
||||
struct ubi_fm_pool *pool = &ubi->fm_pool;
|
||||
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
|
||||
|
||||
again:
|
||||
down_read(&ubi->fm_eba_sem);
|
||||
spin_lock(&ubi->wl_lock);
|
||||
|
||||
/* We check here also for the WL pool because at this point we can
|
||||
* refill the WL pool synchronous. */
|
||||
if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
ret = ubi_update_fastmap(ubi);
|
||||
if (ret) {
|
||||
ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
|
||||
down_read(&ubi->fm_eba_sem);
|
||||
return -ENOSPC;
|
||||
}
|
||||
down_read(&ubi->fm_eba_sem);
|
||||
spin_lock(&ubi->wl_lock);
|
||||
}
|
||||
|
||||
if (pool->used == pool->size) {
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
if (retried) {
|
||||
ubi_err(ubi, "Unable to get a free PEB from user WL pool");
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
retried = 1;
|
||||
up_read(&ubi->fm_eba_sem);
|
||||
goto again;
|
||||
}
|
||||
|
||||
ubi_assert(pool->used < pool->size);
|
||||
ret = pool->pebs[pool->used++];
|
||||
prot_queue_add(ubi, ubi->lookuptbl[ret]);
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
out:
|
||||
return ret;
|
||||
}
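
Stripped of locking, ubi_wl_get_peb() above boils down to a "write a new fastmap to refill, then retry once" pattern. The stand-alone sketch below is only schematic: the pool state and the refill stub are invented.

#include <stdio.h>
#include <errno.h>

static int pool_used = 4, pool_size = 4;	/* toy, initially exhausted */

static int write_new_fastmap(void)	/* stands in for ubi_update_fastmap() */
{
	pool_used = 0;
	pool_size = 4;			/* pretend the refill found 4 free PEBs */
	return 0;
}

static int get_peb(void)
{
	int retried = 0;

again:
	if (pool_used == pool_size) {
		if (write_new_fastmap())
			return -ENOSPC;	/* could not even write a fastmap */
		if (pool_used == pool_size) {
			if (retried)
				return -ENOSPC;
			retried = 1;
			goto again;
		}
	}
	return pool_used++;		/* slot index stands in for a PEB number */
}

int main(void)
{
	printf("got pool slot %d\n", get_peb());
	return 0;
}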
|
||||
|
||||
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
|
||||
*
|
||||
* @ubi: UBI device description object
|
||||
*/
|
||||
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
|
||||
{
|
||||
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
|
||||
int pnum;
|
||||
|
||||
if (pool->used == pool->size) {
|
||||
#ifndef __UBOOT__
|
||||
/* We cannot update the fastmap here because this
|
||||
* function is called in atomic context.
|
||||
* Let's fail here and refill/update it as soon as possible. */
|
||||
if (!ubi->fm_work_scheduled) {
|
||||
ubi->fm_work_scheduled = 1;
|
||||
schedule_work(&ubi->fm_work);
|
||||
}
|
||||
return NULL;
|
||||
#else
|
||||
/*
|
||||
* No work queues in U-Boot, we must do this immediately
|
||||
*/
|
||||
update_fastmap_work_fn(ubi);
|
||||
#endif
|
||||
}
|
||||
|
||||
pnum = pool->pebs[pool->used++];
|
||||
return ubi->lookuptbl[pnum];
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
|
||||
* @ubi: UBI device description object
|
||||
*/
|
||||
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
|
||||
{
|
||||
struct ubi_work *wrk;
|
||||
|
||||
spin_lock(&ubi->wl_lock);
|
||||
if (ubi->wl_scheduled) {
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
return 0;
|
||||
}
|
||||
ubi->wl_scheduled = 1;
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
|
||||
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
|
||||
if (!wrk) {
|
||||
spin_lock(&ubi->wl_lock);
|
||||
ubi->wl_scheduled = 0;
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
wrk->anchor = 1;
|
||||
wrk->func = &wear_leveling_worker;
|
||||
schedule_ubi_work(ubi, wrk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
|
||||
* sub-system.
|
||||
* see: ubi_wl_put_peb()
|
||||
*
|
||||
* @ubi: UBI device description object
|
||||
* @fm_e: physical eraseblock to return
|
||||
* @lnum: the last used logical eraseblock number for the PEB
|
||||
* @torture: if this physical eraseblock has to be tortured
|
||||
*/
|
||||
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
|
||||
int lnum, int torture)
|
||||
{
|
||||
struct ubi_wl_entry *e;
|
||||
int vol_id, pnum = fm_e->pnum;
|
||||
|
||||
dbg_wl("PEB %d", pnum);
|
||||
|
||||
ubi_assert(pnum >= 0);
|
||||
ubi_assert(pnum < ubi->peb_count);
|
||||
|
||||
spin_lock(&ubi->wl_lock);
|
||||
e = ubi->lookuptbl[pnum];
|
||||
|
||||
/* This can happen if we recovered from a fastmap the very
|
||||
* first time and writing now a new one. In this case the wl system
|
||||
* has never seen any PEB used by the original fastmap.
|
||||
*/
|
||||
if (!e) {
|
||||
e = fm_e;
|
||||
ubi_assert(e->ec >= 0);
|
||||
ubi->lookuptbl[pnum] = e;
|
||||
}
|
||||
|
||||
spin_unlock(&ubi->wl_lock);
|
||||
|
||||
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
|
||||
return schedule_erase(ubi, e, vol_id, lnum, torture);
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_is_erase_work - checks whether a work is erase work.
|
||||
* @wrk: The work object to be checked
|
||||
*/
|
||||
int ubi_is_erase_work(struct ubi_work *wrk)
|
||||
{
|
||||
return wrk->func == erase_worker;
|
||||
}
|
||||
|
||||
static void ubi_fastmap_close(struct ubi_device *ubi)
|
||||
{
|
||||
int i;
|
||||
|
||||
#ifndef __UBOOT__
|
||||
flush_work(&ubi->fm_work);
|
||||
#else
|
||||
update_fastmap_work_fn(ubi);
|
||||
#endif
|
||||
return_unused_pool_pebs(ubi, &ubi->fm_pool);
|
||||
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
|
||||
|
||||
if (ubi->fm) {
|
||||
for (i = 0; i < ubi->fm->used_blocks; i++)
|
||||
kfree(ubi->fm->e[i]);
|
||||
}
|
||||
kfree(ubi->fm);
|
||||
}
|
||||
|
||||
/**
|
||||
* may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
|
||||
* See find_mean_wl_entry()
|
||||
*
|
||||
* @ubi: UBI device description object
|
||||
* @e: physical eraseblock to return
|
||||
* @root: RB tree to test against.
|
||||
*/
|
||||
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
|
||||
struct ubi_wl_entry *e,
|
||||
struct rb_root *root) {
|
||||
if (e && !ubi->fm_disabled && !ubi->fm &&
|
||||
e->pnum < UBI_FM_MAX_START)
|
||||
e = rb_entry(rb_next(root->rb_node),
|
||||
struct ubi_wl_entry, u.rb);
|
||||
|
||||
return e;
|
||||
}
|
File diff suppressed because it is too large
@ -169,19 +169,20 @@ retry:
|
||||
* enabled. A corresponding message will be printed
|
||||
* later, when it is has been scrubbed.
|
||||
*/
|
||||
ubi_msg("fixable bit-flip detected at PEB %d", pnum);
|
||||
ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
|
||||
pnum);
|
||||
ubi_assert(len == read);
|
||||
return UBI_IO_BITFLIPS;
|
||||
}
|
||||
|
||||
if (retries++ < UBI_IO_RETRIES) {
|
||||
ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
|
||||
ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
|
||||
err, errstr, len, pnum, offset, read);
|
||||
yield();
|
||||
goto retry;
|
||||
}
|
||||
|
||||
ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
err, errstr, len, pnum, offset, read);
|
||||
dump_stack();
|
||||
|
||||
@ -238,7 +239,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
|
||||
ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
|
||||
|
||||
if (ubi->ro_mode) {
|
||||
ubi_err("read-only mode");
|
||||
ubi_err(ubi, "read-only mode");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
@ -265,7 +266,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
|
||||
}
|
||||
|
||||
if (ubi_dbg_is_write_failure(ubi)) {
|
||||
ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
|
||||
ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
|
||||
len, pnum, offset);
|
||||
dump_stack();
|
||||
return -EIO;
|
||||
@ -274,7 +275,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
|
||||
addr = (loff_t)pnum * ubi->peb_size + offset;
|
||||
err = mtd_write(ubi->mtd, addr, len, &written, buf);
|
||||
if (err) {
|
||||
ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
|
||||
ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
|
||||
err, len, pnum, offset, written);
|
||||
dump_stack();
|
||||
ubi_dump_flash(ubi, pnum, offset, len);
|
||||
@ -330,7 +331,7 @@ static int do_sync_erase(struct ubi_device *ubi, int pnum)
|
||||
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
|
||||
|
||||
if (ubi->ro_mode) {
|
||||
ubi_err("read-only mode");
|
||||
ubi_err(ubi, "read-only mode");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
@ -347,12 +348,12 @@ retry:
|
||||
err = mtd_erase(ubi->mtd, &ei);
|
||||
if (err) {
|
||||
if (retries++ < UBI_IO_RETRIES) {
|
||||
ubi_warn("error %d while erasing PEB %d, retry",
|
||||
ubi_warn(ubi, "error %d while erasing PEB %d, retry",
|
||||
err, pnum);
|
||||
yield();
|
||||
goto retry;
|
||||
}
|
||||
ubi_err("cannot erase PEB %d, error %d", pnum, err);
|
||||
ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
|
||||
dump_stack();
|
||||
return err;
|
||||
}
|
||||
@ -360,17 +361,18 @@ retry:
|
||||
err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
|
||||
ei.state == MTD_ERASE_FAILED);
|
||||
if (err) {
|
||||
ubi_err("interrupted PEB %d erasure", pnum);
|
||||
ubi_err(ubi, "interrupted PEB %d erasure", pnum);
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
if (ei.state == MTD_ERASE_FAILED) {
|
||||
if (retries++ < UBI_IO_RETRIES) {
|
||||
ubi_warn("error while erasing PEB %d, retry", pnum);
|
||||
ubi_warn(ubi, "error while erasing PEB %d, retry",
|
||||
pnum);
|
||||
yield();
|
||||
goto retry;
|
||||
}
|
||||
ubi_err("cannot erase PEB %d", pnum);
|
||||
ubi_err(ubi, "cannot erase PEB %d", pnum);
|
||||
dump_stack();
|
||||
return -EIO;
|
||||
}
|
||||
@ -380,7 +382,7 @@ retry:
|
||||
return err;
|
||||
|
||||
if (ubi_dbg_is_erase_failure(ubi)) {
|
||||
ubi_err("cannot erase PEB %d (emulated)", pnum);
|
||||
ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -403,7 +405,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
|
||||
{
|
||||
int err, i, patt_count;
|
||||
|
||||
ubi_msg("run torture test for PEB %d", pnum);
|
||||
ubi_msg(ubi, "run torture test for PEB %d", pnum);
|
||||
patt_count = ARRAY_SIZE(patterns);
|
||||
ubi_assert(patt_count > 0);
|
||||
|
||||
@ -420,7 +422,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
|
||||
|
||||
err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
|
||||
if (err == 0) {
|
||||
ubi_err("erased PEB %d, but a non-0xFF byte found",
|
||||
ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
|
||||
pnum);
|
||||
err = -EIO;
|
||||
goto out;
|
||||
@ -440,7 +442,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
|
||||
err = ubi_check_pattern(ubi->peb_buf, patterns[i],
|
||||
ubi->peb_size);
|
||||
if (err == 0) {
|
||||
ubi_err("pattern %x checking failed for PEB %d",
|
||||
ubi_err(ubi, "pattern %x checking failed for PEB %d",
|
||||
patterns[i], pnum);
|
||||
err = -EIO;
|
||||
goto out;
|
||||
@ -448,7 +450,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
|
||||
}
|
||||
|
||||
err = patt_count;
|
||||
ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
|
||||
ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
|
||||
|
||||
out:
|
||||
mutex_unlock(&ubi->buf_mutex);
|
||||
@ -458,7 +460,7 @@ out:
|
||||
* has not passed because it happened on a freshly erased
|
||||
* physical eraseblock which means something is wrong with it.
|
||||
*/
|
||||
ubi_err("read problems on freshly erased PEB %d, must be bad",
|
||||
ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
|
||||
pnum);
|
||||
err = -EIO;
|
||||
}
|
||||
@ -534,7 +536,7 @@ error:
|
||||
* it. Supposedly the flash media or the driver is screwed up, so
|
||||
* return an error.
|
||||
*/
|
||||
ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
|
||||
ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
|
||||
ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
|
||||
return -EIO;
|
||||
}
|
||||
@ -566,7 +568,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
|
||||
return err;
|
||||
|
||||
if (ubi->ro_mode) {
|
||||
ubi_err("read-only mode");
|
||||
ubi_err(ubi, "read-only mode");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
@ -608,7 +610,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
|
||||
|
||||
ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
|
||||
if (ret < 0)
|
||||
ubi_err("error %d while checking if PEB %d is bad",
|
||||
ubi_err(ubi, "error %d while checking if PEB %d is bad",
|
||||
ret, pnum);
|
||||
else if (ret)
|
||||
dbg_io("PEB %d is bad", pnum);
|
||||
@ -634,7 +636,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
|
||||
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
|
||||
|
||||
if (ubi->ro_mode) {
|
||||
ubi_err("read-only mode");
|
||||
ubi_err(ubi, "read-only mode");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
@ -643,7 +645,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
|
||||
|
||||
err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
|
||||
if (err)
|
||||
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
|
||||
ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -666,32 +668,32 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
|
||||
leb_start = be32_to_cpu(ec_hdr->data_offset);
|
||||
|
||||
if (ec_hdr->version != UBI_VERSION) {
|
||||
ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
|
||||
ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
|
||||
UBI_VERSION, (int)ec_hdr->version);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vid_hdr_offset != ubi->vid_hdr_offset) {
|
||||
ubi_err("bad VID header offset %d, expected %d",
|
||||
ubi_err(ubi, "bad VID header offset %d, expected %d",
|
||||
vid_hdr_offset, ubi->vid_hdr_offset);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (leb_start != ubi->leb_start) {
|
||||
ubi_err("bad data offset %d, expected %d",
|
||||
ubi_err(ubi, "bad data offset %d, expected %d",
|
||||
leb_start, ubi->leb_start);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
|
||||
ubi_err("bad erase counter %lld", ec);
|
||||
ubi_err(ubi, "bad erase counter %lld", ec);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
ubi_err("bad EC header");
|
||||
ubi_err(ubi, "bad EC header");
|
||||
ubi_dump_ec_hdr(ec_hdr);
|
||||
dump_stack();
|
||||
return 1;
|
||||
@ -757,7 +759,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
|
||||
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
|
||||
/* The physical eraseblock is supposedly empty */
|
||||
if (verbose)
|
||||
ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
|
||||
ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
|
||||
pnum);
|
||||
dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
|
||||
pnum);
|
||||
@ -772,7 +774,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
|
||||
* 0xFF bytes. Report that the header is corrupted.
|
||||
*/
|
||||
if (verbose) {
|
||||
ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
|
||||
ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
|
||||
pnum, magic, UBI_EC_HDR_MAGIC);
|
||||
ubi_dump_ec_hdr(ec_hdr);
|
||||
}
|
||||
@ -786,7 +788,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
|
||||
|
||||
if (hdr_crc != crc) {
|
||||
if (verbose) {
|
||||
ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
pnum, crc, hdr_crc);
|
||||
ubi_dump_ec_hdr(ec_hdr);
|
||||
}
|
||||
@ -802,7 +804,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
|
||||
/* And of course validate what has just been read from the media */
|
||||
err = validate_ec_hdr(ubi, ec_hdr);
|
||||
if (err) {
|
||||
ubi_err("validation failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "validation failed for PEB %d", pnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -849,6 +851,9 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
|
||||
return -EROFS;
|
||||
|
||||
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
|
||||
return err;
|
||||
}
|
||||
@ -876,40 +881,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
|
||||
int usable_leb_size = ubi->leb_size - data_pad;
|
||||
|
||||
if (copy_flag != 0 && copy_flag != 1) {
|
||||
ubi_err("bad copy_flag");
|
||||
ubi_err(ubi, "bad copy_flag");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
|
||||
data_pad < 0) {
|
||||
ubi_err("negative values");
|
||||
ubi_err(ubi, "negative values");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
|
||||
ubi_err("bad vol_id");
|
||||
ubi_err(ubi, "bad vol_id");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
|
||||
ubi_err("bad compat");
|
||||
ubi_err(ubi, "bad compat");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
|
||||
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
|
||||
compat != UBI_COMPAT_REJECT) {
|
||||
ubi_err("bad compat");
|
||||
ubi_err(ubi, "bad compat");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
|
||||
ubi_err("bad vol_type");
|
||||
ubi_err(ubi, "bad vol_type");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (data_pad >= ubi->leb_size / 2) {
|
||||
ubi_err("bad data_pad");
|
||||
ubi_err(ubi, "bad data_pad");
|
||||
goto bad;
|
||||
}
|
||||
|
||||
@ -921,45 +926,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
|
||||
* mapped logical eraseblocks.
|
||||
*/
|
||||
if (used_ebs == 0) {
|
||||
ubi_err("zero used_ebs");
|
||||
ubi_err(ubi, "zero used_ebs");
|
||||
goto bad;
|
||||
}
|
||||
if (data_size == 0) {
|
||||
ubi_err("zero data_size");
|
||||
ubi_err(ubi, "zero data_size");
|
||||
goto bad;
|
||||
}
|
||||
if (lnum < used_ebs - 1) {
|
||||
if (data_size != usable_leb_size) {
|
||||
ubi_err("bad data_size");
|
||||
ubi_err(ubi, "bad data_size");
|
||||
goto bad;
|
||||
}
|
||||
} else if (lnum == used_ebs - 1) {
|
||||
if (data_size == 0) {
|
||||
ubi_err("bad data_size at last LEB");
|
||||
ubi_err(ubi, "bad data_size at last LEB");
|
||||
goto bad;
|
||||
}
|
||||
} else {
|
||||
ubi_err("too high lnum");
|
||||
ubi_err(ubi, "too high lnum");
|
||||
goto bad;
|
||||
}
|
||||
} else {
|
||||
if (copy_flag == 0) {
|
||||
if (data_crc != 0) {
|
||||
ubi_err("non-zero data CRC");
|
||||
ubi_err(ubi, "non-zero data CRC");
|
||||
goto bad;
|
||||
}
|
||||
if (data_size != 0) {
|
||||
ubi_err("non-zero data_size");
|
||||
ubi_err(ubi, "non-zero data_size");
|
||||
goto bad;
|
||||
}
|
||||
} else {
|
||||
if (data_size == 0) {
|
||||
ubi_err("zero data_size of copy");
|
||||
ubi_err(ubi, "zero data_size of copy");
|
||||
goto bad;
|
||||
}
|
||||
}
|
||||
if (used_ebs != 0) {
|
||||
ubi_err("bad used_ebs");
|
||||
ubi_err(ubi, "bad used_ebs");
|
||||
goto bad;
|
||||
}
|
||||
}
|
||||
@ -967,7 +972,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
ubi_err("bad VID header");
|
||||
ubi_err(ubi, "bad VID header");
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
dump_stack();
|
||||
return 1;
|
||||
@ -1012,7 +1017,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
|
||||
|
||||
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
|
||||
if (verbose)
|
||||
ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
|
||||
ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
|
||||
pnum);
|
||||
dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
|
||||
pnum);
|
||||
@ -1023,7 +1028,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
|
||||
}
|
||||
|
||||
if (verbose) {
|
||||
ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
|
||||
ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
|
||||
pnum, magic, UBI_VID_HDR_MAGIC);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
}
|
||||
@ -1037,7 +1042,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
|
||||
|
||||
if (hdr_crc != crc) {
|
||||
if (verbose) {
|
||||
ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
pnum, crc, hdr_crc);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
}
|
||||
@ -1051,7 +1056,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
|
||||
|
||||
err = validate_vid_hdr(ubi, vid_hdr);
|
||||
if (err) {
|
||||
ubi_err("validation failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "validation failed for PEB %d", pnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1096,6 +1101,9 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
|
||||
return -EROFS;
|
||||
|
||||
p = (char *)vid_hdr - ubi->vid_hdr_shift;
|
||||
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
|
||||
ubi->vid_hdr_alsize);
|
||||
@ -1121,7 +1129,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
|
||||
if (!err)
|
||||
return err;
|
||||
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
dump_stack();
|
||||
return err > 0 ? -EINVAL : err;
|
||||
}
|
||||
@ -1146,14 +1154,14 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
|
||||
|
||||
magic = be32_to_cpu(ec_hdr->magic);
|
||||
if (magic != UBI_EC_HDR_MAGIC) {
|
||||
ubi_err("bad magic %#08x, must be %#08x",
|
||||
ubi_err(ubi, "bad magic %#08x, must be %#08x",
|
||||
magic, UBI_EC_HDR_MAGIC);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = validate_ec_hdr(ubi, ec_hdr);
|
||||
if (err) {
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -1193,8 +1201,9 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
|
||||
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
|
||||
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
|
||||
if (hdr_crc != crc) {
|
||||
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
|
||||
crc, hdr_crc);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
ubi_dump_ec_hdr(ec_hdr);
|
||||
dump_stack();
|
||||
err = -EINVAL;
|
||||
@ -1228,21 +1237,21 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
|
||||
|
||||
magic = be32_to_cpu(vid_hdr->magic);
|
||||
if (magic != UBI_VID_HDR_MAGIC) {
|
||||
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
|
||||
ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
|
||||
magic, pnum, UBI_VID_HDR_MAGIC);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = validate_vid_hdr(ubi, vid_hdr);
|
||||
if (err) {
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return err;
|
||||
|
||||
fail:
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
@ -1280,9 +1289,9 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
|
||||
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
|
||||
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
|
||||
if (hdr_crc != crc) {
|
||||
ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
|
||||
pnum, crc, hdr_crc);
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
ubi_dump_vid_hdr(vid_hdr);
|
||||
dump_stack();
|
||||
err = -EINVAL;
|
||||
@ -1321,7 +1330,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
|
||||
|
||||
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!buf1) {
|
||||
ubi_err("cannot allocate memory to check writes");
|
||||
ubi_err(ubi, "cannot allocate memory to check writes");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1339,14 +1348,15 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
|
||||
if (c == c1)
|
||||
continue;
|
||||
|
||||
ubi_err("self-check failed for PEB %d:%d, len %d",
|
||||
ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
|
||||
pnum, offset, len);
|
||||
ubi_msg("data differ at position %d", i);
|
||||
ubi_msg("hex dump of the original buffer from %d to %d",
|
||||
ubi_msg(ubi, "data differ at position %d", i);
|
||||
dump_len = max_t(int, 128, len - i);
|
||||
ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
|
||||
i, i + dump_len);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
|
||||
buf + i, dump_len, 1);
|
||||
ubi_msg("hex dump of the read buffer from %d to %d",
|
||||
ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
|
||||
i, i + dump_len);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
|
||||
buf1 + i, dump_len, 1);
|
||||
@ -1386,20 +1396,20 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
|
||||
|
||||
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!buf) {
|
||||
ubi_err("cannot allocate memory to check for 0xFFs");
|
||||
ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = mtd_read(ubi->mtd, addr, len, &read, buf);
|
||||
if (err && !mtd_is_bitflip(err)) {
|
||||
ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
|
||||
err, len, pnum, offset, read);
|
||||
goto error;
|
||||
}
|
||||
|
||||
err = ubi_check_pattern(buf, 0xFF, len);
|
||||
if (err == 0) {
|
||||
ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
|
||||
ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
|
||||
pnum, offset, len);
|
||||
goto fail;
|
||||
}
|
||||
@ -1408,8 +1418,8 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
ubi_err("self-check failed for PEB %d", pnum);
|
||||
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
|
||||
ubi_err(ubi, "self-check failed for PEB %d", pnum);
|
||||
ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
|
||||
err = -EINVAL;
|
||||
error:
|
||||
|
@ -132,7 +132,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
|
||||
mode != UBI_EXCLUSIVE)
|
||||
mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/*
|
||||
@ -177,10 +177,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
|
||||
break;
|
||||
|
||||
case UBI_EXCLUSIVE:
|
||||
if (vol->exclusive || vol->writers || vol->readers)
|
||||
if (vol->exclusive || vol->writers || vol->readers ||
|
||||
vol->metaonly)
|
||||
goto out_unlock;
|
||||
vol->exclusive = 1;
|
||||
break;
|
||||
|
||||
case UBI_METAONLY:
|
||||
if (vol->metaonly || vol->exclusive)
|
||||
goto out_unlock;
|
||||
vol->metaonly = 1;
|
||||
break;
|
||||
}
|
||||
get_device(&vol->dev);
|
||||
vol->ref_count += 1;
|
||||
@ -199,7 +206,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
if (err == 1) {
|
||||
ubi_warn("volume %d on UBI device %d is corrupted",
|
||||
ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
|
||||
vol_id, ubi->ubi_num);
|
||||
vol->corrupted = 1;
|
||||
}
|
||||
@ -216,7 +223,7 @@ out_free:
|
||||
kfree(desc);
|
||||
out_put_ubi:
|
||||
ubi_put_device(ubi);
|
||||
ubi_err("cannot open device %d, volume %d, error %d",
|
||||
ubi_err(ubi, "cannot open device %d, volume %d, error %d",
|
||||
ubi_num, vol_id, err);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -303,7 +310,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
|
||||
if (error)
|
||||
return ERR_PTR(error);
|
||||
|
||||
inode = path.dentry->d_inode;
|
||||
inode = d_backing_inode(path.dentry);
|
||||
mod = inode->i_mode;
|
||||
ubi_num = ubi_major2num(imajor(inode));
|
||||
vol_id = iminor(inode) - 1;
|
||||
@ -340,6 +347,10 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
|
||||
break;
|
||||
case UBI_EXCLUSIVE:
|
||||
vol->exclusive = 0;
|
||||
break;
|
||||
case UBI_METAONLY:
|
||||
vol->metaonly = 0;
|
||||
break;
|
||||
}
|
||||
vol->ref_count -= 1;
|
||||
spin_unlock(&ubi->volumes_lock);
|
||||
@ -351,6 +362,43 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ubi_close_volume);
|
||||
|
||||
/**
|
||||
* leb_read_sanity_check - does sanity checks on read requests.
|
||||
* @desc: volume descriptor
|
||||
* @lnum: logical eraseblock number to read from
|
||||
* @offset: offset within the logical eraseblock to read from
|
||||
* @len: how many bytes to read
|
||||
*
|
||||
* This function is used by ubi_leb_read() and ubi_leb_read_sg()
|
||||
* to perform sanity checks.
|
||||
*/
|
||||
static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
|
||||
int offset, int len)
|
||||
{
|
||||
struct ubi_volume *vol = desc->vol;
|
||||
struct ubi_device *ubi = vol->ubi;
|
||||
int vol_id = vol->vol_id;
|
||||
|
||||
if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
|
||||
lnum >= vol->used_ebs || offset < 0 || len < 0 ||
|
||||
offset + len > vol->usable_leb_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (vol->vol_type == UBI_STATIC_VOLUME) {
|
||||
if (vol->used_ebs == 0)
|
||||
/* Empty static UBI volume */
|
||||
return 0;
|
||||
if (lnum == vol->used_ebs - 1 &&
|
||||
offset + len > vol->last_eb_bytes)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (vol->upd_marker)
|
||||
return -EBADF;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ubi_leb_read - read data.
|
||||
* @desc: volume descriptor
|
||||
@ -387,28 +435,16 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
|
||||
|
||||
dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
|
||||
|
||||
if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
|
||||
lnum >= vol->used_ebs || offset < 0 || len < 0 ||
|
||||
offset + len > vol->usable_leb_size)
|
||||
return -EINVAL;
|
||||
err = leb_read_sanity_check(desc, lnum, offset, len);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (vol->vol_type == UBI_STATIC_VOLUME) {
|
||||
if (vol->used_ebs == 0)
|
||||
/* Empty static UBI volume */
|
||||
return 0;
|
||||
if (lnum == vol->used_ebs - 1 &&
|
||||
offset + len > vol->last_eb_bytes)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (vol->upd_marker)
|
||||
return -EBADF;
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
|
||||
if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
|
||||
ubi_warn("mark volume %d as corrupted", vol_id);
|
||||
ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
|
||||
vol->corrupted = 1;
|
||||
}
|
||||
|
||||
@ -416,6 +452,47 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ubi_leb_read);
|
||||
|
||||
#ifndef __UBOOT__
|
||||
/**
|
||||
* ubi_leb_read_sg - read data into a scatter gather list.
|
||||
* @desc: volume descriptor
|
||||
* @lnum: logical eraseblock number to read from
|
||||
* @buf: buffer where to store the read data
|
||||
* @offset: offset within the logical eraseblock to read from
|
||||
* @len: how many bytes to read
|
||||
* @check: whether UBI has to check the read data's CRC or not.
|
||||
*
|
||||
* This function works exactly like ubi_leb_read_sg(). But instead of
|
||||
* storing the read data into a buffer it writes to an UBI scatter gather
|
||||
* list.
|
||||
*/
|
||||
int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
|
||||
int offset, int len, int check)
|
||||
{
|
||||
struct ubi_volume *vol = desc->vol;
|
||||
struct ubi_device *ubi = vol->ubi;
|
||||
int err, vol_id = vol->vol_id;
|
||||
|
||||
dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
|
||||
|
||||
err = leb_read_sanity_check(desc, lnum, offset, len);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
|
||||
if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
|
||||
ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
|
||||
vol->corrupted = 1;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
|
||||
#endif
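
For context, a minimal in-kernel user of this read API could look like the hypothetical sketch below; the device and volume numbers, buffer size and error handling are reduced to the bare minimum and are not taken from this commit.

#include <linux/err.h>
#include <linux/mtd/ubi.h>
#include <linux/slab.h>

static int read_first_leb(int ubi_num, int vol_id)
{
	struct ubi_volume_desc *desc;
	char *buf;
	int err;

	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	buf = kmalloc(128, GFP_KERNEL);
	if (!buf) {
		ubi_close_volume(desc);
		return -ENOMEM;
	}

	/* Read 128 bytes from the start of LEB 0, with CRC checking enabled. */
	err = ubi_leb_read(desc, 0, buf, 0, 128, 1);

	kfree(buf);
	ubi_close_volume(desc);
	return err;
}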
|
||||
|
||||
/**
|
||||
* ubi_leb_write - write data.
|
||||
* @desc: volume descriptor
|
||||
|
@ -63,6 +63,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
|
||||
for (i = 0; i < vol->used_ebs; i++) {
|
||||
int size;
|
||||
|
||||
cond_resched();
|
||||
|
||||
if (i == vol->used_ebs - 1)
|
||||
size = vol->last_eb_bytes;
|
||||
else
|
||||
@ -100,7 +102,7 @@ void ubi_update_reserved(struct ubi_device *ubi)
|
||||
ubi->avail_pebs -= need;
|
||||
ubi->rsvd_pebs += need;
|
||||
ubi->beb_rsvd_pebs += need;
|
||||
ubi_msg("reserved more %d PEBs for bad PEB handling", need);
|
||||
ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -117,7 +119,7 @@ void ubi_calculate_reserved(struct ubi_device *ubi)
|
||||
ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
|
||||
if (ubi->beb_rsvd_level < 0) {
|
||||
ubi->beb_rsvd_level = 0;
|
||||
ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
|
||||
ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
|
||||
ubi->bad_peb_count, ubi->bad_peb_limit);
|
||||
}
|
||||
}
|
||||
|
@ -395,8 +395,6 @@ struct ubi_vtbl_record {
|
||||
#define UBI_FM_MIN_POOL_SIZE 8
|
||||
#define UBI_FM_MAX_POOL_SIZE 256
|
||||
|
||||
#define UBI_FM_WL_POOL_SIZE 25
|
||||
|
||||
/**
|
||||
* struct ubi_fm_sb - UBI fastmap super block
|
||||
* @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
|
||||
|
@ -43,17 +43,18 @@

/* Normal UBI messages */
#ifdef CONFIG_UBI_SILENCE_MSG
#define ubi_msg(fmt, ...)
#define ubi_msg(ubi, fmt, ...)
#else
#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
#define ubi_msg(ubi, fmt, ...) printk(UBI_NAME_STR "%d: " fmt "\n", \
	ubi->ubi_num, ##__VA_ARGS__)
#endif

/* UBI warning messages */
#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
	__func__, ##__VA_ARGS__)
#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
	ubi->ubi_num, __func__, ##__VA_ARGS__)
/* UBI error messages */
#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
	__func__, ##__VA_ARGS__)
#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
	ubi->ubi_num, __func__, ##__VA_ARGS__)

/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
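
These rewritten macros drive the bulk of this diff: every ubi_msg/ubi_warn/ubi_err call gains a struct ubi_device argument so the log line can name the device it concerns. A stand-alone mock-up of the before/after behaviour (the exact prefix string is paraphrased here):

#include <stdio.h>

struct ubi_device { int ubi_num; };

/* old style: no device context in the message */
#define old_ubi_err(fmt, ...) \
	printf("UBI error: %s: " fmt "\n", __func__, ##__VA_ARGS__)
/* new style: the device number is printed in front of every message */
#define new_ubi_err(ubi, fmt, ...) \
	printf("ubi%d error: %s: " fmt "\n", (ubi)->ubi_num, \
	       __func__, ##__VA_ARGS__)

int main(void)
{
	struct ubi_device dev = { .ubi_num = 1 };

	old_ubi_err("cannot erase PEB %d", 42);
	new_ubi_err(&dev, "cannot erase PEB %d", 42);
	return 0;
}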
@ -147,6 +148,17 @@ enum {
|
||||
UBI_BAD_FASTMAP,
|
||||
};
|
||||
|
||||
/*
|
||||
* Flags for emulate_power_cut in ubi_debug_info
|
||||
*
|
||||
* POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
|
||||
* POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
|
||||
*/
|
||||
enum {
|
||||
POWER_CUT_EC_WRITE = 0x01,
|
||||
POWER_CUT_VID_WRITE = 0x02,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ubi_wl_entry - wear-leveling entry.
|
||||
* @u.rb: link in the corresponding (free/used) RB-tree
|
||||
@ -257,6 +269,7 @@ struct ubi_fm_pool {
|
||||
* @readers: number of users holding this volume in read-only mode
|
||||
* @writers: number of users holding this volume in read-write mode
|
||||
* @exclusive: whether somebody holds this volume in exclusive mode
|
||||
* @metaonly: whether somebody is altering only meta data of this volume
|
||||
*
|
||||
* @reserved_pebs: how many physical eraseblocks are reserved for this volume
|
||||
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
|
||||
@ -305,6 +318,7 @@ struct ubi_volume {
|
||||
int readers;
|
||||
int writers;
|
||||
int exclusive;
|
||||
int metaonly;
|
||||
|
||||
int reserved_pebs;
|
||||
int vol_type;
|
||||
@ -339,7 +353,8 @@ struct ubi_volume {
|
||||
/**
|
||||
* struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
|
||||
* @vol: reference to the corresponding volume description object
|
||||
* @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
|
||||
* @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
|
||||
* or %UBI_METAONLY)
|
||||
*/
|
||||
struct ubi_volume_desc {
|
||||
struct ubi_volume *vol;
|
||||
@ -353,30 +368,48 @@ struct ubi_wl_entry;
|
||||
*
|
||||
* @chk_gen: if UBI general extra checks are enabled
|
||||
* @chk_io: if UBI I/O extra checks are enabled
|
||||
* @chk_fastmap: if UBI fastmap extra checks are enabled
|
||||
* @disable_bgt: disable the background task for testing purposes
|
||||
* @emulate_bitflips: emulate bit-flips for testing purposes
|
||||
* @emulate_io_failures: emulate write/erase failures for testing purposes
|
||||
* @emulate_power_cut: emulate power cut for testing purposes
|
||||
* @power_cut_counter: count down for writes left until emulated power cut
|
||||
* @power_cut_min: minimum number of writes before emulating a power cut
|
||||
* @power_cut_max: maximum number of writes until emulating a power cut
|
||||
* @dfs_dir_name: name of debugfs directory containing files of this UBI device
|
||||
* @dfs_dir: direntry object of the UBI device debugfs directory
|
||||
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks
|
||||
* @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
|
||||
* @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
|
||||
* @dfs_disable_bgt: debugfs knob to disable the background task
|
||||
* @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
|
||||
* @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
|
||||
* @dfs_emulate_power_cut: debugfs knob to emulate power cuts
|
||||
* @dfs_power_cut_min: debugfs knob for minimum writes before power cut
|
||||
* @dfs_power_cut_max: debugfs knob for maximum writes until power cut
|
||||
*/
|
||||
struct ubi_debug_info {
|
||||
unsigned int chk_gen:1;
|
||||
unsigned int chk_io:1;
|
||||
unsigned int chk_fastmap:1;
|
||||
unsigned int disable_bgt:1;
|
||||
unsigned int emulate_bitflips:1;
|
||||
unsigned int emulate_io_failures:1;
|
||||
unsigned int emulate_power_cut:2;
|
||||
unsigned int power_cut_counter;
|
||||
unsigned int power_cut_min;
|
||||
unsigned int power_cut_max;
|
||||
char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
|
||||
struct dentry *dfs_dir;
|
||||
struct dentry *dfs_chk_gen;
|
||||
struct dentry *dfs_chk_io;
|
||||
struct dentry *dfs_chk_fastmap;
|
||||
struct dentry *dfs_disable_bgt;
|
||||
struct dentry *dfs_emulate_bitflips;
|
||||
struct dentry *dfs_emulate_io_failures;
|
||||
struct dentry *dfs_emulate_power_cut;
|
||||
struct dentry *dfs_power_cut_min;
|
||||
struct dentry *dfs_power_cut_max;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -390,7 +423,8 @@ struct ubi_debug_info {
|
||||
* @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
|
||||
* @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
|
||||
* @vol->readers, @vol->writers, @vol->exclusive,
|
||||
* @vol->ref_count, @vol->mapping and @vol->eba_tbl.
|
||||
* @vol->metaonly, @vol->ref_count, @vol->mapping and
|
||||
* @vol->eba_tbl.
|
||||
* @ref_count: count of references on the UBI device
|
||||
* @image_seq: image sequence number recorded on EC headers
|
||||
*
|
||||
@ -422,11 +456,13 @@ struct ubi_debug_info {
|
||||
* @fm_pool: in-memory data structure of the fastmap pool
|
||||
* @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
|
||||
* sub-system
|
||||
* @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
|
||||
* @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
|
||||
* that critical sections cannot be interrupted by ubi_update_fastmap()
|
||||
* @fm_buf: vmalloc()'d buffer which holds the raw fastmap
|
||||
* @fm_size: fastmap size in bytes
|
||||
* @fm_sem: allows ubi_update_fastmap() to block EBA table changes
|
||||
* @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
|
||||
* @fm_work: fastmap work queue
|
||||
* @fm_work_scheduled: non-zero if fastmap work was scheduled
|
||||
*
|
||||
* @used: RB-tree of used physical eraseblocks
|
||||
* @erroneous: RB-tree of erroneous used physical eraseblocks
|
||||
@ -438,9 +474,11 @@ struct ubi_debug_info {
|
||||
* @pq_head: protection queue head
|
||||
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
|
||||
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
|
||||
* @erroneous, and @erroneous_peb_count fields
|
||||
* @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
|
||||
* and @fm_wl_pool fields
|
||||
* @move_mutex: serializes eraseblock moves
|
||||
* @work_sem: synchronizes the WL worker with use tasks
|
||||
* @work_sem: used to wait for all the scheduled works to finish and prevent
|
||||
* new works from being submitted
|
||||
* @wl_scheduled: non-zero if the wear-leveling was scheduled
|
||||
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
|
||||
* physical eraseblock
|
||||
@ -527,13 +565,14 @@ struct ubi_device {
|
||||
struct ubi_fastmap_layout *fm;
|
||||
struct ubi_fm_pool fm_pool;
|
||||
struct ubi_fm_pool fm_wl_pool;
|
||||
struct rw_semaphore fm_sem;
|
||||
struct mutex fm_mutex;
|
||||
struct rw_semaphore fm_eba_sem;
|
||||
struct rw_semaphore fm_protect;
|
||||
void *fm_buf;
|
||||
size_t fm_size;
|
||||
#ifndef __UBOOT__
|
||||
struct work_struct fm_work;
|
||||
#endif
|
||||
int fm_work_scheduled;
|
||||
|
||||
/* Wear-leveling sub-system's stuff */
|
||||
struct rb_root used;
|
||||
@ -716,14 +755,15 @@ struct ubi_attach_info {
|
||||
* @torture: if the physical eraseblock has to be tortured
|
||||
* @anchor: produce a anchor PEB to by used by fastmap
|
||||
*
|
||||
* The @func pointer points to the worker function. If the @cancel argument is
|
||||
* not zero, the worker has to free the resources and exit immediately. The
|
||||
* worker has to return zero in case of success and a negative error code in
|
||||
* The @func pointer points to the worker function. If the @shutdown argument is
|
||||
* not zero, the worker has to free the resources and exit immediately as the
|
||||
* WL sub-system is shutting down.
|
||||
* The worker has to return zero in case of success and a negative error code in
|
||||
* case of failure.
|
||||
*/
|
||||
struct ubi_work {
|
||||
struct list_head list;
|
||||
int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
|
||||
int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
|
||||
/* The below fields are only relevant to erasure works */
|
||||
struct ubi_wl_entry *e;
|
||||
int vol_id;
|
||||
@ -738,7 +778,7 @@ extern struct kmem_cache *ubi_wl_entry_slab;
|
||||
extern const struct file_operations ubi_ctrl_cdev_operations;
|
||||
extern const struct file_operations ubi_cdev_operations;
|
||||
extern const struct file_operations ubi_vol_cdev_operations;
|
||||
extern struct class *ubi_class;
|
||||
extern struct class ubi_class;
|
||||
extern struct mutex ubi_devices_mutex;
|
||||
extern struct blocking_notifier_head ubi_notifiers;
|
||||
|
||||
@ -807,6 +847,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
int lnum);
|
||||
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
|
||||
void *buf, int offset, int len, int check);
|
||||
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
struct ubi_sgl *sgl, int lnum, int offset, int len,
|
||||
int check);
|
||||
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
|
||||
const void *buf, int offset, int len);
|
||||
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
@ -877,10 +920,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
|
||||
int pnum, const struct ubi_vid_hdr *vid_hdr);
|
||||
|
||||
/* fastmap.c */
|
||||
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||
size_t ubi_calc_fm_size(struct ubi_device *ubi);
|
||||
int ubi_update_fastmap(struct ubi_device *ubi);
|
||||
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
||||
int fm_anchor);
|
||||
#else
|
||||
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
|
||||
#endif
|
||||
|
||||
/* block.c */
|
||||
#ifdef CONFIG_MTD_UBI_BLOCK
|
||||
@ -901,6 +948,42 @@ static inline int ubiblock_remove(struct ubi_volume_info *vi)
}
#endif

/*
* ubi_for_each_free_peb - walk the UBI free RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)

/*
* ubi_for_each_used_peb - walk the UBI used RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)

/*
* ubi_for_each_scub_peb - walk the UBI scub RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)

/*
* ubi_for_each_protected_peb - walk the UBI protection queue.
* @ubi: UBI device description object
* @i: a integer used as counter
* @e: a pointer to a ubi_wl_entry to use as cursor
*/
#define ubi_for_each_protected_peb(ubi, i, e) \
for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
list_for_each_entry((e), &(ubi->pq[(i)]), u.list)

/*
* ubi_rb_for_each_entry - walk an RB-tree.
@ -1004,7 +1087,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
|
||||
{
|
||||
if (!ubi->ro_mode) {
|
||||
ubi->ro_mode = 1;
|
||||
ubi_warn("switch to read-only mode");
|
||||
ubi_warn(ubi, "switch to read-only mode");
|
||||
dump_stack();
|
||||
}
|
||||
}
|
||||
@ -1035,4 +1118,7 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
|
||||
return idx;
|
||||
}
|
||||
|
||||
#ifdef __UBOOT__
|
||||
int do_work(struct ubi_device *ubi);
|
||||
#endif
|
||||
#endif /* !__UBI_UBI_H__ */
|
||||
|
@ -127,6 +127,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
ubi_assert(!vol->updating && !vol->changing_leb);
|
||||
vol->updating = 1;
|
||||
|
||||
vol->upd_buf = vmalloc(ubi->leb_size);
|
||||
if (!vol->upd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
err = set_update_marker(ubi, vol);
|
||||
if (err)
|
||||
return err;
|
||||
@ -146,14 +150,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
|
||||
err = clear_update_marker(ubi, vol, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
vfree(vol->upd_buf);
|
||||
vol->updating = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
vol->upd_buf = vmalloc(ubi->leb_size);
|
||||
if (!vol->upd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
|
||||
vol->usable_leb_size);
|
||||
vol->upd_bytes = bytes;
|
||||
|
@ -114,6 +114,19 @@ static ssize_t vol_attribute_show(struct device *dev,
|
||||
ubi_put_device(ubi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct attribute *volume_dev_attrs[] = {
|
||||
&attr_vol_reserved_ebs.attr,
|
||||
&attr_vol_type.attr,
|
||||
&attr_vol_name.attr,
|
||||
&attr_vol_corrupted.attr,
|
||||
&attr_vol_alignment.attr,
|
||||
&attr_vol_usable_eb_size.attr,
|
||||
&attr_vol_data_bytes.attr,
|
||||
&attr_vol_upd_marker.attr,
|
||||
NULL
|
||||
};
|
||||
ATTRIBUTE_GROUPS(volume_dev);
|
||||
#endif
|
||||
|
||||
/* Release method for volume devices */
|
||||
@ -125,66 +138,6 @@ static void vol_release(struct device *dev)
|
||||
kfree(vol);
|
||||
}
|
||||
|
||||
#ifndef __UBOOT__
|
||||
/**
|
||||
* volume_sysfs_init - initialize sysfs for new volume.
|
||||
* @ubi: UBI device description object
|
||||
* @vol: volume description object
|
||||
*
|
||||
* This function returns zero in case of success and a negative error code in
|
||||
* case of failure.
|
||||
*
|
||||
* Note, this function does not free allocated resources in case of failure -
|
||||
* the caller does it. This is because this would cause release() here and the
|
||||
* caller would oops.
|
||||
*/
|
||||
static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_type);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_name);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_corrupted);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_alignment);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_data_bytes);
|
||||
if (err)
|
||||
return err;
|
||||
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* volume_sysfs_close - close sysfs for a volume.
|
||||
* @vol: volume description object
|
||||
*/
|
||||
static void volume_sysfs_close(struct ubi_volume *vol)
|
||||
{
|
||||
device_remove_file(&vol->dev, &attr_vol_upd_marker);
|
||||
device_remove_file(&vol->dev, &attr_vol_data_bytes);
|
||||
device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
|
||||
device_remove_file(&vol->dev, &attr_vol_alignment);
|
||||
device_remove_file(&vol->dev, &attr_vol_corrupted);
|
||||
device_remove_file(&vol->dev, &attr_vol_name);
|
||||
device_remove_file(&vol->dev, &attr_vol_type);
|
||||
device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
|
||||
device_unregister(&vol->dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* ubi_create_volume - create volume.
|
||||
* @ubi: UBI device description object
|
||||
@ -221,7 +174,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
|
||||
}
|
||||
|
||||
if (vol_id == UBI_VOL_NUM_AUTO) {
|
||||
ubi_err("out of volume IDs");
|
||||
ubi_err(ubi, "out of volume IDs");
|
||||
err = -ENFILE;
|
||||
goto out_unlock;
|
||||
}
|
||||
@ -235,7 +188,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
|
||||
/* Ensure that this volume does not exist */
|
||||
err = -EEXIST;
|
||||
if (ubi->volumes[vol_id]) {
|
||||
ubi_err("volume %d already exists", vol_id);
|
||||
ubi_err(ubi, "volume %d already exists", vol_id);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
@ -244,20 +197,22 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
|
||||
if (ubi->volumes[i] &&
|
||||
ubi->volumes[i]->name_len == req->name_len &&
|
||||
!strcmp(ubi->volumes[i]->name, req->name)) {
|
||||
ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
|
||||
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
|
||||
req->name, i);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Calculate how many eraseblocks are requested */
|
||||
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
|
||||
vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
|
||||
vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
|
||||
vol->usable_leb_size);
|
||||
|
||||
/* Reserve physical eraseblocks */
|
||||
if (vol->reserved_pebs > ubi->avail_pebs) {
|
||||
ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
|
||||
ubi_err(ubi, "not enough PEBs, only %d available",
|
||||
ubi->avail_pebs);
|
||||
if (ubi->corr_peb_count)
|
||||
ubi_err("%d PEBs are corrupted and not used",
|
||||
ubi_err(ubi, "%d PEBs are corrupted and not used",
|
||||
ubi->corr_peb_count);
|
||||
err = -ENOSPC;
|
||||
goto out_unlock;
|
||||
@ -312,26 +267,25 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
|
||||
dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
|
||||
err = cdev_add(&vol->cdev, dev, 1);
|
||||
if (err) {
|
||||
ubi_err("cannot add character device");
|
||||
ubi_err(ubi, "cannot add character device");
|
||||
goto out_mapping;
|
||||
}
|
||||
|
||||
vol->dev.release = vol_release;
|
||||
vol->dev.parent = &ubi->dev;
|
||||
vol->dev.devt = dev;
|
||||
vol->dev.class = ubi_class;
|
||||
#ifndef __UBOOT__
|
||||
vol->dev.class = &ubi_class;
|
||||
vol->dev.groups = volume_dev_groups;
|
||||
#endif
|
||||
|
||||
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
|
||||
err = device_register(&vol->dev);
|
||||
if (err) {
|
||||
ubi_err("cannot register device");
|
||||
ubi_err(ubi, "cannot register device");
|
||||
goto out_cdev;
|
||||
}
|
||||
|
||||
err = volume_sysfs_init(ubi, vol);
|
||||
if (err)
|
||||
goto out_sysfs;
|
||||
|
||||
/* Fill volume table record */
|
||||
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
|
||||
vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
|
||||
@ -368,7 +322,7 @@ out_sysfs:
|
||||
*/
|
||||
do_free = 0;
|
||||
get_device(&vol->dev);
|
||||
volume_sysfs_close(vol);
|
||||
device_unregister(&vol->dev);
|
||||
out_cdev:
|
||||
cdev_del(&vol->cdev);
|
||||
out_mapping:
|
||||
@ -384,7 +338,7 @@ out_unlock:
|
||||
kfree(vol);
|
||||
else
|
||||
put_device(&vol->dev);
|
||||
ubi_err("cannot create volume %d, error %d", vol_id, err);
|
||||
ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -436,7 +390,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
|
||||
}
|
||||
|
||||
cdev_del(&vol->cdev);
|
||||
volume_sysfs_close(vol);
|
||||
device_unregister(&vol->dev);
|
||||
|
||||
spin_lock(&ubi->volumes_lock);
|
||||
ubi->rsvd_pebs -= reserved_pebs;
|
||||
@ -452,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
|
||||
return err;
|
||||
|
||||
out_err:
|
||||
ubi_err("cannot remove volume %d, error %d", vol_id, err);
|
||||
ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
|
||||
spin_lock(&ubi->volumes_lock);
|
||||
ubi->volumes[vol_id] = vol;
|
||||
out_unlock:
|
||||
@ -485,7 +439,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
|
||||
|
||||
if (vol->vol_type == UBI_STATIC_VOLUME &&
|
||||
reserved_pebs < vol->used_ebs) {
|
||||
ubi_err("too small size %d, %d LEBs contain data",
|
||||
ubi_err(ubi, "too small size %d, %d LEBs contain data",
|
||||
reserved_pebs, vol->used_ebs);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -514,10 +468,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
|
||||
if (pebs > 0) {
|
||||
spin_lock(&ubi->volumes_lock);
|
||||
if (pebs > ubi->avail_pebs) {
|
||||
ubi_err("not enough PEBs: requested %d, available %d",
|
||||
ubi_err(ubi, "not enough PEBs: requested %d, available %d",
|
||||
pebs, ubi->avail_pebs);
|
||||
if (ubi->corr_peb_count)
|
||||
ubi_err("%d PEBs are corrupted and not used",
|
||||
ubi_err(ubi, "%d PEBs are corrupted and not used",
|
||||
ubi->corr_peb_count);
|
||||
spin_unlock(&ubi->volumes_lock);
|
||||
err = -ENOSPC;
|
||||
@ -641,7 +595,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
|
||||
dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
|
||||
err = cdev_add(&vol->cdev, dev, 1);
|
||||
if (err) {
|
||||
ubi_err("cannot add character device for volume %d, error %d",
|
||||
ubi_err(ubi, "cannot add character device for volume %d, error %d",
|
||||
vol_id, err);
|
||||
return err;
|
||||
}
|
||||
@ -649,19 +603,15 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
|
||||
vol->dev.release = vol_release;
|
||||
vol->dev.parent = &ubi->dev;
|
||||
vol->dev.devt = dev;
|
||||
vol->dev.class = ubi_class;
|
||||
#ifndef __UBOOT__
|
||||
vol->dev.class = &ubi_class;
|
||||
vol->dev.groups = volume_dev_groups;
|
||||
#endif
|
||||
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
|
||||
err = device_register(&vol->dev);
|
||||
if (err)
|
||||
goto out_cdev;
|
||||
|
||||
err = volume_sysfs_init(ubi, vol);
|
||||
if (err) {
|
||||
cdev_del(&vol->cdev);
|
||||
volume_sysfs_close(vol);
|
||||
return err;
|
||||
}
|
||||
|
||||
self_check_volumes(ubi);
|
||||
return err;
|
||||
|
||||
@ -684,7 +634,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
|
||||
|
||||
ubi->volumes[vol->vol_id] = NULL;
|
||||
cdev_del(&vol->cdev);
|
||||
volume_sysfs_close(vol);
|
||||
device_unregister(&vol->dev);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -708,7 +658,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
|
||||
|
||||
if (!vol) {
|
||||
if (reserved_pebs) {
|
||||
ubi_err("no volume info, but volume exists");
|
||||
ubi_err(ubi, "no volume info, but volume exists");
|
||||
goto fail;
|
||||
}
|
||||
spin_unlock(&ubi->volumes_lock);
|
||||
@ -717,90 +667,91 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
|
||||
|
||||
if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
|
||||
vol->name_len < 0) {
|
||||
ubi_err("negative values");
|
||||
ubi_err(ubi, "negative values");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
|
||||
ubi_err("bad alignment");
|
||||
ubi_err(ubi, "bad alignment");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
n = vol->alignment & (ubi->min_io_size - 1);
|
||||
if (vol->alignment != 1 && n) {
|
||||
ubi_err("alignment is not multiple of min I/O unit");
|
||||
ubi_err(ubi, "alignment is not multiple of min I/O unit");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
n = ubi->leb_size % vol->alignment;
|
||||
if (vol->data_pad != n) {
|
||||
ubi_err("bad data_pad, has to be %lld", n);
|
||||
ubi_err(ubi, "bad data_pad, has to be %lld", n);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
|
||||
vol->vol_type != UBI_STATIC_VOLUME) {
|
||||
ubi_err("bad vol_type");
|
||||
ubi_err(ubi, "bad vol_type");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (vol->upd_marker && vol->corrupted) {
|
||||
ubi_err("update marker and corrupted simultaneously");
|
||||
ubi_err(ubi, "update marker and corrupted simultaneously");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (vol->reserved_pebs > ubi->good_peb_count) {
|
||||
ubi_err("too large reserved_pebs");
|
||||
ubi_err(ubi, "too large reserved_pebs");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
n = ubi->leb_size - vol->data_pad;
|
||||
if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
|
||||
ubi_err("bad usable_leb_size, has to be %lld", n);
|
||||
ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (vol->name_len > UBI_VOL_NAME_MAX) {
|
||||
ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
|
||||
ubi_err(ubi, "too long volume name, max is %d",
|
||||
UBI_VOL_NAME_MAX);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
n = strnlen(vol->name, vol->name_len + 1);
|
||||
if (n != vol->name_len) {
|
||||
ubi_err("bad name_len %lld", n);
|
||||
ubi_err(ubi, "bad name_len %lld", n);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
n = (long long)vol->used_ebs * vol->usable_leb_size;
|
||||
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
|
||||
if (vol->corrupted) {
|
||||
ubi_err("corrupted dynamic volume");
|
||||
ubi_err(ubi, "corrupted dynamic volume");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->used_ebs != vol->reserved_pebs) {
|
||||
ubi_err("bad used_ebs");
|
||||
ubi_err(ubi, "bad used_ebs");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->last_eb_bytes != vol->usable_leb_size) {
|
||||
ubi_err("bad last_eb_bytes");
|
||||
ubi_err(ubi, "bad last_eb_bytes");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->used_bytes != n) {
|
||||
ubi_err("bad used_bytes");
|
||||
ubi_err(ubi, "bad used_bytes");
|
||||
goto fail;
|
||||
}
|
||||
} else {
|
||||
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
|
||||
ubi_err("bad used_ebs");
|
||||
ubi_err(ubi, "bad used_ebs");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->last_eb_bytes < 0 ||
|
||||
vol->last_eb_bytes > vol->usable_leb_size) {
|
||||
ubi_err("bad last_eb_bytes");
|
||||
ubi_err(ubi, "bad last_eb_bytes");
|
||||
goto fail;
|
||||
}
|
||||
if (vol->used_bytes < 0 || vol->used_bytes > n ||
|
||||
vol->used_bytes < n - vol->usable_leb_size) {
|
||||
ubi_err("bad used_bytes");
|
||||
ubi_err(ubi, "bad used_bytes");
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
@ -818,7 +769,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
|
||||
if (alignment != vol->alignment || data_pad != vol->data_pad ||
|
||||
upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
|
||||
name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
|
||||
ubi_err("volume info is different");
|
||||
ubi_err(ubi, "volume info is different");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -826,7 +777,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
ubi_err("self-check failed for volume %d", vol_id);
|
||||
ubi_err(ubi, "self-check failed for volume %d", vol_id);
|
||||
if (vol)
|
||||
ubi_dump_vol_info(vol);
|
||||
ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
|
||||
|
@ -18,9 +18,12 @@
* eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
* other. This redundancy guarantees robustness to unclean reboots. The volume
* table is basically an array of volume table records. Each record contains
* full information about the volume and protected by a CRC checksum.
* full information about the volume and protected by a CRC checksum. Note,
* nowadays we use the atomic LEB change operation when updating the volume
* table, so we do not really need 2 LEBs anymore, but we preserve the older
* design for the backward compatibility reasons.
*
* The volume table is changed, it is first changed in RAM. Then LEB 0 is
* When the volume table is changed, it is first changed in RAM. Then LEB 0 is
* erased, and the updated volume table is written back to LEB 0. Then same for
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
@ -60,6 +63,26 @@ static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;

/**
* ubi_update_layout_vol - helper for updatting layout volumes on flash
* @ubi: UBI device description object
*/
static int ubi_update_layout_vol(struct ubi_device *ubi)
{
struct ubi_volume *layout_vol;
int i, err;

layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
ubi->vtbl_size);
if (err)
return err;
}

return 0;
}

/**
* ubi_change_vtbl_record - change volume table record.
* @ubi: UBI device description object
@ -74,12 +97,10 @@ static struct ubi_vtbl_record empty_vtbl_record;
|
||||
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
|
||||
struct ubi_vtbl_record *vtbl_rec)
|
||||
{
|
||||
int i, err;
|
||||
int err;
|
||||
uint32_t crc;
|
||||
struct ubi_volume *layout_vol;
|
||||
|
||||
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
|
||||
layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
|
||||
|
||||
if (!vtbl_rec)
|
||||
vtbl_rec = &empty_vtbl_record;
|
||||
@ -89,19 +110,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
|
||||
}
|
||||
|
||||
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
|
||||
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
|
||||
err = ubi_eba_unmap_leb(ubi, layout_vol, i);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
|
||||
ubi->vtbl_size);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = ubi_update_layout_vol(ubi);
|
||||
|
||||
self_vtbl_check(ubi);
|
||||
return 0;
|
||||
return err ? err : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -116,9 +128,7 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
|
||||
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
|
||||
struct list_head *rename_list)
|
||||
{
|
||||
int i, err;
|
||||
struct ubi_rename_entry *re;
|
||||
struct ubi_volume *layout_vol;
|
||||
|
||||
list_for_each_entry(re, rename_list, list) {
|
||||
uint32_t crc;
|
||||
@ -140,19 +150,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
|
||||
vtbl_rec->crc = cpu_to_be32(crc);
|
||||
}
|
||||
|
||||
layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
|
||||
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
|
||||
err = ubi_eba_unmap_leb(ubi, layout_vol, i);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
|
||||
ubi->vtbl_size);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ubi_update_layout_vol(ubi);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -184,7 +182,7 @@ static int vtbl_check(const struct ubi_device *ubi,
|
||||
|
||||
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
|
||||
if (be32_to_cpu(vtbl[i].crc) != crc) {
|
||||
ubi_err("bad CRC at record %u: %#08x, not %#08x",
|
||||
ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
|
||||
i, crc, be32_to_cpu(vtbl[i].crc));
|
||||
ubi_dump_vtbl_record(&vtbl[i], i);
|
||||
return 1;
|
||||
@ -218,7 +216,7 @@ static int vtbl_check(const struct ubi_device *ubi,
|
||||
|
||||
n = ubi->leb_size % alignment;
|
||||
if (data_pad != n) {
|
||||
ubi_err("bad data_pad, has to be %d", n);
|
||||
ubi_err(ubi, "bad data_pad, has to be %d", n);
|
||||
err = 6;
|
||||
goto bad;
|
||||
}
|
||||
@ -234,7 +232,7 @@ static int vtbl_check(const struct ubi_device *ubi,
|
||||
}
|
||||
|
||||
if (reserved_pebs > ubi->good_peb_count) {
|
||||
ubi_err("too large reserved_pebs %d, good PEBs %d",
|
||||
ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
|
||||
reserved_pebs, ubi->good_peb_count);
|
||||
err = 9;
|
||||
goto bad;
|
||||
@ -268,7 +266,7 @@ static int vtbl_check(const struct ubi_device *ubi,
|
||||
#else
|
||||
!strncmp((char *)vtbl[i].name, vtbl[n].name, len1)) {
|
||||
#endif
|
||||
ubi_err("volumes %d and %d have the same name \"%s\"",
|
||||
ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
|
||||
i, n, vtbl[i].name);
|
||||
ubi_dump_vtbl_record(&vtbl[i], i);
|
||||
ubi_dump_vtbl_record(&vtbl[n], n);
|
||||
@ -280,7 +278,7 @@ static int vtbl_check(const struct ubi_device *ubi,
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
ubi_err("volume table check failed: record %d, error %d", i, err);
|
||||
ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
|
||||
ubi_dump_vtbl_record(&vtbl[i], i);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -444,11 +442,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
|
||||
leb_corrupted[1] = memcmp(leb[0], leb[1],
|
||||
ubi->vtbl_size);
|
||||
if (leb_corrupted[1]) {
|
||||
ubi_warn("volume table copy #2 is corrupted");
|
||||
ubi_warn(ubi, "volume table copy #2 is corrupted");
|
||||
err = create_vtbl(ubi, ai, 1, leb[0]);
|
||||
if (err)
|
||||
goto out_free;
|
||||
ubi_msg("volume table was restored");
|
||||
ubi_msg(ubi, "volume table was restored");
|
||||
}
|
||||
|
||||
/* Both LEB 1 and LEB 2 are OK and consistent */
|
||||
@ -463,15 +461,15 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
|
||||
}
|
||||
if (leb_corrupted[1]) {
|
||||
/* Both LEB 0 and LEB 1 are corrupted */
|
||||
ubi_err("both volume tables are corrupted");
|
||||
ubi_err(ubi, "both volume tables are corrupted");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ubi_warn("volume table copy #1 is corrupted");
|
||||
ubi_warn(ubi, "volume table copy #1 is corrupted");
|
||||
err = create_vtbl(ubi, ai, 0, leb[1]);
|
||||
if (err)
|
||||
goto out_free;
|
||||
ubi_msg("volume table was restored");
|
||||
ubi_msg(ubi, "volume table was restored");
|
||||
|
||||
vfree(leb[0]);
|
||||
return leb[1];
|
||||
@ -560,7 +558,7 @@ static int init_volumes(struct ubi_device *ubi,
|
||||
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
|
||||
/* Auto re-size flag may be set only for one volume */
|
||||
if (ubi->autoresize_vol_id != -1) {
|
||||
ubi_err("more than one auto-resize volume (%d and %d)",
|
||||
ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
|
||||
ubi->autoresize_vol_id, i);
|
||||
kfree(vol);
|
||||
return -EINVAL;
|
||||
@ -589,7 +587,7 @@ static int init_volumes(struct ubi_device *ubi,
|
||||
|
||||
/* Static volumes only */
|
||||
av = ubi_find_av(ai, i);
|
||||
if (!av) {
|
||||
if (!av || !av->leb_count) {
|
||||
/*
|
||||
* No eraseblocks belonging to this volume found. We
|
||||
* don't actually know whether this static volume is
|
||||
@ -606,7 +604,7 @@ static int init_volumes(struct ubi_device *ubi,
|
||||
* We found a static volume which misses several
|
||||
* eraseblocks. Treat it as corrupted.
|
||||
*/
|
||||
ubi_warn("static volume %d misses %d LEBs - corrupted",
|
||||
ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
|
||||
av->vol_id, av->used_ebs - av->leb_count);
|
||||
vol->corrupted = 1;
|
||||
continue;
|
||||
@ -644,10 +642,10 @@ static int init_volumes(struct ubi_device *ubi,
|
||||
vol->ubi = ubi;
|
||||
|
||||
if (reserved_pebs > ubi->avail_pebs) {
|
||||
ubi_err("not enough PEBs, required %d, available %d",
|
||||
ubi_err(ubi, "not enough PEBs, required %d, available %d",
|
||||
reserved_pebs, ubi->avail_pebs);
|
||||
if (ubi->corr_peb_count)
|
||||
ubi_err("%d PEBs are corrupted and not used",
|
||||
ubi_err(ubi, "%d PEBs are corrupted and not used",
|
||||
ubi->corr_peb_count);
|
||||
}
|
||||
ubi->rsvd_pebs += reserved_pebs;
|
||||
@ -692,7 +690,7 @@ static int check_av(const struct ubi_volume *vol,
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
ubi_err("bad attaching information, error %d", err);
|
||||
ubi_err(vol->ubi, "bad attaching information, error %d", err);
|
||||
ubi_dump_av(av);
|
||||
ubi_dump_vol_info(vol);
|
||||
return -EINVAL;
|
||||
@ -716,14 +714,15 @@ static int check_attaching_info(const struct ubi_device *ubi,
|
||||
struct ubi_volume *vol;
|
||||
|
||||
if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
|
||||
ubi_err("found %d volumes while attaching, maximum is %d + %d",
|
||||
ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
|
||||
ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
|
||||
ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
|
||||
ubi_err("too large volume ID %d found", ai->highest_vol_id);
|
||||
ubi_err(ubi, "too large volume ID %d found",
|
||||
ai->highest_vol_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -751,7 +750,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
|
||||
* reboot while the volume was being removed. Discard
|
||||
* these eraseblocks.
|
||||
*/
|
||||
ubi_msg("finish volume %d removal", av->vol_id);
|
||||
ubi_msg(ubi, "finish volume %d removal", av->vol_id);
|
||||
ubi_remove_av(ai, av);
|
||||
} else if (av) {
|
||||
err = check_av(vol, av);
|
||||
@ -805,13 +804,13 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
||||
if (IS_ERR(ubi->vtbl))
|
||||
return PTR_ERR(ubi->vtbl);
|
||||
} else {
|
||||
ubi_err("the layout volume was not found");
|
||||
ubi_err(ubi, "the layout volume was not found");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
|
||||
/* This must not happen with proper UBI images */
|
||||
ubi_err("too many LEBs (%d) in layout volume",
|
||||
ubi_err(ubi, "too many LEBs (%d) in layout volume",
|
||||
av->leb_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -860,7 +859,7 @@ static void self_vtbl_check(const struct ubi_device *ubi)
|
||||
return;
|
||||
|
||||
if (vtbl_check(ubi, ubi->vtbl)) {
|
||||
ubi_err("self-check failed");
|
||||
ubi_err(ubi, "self-check failed");
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
34
drivers/mtd/ubi/wl.h
Normal file
34
drivers/mtd/ubi/wl.h
Normal file
@ -0,0 +1,34 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
static int anchor_pebs_avalible(struct rb_root *root);
#ifndef __UBOOT__
static void update_fastmap_work_fn(struct work_struct *wrk);
#else
void update_fastmap_work_fn(struct ubi_device *ubi);
#endif
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
/* Reserve enough LEBs to store two fastmaps. */
*count += (ubi->fm_size / ubi->leb_size) * 2;
#ifndef __UBOOT__
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
#endif
}
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root);
#else /* !CONFIG_MTD_UBI_FASTMAP */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
return e;
}
#endif /* CONFIG_MTD_UBI_FASTMAP */
#endif /* UBI_WL_H */
@ -433,7 +433,6 @@ static int calc_dd_growth(const struct ubifs_info *c,
*/
int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
{
int uninitialized_var(cmt_retries), uninitialized_var(wb_retries);
int err, idx_growth, data_growth, dd_growth, retried = 0;

ubifs_assert(req->new_page <= 1);
@ -506,7 +505,7 @@ again:
c->bi.nospace_rp = 1;
smp_wmb();
} else
ubifs_err("cannot budget space, error %d", err);
ubifs_err(c, "cannot budget space, error %d", err);
return err;
}

200
fs/ubifs/debug.c
200
fs/ubifs/debug.c
@ -334,9 +334,9 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
|
||||
pr_err("\tkey_fmt %d (%s)\n",
|
||||
(int)sup->key_fmt, get_key_fmt(sup->key_fmt));
|
||||
pr_err("\tflags %#x\n", sup_flags);
|
||||
pr_err("\t big_lpt %u\n",
|
||||
pr_err("\tbig_lpt %u\n",
|
||||
!!(sup_flags & UBIFS_FLG_BIGLPT));
|
||||
pr_err("\t space_fixup %u\n",
|
||||
pr_err("\tspace_fixup %u\n",
|
||||
!!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
|
||||
pr_err("\tmin_io_size %u\n", le32_to_cpu(sup->min_io_size));
|
||||
pr_err("\tleb_size %u\n", le32_to_cpu(sup->leb_size));
|
||||
@ -751,8 +751,10 @@ void ubifs_dump_lprops(struct ubifs_info *c)
|
||||
|
||||
for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
|
||||
err = ubifs_read_one_lp(c, lnum, &lp);
|
||||
if (err)
|
||||
ubifs_err("cannot read lprops for LEB %d", lnum);
|
||||
if (err) {
|
||||
ubifs_err(c, "cannot read lprops for LEB %d", lnum);
|
||||
continue;
|
||||
}
|
||||
|
||||
ubifs_dump_lprop(c, &lp);
|
||||
}
|
||||
@ -823,13 +825,13 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
|
||||
|
||||
buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!buf) {
|
||||
ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
|
||||
ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum);
|
||||
return;
|
||||
}
|
||||
|
||||
sleb = ubifs_scan(c, lnum, 0, buf, 0);
|
||||
if (IS_ERR(sleb)) {
|
||||
ubifs_err("scan error %d", (int)PTR_ERR(sleb));
|
||||
ubifs_err(c, "scan error %d", (int)PTR_ERR(sleb));
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1037,7 +1039,7 @@ int dbg_check_space_info(struct ubifs_info *c)
|
||||
spin_unlock(&c->space_lock);
|
||||
|
||||
if (free != d->saved_free) {
|
||||
ubifs_err("free space changed from %lld to %lld",
|
||||
ubifs_err(c, "free space changed from %lld to %lld",
|
||||
d->saved_free, free);
|
||||
goto out;
|
||||
}
|
||||
@ -1045,15 +1047,15 @@ int dbg_check_space_info(struct ubifs_info *c)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_msg("saved lprops statistics dump");
|
||||
ubifs_msg(c, "saved lprops statistics dump");
|
||||
ubifs_dump_lstats(&d->saved_lst);
|
||||
ubifs_msg("saved budgeting info dump");
|
||||
ubifs_msg(c, "saved budgeting info dump");
|
||||
ubifs_dump_budg(c, &d->saved_bi);
|
||||
ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
|
||||
ubifs_msg("current lprops statistics dump");
|
||||
ubifs_msg(c, "saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
|
||||
ubifs_msg(c, "current lprops statistics dump");
|
||||
ubifs_get_lp_stats(c, &lst);
|
||||
ubifs_dump_lstats(&lst);
|
||||
ubifs_msg("current budgeting info dump");
|
||||
ubifs_msg(c, "current budgeting info dump");
|
||||
ubifs_dump_budg(c, &c->bi);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
@ -1082,9 +1084,9 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
|
||||
mutex_lock(&ui->ui_mutex);
|
||||
spin_lock(&ui->ui_lock);
|
||||
if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
|
||||
ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean",
|
||||
ubifs_err(c, "ui_size is %lld, synced_i_size is %lld, but inode is clean",
|
||||
ui->ui_size, ui->synced_i_size);
|
||||
ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
|
||||
ubifs_err(c, "i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
|
||||
inode->i_mode, i_size_read(inode));
|
||||
dump_stack();
|
||||
err = -EINVAL;
|
||||
@ -1145,7 +1147,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
|
||||
kfree(pdent);
|
||||
|
||||
if (i_size_read(dir) != size) {
|
||||
ubifs_err("directory inode %lu has size %llu, but calculated size is %llu",
|
||||
ubifs_err(c, "directory inode %lu has size %llu, but calculated size is %llu",
|
||||
dir->i_ino, (unsigned long long)i_size_read(dir),
|
||||
(unsigned long long)size);
|
||||
ubifs_dump_inode(c, dir);
|
||||
@ -1153,7 +1155,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dir->i_nlink != nlink) {
|
||||
ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u",
|
||||
ubifs_err(c, "directory inode %lu has nlink %u, but calculated nlink is %u",
|
||||
dir->i_ino, dir->i_nlink, nlink);
|
||||
ubifs_dump_inode(c, dir);
|
||||
dump_stack();
|
||||
@ -1212,10 +1214,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
|
||||
err = 1;
|
||||
key_read(c, &dent1->key, &key);
|
||||
if (keys_cmp(c, &zbr1->key, &key)) {
|
||||
ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum,
|
||||
ubifs_err(c, "1st entry at %d:%d has key %s", zbr1->lnum,
|
||||
zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
|
||||
DBG_KEY_BUF_LEN));
|
||||
ubifs_err("but it should have key %s according to tnc",
|
||||
ubifs_err(c, "but it should have key %s according to tnc",
|
||||
dbg_snprintf_key(c, &zbr1->key, key_buf,
|
||||
DBG_KEY_BUF_LEN));
|
||||
ubifs_dump_node(c, dent1);
|
||||
@ -1224,10 +1226,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
|
||||
|
||||
key_read(c, &dent2->key, &key);
|
||||
if (keys_cmp(c, &zbr2->key, &key)) {
|
||||
ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum,
|
||||
ubifs_err(c, "2nd entry at %d:%d has key %s", zbr1->lnum,
|
||||
zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
|
||||
DBG_KEY_BUF_LEN));
|
||||
ubifs_err("but it should have key %s according to tnc",
|
||||
ubifs_err(c, "but it should have key %s according to tnc",
|
||||
dbg_snprintf_key(c, &zbr2->key, key_buf,
|
||||
DBG_KEY_BUF_LEN));
|
||||
ubifs_dump_node(c, dent2);
|
||||
@ -1243,14 +1245,14 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
|
||||
goto out_free;
|
||||
}
|
||||
if (cmp == 0 && nlen1 == nlen2)
|
||||
ubifs_err("2 xent/dent nodes with the same name");
|
||||
ubifs_err(c, "2 xent/dent nodes with the same name");
|
||||
else
|
||||
ubifs_err("bad order of colliding key %s",
|
||||
ubifs_err(c, "bad order of colliding key %s",
|
||||
dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
|
||||
|
||||
ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
|
||||
ubifs_msg(c, "first node at %d:%d\n", zbr1->lnum, zbr1->offs);
|
||||
ubifs_dump_node(c, dent1);
|
||||
ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
|
||||
ubifs_msg(c, "second node at %d:%d\n", zbr2->lnum, zbr2->offs);
|
||||
ubifs_dump_node(c, dent2);
|
||||
|
||||
out_free:
|
||||
@ -1452,11 +1454,11 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("failed, error %d", err);
|
||||
ubifs_msg("dump of the znode");
|
||||
ubifs_err(c, "failed, error %d", err);
|
||||
ubifs_msg(c, "dump of the znode");
|
||||
ubifs_dump_znode(c, znode);
|
||||
if (zp) {
|
||||
ubifs_msg("dump of the parent znode");
|
||||
ubifs_msg(c, "dump of the parent znode");
|
||||
ubifs_dump_znode(c, zp);
|
||||
}
|
||||
dump_stack();
|
||||
@ -1552,9 +1554,9 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (err) {
|
||||
ubifs_msg("first znode");
|
||||
ubifs_msg(c, "first znode");
|
||||
ubifs_dump_znode(c, prev);
|
||||
ubifs_msg("second znode");
|
||||
ubifs_msg(c, "second znode");
|
||||
ubifs_dump_znode(c, znode);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1563,13 +1565,13 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
|
||||
|
||||
if (extra) {
|
||||
if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
|
||||
ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
|
||||
ubifs_err(c, "incorrect clean_zn_cnt %ld, calculated %ld",
|
||||
atomic_long_read(&c->clean_zn_cnt),
|
||||
clean_cnt);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
|
||||
ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
|
||||
ubifs_err(c, "incorrect dirty_zn_cnt %ld, calculated %ld",
|
||||
atomic_long_read(&c->dirty_zn_cnt),
|
||||
dirty_cnt);
|
||||
return -EINVAL;
|
||||
@ -1648,7 +1650,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
|
||||
if (znode_cb) {
|
||||
err = znode_cb(c, znode, priv);
|
||||
if (err) {
|
||||
ubifs_err("znode checking function returned error %d",
|
||||
ubifs_err(c, "znode checking function returned error %d",
|
||||
err);
|
||||
ubifs_dump_znode(c, znode);
|
||||
goto out_dump;
|
||||
@ -1659,7 +1661,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
|
||||
zbr = &znode->zbranch[idx];
|
||||
err = leaf_cb(c, zbr, priv);
|
||||
if (err) {
|
||||
ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d",
|
||||
ubifs_err(c, "leaf checking function returned error %d, for leaf at LEB %d:%d",
|
||||
err, zbr->lnum, zbr->offs);
|
||||
goto out_dump;
|
||||
}
|
||||
@ -1715,7 +1717,7 @@ out_dump:
|
||||
zbr = &znode->parent->zbranch[znode->iip];
|
||||
else
|
||||
zbr = &c->zroot;
|
||||
ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_msg(c, "dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_dump_znode(c, znode);
|
||||
out_unlock:
|
||||
mutex_unlock(&c->tnc_mutex);
|
||||
@ -1762,12 +1764,12 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
|
||||
|
||||
err = dbg_walk_index(c, NULL, add_size, &calc);
|
||||
if (err) {
|
||||
ubifs_err("error %d while walking the index", err);
|
||||
ubifs_err(c, "error %d while walking the index", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (calc != idx_size) {
|
||||
ubifs_err("index size check failed: calculated size is %lld, should be %lld",
|
||||
ubifs_err(c, "index size check failed: calculated size is %lld, should be %lld",
|
||||
calc, idx_size);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
@ -1855,7 +1857,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
|
||||
}
|
||||
|
||||
if (inum > c->highest_inum) {
|
||||
ubifs_err("too high inode number, max. is %lu",
|
||||
ubifs_err(c, "too high inode number, max. is %lu",
|
||||
(unsigned long)c->highest_inum);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
@ -1962,17 +1964,17 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
|
||||
ino_key_init(c, &key, inum);
|
||||
err = ubifs_lookup_level0(c, &key, &znode, &n);
|
||||
if (!err) {
|
||||
ubifs_err("inode %lu not found in index", (unsigned long)inum);
|
||||
ubifs_err(c, "inode %lu not found in index", (unsigned long)inum);
|
||||
return ERR_PTR(-ENOENT);
|
||||
} else if (err < 0) {
|
||||
ubifs_err("error %d while looking up inode %lu",
|
||||
ubifs_err(c, "error %d while looking up inode %lu",
|
||||
err, (unsigned long)inum);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
zbr = &znode->zbranch[n];
|
||||
if (zbr->len < UBIFS_INO_NODE_SZ) {
|
||||
ubifs_err("bad node %lu node length %d",
|
||||
ubifs_err(c, "bad node %lu node length %d",
|
||||
(unsigned long)inum, zbr->len);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
@ -1983,7 +1985,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
|
||||
|
||||
err = ubifs_tnc_read_node(c, zbr, ino);
|
||||
if (err) {
|
||||
ubifs_err("cannot read inode node at LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d",
|
||||
zbr->lnum, zbr->offs, err);
|
||||
kfree(ino);
|
||||
return ERR_PTR(err);
|
||||
@ -1992,7 +1994,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
|
||||
fscki = add_inode(c, fsckd, ino);
|
||||
kfree(ino);
|
||||
if (IS_ERR(fscki)) {
|
||||
ubifs_err("error %ld while adding inode %lu node",
|
||||
ubifs_err(c, "error %ld while adding inode %lu node",
|
||||
PTR_ERR(fscki), (unsigned long)inum);
|
||||
return fscki;
|
||||
}
|
||||
@ -2026,7 +2028,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
struct fsck_inode *fscki;
|
||||
|
||||
if (zbr->len < UBIFS_CH_SZ) {
|
||||
ubifs_err("bad leaf length %d (LEB %d:%d)",
|
||||
ubifs_err(c, "bad leaf length %d (LEB %d:%d)",
|
||||
zbr->len, zbr->lnum, zbr->offs);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2037,7 +2039,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
|
||||
err = ubifs_tnc_read_node(c, zbr, node);
|
||||
if (err) {
|
||||
ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot read leaf node at LEB %d:%d, error %d",
|
||||
zbr->lnum, zbr->offs, err);
|
||||
goto out_free;
|
||||
}
|
||||
@ -2047,7 +2049,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
fscki = add_inode(c, priv, node);
|
||||
if (IS_ERR(fscki)) {
|
||||
err = PTR_ERR(fscki);
|
||||
ubifs_err("error %d while adding inode node", err);
|
||||
ubifs_err(c, "error %d while adding inode node", err);
|
||||
goto out_dump;
|
||||
}
|
||||
goto out;
|
||||
@ -2055,7 +2057,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
|
||||
if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
|
||||
type != UBIFS_DATA_KEY) {
|
||||
ubifs_err("unexpected node type %d at LEB %d:%d",
|
||||
ubifs_err(c, "unexpected node type %d at LEB %d:%d",
|
||||
type, zbr->lnum, zbr->offs);
|
||||
err = -EINVAL;
|
||||
goto out_free;
|
||||
@ -2063,7 +2065,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
|
||||
ch = node;
|
||||
if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
|
||||
ubifs_err("too high sequence number, max. is %llu",
|
||||
ubifs_err(c, "too high sequence number, max. is %llu",
|
||||
c->max_sqnum);
|
||||
err = -EINVAL;
|
||||
goto out_dump;
|
||||
@ -2073,6 +2075,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
long long blk_offs;
|
||||
struct ubifs_data_node *dn = node;
|
||||
|
||||
ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ);
|
||||
|
||||
/*
|
||||
* Search the inode node this data node belongs to and insert
|
||||
* it to the RB-tree of inodes.
|
||||
@ -2081,7 +2085,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
fscki = read_add_inode(c, priv, inum);
|
||||
if (IS_ERR(fscki)) {
|
||||
err = PTR_ERR(fscki);
|
||||
ubifs_err("error %d while processing data node and trying to find inode node %lu",
|
||||
ubifs_err(c, "error %d while processing data node and trying to find inode node %lu",
|
||||
err, (unsigned long)inum);
|
||||
goto out_dump;
|
||||
}
|
||||
@ -2091,7 +2095,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
blk_offs <<= UBIFS_BLOCK_SHIFT;
|
||||
blk_offs += le32_to_cpu(dn->size);
|
||||
if (blk_offs > fscki->size) {
|
||||
ubifs_err("data node at LEB %d:%d is not within inode size %lld",
|
||||
ubifs_err(c, "data node at LEB %d:%d is not within inode size %lld",
|
||||
zbr->lnum, zbr->offs, fscki->size);
|
||||
err = -EINVAL;
|
||||
goto out_dump;
|
||||
@ -2101,6 +2105,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
struct ubifs_dent_node *dent = node;
|
||||
struct fsck_inode *fscki1;
|
||||
|
||||
ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ);
|
||||
|
||||
err = ubifs_validate_entry(c, dent);
|
||||
if (err)
|
||||
goto out_dump;
|
||||
@ -2113,7 +2119,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
fscki = read_add_inode(c, priv, inum);
|
||||
if (IS_ERR(fscki)) {
|
||||
err = PTR_ERR(fscki);
|
||||
ubifs_err("error %d while processing entry node and trying to find inode node %lu",
|
||||
ubifs_err(c, "error %d while processing entry node and trying to find inode node %lu",
|
||||
err, (unsigned long)inum);
|
||||
goto out_dump;
|
||||
}
|
||||
@ -2125,7 +2131,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
|
||||
fscki1 = read_add_inode(c, priv, inum);
|
||||
if (IS_ERR(fscki1)) {
|
||||
err = PTR_ERR(fscki1);
|
||||
ubifs_err("error %d while processing entry node and trying to find parent inode node %lu",
|
||||
ubifs_err(c, "error %d while processing entry node and trying to find parent inode node %lu",
|
||||
err, (unsigned long)inum);
|
||||
goto out_dump;
|
||||
}
|
||||
@ -2148,7 +2154,7 @@ out:
|
||||
return 0;
|
||||
|
||||
out_dump:
|
||||
ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_msg(c, "dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_dump_node(c, node);
|
||||
out_free:
|
||||
kfree(node);
|
||||
@ -2199,52 +2205,52 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
|
||||
*/
|
||||
if (fscki->inum != UBIFS_ROOT_INO &&
|
||||
fscki->references != 1) {
|
||||
ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1",
|
||||
ubifs_err(c, "directory inode %lu has %d direntries which refer it, but should be 1",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->references);
|
||||
goto out_dump;
|
||||
}
|
||||
if (fscki->inum == UBIFS_ROOT_INO &&
|
||||
fscki->references != 0) {
|
||||
ubifs_err("root inode %lu has non-zero (%d) direntries which refer it",
|
||||
ubifs_err(c, "root inode %lu has non-zero (%d) direntries which refer it",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->references);
|
||||
goto out_dump;
|
||||
}
|
||||
if (fscki->calc_sz != fscki->size) {
|
||||
ubifs_err("directory inode %lu size is %lld, but calculated size is %lld",
|
||||
ubifs_err(c, "directory inode %lu size is %lld, but calculated size is %lld",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->size, fscki->calc_sz);
|
||||
goto out_dump;
|
||||
}
|
||||
if (fscki->calc_cnt != fscki->nlink) {
|
||||
ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d",
|
||||
ubifs_err(c, "directory inode %lu nlink is %d, but calculated nlink is %d",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->nlink, fscki->calc_cnt);
|
||||
goto out_dump;
|
||||
}
|
||||
} else {
|
||||
if (fscki->references != fscki->nlink) {
|
||||
ubifs_err("inode %lu nlink is %d, but calculated nlink is %d",
|
||||
ubifs_err(c, "inode %lu nlink is %d, but calculated nlink is %d",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->nlink, fscki->references);
|
||||
goto out_dump;
|
||||
}
|
||||
}
|
||||
if (fscki->xattr_sz != fscki->calc_xsz) {
|
||||
ubifs_err("inode %lu has xattr size %u, but calculated size is %lld",
|
||||
ubifs_err(c, "inode %lu has xattr size %u, but calculated size is %lld",
|
||||
(unsigned long)fscki->inum, fscki->xattr_sz,
|
||||
fscki->calc_xsz);
|
||||
goto out_dump;
|
||||
}
|
||||
if (fscki->xattr_cnt != fscki->calc_xcnt) {
|
||||
ubifs_err("inode %lu has %u xattrs, but calculated count is %lld",
|
||||
ubifs_err(c, "inode %lu has %u xattrs, but calculated count is %lld",
|
||||
(unsigned long)fscki->inum,
|
||||
fscki->xattr_cnt, fscki->calc_xcnt);
|
||||
goto out_dump;
|
||||
}
|
||||
if (fscki->xattr_nms != fscki->calc_xnms) {
|
||||
ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld",
|
||||
ubifs_err(c, "inode %lu has xattr names' size %u, but calculated names' size is %lld",
|
||||
(unsigned long)fscki->inum, fscki->xattr_nms,
|
||||
fscki->calc_xnms);
|
||||
goto out_dump;
|
||||
@ -2258,11 +2264,11 @@ out_dump:
|
||||
ino_key_init(c, &key, fscki->inum);
|
||||
err = ubifs_lookup_level0(c, &key, &znode, &n);
|
||||
if (!err) {
|
||||
ubifs_err("inode %lu not found in index",
|
||||
ubifs_err(c, "inode %lu not found in index",
|
||||
(unsigned long)fscki->inum);
|
||||
return -ENOENT;
|
||||
} else if (err < 0) {
|
||||
ubifs_err("error %d while looking up inode %lu",
|
||||
ubifs_err(c, "error %d while looking up inode %lu",
|
||||
err, (unsigned long)fscki->inum);
|
||||
return err;
|
||||
}
|
||||
@ -2274,13 +2280,13 @@ out_dump:
|
||||
|
||||
err = ubifs_tnc_read_node(c, zbr, ino);
|
||||
if (err) {
|
||||
ubifs_err("cannot read inode node at LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d",
|
||||
zbr->lnum, zbr->offs, err);
|
||||
kfree(ino);
|
||||
return err;
|
||||
}
|
||||
|
||||
ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
|
||||
ubifs_msg(c, "dump of the inode %lu sitting in LEB %d:%d",
|
||||
(unsigned long)fscki->inum, zbr->lnum, zbr->offs);
|
||||
ubifs_dump_node(c, ino);
|
||||
kfree(ino);
|
||||
@ -2321,7 +2327,7 @@ int dbg_check_filesystem(struct ubifs_info *c)
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
ubifs_err("file-system check failed with error %d", err);
|
||||
ubifs_err(c, "file-system check failed with error %d", err);
|
||||
dump_stack();
|
||||
free_inodes(&fsckd);
|
||||
return err;
|
||||
@ -2352,12 +2358,12 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
sb = container_of(cur->next, struct ubifs_scan_node, list);
|
||||
|
||||
if (sa->type != UBIFS_DATA_NODE) {
|
||||
ubifs_err("bad node type %d", sa->type);
|
||||
ubifs_err(c, "bad node type %d", sa->type);
|
||||
ubifs_dump_node(c, sa->node);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (sb->type != UBIFS_DATA_NODE) {
|
||||
ubifs_err("bad node type %d", sb->type);
|
||||
ubifs_err(c, "bad node type %d", sb->type);
|
||||
ubifs_dump_node(c, sb->node);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2368,7 +2374,7 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
if (inuma < inumb)
|
||||
continue;
|
||||
if (inuma > inumb) {
|
||||
ubifs_err("larger inum %lu goes before inum %lu",
|
||||
ubifs_err(c, "larger inum %lu goes before inum %lu",
|
||||
(unsigned long)inuma, (unsigned long)inumb);
|
||||
goto error_dump;
|
||||
}
|
||||
@ -2377,11 +2383,11 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
blkb = key_block(c, &sb->key);
|
||||
|
||||
if (blka > blkb) {
|
||||
ubifs_err("larger block %u goes before %u", blka, blkb);
|
||||
ubifs_err(c, "larger block %u goes before %u", blka, blkb);
|
||||
goto error_dump;
|
||||
}
|
||||
if (blka == blkb) {
|
||||
ubifs_err("two data nodes for the same block");
|
||||
ubifs_err(c, "two data nodes for the same block");
|
||||
goto error_dump;
|
||||
}
|
||||
}
|
||||
@ -2420,19 +2426,19 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
|
||||
if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
|
||||
sa->type != UBIFS_XENT_NODE) {
|
||||
ubifs_err("bad node type %d", sa->type);
|
||||
ubifs_err(c, "bad node type %d", sa->type);
|
||||
ubifs_dump_node(c, sa->node);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
|
||||
sa->type != UBIFS_XENT_NODE) {
|
||||
ubifs_err("bad node type %d", sb->type);
|
||||
ubifs_err(c, "bad node type %d", sb->type);
|
||||
ubifs_dump_node(c, sb->node);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
|
||||
ubifs_err("non-inode node goes before inode node");
|
||||
ubifs_err(c, "non-inode node goes before inode node");
|
||||
goto error_dump;
|
||||
}
|
||||
|
||||
@ -2442,7 +2448,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
|
||||
/* Inode nodes are sorted in descending size order */
|
||||
if (sa->len < sb->len) {
|
||||
ubifs_err("smaller inode node goes first");
|
||||
ubifs_err(c, "smaller inode node goes first");
|
||||
goto error_dump;
|
||||
}
|
||||
continue;
|
||||
@ -2458,7 +2464,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
if (inuma < inumb)
|
||||
continue;
|
||||
if (inuma > inumb) {
|
||||
ubifs_err("larger inum %lu goes before inum %lu",
|
||||
ubifs_err(c, "larger inum %lu goes before inum %lu",
|
||||
(unsigned long)inuma, (unsigned long)inumb);
|
||||
goto error_dump;
|
||||
}
|
||||
@ -2467,7 +2473,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
hashb = key_block(c, &sb->key);
|
||||
|
||||
if (hasha > hashb) {
|
||||
ubifs_err("larger hash %u goes before %u",
|
||||
ubifs_err(c, "larger hash %u goes before %u",
|
||||
hasha, hashb);
|
||||
goto error_dump;
|
||||
}
|
||||
@ -2476,9 +2482,9 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
|
||||
return 0;
|
||||
|
||||
error_dump:
|
||||
ubifs_msg("dumping first node");
|
||||
ubifs_msg(c, "dumping first node");
|
||||
ubifs_dump_node(c, sa->node);
|
||||
ubifs_msg("dumping second node");
|
||||
ubifs_msg(c, "dumping second node");
|
||||
ubifs_dump_node(c, sb->node);
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
@@ -2503,17 +2509,17 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)

if (chance(1, 2)) {
d->pc_delay = 1;
/* Fail withing 1 minute */
/* Fail within 1 minute */
delay = prandom_u32() % 60000;
d->pc_timeout = jiffies;
d->pc_timeout += msecs_to_jiffies(delay);
ubifs_warn("failing after %lums", delay);
ubifs_warn(c, "failing after %lums", delay);
} else {
d->pc_delay = 2;
delay = prandom_u32() % 10000;
/* Fail within 10000 operations */
d->pc_cnt_max = delay;
ubifs_warn("failing after %lu calls", delay);
ubifs_warn(c, "failing after %lu calls", delay);
}
}
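
The power-cut emulation above chooses between a time-based and a call-count-based failure with chance(n, out), which is expected to succeed roughly n times out of every out evaluations. A stand-alone sketch of that selection follows; the chance() body here is an assumed illustration, not the definition from debug.c:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* assumed behaviour: true roughly n times out of every 'out' evaluations */
    static int chance(unsigned int n, unsigned int out)
    {
    	return (unsigned int)rand() % out < n;
    }

    int main(void)
    {
    	srand((unsigned int)time(NULL));

    	if (chance(1, 2))
    		printf("fail within %d ms\n", rand() % 60000);		/* time-based cut */
    	else
    		printf("fail within %d calls\n", rand() % 10000);	/* count-based cut */
    	return 0;
    }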
|
||||
|
||||
@ -2531,55 +2537,55 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
|
||||
return 0;
|
||||
if (chance(19, 20))
|
||||
return 0;
|
||||
ubifs_warn("failing in super block LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in super block LEB %d", lnum);
|
||||
} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
|
||||
if (chance(19, 20))
|
||||
return 0;
|
||||
ubifs_warn("failing in master LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in master LEB %d", lnum);
|
||||
} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
|
||||
if (write && chance(99, 100))
|
||||
return 0;
|
||||
if (chance(399, 400))
|
||||
return 0;
|
||||
ubifs_warn("failing in log LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in log LEB %d", lnum);
|
||||
} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
|
||||
if (write && chance(7, 8))
|
||||
return 0;
|
||||
if (chance(19, 20))
|
||||
return 0;
|
||||
ubifs_warn("failing in LPT LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in LPT LEB %d", lnum);
|
||||
} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
|
||||
if (write && chance(1, 2))
|
||||
return 0;
|
||||
if (chance(9, 10))
|
||||
return 0;
|
||||
ubifs_warn("failing in orphan LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in orphan LEB %d", lnum);
|
||||
} else if (lnum == c->ihead_lnum) {
|
||||
if (chance(99, 100))
|
||||
return 0;
|
||||
ubifs_warn("failing in index head LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in index head LEB %d", lnum);
|
||||
} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
|
||||
if (chance(9, 10))
|
||||
return 0;
|
||||
ubifs_warn("failing in GC head LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in GC head LEB %d", lnum);
|
||||
} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
|
||||
!ubifs_search_bud(c, lnum)) {
|
||||
if (chance(19, 20))
|
||||
return 0;
|
||||
ubifs_warn("failing in non-bud LEB %d", lnum);
|
||||
ubifs_warn(c, "failing in non-bud LEB %d", lnum);
|
||||
} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
|
||||
c->cmt_state == COMMIT_RUNNING_REQUIRED) {
|
||||
if (chance(999, 1000))
|
||||
return 0;
|
||||
ubifs_warn("failing in bud LEB %d commit running", lnum);
|
||||
ubifs_warn(c, "failing in bud LEB %d commit running", lnum);
|
||||
} else {
|
||||
if (chance(9999, 10000))
|
||||
return 0;
|
||||
ubifs_warn("failing in bud LEB %d commit not running", lnum);
|
||||
ubifs_warn(c, "failing in bud LEB %d commit not running", lnum);
|
||||
}
|
||||
|
||||
d->pc_happened = 1;
|
||||
ubifs_warn("========== Power cut emulated ==========");
|
||||
ubifs_warn(c, "========== Power cut emulated ==========");
|
||||
dump_stack();
|
||||
return 1;
|
||||
}
|
||||
@ -2594,7 +2600,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
|
||||
/* Corruption span max to end of write unit */
|
||||
to = min(len, ALIGN(from + 1, c->max_write_size));
|
||||
|
||||
ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
|
||||
ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1,
|
||||
ffs ? "0xFFs" : "random data");
|
||||
|
||||
if (ffs)
|
||||
@ -2616,7 +2622,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
|
||||
failing = power_cut_emulated(c, lnum, 1);
|
||||
if (failing) {
|
||||
len = corrupt_data(c, buf, len);
|
||||
ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
|
||||
ubifs_warn(c, "actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
|
||||
len, lnum, offs);
|
||||
}
|
||||
err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
|
||||
@ -2946,7 +2952,7 @@ out_remove:
|
||||
debugfs_remove_recursive(d->dfs_dir);
|
||||
out:
|
||||
err = dent ? PTR_ERR(dent) : -ENODEV;
|
||||
ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
ubifs_err(c, "cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
fname, err);
|
||||
return err;
|
||||
}
|
||||
@ -3100,8 +3106,8 @@ out_remove:
|
||||
debugfs_remove_recursive(dfs_rootdir);
|
||||
out:
|
||||
err = dent ? PTR_ERR(dent) : -ENODEV;
|
||||
ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
fname, err);
|
||||
pr_err("UBIFS error (pid %d): cannot create \"%s\" debugfs file or directory, error %d\n",
|
||||
current->pid, fname, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -79,7 +79,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
c->ro_error = 1;
c->no_chk_data_crc = 0;
c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err);
ubifs_warn(c, "switched to read-only mode, error %d", err);
dump_stack();
}
}
|
||||
@ -101,7 +101,7 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
|
||||
* @even_ebadmsg is true.
|
||||
*/
|
||||
if (err && (err != -EBADMSG || even_ebadmsg)) {
|
||||
ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
|
||||
ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
|
||||
len, lnum, offs, err);
|
||||
dump_stack();
|
||||
}
|
||||
@ -118,10 +118,12 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
|
||||
return -EROFS;
|
||||
if (!dbg_is_tst_rcvry(c))
|
||||
err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
|
||||
#ifndef __UBOOT__
|
||||
else
|
||||
err = dbg_leb_write(c, lnum, buf, offs, len);
|
||||
#endif
|
||||
if (err) {
|
||||
ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
|
||||
ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
|
||||
len, lnum, offs, err);
|
||||
ubifs_ro_mode(c, err);
|
||||
dump_stack();
|
||||
@ -138,10 +140,12 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
|
||||
return -EROFS;
|
||||
if (!dbg_is_tst_rcvry(c))
|
||||
err = ubi_leb_change(c->ubi, lnum, buf, len);
|
||||
#ifndef __UBOOT__
|
||||
else
|
||||
err = dbg_leb_change(c, lnum, buf, len);
|
||||
#endif
|
||||
if (err) {
|
||||
ubifs_err("changing %d bytes in LEB %d failed, error %d",
|
||||
ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
|
||||
len, lnum, err);
|
||||
ubifs_ro_mode(c, err);
|
||||
dump_stack();
|
||||
@ -158,10 +162,12 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
|
||||
return -EROFS;
|
||||
if (!dbg_is_tst_rcvry(c))
|
||||
err = ubi_leb_unmap(c->ubi, lnum);
|
||||
#ifndef __UBOOT__
|
||||
else
|
||||
err = dbg_leb_unmap(c, lnum);
|
||||
#endif
|
||||
if (err) {
|
||||
ubifs_err("unmap LEB %d failed, error %d", lnum, err);
|
||||
ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
|
||||
ubifs_ro_mode(c, err);
|
||||
dump_stack();
|
||||
}
|
||||
@ -177,10 +183,12 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum)
|
||||
return -EROFS;
|
||||
if (!dbg_is_tst_rcvry(c))
|
||||
err = ubi_leb_map(c->ubi, lnum);
|
||||
#ifndef __UBOOT__
|
||||
else
|
||||
err = dbg_leb_map(c, lnum);
|
||||
#endif
|
||||
if (err) {
|
||||
ubifs_err("mapping LEB %d failed, error %d", lnum, err);
|
||||
ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
|
||||
ubifs_ro_mode(c, err);
|
||||
dump_stack();
|
||||
}
|
||||
@ -193,7 +201,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
|
||||
|
||||
err = ubi_is_mapped(c->ubi, lnum);
|
||||
if (err < 0) {
|
||||
ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
|
||||
ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
|
||||
lnum, err);
|
||||
dump_stack();
|
||||
}
|
||||
@ -241,7 +249,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
|
||||
magic = le32_to_cpu(ch->magic);
|
||||
if (magic != UBIFS_NODE_MAGIC) {
|
||||
if (!quiet)
|
||||
ubifs_err("bad magic %#08x, expected %#08x",
|
||||
ubifs_err(c, "bad magic %#08x, expected %#08x",
|
||||
magic, UBIFS_NODE_MAGIC);
|
||||
err = -EUCLEAN;
|
||||
goto out;
|
||||
@ -250,7 +258,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
|
||||
type = ch->node_type;
|
||||
if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
|
||||
if (!quiet)
|
||||
ubifs_err("bad node type %d", type);
|
||||
ubifs_err(c, "bad node type %d", type);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -273,7 +281,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
|
||||
node_crc = le32_to_cpu(ch->crc);
|
||||
if (crc != node_crc) {
|
||||
if (!quiet)
|
||||
ubifs_err("bad CRC: calculated %#08x, read %#08x",
|
||||
ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
|
||||
crc, node_crc);
|
||||
err = -EUCLEAN;
|
||||
goto out;
|
||||
@ -283,10 +291,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
|
||||
|
||||
out_len:
|
||||
if (!quiet)
|
||||
ubifs_err("bad node length %d", node_len);
|
||||
ubifs_err(c, "bad node length %d", node_len);
|
||||
out:
|
||||
if (!quiet) {
|
||||
ubifs_err("bad node at LEB %d:%d", lnum, offs);
|
||||
ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
|
||||
ubifs_dump_node(c, buf);
|
||||
dump_stack();
|
||||
}
|
||||
@ -349,11 +357,11 @@ static unsigned long long next_sqnum(struct ubifs_info *c)
|
||||
|
||||
if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
|
||||
if (sqnum >= SQNUM_WATERMARK) {
|
||||
ubifs_err("sequence number overflow %llu, end of life",
|
||||
ubifs_err(c, "sequence number overflow %llu, end of life",
|
||||
sqnum);
|
||||
ubifs_ro_mode(c, -EINVAL);
|
||||
}
|
||||
ubifs_warn("running out of sequence numbers, end of life soon");
|
||||
ubifs_warn(c, "running out of sequence numbers, end of life soon");
|
||||
}
|
||||
|
||||
return sqnum;
|
||||
@ -426,7 +434,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
|
||||
#ifndef __UBOOT__
|
||||
/**
|
||||
* wbuf_timer_callback - write-buffer timer callback function.
|
||||
* @data: timer data (write-buffer descriptor)
|
||||
* @timer: timer data (write-buffer descriptor)
|
||||
*
|
||||
* This function is called when the write-buffer timer expires.
|
||||
*/
|
||||
@ -635,7 +643,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
|
||||
err = ubifs_wbuf_sync_nolock(wbuf);
|
||||
mutex_unlock(&wbuf->io_mutex);
|
||||
if (err) {
|
||||
ubifs_err("cannot sync write-buffer, error %d", err);
|
||||
ubifs_err(c, "cannot sync write-buffer, error %d", err);
|
||||
ubifs_ro_mode(c, err);
|
||||
goto out_timers;
|
||||
}
|
||||
@ -832,7 +840,7 @@ exit:
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
|
||||
len, wbuf->lnum, wbuf->offs, err);
|
||||
ubifs_dump_node(c, buf);
|
||||
dump_stack();
|
||||
@ -932,27 +940,27 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
|
||||
}
|
||||
|
||||
if (type != ch->node_type) {
|
||||
ubifs_err("bad node type (%d but expected %d)",
|
||||
ubifs_err(c, "bad node type (%d but expected %d)",
|
||||
ch->node_type, type);
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
|
||||
if (err) {
|
||||
ubifs_err("expected node type %d", type);
|
||||
ubifs_err(c, "expected node type %d", type);
|
||||
return err;
|
||||
}
|
||||
|
||||
rlen = le32_to_cpu(ch->len);
|
||||
if (rlen != len) {
|
||||
ubifs_err("bad node length %d, expected %d", rlen, len);
|
||||
ubifs_err(c, "bad node length %d, expected %d", rlen, len);
|
||||
goto out;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("bad node at LEB %d:%d", lnum, offs);
|
||||
ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
|
||||
ubifs_dump_node(c, buf);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
@@ -988,30 +996,32 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
return err;

if (type != ch->node_type) {
ubifs_err("bad node type (%d but expected %d)",
ubifs_errc(c, "bad node type (%d but expected %d)",
ch->node_type, type);
goto out;
}

err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
if (err) {
ubifs_err("expected node type %d", type);
ubifs_errc(c, "expected node type %d", type);
return err;
}

l = le32_to_cpu(ch->len);
if (l != len) {
ubifs_err("bad node length %d, expected %d", l, len);
ubifs_errc(c, "bad node length %d, expected %d", l, len);
goto out;
}

return 0;

out:
ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
ubi_is_mapped(c->ubi, lnum));
ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
offs, ubi_is_mapped(c->ubi, lnum));
if (!c->probing) {
ubifs_dump_node(c, buf);
dump_stack();
}
return -EINVAL;
}
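
ubifs_read_node() now reports through ubifs_errc() instead of ubifs_err(). Judging by the !c->probing guard kept around the node dump above, it is presumably a conditional variant that stays silent while the volume is only being probed for mounting. A sketch of such a wrapper, reusing the ubifs_err() illustration earlier; the probing member and the macro shape are assumptions:

    /*
     * Hypothetical sketch: drop the report while c->probing is set.
     * Builds on the ubifs_err() illustration above; the 'probing'
     * member is assumed here.
     */
    #define ubifs_errc(c, fmt, ...)					\
    	do {							\
    		if (!(c)->probing)				\
    			ubifs_err((c), fmt, ##__VA_ARGS__);	\
    	} while (0)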

@@ -98,10 +98,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
t = (long long)c->ltail_lnum * c->leb_size;

if (h >= t)
if (h > t)
return c->log_bytes - h + t;
else
else if (h != t)
return t - h;
else if (c->lhead_lnum != c->ltail_lnum)
return 0;
else
return c->log_bytes;
}
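
The reworked calculation above adds a case for the log head and tail landing on the same byte offset: if they are also in the same LEB the log is empty and all of c->log_bytes is free, otherwise the log has wrapped around and is full. A compact restatement with the ubifs_info fields lifted out as parameters (the lifting is for illustration; the branch logic mirrors the hunk):

    /* h, t: absolute byte positions of the log head and tail in the circular log */
    static long long empty_log_bytes(long long h, long long t,
    				 int lhead_lnum, int ltail_lnum,
    				 long long log_bytes)
    {
    	if (h > t)
    		return log_bytes - h + t;	/* head ahead of tail */
    	else if (h != t)
    		return t - h;			/* tail ahead of head */
    	else if (lhead_lnum != ltail_lnum)
    		return 0;			/* same offset, different LEBs: log is full */
    	else
    		return log_bytes;		/* same offset, same LEB: log is empty */
    }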
|
||||
|
||||
/**
|
||||
@ -232,6 +236,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
|
||||
|
||||
if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
|
||||
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
|
||||
ubifs_assert(c->lhead_lnum != c->ltail_lnum);
|
||||
c->lhead_offs = 0;
|
||||
}
|
||||
|
||||
@ -396,15 +401,14 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
|
||||
/* Switch to the next log LEB */
|
||||
if (c->lhead_offs) {
|
||||
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
|
||||
ubifs_assert(c->lhead_lnum != c->ltail_lnum);
|
||||
c->lhead_offs = 0;
|
||||
}
|
||||
|
||||
if (c->lhead_offs == 0) {
|
||||
/* Must ensure next LEB has been unmapped */
|
||||
err = ubifs_leb_unmap(c, c->lhead_lnum);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
len = ALIGN(len, c->min_io_size);
|
||||
dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
|
||||
@@ -439,9 +443,9 @@ out:
* @ltail_lnum: new log tail LEB number
*
* This function is called on when the commit operation was finished. It
* moves log tail to new position and unmaps LEBs which contain obsolete data.
* Returns zero in case of success and a negative error code in case of
* failure.
* moves log tail to new position and updates the master node so that it stores
* the new log tail LEB number. Returns zero in case of success and a negative
* error code in case of failure.
*/
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
@@ -469,7 +473,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
spin_unlock(&c->buds_lock);

err = dbg_check_bud_bytes(c);
if (err)
goto out;

err = ubifs_write_master(c);

out:
mutex_unlock(&c->log_mutex);
return err;
}
|
||||
@ -679,7 +688,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
|
||||
destroy_done_tree(&done_tree);
|
||||
vfree(buf);
|
||||
if (write_lnum == c->lhead_lnum) {
|
||||
ubifs_err("log is too full");
|
||||
ubifs_err(c, "log is too full");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Unmap remaining LEBs */
|
||||
@ -726,7 +735,7 @@ static int dbg_check_bud_bytes(struct ubifs_info *c)
|
||||
bud_bytes += c->leb_size - bud->start;
|
||||
|
||||
if (c->bud_bytes != bud_bytes) {
|
||||
ubifs_err("bad bud_bytes %lld, calculated %lld",
|
||||
ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
|
||||
c->bud_bytes, bud_bytes);
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
@ -674,7 +674,7 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
|
||||
out:
|
||||
ubifs_release_lprops(c);
|
||||
if (err)
|
||||
ubifs_err("cannot change properties of LEB %d, error %d",
|
||||
ubifs_err(c, "cannot change properties of LEB %d, error %d",
|
||||
lnum, err);
|
||||
return err;
|
||||
}
|
||||
@ -713,7 +713,7 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
|
||||
out:
|
||||
ubifs_release_lprops(c);
|
||||
if (err)
|
||||
ubifs_err("cannot update properties of LEB %d, error %d",
|
||||
ubifs_err(c, "cannot update properties of LEB %d, error %d",
|
||||
lnum, err);
|
||||
return err;
|
||||
}
|
||||
@ -738,7 +738,7 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp)
|
||||
lpp = ubifs_lpt_lookup(c, lnum);
|
||||
if (IS_ERR(lpp)) {
|
||||
err = PTR_ERR(lpp);
|
||||
ubifs_err("cannot read properties of LEB %d, error %d",
|
||||
ubifs_err(c, "cannot read properties of LEB %d, error %d",
|
||||
lnum, err);
|
||||
goto out;
|
||||
}
|
||||
@ -865,13 +865,13 @@ int dbg_check_cats(struct ubifs_info *c)
|
||||
|
||||
list_for_each_entry(lprops, &c->empty_list, list) {
|
||||
if (lprops->free != c->leb_size) {
|
||||
ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "non-empty LEB %d on empty list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->flags & LPROPS_TAKEN) {
|
||||
ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "taken LEB %d on empty list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
@ -881,13 +881,13 @@ int dbg_check_cats(struct ubifs_info *c)
|
||||
i = 0;
|
||||
list_for_each_entry(lprops, &c->freeable_list, list) {
|
||||
if (lprops->free + lprops->dirty != c->leb_size) {
|
||||
ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->flags & LPROPS_TAKEN) {
|
||||
ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "taken LEB %d on freeable list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
@ -895,7 +895,7 @@ int dbg_check_cats(struct ubifs_info *c)
|
||||
i += 1;
|
||||
}
|
||||
if (i != c->freeable_cnt) {
|
||||
ubifs_err("freeable list count %d expected %d", i,
|
||||
ubifs_err(c, "freeable list count %d expected %d", i,
|
||||
c->freeable_cnt);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -904,26 +904,26 @@ int dbg_check_cats(struct ubifs_info *c)
|
||||
list_for_each(pos, &c->idx_gc)
|
||||
i += 1;
|
||||
if (i != c->idx_gc_cnt) {
|
||||
ubifs_err("idx_gc list count %d expected %d", i,
|
||||
ubifs_err(c, "idx_gc list count %d expected %d", i,
|
||||
c->idx_gc_cnt);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
list_for_each_entry(lprops, &c->frdi_idx_list, list) {
|
||||
if (lprops->free + lprops->dirty != c->leb_size) {
|
||||
ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->flags & LPROPS_TAKEN) {
|
||||
ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!(lprops->flags & LPROPS_INDEX)) {
|
||||
ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
ubifs_err(c, "non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
|
||||
lprops->lnum, lprops->free, lprops->dirty,
|
||||
lprops->flags);
|
||||
return -EINVAL;
|
||||
@ -936,15 +936,15 @@ int dbg_check_cats(struct ubifs_info *c)
|
||||
for (i = 0; i < heap->cnt; i++) {
|
||||
lprops = heap->arr[i];
|
||||
if (!lprops) {
|
||||
ubifs_err("null ptr in LPT heap cat %d", cat);
|
||||
ubifs_err(c, "null ptr in LPT heap cat %d", cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->hpos != i) {
|
||||
ubifs_err("bad ptr in LPT heap cat %d", cat);
|
||||
ubifs_err(c, "bad ptr in LPT heap cat %d", cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->flags & LPROPS_TAKEN) {
|
||||
ubifs_err("taken LEB in LPT heap cat %d", cat);
|
||||
ubifs_err(c, "taken LEB in LPT heap cat %d", cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -980,7 +980,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
|
||||
goto out;
|
||||
}
|
||||
if (lprops != lp) {
|
||||
ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
|
||||
ubifs_err(c, "lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
|
||||
(size_t)lprops, (size_t)lp, lprops->lnum,
|
||||
lp->lnum);
|
||||
err = 4;
|
||||
@ -1000,7 +1000,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
|
||||
}
|
||||
out:
|
||||
if (err) {
|
||||
ubifs_err("failed cat %d hpos %d err %d", cat, i, err);
|
||||
ubifs_err(c, "failed cat %d hpos %d err %d", cat, i, err);
|
||||
dump_stack();
|
||||
ubifs_dump_heap(c, heap, cat);
|
||||
}
|
||||
@ -1024,14 +1024,14 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
{
|
||||
struct ubifs_scan_leb *sleb;
|
||||
struct ubifs_scan_node *snod;
|
||||
int cat, lnum = lp->lnum, is_idx = 0, used = 0, freef, dirty, ret;
|
||||
int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret;
|
||||
void *buf = NULL;
|
||||
|
||||
cat = lp->flags & LPROPS_CAT_MASK;
|
||||
if (cat != LPROPS_UNCAT) {
|
||||
cat = ubifs_categorize_lprops(c, lp);
|
||||
if (cat != (lp->flags & LPROPS_CAT_MASK)) {
|
||||
ubifs_err("bad LEB category %d expected %d",
|
||||
ubifs_err(c, "bad LEB category %d expected %d",
|
||||
(lp->flags & LPROPS_CAT_MASK), cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1066,7 +1066,7 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
ubifs_err("bad LPT list (category %d)", cat);
|
||||
ubifs_err(c, "bad LPT list (category %d)", cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -1078,7 +1078,7 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
|
||||
if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) ||
|
||||
lp != heap->arr[lp->hpos]) {
|
||||
ubifs_err("bad LPT heap (category %d)", cat);
|
||||
ubifs_err(c, "bad LPT heap (category %d)", cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -1125,7 +1125,7 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
is_idx = (snod->type == UBIFS_IDX_NODE) ? 1 : 0;
|
||||
|
||||
if (is_idx && snod->type != UBIFS_IDX_NODE) {
|
||||
ubifs_err("indexing node in data LEB %d:%d",
|
||||
ubifs_err(c, "indexing node in data LEB %d:%d",
|
||||
lnum, snod->offs);
|
||||
goto out_destroy;
|
||||
}
|
||||
@ -1146,20 +1146,20 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
}
|
||||
}
|
||||
|
||||
freef = c->leb_size - sleb->endpt;
|
||||
free = c->leb_size - sleb->endpt;
|
||||
dirty = sleb->endpt - used;
|
||||
|
||||
if (freef > c->leb_size || freef < 0 || dirty > c->leb_size ||
|
||||
if (free > c->leb_size || free < 0 || dirty > c->leb_size ||
|
||||
dirty < 0) {
|
||||
ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d",
|
||||
lnum, freef, dirty);
|
||||
ubifs_err(c, "bad calculated accounting for LEB %d: free %d, dirty %d",
|
||||
lnum, free, dirty);
|
||||
goto out_destroy;
|
||||
}
|
||||
|
||||
if (lp->free + lp->dirty == c->leb_size &&
|
||||
freef + dirty == c->leb_size)
|
||||
free + dirty == c->leb_size)
|
||||
if ((is_idx && !(lp->flags & LPROPS_INDEX)) ||
|
||||
(!is_idx && freef == c->leb_size) ||
|
||||
(!is_idx && free == c->leb_size) ||
|
||||
lp->free == c->leb_size) {
|
||||
/*
|
||||
* Empty or freeable LEBs could contain index
|
||||
@ -1168,12 +1168,12 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
* the same reason. Or it may simply not have been
|
||||
* unmapped.
|
||||
*/
|
||||
freef = lp->free;
|
||||
free = lp->free;
|
||||
dirty = lp->dirty;
|
||||
is_idx = 0;
|
||||
}
|
||||
|
||||
if (is_idx && lp->free + lp->dirty == freef + dirty &&
|
||||
if (is_idx && lp->free + lp->dirty == free + dirty &&
|
||||
lnum != c->ihead_lnum) {
|
||||
/*
|
||||
* After an unclean unmount, an index LEB could have a different
|
||||
@ -1186,41 +1186,41 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
* write to the free space at the end of an index LEB - except
|
||||
* by the in-the-gaps method for which it is not a problem.
|
||||
*/
|
||||
freef = lp->free;
|
||||
free = lp->free;
|
||||
dirty = lp->dirty;
|
||||
}
|
||||
|
||||
if (lp->free != freef || lp->dirty != dirty)
|
||||
if (lp->free != free || lp->dirty != dirty)
|
||||
goto out_print;
|
||||
|
||||
if (is_idx && !(lp->flags & LPROPS_INDEX)) {
|
||||
if (freef == c->leb_size)
|
||||
if (free == c->leb_size)
|
||||
/* Free but not unmapped LEB, it's fine */
|
||||
is_idx = 0;
|
||||
else {
|
||||
ubifs_err("indexing node without indexing flag");
|
||||
ubifs_err(c, "indexing node without indexing flag");
|
||||
goto out_print;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_idx && (lp->flags & LPROPS_INDEX)) {
|
||||
ubifs_err("data node with indexing flag");
|
||||
ubifs_err(c, "data node with indexing flag");
|
||||
goto out_print;
|
||||
}
|
||||
|
||||
if (freef == c->leb_size)
|
||||
if (free == c->leb_size)
|
||||
lst->empty_lebs += 1;
|
||||
|
||||
if (is_idx)
|
||||
lst->idx_lebs += 1;
|
||||
|
||||
if (!(lp->flags & LPROPS_INDEX))
|
||||
lst->total_used += c->leb_size - freef - dirty;
|
||||
lst->total_free += freef;
|
||||
lst->total_used += c->leb_size - free - dirty;
|
||||
lst->total_free += free;
|
||||
lst->total_dirty += dirty;
|
||||
|
||||
if (!(lp->flags & LPROPS_INDEX)) {
|
||||
int spc = freef + dirty;
|
||||
int spc = free + dirty;
|
||||
|
||||
if (spc < c->dead_wm)
|
||||
lst->total_dead += spc;
|
||||
@ -1233,8 +1233,8 @@ static int scan_check_cb(struct ubifs_info *c,
|
||||
return LPT_SCAN_CONTINUE;
|
||||
|
||||
out_print:
|
||||
ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
|
||||
lnum, lp->free, lp->dirty, lp->flags, freef, dirty);
|
||||
ubifs_err(c, "bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
|
||||
lnum, lp->free, lp->dirty, lp->flags, free, dirty);
|
||||
ubifs_dump_leb(c, lnum);
|
||||
out_destroy:
|
||||
ubifs_scan_destroy(sleb);
|
||||
@ -1285,11 +1285,11 @@ int dbg_check_lprops(struct ubifs_info *c)
|
||||
lst.total_free != c->lst.total_free ||
|
||||
lst.total_dirty != c->lst.total_dirty ||
|
||||
lst.total_used != c->lst.total_used) {
|
||||
ubifs_err("bad overall accounting");
|
||||
ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
|
||||
ubifs_err(c, "bad overall accounting");
|
||||
ubifs_err(c, "calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
|
||||
lst.empty_lebs, lst.idx_lebs, lst.total_free,
|
||||
lst.total_dirty, lst.total_used);
|
||||
ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
|
||||
ubifs_err(c, "read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
|
||||
c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
|
||||
c->lst.total_dirty, c->lst.total_used);
|
||||
err = -EINVAL;
|
||||
@ -1298,10 +1298,10 @@ int dbg_check_lprops(struct ubifs_info *c)
|
||||
|
||||
if (lst.total_dead != c->lst.total_dead ||
|
||||
lst.total_dark != c->lst.total_dark) {
|
||||
ubifs_err("bad dead/dark space accounting");
|
||||
ubifs_err("calculated: total_dead %lld, total_dark %lld",
|
||||
ubifs_err(c, "bad dead/dark space accounting");
|
||||
ubifs_err(c, "calculated: total_dead %lld, total_dark %lld",
|
||||
lst.total_dead, lst.total_dark);
|
||||
ubifs_err("read from lprops: total_dead %lld, total_dark %lld",
|
||||
ubifs_err(c, "read from lprops: total_dead %lld, total_dark %lld",
|
||||
c->lst.total_dead, c->lst.total_dark);
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
|
@ -141,13 +141,13 @@ int ubifs_calc_lpt_geom(struct ubifs_info *c)
|
||||
sz = c->lpt_sz * 2; /* Must have at least 2 times the size */
|
||||
lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size);
|
||||
if (lebs_needed > c->lpt_lebs) {
|
||||
ubifs_err("too few LPT LEBs");
|
||||
ubifs_err(c, "too few LPT LEBs");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Verify that ltab fits in a single LEB (since ltab is a single node */
|
||||
if (c->ltab_sz > c->leb_size) {
|
||||
ubifs_err("LPT ltab too big");
|
||||
ubifs_err(c, "LPT ltab too big");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -209,7 +209,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs,
|
||||
continue;
|
||||
}
|
||||
if (c->ltab_sz > c->leb_size) {
|
||||
ubifs_err("LPT ltab too big");
|
||||
ubifs_err(c, "LPT ltab too big");
|
||||
return -EINVAL;
|
||||
}
|
||||
*main_lebs = c->main_lebs;
|
||||
@ -907,7 +907,7 @@ static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode,
|
||||
*
|
||||
* This function returns %0 on success and a negative error code on failure.
|
||||
*/
|
||||
static int check_lpt_crc(void *buf, int len)
|
||||
static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len)
|
||||
{
|
||||
int pos = 0;
|
||||
uint8_t *addr = buf;
|
||||
@ -917,8 +917,8 @@ static int check_lpt_crc(void *buf, int len)
|
||||
calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
|
||||
len - UBIFS_LPT_CRC_BYTES);
|
||||
if (crc != calc_crc) {
|
||||
ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
|
||||
calc_crc);
|
||||
ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx",
|
||||
crc, calc_crc);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -934,14 +934,15 @@ static int check_lpt_crc(void *buf, int len)
|
||||
*
|
||||
* This function returns %0 on success and a negative error code on failure.
|
||||
*/
|
||||
static int check_lpt_type(uint8_t **addr, int *pos, int type)
|
||||
static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr,
|
||||
int *pos, int type)
|
||||
{
|
||||
int node_type;
|
||||
|
||||
node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
|
||||
if (node_type != type) {
|
||||
ubifs_err("invalid type (%d) in LPT node type %d", node_type,
|
||||
type);
|
||||
ubifs_err(c, "invalid type (%d) in LPT node type %d",
|
||||
node_type, type);
|
||||
dump_stack();
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -962,7 +963,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf,
|
||||
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
|
||||
int i, pos = 0, err;
|
||||
|
||||
err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE);
|
||||
err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_PNODE);
|
||||
if (err)
|
||||
return err;
|
||||
if (c->big_lpt)
|
||||
@ -981,7 +982,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf,
|
||||
lprops->flags = 0;
|
||||
lprops->flags |= ubifs_categorize_lprops(c, lprops);
|
||||
}
|
||||
err = check_lpt_crc(buf, c->pnode_sz);
|
||||
err = check_lpt_crc(c, buf, c->pnode_sz);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -999,7 +1000,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
|
||||
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
|
||||
int i, pos = 0, err;
|
||||
|
||||
err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE);
|
||||
err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_NNODE);
|
||||
if (err)
|
||||
return err;
|
||||
if (c->big_lpt)
|
||||
@ -1015,7 +1016,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
|
||||
nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
|
||||
c->lpt_offs_bits);
|
||||
}
|
||||
err = check_lpt_crc(buf, c->nnode_sz);
|
||||
err = check_lpt_crc(c, buf, c->nnode_sz);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1031,7 +1032,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf)
|
||||
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
|
||||
int i, pos = 0, err;
|
||||
|
||||
err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB);
|
||||
err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LTAB);
|
||||
if (err)
|
||||
return err;
|
||||
for (i = 0; i < c->lpt_lebs; i++) {
|
||||
@ -1047,7 +1048,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf)
|
||||
c->ltab[i].tgc = 0;
|
||||
c->ltab[i].cmt = 0;
|
||||
}
|
||||
err = check_lpt_crc(buf, c->ltab_sz);
|
||||
err = check_lpt_crc(c, buf, c->ltab_sz);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1064,7 +1065,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf)
|
||||
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
|
||||
int i, pos = 0, err;
|
||||
|
||||
err = check_lpt_type(&addr, &pos, UBIFS_LPT_LSAVE);
|
||||
err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LSAVE);
|
||||
if (err)
|
||||
return err;
|
||||
for (i = 0; i < c->lsave_cnt; i++) {
|
||||
@ -1074,7 +1075,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf)
|
||||
return -EINVAL;
|
||||
c->lsave[i] = lnum;
|
||||
}
|
||||
err = check_lpt_crc(buf, c->lsave_sz);
|
||||
err = check_lpt_crc(c, buf, c->lsave_sz);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
@ -1241,7 +1242,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
|
||||
ubifs_err(c, "error %d reading nnode at %d:%d", err, lnum, offs);
|
||||
dump_stack();
|
||||
kfree(nnode);
|
||||
return err;
|
||||
@ -1306,10 +1307,10 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
|
||||
ubifs_err(c, "error %d reading pnode at %d:%d", err, lnum, offs);
|
||||
ubifs_dump_pnode(c, pnode, parent, iip);
|
||||
dump_stack();
|
||||
ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
|
||||
ubifs_err(c, "calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
|
||||
kfree(pnode);
|
||||
return err;
|
||||
}
|
||||
@ -1464,7 +1465,6 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
|
||||
return ERR_CAST(nnode);
|
||||
}
|
||||
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
|
||||
shft -= UBIFS_LPT_FANOUT_SHIFT;
|
||||
pnode = ubifs_get_pnode(c, nnode, iip);
|
||||
if (IS_ERR(pnode))
|
||||
return ERR_CAST(pnode);
|
||||
@ -1604,7 +1604,6 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
|
||||
return ERR_CAST(nnode);
|
||||
}
|
||||
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
|
||||
shft -= UBIFS_LPT_FANOUT_SHIFT;
|
||||
pnode = ubifs_get_pnode(c, nnode, iip);
|
||||
if (IS_ERR(pnode))
|
||||
return ERR_CAST(pnode);
|
||||
@ -1970,7 +1969,6 @@ again:
|
||||
}
|
||||
}
|
||||
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
|
||||
shft -= UBIFS_LPT_FANOUT_SHIFT;
|
||||
pnode = scan_get_pnode(c, path + h, nnode, iip);
|
||||
if (IS_ERR(pnode)) {
|
||||
err = PTR_ERR(pnode);
|
||||
@ -2104,7 +2102,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
|
||||
int i;
|
||||
|
||||
if (pnode->num != col) {
|
||||
ubifs_err("pnode num %d expected %d parent num %d iip %d",
|
||||
ubifs_err(c, "pnode num %d expected %d parent num %d iip %d",
|
||||
pnode->num, col, pnode->parent->num, pnode->iip);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2119,13 +2117,13 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
|
||||
if (lnum >= c->leb_cnt)
|
||||
continue;
|
||||
if (lprops->lnum != lnum) {
|
||||
ubifs_err("bad LEB number %d expected %d",
|
||||
ubifs_err(c, "bad LEB number %d expected %d",
|
||||
lprops->lnum, lnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lprops->flags & LPROPS_TAKEN) {
|
||||
if (cat != LPROPS_UNCAT) {
|
||||
ubifs_err("LEB %d taken but not uncat %d",
|
||||
ubifs_err(c, "LEB %d taken but not uncat %d",
|
||||
lprops->lnum, cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2138,7 +2136,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
|
||||
case LPROPS_FRDI_IDX:
|
||||
break;
|
||||
default:
|
||||
ubifs_err("LEB %d index but cat %d",
|
||||
ubifs_err(c, "LEB %d index but cat %d",
|
||||
lprops->lnum, cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2151,7 +2149,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
|
||||
case LPROPS_FREEABLE:
|
||||
break;
|
||||
default:
|
||||
ubifs_err("LEB %d not index but cat %d",
|
||||
ubifs_err(c, "LEB %d not index but cat %d",
|
||||
lprops->lnum, cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2192,26 +2190,28 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
|
||||
break;
|
||||
}
|
||||
if (!found) {
|
||||
ubifs_err("LEB %d cat %d not found in cat heap/list",
|
||||
ubifs_err(c, "LEB %d cat %d not found in cat heap/list",
|
||||
lprops->lnum, cat);
|
||||
return -EINVAL;
|
||||
}
|
||||
switch (cat) {
|
||||
case LPROPS_EMPTY:
|
||||
if (lprops->free != c->leb_size) {
|
||||
ubifs_err("LEB %d cat %d free %d dirty %d",
|
||||
ubifs_err(c, "LEB %d cat %d free %d dirty %d",
|
||||
lprops->lnum, cat, lprops->free,
|
||||
lprops->dirty);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case LPROPS_FREEABLE:
|
||||
case LPROPS_FRDI_IDX:
|
||||
if (lprops->free + lprops->dirty != c->leb_size) {
|
||||
ubifs_err("LEB %d cat %d free %d dirty %d",
|
||||
ubifs_err(c, "LEB %d cat %d free %d dirty %d",
|
||||
lprops->lnum, cat, lprops->free,
|
||||
lprops->dirty);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -2243,7 +2243,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
|
||||
/* cnode is a nnode */
|
||||
num = calc_nnode_num(row, col);
|
||||
if (cnode->num != num) {
|
||||
ubifs_err("nnode num %d expected %d parent num %d iip %d",
|
||||
ubifs_err(c, "nnode num %d expected %d parent num %d iip %d",
|
||||
cnode->num, num,
|
||||
(nnode ? nnode->num : 0), cnode->iip);
|
||||
return -EINVAL;
|
||||
|
@ -301,7 +301,6 @@ static int layout_cnodes(struct ubifs_info *c)
|
||||
ubifs_assert(lnum >= c->lpt_first &&
|
||||
lnum <= c->lpt_last);
|
||||
}
|
||||
done_ltab = 1;
|
||||
c->ltab_lnum = lnum;
|
||||
c->ltab_offs = offs;
|
||||
offs += c->ltab_sz;
|
||||
@ -317,7 +316,7 @@ static int layout_cnodes(struct ubifs_info *c)
|
||||
return 0;
|
||||
|
||||
no_space:
|
||||
ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
|
||||
ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
|
||||
lnum, offs, len, done_ltab, done_lsave);
|
||||
ubifs_dump_lpt_info(c);
|
||||
ubifs_dump_lpt_lebs(c);
|
||||
@ -458,9 +457,9 @@ static int write_cnodes(struct ubifs_info *c)
|
||||
* important.
|
||||
*/
|
||||
clear_bit(DIRTY_CNODE, &cnode->flags);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(COW_CNODE, &cnode->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
offs += len;
|
||||
dbg_chk_lpt_sz(c, 1, len);
|
||||
cnode = cnode->cnext;
|
||||
@ -512,7 +511,6 @@ static int write_cnodes(struct ubifs_info *c)
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
done_ltab = 1;
|
||||
ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
|
||||
offs += c->ltab_sz;
|
||||
dbg_chk_lpt_sz(c, 1, c->ltab_sz);
|
||||
@ -543,7 +541,7 @@ static int write_cnodes(struct ubifs_info *c)
|
||||
return 0;
|
||||
|
||||
no_space:
|
||||
ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
|
||||
ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
|
||||
lnum, offs, len, done_ltab, done_lsave);
|
||||
ubifs_dump_lpt_info(c);
|
||||
ubifs_dump_lpt_lebs(c);
|
||||
@ -1644,7 +1642,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
|
||||
|
||||
buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!buf) {
|
||||
ubifs_err("cannot allocate memory for ltab checking");
|
||||
ubifs_err(c, "cannot allocate memory for ltab checking");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1666,18 +1664,18 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
|
||||
continue;
|
||||
}
|
||||
if (!dbg_is_all_ff(p, len)) {
|
||||
ubifs_err("invalid empty space in LEB %d at %d",
|
||||
ubifs_err(c, "invalid empty space in LEB %d at %d",
|
||||
lnum, c->leb_size - len);
|
||||
err = -EINVAL;
|
||||
}
|
||||
i = lnum - c->lpt_first;
|
||||
if (len != c->ltab[i].free) {
|
||||
ubifs_err("invalid free space in LEB %d (free %d, expected %d)",
|
||||
ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)",
|
||||
lnum, len, c->ltab[i].free);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (dirty != c->ltab[i].dirty) {
|
||||
ubifs_err("invalid dirty space in LEB %d (dirty %d, expected %d)",
|
||||
ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)",
|
||||
lnum, dirty, c->ltab[i].dirty);
|
||||
err = -EINVAL;
|
||||
}
|
||||
@ -1731,7 +1729,7 @@ int dbg_check_ltab(struct ubifs_info *c)
|
||||
for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
|
||||
err = dbg_check_ltab_lnum(c, lnum);
|
||||
if (err) {
|
||||
ubifs_err("failed at LEB %d", lnum);
|
||||
ubifs_err(c, "failed at LEB %d", lnum);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
@ -1763,7 +1761,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c)
|
||||
free += c->leb_size;
|
||||
}
|
||||
if (free < c->lpt_sz) {
|
||||
ubifs_err("LPT space error: free %lld lpt_sz %lld",
|
||||
ubifs_err(c, "LPT space error: free %lld lpt_sz %lld",
|
||||
free, c->lpt_sz);
|
||||
ubifs_dump_lpt_info(c);
|
||||
ubifs_dump_lpt_lebs(c);
|
||||
@ -1803,12 +1801,12 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
|
||||
d->chk_lpt_lebs = 0;
|
||||
d->chk_lpt_wastage = 0;
|
||||
if (c->dirty_pn_cnt > c->pnode_cnt) {
|
||||
ubifs_err("dirty pnodes %d exceed max %d",
|
||||
ubifs_err(c, "dirty pnodes %d exceed max %d",
|
||||
c->dirty_pn_cnt, c->pnode_cnt);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (c->dirty_nn_cnt > c->nnode_cnt) {
|
||||
ubifs_err("dirty nnodes %d exceed max %d",
|
||||
ubifs_err(c, "dirty nnodes %d exceed max %d",
|
||||
c->dirty_nn_cnt, c->nnode_cnt);
|
||||
err = -EINVAL;
|
||||
}
|
||||
@ -1826,22 +1824,22 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
|
||||
chk_lpt_sz *= d->chk_lpt_lebs;
|
||||
chk_lpt_sz += len - c->nhead_offs;
|
||||
if (d->chk_lpt_sz != chk_lpt_sz) {
|
||||
ubifs_err("LPT wrote %lld but space used was %lld",
|
||||
ubifs_err(c, "LPT wrote %lld but space used was %lld",
|
||||
d->chk_lpt_sz, chk_lpt_sz);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (d->chk_lpt_sz > c->lpt_sz) {
|
||||
ubifs_err("LPT wrote %lld but lpt_sz is %lld",
|
||||
ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld",
|
||||
d->chk_lpt_sz, c->lpt_sz);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) {
|
||||
ubifs_err("LPT layout size %lld but wrote %lld",
|
||||
ubifs_err(c, "LPT layout size %lld but wrote %lld",
|
||||
d->chk_lpt_sz, d->chk_lpt_sz2);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (d->chk_lpt_sz2 && d->new_nhead_offs != len) {
|
||||
ubifs_err("LPT new nhead offs: expected %d was %d",
|
||||
ubifs_err(c, "LPT new nhead offs: expected %d was %d",
|
||||
d->new_nhead_offs, len);
|
||||
err = -EINVAL;
|
||||
}
|
||||
@ -1851,7 +1849,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
|
||||
if (c->big_lpt)
|
||||
lpt_sz += c->lsave_sz;
|
||||
if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) {
|
||||
ubifs_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
|
||||
ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
|
||||
d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
|
||||
err = -EINVAL;
|
||||
}
|
||||
@ -1893,7 +1891,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
|
||||
pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
|
||||
buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!buf) {
|
||||
ubifs_err("cannot allocate memory to dump LPT");
|
||||
ubifs_err(c, "cannot allocate memory to dump LPT");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1945,6 +1943,11 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
|
||||
pr_err("LEB %d:%d, nnode, ",
|
||||
lnum, offs);
|
||||
err = ubifs_unpack_nnode(c, p, &nnode);
|
||||
if (err) {
|
||||
pr_err("failed to unpack_node, error %d\n",
|
||||
err);
|
||||
break;
|
||||
}
|
||||
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
|
||||
pr_cont("%d:%d", nnode.nbranch[i].lnum,
|
||||
nnode.nbranch[i].offs);
|
||||
@ -1963,7 +1966,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
|
||||
pr_err("LEB %d:%d, lsave len\n", lnum, offs);
|
||||
break;
|
||||
default:
|
||||
ubifs_err("LPT node type %d not recognized", node_type);
|
||||
ubifs_err(c, "LPT node type %d not recognized", node_type);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ out:
|
||||
return -EUCLEAN;
|
||||
|
||||
out_dump:
|
||||
ubifs_err("unexpected node type %d master LEB %d:%d",
|
||||
ubifs_err(c, "unexpected node type %d master LEB %d:%d",
|
||||
snod->type, lnum, snod->offs);
|
||||
ubifs_scan_destroy(sleb);
|
||||
return -EINVAL;
|
||||
@ -234,7 +234,7 @@ static int validate_master(const struct ubifs_info *c)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_err("bad master node at offset %d error %d", c->mst_offs, err);
|
||||
ubifs_err(c, "bad master node at offset %d error %d", c->mst_offs, err);
|
||||
ubifs_dump_node(c, c->mst_node);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -310,7 +310,7 @@ int ubifs_read_master(struct ubifs_info *c)
|
||||
|
||||
if (c->leb_cnt < old_leb_cnt ||
|
||||
c->leb_cnt < UBIFS_MIN_LEB_CNT) {
|
||||
ubifs_err("bad leb_cnt on master node");
|
||||
ubifs_err(c, "bad leb_cnt on master node");
|
||||
ubifs_dump_node(c, c->mst_node);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -349,10 +349,9 @@ int ubifs_read_master(struct ubifs_info *c)
|
||||
* ubifs_write_master - write master node.
|
||||
* @c: UBIFS file-system description object
|
||||
*
|
||||
* This function writes the master node. The caller has to take the
|
||||
* @c->mst_mutex lock before calling this function. Returns zero in case of
|
||||
* success and a negative error code in case of failure. The master node is
|
||||
* written twice to enable recovery.
|
||||
* This function writes the master node. Returns zero in case of success and a
|
||||
* negative error code in case of failure. The master node is written twice to
|
||||
* enable recovery.
|
||||
*/
|
||||
int ubifs_write_master(struct ubifs_info *c)
|
||||
{
|
||||
|
@ -78,7 +78,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
else if (inum > o->inum)
p = &(*p)->rb_right;
else {
ubifs_err("orphaned twice");
ubifs_err(c, "orphaned twice");
spin_unlock(&c->orphan_lock);
kfree(orphan);
return 0;
@ -145,7 +145,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
}
}
spin_unlock(&c->orphan_lock);
ubifs_err("missing orphan ino %lu", (unsigned long)inum);
ubifs_err(c, "missing orphan ino %lu", (unsigned long)inum);
dump_stack();
}

@ -277,7 +277,7 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err("out of space in orphan area");
ubifs_err(c, "out of space in orphan area");
return -EINVAL;
}
}
@ -336,7 +336,6 @@ static int write_orph_nodes(struct ubifs_info *c, int atomic)
int lnum;

/* Unmap any unused LEBs after consolidation */
lnum = c->ohead_lnum + 1;
for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) {
err = ubifs_leb_unmap(c, lnum);
if (err)
@ -388,7 +387,7 @@ static int consolidate(struct ubifs_info *c)
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err("out of space in orphan area");
ubifs_err(c, "out of space in orphan area");
err = -EINVAL;
}
spin_unlock(&c->orphan_lock);
@ -560,7 +559,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,

list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
ubifs_err("invalid node type %d in orphan area at %d:%d",
ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
snod->type, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
@ -587,7 +586,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
* number. That makes this orphan node, out of date.
*/
if (!first) {
ubifs_err("out of order commit number %llu in orphan node at %d:%d",
ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
@ -822,20 +821,20 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
if (inum != ci->last_ino) {
/* Lowest node type is the inode node, so it comes first */
if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
ubifs_err("found orphan node ino %lu, type %d",
ubifs_err(c, "found orphan node ino %lu, type %d",
(unsigned long)inum, key_type(c, &zbr->key));
ci->last_ino = inum;
ci->tot_inos += 1;
err = ubifs_tnc_read_node(c, zbr, ci->node);
if (err) {
ubifs_err("node read failed, error %d", err);
ubifs_err(c, "node read failed, error %d", err);
return err;
}
if (ci->node->nlink == 0)
/* Must be recorded as an orphan */
if (!dbg_find_check_orphan(&ci->root, inum) &&
!dbg_find_orphan(c, inum)) {
ubifs_err("missing orphan, ino %lu",
ubifs_err(c, "missing orphan, ino %lu",
(unsigned long)inum);
ci->missing += 1;
}
@ -878,7 +877,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)

buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to check orphans");
ubifs_err(c, "cannot allocate memory to check orphans");
return 0;
}

@ -916,7 +915,7 @@ static int dbg_check_orphans(struct ubifs_info *c)
ci.root = RB_ROOT;
ci.node = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ci.node) {
ubifs_err("out of memory");
ubifs_err(c, "out of memory");
return -ENOMEM;
}

@ -926,12 +925,12 @@ static int dbg_check_orphans(struct ubifs_info *c)

err = dbg_walk_index(c, &dbg_orphan_check, NULL, &ci);
if (err) {
ubifs_err("cannot scan TNC, error %d", err);
ubifs_err(c, "cannot scan TNC, error %d", err);
goto out;
}

if (ci.missing) {
ubifs_err("%lu missing orphan(s)", ci.missing);
ubifs_err(c, "%lu missing orphan(s)", ci.missing);
err = -EINVAL;
goto out;
}

@ -298,7 +298,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
mst = mst2;
}

ubifs_msg("recovered master node from LEB %d",
ubifs_msg(c, "recovered master node from LEB %d",
(mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
@ -355,13 +355,13 @@ int ubifs_recover_master_node(struct ubifs_info *c)
out_err:
err = -EINVAL;
out_free:
ubifs_err("failed to recover master node");
ubifs_err(c, "failed to recover master node");
if (mst1) {
ubifs_err("dumping first master node");
ubifs_err(c, "dumping first master node");
ubifs_dump_node(c, mst1);
}
if (mst2) {
ubifs_err("dumping second master node");
ubifs_err(c, "dumping second master node");
ubifs_dump_node(c, mst2);
}
vfree(buf2);
@ -593,7 +593,6 @@ static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
* drop_last_node - drop the last node.
* @sleb: scanned LEB information
* @offs: offset of dropped nodes is returned here
* @grouped: non-zero if whole group of nodes have to be dropped
*
* This is a helper function for 'ubifs_recover_leb()' which drops the last
* node of the scanned LEB.
@ -626,8 +625,8 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
*
* This function does a scan of a LEB, but caters for errors that might have
* been caused by the unclean unmount from which we are attempting to recover.
* Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is
* found, and a negative error code in case of failure.
* Returns the scanned information on success and a negative error code on
* failure.
*/
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf, int jhead)
@ -680,7 +679,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
ret, lnum, offs);
break;
} else {
ubifs_err("unexpected return value %d", ret);
ubifs_err(c, "unexpected return value %d", ret);
err = -EINVAL;
goto error;
}
@ -700,7 +699,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* See header comment for this file for more
* explanations about the reasons we have this check.
*/
ubifs_err("corrupt empty space LEB %d:%d, corruption starts at %d",
ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d",
lnum, offs, corruption);
/* Make sure we dump interesting non-0xFF data */
offs += corruption;
@ -786,13 +785,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,

corrupted_rescan:
/* Re-scan the corrupted data with verbose messages */
ubifs_err("corruption %d", ret);
ubifs_err(c, "corruption %d", ret);
ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
ubifs_scanned_corruption(c, lnum, offs, buf);
err = -EUCLEAN;
error:
ubifs_err("LEB %d scanning failed", lnum);
ubifs_err(c, "LEB %d scanning failed", lnum);
ubifs_scan_destroy(sleb);
return ERR_PTR(err);
}
@ -824,15 +823,15 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
|
||||
goto out_free;
|
||||
ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
|
||||
if (ret != SCANNED_A_NODE) {
|
||||
ubifs_err("Not a valid node");
|
||||
ubifs_err(c, "Not a valid node");
|
||||
goto out_err;
|
||||
}
|
||||
if (cs_node->ch.node_type != UBIFS_CS_NODE) {
|
||||
ubifs_err("Node a CS node, type is %d", cs_node->ch.node_type);
|
||||
ubifs_err(c, "Node a CS node, type is %d", cs_node->ch.node_type);
|
||||
goto out_err;
|
||||
}
|
||||
if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
|
||||
ubifs_err("CS node cmt_no %llu != current cmt_no %llu",
|
||||
ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu",
|
||||
(unsigned long long)le64_to_cpu(cs_node->cmt_no),
|
||||
c->cmt_no);
|
||||
goto out_err;
|
||||
@ -845,7 +844,7 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
|
||||
out_err:
|
||||
err = -EINVAL;
|
||||
out_free:
|
||||
ubifs_err("failed to get CS sqnum");
|
||||
ubifs_err(c, "failed to get CS sqnum");
|
||||
kfree(cs_node);
|
||||
return err;
|
||||
}
|
||||
@ -897,7 +896,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
|
||||
}
|
||||
}
|
||||
if (snod->sqnum > cs_sqnum) {
|
||||
ubifs_err("unrecoverable log corruption in LEB %d",
|
||||
ubifs_err(c, "unrecoverable log corruption in LEB %d",
|
||||
lnum);
|
||||
ubifs_scan_destroy(sleb);
|
||||
return ERR_PTR(-EUCLEAN);
|
||||
@ -973,11 +972,8 @@ int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
|
||||
return err;
|
||||
|
||||
dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
|
||||
err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1002,10 +998,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
|
||||
|
||||
if (len == 0) {
|
||||
/* Nothing to read, just unmap it */
|
||||
err = ubifs_leb_unmap(c, lnum);
|
||||
if (err)
|
||||
return err;
|
||||
return 0;
|
||||
return ubifs_leb_unmap(c, lnum);
|
||||
}
|
||||
|
||||
err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
|
||||
@ -1041,7 +1034,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
|
||||
}
|
||||
|
||||
if (ret == SCANNED_EMPTY_SPACE) {
|
||||
ubifs_err("unexpected empty space at %d:%d",
|
||||
ubifs_err(c, "unexpected empty space at %d:%d",
|
||||
lnum, offs);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
@ -1136,7 +1129,7 @@ static int grab_empty_leb(struct ubifs_info *c)
|
||||
*/
|
||||
lnum = ubifs_find_free_leb_for_idx(c);
|
||||
if (lnum < 0) {
|
||||
ubifs_err("could not find an empty LEB");
|
||||
ubifs_err(c, "could not find an empty LEB");
|
||||
ubifs_dump_lprops(c);
|
||||
ubifs_dump_budg(c, &c->bi);
|
||||
return lnum;
|
||||
@ -1216,7 +1209,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
|
||||
}
|
||||
mutex_unlock(&wbuf->io_mutex);
|
||||
if (err < 0) {
|
||||
ubifs_err("GC failed, error %d", err);
|
||||
ubifs_err(c, "GC failed, error %d", err);
|
||||
if (err == -EAGAIN)
|
||||
err = -EINVAL;
|
||||
return err;
|
||||
@ -1470,7 +1463,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
|
||||
ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d",
|
||||
(unsigned long)e->inum, e->i_size, e->d_size, err);
|
||||
return err;
|
||||
}
|
||||
|
@ -451,13 +451,13 @@ int ubifs_validate_entry(struct ubifs_info *c,
|
||||
nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
|
||||
strnlen(dent->name, nlen) != nlen ||
|
||||
le64_to_cpu(dent->inum) > MAX_INUM) {
|
||||
ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ?
|
||||
ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ?
|
||||
"directory entry" : "extended attribute entry");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
|
||||
ubifs_err("bad key type %d", key_type);
|
||||
ubifs_err(c, "bad key type %d", key_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -582,7 +582,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
|
||||
cond_resched();
|
||||
|
||||
if (snod->sqnum >= SQNUM_WATERMARK) {
|
||||
ubifs_err("file system's life ended");
|
||||
ubifs_err(c, "file system's life ended");
|
||||
goto out_dump;
|
||||
}
|
||||
|
||||
@ -640,7 +640,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
|
||||
if (old_size < 0 || old_size > c->max_inode_sz ||
|
||||
new_size < 0 || new_size > c->max_inode_sz ||
|
||||
old_size <= new_size) {
|
||||
ubifs_err("bad truncation node");
|
||||
ubifs_err(c, "bad truncation node");
|
||||
goto out_dump;
|
||||
}
|
||||
|
||||
@ -655,7 +655,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
|
||||
break;
|
||||
}
|
||||
default:
|
||||
ubifs_err("unexpected node type %d in bud LEB %d:%d",
|
||||
ubifs_err(c, "unexpected node type %d in bud LEB %d:%d",
|
||||
snod->type, lnum, snod->offs);
|
||||
err = -EINVAL;
|
||||
goto out_dump;
|
||||
@ -678,7 +678,7 @@ out:
|
||||
return err;
|
||||
|
||||
out_dump:
|
||||
ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
|
||||
ubifs_err(c, "bad node is at LEB %d:%d", lnum, snod->offs);
|
||||
ubifs_dump_node(c, snod->node);
|
||||
ubifs_scan_destroy(sleb);
|
||||
return -EINVAL;
|
||||
@ -798,7 +798,7 @@ static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
|
||||
if (bud) {
|
||||
if (bud->jhead == jhead && bud->start <= offs)
|
||||
return 1;
|
||||
ubifs_err("bud at LEB %d:%d was already referred", lnum, offs);
|
||||
ubifs_err(c, "bud at LEB %d:%d was already referred", lnum, offs);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -854,12 +854,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
|
||||
* numbers.
|
||||
*/
|
||||
if (snod->type != UBIFS_CS_NODE) {
|
||||
ubifs_err("first log node at LEB %d:%d is not CS node",
|
||||
ubifs_err(c, "first log node at LEB %d:%d is not CS node",
|
||||
lnum, offs);
|
||||
goto out_dump;
|
||||
}
|
||||
if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
|
||||
ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
|
||||
ubifs_err(c, "first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
|
||||
lnum, offs,
|
||||
(unsigned long long)le64_to_cpu(node->cmt_no),
|
||||
c->cmt_no);
|
||||
@ -884,7 +884,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
|
||||
|
||||
/* Make sure the first node sits at offset zero of the LEB */
|
||||
if (snod->offs != 0) {
|
||||
ubifs_err("first node is not at zero offset");
|
||||
ubifs_err(c, "first node is not at zero offset");
|
||||
goto out_dump;
|
||||
}
|
||||
|
||||
@ -892,12 +892,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
|
||||
cond_resched();
|
||||
|
||||
if (snod->sqnum >= SQNUM_WATERMARK) {
|
||||
ubifs_err("file system's life ended");
|
||||
ubifs_err(c, "file system's life ended");
|
||||
goto out_dump;
|
||||
}
|
||||
|
||||
if (snod->sqnum < c->cs_sqnum) {
|
||||
ubifs_err("bad sqnum %llu, commit sqnum %llu",
|
||||
ubifs_err(c, "bad sqnum %llu, commit sqnum %llu",
|
||||
snod->sqnum, c->cs_sqnum);
|
||||
goto out_dump;
|
||||
}
|
||||
@ -927,12 +927,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
|
||||
case UBIFS_CS_NODE:
|
||||
/* Make sure it sits at the beginning of LEB */
|
||||
if (snod->offs != 0) {
|
||||
ubifs_err("unexpected node in log");
|
||||
ubifs_err(c, "unexpected node in log");
|
||||
goto out_dump;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ubifs_err("unexpected node in log");
|
||||
ubifs_err(c, "unexpected node in log");
|
||||
goto out_dump;
|
||||
}
|
||||
}
|
||||
@ -948,7 +948,7 @@ out:
|
||||
return err;
|
||||
|
||||
out_dump:
|
||||
ubifs_err("log error detected while replaying the log at LEB %d:%d",
|
||||
ubifs_err(c, "log error detected while replaying the log at LEB %d:%d",
|
||||
lnum, offs + snod->offs);
|
||||
ubifs_dump_node(c, snod->node);
|
||||
ubifs_scan_destroy(sleb);
|
||||
@ -1010,7 +1010,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
|
||||
return free; /* Error code */
|
||||
|
||||
if (c->ihead_offs != c->leb_size - free) {
|
||||
ubifs_err("bad index head LEB %d:%d", c->ihead_lnum,
|
||||
ubifs_err(c, "bad index head LEB %d:%d", c->ihead_lnum,
|
||||
c->ihead_offs);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1021,9 +1021,22 @@ int ubifs_replay_journal(struct ubifs_info *c)
|
||||
|
||||
do {
|
||||
err = replay_log_leb(c, lnum, 0, c->sbuf);
|
||||
if (err == 1)
|
||||
if (err == 1) {
|
||||
if (lnum != c->lhead_lnum)
|
||||
/* We hit the end of the log */
|
||||
break;
|
||||
|
||||
/*
|
||||
* The head of the log must always start with the
|
||||
* "commit start" node on a properly formatted UBIFS.
|
||||
* But we found no nodes at all, which means that
|
||||
* someting went wrong and we cannot proceed mounting
|
||||
* the file-system.
|
||||
*/
|
||||
ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
|
||||
lnum, 0);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (err)
|
||||
goto out;
|
||||
lnum = ubifs_next_log_lnum(c, lnum);
|
||||
|
@ -330,8 +330,10 @@ static int create_default_filesystem(struct ubifs_info *c)
|
||||
cs->ch.node_type = UBIFS_CS_NODE;
|
||||
err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0);
|
||||
kfree(cs);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ubifs_msg("default file-system created");
|
||||
ubifs_msg(c, "default file-system created");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@ -362,13 +364,13 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
|
||||
}
|
||||
|
||||
if (le32_to_cpu(sup->min_io_size) != c->min_io_size) {
|
||||
ubifs_err("min. I/O unit mismatch: %d in superblock, %d real",
|
||||
ubifs_err(c, "min. I/O unit mismatch: %d in superblock, %d real",
|
||||
le32_to_cpu(sup->min_io_size), c->min_io_size);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (le32_to_cpu(sup->leb_size) != c->leb_size) {
|
||||
ubifs_err("LEB size mismatch: %d in superblock, %d real",
|
||||
ubifs_err(c, "LEB size mismatch: %d in superblock, %d real",
|
||||
le32_to_cpu(sup->leb_size), c->leb_size);
|
||||
goto failed;
|
||||
}
|
||||
@ -390,33 +392,33 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
|
||||
min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
|
||||
|
||||
if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
|
||||
ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
|
||||
ubifs_err(c, "bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
|
||||
c->leb_cnt, c->vi.size, min_leb_cnt);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (c->max_leb_cnt < c->leb_cnt) {
|
||||
ubifs_err("max. LEB count %d less than LEB count %d",
|
||||
ubifs_err(c, "max. LEB count %d less than LEB count %d",
|
||||
c->max_leb_cnt, c->leb_cnt);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
|
||||
ubifs_err("too few main LEBs count %d, must be at least %d",
|
||||
ubifs_err(c, "too few main LEBs count %d, must be at least %d",
|
||||
c->main_lebs, UBIFS_MIN_MAIN_LEBS);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS;
|
||||
if (c->max_bud_bytes < max_bytes) {
|
||||
ubifs_err("too small journal (%lld bytes), must be at least %lld bytes",
|
||||
ubifs_err(c, "too small journal (%lld bytes), must be at least %lld bytes",
|
||||
c->max_bud_bytes, max_bytes);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
max_bytes = (long long)c->leb_size * c->main_lebs;
|
||||
if (c->max_bud_bytes > max_bytes) {
|
||||
ubifs_err("too large journal size (%lld bytes), only %lld bytes available in the main area",
|
||||
ubifs_err(c, "too large journal size (%lld bytes), only %lld bytes available in the main area",
|
||||
c->max_bud_bytes, max_bytes);
|
||||
goto failed;
|
||||
}
|
||||
@ -446,7 +448,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) {
|
||||
if (c->default_compr >= UBIFS_COMPR_TYPES_CNT) {
|
||||
err = 13;
|
||||
goto failed;
|
||||
}
|
||||
@ -465,7 +467,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
ubifs_err("bad superblock, error %d", err);
|
||||
ubifs_err(c, "bad superblock, error %d", err);
|
||||
ubifs_dump_node(c, sup);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -551,12 +553,12 @@ int ubifs_read_superblock(struct ubifs_info *c)
|
||||
ubifs_assert(!c->ro_media || c->ro_mount);
|
||||
if (!c->ro_mount ||
|
||||
c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
|
||||
ubifs_err("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
|
||||
ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
|
||||
c->fmt_version, c->ro_compat_version,
|
||||
UBIFS_FORMAT_VERSION,
|
||||
UBIFS_RO_COMPAT_VERSION);
|
||||
if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
|
||||
ubifs_msg("only R/O mounting is possible");
|
||||
ubifs_msg(c, "only R/O mounting is possible");
|
||||
err = -EROFS;
|
||||
} else
|
||||
err = -EINVAL;
|
||||
@ -572,7 +574,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
|
||||
}
|
||||
|
||||
if (c->fmt_version < 3) {
|
||||
ubifs_err("on-flash format version %d is not supported",
|
||||
ubifs_err(c, "on-flash format version %d is not supported",
|
||||
c->fmt_version);
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
@ -597,7 +599,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
|
||||
c->key_len = UBIFS_SK_LEN;
|
||||
break;
|
||||
default:
|
||||
ubifs_err("unsupported key format");
|
||||
ubifs_err(c, "unsupported key format");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@ -794,7 +796,7 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
|
||||
ubifs_assert(c->space_fixup);
|
||||
ubifs_assert(!c->ro_mount);
|
||||
|
||||
ubifs_msg("start fixing up free space");
|
||||
ubifs_msg(c, "start fixing up free space");
|
||||
|
||||
err = fixup_free_space(c);
|
||||
if (err)
|
||||
@ -813,6 +815,6 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ubifs_msg("free space fixup complete");
|
||||
ubifs_msg(c, "free space fixup complete");
|
||||
return err;
|
||||
}
|
||||
|
@ -92,7 +92,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
|
||||
if (pad_len < 0 ||
|
||||
offs + node_len + pad_len > c->leb_size) {
|
||||
if (!quiet) {
|
||||
ubifs_err("bad pad node at LEB %d:%d",
|
||||
ubifs_err(c, "bad pad node at LEB %d:%d",
|
||||
lnum, offs);
|
||||
ubifs_dump_node(c, pad);
|
||||
}
|
||||
@ -102,7 +102,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
|
||||
/* Make the node pads to 8-byte boundary */
|
||||
if ((node_len + pad_len) & 7) {
|
||||
if (!quiet)
|
||||
ubifs_err("bad padding length %d - %d",
|
||||
ubifs_err(c, "bad padding length %d - %d",
|
||||
offs, offs + node_len + pad_len);
|
||||
return SCANNED_A_BAD_PAD_NODE;
|
||||
}
|
||||
@ -123,7 +123,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
|
||||
* @offs: offset to start at (usually zero)
|
||||
* @sbuf: scan buffer (must be c->leb_size)
|
||||
*
|
||||
* This function returns %0 on success and a negative error code on failure.
|
||||
* This function returns the scanned information on success and a negative error
|
||||
* code on failure.
|
||||
*/
|
||||
struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
|
||||
int offs, void *sbuf)
|
||||
@ -143,15 +144,16 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
|
||||
|
||||
err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0);
|
||||
if (err && err != -EBADMSG) {
|
||||
ubifs_err("cannot read %d bytes from LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot read %d bytes from LEB %d:%d, error %d",
|
||||
c->leb_size - offs, lnum, offs, err);
|
||||
kfree(sleb);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
if (err == -EBADMSG)
|
||||
sleb->ecc = 1;
|
||||
|
||||
/*
|
||||
* Note, we ignore integrity errors (EBASMSG) because all the nodes are
|
||||
* protected by CRC checksums.
|
||||
*/
|
||||
return sleb;
|
||||
}
|
||||
|
||||
@ -161,8 +163,6 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
|
||||
* @sleb: scanning information
|
||||
* @lnum: logical eraseblock number
|
||||
* @offs: offset to start at (usually zero)
|
||||
*
|
||||
* This function returns %0 on success and a negative error code on failure.
|
||||
*/
|
||||
void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
|
||||
int lnum, int offs)
|
||||
@ -232,11 +232,11 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
|
||||
{
|
||||
int len;
|
||||
|
||||
ubifs_err("corruption at LEB %d:%d", lnum, offs);
|
||||
ubifs_err(c, "corruption at LEB %d:%d", lnum, offs);
|
||||
len = c->leb_size - offs;
|
||||
if (len > 8192)
|
||||
len = 8192;
|
||||
ubifs_err("first %d bytes from LEB %d:%d", len, lnum, offs);
|
||||
ubifs_err(c, "first %d bytes from LEB %d:%d", len, lnum, offs);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
|
||||
}
|
||||
|
||||
@ -249,7 +249,7 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
|
||||
* @quiet: print no messages
|
||||
*
|
||||
* This function scans LEB number @lnum and returns complete information about
|
||||
* its contents. Returns the scaned information in case of success and,
|
||||
* its contents. Returns the scanned information in case of success and,
|
||||
* %-EUCLEAN if the LEB neads recovery, and other negative error codes in case
|
||||
* of failure.
|
||||
*
|
||||
@ -291,16 +291,16 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
|
||||
|
||||
switch (ret) {
|
||||
case SCANNED_GARBAGE:
|
||||
ubifs_err("garbage");
|
||||
ubifs_err(c, "garbage");
|
||||
goto corrupted;
|
||||
case SCANNED_A_NODE:
|
||||
break;
|
||||
case SCANNED_A_CORRUPT_NODE:
|
||||
case SCANNED_A_BAD_PAD_NODE:
|
||||
ubifs_err("bad node");
|
||||
ubifs_err(c, "bad node");
|
||||
goto corrupted;
|
||||
default:
|
||||
ubifs_err("unknown");
|
||||
ubifs_err(c, "unknown");
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
@ -317,7 +317,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
|
||||
|
||||
if (offs % c->min_io_size) {
|
||||
if (!quiet)
|
||||
ubifs_err("empty space starts at non-aligned offset %d",
|
||||
ubifs_err(c, "empty space starts at non-aligned offset %d",
|
||||
offs);
|
||||
goto corrupted;
|
||||
}
|
||||
@ -330,7 +330,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
|
||||
for (; len; offs++, buf++, len--)
|
||||
if (*(uint8_t *)buf != 0xff) {
|
||||
if (!quiet)
|
||||
ubifs_err("corrupt empty space at LEB %d:%d",
|
||||
ubifs_err(c, "corrupt empty space at LEB %d:%d",
|
||||
lnum, offs);
|
||||
goto corrupted;
|
||||
}
|
||||
@ -340,14 +340,14 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
|
||||
corrupted:
|
||||
if (!quiet) {
|
||||
ubifs_scanned_corruption(c, lnum, offs, buf);
|
||||
ubifs_err("LEB %d scanning failed", lnum);
|
||||
ubifs_err(c, "LEB %d scanning failed", lnum);
|
||||
}
|
||||
err = -EUCLEAN;
|
||||
ubifs_scan_destroy(sleb);
|
||||
return ERR_PTR(err);
|
||||
|
||||
error:
|
||||
ubifs_err("LEB %d scanning failed, error %d", lnum, err);
|
||||
ubifs_err(c, "LEB %d scanning failed, error %d", lnum, err);
|
||||
ubifs_scan_destroy(sleb);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
146
fs/ubifs/super.c
@ -101,7 +101,7 @@ void iput(struct inode *inode)
|
||||
}
|
||||
|
||||
if (i >= INODE_LOCKED_MAX) {
|
||||
ubifs_err("Error, can't lock (save) more inodes while recovery!!!");
|
||||
dbg_gen("Error, can't lock (save) more inodes while recovery!!!");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -208,13 +208,13 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
|
||||
const struct ubifs_inode *ui = ubifs_inode(inode);
|
||||
|
||||
if (inode->i_size > c->max_inode_sz) {
|
||||
ubifs_err("inode is too large (%lld)",
|
||||
ubifs_err(c, "inode is too large (%lld)",
|
||||
(long long)inode->i_size);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
|
||||
ubifs_err("unknown compression type %d", ui->compr_type);
|
||||
if (ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
|
||||
ubifs_err(c, "unknown compression type %d", ui->compr_type);
|
||||
return 2;
|
||||
}
|
||||
|
||||
@ -228,7 +228,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
|
||||
return 5;
|
||||
|
||||
if (!ubifs_compr_present(ui->compr_type)) {
|
||||
ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in",
|
||||
ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in",
|
||||
inode->i_ino, ubifs_compr_name(ui->compr_type));
|
||||
}
|
||||
|
||||
@ -321,9 +321,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
|
||||
goto out_invalid;
|
||||
|
||||
#ifndef __UBOOT__
|
||||
/* Disable read-ahead */
|
||||
inode->i_mapping->backing_dev_info = &c->bdi;
|
||||
|
||||
switch (inode->i_mode & S_IFMT) {
|
||||
case S_IFREG:
|
||||
inode->i_mapping->a_ops = &ubifs_file_address_operations;
|
||||
@ -363,6 +360,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
|
||||
}
|
||||
memcpy(ui->data, ino->data, ui->data_len);
|
||||
((char *)ui->data)[ui->data_len] = '\0';
|
||||
inode->i_link = ui->data;
|
||||
break;
|
||||
case S_IFBLK:
|
||||
case S_IFCHR:
|
||||
@ -427,14 +425,14 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
|
||||
return inode;
|
||||
|
||||
out_invalid:
|
||||
ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
|
||||
ubifs_err(c, "inode %lu validation failed, error %d", inode->i_ino, err);
|
||||
ubifs_dump_node(c, ino);
|
||||
ubifs_dump_inode(c, inode);
|
||||
err = -EINVAL;
|
||||
out_ino:
|
||||
kfree(ino);
|
||||
out:
|
||||
ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err);
|
||||
ubifs_err(c, "failed to read inode %lu, error %d", inode->i_ino, err);
|
||||
iget_failed(inode);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -505,7 +503,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
|
||||
if (inode->i_nlink) {
|
||||
err = ubifs_jnl_write_inode(c, inode);
|
||||
if (err)
|
||||
ubifs_err("can't write inode %lu, error %d",
|
||||
ubifs_err(c, "can't write inode %lu, error %d",
|
||||
inode->i_ino, err);
|
||||
else
|
||||
err = dbg_check_inode_size(c, inode, ui->ui_size);
|
||||
@ -549,7 +547,7 @@ static void ubifs_evict_inode(struct inode *inode)
|
||||
* Worst case we have a lost orphan inode wasting space, so a
|
||||
* simple error message is OK here.
|
||||
*/
|
||||
ubifs_err("can't delete inode %lu, error %d",
|
||||
ubifs_err(c, "can't delete inode %lu, error %d",
|
||||
inode->i_ino, err);
|
||||
|
||||
out:
|
||||
@ -609,19 +607,19 @@ static int ubifs_show_options(struct seq_file *s, struct dentry *root)
|
||||
struct ubifs_info *c = root->d_sb->s_fs_info;
|
||||
|
||||
if (c->mount_opts.unmount_mode == 2)
|
||||
seq_printf(s, ",fast_unmount");
|
||||
seq_puts(s, ",fast_unmount");
|
||||
else if (c->mount_opts.unmount_mode == 1)
|
||||
seq_printf(s, ",norm_unmount");
|
||||
seq_puts(s, ",norm_unmount");
|
||||
|
||||
if (c->mount_opts.bulk_read == 2)
|
||||
seq_printf(s, ",bulk_read");
|
||||
seq_puts(s, ",bulk_read");
|
||||
else if (c->mount_opts.bulk_read == 1)
|
||||
seq_printf(s, ",no_bulk_read");
|
||||
seq_puts(s, ",no_bulk_read");
|
||||
|
||||
if (c->mount_opts.chk_data_crc == 2)
|
||||
seq_printf(s, ",chk_data_crc");
|
||||
seq_puts(s, ",chk_data_crc");
|
||||
else if (c->mount_opts.chk_data_crc == 1)
|
||||
seq_printf(s, ",no_chk_data_crc");
|
||||
seq_puts(s, ",no_chk_data_crc");
|
||||
|
||||
if (c->mount_opts.override_compr) {
|
||||
seq_printf(s, ",compr=%s",
|
||||
@ -681,17 +679,17 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
|
||||
static int init_constants_early(struct ubifs_info *c)
|
||||
{
|
||||
if (c->vi.corrupted) {
|
||||
ubifs_warn("UBI volume is corrupted - read-only mode");
|
||||
ubifs_warn(c, "UBI volume is corrupted - read-only mode");
|
||||
c->ro_media = 1;
|
||||
}
|
||||
|
||||
if (c->di.ro_mode) {
|
||||
ubifs_msg("read-only UBI device");
|
||||
ubifs_msg(c, "read-only UBI device");
|
||||
c->ro_media = 1;
|
||||
}
|
||||
|
||||
if (c->vi.vol_type == UBI_STATIC_VOLUME) {
|
||||
ubifs_msg("static UBI volume - read-only mode");
|
||||
ubifs_msg(c, "static UBI volume - read-only mode");
|
||||
c->ro_media = 1;
|
||||
}
|
||||
|
||||
@ -705,19 +703,19 @@ static int init_constants_early(struct ubifs_info *c)
|
||||
c->max_write_shift = fls(c->max_write_size) - 1;
|
||||
|
||||
if (c->leb_size < UBIFS_MIN_LEB_SZ) {
|
||||
ubifs_err("too small LEBs (%d bytes), min. is %d bytes",
|
||||
ubifs_err(c, "too small LEBs (%d bytes), min. is %d bytes",
|
||||
c->leb_size, UBIFS_MIN_LEB_SZ);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
|
||||
ubifs_err("too few LEBs (%d), min. is %d",
|
||||
ubifs_err(c, "too few LEBs (%d), min. is %d",
|
||||
c->leb_cnt, UBIFS_MIN_LEB_CNT);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!is_power_of_2(c->min_io_size)) {
|
||||
ubifs_err("bad min. I/O size %d", c->min_io_size);
|
||||
ubifs_err(c, "bad min. I/O size %d", c->min_io_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -728,7 +726,7 @@ static int init_constants_early(struct ubifs_info *c)
|
||||
if (c->max_write_size < c->min_io_size ||
|
||||
c->max_write_size % c->min_io_size ||
|
||||
!is_power_of_2(c->max_write_size)) {
|
||||
ubifs_err("bad write buffer size %d for %d min. I/O unit",
|
||||
ubifs_err(c, "bad write buffer size %d for %d min. I/O unit",
|
||||
c->max_write_size, c->min_io_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -854,7 +852,7 @@ static int init_constants_sb(struct ubifs_info *c)
|
||||
tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
|
||||
tmp = ALIGN(tmp, c->min_io_size);
|
||||
if (tmp > c->leb_size) {
|
||||
ubifs_err("too small LEB size %d, at least %d needed",
|
||||
ubifs_err(c, "too small LEB size %d, at least %d needed",
|
||||
c->leb_size, tmp);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -869,7 +867,7 @@ static int init_constants_sb(struct ubifs_info *c)
|
||||
tmp /= c->leb_size;
|
||||
tmp += 1;
|
||||
if (c->log_lebs < tmp) {
|
||||
ubifs_err("too small log %d LEBs, required min. %d LEBs",
|
||||
ubifs_err(c, "too small log %d LEBs, required min. %d LEBs",
|
||||
c->log_lebs, tmp);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -961,7 +959,7 @@ static int take_gc_lnum(struct ubifs_info *c)
|
||||
int err;
|
||||
|
||||
if (c->gc_lnum == -1) {
|
||||
ubifs_err("no LEB for GC");
|
||||
ubifs_err(c, "no LEB for GC");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -982,7 +980,7 @@ static int alloc_wbufs(struct ubifs_info *c)
|
||||
{
|
||||
int i, err;
|
||||
|
||||
c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead),
|
||||
c->jheads = kcalloc(c->jhead_cnt, sizeof(struct ubifs_jhead),
|
||||
GFP_KERNEL);
|
||||
if (!c->jheads)
|
||||
return -ENOMEM;
|
||||
@ -1046,7 +1044,7 @@ static void free_orphans(struct ubifs_info *c)
|
||||
orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
|
||||
list_del(&orph->list);
|
||||
kfree(orph);
|
||||
ubifs_err("orphan list not empty at unmount");
|
||||
ubifs_err(c, "orphan list not empty at unmount");
|
||||
}
|
||||
|
||||
vfree(c->orph_buf);
|
||||
@ -1144,7 +1142,8 @@ static const match_table_t tokens = {
|
||||
*/
|
||||
static int parse_standard_option(const char *option)
|
||||
{
|
||||
ubifs_msg("parse %s", option);
|
||||
|
||||
pr_notice("UBIFS: parse %s\n", option);
|
||||
if (!strcmp(option, "sync"))
|
||||
return MS_SYNCHRONOUS;
|
||||
return 0;
|
||||
@ -1216,7 +1215,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
|
||||
else if (!strcmp(name, "zlib"))
|
||||
c->mount_opts.compr_type = UBIFS_COMPR_ZLIB;
|
||||
else {
|
||||
ubifs_err("unknown compressor \"%s\"", name);
|
||||
ubifs_err(c, "unknown compressor \"%s\"", name); //FIXME: is c ready?
|
||||
kfree(name);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1232,7 +1231,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
|
||||
|
||||
flag = parse_standard_option(p);
|
||||
if (!flag) {
|
||||
ubifs_err("unrecognized mount option \"%s\" or missing value",
|
||||
ubifs_err(c, "unrecognized mount option \"%s\" or missing value",
|
||||
p);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1296,7 +1295,7 @@ again:
|
||||
}
|
||||
|
||||
/* Just disable bulk-read */
|
||||
ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it",
|
||||
ubifs_warn(c, "cannot allocate %d bytes of memory for bulk-read, disabling it",
|
||||
c->max_bu_buf_len);
|
||||
c->mount_opts.bulk_read = 1;
|
||||
c->bulk_read = 0;
|
||||
@ -1316,7 +1315,7 @@ static int check_free_space(struct ubifs_info *c)
|
||||
{
|
||||
ubifs_assert(c->dark_wm > 0);
|
||||
if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
|
||||
ubifs_err("insufficient free space to mount in R/W mode");
|
||||
ubifs_err(c, "insufficient free space to mount in R/W mode");
|
||||
ubifs_dump_budg(c, &c->bi);
|
||||
ubifs_dump_lprops(c);
|
||||
return -ENOSPC;
|
||||
@ -1339,6 +1338,8 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
size_t sz;
|
||||
|
||||
c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
|
||||
/* Suppress error messages while probing if MS_SILENT is set */
|
||||
c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
|
||||
#ifdef __UBOOT__
|
||||
if (!c->ro_mount) {
|
||||
printf("UBIFS: only ro mode in U-Boot allowed.\n");
|
||||
@ -1363,14 +1364,14 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
* This UBI volume is empty, and read-only, or the file system
|
||||
* is mounted read-only - we cannot format it.
|
||||
*/
|
||||
ubifs_err("can't format empty UBI volume: read-only %s",
|
||||
ubifs_err(c, "can't format empty UBI volume: read-only %s",
|
||||
c->ro_media ? "UBI volume" : "mount");
|
||||
err = -EROFS;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (c->ro_media && !c->ro_mount) {
|
||||
ubifs_err("cannot mount read-write - read-only media");
|
||||
ubifs_err(c, "cannot mount read-write - read-only media");
|
||||
err = -EROFS;
|
||||
goto out_free;
|
||||
}
|
||||
@ -1415,12 +1416,14 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
if (err)
|
||||
goto out_free;
|
||||
|
||||
c->probing = 0;
|
||||
|
||||
/*
|
||||
* Make sure the compressor which is set as default in the superblock
|
||||
* or overridden by mount options is actually compiled in.
|
||||
*/
|
||||
if (!ubifs_compr_present(c->default_compr)) {
|
||||
ubifs_err("'compressor \"%s\" is not compiled in",
|
||||
ubifs_err(c, "'compressor \"%s\" is not compiled in",
|
||||
ubifs_compr_name(c->default_compr));
|
||||
err = -ENOTSUPP;
|
||||
goto out_free;
|
||||
@ -1450,7 +1453,7 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
if (IS_ERR(c->bgt)) {
|
||||
err = PTR_ERR(c->bgt);
|
||||
c->bgt = NULL;
|
||||
ubifs_err("cannot spawn \"%s\", error %d",
|
||||
ubifs_err(c, "cannot spawn \"%s\", error %d",
|
||||
c->bgt_name, err);
|
||||
goto out_wbufs;
|
||||
}
|
||||
@ -1465,7 +1468,7 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
init_constants_master(c);
|
||||
|
||||
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
|
||||
ubifs_msg("recovery needed");
|
||||
ubifs_msg(c, "recovery needed");
|
||||
c->need_recovery = 1;
|
||||
}
|
||||
|
||||
@ -1488,7 +1491,7 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
goto out_lpt;
|
||||
}
|
||||
|
||||
if (!c->ro_mount) {
|
||||
if (!c->ro_mount && !c->need_recovery) {
|
||||
/*
|
||||
* Set the "dirty" flag so that if we reboot uncleanly we
|
||||
* will notice this immediately on the next mount.
|
||||
@ -1582,10 +1585,10 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
|
||||
if (c->need_recovery) {
|
||||
if (c->ro_mount)
|
||||
ubifs_msg("recovery deferred");
|
||||
ubifs_msg(c, "recovery deferred");
|
||||
else {
|
||||
c->need_recovery = 0;
|
||||
ubifs_msg("recovery completed");
|
||||
ubifs_msg(c, "recovery completed");
|
||||
/*
|
||||
* GC LEB has to be empty and taken at this point. But
|
||||
* the journal head LEBs may also be accounted as
|
||||
@ -1606,20 +1609,20 @@ static int mount_ubifs(struct ubifs_info *c)
|
||||
|
||||
c->mounting = 0;
|
||||
|
||||
ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
|
||||
ubifs_msg(c, "UBIFS: mounted UBI device %d, volume %d, name \"%s\"%s",
|
||||
c->vi.ubi_num, c->vi.vol_id, c->vi.name,
|
||||
c->ro_mount ? ", R/O mode" : "");
|
||||
x = (long long)c->main_lebs * c->leb_size;
|
||||
y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
|
||||
ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
|
||||
ubifs_msg(c, "LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
|
||||
c->leb_size, c->leb_size >> 10, c->min_io_size,
|
||||
c->max_write_size);
|
||||
ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
|
||||
ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
|
||||
x, x >> 20, c->main_lebs,
|
||||
y, y >> 20, c->log_lebs + c->max_bud_cnt);
|
||||
ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
|
||||
ubifs_msg(c, "reserved for root: %llu bytes (%llu KiB)",
|
||||
c->report_rp_size, c->report_rp_size >> 10);
|
||||
ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
|
||||
ubifs_msg(c, "media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
|
||||
c->fmt_version, c->ro_compat_version,
|
||||
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
|
||||
c->big_lpt ? ", big LPT model" : ", small LPT model");
|
||||
@ -1768,8 +1771,8 @@ static int ubifs_remount_rw(struct ubifs_info *c)
|
||||
int err, lnum;
|
||||
|
||||
if (c->rw_incompat) {
|
||||
ubifs_err("the file-system is not R/W-compatible");
|
||||
ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
|
||||
ubifs_err(c, "the file-system is not R/W-compatible");
|
||||
ubifs_msg(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
|
||||
c->fmt_version, c->ro_compat_version,
|
||||
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
|
||||
return -EROFS;
|
||||
@ -1806,7 +1809,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
|
||||
}
|
||||
|
||||
if (c->need_recovery) {
|
||||
ubifs_msg("completing deferred recovery");
|
||||
ubifs_msg(c, "completing deferred recovery");
|
||||
err = ubifs_write_rcvrd_mst_node(c);
|
||||
if (err)
|
||||
goto out;
|
||||
@ -1855,7 +1858,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
|
||||
if (IS_ERR(c->bgt)) {
|
||||
err = PTR_ERR(c->bgt);
|
||||
c->bgt = NULL;
|
||||
ubifs_err("cannot spawn \"%s\", error %d",
|
||||
ubifs_err(c, "cannot spawn \"%s\", error %d",
|
||||
c->bgt_name, err);
|
||||
goto out;
|
||||
}
|
||||
@ -1889,7 +1892,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
|
||||
|
||||
if (c->need_recovery) {
|
||||
c->need_recovery = 0;
|
||||
ubifs_msg("deferred recovery completed");
|
||||
ubifs_msg(c, "deferred recovery completed");
|
||||
} else {
|
||||
/*
|
||||
* Do not run the debugging space check if the were doing
|
||||
@ -1977,8 +1980,7 @@ static void ubifs_put_super(struct super_block *sb)
|
||||
int i;
|
||||
struct ubifs_info *c = sb->s_fs_info;
|
||||
|
||||
ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
|
||||
c->vi.vol_id);
|
||||
ubifs_msg(c, "un-mount UBI device %d", c->vi.ubi_num);
|
||||
|
||||
/*
|
||||
* The following asserts are only valid if there has not been a failure
|
||||
@ -2034,7 +2036,7 @@ static void ubifs_put_super(struct super_block *sb)
|
||||
* next mount, so we just print a message and
|
||||
* continue to unmount normally.
|
||||
*/
|
||||
ubifs_err("failed to write master node, error %d",
|
||||
ubifs_err(c, "failed to write master node, error %d",
|
||||
err);
|
||||
} else {
|
||||
#ifndef __UBOOT__
|
||||
@ -2065,17 +2067,17 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
|
||||
|
||||
err = ubifs_parse_options(c, data, 1);
|
||||
if (err) {
|
||||
ubifs_err("invalid or unknown remount parameter");
|
||||
ubifs_err(c, "invalid or unknown remount parameter");
|
||||
return err;
|
||||
}
|
||||
|
||||
if (c->ro_mount && !(*flags & MS_RDONLY)) {
|
||||
if (c->ro_error) {
|
||||
ubifs_msg("cannot re-mount R/W due to prior errors");
|
||||
ubifs_msg(c, "cannot re-mount R/W due to prior errors");
|
||||
return -EROFS;
|
||||
}
|
||||
if (c->ro_media) {
|
||||
ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
|
||||
ubifs_msg(c, "cannot re-mount R/W - UBI volume is R/O");
|
||||
return -EROFS;
|
||||
}
|
||||
err = ubifs_remount_rw(c);
|
||||
@ -2083,7 +2085,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
|
||||
return err;
|
||||
} else if (!c->ro_mount && (*flags & MS_RDONLY)) {
|
||||
if (c->ro_error) {
|
||||
ubifs_msg("cannot re-mount R/O due to prior errors");
|
||||
ubifs_msg(c, "cannot re-mount R/O due to prior errors");
|
||||
return -EROFS;
|
||||
}
|
||||
ubifs_remount_ro(c);
|
||||
@ -2200,7 +2202,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
|
||||
mutex_init(&c->lp_mutex);
|
||||
mutex_init(&c->tnc_mutex);
|
||||
mutex_init(&c->log_mutex);
|
||||
mutex_init(&c->mst_mutex);
|
||||
mutex_init(&c->umount_mutex);
|
||||
mutex_init(&c->bu_mutex);
|
||||
mutex_init(&c->write_reserve_mutex);
|
||||
@ -2261,8 +2262,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
*
|
||||
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
|
||||
*/
|
||||
c->bdi.name = "ubifs",
|
||||
c->bdi.capabilities = BDI_CAP_MAP_COPY;
|
||||
c->bdi.name = "ubifs",
|
||||
c->bdi.capabilities = 0;
|
||||
err = bdi_init(&c->bdi);
|
||||
if (err)
|
||||
goto out_close;
|
||||
@ -2285,6 +2286,9 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
if (c->max_inode_sz > MAX_LFS_FILESIZE)
|
||||
sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
|
||||
sb->s_op = &ubifs_super_operations;
|
||||
#ifndef __UBOOT__
|
||||
sb->s_xattr = ubifs_xattr_handlers;
|
||||
#endif
|
||||
|
||||
mutex_lock(&c->umount_mutex);
|
||||
err = mount_ubifs(c);
|
||||
@ -2453,8 +2457,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
|
||||
*/
|
||||
ubi = open_ubi(name, UBI_READONLY);
|
||||
if (IS_ERR(ubi)) {
|
||||
ubifs_err("cannot open \"%s\", error %d",
|
||||
name, (int)PTR_ERR(ubi));
|
||||
pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
|
||||
current->pid, name, (int)PTR_ERR(ubi));
|
||||
return ERR_CAST(ubi);
|
||||
}
|
||||
|
||||
@ -2595,8 +2599,8 @@ int ubifs_init(void)
|
||||
* UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
|
||||
*/
|
||||
if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
|
||||
ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
|
||||
(unsigned int)PAGE_CACHE_SIZE);
|
||||
pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
|
||||
current->pid, (unsigned int)PAGE_CACHE_SIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -2608,7 +2612,9 @@ int ubifs_init(void)
|
||||
if (!ubifs_inode_slab)
|
||||
return -ENOMEM;
|
||||
|
||||
register_shrinker(&ubifs_shrinker_info);
|
||||
err = register_shrinker(&ubifs_shrinker_info);
|
||||
if (err)
|
||||
goto out_slab;
|
||||
#endif
|
||||
|
||||
err = ubifs_compressors_init();
|
||||
@ -2622,7 +2628,8 @@ int ubifs_init(void)
|
||||
|
||||
err = register_filesystem(&ubifs_fs_type);
|
||||
if (err) {
|
||||
ubifs_err("cannot register file system, error %d", err);
|
||||
pr_err("UBIFS error (pid %d): cannot register file system, error %d",
|
||||
current->pid, err);
|
||||
goto out_dbg;
|
||||
}
|
||||
#endif
|
||||
@ -2637,6 +2644,7 @@ out_compr:
|
||||
out_shrinker:
|
||||
#ifndef __UBOOT__
|
||||
unregister_shrinker(&ubifs_shrinker_info);
|
||||
out_slab:
|
||||
#endif
|
||||
kmem_cache_destroy(ubifs_inode_slab);
|
||||
return err;
|
||||
|
@ -93,7 +93,7 @@ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
|
||||
else if (offs > o->offs)
|
||||
p = &(*p)->rb_right;
|
||||
else {
|
||||
ubifs_err("old idx added twice!");
|
||||
ubifs_err(c, "old idx added twice!");
|
||||
kfree(old_idx);
|
||||
return 0;
|
||||
}
|
||||
@ -442,7 +442,7 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
|
||||
|
||||
err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
|
||||
if (err) {
|
||||
ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
|
||||
ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
|
||||
type, lnum, offs, err);
|
||||
return err;
|
||||
}
|
||||
@ -1683,27 +1683,27 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
|
||||
int err, len;
|
||||
|
||||
if (ch->node_type != UBIFS_DATA_NODE) {
|
||||
ubifs_err("bad node type (%d but expected %d)",
|
||||
ubifs_err(c, "bad node type (%d but expected %d)",
|
||||
ch->node_type, UBIFS_DATA_NODE);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
|
||||
if (err) {
|
||||
ubifs_err("expected node type %d", UBIFS_DATA_NODE);
|
||||
ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
len = le32_to_cpu(ch->len);
|
||||
if (len != zbr->len) {
|
||||
ubifs_err("bad node length %d, expected %d", len, zbr->len);
|
||||
ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Make sure the key of the read node is correct */
|
||||
key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
|
||||
if (!keys_eq(c, &zbr->key, &key1)) {
|
||||
ubifs_err("bad key in node at LEB %d:%d",
|
||||
ubifs_err(c, "bad key in node at LEB %d:%d",
|
||||
zbr->lnum, zbr->offs);
|
||||
dbg_tnck(&zbr->key, "looked for key ");
|
||||
dbg_tnck(&key1, "found node's key ");
|
||||
@ -1715,7 +1715,7 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
|
||||
out_err:
|
||||
err = -EINVAL;
|
||||
out:
|
||||
ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
|
||||
ubifs_dump_node(c, buf);
|
||||
dump_stack();
|
||||
return err;
|
||||
@ -1740,7 +1740,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
|
||||
len = bu->zbranch[bu->cnt - 1].offs;
|
||||
len += bu->zbranch[bu->cnt - 1].len - offs;
|
||||
if (len > bu->buf_len) {
|
||||
ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
|
||||
ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1756,7 +1756,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
|
||||
return -EAGAIN;
|
||||
|
||||
if (err && err != -EBADMSG) {
|
||||
ubifs_err("failed to read from LEB %d:%d, error %d",
|
||||
ubifs_err(c, "failed to read from LEB %d:%d, error %d",
|
||||
lnum, offs, err);
|
||||
dump_stack();
|
||||
dbg_tnck(&bu->key, "key ");
|
||||
@ -2858,10 +2858,11 @@ void ubifs_tnc_close(struct ubifs_info *c)
|
||||
{
|
||||
tnc_destroy_cnext(c);
|
||||
if (c->zroot.znode) {
|
||||
long n;
|
||||
long n, freed;
|
||||
|
||||
ubifs_destroy_tnc_subtree(c->zroot.znode);
|
||||
n = atomic_long_read(&c->clean_zn_cnt);
|
||||
freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
|
||||
ubifs_assert(freed == n);
|
||||
atomic_long_sub(n, &ubifs_clean_zn_cnt);
|
||||
}
|
||||
kfree(c->gap_lebs);
|
||||
@ -3292,7 +3293,6 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
|
||||
goto out_unlock;
|
||||
|
||||
if (err) {
|
||||
err = -EINVAL;
|
||||
key = &from_key;
|
||||
goto out_dump;
|
||||
}
|
||||
@ -3312,7 +3312,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
|
||||
|
||||
out_dump:
|
||||
block = key_block(c, key);
|
||||
ubifs_err("inode %lu has size %lld, but there are data at offset %lld",
|
||||
ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
|
||||
(unsigned long)inode->i_ino, size,
|
||||
((loff_t)block) << UBIFS_BLOCK_SHIFT);
|
||||
mutex_unlock(&c->tnc_mutex);
|
||||
|
@ -285,9 +285,9 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
|
||||
lnum, offs, znode->level, znode->child_cnt);
|
||||
|
||||
if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) {
|
||||
ubifs_err("current fanout %d, branch count %d",
|
||||
ubifs_err(c, "current fanout %d, branch count %d",
|
||||
c->fanout, znode->child_cnt);
|
||||
ubifs_err("max levels %d, znode level %d",
|
||||
ubifs_err(c, "max levels %d, znode level %d",
|
||||
UBIFS_MAX_LEVELS, znode->level);
|
||||
err = 1;
|
||||
goto out_dump;
|
||||
@ -308,7 +308,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
|
||||
if (zbr->lnum < c->main_first ||
|
||||
zbr->lnum >= c->leb_cnt || zbr->offs < 0 ||
|
||||
zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) {
|
||||
ubifs_err("bad branch %d", i);
|
||||
ubifs_err(c, "bad branch %d", i);
|
||||
err = 2;
|
||||
goto out_dump;
|
||||
}
|
||||
@ -320,7 +320,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
|
||||
case UBIFS_XENT_KEY:
|
||||
break;
|
||||
default:
|
||||
ubifs_err("bad key type at slot %d: %d",
|
||||
ubifs_err(c, "bad key type at slot %d: %d",
|
||||
i, key_type(c, &zbr->key));
|
||||
err = 3;
|
||||
goto out_dump;
|
||||
@ -332,17 +332,17 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
|
||||
type = key_type(c, &zbr->key);
|
||||
if (c->ranges[type].max_len == 0) {
|
||||
if (zbr->len != c->ranges[type].len) {
|
||||
ubifs_err("bad target node (type %d) length (%d)",
|
||||
ubifs_err(c, "bad target node (type %d) length (%d)",
|
||||
type, zbr->len);
|
||||
ubifs_err("have to be %d", c->ranges[type].len);
|
||||
ubifs_err(c, "have to be %d", c->ranges[type].len);
|
||||
err = 4;
|
||||
goto out_dump;
|
||||
}
|
||||
} else if (zbr->len < c->ranges[type].min_len ||
|
||||
zbr->len > c->ranges[type].max_len) {
|
||||
ubifs_err("bad target node (type %d) length (%d)",
|
||||
ubifs_err(c, "bad target node (type %d) length (%d)",
type, zbr->len);
ubifs_err("have to be in range of %d-%d",
ubifs_err(c, "have to be in range of %d-%d",
c->ranges[type].min_len,
c->ranges[type].max_len);
err = 5;
@ -362,12 +362,12 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,

cmp = keys_cmp(c, key1, key2);
if (cmp > 0) {
ubifs_err("bad key order (keys %d and %d)", i, i + 1);
ubifs_err(c, "bad key order (keys %d and %d)", i, i + 1);
err = 6;
goto out_dump;
} else if (cmp == 0 && !is_hash_key(c, key1)) {
/* These can only be keys with colliding hash */
ubifs_err("keys %d and %d are not hashed but equivalent",
ubifs_err(c, "keys %d and %d are not hashed but equivalent",
i, i + 1);
err = 7;
goto out_dump;
@ -378,7 +378,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
return 0;

out_dump:
ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
ubifs_err(c, "bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
ubifs_dump_node(c, idx);
kfree(idx);
return -EINVAL;
@ -474,7 +474,7 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
/* Make sure the key of the read node is correct */
key_read(c, node + UBIFS_KEY_OFFSET, &key1);
if (!keys_eq(c, key, &key1)) {
ubifs_err("bad key in node at LEB %d:%d",
ubifs_err(c, "bad key in node at LEB %d:%d",
zbr->lnum, zbr->offs);
dbg_tnck(key, "looked for key ");
dbg_tnck(&key1, "but found node's key ");

@ -103,8 +103,8 @@ struct crypto_comp {
int compressor;
};

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
static inline struct crypto_comp
*crypto_alloc_comp(const char *alg_name, u32 type, u32 mask)
{
struct ubifs_compressor *comp;
struct crypto_comp *ptr;
@ -124,15 +124,16 @@ static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
i++;
}
if (i >= UBIFS_COMPR_TYPES_CNT) {
ubifs_err("invalid compression type %s", alg_name);
dbg_gen("invalid compression type %s", alg_name);
free (ptr);
return NULL;
}
return ptr;
}
static inline int crypto_comp_decompress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
static inline int
crypto_comp_decompress(const struct ubifs_info *c, struct crypto_comp *tfm,
const u8 *src, unsigned int slen, u8 *dst,
unsigned int *dlen)
{
struct ubifs_compressor *compr = ubifs_compressors[tfm->compressor];
int err;
@ -145,7 +146,7 @@ static int crypto_comp_decompress(struct crypto_comp *tfm,

err = compr->decompress(src, slen, dst, (size_t *)dlen);
if (err)
ubifs_err("cannot decompress %d bytes, compressor %s, "
ubifs_err(c, "cannot decompress %d bytes, compressor %s, "
"error %d", slen, compr->name, err);

return err;
@ -172,21 +173,21 @@ atomic_long_t ubifs_clean_zn_cnt;
* The length of the uncompressed data is returned in @out_len. This functions
* returns %0 on success or a negative error code on failure.
*/
int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
int *out_len, int compr_type)
int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
int in_len, void *out_buf, int *out_len, int compr_type)
{
int err;
struct ubifs_compressor *compr;

if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
ubifs_err("invalid compression type %d", compr_type);
ubifs_err(c, "invalid compression type %d", compr_type);
return -EINVAL;
}

compr = ubifs_compressors[compr_type];

if (unlikely(!compr->capi_name)) {
ubifs_err("%s compression is not compiled in", compr->name);
ubifs_err(c, "%s compression is not compiled in", compr->name);
return -EINVAL;
}

@ -198,13 +199,13 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,

if (compr->decomp_mutex)
mutex_lock(compr->decomp_mutex);
err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf,
err = crypto_comp_decompress(c, compr->cc, in_buf, in_len, out_buf,
(unsigned int *)out_len);
if (compr->decomp_mutex)
mutex_unlock(compr->decomp_mutex);
if (err)
ubifs_err("cannot decompress %d bytes, compressor %s, error %d",
in_len, compr->name, err);
ubifs_err(c, "cannot decompress %d bytes, compressor %s,"
" error %d", in_len, compr->name, err);

return err;
}
@ -229,8 +230,9 @@ static int __init compr_init(struct ubifs_compressor *compr)
if (compr->capi_name) {
compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0);
if (IS_ERR(compr->cc)) {
ubifs_err("cannot initialize compressor %s, error %ld",
compr->name, PTR_ERR(compr->cc));
dbg_gen("cannot initialize compressor %s,"
" error %ld", compr->name,
PTR_ERR(compr->cc));
return PTR_ERR(compr->cc);
}
}
@ -384,7 +386,7 @@ static int ubifs_printdir(struct file *file, void *dirent)

out:
if (err != -ENOENT) {
ubifs_err("cannot find next direntry, error %d", err);
ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
}

@ -468,7 +470,7 @@ static int ubifs_finddir(struct super_block *sb, char *dirname,

out:
if (err != -ENOENT)
ubifs_err("cannot find next direntry, error %d", err);
dbg_gen("cannot find next direntry, error %d", err);

out_free:
if (file->private_data)
@ -715,7 +717,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,

dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE;
err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type));
if (err || len != out_len)
goto dump;
@ -731,7 +733,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
return 0;

dump:
ubifs_err("bad data node (block %u, inode %lu)",
ubifs_err(c, "bad data node (block %u, inode %lu)",
block, inode->i_ino);
ubifs_dump_node(c, dn);
return -EINVAL;
@ -833,7 +835,7 @@ static int do_readpage(struct ubifs_info *c, struct inode *inode,
dbg_gen("hole");
goto out_free;
}
ubifs_err("cannot read page %lu of inode %lu, error %d",
ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
page->index, inode->i_ino, err);
goto error;
}

@ -29,6 +29,7 @@
#include <linux/mtd/ubi.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/security.h>
#include "ubifs-media.h"
#else
#include <asm/atomic.h>
@ -617,24 +618,43 @@ static inline ino_t parent_ino(struct dentry *dentry)
#define UBIFS_VERSION 1

/* Normal UBIFS messages */
#define ubifs_msg(fmt, ...) pr_notice("UBIFS: " fmt "\n", ##__VA_ARGS__)
#define ubifs_msg(c, fmt, ...) \
pr_notice("UBIFS (ubi%d:%d): " fmt "\n", \
(c)->vi.ubi_num, (c)->vi.vol_id, ##__VA_ARGS__)
/* UBIFS error messages */
#ifndef __UBOOT__
#define ubifs_err(fmt, ...) \
pr_err("UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
#define ubifs_err(c, fmt, ...) \
pr_err("UBIFS error (ubi%d:%d pid %d): %s: " fmt "\n", \
(c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \
__func__, ##__VA_ARGS__)
/* UBIFS warning messages */
#define ubifs_warn(fmt, ...) \
pr_warn("UBIFS warning (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
#define ubifs_warn(c, fmt, ...) \
pr_warn("UBIFS warning (ubi%d:%d pid %d): %s: " fmt "\n", \
(c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \
__func__, ##__VA_ARGS__)
#else
#define ubifs_err(fmt, ...) \
pr_err("UBIFS error: %s: " fmt "\n", __func__, ##__VA_ARGS__)
#define ubifs_err(c, fmt, ...) \
pr_err("UBIFS error (ubi%d:%d pid %d): %s: " fmt "\n", \
(c)->vi.ubi_num, (c)->vi.vol_id, 0, \
__func__, ##__VA_ARGS__)
/* UBIFS warning messages */
#define ubifs_warn(fmt, ...) \
pr_warn("UBIFS warning: %s: " fmt "\n", __func__, ##__VA_ARGS__)
#define ubifs_warn(c, fmt, ...) \
pr_warn("UBIFS warning (ubi%d:%d pid %d): %s: " fmt "\n", \
(c)->vi.ubi_num, (c)->vi.vol_id, 0, \
__func__, ##__VA_ARGS__)

#endif

/*
* A variant of 'ubifs_err()' which takes the UBIFS file-sytem description
* object as an argument.
*/
#define ubifs_errc(c, fmt, ...) \
do { \
if (!(c)->probing) \
ubifs_err(c, fmt, ##__VA_ARGS__); \
} while (0)

/* UBIFS file system VFS magic number */
#define UBIFS_SUPER_MAGIC 0x24051905

@ -731,7 +751,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
#define WORST_COMPR_FACTOR 2

/*
* How much memory is needed for a buffer where we comress a data node.
* How much memory is needed for a buffer where we compress a data node.
*/
#define COMPRESSED_DATA_NODE_BUF_SZ \
(UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR)
@ -888,7 +908,6 @@ struct ubifs_scan_node {
* @nodes_cnt: number of nodes scanned
* @nodes: list of struct ubifs_scan_node
* @endpt: end point (and therefore the start of empty space)
* @ecc: read returned -EBADMSG
* @buf: buffer containing entire LEB scanned
*/
struct ubifs_scan_leb {
@ -896,7 +915,6 @@ struct ubifs_scan_leb {
int nodes_cnt;
struct list_head nodes;
int endpt;
int ecc;
void *buf;
};

@ -1239,7 +1257,7 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
* @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes
* fields
* @softlimit: soft write-buffer timeout interval
* @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit
* @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
* and @softlimit + @delta)
* @timer: write-buffer timer
* @no_timer: non-zero if this write-buffer does not have a timer
@ -1509,9 +1527,9 @@ struct ubifs_orphan {
/**
* struct ubifs_mount_opts - UBIFS-specific mount options information.
* @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
* @bulk_read: enable/disable bulk-reads (%0 default, %1 disabe, %2 enable)
* @bulk_read: enable/disable bulk-reads (%0 default, %1 disable, %2 enable)
* @chk_data_crc: enable/disable CRC data checking when reading data nodes
* (%0 default, %1 disabe, %2 enable)
* (%0 default, %1 disable, %2 enable)
* @override_compr: override default compressor (%0 - do not override and use
* superblock compressor, %1 - override and use compressor
* specified in @compr_type)
@ -1541,9 +1559,9 @@ struct ubifs_mount_opts {
* optimization)
* @nospace_rp: the same as @nospace, but additionally means that even reserved
* pool is full
* @page_budget: budget for a page (constant, nenver changed after mount)
* @inode_budget: budget for an inode (constant, nenver changed after mount)
* @dent_budget: budget for a directory entry (constant, nenver changed after
* @page_budget: budget for a page (constant, never changed after mount)
* @inode_budget: budget for an inode (constant, never changed after mount)
* @dent_budget: budget for a directory entry (constant, never changed after
* mount)
*/
struct ubifs_budg_info {
@ -1629,7 +1647,6 @@ struct ubifs_debug_info;
*
* @mst_node: master node
* @mst_offs: offset of valid master node
* @mst_mutex: protects the master node area, @mst_node, and @mst_offs
*
* @max_bu_buf_len: maximum bulk-read buffer length
* @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
@ -1796,6 +1813,7 @@ struct ubifs_debug_info;
* @need_recovery: %1 if the file-system needs recovery
* @replaying: %1 during journal replay
* @mounting: %1 while mounting
* @probing: %1 while attempting to mount if MS_SILENT mount flag is set
* @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
@ -1871,7 +1889,6 @@ struct ubifs_info {

struct ubifs_mst_node *mst_node;
int mst_offs;
struct mutex mst_mutex;

int max_bu_buf_len;
struct mutex bu_mutex;
@ -2030,6 +2047,7 @@ struct ubifs_info {
unsigned int replaying:1;
unsigned int mounting:1;
unsigned int remounting_rw:1;
unsigned int probing:1;
struct list_head replay_list;
struct list_head replay_buds;
unsigned long long cs_sqnum;
@ -2049,6 +2067,7 @@ extern spinlock_t ubifs_infos_lock;
extern atomic_long_t ubifs_clean_zn_cnt;
extern struct kmem_cache *ubifs_inode_slab;
extern const struct super_operations ubifs_super_operations;
extern const struct xattr_handler *ubifs_xattr_handlers[];
extern const struct address_space_operations ubifs_file_address_operations;
extern const struct file_operations ubifs_file_operations;
extern const struct inode_operations ubifs_file_inode_operations;
@ -2340,6 +2359,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
size_t size);
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ubifs_removexattr(struct dentry *dentry, const char *name);
int ubifs_init_security(struct inode *dentry, struct inode *inode,
const struct qstr *qstr);

/* super.c */
struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
@ -2370,10 +2391,10 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* compressor.c */
int __init ubifs_compressors_init(void);
void ubifs_compressors_exit(void);
void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
int *compr_type);
int ubifs_decompress(const void *buf, int len, void *out, int *out_len,
int compr_type);
void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len,
void *out_buf, int *out_len, int *compr_type);
int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
void *out, int *out_len, int compr_type);

#include "debug.h"
#include "misc.h"

@ -244,6 +244,7 @@ struct mtd_info {
#ifndef __UBOOT__
int (*_suspend) (struct mtd_info *mtd);
void (*_resume) (struct mtd_info *mtd);
void (*_reboot) (struct mtd_info *mtd);
#endif
/*
* If the driver is something smart, like UBI, it may need to maintain
@ -478,6 +479,8 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#ifdef __UBOOT__
/* drivers/mtd/mtdcore.h */
int add_mtd_device(struct mtd_info *mtd);

@ -12,23 +12,33 @@
#include <linux/types.h>
#ifndef __UBOOT__
#include <linux/ioctl.h>
#include <linux/scatterlist.h>
#include <mtd/ubi-user.h>
#endif

/* All voumes/LEBs */
#define UBI_ALL -1

/*
* Maximum number of scatter gather list entries,
* we use only 64 to have a lower memory foot print.
*/
#define UBI_MAX_SG_COUNT 64

/*
* enum ubi_open_mode - UBI volume open mode constants.
*
* UBI_READONLY: read-only mode
* UBI_READWRITE: read-write mode
* UBI_EXCLUSIVE: exclusive mode
* UBI_METAONLY: modify only the volume meta-data,
* i.e. the data stored in the volume table, but not in any of volume LEBs.
*/
enum {
UBI_READONLY = 1,
UBI_READWRITE,
UBI_EXCLUSIVE
UBI_EXCLUSIVE,
UBI_METAONLY
};

/**
@ -105,6 +115,37 @@ struct ubi_volume_info {
dev_t cdev;
};

/**
* struct ubi_sgl - UBI scatter gather list data structure.
* @list_pos: current position in @sg[]
* @page_pos: current position in @sg[@list_pos]
* @sg: the scatter gather list itself
*
* ubi_sgl is a wrapper around a scatter list which keeps track of the
* current position in the list and the current list item such that
* it can be used across multiple ubi_leb_read_sg() calls.
*/
struct ubi_sgl {
int list_pos;
int page_pos;
#ifndef __UBOOT__
struct scatterlist sg[UBI_MAX_SG_COUNT];
#endif
};

/**
* ubi_sgl_init - initialize an UBI scatter gather list data structure.
* @usgl: the UBI scatter gather struct itself
*
* Please note that you still have to use sg_init_table() or any adequate
* function to initialize the unterlaying struct scatterlist.
*/
static inline void ubi_sgl_init(struct ubi_sgl *usgl)
{
usgl->list_pos = 0;
usgl->page_pos = 0;
}

/**
* struct ubi_device_info - UBI device description data structure.
* @ubi_num: ubi device number
@ -214,6 +255,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
void ubi_close_volume(struct ubi_volume_desc *desc);
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check);
int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
int offset, int len, int check);
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len);
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@ -234,4 +277,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
{
return ubi_leb_read(desc, lnum, buf, offset, len, 0);
}

/*
* This function is the same as the 'ubi_leb_read_sg()' function, but it does
* not provide the checking capability.
*/
static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
struct ubi_sgl *sgl, int offset, int len)
{
return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
}
#endif /* !__LINUX_UBI_H__ */
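
The UBIFS message helpers changed in the ubifs.h hunk above now take the file-system description object, so every message identifies the UBI device and volume it came from. A minimal sketch of the new calling convention follows; the function below is hypothetical and only illustrates how a call site passes `c`, it is not part of this commit.

/* Hypothetical caller, not part of this commit. */
static int example_check_offs(struct ubifs_info *c, int lnum, int offs)
{
	if (offs < 0 || offs >= c->leb_size) {
		/* expands to "UBIFS error (ubi%d:%d pid %d): %s: ..." */
		ubifs_err(c, "bad offset %d in LEB %d", offs, lnum);
		return -EINVAL;
	}
	return 0;
}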
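
The scatter-gather read interface added to ubi.h above (struct ubi_sgl, ubi_sgl_init(), ubi_leb_read_sg() and the ubi_read_sg() wrapper) is used roughly as shown below. This is a sketch of kernel-side usage only: the helper name is made up, `desc` is assumed to be an already opened volume, and under __UBOOT__ the sg[] array is compiled out.

#include <linux/scatterlist.h>
#include <linux/mtd/ubi.h>

/* Sketch only: read 'len' bytes from the start of LEB 'lnum' into 'buf'. */
static int example_read_leb_sg(struct ubi_volume_desc *desc, int lnum,
			       void *buf, int len)
{
	struct ubi_sgl sgl;

	ubi_sgl_init(&sgl);			/* resets list_pos and page_pos */
	sg_init_table(sgl.sg, UBI_MAX_SG_COUNT);/* underlying scatterlist */
	sg_set_buf(&sgl.sg[0], buf, len);	/* one entry for the buffer */

	/* same as ubi_leb_read_sg(), but without the extra checking */
	return ubi_read_sg(desc, lnum, &sgl, 0, len);
}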