Merge tag 'upstream-4.1-rc1' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS updates from Richard Weinberger:
 "This pull request includes the following UBI/UBIFS changes:

   - powercut emulation for UBI
   - a huge update to UBI Fastmap
   - cleanups and bugfixes all over UBI and UBIFS"

* tag 'upstream-4.1-rc1' of git://git.infradead.org/linux-ubifs: (50 commits)
  UBI: power cut emulation for testing
  UBIFS: fix output format of INUM_WATERMARK
  UBI: Fastmap: Fall back to scanning mode after ECC error
  UBI: Fastmap: Remove is_fm_block()
  UBI: Fastmap: Add blank line after declarations
  UBI: Fastmap: Remove else after return.
  UBI: Fastmap: Introduce may_reserve_for_fm()
  UBI: Fastmap: Introduce ubi_fastmap_init()
  UBI: Fastmap: Wire up WL accessor functions
  UBI: Add accessor functions for WL data structures
  UBI: Move fastmap specific functions out of wl.c
  UBI: Fastmap: Add new module parameter fm_debug
  UBI: Fastmap: Make self_check_eba() depend on fastmap self checking
  UBI: Fastmap: Add self check to detect absent PEBs
  UBI: Fix stale pointers in ubi->lookuptbl
  UBI: Fastmap: Enhance fastmap checking
  UBI: Add initial support for fastmap self checks
  UBI: Fastmap: Rework fastmap error paths
  UBI: Fastmap: Prepare for variable sized fastmaps
  UBI: Fastmap: Locking updates
  ...
This commit is contained in:
Linus Torvalds
2015-04-15 13:43:40 -07:00
38 changed files with 1510 additions and 1144 deletions
+39 -34
View File
@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
second_is_newer = !second_is_newer; second_is_newer = !second_is_newer;
} else { } else {
dbg_bld("PEB %d CRC is OK", pnum); dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err; bitflips |= !!err;
} }
mutex_unlock(&ubi->buf_mutex); mutex_unlock(&ubi->buf_mutex);
@@ -1301,6 +1301,30 @@ out_ech:
return err; return err;
} }
/**
 * alloc_ai - allocate and initialize the attaching information object.
 *
 * Allocates a &struct ubi_attach_info, initializes its PEB lists, the
 * volumes RB-tree and the slab cache used for attach-time PEB objects.
 *
 * Returns the new object, or %NULL if the object or its slab cache could
 * not be allocated (the partially built object is freed in that case).
 */
static struct ubi_attach_info *alloc_ai(void)
{
	struct ubi_attach_info *ai;

	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
	if (!ai)
		return ai;

	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	/* All attach-time PEB descriptors are carved from this cache. */
	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		kfree(ai);
		ai = NULL;
	}

	return ai;
}
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
/** /**
@@ -1313,7 +1337,7 @@ out_ech:
* UBI_NO_FASTMAP denotes that no fastmap was found. * UBI_NO_FASTMAP denotes that no fastmap was found.
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid. * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/ */
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai) static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{ {
int err, pnum, fm_anchor = -1; int err, pnum, fm_anchor = -1;
unsigned long long max_sqnum = 0; unsigned long long max_sqnum = 0;
@@ -1334,7 +1358,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched(); cond_resched();
dbg_gen("process PEB %d", pnum); dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum); err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
if (err < 0) if (err < 0)
goto out_vidh; goto out_vidh;
@@ -1350,7 +1374,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (fm_anchor < 0) if (fm_anchor < 0)
return UBI_NO_FASTMAP; return UBI_NO_FASTMAP;
return ubi_scan_fastmap(ubi, ai, fm_anchor); destroy_ai(*ai);
*ai = alloc_ai();
if (!*ai)
return -ENOMEM;
return ubi_scan_fastmap(ubi, *ai, fm_anchor);
out_vidh: out_vidh:
ubi_free_vid_hdr(ubi, vidh); ubi_free_vid_hdr(ubi, vidh);
@@ -1362,30 +1391,6 @@ out:
#endif #endif
static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
struct ubi_attach_info *ai;
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
if (!ai)
return ai;
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
kfree(ai);
ai = NULL;
}
return ai;
}
/** /**
* ubi_attach - attach an MTD device. * ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor * @ubi: UBI device descriptor
@@ -1399,7 +1404,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
int err; int err;
struct ubi_attach_info *ai; struct ubi_attach_info *ai;
ai = alloc_ai("ubi_aeb_slab_cache"); ai = alloc_ai();
if (!ai) if (!ai)
return -ENOMEM; return -ENOMEM;
@@ -1413,11 +1418,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (force_scan) if (force_scan)
err = scan_all(ubi, ai, 0); err = scan_all(ubi, ai, 0);
else { else {
err = scan_fast(ubi, ai); err = scan_fast(ubi, &ai);
if (err > 0) { if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) { if (err != UBI_NO_FASTMAP) {
destroy_ai(ai); destroy_ai(ai);
ai = alloc_ai("ubi_aeb_slab_cache2"); ai = alloc_ai();
if (!ai) if (!ai)
return -ENOMEM; return -ENOMEM;
@@ -1453,10 +1458,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
goto out_wl; goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
if (ubi->fm && ubi_dbg_chk_gen(ubi)) { if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai; struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache"); scan_ai = alloc_ai();
if (!scan_ai) { if (!scan_ai) {
err = -ENOMEM; err = -ENOMEM;
goto out_wl; goto out_wl;
+18 -11
View File
@@ -81,6 +81,7 @@ static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */ /* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert; static bool fm_autoconvert;
static bool fm_debug;
#endif #endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class; struct class *ubi_class;
@@ -154,23 +155,22 @@ static struct device_attribute dev_mtd_num =
*/ */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{ {
int ret;
struct ubi_notification nt; struct ubi_notification nt;
ubi_do_get_device_info(ubi, &nt.di); ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi); ubi_do_get_volume_info(ubi, vol, &nt.vi);
#ifdef CONFIG_MTD_UBI_FASTMAP
switch (ntype) { switch (ntype) {
case UBI_VOLUME_ADDED: case UBI_VOLUME_ADDED:
case UBI_VOLUME_REMOVED: case UBI_VOLUME_REMOVED:
case UBI_VOLUME_RESIZED: case UBI_VOLUME_RESIZED:
case UBI_VOLUME_RENAMED: case UBI_VOLUME_RENAMED:
if (ubi_update_fastmap(ubi)) { ret = ubi_update_fastmap(ubi);
ubi_err(ubi, "Unable to update fastmap!"); if (ret)
ubi_ro_mode(ubi); ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
}
} }
#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
} }
@@ -950,8 +950,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE) if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE; ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE; ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
ubi->fm_disabled = !fm_autoconvert; ubi->fm_disabled = !fm_autoconvert;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
<= UBI_FM_MAX_START) { <= UBI_FM_MAX_START) {
@@ -970,8 +972,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
mutex_init(&ubi->ckvol_mutex); mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex); mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock); spin_lock_init(&ubi->volumes_lock);
mutex_init(&ubi->fm_mutex); init_rwsem(&ubi->fm_protect);
init_rwsem(&ubi->fm_sem); init_rwsem(&ubi->fm_eba_sem);
ubi_msg(ubi, "attaching mtd%d", mtd->index); ubi_msg(ubi, "attaching mtd%d", mtd->index);
@@ -1115,8 +1117,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index); ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
/* If we don't write a new fastmap at detach time we lose all /* If we don't write a new fastmap at detach time we lose all
* EC updates that have been made since the last written fastmap. */ * EC updates that have been made since the last written fastmap.
ubi_update_fastmap(ubi); * In case of fastmap debugging we omit the update to simulate an
* unclean shutdown. */
if (!ubi_dbg_chk_fastmap(ubi))
ubi_update_fastmap(ubi);
#endif #endif
/* /*
* Before freeing anything, we have to stop the background thread to * Before freeing anything, we have to stop the background thread to
@@ -1501,6 +1506,8 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644); module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap."); MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif #endif
MODULE_VERSION(__stringify(UBI_VERSION)); MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images"); MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+1 -1
View File
@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */ /* Validate the request */
err = -EINVAL; err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size) req.bytes < 0 || req.bytes > vol->usable_leb_size)
break; break;
err = get_exclusive(desc); err = get_exclusive(desc);
+98 -2
View File
@@ -263,7 +263,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
struct dentry *dent = file->f_path.dentry; struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi; struct ubi_device *ubi;
struct ubi_debug_info *d; struct ubi_debug_info *d;
char buf[3]; char buf[8];
int val; int val;
ubi = ubi_get_device(ubi_num); ubi = ubi_get_device(ubi_num);
@@ -275,12 +275,30 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
val = d->chk_gen; val = d->chk_gen;
else if (dent == d->dfs_chk_io) else if (dent == d->dfs_chk_io)
val = d->chk_io; val = d->chk_io;
else if (dent == d->dfs_chk_fastmap)
val = d->chk_fastmap;
else if (dent == d->dfs_disable_bgt) else if (dent == d->dfs_disable_bgt)
val = d->disable_bgt; val = d->disable_bgt;
else if (dent == d->dfs_emulate_bitflips) else if (dent == d->dfs_emulate_bitflips)
val = d->emulate_bitflips; val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures) else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures; val = d->emulate_io_failures;
else if (dent == d->dfs_emulate_power_cut) {
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
} else if (dent == d->dfs_power_cut_min) {
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
} else if (dent == d->dfs_power_cut_max) {
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
}
else { else {
count = -EINVAL; count = -EINVAL;
goto out; goto out;
@@ -309,7 +327,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
struct ubi_device *ubi; struct ubi_device *ubi;
struct ubi_debug_info *d; struct ubi_debug_info *d;
size_t buf_size; size_t buf_size;
char buf[8]; char buf[8] = {0};
int val; int val;
ubi = ubi_get_device(ubi_num); ubi = ubi_get_device(ubi_num);
@@ -323,6 +341,21 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
goto out; goto out;
} }
if (dent == d->dfs_power_cut_min) {
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
count = -EINVAL;
goto out;
} else if (dent == d->dfs_power_cut_max) {
if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
count = -EINVAL;
goto out;
} else if (dent == d->dfs_emulate_power_cut) {
if (kstrtoint(buf, 0, &val) != 0)
count = -EINVAL;
d->emulate_power_cut = val;
goto out;
}
if (buf[0] == '1') if (buf[0] == '1')
val = 1; val = 1;
else if (buf[0] == '0') else if (buf[0] == '0')
@@ -336,6 +369,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
d->chk_gen = val; d->chk_gen = val;
else if (dent == d->dfs_chk_io) else if (dent == d->dfs_chk_io)
d->chk_io = val; d->chk_io = val;
else if (dent == d->dfs_chk_fastmap)
d->chk_fastmap = val;
else if (dent == d->dfs_disable_bgt) else if (dent == d->dfs_disable_bgt)
d->disable_bgt = val; d->disable_bgt = val;
else if (dent == d->dfs_emulate_bitflips) else if (dent == d->dfs_emulate_bitflips)
@@ -406,6 +441,13 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
goto out_remove; goto out_remove;
d->dfs_chk_io = dent; d->dfs_chk_io = dent;
fname = "chk_fastmap";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops);
if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_chk_fastmap = dent;
fname = "tst_disable_bgt"; fname = "tst_disable_bgt";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops); &dfs_fops);
@@ -427,6 +469,27 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
goto out_remove; goto out_remove;
d->dfs_emulate_io_failures = dent; d->dfs_emulate_io_failures = dent;
fname = "tst_emulate_power_cut";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops);
if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_emulate_power_cut = dent;
fname = "tst_emulate_power_cut_min";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops);
if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_power_cut_min = dent;
fname = "tst_emulate_power_cut_max";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops);
if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_power_cut_max = dent;
return 0; return 0;
out_remove: out_remove:
@@ -447,3 +510,36 @@ void ubi_debugfs_exit_dev(struct ubi_device *ubi)
if (IS_ENABLED(CONFIG_DEBUG_FS)) if (IS_ENABLED(CONFIG_DEBUG_FS))
debugfs_remove_recursive(ubi->dbg.dfs_dir); debugfs_remove_recursive(ubi->dbg.dfs_dir);
} }
/**
 * ubi_dbg_power_cut - emulate a power cut if it is time to do so
 * @ubi: UBI device description object
 * @caller: Flags set to indicate from where the function is being called
 *
 * Returns non-zero if a power cut was emulated, zero if not.
 */
int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
{
	unsigned int range;

	/* Nothing to do unless emulation is armed for this call site. */
	if ((ubi->dbg.emulate_power_cut & caller) == 0)
		return 0;

	if (ubi->dbg.power_cut_counter == 0) {
		/*
		 * First hit since arming: choose a countdown in
		 * [power_cut_min, power_cut_max) — random if a range is set.
		 */
		ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;

		if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
			range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
			ubi->dbg.power_cut_counter += prandom_u32() % range;
		}
		return 0;
	}

	ubi->dbg.power_cut_counter--;
	if (ubi->dbg.power_cut_counter)
		return 0;

	/* Countdown expired: "cut the power" by forcing read-only mode. */
	ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
	ubi_ro_mode(ubi);
	return 1;
}
+12
View File
@@ -127,4 +127,16 @@ static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
{ {
return ubi->dbg.chk_gen; return ubi->dbg.chk_gen;
} }
/* Returns non-zero if the extra fastmap self-checks are enabled for @ubi. */
static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
{
	return ubi->dbg.chk_fastmap;
}

/* Enables the (slow) fastmap self-checks for @ubi. */
static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
{
	ubi->dbg.chk_fastmap = 1;
}

/* Emulates a power cut when armed for @caller; see the definition. */
int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */ #endif /* !__UBI_DEBUG_H__ */
+34 -20
View File
@@ -340,9 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
down_read(&ubi->fm_sem); down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_sem); up_read(&ubi->fm_eba_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock: out_unlock:
@@ -567,6 +567,7 @@ retry:
new_pnum = ubi_wl_get_peb(ubi); new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) { if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
up_read(&ubi->fm_eba_sem);
return new_pnum; return new_pnum;
} }
@@ -577,13 +578,16 @@ retry:
if (err && err != UBI_IO_BITFLIPS) { if (err && err != UBI_IO_BITFLIPS) {
if (err > 0) if (err > 0)
err = -EIO; err = -EIO;
up_read(&ubi->fm_eba_sem);
goto out_put; goto out_put;
} }
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err) if (err) {
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
}
data_size = offset + len; data_size = offset + len;
mutex_lock(&ubi->buf_mutex); mutex_lock(&ubi->buf_mutex);
@@ -592,8 +596,10 @@ retry:
/* Read everything before the area where the write failure happened */ /* Read everything before the area where the write failure happened */
if (offset > 0) { if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS) if (err && err != UBI_IO_BITFLIPS) {
up_read(&ubi->fm_eba_sem);
goto out_unlock; goto out_unlock;
}
} }
memcpy(ubi->peb_buf + offset, buf, len); memcpy(ubi->peb_buf + offset, buf, len);
@@ -601,15 +607,15 @@ retry:
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) { if (err) {
mutex_unlock(&ubi->buf_mutex); mutex_unlock(&ubi->buf_mutex);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
mutex_unlock(&ubi->buf_mutex); mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum; vol->eba_tbl[lnum] = new_pnum;
up_read(&ubi->fm_sem); up_read(&ubi->fm_eba_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg(ubi, "data was successfully recovered"); ubi_msg(ubi, "data was successfully recovered");
@@ -704,6 +710,7 @@ retry:
if (pnum < 0) { if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum); leb_write_unlock(ubi, vol_id, lnum);
up_read(&ubi->fm_eba_sem);
return pnum; return pnum;
} }
@@ -714,6 +721,7 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum); vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
@@ -722,13 +730,13 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum); len, offset, vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
} }
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum; vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem); up_read(&ubi->fm_eba_sem);
leb_write_unlock(ubi, vol_id, lnum); leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
@@ -825,6 +833,7 @@ retry:
if (pnum < 0) { if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum); leb_write_unlock(ubi, vol_id, lnum);
up_read(&ubi->fm_eba_sem);
return pnum; return pnum;
} }
@@ -835,6 +844,7 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum); vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
@@ -842,13 +852,13 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum); len, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
ubi_assert(vol->eba_tbl[lnum] < 0); ubi_assert(vol->eba_tbl[lnum] < 0);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum; vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem); up_read(&ubi->fm_eba_sem);
leb_write_unlock(ubi, vol_id, lnum); leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr); ubi_free_vid_hdr(ubi, vid_hdr);
@@ -900,7 +910,7 @@ write_error:
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len) int lnum, const void *buf, int len)
{ {
int err, pnum, tries = 0, vol_id = vol->vol_id; int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr; struct ubi_vid_hdr *vid_hdr;
uint32_t crc; uint32_t crc;
@@ -943,6 +953,7 @@ retry:
pnum = ubi_wl_get_peb(ubi); pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) { if (pnum < 0) {
err = pnum; err = pnum;
up_read(&ubi->fm_eba_sem);
goto out_leb_unlock; goto out_leb_unlock;
} }
@@ -953,6 +964,7 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum); vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
@@ -960,19 +972,20 @@ retry:
if (err) { if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum); len, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error; goto write_error;
} }
if (vol->eba_tbl[lnum] >= 0) { old_pnum = vol->eba_tbl[lnum];
err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0); vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_eba_sem);
if (old_pnum >= 0) {
err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
if (err) if (err)
goto out_leb_unlock; goto out_leb_unlock;
} }
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem);
out_leb_unlock: out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum); leb_write_unlock(ubi, vol_id, lnum);
out_mutex: out_mutex:
@@ -1218,9 +1231,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
} }
ubi_assert(vol->eba_tbl[lnum] == from); ubi_assert(vol->eba_tbl[lnum] == from);
down_read(&ubi->fm_sem); down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = to; vol->eba_tbl[lnum] = to;
up_read(&ubi->fm_sem); up_read(&ubi->fm_eba_sem);
out_unlock_buf: out_unlock_buf:
mutex_unlock(&ubi->buf_mutex); mutex_unlock(&ubi->buf_mutex);
@@ -1419,7 +1432,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
* during re-size. * during re-size.
*/ */
ubi_move_aeb_to_list(av, aeb, &ai->erase); ubi_move_aeb_to_list(av, aeb, &ai->erase);
vol->eba_tbl[aeb->lnum] = aeb->pnum; else
vol->eba_tbl[aeb->lnum] = aeb->pnum;
} }
} }
+362
View File
@@ -0,0 +1,362 @@
/*
* Copyright (c) 2012 Linutronix GmbH
* Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <richard@nod.at>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
*/
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);

	/* Allow the work to be scheduled again (see get_peb_for_wl()). */
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree where to look for
 *
 * Scans @root for the entry with the lowest erase counter among the PEBs
 * whose number is below %UBI_FM_MAX_START, i.e. the PEBs that can serve
 * as a fastmap anchor. Returns %NULL if no such PEB exists.
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct ubi_wl_entry *cur, *best = NULL;
	struct rb_node *node;
	int lowest_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(node, cur, root, u.rb) {
		if (cur->pnum < UBI_FM_MAX_START && cur->ec < lowest_ec) {
			best = cur;
			lowest_ec = cur->ec;
		}
	}

	return best;
}
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 *
 * Moves every not-yet-consumed PEB of @pool (indices used..size-1) back
 * into the ubi->free tree and bumps the free counter accordingly.
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}
static int anchor_pebs_avalible(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
ubi_rb_for_each_entry(p, e, root, u.rb)
if (e->pnum < UBI_FM_MAX_START)
return 1;
return 0;
}
/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 *
 * Returns %NULL if no free PEB is available beyond the bad-block reserve.
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	/* Never dip into the PEBs reserved for bad-block handling. */
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem does no longer know this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 *
 * Returns the unused PEBs of both pools to the free tree, then refills the
 * user pool and the WL pool from ubi->free until each is full or no
 * suitable free PEB is left. Runs entirely under wl_lock.
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	/* Drop whatever was left over from the previous refill. */
	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	/* Alternate between the two pools so they fill evenly. */
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			/* Keep a margin of free PEBs for bad-block handling. */
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		/* Both pools full (or could take no more): done. */
		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronous. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		/* Writing a fastmap refills the pools and must not run under
		 * wl_lock/fm_eba_sem, so drop both first. */
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			/* Re-take fm_eba_sem: callers rely on it being held
			 * on return, even on failure. */
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		/* The fastmap update above may have refilled the pool by now;
		 * retry exactly once before giving up. */
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	/* Shield the handed-out PEB from wear-leveling for a while. */
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 *
 * Returns %NULL when the WL pool is exhausted; in that case an asynchronous
 * fastmap update (which refills the pools) is scheduled, at most one at a
 * time, and the caller is expected to retry later.
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 *
 * Returns zero on success (including when wear-leveling work is already
 * pending) and %-ENOMEM if the work object could not be allocated.
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	/* At most one wear-leveling work may be in flight. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		/* Roll the flag back so a later attempt can schedule again. */
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * Schedules erasure of @fm_e and returns what schedule_erase() returns.
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and writing now a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	/* lnum != 0 marks a fastmap data PEB, lnum == 0 the superblock. */
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 *
 * Returns non-zero if @wrk's handler is the erase worker, zero otherwise.
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
/* ubi_fastmap_close - release fastmap resources at detach time.
 * @ubi: UBI device description object
 */
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	/* Wait for any in-flight asynchronous fastmap update first. */
	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	/* kfree(NULL) is a no-op, so this is safe when no fastmap exists. */
	kfree(ubi->fm);
}
/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 *
 * If fastmap is enabled but not yet written and @e could serve as an
 * anchor PEB (pnum below %UBI_FM_MAX_START), a different entry from @root
 * is returned instead, keeping the anchor candidate free.
 *
 * NOTE(review): rb_next(root->rb_node) steps from the tree's root node,
 * not from @e — confirm this is the intended replacement entry.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
	if (e && !ubi->fm_disabled && !ubi->fm &&
	e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
		struct ubi_wl_entry, u.rb);

	return e;
}
+249 -200
View File
File diff suppressed because it is too large Load Diff
+6
View File
@@ -859,6 +859,9 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
if (err) if (err)
return err; return err;
if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
return -EROFS;
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err; return err;
} }
@@ -1106,6 +1109,9 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (err) if (err)
return err; return err;
if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
return -EROFS;
p = (char *)vid_hdr - ubi->vid_hdr_shift; p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize); ubi->vid_hdr_alsize);
-2
View File
@@ -403,8 +403,6 @@ struct ubi_vtbl_record {
#define UBI_FM_MIN_POOL_SIZE 8 #define UBI_FM_MIN_POOL_SIZE 8
#define UBI_FM_MAX_POOL_SIZE 256 #define UBI_FM_MAX_POOL_SIZE 256
#define UBI_FM_WL_POOL_SIZE 25
/** /**
* struct ubi_fm_sb - UBI fastmap super block * struct ubi_fm_sb - UBI fastmap super block
* @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC) * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+79 -6
View File
@@ -151,6 +151,17 @@ enum {
UBI_BAD_FASTMAP, UBI_BAD_FASTMAP,
}; };
/*
* Flags for emulate_power_cut in ubi_debug_info
*
* POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
* POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
*/
enum {
POWER_CUT_EC_WRITE = 0x01,
POWER_CUT_VID_WRITE = 0x02,
};
/** /**
* struct ubi_wl_entry - wear-leveling entry. * struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree * @u.rb: link in the corresponding (free/used) RB-tree
@@ -356,30 +367,48 @@ struct ubi_wl_entry;
* *
* @chk_gen: if UBI general extra checks are enabled * @chk_gen: if UBI general extra checks are enabled
* @chk_io: if UBI I/O extra checks are enabled * @chk_io: if UBI I/O extra checks are enabled
* @chk_fastmap: if UBI fastmap extra checks are enabled
* @disable_bgt: disable the background task for testing purposes * @disable_bgt: disable the background task for testing purposes
* @emulate_bitflips: emulate bit-flips for testing purposes * @emulate_bitflips: emulate bit-flips for testing purposes
* @emulate_io_failures: emulate write/erase failures for testing purposes * @emulate_io_failures: emulate write/erase failures for testing purposes
* @emulate_power_cut: emulate power cut for testing purposes
* @power_cut_counter: count down for writes left until emulated power cut
* @power_cut_min: minimum number of writes before emulating a power cut
* @power_cut_max: maximum number of writes until emulating a power cut
* @dfs_dir_name: name of debugfs directory containing files of this UBI device * @dfs_dir_name: name of debugfs directory containing files of this UBI device
* @dfs_dir: direntry object of the UBI device debugfs directory * @dfs_dir: direntry object of the UBI device debugfs directory
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
* @dfs_chk_io: debugfs knob to enable UBI I/O extra checks * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
* @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
* @dfs_disable_bgt: debugfs knob to disable the background task * @dfs_disable_bgt: debugfs knob to disable the background task
* @dfs_emulate_bitflips: debugfs knob to emulate bit-flips * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
* @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
* @dfs_emulate_power_cut: debugfs knob to emulate power cuts
* @dfs_power_cut_min: debugfs knob for minimum writes before power cut
* @dfs_power_cut_max: debugfs knob for maximum writes until power cut
*/ */
struct ubi_debug_info { struct ubi_debug_info {
unsigned int chk_gen:1; unsigned int chk_gen:1;
unsigned int chk_io:1; unsigned int chk_io:1;
unsigned int chk_fastmap:1;
unsigned int disable_bgt:1; unsigned int disable_bgt:1;
unsigned int emulate_bitflips:1; unsigned int emulate_bitflips:1;
unsigned int emulate_io_failures:1; unsigned int emulate_io_failures:1;
unsigned int emulate_power_cut:2;
unsigned int power_cut_counter;
unsigned int power_cut_min;
unsigned int power_cut_max;
char dfs_dir_name[UBI_DFS_DIR_LEN + 1]; char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
struct dentry *dfs_dir; struct dentry *dfs_dir;
struct dentry *dfs_chk_gen; struct dentry *dfs_chk_gen;
struct dentry *dfs_chk_io; struct dentry *dfs_chk_io;
struct dentry *dfs_chk_fastmap;
struct dentry *dfs_disable_bgt; struct dentry *dfs_disable_bgt;
struct dentry *dfs_emulate_bitflips; struct dentry *dfs_emulate_bitflips;
struct dentry *dfs_emulate_io_failures; struct dentry *dfs_emulate_io_failures;
struct dentry *dfs_emulate_power_cut;
struct dentry *dfs_power_cut_min;
struct dentry *dfs_power_cut_max;
}; };
/** /**
@@ -426,11 +455,13 @@ struct ubi_debug_info {
* @fm_pool: in-memory data structure of the fastmap pool * @fm_pool: in-memory data structure of the fastmap pool
* @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
* sub-system * sub-system
* @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
* that critical sections cannot be interrupted by ubi_update_fastmap()
* @fm_buf: vmalloc()'d buffer which holds the raw fastmap * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
* @fm_size: fastmap size in bytes * @fm_size: fastmap size in bytes
* @fm_sem: allows ubi_update_fastmap() to block EBA table changes * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue * @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* *
* @used: RB-tree of used physical eraseblocks * @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -442,7 +473,8 @@ struct ubi_debug_info {
* @pq_head: protection queue head * @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
* @erroneous, and @erroneous_peb_count fields * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
* and @fm_wl_pool fields
* @move_mutex: serializes eraseblock moves * @move_mutex: serializes eraseblock moves
* @work_sem: used to wait for all the scheduled works to finish and prevent * @work_sem: used to wait for all the scheduled works to finish and prevent
* new works from being submitted * new works from being submitted
@@ -479,7 +511,7 @@ struct ubi_debug_info {
* @vid_hdr_offset: starting offset of the volume identifier header (might be * @vid_hdr_offset: starting offset of the volume identifier header (might be
* unaligned) * unaligned)
* @vid_hdr_aloffset: starting offset of the VID header aligned to * @vid_hdr_aloffset: starting offset of the VID header aligned to
* @hdrs_min_io_size * @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not * not
@@ -532,11 +564,12 @@ struct ubi_device {
struct ubi_fastmap_layout *fm; struct ubi_fastmap_layout *fm;
struct ubi_fm_pool fm_pool; struct ubi_fm_pool fm_pool;
struct ubi_fm_pool fm_wl_pool; struct ubi_fm_pool fm_wl_pool;
struct rw_semaphore fm_sem; struct rw_semaphore fm_eba_sem;
struct mutex fm_mutex; struct rw_semaphore fm_protect;
void *fm_buf; void *fm_buf;
size_t fm_size; size_t fm_size;
struct work_struct fm_work; struct work_struct fm_work;
int fm_work_scheduled;
/* Wear-leveling sub-system's stuff */ /* Wear-leveling sub-system's stuff */
struct rb_root used; struct rb_root used;
@@ -868,10 +901,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr); int pnum, const struct ubi_vid_hdr *vid_hdr);
/* fastmap.c */ /* fastmap.c */
#ifdef CONFIG_MTD_UBI_FASTMAP
size_t ubi_calc_fm_size(struct ubi_device *ubi); size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi); int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor); int fm_anchor);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
#endif
/* block.c */ /* block.c */
#ifdef CONFIG_MTD_UBI_BLOCK #ifdef CONFIG_MTD_UBI_BLOCK
@@ -892,6 +929,42 @@ static inline int ubiblock_remove(struct ubi_volume_info *vi)
} }
#endif #endif
/*
* ubi_for_each_free_peb - walk the UBI free RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
/*
* ubi_for_each_used_peb - walk the UBI used RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
/*
 * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
* @ubi: UBI device description object
* @e: a pointer to a ubi_wl_entry to use as cursor
* @pos: a pointer to RB-tree entry type to use as a loop counter
*/
#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
/*
* ubi_for_each_protected_peb - walk the UBI protection queue.
* @ubi: UBI device description object
* @i: a integer used as counter
* @e: a pointer to a ubi_wl_entry to use as cursor
*/
#define ubi_for_each_protected_peb(ubi, i, e) \
for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
/* /*
* ubi_rb_for_each_entry - walk an RB-tree. * ubi_rb_for_each_entry - walk an RB-tree.
+148 -439
View File
File diff suppressed because it is too large Load Diff
+28
View File
@@ -0,0 +1,28 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
static int anchor_pebs_avalible(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
/* Reserve enough LEBs to store two fastmaps. */
*count += (ubi->fm_size / ubi->leb_size) * 2;
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
}
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root);
#else /* !CONFIG_MTD_UBI_FASTMAP */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
return e;
}
#endif /* CONFIG_MTD_UBI_FASTMAP */
#endif /* UBI_WL_H */
+1 -1
View File
@@ -509,7 +509,7 @@ again:
c->bi.nospace_rp = 1; c->bi.nospace_rp = 1;
smp_wmb(); smp_wmb();
} else } else
ubifs_err("cannot budget space, error %d", err); ubifs_err(c, "cannot budget space, error %d", err);
return err; return err;
} }
+6 -6
View File
@@ -225,7 +225,7 @@ out_cancel:
out_up: out_up:
up_write(&c->commit_sem); up_write(&c->commit_sem);
out: out:
ubifs_err("commit failed, error %d", err); ubifs_err(c, "commit failed, error %d", err);
spin_lock(&c->cs_lock); spin_lock(&c->cs_lock);
c->cmt_state = COMMIT_BROKEN; c->cmt_state = COMMIT_BROKEN;
wake_up(&c->cmt_wq); wake_up(&c->cmt_wq);
@@ -289,7 +289,7 @@ int ubifs_bg_thread(void *info)
int err; int err;
struct ubifs_info *c = info; struct ubifs_info *c = info;
ubifs_msg("background thread \"%s\" started, PID %d", ubifs_msg(c, "background thread \"%s\" started, PID %d",
c->bgt_name, current->pid); c->bgt_name, current->pid);
set_freezable(); set_freezable();
@@ -324,7 +324,7 @@ int ubifs_bg_thread(void *info)
cond_resched(); cond_resched();
} }
ubifs_msg("background thread \"%s\" stops", c->bgt_name); ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name);
return 0; return 0;
} }
@@ -712,13 +712,13 @@ out:
return 0; return 0;
out_dump: out_dump:
ubifs_err("dumping index node (iip=%d)", i->iip); ubifs_err(c, "dumping index node (iip=%d)", i->iip);
ubifs_dump_node(c, idx); ubifs_dump_node(c, idx);
list_del(&i->list); list_del(&i->list);
kfree(i); kfree(i);
if (!list_empty(&list)) { if (!list_empty(&list)) {
i = list_entry(list.prev, struct idx_node, list); i = list_entry(list.prev, struct idx_node, list);
ubifs_err("dumping parent index node"); ubifs_err(c, "dumping parent index node");
ubifs_dump_node(c, &i->idx); ubifs_dump_node(c, &i->idx);
} }
out_free: out_free:
@@ -727,7 +727,7 @@ out_free:
list_del(&i->list); list_del(&i->list);
kfree(i); kfree(i);
} }
ubifs_err("failed, error %d", err); ubifs_err(c, "failed, error %d", err);
if (err > 0) if (err > 0)
err = -EINVAL; err = -EINVAL;
return err; return err;
+11 -11
View File
@@ -92,8 +92,8 @@ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
* Note, if the input buffer was not compressed, it is copied to the output * Note, if the input buffer was not compressed, it is copied to the output
* buffer and %UBIFS_COMPR_NONE is returned in @compr_type. * buffer and %UBIFS_COMPR_NONE is returned in @compr_type.
*/ */
void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, void ubifs_compress(const struct ubifs_info *c, const void *in_buf,
int *compr_type) int in_len, void *out_buf, int *out_len, int *compr_type)
{ {
int err; int err;
struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; struct ubifs_compressor *compr = ubifs_compressors[*compr_type];
@@ -112,9 +112,9 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
if (compr->comp_mutex) if (compr->comp_mutex)
mutex_unlock(compr->comp_mutex); mutex_unlock(compr->comp_mutex);
if (unlikely(err)) { if (unlikely(err)) {
ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
in_len, compr->name, err); in_len, compr->name, err);
goto no_compr; goto no_compr;
} }
/* /*
@@ -144,21 +144,21 @@ no_compr:
* The length of the uncompressed data is returned in @out_len. This functions * The length of the uncompressed data is returned in @out_len. This functions
* returns %0 on success or a negative error code on failure. * returns %0 on success or a negative error code on failure.
*/ */
int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
int *out_len, int compr_type) int in_len, void *out_buf, int *out_len, int compr_type)
{ {
int err; int err;
struct ubifs_compressor *compr; struct ubifs_compressor *compr;
if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
ubifs_err("invalid compression type %d", compr_type); ubifs_err(c, "invalid compression type %d", compr_type);
return -EINVAL; return -EINVAL;
} }
compr = ubifs_compressors[compr_type]; compr = ubifs_compressors[compr_type];
if (unlikely(!compr->capi_name)) { if (unlikely(!compr->capi_name)) {
ubifs_err("%s compression is not compiled in", compr->name); ubifs_err(c, "%s compression is not compiled in", compr->name);
return -EINVAL; return -EINVAL;
} }
@@ -175,7 +175,7 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
if (compr->decomp_mutex) if (compr->decomp_mutex)
mutex_unlock(compr->decomp_mutex); mutex_unlock(compr->decomp_mutex);
if (err) if (err)
ubifs_err("cannot decompress %d bytes, compressor %s, error %d", ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d",
in_len, compr->name, err); in_len, compr->name, err);
return err; return err;
@@ -193,8 +193,8 @@ static int __init compr_init(struct ubifs_compressor *compr)
if (compr->capi_name) { if (compr->capi_name) {
compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0); compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0);
if (IS_ERR(compr->cc)) { if (IS_ERR(compr->cc)) {
ubifs_err("cannot initialize compressor %s, error %ld", pr_err("UBIFS error (pid %d): cannot initialize compressor %s, error %ld",
compr->name, PTR_ERR(compr->cc)); current->pid, compr->name, PTR_ERR(compr->cc));
return PTR_ERR(compr->cc); return PTR_ERR(compr->cc);
} }
} }
+93 -93
View File
File diff suppressed because it is too large Load Diff
+13 -10
View File
@@ -146,12 +146,12 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
if (c->highest_inum >= INUM_WARN_WATERMARK) { if (c->highest_inum >= INUM_WARN_WATERMARK) {
if (c->highest_inum >= INUM_WATERMARK) { if (c->highest_inum >= INUM_WATERMARK) {
spin_unlock(&c->cnt_lock); spin_unlock(&c->cnt_lock);
ubifs_err("out of inode numbers"); ubifs_err(c, "out of inode numbers");
make_bad_inode(inode); make_bad_inode(inode);
iput(inode); iput(inode);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
ubifs_warn("running out of inode numbers (current %lu, max %d)", ubifs_warn(c, "running out of inode numbers (current %lu, max %u)",
(unsigned long)c->highest_inum, INUM_WATERMARK); (unsigned long)c->highest_inum, INUM_WATERMARK);
} }
@@ -222,7 +222,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
* checking. * checking.
*/ */
err = PTR_ERR(inode); err = PTR_ERR(inode);
ubifs_err("dead directory entry '%pd', error %d", ubifs_err(c, "dead directory entry '%pd', error %d",
dentry, err); dentry, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
goto out; goto out;
@@ -272,7 +272,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = ubifs_init_security(dir, inode, &dentry->d_name); err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err) if (err)
goto out_cancel; goto out_inode;
mutex_lock(&dir_ui->ui_mutex); mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change; dir->i_size += sz_change;
@@ -292,11 +292,12 @@ out_cancel:
dir->i_size -= sz_change; dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size; dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex); mutex_unlock(&dir_ui->ui_mutex);
out_inode:
make_bad_inode(inode); make_bad_inode(inode);
iput(inode); iput(inode);
out_budg: out_budg:
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
ubifs_err("cannot create regular file, error %d", err); ubifs_err(c, "cannot create regular file, error %d", err);
return err; return err;
} }
@@ -449,7 +450,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
out: out:
if (err != -ENOENT) { if (err != -ENOENT) {
ubifs_err("cannot find next direntry, error %d", err); ubifs_err(c, "cannot find next direntry, error %d", err);
return err; return err;
} }
@@ -732,7 +733,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = ubifs_init_security(dir, inode, &dentry->d_name); err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err) if (err)
goto out_cancel; goto out_inode;
mutex_lock(&dir_ui->ui_mutex); mutex_lock(&dir_ui->ui_mutex);
insert_inode_hash(inode); insert_inode_hash(inode);
@@ -743,7 +744,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
dir->i_mtime = dir->i_ctime = inode->i_ctime; dir->i_mtime = dir->i_ctime = inode->i_ctime;
err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0); err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
if (err) { if (err) {
ubifs_err("cannot create directory, error %d", err); ubifs_err(c, "cannot create directory, error %d", err);
goto out_cancel; goto out_cancel;
} }
mutex_unlock(&dir_ui->ui_mutex); mutex_unlock(&dir_ui->ui_mutex);
@@ -757,6 +758,7 @@ out_cancel:
dir_ui->ui_size = dir->i_size; dir_ui->ui_size = dir->i_size;
drop_nlink(dir); drop_nlink(dir);
mutex_unlock(&dir_ui->ui_mutex); mutex_unlock(&dir_ui->ui_mutex);
out_inode:
make_bad_inode(inode); make_bad_inode(inode);
iput(inode); iput(inode);
out_budg: out_budg:
@@ -816,7 +818,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
err = ubifs_init_security(dir, inode, &dentry->d_name); err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err) if (err)
goto out_cancel; goto out_inode;
mutex_lock(&dir_ui->ui_mutex); mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change; dir->i_size += sz_change;
@@ -836,6 +838,7 @@ out_cancel:
dir->i_size -= sz_change; dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size; dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex); mutex_unlock(&dir_ui->ui_mutex);
out_inode:
make_bad_inode(inode); make_bad_inode(inode);
iput(inode); iput(inode);
out_budg: out_budg:
@@ -896,7 +899,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
err = ubifs_init_security(dir, inode, &dentry->d_name); err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err) if (err)
goto out_cancel; goto out_inode;
mutex_lock(&dir_ui->ui_mutex); mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change; dir->i_size += sz_change;
+9 -8
View File
@@ -79,7 +79,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE; out_len = UBIFS_BLOCK_SIZE;
err = ubifs_decompress(&dn->data, dlen, addr, &out_len, err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type)); le16_to_cpu(dn->compr_type));
if (err || len != out_len) if (err || len != out_len)
goto dump; goto dump;
@@ -95,7 +95,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
return 0; return 0;
dump: dump:
ubifs_err("bad data node (block %u, inode %lu)", ubifs_err(c, "bad data node (block %u, inode %lu)",
block, inode->i_ino); block, inode->i_ino);
ubifs_dump_node(c, dn); ubifs_dump_node(c, dn);
return -EINVAL; return -EINVAL;
@@ -160,13 +160,14 @@ static int do_readpage(struct page *page)
addr += UBIFS_BLOCK_SIZE; addr += UBIFS_BLOCK_SIZE;
} }
if (err) { if (err) {
struct ubifs_info *c = inode->i_sb->s_fs_info;
if (err == -ENOENT) { if (err == -ENOENT) {
/* Not found, so it must be a hole */ /* Not found, so it must be a hole */
SetPageChecked(page); SetPageChecked(page);
dbg_gen("hole"); dbg_gen("hole");
goto out_free; goto out_free;
} }
ubifs_err("cannot read page %lu of inode %lu, error %d", ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
page->index, inode->i_ino, err); page->index, inode->i_ino, err);
goto error; goto error;
} }
@@ -649,7 +650,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE; out_len = UBIFS_BLOCK_SIZE;
err = ubifs_decompress(&dn->data, dlen, addr, &out_len, err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type)); le16_to_cpu(dn->compr_type));
if (err || len != out_len) if (err || len != out_len)
goto out_err; goto out_err;
@@ -697,7 +698,7 @@ out_err:
SetPageError(page); SetPageError(page);
flush_dcache_page(page); flush_dcache_page(page);
kunmap(page); kunmap(page);
ubifs_err("bad data node (block %u, inode %lu)", ubifs_err(c, "bad data node (block %u, inode %lu)",
page_block, inode->i_ino); page_block, inode->i_ino);
return -EINVAL; return -EINVAL;
} }
@@ -801,7 +802,7 @@ out_free:
return ret; return ret;
out_warn: out_warn:
ubifs_warn("ignoring error %d and skipping bulk-read", err); ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
goto out_free; goto out_free;
out_bu_off: out_bu_off:
@@ -929,7 +930,7 @@ static int do_writepage(struct page *page, int len)
} }
if (err) { if (err) {
SetPageError(page); SetPageError(page);
ubifs_err("cannot write page %lu of inode %lu, error %d", ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
page->index, inode->i_ino, err); page->index, inode->i_ino, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
} }
@@ -1484,7 +1485,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
err = ubifs_budget_space(c, &req); err = ubifs_budget_space(c, &req);
if (unlikely(err)) { if (unlikely(err)) {
if (err == -ENOSPC) if (err == -ENOSPC)
ubifs_warn("out of space for mmapped file (inode number %lu)", ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
inode->i_ino); inode->i_ino);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
+20 -20
View File
@@ -85,7 +85,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
c->ro_error = 1; c->ro_error = 1;
c->no_chk_data_crc = 0; c->no_chk_data_crc = 0;
c->vfs_sb->s_flags |= MS_RDONLY; c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err); ubifs_warn(c, "switched to read-only mode, error %d", err);
dump_stack(); dump_stack();
} }
} }
@@ -107,7 +107,7 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
* @even_ebadmsg is true. * @even_ebadmsg is true.
*/ */
if (err && (err != -EBADMSG || even_ebadmsg)) { if (err && (err != -EBADMSG || even_ebadmsg)) {
ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
len, lnum, offs, err); len, lnum, offs, err);
dump_stack(); dump_stack();
} }
@@ -127,7 +127,7 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
else else
err = dbg_leb_write(c, lnum, buf, offs, len); err = dbg_leb_write(c, lnum, buf, offs, len);
if (err) { if (err) {
ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
len, lnum, offs, err); len, lnum, offs, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
dump_stack(); dump_stack();
@@ -147,7 +147,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
else else
err = dbg_leb_change(c, lnum, buf, len); err = dbg_leb_change(c, lnum, buf, len);
if (err) { if (err) {
ubifs_err("changing %d bytes in LEB %d failed, error %d", ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
len, lnum, err); len, lnum, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
dump_stack(); dump_stack();
@@ -167,7 +167,7 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
else else
err = dbg_leb_unmap(c, lnum); err = dbg_leb_unmap(c, lnum);
if (err) { if (err) {
ubifs_err("unmap LEB %d failed, error %d", lnum, err); ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
dump_stack(); dump_stack();
} }
@@ -186,7 +186,7 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum)
else else
err = dbg_leb_map(c, lnum); err = dbg_leb_map(c, lnum);
if (err) { if (err) {
ubifs_err("mapping LEB %d failed, error %d", lnum, err); ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
dump_stack(); dump_stack();
} }
@@ -199,7 +199,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
err = ubi_is_mapped(c->ubi, lnum); err = ubi_is_mapped(c->ubi, lnum);
if (err < 0) { if (err < 0) {
ubifs_err("ubi_is_mapped failed for LEB %d, error %d", ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
lnum, err); lnum, err);
dump_stack(); dump_stack();
} }
@@ -247,7 +247,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
magic = le32_to_cpu(ch->magic); magic = le32_to_cpu(ch->magic);
if (magic != UBIFS_NODE_MAGIC) { if (magic != UBIFS_NODE_MAGIC) {
if (!quiet) if (!quiet)
ubifs_err("bad magic %#08x, expected %#08x", ubifs_err(c, "bad magic %#08x, expected %#08x",
magic, UBIFS_NODE_MAGIC); magic, UBIFS_NODE_MAGIC);
err = -EUCLEAN; err = -EUCLEAN;
goto out; goto out;
@@ -256,7 +256,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
type = ch->node_type; type = ch->node_type;
if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) { if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
if (!quiet) if (!quiet)
ubifs_err("bad node type %d", type); ubifs_err(c, "bad node type %d", type);
goto out; goto out;
} }
@@ -279,7 +279,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
node_crc = le32_to_cpu(ch->crc); node_crc = le32_to_cpu(ch->crc);
if (crc != node_crc) { if (crc != node_crc) {
if (!quiet) if (!quiet)
ubifs_err("bad CRC: calculated %#08x, read %#08x", ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
crc, node_crc); crc, node_crc);
err = -EUCLEAN; err = -EUCLEAN;
goto out; goto out;
@@ -289,10 +289,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
out_len: out_len:
if (!quiet) if (!quiet)
ubifs_err("bad node length %d", node_len); ubifs_err(c, "bad node length %d", node_len);
out: out:
if (!quiet) { if (!quiet) {
ubifs_err("bad node at LEB %d:%d", lnum, offs); ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
ubifs_dump_node(c, buf); ubifs_dump_node(c, buf);
dump_stack(); dump_stack();
} }
@@ -355,11 +355,11 @@ static unsigned long long next_sqnum(struct ubifs_info *c)
if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) { if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
if (sqnum >= SQNUM_WATERMARK) { if (sqnum >= SQNUM_WATERMARK) {
ubifs_err("sequence number overflow %llu, end of life", ubifs_err(c, "sequence number overflow %llu, end of life",
sqnum); sqnum);
ubifs_ro_mode(c, -EINVAL); ubifs_ro_mode(c, -EINVAL);
} }
ubifs_warn("running out of sequence numbers, end of life soon"); ubifs_warn(c, "running out of sequence numbers, end of life soon");
} }
return sqnum; return sqnum;
@@ -636,7 +636,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
err = ubifs_wbuf_sync_nolock(wbuf); err = ubifs_wbuf_sync_nolock(wbuf);
mutex_unlock(&wbuf->io_mutex); mutex_unlock(&wbuf->io_mutex);
if (err) { if (err) {
ubifs_err("cannot sync write-buffer, error %d", err); ubifs_err(c, "cannot sync write-buffer, error %d", err);
ubifs_ro_mode(c, err); ubifs_ro_mode(c, err);
goto out_timers; goto out_timers;
} }
@@ -833,7 +833,7 @@ exit:
return 0; return 0;
out: out:
ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
len, wbuf->lnum, wbuf->offs, err); len, wbuf->lnum, wbuf->offs, err);
ubifs_dump_node(c, buf); ubifs_dump_node(c, buf);
dump_stack(); dump_stack();
@@ -932,27 +932,27 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
} }
if (type != ch->node_type) { if (type != ch->node_type) {
ubifs_err("bad node type (%d but expected %d)", ubifs_err(c, "bad node type (%d but expected %d)",
ch->node_type, type); ch->node_type, type);
goto out; goto out;
} }
err = ubifs_check_node(c, buf, lnum, offs, 0, 0); err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
if (err) { if (err) {
ubifs_err("expected node type %d", type); ubifs_err(c, "expected node type %d", type);
return err; return err;
} }
rlen = le32_to_cpu(ch->len); rlen = le32_to_cpu(ch->len);
if (rlen != len) { if (rlen != len) {
ubifs_err("bad node length %d, expected %d", rlen, len); ubifs_err(c, "bad node length %d, expected %d", rlen, len);
goto out; goto out;
} }
return 0; return 0;
out: out:
ubifs_err("bad node at LEB %d:%d", lnum, offs); ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
ubifs_dump_node(c, buf); ubifs_dump_node(c, buf);
dump_stack(); dump_stack();
return -EINVAL; return -EINVAL;

Some files were not shown because too many files have changed in this diff Show More