UBIFS has a journal recovery function. It is useful for devices that
experience a power failure. As the comment of ubifs_wbuf_sync_nolock()
explains, wbuf is optimized for performance by writing in
max_write_size-aligned chunks.

In the following environment, the offset checked by recovery is not
aligned to max_write_size:

- Using a SPI-NOR device with a write buffer size over 256 bytes.
  For example the Micron MT28EW01GABA, whose write buffer is 512 words.
- LEB header size is 64 bytes.
- UBI header size is 64 bytes.

So if a write-buffer command leaves corrupt data in a block,
is_last_write() and no_more_nodes() cannot check it correctly.

This patch adjusts wbuf writes to max_write_size, taking leb_start into
account. The recovery process also checks the data at the corrected
alignment position.

Signed-off-by: Yuichi Nakai <xoxyuxu@xxxxxxxxx>
---
 fs/ubifs/debug.c    |  2 +-
 fs/ubifs/io.c       | 16 +++++++++-------
 fs/ubifs/misc.h     | 13 +++++++++++++
 fs/ubifs/recovery.c |  6 +++---
 4 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index c49ff50fdceb..d8c1fa6d182d 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2565,7 +2565,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
 
 	from = prandom_u32() % len;
 	/* Corruption span max to end of write unit */
-	to = min(len, ALIGN(from + 1, c->max_write_size));
+	to = min(len, ubifs_align_max_write(c, from + 1));
 
 	ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1,
 		   ffs ? "0xFFs" : "random data");
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index d124117efd42..06ccaeb4c5d9 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -564,7 +564,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
 	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
 	ubifs_assert(c, !c->ro_media && !c->ro_mount);
 	if (c->leb_size - wbuf->offs >= c->max_write_size)
-		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
+		ubifs_assert(c, !((c->leb_start + wbuf->offs + wbuf->size)
+				  % c->max_write_size));
 
 	if (c->ro_error)
 		return -EROFS;
@@ -595,8 +596,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
 	 */
 	if (c->leb_size - wbuf->offs < c->max_write_size)
 		wbuf->size = c->leb_size - wbuf->offs;
-	else if (wbuf->offs & (c->max_write_size - 1))
-		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
+	else if ((c->leb_start + wbuf->offs) & (c->max_write_size - 1))
+		wbuf->size = ubifs_align_max_write(c, wbuf->offs) - wbuf->offs;
 	else
 		wbuf->size = c->max_write_size;
 	wbuf->avail = wbuf->size;
@@ -636,8 +637,8 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
 	wbuf->offs = offs;
 	if (c->leb_size - wbuf->offs < c->max_write_size)
 		wbuf->size = c->leb_size - wbuf->offs;
-	else if (wbuf->offs & (c->max_write_size - 1))
-		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
+	else if ((c->leb_start + wbuf->offs) & (c->max_write_size - 1))
+		wbuf->size = ubifs_align_max_write(c, wbuf->offs) - wbuf->offs;
 	else
 		wbuf->size = c->max_write_size;
 	wbuf->avail = wbuf->size;
@@ -746,7 +747,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 	ubifs_assert(c, !c->ro_media && !c->ro_mount);
 	ubifs_assert(c, !c->space_fixup);
 	if (c->leb_size - wbuf->offs >= c->max_write_size)
-		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
+		ubifs_assert(c, !((c->leb_start + wbuf->offs + wbuf->size)
+				  % c->max_write_size));
 
 	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
 		err = -ENOSPC;
@@ -813,7 +815,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 		len -= wbuf->avail;
 		aligned_len -= wbuf->avail;
 		written += wbuf->avail;
-	} else if (wbuf->offs & (c->max_write_size - 1)) {
+	} else if ((c->leb_start + wbuf->offs) & (c->max_write_size - 1)) {
 		/*
 		 * The write-buffer offset is not aligned to
 		 * @c->max_write_size and @wbuf->size is less than
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 6f87237fdbf4..269350749ce7 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -290,4 +290,17 @@ static inline int ubifs_next_log_lnum(const struct ubifs_info *c, int lnum)
 
 const char *ubifs_assert_action_name(struct ubifs_info *c);
 
+/**
+ * ubifs_align_max_write - align a LEB offset to the max. write size
+ * @c: UBIFS file-system description object
+ * @offs: logical eraseblock offset to align
+ *
+ * This function calculates the LEB offset aligned up to @c->max_write_size,
+ * taking the offset of the LEB data on the flash (@c->leb_start) into account.
+ */
+static inline int ubifs_align_max_write(const struct ubifs_info *c, int offs)
+{
+	return ALIGN(c->leb_start + offs, c->max_write_size) - c->leb_start;
+}
+
 #endif /* __UBIFS_MISC_H__ */
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 8526b7ec4707..33fbfb5921ed 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -420,7 +420,7 @@ static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
 	 * Round up to the next @c->max_write_size boundary i.e. @offs is in
 	 * the last wbuf written. After that should be empty space.
 	 */
-	empty_offs = ALIGN(offs + 1, c->max_write_size);
+	empty_offs = ubifs_align_max_write(c, offs + 1);
 	check_len = c->leb_size - empty_offs;
 	p = buf + empty_offs - offs;
 	return is_empty(p, check_len);
@@ -474,7 +474,7 @@ static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
 	int skip, dlen = le32_to_cpu(ch->len);
 
 	/* Check for empty space after the corrupt node's common header */
-	skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs;
+	skip = ubifs_align_max_write(c, offs + UBIFS_CH_SZ) - offs;
 	if (is_empty(buf + skip, len - skip))
 		return 1;
 	/*
@@ -486,7 +486,7 @@ static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
 		return 0;
 	}
 	/* Now we know the corrupt node's length we can skip over it */
-	skip = ALIGN(offs + dlen, c->max_write_size) - offs;
+	skip = ubifs_align_max_write(c, offs + dlen) - offs;
 	/* After which there should be empty space */
 	if (is_empty(buf + skip, len - skip))
 		return 1;
--
2.11.0
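For reference, below is a minimal userspace sketch of the alignment
arithmetic the new helper relies on. It is not part of the patch: the
toy_ubifs_info struct, the ALIGN_UP macro and the example values
(leb_start = 128, i.e. a 64-byte EC header plus a 64-byte VID header,
and a 512-byte write buffer) are illustrative assumptions.

#include <stdio.h>

/*
 * Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro.
 */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Illustrative stand-in for the few ubifs_info fields used here. */
struct toy_ubifs_info {
	int leb_start;      /* LEB data offset inside the PEB, e.g. 64 + 64 */
	int max_write_size; /* write-buffer size of the flash chip */
};

/*
 * Same arithmetic as the patch's ubifs_align_max_write(): align so that
 * the flash address (leb_start + offs) lands on a max_write_size
 * boundary, then convert back to a LEB offset.
 */
static int align_max_write(const struct toy_ubifs_info *c, int offs)
{
	return ALIGN_UP(c->leb_start + offs, c->max_write_size) - c->leb_start;
}

int main(void)
{
	struct toy_ubifs_info c = { .leb_start = 128, .max_write_size = 512 };
	int offs = 1000;	/* LEB offset of a suspected corruption */

	/* LEB-relative alignment vs. flash-relative alignment. */
	printf("ALIGN_UP(offs)        = %d\n", ALIGN_UP(offs, c.max_write_size));
	printf("align_max_write(offs) = %d\n", align_max_write(&c, offs));
	/*
	 * Prints 1024 and 1408: the write unit covering LEB offset 1000
	 * spans flash addresses [1024, 1536), i.e. LEB offsets [896, 1408),
	 * so empty space can only be expected from LEB offset 1408 onward.
	 */
	return 0;
}

With the old LEB-relative alignment, the empty-space check would start
at LEB offset 1024, inside the interrupted write unit, and could see
leftover garbage, which is why is_last_write() and no_more_nodes()
misjudge the corruption when leb_start is not a multiple of
max_write_size.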