Diffstat (limited to 'fs/ntfs3/run.c')
| -rw-r--r-- | fs/ntfs3/run.c | 67 |
1 file changed, 52 insertions, 15 deletions
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 58e988cd8049..395b20492525 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -9,6 +9,7 @@
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/log2.h>
+#include <linux/overflow.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -486,7 +487,7 @@ requires_new_range:
  * Helper for attr_collapse_range(),
  * which is helper for fallocate(collapse_range).
  */
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub)
 {
 	size_t index, eat;
 	struct ntfs_run *r, *e, *eat_start, *eat_end;
@@ -510,7 +511,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
 		/* Collapse a middle part of normal run, split. */
 		if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
 			return false;
-		return run_collapse_range(run, vcn, len);
+		return run_collapse_range(run, vcn, len, sub);
 	}
 
 	r += 1;
@@ -544,6 +545,13 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
 	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
 	run->count -= eat;
 
+	if (sub) {
+		e -= eat;
+		for (r = run->runs; r < e; r++) {
+			r->vcn -= sub;
+		}
+	}
+
 	return true;
 }
 
@@ -982,14 +990,22 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
 
 			if (!dlcn)
 				return -EINVAL;
-			lcn = prev_lcn + dlcn;
+
+			/* Check special combination: 0 + SPARSE_LCN64. */
+			if (!prev_lcn && dlcn == SPARSE_LCN64) {
+				lcn = SPARSE_LCN64;
+			} else if (check_add_overflow(prev_lcn, dlcn, &lcn)) {
+				return -EINVAL;
+			}
 			prev_lcn = lcn;
 		} else {
 			/* The size of 'dlcn' can't be > 8. */
 			return -EINVAL;
 		}
 
-		next_vcn = vcn64 + len;
+		if (check_add_overflow(vcn64, len, &next_vcn))
+			return -EINVAL;
+
 		/* Check boundary. */
 		if (next_vcn > evcn + 1)
 			return -EINVAL;
@@ -1055,8 +1071,8 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
 {
 	int ret, err;
 	CLST next_vcn, lcn, len;
-	size_t index;
-	bool ok;
+	size_t index, done;
+	bool ok, zone;
 	struct wnd_bitmap *wnd;
 
 	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
@@ -1087,8 +1103,9 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
 			continue;
 
 		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+		zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
 		/* Check for free blocks. */
-		ok = wnd_is_used(wnd, lcn, len);
+		ok = !zone && wnd_is_used(wnd, lcn, len);
 		up_read(&wnd->rw_lock);
 		if (ok)
 			continue;
@@ -1096,14 +1113,33 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
 		/* Looks like volume is corrupted. */
 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 
-		if (down_write_trylock(&wnd->rw_lock)) {
-			/* Mark all zero bits as used in range [lcn, lcn+len). */
-			size_t done;
-			err = wnd_set_used_safe(wnd, lcn, len, &done);
-			up_write(&wnd->rw_lock);
-			if (err)
-				return err;
+		if (!down_write_trylock(&wnd->rw_lock))
+			continue;
+
+		if (zone) {
+			/*
+			 * Range [lcn, lcn + len) intersects with zone.
+			 * To avoid complex with zone just turn it off.
+			 */
+			wnd_zone_set(wnd, 0, 0);
+		}
+
+		/* Mark all zero bits as used in range [lcn, lcn+len). */
+		err = wnd_set_used_safe(wnd, lcn, len, &done);
+		if (zone) {
+			/* Restore zone. Lock mft run. */
+			struct rw_semaphore *lock =
+				is_mounted(sbi) ? &sbi->mft.ni->file.run_lock :
+						  NULL;
+			if (lock)
+				down_read(lock);
+			ntfs_refresh_zone(sbi);
+			if (lock)
+				up_read(lock);
 		}
+		up_write(&wnd->rw_lock);
+		if (err)
+			return err;
 	}
 
 	return ret;
@@ -1133,7 +1169,8 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
 		return -EINVAL;
 
 	run_buf += size_size + offset_size;
-	vcn64 += len;
+	if (check_add_overflow(vcn64, len, &vcn64))
+		return -EINVAL;
 
 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
 	if (vcn64 > 0x100000000ull)
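Side note, not part of the diff above: the new checks come down to two small reusable patterns. check_add_overflow() from <linux/overflow.h> (built on the compilers' __builtin_add_overflow) rejects VCN/LCN additions that would wrap, and the zone flag is an ordinary half-open interval intersection test, max(start1, start2) < min(end1, end2). Below is a minimal userspace sketch of both patterns; the helper names and the sample numbers are invented for illustration and are not part of fs/ntfs3.

/*
 * Userspace sketch (not kernel code) of the two checks used in the patch.
 * All values are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-checked add, mirroring check_add_overflow() semantics. */
static bool add_would_overflow(uint64_t a, uint64_t b, uint64_t *sum)
{
	return __builtin_add_overflow(a, b, sum);
}

/* Do the half-open ranges [s1, e1) and [s2, e2) intersect? */
static bool ranges_intersect(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	uint64_t lo = s1 > s2 ? s1 : s2;	/* max of the starts */
	uint64_t hi = e1 < e2 ? e1 : e2;	/* min of the ends   */

	return lo < hi;
}

int main(void)
{
	uint64_t vcn = UINT64_MAX - 4, len = 16, next_vcn;

	/* A corrupted run could make vcn + len wrap; the guard catches it. */
	if (add_would_overflow(vcn, len, &next_vcn))
		printf("vcn + len overflows -> would return -EINVAL\n");

	/* Cluster range vs. reserved zone (made-up numbers). */
	if (ranges_intersect(100, 150, 120, 200))
		printf("run intersects the zone -> zone handling path taken\n");

	return 0;
}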
