summary refs log tree commit diff
path: root/fs/ntfs3/fsntfs.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/ntfs3/fsntfs.c')
-rw-r--r--	fs/ntfs3/fsntfs.c	223
1 file changed, 110 insertions, 113 deletions
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 33afee0f5559..5f138f715835 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -522,7 +522,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
ni->mi.dirty = true;
/* Step 2: Resize $MFT::BITMAP. */
- new_bitmap_bytes = bitmap_size(new_mft_total);
+ new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
@@ -853,7 +853,8 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
/*
* sb can be NULL here. In this case sbi->flags should be 0 too.
*/
- if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
+ if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
+ unlikely(ntfs3_forced_shutdown(sb)))
return;
blocksize = sb->s_blocksize;
@@ -904,10 +905,18 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+ struct ntfs_inode *ni = ntfs_i(inode);
ntfs_inode_err(inode, "%s", hint);
- make_bad_inode(inode);
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+
+ /* Do not call make_bad_inode()! */
+ ni->ni_bad = true;
+
+ /* Avoid recursion if bad inode is $Volume. */
+ if (inode->i_ino != MFT_REC_VOL &&
+ !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
}
/*
@@ -983,18 +992,11 @@ out:
if (err)
return err;
- mark_inode_dirty(&ni->vfs_inode);
+ mark_inode_dirty_sync(&ni->vfs_inode);
/* verify(!ntfs_update_mftmirr()); */
- /*
- * If we used wait=1, sync_inode_metadata waits for the io for the
- * inode to finish. It hangs when media is removed.
- * So wait=0 is sent down to sync_inode_metadata
- * and filemap_fdatawrite is used for the data blocks.
- */
- err = sync_inode_metadata(&ni->vfs_inode, 0);
- if (!err)
- err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
+ /* write mft record on disk. */
+ err = _ni_write_inode(&ni->vfs_inode, 1);
return err;
}
@@ -1013,32 +1015,28 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
return cpu_to_le32(hash);
}
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
+/*
+ * simple wrapper for sb_bread_unmovable.
+ */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
- struct block_device *bdev = sb->s_bdev;
- u32 blocksize = sb->s_blocksize;
- u64 block = lbo >> sb->s_blocksize_bits;
- u32 off = lbo & (blocksize - 1);
- u32 op = blocksize - off;
-
- for (; bytes; block += 1, off = 0, op = blocksize) {
- struct buffer_head *bh = __bread(bdev, block, blocksize);
-
- if (!bh)
- return -EIO;
-
- if (op > bytes)
- op = bytes;
-
- memcpy(buffer, bh->b_data + off, op);
-
- put_bh(bh);
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ struct buffer_head *bh;
- bytes -= op;
- buffer = Add2Ptr(buffer, op);
+ if (unlikely(block >= sbi->volume.blocks)) {
+ /* prevent generic message "attempt to access beyond end of device" */
+ ntfs_err(sb, "try to read out of volume at offset 0x%llx",
+ (u64)block << sb->s_blocksize_bits);
+ return NULL;
}
- return 0;
+ bh = sb_bread_unmovable(sb, block);
+ if (bh)
+ return bh;
+
+ ntfs_err(sb, "failed to read volume at offset 0x%llx",
+ (u64)block << sb->s_blocksize_bits);
+ return NULL;
}
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
@@ -1351,7 +1349,14 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
}
if (buffer_locked(bh))
__wait_on_buffer(bh);
- set_buffer_uptodate(bh);
+
+ lock_buffer(bh);
+ if (!buffer_uptodate(bh))
+ {
+ memset(bh->b_data, 0, blocksize);
+ set_buffer_uptodate(bh);
+ }
+ unlock_buffer(bh);
} else {
bh = ntfs_bread(sb, block);
if (!bh) {
@@ -1474,99 +1479,86 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
}
/*
- * ntfs_bio_pages - Read/write pages from/to disk.
+ * ntfs_read_write_run - Read/Write disk's page cache.
*/
-int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
- struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
- enum req_op op)
+int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ void *buf, u64 vbo, size_t bytes, int wr)
{
- int err = 0;
- struct bio *new, *bio = NULL;
struct super_block *sb = sbi->sb;
- struct block_device *bdev = sb->s_bdev;
- struct page *page;
+ struct address_space *mapping = sb->s_bdev->bd_mapping;
u8 cluster_bits = sbi->cluster_bits;
- CLST lcn, clen, vcn, vcn_next;
- u32 add, off, page_idx;
+ CLST vcn_next, vcn = vbo >> cluster_bits;
+ CLST lcn, clen;
u64 lbo, len;
- size_t run_idx;
- struct blk_plug plug;
+ size_t idx;
+ u32 off, op;
+ struct folio *folio;
+ char *kaddr;
if (!bytes)
return 0;
- blk_start_plug(&plug);
+ if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
+ return -ENOENT;
- /* Align vbo and bytes to be 512 bytes aligned. */
- lbo = (vbo + bytes + 511) & ~511ull;
- vbo = vbo & ~511ull;
- bytes = lbo - vbo;
+ if (lcn == SPARSE_LCN)
+ return -EINVAL;
- vcn = vbo >> cluster_bits;
- if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
- err = -ENOENT;
- goto out;
- }
off = vbo & sbi->cluster_mask;
- page_idx = 0;
- page = pages[0];
+ lbo = ((u64)lcn << cluster_bits) + off;
+ len = ((u64)clen << cluster_bits) - off;
for (;;) {
- lbo = ((u64)lcn << cluster_bits) + off;
- len = ((u64)clen << cluster_bits) - off;
-new_bio:
- new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
- if (bio) {
- bio_chain(bio, new);
- submit_bio(bio);
- }
- bio = new;
- bio->bi_iter.bi_sector = lbo >> 9;
+ /* Read range [lbo, lbo+len). */
+ folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
- while (len) {
- off = vbo & (PAGE_SIZE - 1);
- add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (bio_add_page(bio, page, add, off) < add)
- goto new_bio;
+ off = offset_in_page(lbo);
+ op = PAGE_SIZE - off;
- if (bytes <= add)
- goto out;
- bytes -= add;
- vbo += add;
+ if (op > len)
+ op = len;
+ if (op > bytes)
+ op = bytes;
- if (add + off == PAGE_SIZE) {
- page_idx += 1;
- if (WARN_ON(page_idx >= nr_pages)) {
- err = -EINVAL;
- goto out;
- }
- page = pages[page_idx];
- }
+ kaddr = kmap_local_folio(folio, 0);
+ if (wr) {
+ memcpy(kaddr + off, buf, op);
+ folio_mark_dirty(folio);
+ } else {
+ memcpy(buf, kaddr + off, op);
+ flush_dcache_folio(folio);
+ }
+ kunmap_local(kaddr);
+ folio_put(folio);
- if (len <= add)
- break;
- len -= add;
- lbo += add;
+ bytes -= op;
+ if (!bytes)
+ return 0;
+
+ buf += op;
+ len -= op;
+ if (len) {
+ /* next volume's page. */
+ lbo += op;
+ continue;
}
+ /* get next range. */
vcn_next = vcn + clen;
- if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
+ if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
vcn != vcn_next) {
- err = -ENOENT;
- goto out;
+ return -ENOENT;
}
- off = 0;
- }
-out:
- if (bio) {
- if (!err)
- err = submit_bio_wait(bio);
- bio_put(bio);
- }
- blk_finish_plug(&plug);
- return err;
+ if (lcn == SPARSE_LCN)
+ return -EINVAL;
+
+ lbo = ((u64)lcn << cluster_bits);
+ len = ((u64)clen << cluster_bits);
+ }
}
/*
@@ -2135,8 +2127,8 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
if (le32_to_cpu(d_security->size) == new_sec_size &&
d_security->key.hash == hash_key.hash &&
!memcmp(d_security + 1, sd, size_sd)) {
- *security_id = d_security->key.sec_id;
/* Such security already exists. */
+ *security_id = d_security->key.sec_id;
err = 0;
goto out;
}
@@ -2461,10 +2453,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
CLST end, i, zone_len, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
+ bool dirty = false;
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
if (!wnd_is_used(wnd, lcn, len)) {
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ /* mark volume as dirty out of wnd->rw_lock */
+ dirty = true;
end = lcn + len;
len = 0;
@@ -2518,6 +2512,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
out:
up_write(&wnd->rw_lock);
+ if (dirty)
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
@@ -2628,8 +2624,8 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
{
int err;
struct ATTRIB *attr;
+ u32 uni_bytes;
struct ntfs_inode *ni = sbi->volume.ni;
- const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
/* Allocate PATH_MAX bytes. */
struct cpu_str *uni = __getname();
@@ -2641,7 +2637,8 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
if (err < 0)
goto out;
- if (uni->len > max_ulen) {
+ uni_bytes = uni->len * sizeof(u16);
+ if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
ntfs_warn(sbi->sb, "new label is too long");
err = -EFBIG;
goto out;
@@ -2652,13 +2649,13 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
/* Ignore any errors. */
ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
- err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
- 0, &attr, NULL, NULL);
+ err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
+ NULL, NULL);
if (err < 0)
goto unlock_out;
/* write new label in on-disk struct. */
- memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
+ memcpy(resident_data(attr), uni->name, uni_bytes);
/* update cached value of current label. */
if (len >= ARRAY_SIZE(sbi->volume.label))
@@ -2676,4 +2673,4 @@ unlock_out:
out:
__putname(uni);
return err;
-} \ No newline at end of file
+}