-rw-r--r--  Documentation/filesystems/ntfs3.rst |  19
-rw-r--r--  fs/ntfs3/attrib.c                   | 392
-rw-r--r--  fs/ntfs3/attrlist.c                 |   5
-rw-r--r--  fs/ntfs3/bitfunc.c                  |   4
-rw-r--r--  fs/ntfs3/bitmap.c                   | 168
-rw-r--r--  fs/ntfs3/dir.c                      |   4
-rw-r--r--  fs/ntfs3/file.c                     | 203
-rw-r--r--  fs/ntfs3/frecord.c                  |  40
-rw-r--r--  fs/ntfs3/fslog.c                    |  62
-rw-r--r--  fs/ntfs3/fsntfs.c                   | 190
-rw-r--r--  fs/ntfs3/index.c                    | 127
-rw-r--r--  fs/ntfs3/inode.c                    | 203
-rw-r--r--  fs/ntfs3/namei.c                    | 238
-rw-r--r--  fs/ntfs3/ntfs.h                     |   6
-rw-r--r--  fs/ntfs3/ntfs_fs.h                  |  41
-rw-r--r--  fs/ntfs3/record.c                   |  13
-rw-r--r--  fs/ntfs3/run.c                      |  28
-rw-r--r--  fs/ntfs3/super.c                    | 143
-rw-r--r--  fs/ntfs3/upcase.c                   |  12
-rw-r--r--  fs/ntfs3/xattr.c                    | 158
20 files changed, 1430 insertions(+), 626 deletions(-)
diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
index d67ccd22c63b..5aa102bd72c2 100644
--- a/Documentation/filesystems/ntfs3.rst
+++ b/Documentation/filesystems/ntfs3.rst
@@ -25,6 +25,11 @@ versions up to 3.1. File system type to use on mount is *ntfs3*.
Note: Applied to empty files, this allows to switch type between
sparse(0x200), compressed(0x800) and normal.
+ - *system.ntfs_attrib_be* gets/sets ntfs file/dir attributes.
+
+ Same value as system.ntfs_attrib but always represented as big-endian
+ (the endianness of system.ntfs_attrib is the same as that of the CPU).
+
Mount Options
=============
@@ -75,6 +80,20 @@ this table marked with no it means default is without **no**.
- Files with the Windows-specific SYSTEM (FILE_ATTRIBUTE_SYSTEM) attribute
will be marked as system immutable files.
+ * - hide_dot_files
+ - Updates the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute
+ when creating and moving or renaming files. Files whose names start
+ with a dot will have the HIDDEN attribute set and files whose names
+ do not start with a dot will have it unset.
+
+ * - windows_names
+ - Prevents the creation of files and directories with a name not allowed
+ by Windows, either because it contains a disallowed character (one of
+ " * / : < > ? \\ | or any character whose code is less than 0x20),
+ because the name (with or without extension) is a reserved file name
+ (CON, AUX, NUL, PRN, LPT1-9, COM1-9), or because the last character is
+ a space or a dot. Such existing files can still be read and renamed.
+
* - discard
- Enable support of the TRIM command for improved performance on delete
operations, which is recommended for use with the solid-state drives
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 71f870d497ae..5e6bafb10f42 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -55,33 +55,6 @@ static inline u64 get_pre_allocated(u64 size)
}
/*
- * attr_must_be_resident
- *
- * Return: True if attribute must be resident.
- */
-static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
- enum ATTR_TYPE type)
-{
- const struct ATTR_DEF_ENTRY *de;
-
- switch (type) {
- case ATTR_STD:
- case ATTR_NAME:
- case ATTR_ID:
- case ATTR_LABEL:
- case ATTR_VOL_INFO:
- case ATTR_ROOT:
- case ATTR_EA_INFO:
- return true;
- default:
- de = ntfs_query_def(sbi, type);
- if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
- return true;
- return false;
- }
-}
-
-/*
* attr_load_runs - Load all runs stored in @attr.
*/
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
@@ -101,6 +74,10 @@ static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
asize = le32_to_cpu(attr->size);
run_off = le16_to_cpu(attr->nres.run_off);
+
+ if (run_off > asize)
+ return -EINVAL;
+
err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
asize - run_off);
@@ -172,7 +149,7 @@ out:
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
- CLST *new_lcn)
+ CLST *new_lcn, CLST *new_len)
{
int err;
CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
@@ -192,20 +169,36 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
if (err)
goto out;
- if (new_lcn && vcn == vcn0)
- *new_lcn = lcn;
+ if (vcn == vcn0) {
+ /* Return the first fragment. */
+ if (new_lcn)
+ *new_lcn = lcn;
+ if (new_len)
+ *new_len = flen;
+ }
/* Add new fragment into run storage. */
- if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
+ if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
/* Undo last 'ntfs_look_for_free_space' */
mark_as_free_ex(sbi, lcn, len, false);
err = -ENOMEM;
goto out;
}
+ if (opt & ALLOCATE_ZERO) {
+ u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
+
+ err = blkdev_issue_zeroout(sbi->sb->s_bdev,
+ (sector_t)lcn << shift,
+ (sector_t)flen << shift,
+ GFP_NOFS, 0);
+ if (err)
+ goto out;
+ }
+
vcn += flen;
- if (flen >= len || opt == ALLOCATE_MFT ||
+ if (flen >= len || (opt & ALLOCATE_MFT) ||
(fr && run->count - cnt >= fr)) {
*alen = vcn - vcn0;
return 0;
@@ -280,7 +273,8 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
const char *data = resident_data(attr);
err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
- ALLOCATE_DEF, &alen, 0, NULL);
+ ALLOCATE_DEF, &alen, 0, NULL,
+ NULL);
if (err)
goto out1;
@@ -420,6 +414,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
CLST next_svcn, pre_alloc = -1, done = 0;
bool is_ext, is_bad = false;
+ bool dirty = false;
u32 align;
struct MFT_REC *rec;
@@ -440,8 +435,10 @@ again:
return err;
/* Return if file is still resident. */
- if (!attr_b->non_res)
+ if (!attr_b->non_res) {
+ dirty = true;
goto ok1;
+ }
/* Layout of records may be changed, so do a full search. */
goto again;
@@ -464,7 +461,7 @@ again_1:
if (keep_prealloc && new_size < old_size) {
attr_b->nres.data_size = cpu_to_le64(new_size);
- mi_b->dirty = true;
+ mi_b->dirty = dirty = true;
goto ok;
}
@@ -510,7 +507,7 @@ next_le:
if (new_alloc <= old_alloc) {
attr_b->nres.data_size = cpu_to_le64(new_size);
- mi_b->dirty = true;
+ mi_b->dirty = dirty = true;
goto ok;
}
@@ -575,13 +572,13 @@ add_alloc_in_same_attr_seg:
/* ~3 bytes per fragment. */
err = attr_allocate_clusters(
sbi, run, vcn, lcn, to_allocate, &pre_alloc,
- is_mft ? ALLOCATE_MFT : 0, &alen,
+ is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
is_mft ? 0
: (sbi->record_size -
le32_to_cpu(rec->used) + 8) /
3 +
1,
- NULL);
+ NULL, NULL);
if (err)
goto out;
}
@@ -601,7 +598,7 @@ pack_runs:
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
new_alloc_tmp = (u64)next_svcn << cluster_bits;
attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
- mi_b->dirty = true;
+ mi_b->dirty = dirty = true;
if (next_svcn >= vcn && !to_allocate) {
/* Normal way. Update attribute and exit. */
@@ -687,7 +684,7 @@ pack_runs:
old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
attr_b->nres.valid_size = attr_b->nres.data_size =
attr_b->nres.alloc_size = cpu_to_le64(old_size);
- mi_b->dirty = true;
+ mi_b->dirty = dirty = true;
goto again_1;
}
@@ -749,7 +746,7 @@ pack_runs:
attr_b->nres.valid_size =
attr_b->nres.alloc_size;
}
- mi_b->dirty = true;
+ mi_b->dirty = dirty = true;
err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
true);
@@ -810,16 +807,9 @@ ok1:
if (ret)
*ret = attr_b;
- /* Update inode_set_bytes. */
if (((type == ATTR_DATA && !name_len) ||
(type == ATTR_ALLOC && name == I30_NAME))) {
- bool dirty = false;
-
- if (ni->vfs_inode.i_size != new_size) {
- ni->vfs_inode.i_size = new_size;
- dirty = true;
- }
-
+ /* Update inode_set_bytes. */
if (attr_b->non_res) {
new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
@@ -828,6 +818,7 @@ ok1:
}
}
+ /* Don't forget to update duplicate information in parent. */
if (dirty) {
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mark_inode_dirty(&ni->vfs_inode);
@@ -878,8 +869,19 @@ bad_inode:
return err;
}
+/*
+ * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
+ *
+ * @new == NULL means just get the current mapping for 'vcn'.
+ * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
+ * @zero - zero out newly allocated clusters.
+ *
+ * NOTE:
+ * - @new != NULL is called only for sparsed or compressed attributes.
+ * - newly allocated clusters are zeroed via blkdev_issue_zeroout.
+ */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
- CLST *len, bool *new)
+ CLST *len, bool *new, bool zero)
{
int err = 0;
struct runs_tree *run = &ni->file.run;
@@ -888,29 +890,29 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
- CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
- u64 total_size;
- u32 clst_per_frame;
- bool ok;
+ CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
+ CLST alloc, evcn;
+ unsigned fr;
+ u64 total_size, total_size0;
+ int step = 0;
if (new)
*new = false;
+ /* Try to find in cache. */
down_read(&ni->file.run_lock);
- ok = run_lookup_entry(run, vcn, lcn, len, NULL);
+ if (!run_lookup_entry(run, vcn, lcn, len, NULL))
+ *len = 0;
up_read(&ni->file.run_lock);
- if (ok && (*lcn != SPARSE_LCN || !new)) {
- /* Normal way. */
- return 0;
+ if (*len) {
+ if (*lcn != SPARSE_LCN || !new)
+ return 0; /* Fast normal way without allocation. */
+ else if (clen > *len)
+ clen = *len;
}
- if (!clen)
- clen = 1;
-
- if (ok && clen > *len)
- clen = *len;
-
+ /* No cluster in cache or we need to allocate cluster in hole. */
sbi = ni->mi.sbi;
cluster_bits = sbi->cluster_bits;
@@ -932,16 +934,15 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
if (vcn >= asize) {
- err = -EINVAL;
+ if (new) {
+ err = -EINVAL;
+ } else {
+ *len = 1;
+ *lcn = SPARSE_LCN;
+ }
goto out;
}
- clst_per_frame = 1u << attr_b->nres.c_unit;
- to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
-
- if (vcn + to_alloc > asize)
- to_alloc = asize - vcn;
-
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
@@ -960,36 +961,68 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}
+ /* Load actual information into the cache. */
err = attr_load_runs(attr, ni, run, NULL);
if (err)
goto out;
- if (!ok) {
- ok = run_lookup_entry(run, vcn, lcn, len, NULL);
- if (ok && (*lcn != SPARSE_LCN || !new)) {
- /* Normal way. */
- err = 0;
- goto ok;
- }
+ if (!*len) {
+ if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
+ if (*lcn != SPARSE_LCN || !new)
+ goto ok; /* Slow normal way without allocation. */
- if (!ok && !new) {
- *len = 0;
- err = 0;
+ if (clen > *len)
+ clen = *len;
+ } else if (!new) {
+ /* Here we may return -ENOENT.
+ * In any case caller gets zero length. */
goto ok;
}
-
- if (ok && clen > *len) {
- clen = *len;
- to_alloc = (clen + clst_per_frame - 1) &
- ~(clst_per_frame - 1);
- }
}
if (!is_attr_ext(attr_b)) {
+ /* The code below is only for sparsed or compressed attributes. */
err = -EINVAL;
goto out;
}
+ vcn0 = vcn;
+ to_alloc = clen;
+ fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
+ /* Allocate frame-aligned clusters.
+ * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
+ * ntfs3 uses 1 cluster per frame for newly created sparsed files. */
+ if (attr_b->nres.c_unit) {
+ CLST clst_per_frame = 1u << attr_b->nres.c_unit;
+ CLST cmask = ~(clst_per_frame - 1);
+
+ /* Get frame aligned vcn and to_alloc. */
+ vcn = vcn0 & cmask;
+ to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
+ if (fr < clst_per_frame)
+ fr = clst_per_frame;
+ zero = true;
+
+ /* Check if 'vcn' and 'vcn0' in different attribute segments. */
+ if (vcn < svcn || evcn1 <= vcn) {
+ /* Load attribute for truncated vcn. */
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
+ &vcn, &mi);
+ if (!attr) {
+ err = -EINVAL;
+ goto out;
+ }
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ err = attr_load_runs(attr, ni, run, NULL);
+ if (err)
+ goto out;
+ }
+ }
+
+ if (vcn + to_alloc > asize)
+ to_alloc = asize - vcn;
+
/* Get the last LCN to allocate from. */
hint = 0;
@@ -1003,18 +1036,35 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
hint = -1;
}
- err = attr_allocate_clusters(
- sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
- (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
- lcn);
+ /* Allocate and zeroout new clusters. */
+ err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
+ zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
+ fr, lcn, len);
if (err)
goto out;
*new = true;
+ step = 1;
- end = vcn + *len;
+ end = vcn + alen;
+ /* Save 'total_size0' to restore if error. */
+ total_size0 = le64_to_cpu(attr_b->nres.total_size);
+ total_size = total_size0 + ((u64)alen << cluster_bits);
- total_size = le64_to_cpu(attr_b->nres.total_size) +
- ((u64)*len << cluster_bits);
+ if (vcn != vcn0) {
+ if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
+ err = -EINVAL;
+ goto out;
+ }
+ if (*lcn == SPARSE_LCN) {
+ /* Internal error. Should not happen. */
+ WARN_ON(1);
+ err = -EINVAL;
+ goto out;
+ }
+ /* Check the case when vcn0 + len overlaps newly allocated clusters. */
+ if (vcn0 + *len > end)
+ *len = end - vcn0;
+ }
repack:
err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
@@ -1040,7 +1090,7 @@ repack:
if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
if (err)
- goto out;
+ goto undo1;
/* Layout of records is changed. */
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
@@ -1057,67 +1107,83 @@ repack:
}
}
+ /*
+ * The code below may require an additional cluster (to extend the attribute list)
+ * and/or one MFT record.
+ * It is too complex to undo operations if -ENOSPC occurs deep inside
+ * 'ni_insert_nonresident'.
+ * Return -ENOSPC in advance here if there is no free cluster and no free MFT record.
+ */
+ if (!ntfs_check_for_free_space(sbi, 1, 1)) {
+ /* Undo step 1. */
+ err = -ENOSPC;
+ goto undo1;
+ }
+
+ step = 2;
svcn = evcn1;
/* Estimate next attribute. */
attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
- if (attr) {
- CLST alloc = bytes_to_cluster(
- sbi, le64_to_cpu(attr_b->nres.alloc_size));
- CLST evcn = le64_to_cpu(attr->nres.evcn);
-
- if (end < next_svcn)
- end = next_svcn;
- while (end > evcn) {
- /* Remove segment [svcn : evcn). */
- mi_remove_attr(NULL, mi, attr);
-
- if (!al_remove_le(ni, le)) {
- err = -EINVAL;
- goto out;
- }
+ if (!attr) {
+ /* Insert new attribute segment. */
+ goto ins_ext;
+ }
- if (evcn + 1 >= alloc) {
- /* Last attribute segment. */
- evcn1 = evcn + 1;
- goto ins_ext;
- }
+ /* Try to update existed attribute segment. */
+ alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
+ evcn = le64_to_cpu(attr->nres.evcn);
- if (ni_load_mi(ni, le, &mi)) {
- attr = NULL;
- goto out;
- }
+ if (end < next_svcn)
+ end = next_svcn;
+ while (end > evcn) {
+ /* Remove segment [svcn : evcn). */
+ mi_remove_attr(NULL, mi, attr);
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
- &le->id);
- if (!attr) {
- err = -EINVAL;
- goto out;
- }
- svcn = le64_to_cpu(attr->nres.svcn);
- evcn = le64_to_cpu(attr->nres.evcn);
+ if (!al_remove_le(ni, le)) {
+ err = -EINVAL;
+ goto out;
}
- if (end < svcn)
- end = svcn;
+ if (evcn + 1 >= alloc) {
+ /* Last attribute segment. */
+ evcn1 = evcn + 1;
+ goto ins_ext;
+ }
- err = attr_load_runs(attr, ni, run, &end);
- if (err)
+ if (ni_load_mi(ni, le, &mi)) {
+ attr = NULL;
goto out;
+ }
- evcn1 = evcn + 1;
- attr->nres.svcn = cpu_to_le64(next_svcn);
- err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
- if (err)
+ attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
+ if (!attr) {
+ err = -EINVAL;
goto out;
+ }
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn = le64_to_cpu(attr->nres.evcn);
+ }
- le->vcn = cpu_to_le64(next_svcn);
- ni->attr_list.dirty = true;
- mi->dirty = true;
+ if (end < svcn)
+ end = svcn;
+
+ err = attr_load_runs(attr, ni, run, &end);
+ if (err)
+ goto out;
+
+ evcn1 = evcn + 1;
+ attr->nres.svcn = cpu_to_le64(next_svcn);
+ err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
+ if (err)
+ goto out;
+
+ le->vcn = cpu_to_le64(next_svcn);
+ ni->attr_list.dirty = true;
+ mi->dirty = true;
+ next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
- next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
- }
ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
@@ -1129,10 +1195,26 @@ ins_ext:
ok:
run_truncate_around(run, vcn);
out:
+ if (err && step > 1) {
+ /* Too complex to restore. */
+ _ntfs_bad_inode(&ni->vfs_inode);
+ }
up_write(&ni->file.run_lock);
ni_unlock(ni);
return err;
+
+undo1:
+ /* Undo step 1. */
+ attr_b->nres.total_size = cpu_to_le64(total_size0);
+ inode_set_bytes(&ni->vfs_inode, total_size0);
+
+ if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
+ !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
+ mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
+ _ntfs_bad_inode(&ni->vfs_inode);
+ }
+ goto out;
}
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
@@ -1217,6 +1299,11 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
CLST svcn, evcn;
u16 ro;
+ if (!ni) {
+ /* Is record corrupted? */
+ return -ENOENT;
+ }
+
attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
if (!attr) {
/* Is record corrupted? */
@@ -1232,6 +1319,10 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
}
ro = le16_to_cpu(attr->nres.run_off);
+
+ if (ro > le32_to_cpu(attr->size))
+ return -EINVAL;
+
err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
if (err < 0)
@@ -1530,7 +1621,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
- CLST svcn, evcn1, next_svcn, lcn, len;
+ CLST svcn, evcn1, next_svcn, len;
CLST vcn, end, clst_data;
u64 total_size, valid_size, data_size;
@@ -1606,8 +1697,9 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
}
err = attr_allocate_clusters(sbi, run, vcn + clst_data,
- hint + 1, len - clst_data, NULL, 0,
- &alen, 0, &lcn);
+ hint + 1, len - clst_data, NULL,
+ ALLOCATE_DEF, &alen, 0, NULL,
+ NULL);
if (err)
goto out;
@@ -1901,6 +1993,11 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
u16 le_sz;
u16 roff = le16_to_cpu(attr->nres.run_off);
+ if (roff > le32_to_cpu(attr->size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
evcn1 - 1, svcn, Add2Ptr(attr, roff),
le32_to_cpu(attr->size) - roff);
@@ -2020,7 +2117,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
return -ENOENT;
if (!attr_b->non_res) {
- u32 data_size = le32_to_cpu(attr->res.data_size);
+ u32 data_size = le32_to_cpu(attr_b->res.data_size);
u32 from, to;
if (vbo > data_size)
@@ -2290,7 +2387,8 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (!attr_b->non_res) {
/* Still resident. */
- char *data = Add2Ptr(attr_b, attr_b->res.data_off);
+ char *data = Add2Ptr(attr_b,
+ le16_to_cpu(attr_b->res.data_off));
memmove(data + bytes, data, bytes);
memset(data, 0, bytes);
@@ -2382,8 +2480,8 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (vbo <= ni->i_valid)
ni->i_valid += bytes;
- attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
- attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
+ attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
+ attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
/* ni->valid may be not equal valid_size (temporary). */
if (ni->i_valid > data_size + bytes)
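Aside (not part of the patch): the attr_data_get_block() comment above separates a pure lookup from an allocating call. A minimal kernel-style sketch of the two call patterns, modeled on the file.c hunks later in this diff; 'map_one_cluster' is a hypothetical helper (requires fs/ntfs3/ntfs_fs.h), not code from the patch.

/* Sketch only: the two call patterns for the new signature. */
static int map_one_cluster(struct ntfs_inode *ni, CLST vcn, bool allocate)
{
	CLST lcn, clen;
	bool new;

	if (!allocate)
		/* Pure lookup: @new == NULL, never allocates clusters. */
		return attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, false);

	/* Allocate if 'vcn' is a hole (sparsed/compressed files only) and
	 * zero the new clusters via blkdev_issue_zeroout. */
	return attr_data_get_block(ni, vcn, 1, &lcn, &clen, &new, true);
}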
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index bad6d8a849a2..c0c6bcbc8c05 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -68,6 +68,11 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
run_init(&ni->attr_list.run);
+ if (run_off > le32_to_cpu(attr->size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
0, le64_to_cpu(attr->nres.evcn), 0,
Add2Ptr(attr, run_off),
diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
index 50d838093790..25a4d4896aa9 100644
--- a/fs/ntfs3/bitfunc.c
+++ b/fs/ntfs3/bitfunc.c
@@ -30,7 +30,7 @@ static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
*
* Return: True if all bits [bit, bit+nbits) are zeros "0".
*/
-bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
+bool are_bits_clear(const void *lmap, size_t bit, size_t nbits)
{
size_t pos = bit & 7;
const u8 *map = (u8 *)lmap + (bit >> 3);
@@ -78,7 +78,7 @@ bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
*
* Return: True if all bits [bit, bit+nbits) are ones "1".
*/
-bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
+bool are_bits_set(const void *lmap, size_t bit, size_t nbits)
{
u8 mask;
size_t pos = bit & 7;
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index e92bbd754365..723fb64e6531 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -59,14 +59,14 @@ void ntfs3_exit_bitmap(void)
*
* Return: -1 if not found.
*/
-static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
+static size_t wnd_scan(const void *buf, size_t wbit, u32 wpos, u32 wend,
size_t to_alloc, size_t *prev_tail, size_t *b_pos,
size_t *b_len)
{
while (wpos < wend) {
size_t free_len;
u32 free_bits, end;
- u32 used = find_next_zero_bit(buf, wend, wpos);
+ u32 used = find_next_zero_bit_le(buf, wend, wpos);
if (used >= wend) {
if (*b_len < *prev_tail) {
@@ -92,7 +92,7 @@ static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
* Now we have a fragment [wpos, wend) staring with 0.
*/
end = wpos + to_alloc - *prev_tail;
- free_bits = find_next_bit(buf, min(end, wend), wpos);
+ free_bits = find_next_bit_le(buf, min(end, wend), wpos);
free_len = *prev_tail + free_bits - wpos;
@@ -504,7 +504,6 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
u8 cluster_bits = sbi->cluster_bits;
u32 wbits = 8 * sb->s_blocksize;
u32 used, frb;
- const ulong *buf;
size_t wpos, wbit, iw, vbo;
struct buffer_head *bh = NULL;
CLST lcn, clen;
@@ -558,9 +557,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
goto out;
}
- buf = (ulong *)bh->b_data;
-
- used = bitmap_weight(buf, wbits);
+ used = ntfs_bitmap_weight_le(bh->b_data, wbits);
if (used < wbits) {
frb = wbits - used;
wnd->free_bits[iw] = frb;
@@ -574,7 +571,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
wbits = wnd->nbits - wbit;
do {
- used = find_next_zero_bit(buf, wbits, wpos);
+ used = find_next_zero_bit_le(bh->b_data, wbits, wpos);
if (used > wpos && prev_tail) {
wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
@@ -590,7 +587,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
break;
}
- frb = find_next_bit(buf, wbits, wpos);
+ frb = find_next_bit_le(bh->b_data, wbits, wpos);
if (frb >= wbits) {
/* Keep last free block. */
prev_tail += frb - wpos;
@@ -661,7 +658,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
if (!wnd->bits_last)
wnd->bits_last = wbits;
- wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
+ wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
if (!wnd->free_bits)
return -ENOMEM;
@@ -718,7 +715,6 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
while (iw < wnd->nwnd && bits) {
u32 tail, op;
- ulong *buf;
if (iw + 1 == wnd->nwnd)
wbits = wnd->bits_last;
@@ -732,11 +728,9 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
break;
}
- buf = (ulong *)bh->b_data;
-
lock_buffer(bh);
- __bitmap_clear(buf, wbit, op);
+ ntfs_bitmap_clear_le(bh->b_data, wbit, op);
wnd->free_bits[iw] += op;
@@ -771,7 +765,6 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
while (iw < wnd->nwnd && bits) {
u32 tail, op;
- ulong *buf;
if (unlikely(iw + 1 == wnd->nwnd))
wbits = wnd->bits_last;
@@ -784,11 +777,10 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
err = PTR_ERR(bh);
break;
}
- buf = (ulong *)bh->b_data;
lock_buffer(bh);
- __bitmap_set(buf, wbit, op);
+ ntfs_bitmap_set_le(bh->b_data, wbit, op);
wnd->free_bits[iw] -= op;
set_buffer_uptodate(bh);
@@ -809,6 +801,44 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
}
/*
+ * wnd_set_used_safe - Mark the bit range from bit to bit + bits as used.
+ *
+ * Unlike wnd_set_used/wnd_set_free, this function is not fully trusted.
+ * It scans every bit in the bitmap and marks each free bit as used.
+ * @done - how many bits were actually marked as used.
+ *
+ * NOTE: Normally *done should be 0.
+ */
+int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
+ size_t *done)
+{
+ size_t i, from = 0, len = 0;
+ int err = 0;
+
+ *done = 0;
+ for (i = 0; i < bits; i++) {
+ if (wnd_is_free(wnd, bit + i, 1)) {
+ if (!len)
+ from = bit + i;
+ len += 1;
+ } else if (len) {
+ err = wnd_set_used(wnd, from, len);
+ *done += len;
+ len = 0;
+ if (err)
+ break;
+ }
+ }
+
+ if (len) {
+ /* last fragment. */
+ err = wnd_set_used(wnd, from, len);
+ *done += len;
+ }
+ return err;
+}
+
+/*
* wnd_is_free_hlp
*
* Return: True if all clusters [bit, bit+bits) are free (bitmap only).
@@ -836,7 +866,7 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
if (IS_ERR(bh))
return false;
- ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
+ ret = are_bits_clear(bh->b_data, wbit, op);
put_bh(bh);
if (!ret)
@@ -928,7 +958,7 @@ use_wnd:
if (IS_ERR(bh))
goto out;
- ret = are_bits_set((ulong *)bh->b_data, wbit, op);
+ ret = are_bits_set(bh->b_data, wbit, op);
put_bh(bh);
if (!ret)
goto out;
@@ -959,7 +989,6 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
size_t fnd, max_alloc, b_len, b_pos;
size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
size_t to_alloc0 = to_alloc;
- const ulong *buf;
const struct e_node *e;
const struct rb_node *pr, *cr;
u8 log2_bits;
@@ -1185,14 +1214,13 @@ Again:
continue;
}
- buf = (ulong *)bh->b_data;
-
/* Scan range [wbit, zbit). */
if (wpos < wzbit) {
/* Scan range [wpos, zbit). */
- fnd = wnd_scan(buf, wbit, wpos, wzbit,
- to_alloc, &prev_tail,
- &b_pos, &b_len);
+ fnd = wnd_scan(bh->b_data, wbit, wpos,
+ wzbit, to_alloc,
+ &prev_tail, &b_pos,
+ &b_len);
if (fnd != MINUS_ONE_T) {
put_bh(bh);
goto found;
@@ -1203,7 +1231,7 @@ Again:
/* Scan range [zend, ebit). */
if (wzend < wbits) {
- fnd = wnd_scan(buf, wbit,
+ fnd = wnd_scan(bh->b_data, wbit,
max(wzend, wpos), wbits,
to_alloc, &prev_tail,
&b_pos, &b_len);
@@ -1242,11 +1270,9 @@ Again:
continue;
}
- buf = (ulong *)bh->b_data;
-
/* Scan range [wpos, eBits). */
- fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
- &b_pos, &b_len);
+ fnd = wnd_scan(bh->b_data, wbit, wpos, wbits, to_alloc,
+ &prev_tail, &b_pos, &b_len);
put_bh(bh);
if (fnd != MINUS_ONE_T)
goto found;
@@ -1324,7 +1350,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
new_last = wbits;
if (new_wnd != wnd->nwnd) {
- new_free = kmalloc(new_wnd * sizeof(u16), GFP_NOFS);
+ new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
if (!new_free)
return -ENOMEM;
@@ -1344,7 +1370,6 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
size_t frb;
u64 vbo, lbo, bytes;
struct buffer_head *bh;
- ulong *buf;
if (iw + 1 == new_wnd)
wbits = new_last;
@@ -1361,10 +1386,9 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
return -EIO;
lock_buffer(bh);
- buf = (ulong *)bh->b_data;
- __bitmap_clear(buf, b0, blocksize * 8 - b0);
- frb = wbits - bitmap_weight(buf, wbits);
+ ntfs_bitmap_clear_le(bh->b_data, b0, blocksize * 8 - b0);
+ frb = wbits - ntfs_bitmap_weight_le(bh->b_data, wbits);
wnd->total_zeroes += frb - wnd->free_bits[iw];
wnd->free_bits[iw] = frb;
@@ -1411,7 +1435,6 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
CLST lcn_from = bytes_to_cluster(sbi, range->start);
size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
u32 wbit = lcn_from & (wbits - 1);
- const ulong *buf;
CLST lcn_to;
if (!minlen)
@@ -1424,7 +1447,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
- for (; iw < wnd->nbits; iw++, wbit = 0) {
+ for (; iw < wnd->nwnd; iw++, wbit = 0) {
CLST lcn_wnd = iw * wbits;
struct buffer_head *bh;
@@ -1446,10 +1469,8 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
break;
}
- buf = (ulong *)bh->b_data;
-
for (; wbit < wbits; wbit++) {
- if (!test_bit(wbit, buf)) {
+ if (!test_bit_le(wbit, bh->b_data)) {
if (!len)
lcn = lcn_wnd + wbit;
len += 1;
@@ -1481,3 +1502,70 @@ out:
return err;
}
+
+#if BITS_PER_LONG == 64
+typedef __le64 bitmap_ulong;
+#define cpu_to_ul(x) cpu_to_le64(x)
+#define ul_to_cpu(x) le64_to_cpu(x)
+#else
+typedef __le32 bitmap_ulong;
+#define cpu_to_ul(x) cpu_to_le32(x)
+#define ul_to_cpu(x) le32_to_cpu(x)
+#endif
+
+void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
+{
+ bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ bitmap_ulong mask_to_set = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = cpu_to_ul(~0UL);
+ p++;
+ }
+ if (len) {
+ mask_to_set &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
+ *p |= mask_to_set;
+ }
+}
+
+void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
+{
+ bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ bitmap_ulong mask_to_clear = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = cpu_to_ul(~0UL);
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
+ *p &= ~mask_to_clear;
+ }
+}
+
+unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits)
+{
+ const ulong *bmp = bitmap;
+ unsigned int k, lim = bits / BITS_PER_LONG;
+ unsigned int w = 0;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bmp[k]);
+
+ if (bits % BITS_PER_LONG) {
+ w += hweight_long(ul_to_cpu(((bitmap_ulong *)bitmap)[k]) &
+ BITMAP_LAST_WORD_MASK(bits));
+ }
+
+ return w;
+}
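Aside (not part of the patch): the ntfs_bitmap_*_le() helpers above replace CPU-endian __bitmap_set/__bitmap_clear/bitmap_weight because the on-disk bitmaps are little-endian byte streams regardless of host word size or endianness. A small runnable userspace analog (plain bytes only, not the kernel implementation) showing the byte layout the helpers must preserve:

/* Userspace analog only; assumes a byte-addressed little-endian bitmap. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Set bits [start, start + len) in a little-endian bitmap of bytes. */
static void bitmap_set_le8(uint8_t *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = start; i < start + len; i++)
		map[i / 8] |= 1u << (i % 8);
}

int main(void)
{
	uint8_t bmp[4];

	memset(bmp, 0, sizeof(bmp));
	bitmap_set_le8(bmp, 6, 4);	/* bits 6..9 span bytes 0 and 1 */

	/* Expected on any host: byte 0 = 0xc0, byte 1 = 0x03. */
	printf("%02x %02x %02x %02x\n", bmp[0], bmp[1], bmp[2], bmp[3]);
	return 0;
}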
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index fb438d604040..063a6654199b 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -26,8 +26,8 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
if (!nls) {
/* UTF-16 -> UTF-8 */
- ret = utf16s_to_utf8s(name, len, UTF16_LITTLE_ENDIAN, buf,
- buf_len);
+ ret = utf16s_to_utf8s((wchar_t *)name, len, UTF16_LITTLE_ENDIAN,
+ buf, buf_len);
buf[ret] = '\0';
return ret;
}
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index c5e4a886593d..e5399ebc3a2b 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -122,31 +122,15 @@ static int ntfs_extend_initialized_size(struct file *file,
bits = sbi->cluster_bits;
vcn = pos >> bits;
- err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
- NULL);
+ err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
+ false);
if (err)
goto out;
if (lcn == SPARSE_LCN) {
- loff_t vbo = (loff_t)vcn << bits;
- loff_t to = vbo + ((loff_t)clen << bits);
-
- if (to <= new_valid) {
- ni->i_valid = to;
- pos = to;
- goto next;
- }
-
- if (vbo < pos) {
- pos = vbo;
- } else {
- to = (new_valid >> bits) << bits;
- if (pos < to) {
- ni->i_valid = to;
- pos = to;
- goto next;
- }
- }
+ pos = ((loff_t)clen + vcn) << bits;
+ ni->i_valid = pos;
+ goto next;
}
}
@@ -196,18 +180,18 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
struct address_space *mapping = inode->i_mapping;
u32 blocksize = 1 << inode->i_blkbits;
pgoff_t idx = vbo >> PAGE_SHIFT;
- u32 z_start = vbo & (PAGE_SIZE - 1);
+ u32 from = vbo & (PAGE_SIZE - 1);
pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
loff_t page_off;
struct buffer_head *head, *bh;
- u32 bh_next, bh_off, z_end;
+ u32 bh_next, bh_off, to;
sector_t iblock;
struct page *page;
- for (; idx < idx_end; idx += 1, z_start = 0) {
+ for (; idx < idx_end; idx += 1, from = 0) {
page_off = (loff_t)idx << PAGE_SHIFT;
- z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
- : PAGE_SIZE;
+ to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
+ : PAGE_SIZE;
iblock = page_off >> inode->i_blkbits;
page = find_or_create_page(mapping, idx,
@@ -224,7 +208,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
do {
bh_next = bh_off + blocksize;
- if (bh_next <= z_start || bh_off >= z_end)
+ if (bh_next <= from || bh_off >= to)
continue;
if (!buffer_mapped(bh)) {
@@ -258,7 +242,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
} while (bh_off = bh_next, iblock += 1,
head != (bh = bh->b_this_page));
- zero_user_segment(page, z_start, z_end);
+ zero_user_segment(page, from, to);
unlock_page(page);
put_page(page);
@@ -270,81 +254,6 @@ out:
}
/*
- * ntfs_sparse_cluster - Helper function to zero a new allocated clusters.
- *
- * NOTE: 512 <= cluster size <= 2M
- */
-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
- CLST len)
-{
- struct address_space *mapping = inode->i_mapping;
- struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
- u64 vbo = (u64)vcn << sbi->cluster_bits;
- u64 bytes = (u64)len << sbi->cluster_bits;
- u32 blocksize = 1 << inode->i_blkbits;
- pgoff_t idx0 = page0 ? page0->index : -1;
- loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
- loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
- pgoff_t idx = vbo_clst >> PAGE_SHIFT;
- u32 from = vbo_clst & (PAGE_SIZE - 1);
- pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
- loff_t page_off;
- u32 to;
- bool partial;
- struct page *page;
-
- for (; idx < idx_end; idx += 1, from = 0) {
- page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
-
- if (!page)
- continue;
-
- page_off = (loff_t)idx << PAGE_SHIFT;
- to = (page_off + PAGE_SIZE) > end ? (end - page_off)
- : PAGE_SIZE;
- partial = false;
-
- if ((from || PAGE_SIZE != to) &&
- likely(!page_has_buffers(page))) {
- create_empty_buffers(page, blocksize, 0);
- }
-
- if (page_has_buffers(page)) {
- struct buffer_head *head, *bh;
- u32 bh_off = 0;
-
- bh = head = page_buffers(page);
- do {
- u32 bh_next = bh_off + blocksize;
-
- if (from <= bh_off && bh_next <= to) {
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- } else if (!buffer_uptodate(bh)) {
- partial = true;
- }
- bh_off = bh_next;
- } while (head != (bh = bh->b_this_page));
- }
-
- zero_user_segment(page, from, to);
-
- if (!partial) {
- if (!PageUptodate(page))
- SetPageUptodate(page);
- set_page_dirty(page);
- }
-
- if (idx != idx0) {
- unlock_page(page);
- put_page(page);
- }
- cond_resched();
- }
- mark_inode_dirty(inode);
-}
-
-/*
* ntfs_file_mmap - file_operations::mmap
*/
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
@@ -385,13 +294,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
for (; vcn < end; vcn += len) {
err = attr_data_get_block(ni, vcn, 1, &lcn,
- &len, &new);
+ &len, &new, true);
if (err)
goto out;
-
- if (!new)
- continue;
- ntfs_sparse_cluster(inode, NULL, vcn, 1);
}
}
@@ -432,7 +337,6 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
err = ntfs_set_size(inode, end);
if (err)
goto out;
- inode->i_size = end;
}
if (extend_init && !is_compressed(ni)) {
@@ -535,7 +439,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct ntfs_inode *ni = ntfs_i(inode);
loff_t end = vbo + len;
- loff_t vbo_down = round_down(vbo, PAGE_SIZE);
+ loff_t vbo_down = round_down(vbo, max_t(unsigned long,
+ sbi->cluster_size, PAGE_SIZE));
bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
loff_t i_size, new_size;
bool map_locked;
@@ -588,11 +493,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
u32 frame_size;
loff_t mask, vbo_a, end_a, tmp;
- err = filemap_write_and_wait_range(mapping, vbo, end - 1);
- if (err)
- goto out;
-
- err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ err = filemap_write_and_wait_range(mapping, vbo_down,
+ LLONG_MAX);
if (err)
goto out;
@@ -685,47 +587,45 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
if (err)
goto out;
- /*
- * Allocate clusters, do not change 'valid' size.
- */
- err = ntfs_set_size(inode, new_size);
- if (err)
- goto out;
+ if (new_size > i_size) {
+ /*
+ * Allocate clusters, do not change 'valid' size.
+ */
+ err = ntfs_set_size(inode, new_size);
+ if (err)
+ goto out;
+ }
if (is_supported_holes) {
- CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
CLST vcn = vbo >> sbi->cluster_bits;
CLST cend = bytes_to_cluster(sbi, end);
+ CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
CLST lcn, clen;
bool new;
+ if (cend_v > cend)
+ cend_v = cend;
+
/*
- * Allocate but do not zero new clusters. (see below comments)
- * This breaks security: One can read unused on-disk areas.
+ * Allocate and zero new clusters.
* Zeroing these clusters may be too long.
- * Maybe we should check here for root rights?
+ */
+ for (; vcn < cend_v; vcn += clen) {
+ err = attr_data_get_block(ni, vcn, cend_v - vcn,
+ &lcn, &clen, &new,
+ true);
+ if (err)
+ goto out;
+ }
+ /*
+ * Allocate but do not zero new clusters.
*/
for (; vcn < cend; vcn += clen) {
err = attr_data_get_block(ni, vcn, cend - vcn,
- &lcn, &clen, &new);
+ &lcn, &clen, &new,
+ false);
if (err)
goto out;
- if (!new || vcn >= vcn_v)
- continue;
-
- /*
- * Unwritten area.
- * NTFS is not able to store several unwritten areas.
- * Activate 'ntfs_sparse_cluster' to zero new allocated clusters.
- *
- * Dangerous in case:
- * 1G of sparsed clusters + 1 cluster of data =>
- * valid_size == 1G + 1 cluster
- * fallocate(1G) will zero 1G and this can be very long
- * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
- */
- ntfs_sparse_cluster(inode, NULL, vcn,
- min(vcn_v - vcn, clen));
}
}
@@ -736,6 +636,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
&ni->file.run, i_size, &ni->i_valid,
true, NULL);
ni_unlock(ni);
+ } else if (new_size > i_size) {
+ inode->i_size = new_size;
}
}
@@ -779,7 +681,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
goto out;
if (ia_valid & ATTR_SIZE) {
- loff_t oldsize = inode->i_size;
+ loff_t newsize, oldsize;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* Should never be here, see ntfs_file_open(). */
@@ -787,16 +689,19 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
goto out;
}
inode_dio_wait(inode);
+ oldsize = inode->i_size;
+ newsize = attr->ia_size;
- if (attr->ia_size <= oldsize)
- err = ntfs_truncate(inode, attr->ia_size);
- else if (attr->ia_size > oldsize)
- err = ntfs_extend(inode, attr->ia_size, 0, NULL);
+ if (newsize <= oldsize)
+ err = ntfs_truncate(inode, newsize);
+ else
+ err = ntfs_extend(inode, newsize, 0, NULL);
if (err)
goto out;
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ inode->i_size = newsize;
}
setattr_copy(mnt_userns, inode, attr);
@@ -946,8 +851,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
frame_vbo = valid & ~(frame_size - 1);
off = valid & (frame_size - 1);
- err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
- &clen, NULL);
+ err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
+ &clen, NULL, false);
if (err)
goto out;
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 381a38a06ec2..f1df52dfab74 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -557,7 +557,7 @@ static int ni_repack(struct ntfs_inode *ni)
}
if (!mi_p) {
- /* Do not try if not enogh free space. */
+ /* Do not try if not enough free space. */
if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
continue;
@@ -568,6 +568,12 @@ static int ni_repack(struct ntfs_inode *ni)
}
roff = le16_to_cpu(attr->nres.run_off);
+
+ if (roff > le32_to_cpu(attr->size)) {
+ err = -EINVAL;
+ break;
+ }
+
err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
Add2Ptr(attr, roff),
le32_to_cpu(attr->size) - roff);
@@ -1589,6 +1595,9 @@ int ni_delete_all(struct ntfs_inode *ni)
asize = le32_to_cpu(attr->size);
roff = le16_to_cpu(attr->nres.run_off);
+ if (roff > asize)
+ return -EINVAL;
+
/* run==1 means unpack and deallocate. */
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
Add2Ptr(attr, roff), asize - roff);
@@ -1636,6 +1645,7 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
{
struct ATTRIB *attr = NULL;
struct ATTR_FILE_NAME *fname;
+ struct le_str *fns;
if (le)
*le = NULL;
@@ -1659,8 +1669,8 @@ next:
if (uni->len != fname->name_len)
goto next;
- if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
- false))
+ fns = (struct le_str *)&fname->name_len;
+ if (ntfs_cmp_names_cpu(uni, fns, NULL, false))
goto next;
return fname;
@@ -2214,7 +2224,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
- &clen, &new);
+ &clen, &new, false);
if (err)
goto out;
}
@@ -2291,6 +2301,11 @@ remove_wof:
asize = le32_to_cpu(attr->size);
roff = le16_to_cpu(attr->nres.run_off);
+ if (roff > asize) {
+ err = -EINVAL;
+ goto out;
+ }
+
/*run==1 Means unpack and deallocate. */
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
Add2Ptr(attr, roff), asize - roff);
@@ -2997,6 +3012,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct NTFS_DE *de)
{
int err;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ATTRIB *attr;
struct ATTR_LIST_ENTRY *le;
struct mft_inode *mi;
@@ -3004,6 +3020,19 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
u16 de_key_size = le16_to_cpu(de->key_size);
+ if (sbi->options->windows_names &&
+ !valid_windows_name(sbi, (struct le_str *)&de_name->name_len))
+ return -EINVAL;
+
+ /* If the "hide_dot_files" option is set, update the hidden attribute for dot files. */
+ if (ni->mi.sbi->options->hide_dot_files) {
+ if (de_name->name_len > 0 &&
+ le16_to_cpu(de_name->name[0]) == '.')
+ ni->std_fa |= FILE_ATTRIBUTE_HIDDEN;
+ else
+ ni->std_fa &= ~FILE_ATTRIBUTE_HIDDEN;
+ }
+
mi_get_ref(&ni->mi, &de->ref);
mi_get_ref(&dir_ni->mi, &de_name->home);
@@ -3022,7 +3051,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);
/* Insert new name into directory. */
- err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
+ err = indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 0);
if (err)
ni_remove_attr_le(ni, attr, mi, le);
@@ -3265,6 +3294,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
modified = true;
}
+ /* std attribute is always in primary MFT record. */
if (modified)
ni->mi.dirty = true;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 0d611a6c5511..c6eb371a3695 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -1132,7 +1132,7 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
return -EINVAL;
if (!*buffer) {
- to_free = kmalloc(bytes, GFP_NOFS);
+ to_free = kmalloc(log->page_size, GFP_NOFS);
if (!to_free)
return -ENOMEM;
*buffer = to_free;
@@ -1180,10 +1180,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
struct restart_info *info)
{
u32 skip, vbo;
- struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);
-
- if (!r_page)
- return -ENOMEM;
+ struct RESTART_HDR *r_page = NULL;
/* Determine which restart area we are looking for. */
if (first) {
@@ -1197,7 +1194,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
/* Loop continuously until we succeed. */
for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
bool usa_error;
- u32 sys_page_size;
bool brst, bchk;
struct RESTART_AREA *ra;
@@ -1251,24 +1247,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
goto check_result;
}
- /* Read the entire restart area. */
- sys_page_size = le32_to_cpu(r_page->sys_page_size);
- if (DefaultLogPageSize != sys_page_size) {
- kfree(r_page);
- r_page = kzalloc(sys_page_size, GFP_NOFS);
- if (!r_page)
- return -ENOMEM;
-
- if (read_log_page(log, vbo,
- (struct RECORD_PAGE_HDR **)&r_page,
- &usa_error)) {
- /* Ignore any errors. */
- kfree(r_page);
- r_page = NULL;
- continue;
- }
- }
-
if (is_client_area_valid(r_page, usa_error)) {
info->valid_page = true;
ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
@@ -2727,6 +2705,9 @@ static inline bool check_attr(const struct MFT_REC *rec,
return false;
}
+ if (run_off > asize)
+ return false;
+
if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
Add2Ptr(attr, run_off), asize - run_off) < 0) {
return false;
@@ -3048,7 +3029,7 @@ static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
struct NEW_ATTRIBUTE_SIZES *new_sz;
struct ATTR_FILE_NAME *fname;
struct OpenAttr *oa, *oa2;
- u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
+ u32 nsize, t32, asize, used, esize, off, bits;
u16 id, id2;
u32 record_size = sbi->record_size;
u64 t64;
@@ -3635,30 +3616,28 @@ move_data:
break;
case SetBitsInNonresidentBitMap:
- bmp_off =
- le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
- bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
+ off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
+ bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
- if (cbo + (bmp_off + 7) / 8 > lco ||
- cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
+ if (cbo + (off + 7) / 8 > lco ||
+ cbo + ((off + bits + 7) / 8) > lco) {
goto dirty_vol;
}
- __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
+ ntfs_bitmap_set_le(Add2Ptr(buffer_le, roff), off, bits);
a_dirty = true;
break;
case ClearBitsInNonresidentBitMap:
- bmp_off =
- le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
- bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
+ off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
+ bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
- if (cbo + (bmp_off + 7) / 8 > lco ||
- cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
+ if (cbo + (off + 7) / 8 > lco ||
+ cbo + ((off + bits + 7) / 8) > lco) {
goto dirty_vol;
}
- __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
+ ntfs_bitmap_clear_le(Add2Ptr(buffer_le, roff), off, bits);
a_dirty = true;
break;
@@ -4771,6 +4750,12 @@ fake_attr:
u16 roff = le16_to_cpu(attr->nres.run_off);
CLST svcn = le64_to_cpu(attr->nres.svcn);
+ if (roff > t32) {
+ kfree(oa->attr);
+ oa->attr = NULL;
+ goto fake_attr;
+ }
+
err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
le64_to_cpu(attr->nres.evcn), svcn,
Add2Ptr(attr, roff), t32 - roff);
@@ -4839,8 +4824,7 @@ next_dirty_page_vcn:
goto out;
}
attr = oa->attr;
- t64 = le64_to_cpu(attr->nres.alloc_size);
- if (size > t64) {
+ if (size > le64_to_cpu(attr->nres.alloc_size)) {
attr->nres.valid_size = attr->nres.data_size =
attr->nres.alloc_size = cpu_to_le64(size);
}
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 4ed15f64b17f..567563771bf8 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -98,6 +98,30 @@ const __le16 WOF_NAME[17] = {
};
#endif
+static const __le16 CON_NAME[3] = {
+ cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
+};
+
+static const __le16 NUL_NAME[3] = {
+ cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
+};
+
+static const __le16 AUX_NAME[3] = {
+ cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
+};
+
+static const __le16 PRN_NAME[3] = {
+ cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
+};
+
+static const __le16 COM_NAME[3] = {
+ cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
+};
+
+static const __le16 LPT_NAME[3] = {
+ cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
+};
+
// clang-format on
/*
@@ -322,35 +346,6 @@ out:
}
/*
- * ntfs_query_def
- *
- * Return: Current ATTR_DEF_ENTRY for given attribute type.
- */
-const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
- enum ATTR_TYPE type)
-{
- int type_in = le32_to_cpu(type);
- size_t min_idx = 0;
- size_t max_idx = sbi->def_entries - 1;
-
- while (min_idx <= max_idx) {
- size_t i = min_idx + ((max_idx - min_idx) >> 1);
- const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
- int diff = le32_to_cpu(entry->type) - type_in;
-
- if (!diff)
- return entry;
- if (diff < 0)
- min_idx = i + 1;
- else if (i)
- max_idx = i - 1;
- else
- return NULL;
- }
- return NULL;
-}
-
-/*
* ntfs_look_for_free_space - Look for a free space in bitmap.
*/
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
@@ -449,6 +444,39 @@ up_write:
}
/*
+ * ntfs_check_for_free_space
+ *
+ * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
+ */
+bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
+{
+ size_t free, zlen, avail;
+ struct wnd_bitmap *wnd;
+
+ wnd = &sbi->used.bitmap;
+ down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+ free = wnd_zeroes(wnd);
+ zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
+ up_read(&wnd->rw_lock);
+
+ if (free < zlen + clen)
+ return false;
+
+ avail = free - (zlen + clen);
+
+ wnd = &sbi->mft.bitmap;
+ down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
+ free = wnd_zeroes(wnd);
+ zlen = wnd_zone_len(wnd);
+ up_read(&wnd->rw_lock);
+
+ if (free >= zlen + mlen)
+ return true;
+
+ return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
+}
+
+/*
* ntfs_extend_mft - Allocate additional MFT records.
*
* sbi->mft.bitmap is locked for write.
@@ -475,7 +503,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
struct ATTRIB *attr;
struct wnd_bitmap *wnd = &sbi->mft.bitmap;
- new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
+ new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
/* Step 1: Resize $MFT::DATA. */
@@ -618,13 +646,13 @@ next:
NULL, 0, NULL, NULL))
goto next;
- __clear_bit(ir - MFT_REC_RESERVED,
+ __clear_bit_le(ir - MFT_REC_RESERVED,
&sbi->mft.reserved_bitmap);
}
}
/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
- zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
+ zbit = find_next_zero_bit_le(&sbi->mft.reserved_bitmap,
MFT_REC_FREE, MFT_REC_RESERVED);
if (zbit >= MFT_REC_FREE) {
sbi->mft.next_reserved = MFT_REC_FREE;
@@ -692,7 +720,7 @@ found:
if (*rno >= MFT_REC_FREE)
wnd_set_used(wnd, *rno, 1);
else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
- __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
+ __set_bit_le(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
out:
if (!mft)
@@ -720,7 +748,7 @@ void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
else
wnd_set_free(wnd, rno, 1);
} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
- __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
+ __clear_bit_le(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
}
if (rno < wnd_zone_bit(wnd))
@@ -830,7 +858,6 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
return;
- err = 0;
bytes = sbi->mft.recs_mirr << sbi->record_bits;
block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
@@ -860,8 +887,7 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
put_bh(bh1);
bh1 = NULL;
- if (wait)
- err = sync_dirty_buffer(bh2);
+ err = wait ? sync_dirty_buffer(bh2) : 0;
put_bh(bh2);
if (err)
@@ -1849,9 +1875,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
goto out;
}
- root_sdh = resident_data(attr);
+ root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
if (root_sdh->type != ATTR_ZERO ||
- root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
+ root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
+ offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) {
err = -EINVAL;
goto out;
}
@@ -1867,9 +1894,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
goto out;
}
- root_sii = resident_data(attr);
+ root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
if (root_sii->type != ATTR_ZERO ||
- root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
+ root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
+ offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) {
err = -EINVAL;
goto out;
}
@@ -2502,3 +2530,83 @@ int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
return 0;
}
+
+static inline bool name_has_forbidden_chars(const struct le_str *fname)
+{
+ int i, ch;
+
+ /* check for forbidden chars */
+ for (i = 0; i < fname->len; ++i) {
+ ch = le16_to_cpu(fname->name[i]);
+
+ /* control chars */
+ if (ch < 0x20)
+ return true;
+
+ switch (ch) {
+ /* disallowed by Windows */
+ case '\\':
+ case '/':
+ case ':':
+ case '*':
+ case '?':
+ case '<':
+ case '>':
+ case '|':
+ case '\"':
+ return true;
+
+ default:
+ /* allowed char */
+ break;
+ }
+ }
+
+ /* file names cannot end with space or . */
+ if (fname->len > 0) {
+ ch = le16_to_cpu(fname->name[fname->len - 1]);
+ if (ch == ' ' || ch == '.')
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
+ const struct le_str *fname)
+{
+ int port_digit;
+ const __le16 *name = fname->name;
+ int len = fname->len;
+ u16 *upcase = sbi->upcase;
+
+ /* check for 3 chars reserved names (device names) */
+ /* name by itself or with any extension is forbidden */
+ if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
+ if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
+ !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
+ !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
+ !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
+ return true;
+
+ /* check for 4 chars reserved names (port name followed by 1..9) */
+ /* name by itself or with any extension is forbidden */
+ if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
+ port_digit = le16_to_cpu(name[3]);
+ if (port_digit >= '1' && port_digit <= '9')
+ if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) ||
+ !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * valid_windows_name - Check if a file name is valid in Windows.
+ */
+bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
+{
+ return !name_has_forbidden_chars(fname) &&
+ !is_reserved_name(sbi, fname);
+}
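Aside (not part of the patch): a runnable userspace analog of the windows_names checks added above, written for plain ASCII names; the kernel code compares UTF-16LE names via ntfs_cmp_names with the volume upcase table, so this helper and its names are illustrative only.

/* Userspace analog of the name rules: forbidden characters, trailing
 * space/dot, and reserved device names (alone or with any extension). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

static bool windows_name_ok(const char *name)
{
	static const char *dev3[] = { "CON", "NUL", "AUX", "PRN" };
	size_t len = strlen(name);

	if (!len || name[len - 1] == ' ' || name[len - 1] == '.')
		return false;		/* trailing space or dot */

	for (size_t i = 0; i < len; i++) {
		unsigned char c = name[i];

		if (c < 0x20 || strchr("\"*/:<>?\\|", c))
			return false;	/* forbidden character */
	}

	/* CON, NUL, AUX, PRN - by themselves or with any extension. */
	if (len == 3 || (len > 3 && name[3] == '.'))
		for (size_t i = 0; i < 4; i++)
			if (!strncasecmp(name, dev3[i], 3))
				return false;

	/* COM1-9 and LPT1-9 - by themselves or with any extension. */
	if ((len == 4 || (len > 4 && name[4] == '.')) &&
	    name[3] >= '1' && name[3] <= '9' &&
	    (!strncasecmp(name, "COM", 3) || !strncasecmp(name, "LPT", 3)))
		return false;

	return true;
}

int main(void)
{
	printf("%d %d %d %d\n", windows_name_ok("readme.txt"),
	       windows_name_ok("aux.log"), windows_name_ok("COM5.dat"),
	       windows_name_ok("trailing."));	/* expect: 1 0 0 0 */
	return 0;
}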
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 440328147e7e..51ab75954640 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -47,7 +47,7 @@ static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2,
if (l2 < fsize2)
return -1;
- both_case = f2->type != FILE_NAME_DOS /*&& !sbi->options.nocase*/;
+ both_case = f2->type != FILE_NAME_DOS && !sbi->options->nocase;
if (!l1) {
const struct le_str *s2 = (struct le_str *)&f2->name_len;
@@ -323,7 +323,7 @@ static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni,
if (err)
return err;
- __set_bit(bit - bbuf.bit, bbuf.buf);
+ __set_bit_le(bit - bbuf.bit, bbuf.buf);
bmp_buf_put(&bbuf, true);
@@ -343,7 +343,7 @@ static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni,
if (err)
return err;
- __clear_bit(bit - bbuf.bit, bbuf.buf);
+ __clear_bit_le(bit - bbuf.bit, bbuf.buf);
bmp_buf_put(&bbuf, true);
@@ -457,7 +457,7 @@ next_run:
static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret)
{
- size_t pos = find_next_zero_bit(buf, bits, bit);
+ size_t pos = find_next_zero_bit_le(buf, bits, bit);
if (pos >= bits)
return false;
@@ -489,7 +489,7 @@ static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
if (!b->non_res) {
u32 nbits = 8 * le32_to_cpu(b->res.data_size);
- size_t pos = find_next_zero_bit(resident_data(b), nbits, 0);
+ size_t pos = find_next_zero_bit_le(resident_data(b), nbits, 0);
if (pos < nbits)
*bit = pos;
@@ -505,7 +505,7 @@ static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret)
{
- size_t pos = find_next_bit(buf, bits, bit);
+ size_t pos = find_next_bit_le(buf, bits, bit);
if (pos >= bits)
return false;
@@ -536,7 +536,7 @@ int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit)
if (!b->non_res) {
u32 nbits = le32_to_cpu(b->res.data_size) * 8;
- size_t pos = find_next_bit(resident_data(b), nbits, from);
+ size_t pos = find_next_bit_le(resident_data(b), nbits, from);
if (pos < nbits)
*bit = pos;
@@ -605,11 +605,58 @@ static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
return e;
}
+/*
+ * index_hdr_check
+ *
+ * Return true if the INDEX_HDR is valid.
+ */
+static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
+{
+ u32 end = le32_to_cpu(hdr->used);
+ u32 tot = le32_to_cpu(hdr->total);
+ u32 off = le32_to_cpu(hdr->de_off);
+
+ if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
+ off + sizeof(struct NTFS_DE) > end) {
+ /* incorrect index buffer. */
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * index_buf_check
+ *
+ * Return true if the INDEX_BUFFER seems to be valid.
+ */
+static bool index_buf_check(const struct INDEX_BUFFER *ib, u32 bytes,
+ const CLST *vbn)
+{
+ const struct NTFS_RECORD_HEADER *rhdr = &ib->rhdr;
+ u16 fo = le16_to_cpu(rhdr->fix_off);
+ u16 fn = le16_to_cpu(rhdr->fix_num);
+
+ if (bytes <= offsetof(struct INDEX_BUFFER, ihdr) ||
+ rhdr->sign != NTFS_INDX_SIGNATURE ||
+ fo < sizeof(struct INDEX_BUFFER)
+ /* Check index buffer vbn. */
+ || (vbn && *vbn != le64_to_cpu(ib->vbn)) || (fo % sizeof(short)) ||
+ fo + fn * sizeof(short) >= bytes ||
+ fn != ((bytes >> SECTOR_SHIFT) + 1)) {
+ /* incorrect index buffer. */
+ return false;
+ }
+
+ return index_hdr_check(&ib->ihdr,
+ bytes - offsetof(struct INDEX_BUFFER, ihdr));
+}
+
void fnd_clear(struct ntfs_fnd *fnd)
{
int i;
- for (i = 0; i < fnd->level; i++) {
+ for (i = fnd->level - 1; i >= 0; i--) {
struct indx_node *n = fnd->nodes[i];
if (!n)
@@ -625,9 +672,8 @@ void fnd_clear(struct ntfs_fnd *fnd)
static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
struct NTFS_DE *e)
{
- int i;
+ int i = fnd->level;
- i = fnd->level;
if (i < 0 || i >= ARRAY_SIZE(fnd->nodes))
return -EINVAL;
fnd->nodes[i] = n;
@@ -820,9 +866,16 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
u32 t32;
const struct INDEX_ROOT *root = resident_data(attr);
+ t32 = le32_to_cpu(attr->res.data_size);
+ if (t32 <= offsetof(struct INDEX_ROOT, ihdr) ||
+ !index_hdr_check(&root->ihdr,
+ t32 - offsetof(struct INDEX_ROOT, ihdr))) {
+ goto out;
+ }
+
/* Check root fields. */
if (!root->index_block_clst)
- return -EINVAL;
+ goto out;
indx->type = type;
indx->idx2vbn_bits = __ffs(root->index_block_clst);
@@ -834,19 +887,19 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
if (t32 < sbi->cluster_size) {
/* Index record is smaller than a cluster, use 512 blocks. */
if (t32 != root->index_block_clst * SECTOR_SIZE)
- return -EINVAL;
+ goto out;
/* Check alignment to a cluster. */
if ((sbi->cluster_size >> SECTOR_SHIFT) &
(root->index_block_clst - 1)) {
- return -EINVAL;
+ goto out;
}
indx->vbn2vbo_bits = SECTOR_SHIFT;
} else {
/* Index record must be a multiple of cluster size. */
if (t32 != root->index_block_clst << sbi->cluster_bits)
- return -EINVAL;
+ goto out;
indx->vbn2vbo_bits = sbi->cluster_bits;
}
@@ -854,7 +907,14 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
init_rwsem(&indx->run_lock);
indx->cmp = get_cmp_func(root);
- return indx->cmp ? 0 : -EINVAL;
+ if (!indx->cmp)
+ goto out;
+
+ return 0;
+
+out:
+ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
+ return -EINVAL;
}
static struct indx_node *indx_new(struct ntfs_index *indx,
@@ -1012,11 +1072,24 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
goto out;
ok:
+ if (!index_buf_check(ib, bytes, &vbn)) {
+ ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ err = -EINVAL;
+ goto out;
+ }
+
if (err == -E_NTFS_FIXUP) {
ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
err = 0;
}
+	/* Check the index header length. */
+ if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) {
+ err = -EINVAL;
+ goto out;
+ }
+
in->index = ib;
*node = in;
@@ -1341,8 +1414,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
run_init(&run);
- err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
- NULL);
+ err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, ALLOCATE_DEF,
+ &alen, 0, NULL, NULL);
if (err)
goto out;
@@ -1440,6 +1513,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out1;
}
+ if (in->name == I30_NAME)
+ ni->vfs_inode.i_size = data_size;
+
*vbn = bit << indx->idx2vbn_bits;
return 0;
@@ -1593,9 +1669,9 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
if (err) {
/* Restore root. */
- if (mi_resize_attr(mi, attr, -ds_root))
+ if (mi_resize_attr(mi, attr, -ds_root)) {
memcpy(attr, a_root, asize);
- else {
+ } else {
/* Bug? */
ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
@@ -1947,7 +2023,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
if (bit >= nbits)
return 0;
- pos = find_next_bit(bm, nbits, bit);
+ pos = find_next_bit_le(bm, nbits, bit);
if (pos < nbits)
return 0;
} else {
@@ -1973,6 +2049,9 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
if (err)
return err;
+ if (in->name == I30_NAME)
+ ni->vfs_inode.i_size = new_data;
+
bpb = bitmap_size(bit);
if (bpb * 8 == nbits)
return 0;
@@ -2115,9 +2194,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
fnd->de[level] = e;
indx_write(indx, ni, n, 0);
- /* Check to see if this action created an empty leaf. */
- if (ib_is_leaf(ib) && ib_is_empty(ib))
+ if (ib_is_leaf(ib) && ib_is_empty(ib)) {
+ /* An empty leaf. */
return 0;
+ }
out:
fnd_clear(fnd);
@@ -2455,6 +2535,9 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
&indx->alloc_run, 0, NULL, false, NULL);
+ if (in->name == I30_NAME)
+ ni->vfs_inode.i_size = 0;
+
err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
false, NULL);
run_close(&indx->alloc_run);
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index d5a3afbbbfd8..20b953871574 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -81,7 +81,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
goto out;
} else if (!is_rec_inuse(rec)) {
- err = -EINVAL;
+ err = -ESTALE;
ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
goto out;
}
@@ -92,8 +92,10 @@ static struct inode *ntfs_read_mft(struct inode *inode,
goto out;
}
- if (!is_rec_base(rec))
- goto Ok;
+ if (!is_rec_base(rec)) {
+ err = -EINVAL;
+ goto out;
+ }
/* Record should contain $I30 root. */
is_dir = rec->flags & RECORD_FLAG_DIR;
@@ -129,6 +131,16 @@ next_attr:
rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
asize = le32_to_cpu(attr->size);
+ if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
+ goto out;
+
+ if (attr->non_res) {
+ t64 = le64_to_cpu(attr->nres.alloc_size);
+ if (le64_to_cpu(attr->nres.data_size) > t64 ||
+ le64_to_cpu(attr->nres.valid_size) > t64)
+ goto out;
+ }
+
switch (attr->type) {
case ATTR_STD:
if (attr->non_res ||
@@ -364,7 +376,13 @@ next_attr:
attr_unpack_run:
roff = le16_to_cpu(attr->nres.run_off);
+ if (roff > asize) {
+ err = -EINVAL;
+ goto out;
+ }
+
t64 = le64_to_cpu(attr->nres.svcn);
+
err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
t64, Add2Ptr(attr, roff), asize - roff);
if (err < 0)
@@ -450,7 +468,6 @@ end_enum:
inode->i_flags |= S_NOSEC;
}
-Ok:
if (ino == MFT_REC_MFT && !sb->s_root)
sbi->mft.ni = NULL;
@@ -504,6 +521,9 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
_ntfs_bad_inode(inode);
}
+ if (IS_ERR(inode) && name)
+ ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
+
return inode;
}
@@ -535,17 +555,6 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
clear_buffer_new(bh);
clear_buffer_uptodate(bh);
- /* Direct write uses 'create=0'. */
- if (!create && vbo >= ni->i_valid) {
- /* Out of valid. */
- return 0;
- }
-
- if (vbo >= inode->i_size) {
- /* Out of size. */
- return 0;
- }
-
if (is_resident(ni)) {
ni_lock(ni);
err = attr_data_read_resident(ni, page);
@@ -561,7 +570,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
off = vbo & sbi->cluster_mask;
new = false;
- err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
+ err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL,
+ create && sbi->cluster_size > PAGE_SIZE);
if (err)
goto out;
@@ -579,11 +589,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
WARN_ON(1);
}
- if (new) {
+ if (new)
set_buffer_new(bh);
- if ((len << cluster_bits) > block_size)
- ntfs_sparse_cluster(inode, page, vcn, len);
- }
lbo = ((u64)lcn << cluster_bits) + off;
@@ -611,7 +618,6 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
}
} else if (vbo >= valid) {
/* Read out of valid data. */
- /* Should never be here 'cause already checked. */
clear_buffer_mapped(bh);
} else if (vbo + bytes <= valid) {
/* Normal read. */
@@ -953,6 +959,11 @@ int ntfs_write_end(struct file *file, struct address_space *mapping,
dirty = true;
}
+ if (pos + err > inode->i_size) {
+ inode->i_size = pos + err;
+ dirty = true;
+ }
+
if (dirty)
mark_inode_dirty(inode);
}
@@ -1162,6 +1173,18 @@ out:
return ERR_PTR(err);
}
+/*
+ * ntfs_create_inode
+ *
+ * Helper function for:
+ * - ntfs_create
+ * - ntfs_mknod
+ * - ntfs_symlink
+ * - ntfs_mkdir
+ * - ntfs_atomic_open
+ *
+ * NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
+ */
struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
struct inode *dir, struct dentry *dentry,
const struct cpu_str *uni, umode_t mode,
@@ -1191,7 +1214,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
struct REPARSE_DATA_BUFFER *rp = NULL;
bool rp_inserted = false;
- ni_lock_dir(dir_ni);
+ if (!fnd)
+ ni_lock_dir(dir_ni);
dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
if (!dir_root) {
@@ -1254,6 +1278,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
fa = FILE_ATTRIBUTE_ARCHIVE;
}
+	/* If the "hide_dot_files" option is set, set the hidden attribute for dot files. */
+ if (sbi->options->hide_dot_files && name->name[0] == '.')
+ fa |= FILE_ATTRIBUTE_HIDDEN;
+
if (!(mode & 0222))
fa |= FILE_ATTRIBUTE_READONLY;
@@ -1339,6 +1367,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
mi_get_ref(&ni->mi, &new_de->ref);
fname = (struct ATTR_FILE_NAME *)(new_de + 1);
+
+ if (sbi->options->windows_names &&
+ !valid_windows_name(sbi, (struct le_str *)&fname->name_len)) {
+ err = -EINVAL;
+ goto out4;
+ }
+
mi_get_ref(&dir_ni->mi, &fname->home);
fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
fname->dup.a_time = std5->cr_time;
@@ -1502,8 +1537,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
cpu_to_le64(ntfs_up_cluster(sbi, nsize));
err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
- clst, NULL, 0, &alen, 0,
- NULL);
+ clst, NULL, ALLOCATE_DEF,
+ &alen, 0, NULL, NULL);
if (err)
goto out5;
@@ -1550,7 +1585,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
goto out6;
/* Unlock parent directory before ntfs_init_acl. */
- ni_unlock(dir_ni);
+ if (!fnd)
+ ni_unlock(dir_ni);
inode->i_generation = le16_to_cpu(rec->seq);
@@ -1610,7 +1646,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
out7:
/* Undo 'indx_insert_entry'. */
- ni_lock_dir(dir_ni);
+ if (!fnd)
+ ni_lock_dir(dir_ni);
indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
le16_to_cpu(new_de->key_size), sbi);
/* ni_unlock(dir_ni); will be called later. */
@@ -1619,10 +1656,8 @@ out6:
ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
out5:
- if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
- goto out4;
-
- run_deallocate(sbi, &ni->file.run, false);
+ if (!S_ISDIR(mode))
+ run_deallocate(sbi, &ni->file.run, false);
out4:
clear_rec_inuse(rec);
@@ -1638,7 +1673,8 @@ out2:
out1:
if (err) {
- ni_unlock(dir_ni);
+ if (!fnd)
+ ni_unlock(dir_ni);
return ERR_PTR(err);
}
@@ -1746,7 +1782,103 @@ void ntfs_evict_inode(struct inode *inode)
ni_clear(ntfs_i(inode));
}
-static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
+/*
+ * ntfs_translate_junction
+ *
+ * Translate a Windows junction target to the Linux equivalent.
+ * On junctions, targets are always absolute (they include the drive
+ * letter). We have no way of knowing whether the target is on the currently
+ * mounted device, so we just assume it is.
+ */
+static int ntfs_translate_junction(const struct super_block *sb,
+ const struct dentry *link_de, char *target,
+ int target_len, int target_max)
+{
+ int tl_len, err = target_len;
+ char *link_path_buffer = NULL, *link_path;
+ char *translated = NULL;
+ char *target_start;
+ int copy_len;
+
+ link_path_buffer = kmalloc(PATH_MAX, GFP_NOFS);
+ if (!link_path_buffer) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Get link path, relative to mount point */
+ link_path = dentry_path_raw(link_de, link_path_buffer, PATH_MAX);
+ if (IS_ERR(link_path)) {
+ ntfs_err(sb, "Error getting link path");
+ err = -EINVAL;
+ goto out;
+ }
+
+ translated = kmalloc(PATH_MAX, GFP_NOFS);
+ if (!translated) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Make translated path a relative path to mount point */
+ strcpy(translated, "./");
+ ++link_path; /* Skip leading / */
+ for (tl_len = sizeof("./") - 1; *link_path; ++link_path) {
+ if (*link_path == '/') {
+ if (PATH_MAX - tl_len < sizeof("../")) {
+ ntfs_err(sb,
+ "Link path %s has too many components",
+ link_path);
+ err = -EINVAL;
+ goto out;
+ }
+ strcpy(translated + tl_len, "../");
+ tl_len += sizeof("../") - 1;
+ }
+ }
+
+ /* Skip drive letter */
+ target_start = target;
+ while (*target_start && *target_start != ':')
+ ++target_start;
+
+ if (!*target_start) {
+ ntfs_err(sb, "Link target (%s) missing drive separator",
+ target);
+ err = -EINVAL;
+ goto out;
+ }
+
+	/* Skip the drive separator and the leading '/', if present. */
+ target_start += 1 + (target_start[1] == '/');
+ copy_len = target_len - (target_start - target);
+
+ if (PATH_MAX - tl_len <= copy_len) {
+ ntfs_err(sb, "Link target %s too large for buffer (%d <= %d)",
+ target_start, PATH_MAX - tl_len, copy_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+	/* The translated path has a trailing '/' and target_start does not. */
+ strcpy(translated + tl_len, target_start);
+ tl_len += copy_len;
+ if (target_max <= tl_len) {
+ ntfs_err(sb, "Target path %s too large for buffer (%d <= %d)",
+ translated, target_max, tl_len);
+ err = -EINVAL;
+ goto out;
+ }
+ strcpy(target, translated);
+ err = tl_len;
+
+out:
+ kfree(link_path_buffer);
+ kfree(translated);
+ return err;
+}
+
+static noinline int ntfs_readlink_hlp(const struct dentry *link_de,
+ struct inode *inode, char *buffer,
int buflen)
{
int i, err = -EINVAL;
@@ -1889,6 +2021,11 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
/* Always set last zero. */
buffer[err] = 0;
+
+ /* If this is a junction, translate the link target. */
+ if (rp->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
+ err = ntfs_translate_junction(sb, link_de, buffer, err, buflen);
+
out:
kfree(to_free);
return err;
@@ -1907,7 +2044,7 @@ static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
if (!ret)
return ERR_PTR(-ENOMEM);
- err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
+ err = ntfs_readlink_hlp(de, inode, ret, PAGE_SIZE);
if (err < 0) {
kfree(ret);
return ERR_PTR(err);
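A worked example of the junction translation performed by ntfs_translate_junction() above, as a stand-alone sketch: a link at "/dir1/dir2/link" (relative to the mount point) whose target is "C:/some/target" becomes "./../../some/target". The paths and the helper name are made up for illustration; the kernel version operates on the dentry path, uses PATH_MAX buffers and reports errors via ntfs_err().

#include <stdio.h>
#include <string.h>

/* Returns 0 on success, -1 on overflow or a missing drive separator. */
static int translate_junction(const char *link_path, char *target, size_t max)
{
	char out[4096] = "./";
	size_t len = 2;
	const char *p, *start;

	/* One "../" per path component between the mount point and the link. */
	for (p = link_path + 1; *p; p++) {
		if (*p == '/') {
			if (sizeof(out) - len < sizeof("../"))
				return -1;
			memcpy(out + len, "../", sizeof("../"));
			len += sizeof("../") - 1;
		}
	}

	/* Skip the drive letter, the ':' and an optional leading '/'. */
	start = strchr(target, ':');
	if (!start)
		return -1;
	start += 1 + (start[1] == '/');

	if (len + strlen(start) + 1 > sizeof(out) ||
	    len + strlen(start) + 1 > max)
		return -1;
	strcpy(out + len, start);
	strcpy(target, out);
	return 0;
}

int main(void)
{
	char target[4096] = "C:/some/target";

	if (!translate_junction("/dir1/dir2/link", target, sizeof(target)))
		printf("%s\n", target);	/* "./../../some/target" */
	return 0;
}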
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index 053cc0e0f8b5..c8db35e2ae17 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -7,6 +7,8 @@
#include <linux/fs.h>
#include <linux/nls.h>
+#include <linux/ctype.h>
+#include <linux/posix_acl.h>
#include "debug.h"
#include "ntfs.h"
@@ -303,6 +305,8 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
ni_lock_dir(dir_ni);
ni_lock(ni);
+ if (dir_ni != new_dir_ni)
+ ni_lock_dir2(new_dir_ni);
is_bad = false;
err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
@@ -326,6 +330,8 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
ntfs_sync_inode(inode);
}
+ if (dir_ni != new_dir_ni)
+ ni_unlock(new_dir_ni);
ni_unlock(ni);
ni_unlock(dir_ni);
out:
@@ -333,6 +339,104 @@ out:
return err;
}
+/*
+ * ntfs_atomic_open
+ *
+ * inode_operations::atomic_open
+ */
+static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, u32 flags, umode_t mode)
+{
+ int err;
+ struct inode *inode;
+ struct ntfs_fnd *fnd = NULL;
+ struct ntfs_inode *ni = ntfs_i(dir);
+ struct dentry *d = NULL;
+ struct cpu_str *uni = __getname();
+ bool locked = false;
+
+ if (!uni)
+ return -ENOMEM;
+
+ err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
+ dentry->d_name.len, uni, NTFS_NAME_LEN,
+ UTF16_HOST_ENDIAN);
+ if (err < 0)
+ goto out;
+
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+ if (IS_POSIXACL(dir)) {
+ /*
+		 * Load the current acl into the cache to avoid ni_lock(dir):
+ * ntfs_create_inode -> ntfs_init_acl -> posix_acl_create ->
+ * ntfs_get_acl -> ntfs_get_acl_ex -> ni_lock
+ */
+ struct posix_acl *p = get_inode_acl(dir, ACL_TYPE_DEFAULT);
+
+ if (IS_ERR(p)) {
+ err = PTR_ERR(p);
+ goto out;
+ }
+ posix_acl_release(p);
+ }
+#endif
+
+ if (d_in_lookup(dentry)) {
+ ni_lock_dir(ni);
+ locked = true;
+ fnd = fnd_get();
+ if (!fnd) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ d = d_splice_alias(dir_search_u(dir, uni, fnd), dentry);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+ d = NULL;
+ goto out2;
+ }
+
+ if (d)
+ dentry = d;
+ }
+
+ if (!(flags & O_CREAT) || d_really_is_positive(dentry)) {
+ err = finish_no_open(file, d);
+ goto out2;
+ }
+
+ file->f_mode |= FMODE_CREATED;
+
+ /*
+	 * fnd contains the tree path at which to insert.
+ * If fnd is not NULL then dir is locked.
+ */
+
+ /*
+	 * Unfortunately I don't know how to get the correct 'struct nameidata *nd'
+	 * or 'struct user_namespace *mnt_userns' here.
+	 * See atomic_open in fs/namei.c.
+	 * This is why xfstest/633 failed.
+	 * It looks like ntfs_atomic_open must accept 'struct user_namespace *mnt_userns'
+	 * as an argument.
+ */
+
+ inode = ntfs_create_inode(&init_user_ns, dir, dentry, uni, mode, 0,
+ NULL, 0, fnd);
+ err = IS_ERR(inode) ? PTR_ERR(inode)
+ : finish_open(file, dentry, ntfs_file_open);
+ dput(d);
+
+out2:
+ fnd_put(fnd);
+out1:
+ if (locked)
+ ni_unlock(ni);
+out:
+ __putname(uni);
+ return err;
+}
+
struct dentry *ntfs3_get_parent(struct dentry *child)
{
struct inode *inode = d_inode(child);
@@ -355,6 +459,133 @@ struct dentry *ntfs3_get_parent(struct dentry *child)
return ERR_PTR(-ENOENT);
}
+/*
+ * dentry_operations::d_hash
+ */
+static int ntfs_d_hash(const struct dentry *dentry, struct qstr *name)
+{
+ struct ntfs_sb_info *sbi;
+ const char *n = name->name;
+ unsigned int len = name->len;
+ unsigned long hash;
+ struct cpu_str *uni;
+ unsigned int c;
+ int err;
+
+ /* First try fast implementation. */
+ hash = init_name_hash(dentry);
+
+ for (;;) {
+ if (!len--) {
+ name->hash = end_name_hash(hash);
+ return 0;
+ }
+
+ c = *n++;
+ if (c >= 0x80)
+ break;
+
+ hash = partial_name_hash(toupper(c), hash);
+ }
+
+ /*
+	 * Try the slow way with the current upcase table.
+ */
+ uni = __getname();
+ if (!uni)
+ return -ENOMEM;
+
+ sbi = dentry->d_sb->s_fs_info;
+
+ err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
+ UTF16_HOST_ENDIAN);
+ if (err < 0)
+ goto out;
+
+ if (!err) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash = ntfs_names_hash(uni->name, uni->len, sbi->upcase,
+ init_name_hash(dentry));
+ name->hash = end_name_hash(hash);
+ err = 0;
+
+out:
+ __putname(uni);
+ return err;
+}
+
+/*
+ * dentry_operations::d_compare
+ */
+static int ntfs_d_compare(const struct dentry *dentry, unsigned int len1,
+ const char *str, const struct qstr *name)
+{
+ struct ntfs_sb_info *sbi;
+ int ret;
+ const char *n1 = str;
+ const char *n2 = name->name;
+ unsigned int len2 = name->len;
+ unsigned int lm = min(len1, len2);
+ unsigned char c1, c2;
+ struct cpu_str *uni1;
+ struct le_str *uni2;
+
+ /* First try fast implementation. */
+ for (;;) {
+ if (!lm--)
+ return len1 != len2;
+
+ if ((c1 = *n1++) == (c2 = *n2++))
+ continue;
+
+ if (c1 >= 0x80 || c2 >= 0x80)
+ break;
+
+ if (toupper(c1) != toupper(c2))
+ return 1;
+ }
+
+ /*
+	 * Try the slow way with the current upcase table.
+ */
+ sbi = dentry->d_sb->s_fs_info;
+ uni1 = __getname();
+ if (!uni1)
+ return -ENOMEM;
+
+ ret = ntfs_nls_to_utf16(sbi, str, len1, uni1, NTFS_NAME_LEN,
+ UTF16_HOST_ENDIAN);
+ if (ret < 0)
+ goto out;
+
+ if (!ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ uni2 = Add2Ptr(uni1, 2048);
+
+ ret = ntfs_nls_to_utf16(sbi, name->name, name->len,
+ (struct cpu_str *)uni2, NTFS_NAME_LEN,
+ UTF16_LITTLE_ENDIAN);
+ if (ret < 0)
+ goto out;
+
+ if (!ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = !ntfs_cmp_names_cpu(uni1, uni2, sbi->upcase, false) ? 0 : 1;
+
+out:
+ __putname(uni1);
+ return ret;
+}
+
// clang-format off
const struct inode_operations ntfs_dir_inode_operations = {
.lookup = ntfs_lookup,
@@ -372,6 +603,7 @@ const struct inode_operations ntfs_dir_inode_operations = {
.setattr = ntfs3_setattr,
.getattr = ntfs_getattr,
.listxattr = ntfs_listxattr,
+ .atomic_open = ntfs_atomic_open,
.fiemap = ntfs_fiemap,
};
@@ -382,4 +614,10 @@ const struct inode_operations ntfs_special_inode_operations = {
.get_inode_acl = ntfs_get_acl,
.set_acl = ntfs_set_acl,
};
+
+const struct dentry_operations ntfs_dentry_ops = {
+ .d_hash = ntfs_d_hash,
+ .d_compare = ntfs_d_compare,
+};
+
// clang-format on
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 9cc396b117bf..86ea1826d099 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -84,7 +84,6 @@ typedef u32 CLST;
#define COMPRESSION_UNIT 4
#define COMPRESS_MAX_CLUSTER 0x1000
-#define MFT_INCREASE_CHUNK 1024
enum RECORD_NUM {
MFT_REC_MFT = 0,
@@ -715,12 +714,13 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
{
u32 de_off = le32_to_cpu(hdr->de_off);
u32 used = le32_to_cpu(hdr->used);
- struct NTFS_DE *e = Add2Ptr(hdr, de_off);
+ struct NTFS_DE *e;
u16 esize;
- if (de_off >= used || de_off >= le32_to_cpu(hdr->total))
+	if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used)
return NULL;
+ e = Add2Ptr(hdr, de_off);
esize = le16_to_cpu(e->size);
if (esize < sizeof(struct NTFS_DE) || de_off + esize > used)
return NULL;
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index a4d292809a33..0e051c5595a2 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -97,9 +97,12 @@ struct ntfs_mount_options {
unsigned sparse : 1; /* Create sparse files. */
unsigned showmeta : 1; /* Show meta files. */
unsigned nohidden : 1; /* Do not show hidden files. */
+ unsigned hide_dot_files : 1; /* Set hidden flag on dot files. */
+ unsigned windows_names : 1; /* Disallow names forbidden by Windows. */
unsigned force : 1; /* RW mount dirty volume. */
unsigned noacsrules : 1; /* Exclude acs rules. */
unsigned prealloc : 1; /* Preallocate space when file is growing. */
+	unsigned nocase : 1; /* Case insensitive. */
};
/* Special value to unpack and deallocate. */
@@ -124,6 +127,7 @@ struct ntfs_buffers {
enum ALLOCATE_OPT {
ALLOCATE_DEF = 0, // Allocate all clusters.
ALLOCATE_MFT = 1, // Allocate for MFT.
+	ALLOCATE_ZERO = 2, // Zero out newly allocated clusters.
};
enum bitmap_mutex_classes {
@@ -195,6 +199,8 @@ struct ntfs_index {
/* Minimum MFT zone. */
#define NTFS_MIN_MFT_ZONE 100
+/* Step to increase the MFT. */
+#define NTFS_MFT_INCREASE_STEP 1024
/* Ntfs file system in-core superblock data. */
struct ntfs_sb_info {
@@ -330,6 +336,7 @@ enum ntfs_inode_mutex_lock_class {
NTFS_INODE_MUTEX_REPARSE,
NTFS_INODE_MUTEX_NORMAL,
NTFS_INODE_MUTEX_PARENT,
+ NTFS_INODE_MUTEX_PARENT2,
};
/*
@@ -412,7 +419,7 @@ enum REPARSE_SIGN {
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
- CLST *new_lcn);
+ CLST *new_lcn, CLST *new_len);
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
u64 new_size, struct runs_tree *run,
@@ -422,7 +429,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
u64 new_size, const u64 *new_valid, bool keep_prealloc,
struct ATTRIB **ret);
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
- CLST *len, bool *new);
+ CLST *len, bool *new, bool zero);
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
@@ -469,9 +476,9 @@ static inline size_t al_aligned(size_t size)
}
/* Globals from bitfunc.c */
-bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
-bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
-size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
+bool are_bits_clear(const void *map, size_t bit, size_t nbits);
+bool are_bits_set(const void *map, size_t bit, size_t nbits);
+size_t get_set_bits_ex(const void *map, size_t bit, size_t nbits);
/* Globals from dir.c */
int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
@@ -487,8 +494,6 @@ extern const struct file_operations ntfs_dir_operations;
/* Globals from file.c */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
- CLST len);
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr);
int ntfs_file_open(struct inode *inode, struct file *file);
@@ -582,11 +587,10 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
bool simple);
int ntfs_extend_init(struct ntfs_sb_info *sbi);
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi);
-const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
- enum ATTR_TYPE Type);
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
CLST *new_lcn, CLST *new_len,
enum ALLOCATE_OPT opt);
+bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen);
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
struct ntfs_inode *ni, struct mft_inode **mi);
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft);
@@ -643,6 +647,7 @@ int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
const struct MFT_REF *ref);
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
+bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *name);
/* Globals from index.c */
int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
@@ -720,6 +725,7 @@ struct dentry *ntfs3_get_parent(struct dentry *child);
extern const struct inode_operations ntfs_dir_inode_operations;
extern const struct inode_operations ntfs_special_inode_operations;
+extern const struct dentry_operations ntfs_dentry_ops;
/* Globals from record.c */
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
@@ -793,12 +799,12 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
u32 run_buf_size, CLST *packed_vcns);
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
- u32 run_buf_size);
+ int run_buf_size);
#ifdef NTFS3_CHECK_FREE_CLST
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
- u32 run_buf_size);
+ int run_buf_size);
#else
#define run_unpack_ex run_unpack
#endif
@@ -822,6 +828,8 @@ static inline size_t wnd_zeroes(const struct wnd_bitmap *wnd)
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits);
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
+int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
+ size_t *done);
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
@@ -834,11 +842,17 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
+void ntfs_bitmap_set_le(void *map, unsigned int start, int len);
+void ntfs_bitmap_clear_le(void *map, unsigned int start, int len);
+unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits);
+
/* Globals from upcase.c */
int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
const u16 *upcase, bool bothcase);
int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
const u16 *upcase, bool bothcase);
+unsigned long ntfs_names_hash(const u16 *name, size_t len, const u16 *upcase,
+ unsigned long hash);
/* globals from xattr.c */
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
@@ -1113,6 +1127,11 @@ static inline void ni_lock_dir(struct ntfs_inode *ni)
mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT);
}
+static inline void ni_lock_dir2(struct ntfs_inode *ni)
+{
+ mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT2);
+}
+
static inline void ni_unlock(struct ntfs_inode *ni)
{
mutex_unlock(&ni->ni_lock);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 7d2fac5ee215..defce6a5c8e1 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -220,6 +220,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return NULL;
}
+ if (off + asize < off) {
+ /* overflow check */
+ return NULL;
+ }
+
attr = Add2Ptr(attr, asize);
off += asize;
}
@@ -260,6 +265,10 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
if (t16 + t32 > asize)
return NULL;
+ t32 = sizeof(short) * attr->name_len;
+ if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+ return NULL;
+
return attr;
}
@@ -537,6 +546,10 @@ bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
return true;
}
+/*
+ * Pack runs into the MFT record.
+ * If it fails, the record is not changed.
+ */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
struct runs_tree *run, CLST len)
{
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index aaaa0d3d35a2..a5af71cd8d14 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -919,12 +919,15 @@ out:
*/
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
- u32 run_buf_size)
+ int run_buf_size)
{
u64 prev_lcn, vcn64, lcn, next_vcn;
const u8 *run_last, *run_0;
bool is_mft = ino == MFT_REC_MFT;
+ if (run_buf_size < 0)
+ return -EINVAL;
+
/* Check for empty. */
if (evcn + 1 == svcn)
return 0;
@@ -1046,7 +1049,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
*/
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
- u32 run_buf_size)
+ int run_buf_size)
{
int ret, err;
CLST next_vcn, lcn, len;
@@ -1093,25 +1096,8 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
if (down_write_trylock(&wnd->rw_lock)) {
/* Mark all zero bits as used in range [lcn, lcn+len). */
- CLST i, lcn_f = 0, len_f = 0;
-
- err = 0;
- for (i = 0; i < len; i++) {
- if (wnd_is_free(wnd, lcn + i, 1)) {
- if (!len_f)
- lcn_f = lcn + i;
- len_f += 1;
- } else if (len_f) {
- err = wnd_set_used(wnd, lcn_f, len_f);
- len_f = 0;
- if (err)
- break;
- }
- }
-
- if (len_f)
- err = wnd_set_used(wnd, lcn_f, len_f);
-
+ size_t done;
+ err = wnd_set_used_safe(wnd, lcn, len, &done);
up_write(&wnd->rw_lock);
if (err)
return err;
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 47012c9bf505..ef4ea3f21905 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -21,6 +21,30 @@
* https://docs.microsoft.com/en-us/windows/wsl/file-permissions
* It stores uid/gid/mode/dev in xattr
*
+ * ntfs allows up to 2^64 clusters per volume.
+ * This means a 64-bit lcn is needed to address the whole cluster range.
+ * The ntfs.sys implementation uses only 32-bit lcns.
+ * By default ntfs3 also uses 32-bit lcns.
+ * ntfs3 built with CONFIG_NTFS3_64BIT_CLUSTER (ntfs3_64) uses 64 bits per lcn.
+ *
+ * ntfs limits, cluster size is 4K (2^12):
+ * -----------------------------------------------------------------------------
+ * | Volume size  | Clusters | ntfs.sys | ntfs3 | ntfs3_64 | mkntfs | chkdsk |
+ * -----------------------------------------------------------------------------
+ * | < 16T, 2^44  |  < 2^32  |   yes    |  yes  |   yes    |  yes   |  yes   |
+ * | > 16T, 2^44  |  > 2^32  |   no     |  no   |   yes    |  yes   |  yes   |
+ * -----------------------------------------------------------------------------
+ *
+ * To mount larger volumes as ntfs, use a larger cluster size (up to 2M).
+ * The maximum volume size in this case is 2^32 * 2^21 = 2^53 = 8P.
+ *
+ * ntfs limits, cluster size is 2M (2^21):
+ * -----------------------------------------------------------------------------
+ * | Volume size  | Clusters | ntfs.sys | ntfs3 | ntfs3_64 | mkntfs | chkdsk |
+ * -----------------------------------------------------------------------------
+ * | < 8P, 2^53   |  < 2^32  |   yes    |  yes  |   yes    |  yes   |  yes   |
+ * | > 8P, 2^53   |  > 2^32  |   no     |  no   |   yes    |  yes   |  yes   |
+ * -----------------------------------------------------------------------------
+ *
*/
#include <linux/blkdev.h>
@@ -223,11 +247,14 @@ enum Opt {
Opt_force,
Opt_sparse,
Opt_nohidden,
+ Opt_hide_dot_files,
+ Opt_windows_names,
Opt_showmeta,
Opt_acl,
Opt_iocharset,
Opt_prealloc,
Opt_noacsrules,
+ Opt_nocase,
Opt_err,
};
@@ -242,10 +269,13 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag_no("force", Opt_force),
fsparam_flag_no("sparse", Opt_sparse),
fsparam_flag_no("hidden", Opt_nohidden),
+ fsparam_flag_no("hide_dot_files", Opt_hide_dot_files),
+ fsparam_flag_no("windows_names", Opt_windows_names),
fsparam_flag_no("acl", Opt_acl),
fsparam_flag_no("showmeta", Opt_showmeta),
fsparam_flag_no("prealloc", Opt_prealloc),
fsparam_flag_no("acsrules", Opt_noacsrules),
+ fsparam_flag_no("nocase", Opt_nocase),
fsparam_string("iocharset", Opt_iocharset),
{}
};
@@ -330,6 +360,12 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
case Opt_nohidden:
opts->nohidden = result.negated ? 1 : 0;
break;
+ case Opt_hide_dot_files:
+ opts->hide_dot_files = result.negated ? 0 : 1;
+ break;
+ case Opt_windows_names:
+ opts->windows_names = result.negated ? 0 : 1;
+ break;
case Opt_acl:
if (!result.negated)
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
@@ -354,6 +390,9 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
case Opt_noacsrules:
opts->noacsrules = result.negated ? 1 : 0;
break;
+ case Opt_nocase:
+ opts->nocase = result.negated ? 1 : 0;
+ break;
default:
/* Should not be here unless we forget add case. */
return -EINVAL;
@@ -406,27 +445,18 @@ static struct inode *ntfs_alloc_inode(struct super_block *sb)
return NULL;
memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));
-
mutex_init(&ni->ni_lock);
-
return &ni->vfs_inode;
}
-static void ntfs_i_callback(struct rcu_head *head)
+static void ntfs_free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
struct ntfs_inode *ni = ntfs_i(inode);
mutex_destroy(&ni->ni_lock);
-
kmem_cache_free(ntfs_inode_cachep, ni);
}
-static void ntfs_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, ntfs_i_callback);
-}
-
static void init_once(void *foo)
{
struct ntfs_inode *ni = foo;
@@ -519,9 +549,9 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",gid=%u",
from_kgid_munged(user_ns, opts->fs_gid));
if (opts->fmask)
- seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
+ seq_printf(m, ",fmask=%04o", opts->fs_fmask_inv ^ 0xffff);
if (opts->dmask)
- seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
+ seq_printf(m, ",dmask=%04o", opts->fs_dmask_inv ^ 0xffff);
if (opts->nls)
seq_printf(m, ",iocharset=%s", opts->nls->charset);
else
@@ -536,6 +566,10 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",showmeta");
if (opts->nohidden)
seq_puts(m, ",nohidden");
+ if (opts->windows_names)
+ seq_puts(m, ",windows_names");
+ if (opts->hide_dot_files)
+ seq_puts(m, ",hide_dot_files");
if (opts->force)
seq_puts(m, ",force");
if (opts->noacsrules)
@@ -592,7 +626,7 @@ static int ntfs_sync_fs(struct super_block *sb, int wait)
static const struct super_operations ntfs_sops = {
.alloc_inode = ntfs_alloc_inode,
- .destroy_inode = ntfs_destroy_inode,
+ .free_inode = ntfs_free_inode,
.evict_inode = ntfs_evict_inode,
.put_super = ntfs_put_super,
.statfs = ntfs_statfs,
@@ -672,7 +706,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
if (boot->sectors_per_clusters <= 0x80)
return boot->sectors_per_clusters;
if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
- return 1U << (0 - boot->sectors_per_clusters);
+ return 1U << -(s8)boot->sectors_per_clusters;
return -EINVAL;
}
@@ -789,7 +823,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
: (u32)boot->record_size
<< sbi->cluster_bits;
- if (record_size > MAXIMUM_BYTES_PER_MFT)
+ if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE)
goto out;
sbi->record_bits = blksize_bits(record_size);
@@ -896,7 +930,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct block_device *bdev = sb->s_bdev;
struct inode *inode;
struct ntfs_inode *ni;
- size_t i, tt;
+ size_t i, tt, bad_len, bad_frags;
CLST vcn, lcn, len;
struct ATTRIB *attr;
const struct VOLUME_INFO *info;
@@ -916,6 +950,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_export_op = &ntfs_export_ops;
sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
sb->s_xattr = ntfs_xattr_handlers;
+ sb->s_d_op = sbi->options->nocase ? &ntfs_dentry_ops : NULL;
sbi->options->nls = ntfs_load_nls(sbi->options->nls_name);
if (IS_ERR(sbi->options->nls)) {
@@ -1065,30 +1100,6 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->mft.ni = ni;
- /* Load $BadClus. */
- ref.low = cpu_to_le32(MFT_REC_BADCLUST);
- ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
- inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
- if (IS_ERR(inode)) {
- ntfs_err(sb, "Failed to load $BadClus.");
- err = PTR_ERR(inode);
- goto out;
- }
-
- ni = ntfs_i(inode);
-
- for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
- if (lcn == SPARSE_LCN)
- continue;
-
- if (!sbi->bad_clusters)
- ntfs_notice(sb, "Volume contains bad blocks");
-
- sbi->bad_clusters += len;
- }
-
- iput(inode);
-
/* Load $Bitmap. */
ref.low = cpu_to_le32(MFT_REC_BITMAP);
ref.seq = cpu_to_le16(MFT_REC_BITMAP);
@@ -1126,6 +1137,44 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
goto out;
+ /* Load $BadClus. */
+ ref.low = cpu_to_le32(MFT_REC_BADCLUST);
+ ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
+ inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ ntfs_err(sb, "Failed to load $BadClus (%d).", err);
+ goto out;
+ }
+
+ ni = ntfs_i(inode);
+ bad_len = bad_frags = 0;
+ for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
+ if (lcn == SPARSE_LCN)
+ continue;
+
+ bad_len += len;
+ bad_frags += 1;
+ if (sb_rdonly(sb))
+ continue;
+
+ if (wnd_set_used_safe(&sbi->used.bitmap, lcn, len, &tt) || tt) {
+ /* Bad blocks marked as free in bitmap. */
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
+ }
+ if (bad_len) {
+ /*
+ * Notice about bad blocks.
+		 * In normal cases these blocks are marked as used in the bitmap,
+		 * and we never allocate space from them.
+ */
+ ntfs_notice(sb,
+ "Volume contains %zu bad blocks in %zu fragments.",
+ bad_len, bad_frags);
+ }
+ iput(inode);
+
/* Load $AttrDef. */
ref.low = cpu_to_le32(MFT_REC_ATTR);
ref.seq = cpu_to_le16(MFT_REC_ATTR);
@@ -1141,7 +1190,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto put_inode_out;
}
bytes = inode->i_size;
- sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
+ sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
if (!t) {
err = -ENOMEM;
goto put_inode_out;
@@ -1260,9 +1309,9 @@ load_root:
ref.low = cpu_to_le32(MFT_REC_ROOT);
ref.seq = cpu_to_le16(MFT_REC_ROOT);
inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
- if (IS_ERR(inode)) {
+ if (IS_ERR(inode) || !inode->i_op) {
ntfs_err(sb, "Failed to load root.");
- err = PTR_ERR(inode);
+ err = IS_ERR(inode) ? PTR_ERR(inode) : -EINVAL;
goto out;
}
@@ -1281,6 +1330,7 @@ out:
* Free resources here.
* ntfs_fs_free will be called with fc->s_fs_info = NULL
*/
+ put_mount_options(sbi->options);
put_ntfs(sbi);
sb->s_fs_info = NULL;
@@ -1488,11 +1538,8 @@ out1:
static void __exit exit_ntfs_fs(void)
{
- if (ntfs_inode_cachep) {
- rcu_barrier();
- kmem_cache_destroy(ntfs_inode_cachep);
- }
-
+ rcu_barrier();
+ kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
ntfs3_exit_bitmap();
}
diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
index b5e8256fd710..7681eefacb4b 100644
--- a/fs/ntfs3/upcase.c
+++ b/fs/ntfs3/upcase.c
@@ -102,3 +102,15 @@ case_insentive:
diff2 = l1 - l2;
return diff2 ? diff2 : diff1;
}
+
+/* Helper function for ntfs_d_hash. */
+unsigned long ntfs_names_hash(const u16 *name, size_t len, const u16 *upcase,
+ unsigned long hash)
+{
+ while (len--) {
+ unsigned int c = upcase_unicode_char(upcase, *name++);
+ hash = partial_name_hash(c, hash);
+ }
+
+ return hash;
+}
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index aafe98ee0b21..616df209feea 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -15,9 +15,10 @@
#include "ntfs_fs.h"
// clang-format off
-#define SYSTEM_DOS_ATTRIB "system.dos_attrib"
-#define SYSTEM_NTFS_ATTRIB "system.ntfs_attrib"
-#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
+#define SYSTEM_DOS_ATTRIB "system.dos_attrib"
+#define SYSTEM_NTFS_ATTRIB "system.ntfs_attrib"
+#define SYSTEM_NTFS_ATTRIB_BE "system.ntfs_attrib_be"
+#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
// clang-format on
static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
@@ -42,28 +43,26 @@ static inline size_t packed_ea_size(const struct EA_FULL *ea)
* Assume there is at least one xattr in the list.
*/
static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
- const char *name, u8 name_len, u32 *off)
+ const char *name, u8 name_len, u32 *off, u32 *ea_sz)
{
- *off = 0;
+ u32 ea_size;
- if (!ea_all || !bytes)
+ *off = 0;
+ if (!ea_all)
return false;
- for (;;) {
+ for (; *off < bytes; *off += ea_size) {
const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
- u32 next_off = *off + unpacked_ea_size(ea);
-
- if (next_off > bytes)
- return false;
-
+ ea_size = unpacked_ea_size(ea);
if (ea->name_len == name_len &&
- !memcmp(ea->name, name, name_len))
+ !memcmp(ea->name, name, name_len)) {
+ if (ea_sz)
+ *ea_sz = ea_size;
return true;
-
- *off = next_off;
- if (next_off >= bytes)
- return false;
+ }
}
+
+ return false;
}
/*
@@ -74,12 +73,12 @@ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
size_t add_bytes, const struct EA_INFO **info)
{
- int err;
+ int err = -EINVAL;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ATTR_LIST_ENTRY *le = NULL;
struct ATTRIB *attr_info, *attr_ea;
void *ea_p;
- u32 size;
+ u32 size, off, ea_size;
static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
@@ -96,24 +95,31 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
*info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
if (!*info)
- return -EINVAL;
+ goto out;
/* Check Ea limit. */
size = le32_to_cpu((*info)->size);
- if (size > sbi->ea_max_size)
- return -EFBIG;
+ if (size > sbi->ea_max_size) {
+ err = -EFBIG;
+ goto out;
+ }
- if (attr_size(attr_ea) > sbi->ea_max_size)
- return -EFBIG;
+ if (attr_size(attr_ea) > sbi->ea_max_size) {
+ err = -EFBIG;
+ goto out;
+ }
+
+ if (!size) {
+		/* EA info persists, but the xattr is empty. Looks like an EA problem. */
+ goto out;
+ }
/* Allocate memory for packed Ea. */
- ea_p = kmalloc(size + add_bytes, GFP_NOFS);
+ ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS);
if (!ea_p)
return -ENOMEM;
- if (!size) {
- /* EA info persists, but xattr is empty. Looks like EA problem. */
- } else if (attr_ea->non_res) {
+ if (attr_ea->non_res) {
struct runs_tree run;
run_init(&run);
@@ -124,24 +130,52 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
run_close(&run);
if (err)
- goto out;
+ goto out1;
} else {
void *p = resident_data_ex(attr_ea, size);
- if (!p) {
- err = -EINVAL;
- goto out;
- }
+ if (!p)
+ goto out1;
memcpy(ea_p, p, size);
}
memset(Add2Ptr(ea_p, size), 0, add_bytes);
+
+ /* Check all attributes for consistency. */
+ for (off = 0; off < size; off += ea_size) {
+ const struct EA_FULL *ef = Add2Ptr(ea_p, off);
+ u32 bytes = size - off;
+
+ /* Check if we can use field ea->size. */
+		/* Check if we can use the field ef->size. */
+ goto out1;
+
+ if (ef->size) {
+ ea_size = le32_to_cpu(ef->size);
+ if (ea_size > bytes)
+ goto out1;
+ continue;
+ }
+
+ /* Check if we can use fields ef->name_len and ef->elength. */
+ if (bytes < offsetof(struct EA_FULL, name))
+ goto out1;
+
+ ea_size = ALIGN(struct_size(ef, name,
+ 1 + ef->name_len +
+ le16_to_cpu(ef->elength)),
+ 4);
+ if (ea_size > bytes)
+ goto out1;
+ }
+
*ea = ea_p;
return 0;
-out:
+out1:
kfree(ea_p);
- *ea = NULL;
+out:
+ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
return err;
}
@@ -163,6 +197,7 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
const struct EA_FULL *ea;
u32 off, size;
int err;
+ int ea_size;
size_t ret;
err = ntfs_read_ea(ni, &ea_all, 0, &info);
@@ -175,8 +210,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
size = le32_to_cpu(info->size);
/* Enumerate all xattrs. */
- for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
+ for (ret = 0, off = 0; off < size; off += ea_size) {
ea = Add2Ptr(ea_all, off);
+ ea_size = unpacked_ea_size(ea);
if (buffer) {
if (ret + ea->name_len + 1 > bytes_per_buffer) {
@@ -227,7 +263,8 @@ static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
goto out;
/* Enumerate all xattrs. */
- if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
+ if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off,
+ NULL)) {
err = -ENODATA;
goto out;
}
@@ -269,7 +306,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
struct EA_FULL *new_ea;
struct EA_FULL *ea_all = NULL;
size_t add, new_pack;
- u32 off, size;
+ u32 off, size, ea_sz;
__le16 size_pack;
struct ATTRIB *attr;
struct ATTR_LIST_ENTRY *le;
@@ -304,9 +341,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
size_pack = ea_info.size_pack;
}
- if (info && find_ea(ea_all, size, name, name_len, &off)) {
+ if (info && find_ea(ea_all, size, name, name_len, &off, &ea_sz)) {
struct EA_FULL *ea;
- size_t ea_sz;
if (flags & XATTR_CREATE) {
err = -EEXIST;
@@ -329,8 +365,6 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
if (ea->flags & FILE_NEED_EA)
le16_add_cpu(&ea_info.count, -1);
- ea_sz = unpacked_ea_size(ea);
-
le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
@@ -604,10 +638,9 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
err = 0; /* Removing non existed xattr. */
if (!err) {
set_cached_acl(inode, type, acl);
- if (inode->i_mode != mode) {
- inode->i_mode = mode;
- mark_inode_dirty(inode);
- }
+ inode->i_mode = mode;
+ inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
}
out:
@@ -721,11 +754,9 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
{
int err;
struct ntfs_inode *ni = ntfs_i(inode);
- size_t name_len = strlen(name);
/* Dispatch request. */
- if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
- !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
+ if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
/* system.dos_attrib */
if (!buffer) {
err = sizeof(u8);
@@ -738,8 +769,8 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
goto out;
}
- if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
- !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
+ if (!strcmp(name, SYSTEM_NTFS_ATTRIB) ||
+ !strcmp(name, SYSTEM_NTFS_ATTRIB_BE)) {
/* system.ntfs_attrib */
if (!buffer) {
err = sizeof(u32);
@@ -748,12 +779,13 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
} else {
err = sizeof(u32);
*(u32 *)buffer = le32_to_cpu(ni->std_fa);
+ if (!strcmp(name, SYSTEM_NTFS_ATTRIB_BE))
+ *(u32 *)buffer = cpu_to_be32(*(u32 *)buffer);
}
goto out;
}
- if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
- !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
+ if (!strcmp(name, SYSTEM_NTFS_SECURITY)) {
/* system.ntfs_security*/
struct SECURITY_DESCRIPTOR_RELATIVE *sd = NULL;
size_t sd_size = 0;
@@ -793,7 +825,7 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
}
/* Deal with NTFS extended attribute. */
- err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
+ err = ntfs_get_ea(inode, name, strlen(name), buffer, size, NULL);
out:
return err;
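For reference, a small user-space sketch of the new system.ntfs_attrib_be xattr handled above. The value is always big-endian regardless of host endianness; "file.txt" is a made-up path that must live on an ntfs3 mount, and FILE_ATTRIBUTE_HIDDEN (0x2) is the NTFS hidden bit.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(), htonl() */
#include <sys/types.h>
#include <sys/xattr.h>

#define FILE_ATTRIBUTE_HIDDEN 0x00000002u

int main(void)
{
	uint32_t be, attrib;

	if (getxattr("file.txt", "system.ntfs_attrib_be", &be, sizeof(be)) !=
	    (ssize_t)sizeof(be)) {
		perror("getxattr");
		return 1;
	}
	attrib = ntohl(be);		/* convert from big-endian */
	printf("attrib = 0x%08x\n", attrib);

	attrib |= FILE_ATTRIBUTE_HIDDEN;
	be = htonl(attrib);		/* write back as big-endian */
	if (setxattr("file.txt", "system.ntfs_attrib_be", &be, sizeof(be), 0)) {
		perror("setxattr");
		return 1;
	}
	return 0;
}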
@@ -810,23 +842,24 @@ static noinline int ntfs_setxattr(const struct xattr_handler *handler,
{
int err = -EINVAL;
struct ntfs_inode *ni = ntfs_i(inode);
- size_t name_len = strlen(name);
enum FILE_ATTRIBUTE new_fa;
/* Dispatch request. */
- if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
- !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
+ if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
if (sizeof(u8) != size)
goto out;
new_fa = cpu_to_le32(*(u8 *)value);
goto set_new_fa;
}
- if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
- !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
+ if (!strcmp(name, SYSTEM_NTFS_ATTRIB) ||
+ !strcmp(name, SYSTEM_NTFS_ATTRIB_BE)) {
if (size != sizeof(u32))
goto out;
- new_fa = cpu_to_le32(*(u32 *)value);
+ if (!strcmp(name, SYSTEM_NTFS_ATTRIB_BE))
+ new_fa = cpu_to_le32(be32_to_cpu(*(u32 *)value));
+ else
+ new_fa = cpu_to_le32(*(u32 *)value);
if (S_ISREG(inode->i_mode)) {
/* Process compressed/sparsed in special way. */
@@ -861,8 +894,7 @@ set_new_fa:
goto out;
}
- if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
- !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
+ if (!strcmp(name, SYSTEM_NTFS_SECURITY)) {
/* system.ntfs_security*/
__le32 security_id;
bool inserted;
@@ -905,7 +937,7 @@ set_new_fa:
}
/* Deal with NTFS extended attribute. */
- err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
+ err = ntfs_set_ea(inode, name, strlen(name), value, size, flags, 0);
out:
inode->i_ctime = current_time(inode);