From ebeb0406f153db51ab2d4771faf2342bd6ca14dd Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 12 Nov 2008 07:48:00 +0900 Subject: fat: drop negative dentry on rename() path Drop the negative dentry on rename() path, in order to make sure to use the case sensitive name which is specified by user if this is for creation. For it, this uses newly added LOOKUP_RENAME_TARGET like LOOKUP_CREATE. Signed-off-by: OGAWA Hirofumi --- fs/fat/namei_vfat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index bf326d4356a3..8ae32e37673c 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -78,7 +78,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) * for creation. */ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { - if (nd->flags & LOOKUP_CREATE) + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; } -- cgit From 985eafcc5480b0d98419b96869f2560abb2764c7 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 12 Nov 2008 07:48:01 +0900 Subject: fat: fix duplicate addition of ->llseek handler Signed-off-by: OGAWA Hirofumi --- fs/fat/dir.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 67e058357098..3a7f603b6982 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -841,7 +841,6 @@ const struct file_operations fat_dir_operations = { .compat_ioctl = fat_compat_dir_ioctl, #endif .fsync = file_fsync, - .llseek = generic_file_llseek, }; static int fat_get_short_entry(struct inode *dir, loff_t *pos, -- cgit From 5a6bb10393eb9a1985e97af12f0cb2906bcbf1af Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Nov 2008 07:48:01 +0900 Subject: fat: make sure to set d_ops in fat_get_parent fat_get_parent needs to setup the dentry operations, otherwise we might lose them when the NFS server needs to reconnect out of cache inodes. Signed-off-by: Christoph Hellwig Signed-off-by: OGAWA Hirofumi --- fs/fat/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/fat/inode.c b/fs/fat/inode.c index bdd8fb7be2ca..37a8af159a13 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -749,6 +749,8 @@ static struct dentry *fat_get_parent(struct dentry *child) brelse(bh); parent = d_obtain_alias(inode); + if (!IS_ERR(parent)) + parent->d_op = sb->s_root->d_op; out: unlock_super(sb); -- cgit From 062e4fee4400f283307cf8ac1b7931c939010229 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 26 Oct 2008 16:58:25 +0200 Subject: UBIFS: slight compression optimization If data does not compress, it is better to leave it uncompressed because we'll read it faster then. So do not compress data if we save less than 64 bytes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 6 +++--- fs/ubifs/ubifs-media.h | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index a0ada596b17c..6414d50780e1 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -119,10 +119,10 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, } /* - * Presently, we just require that compression results in less data, - * rather than any defined minimum compression ratio or amount. + * If the data compressed only slightly, it is better to leave it + * uncompressed to improve read speed. 
*/ - if (ALIGN(*out_len, 8) >= ALIGN(in_len, 8)) + if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) goto no_compr; return; diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index 0b378042a3a2..b25fc36cf72f 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h @@ -51,6 +51,13 @@ */ #define UBIFS_MIN_COMPR_LEN 128 +/* + * If compressed data length is less than %UBIFS_MIN_COMPRESS_DIFF bytes + * shorter than uncompressed data length, UBIFS prefers to leave this data + * node uncompressed, because it'll be read faster. + */ +#define UBIFS_MIN_COMPRESS_DIFF 64 + /* Root inode number */ #define UBIFS_ROOT_INO 1 -- cgit From a1dc080c27ec8ea7ca1c8a9b499362a71ebff792 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 1 Nov 2008 14:20:50 +0200 Subject: UBIFS: use bit-fields to store compression type Save 4 bytes of RAM per 'struct inode' by storing the inode compression type in a bit-field, instead of using an 'int'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 8 ++++++++ fs/ubifs/ubifs.h | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index d80b2aef42b6..21b4103271ec 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2020,6 +2020,14 @@ static int __init ubifs_init(void) BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160); BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64); + /* + * We use 2 bit wide bit-fields to store compression type, which should + * be amended if more compressors are added. The bit-fields are: + * @compr_type in 'struct ubifs_inode' and @default_compr in + * 'struct ubifs_info'. + */ + BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); + /* * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
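To see where the 4 bytes per inode come from before the ubifs.h hunk below, here is a stand-alone illustration (generic struct names, not UBIFS code): adjacent bit-fields share one allocation unit, so a 2-bit compression type packs next to the existing one-bit flags instead of forcing a separate 'int', and 2 bits are enough for the UBIFS_COMPR_TYPES_CNT <= 4 values that the BUILD_BUG_ON() above asserts.

#include <stdio.h>

struct with_int {				/* roughly the old layout */
	unsigned int dirty:1, xattr:1, bulk_read:1;
	int compr_type;				/* separate 4-byte member */
};

struct with_bitfield {				/* roughly the new layout */
	unsigned int dirty:1, xattr:1, bulk_read:1;
	unsigned int compr_type:2;		/* packs into the same word, values 0..3 */
};

int main(void)
{
	printf("%zu vs %zu bytes\n",
	       sizeof(struct with_int), sizeof(struct with_bitfield));
	return 0;
}

On common ABIs this prints "8 vs 4 bytes", which is the per-inode saving described above.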
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 46b172560a06..4d76aba57ee1 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -386,12 +386,12 @@ struct ubifs_inode { unsigned int dirty:1; unsigned int xattr:1; unsigned int bulk_read:1; + unsigned int compr_type:2; struct mutex ui_mutex; spinlock_t ui_lock; loff_t synced_i_size; loff_t ui_size; int flags; - int compr_type; pgoff_t last_page_read; pgoff_t read_in_a_row; int data_len; @@ -946,6 +946,7 @@ struct ubifs_mount_opts { * @no_chk_data_crc: do not check CRCs when reading data nodes (except during * recovery) * @bulk_read: enable bulk-reads + * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) * * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and * @calc_idx_sz @@ -986,7 +987,6 @@ struct ubifs_mount_opts { * @main_lebs: count of LEBs in the main area * @main_first: first LEB of the main area * @main_bytes: main area size in bytes - * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) * * @key_hash_type: type of the key hash * @key_hash: direntry key hash function @@ -1196,6 +1196,7 @@ struct ubifs_info { unsigned int big_lpt:1; unsigned int no_chk_data_crc:1; unsigned int bulk_read:1; + unsigned int default_compr:2; struct mutex tnc_mutex; struct ubifs_zbranch zroot; @@ -1237,7 +1238,6 @@ struct ubifs_info { int main_lebs; int main_first; long long main_bytes; - int default_compr; uint8_t key_hash_type; uint32_t (*key_hash)(const char *str, int len); -- cgit From 553dea4dd531562688ba01c641c7f8fc7abaaf8c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 1 Nov 2008 14:57:49 +0200 Subject: UBIFS: introduce compression mount options It is very handy to be able to change default UBIFS compressor via mount options. Introduce -o compr= mount option support. Currently only "none", "lzo" and "zlib" compressors are supported. 
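For example, a volume could then be mounted with something like "mount -t ubifs -o compr=zlib ubi0:rootfs /mnt", which overrides the compressor recorded in the superblock for data written through that mount. The option parsing added to ubifs_parse_options() below amounts to a small name-to-constant lookup; the following stand-alone sketch only mirrors that mapping (the UBIFS_COMPR_* names come from the UBIFS headers, everything else here is illustrative):

#include <string.h>

enum { UBIFS_COMPR_NONE, UBIFS_COMPR_LZO, UBIFS_COMPR_ZLIB };

/* Map a "compr=<name>" value to a compressor constant, -1 if unknown. */
int compr_name_to_type(const char *name)
{
	if (!strcmp(name, "none"))
		return UBIFS_COMPR_NONE;
	if (!strcmp(name, "lzo"))
		return UBIFS_COMPR_LZO;
	if (!strcmp(name, "zlib"))
		return UBIFS_COMPR_ZLIB;
	return -1;	/* the real parser prints an error and fails the mount */
}

An unknown compressor name makes the mount fail with -EINVAL, and a selected compressor that is not compiled in now makes mounting fail as well (see the super.c hunk below).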
Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 6 +++--- fs/ubifs/sb.c | 10 ++++++---- fs/ubifs/super.c | 44 +++++++++++++++++++++++++++++++++++++------- fs/ubifs/ubifs.h | 12 ++++++++++-- 4 files changed, 56 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 6414d50780e1..4afb3ea24d44 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -33,7 +33,7 @@ /* Fake description object for the "none" compressor */ static struct ubifs_compressor none_compr = { .compr_type = UBIFS_COMPR_NONE, - .name = "no compression", + .name = "none", .capi_name = "", }; @@ -43,13 +43,13 @@ static DEFINE_MUTEX(lzo_mutex); static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .comp_mutex = &lzo_mutex, - .name = "LZO", + .name = "lzo", .capi_name = "lzo", }; #else static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, - .name = "LZO", + .name = "lzo", }; #endif diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 0f392351dc5a..c5da201ab54f 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -179,8 +179,11 @@ static int create_default_filesystem(struct ubifs_info *c) sup->fanout = cpu_to_le32(DEFAULT_FANOUT); sup->lsave_cnt = cpu_to_le32(c->lsave_cnt); sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION); - sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN); + if (c->mount_opts.override_compr) + sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); + else + sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); generate_random_uuid(sup->uuid); @@ -582,16 +585,15 @@ int ubifs_read_superblock(struct ubifs_info *c) c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT; c->fanout = le32_to_cpu(sup->fanout); c->lsave_cnt = le32_to_cpu(sup->lsave_cnt); - c->default_compr = le16_to_cpu(sup->default_compr); c->rp_size = le64_to_cpu(sup->rp_size); c->rp_uid = le32_to_cpu(sup->rp_uid); c->rp_gid = le32_to_cpu(sup->rp_gid); sup_flags = le32_to_cpu(sup->flags); + if (!c->mount_opts.override_compr) + c->default_compr = le16_to_cpu(sup->default_compr); c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); - memcpy(&c->uuid, &sup->uuid, 16); - c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); /* Automatically increase file system size to the maximum size */ diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 21b4103271ec..fc81022cc26d 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -417,6 +417,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) else if (c->mount_opts.chk_data_crc == 1) seq_printf(s, ",no_chk_data_crc"); + if (c->mount_opts.override_compr) { + seq_printf(s, ",compr="); + seq_printf(s, ubifs_compr_name(c->mount_opts.compr_type)); + } + return 0; } @@ -878,6 +883,7 @@ static int check_volume_empty(struct ubifs_info *c) * Opt_no_bulk_read: disable bulk-reads * Opt_chk_data_crc: check CRCs when reading data nodes * Opt_no_chk_data_crc: do not check CRCs when reading data nodes + * Opt_override_compr: override default compressor * Opt_err: just end of array marker */ enum { @@ -887,6 +893,7 @@ enum { Opt_no_bulk_read, Opt_chk_data_crc, Opt_no_chk_data_crc, + Opt_override_compr, Opt_err, }; @@ -897,6 +904,7 @@ static const match_table_t tokens = { {Opt_no_bulk_read, "no_bulk_read"}, {Opt_chk_data_crc, "chk_data_crc"}, {Opt_no_chk_data_crc, "no_chk_data_crc"}, + {Opt_override_compr, "compr=%s"}, {Opt_err, NULL}, }; @@ -950,6 +958,28 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, 
c->mount_opts.chk_data_crc = 1; c->no_chk_data_crc = 1; break; + case Opt_override_compr: + { + char *name = match_strdup(&args[0]); + + if (!name) + return -ENOMEM; + if (!strcmp(name, "none")) + c->mount_opts.compr_type = UBIFS_COMPR_NONE; + else if (!strcmp(name, "lzo")) + c->mount_opts.compr_type = UBIFS_COMPR_LZO; + else if (!strcmp(name, "zlib")) + c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; + else { + ubifs_err("unknown compressor \"%s\"", name); + kfree(name); + return -EINVAL; + } + kfree(name); + c->mount_opts.override_compr = 1; + c->default_compr = c->mount_opts.compr_type; + break; + } default: ubifs_err("unrecognized mount option \"%s\" " "or missing value", p); @@ -1100,13 +1130,13 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; /* - * Make sure the compressor which is set as the default on in the - * superblock was actually compiled in. + * Make sure the compressor which is set as default in the superblock + * or overriden by mount options is actually compiled in. */ if (!ubifs_compr_present(c->default_compr)) { - ubifs_warn("'%s' compressor is set by superblock, but not " - "compiled in", ubifs_compr_name(c->default_compr)); - c->default_compr = UBIFS_COMPR_NONE; + ubifs_err("'compressor \"%s\" is not compiled in", + ubifs_compr_name(c->default_compr)); + goto out_free; } dbg_failure_mode_registration(c); @@ -2023,8 +2053,8 @@ static int __init ubifs_init(void) /* * We use 2 bit wide bit-fields to store compression type, which should * be amended if more compressors are added. The bit-fields are: - * @compr_type in 'struct ubifs_inode' and @default_compr in - * 'struct ubifs_info'. + * @compr_type in 'struct ubifs_inode', @default_compr in + * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'. */ BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4d76aba57ee1..16840e099eff 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -893,13 +893,21 @@ struct ubifs_orphan { /** * struct ubifs_mount_opts - UBIFS-specific mount options information. 
* @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) - * @bulk_read: enable bulk-reads - * @chk_data_crc: check CRCs when reading data nodes + * @bulk_read: enable/disable bulk-reads (%0 default, %1 disable, %2 enable) + * @chk_data_crc: enable/disable CRC data checking when reading data nodes + * (%0 default, %1 disable, %2 enable) + * @override_compr: override default compressor (%0 - do not override and use + * superblock compressor, %1 - override and use compressor + * specified in @compr_type) + * @compr_type: compressor type to override the superblock compressor with + * (%UBIFS_COMPR_NONE, etc) */ struct ubifs_mount_opts { unsigned int unmount_mode:2; unsigned int bulk_read:2; unsigned int chk_data_crc:2; + unsigned int override_compr:1; + unsigned int compr_type:2; }; /** -- cgit From 5dd7cbc083f3a91fa7454125fe992826701b67bc Mon Sep 17 00:00:00 2001 From: Kukkonen Mika Date: Tue, 2 Dec 2008 11:32:49 +0200 Subject: UBIFS: avoid unnecessary checks I have a habit of compiling the kernel with EXTRA_CFLAGS="-Wextra -Wno-unused -Wno-sign-compare -Wno-missing-field-initializers" and so fs/ubifs/key.h gives lots (~10) of these every time: CC fs/ubifs/tnc_misc.o In file included from fs/ubifs/ubifs.h:1725, from fs/ubifs/tnc_misc.c:30: fs/ubifs/key.h: In function 'key_r5_hash': fs/ubifs/key.h:64: warning: comparison of unsigned expression >= 0 is always true fs/ubifs/key.h: In function 'key_test_hash': fs/ubifs/key.h:81: warning: comparison of unsigned expression >= 0 is always true This patch fixes the warnings. Signed-off-by: Artem Bityutskiy --- fs/ubifs/key.h | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h index 3f1f16bc25c9..efb3430a2581 100644 --- a/fs/ubifs/key.h +++ b/fs/ubifs/key.h @@ -37,6 +37,22 @@ #ifndef __UBIFS_KEY_H__ #define __UBIFS_KEY_H__ +/** + * key_mask_hash - mask a valid hash value. + * @hash: hash value to be masked + * + * We use hash values as offset in directories, so values %0 and %1 are + * reserved for "." and "..". %2 is reserved for "end of readdir" marker. This + * function makes sure the reserved values are not used. + */ +static inline uint32_t key_mask_hash(uint32_t hash) +{ + hash &= UBIFS_S_KEY_HASH_MASK; + if (unlikely(hash <= 2)) + hash += 3; + return hash; +} + /** * key_r5_hash - R5 hash function (borrowed from reiserfs). * @s: direntry name @@ -54,16 +70,7 @@ static inline uint32_t key_r5_hash(const char *s, int len) str++; } - a &= UBIFS_S_KEY_HASH_MASK; - - /* - * We use hash values as offset in directories, so values %0 and %1 are - * reserved for "." and "..". %2 is reserved for "end of readdir" - * marker. - */ - if (unlikely(a >= 0 && a <= 2)) - a += 3; - return a; + return key_mask_hash(a); } /** @@ -77,10 +84,7 @@ static inline uint32_t key_test_hash(const char *str, int len) len = min_t(uint32_t, len, 4); memcpy(&a, str, len); - a &= UBIFS_S_KEY_HASH_MASK; - if (unlikely(a >= 0 && a <= 2)) - a += 3; - return a; + return key_mask_hash(a); } /** -- cgit From 17c2f9f85c896b48a5d74a9155d99ec5b241a0e6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 17 Oct 2008 13:31:39 +0300 Subject: UBIFS: separate debugging fields out Introduce a new data structure which contains all debugging stuff inside. This is cleaner than having debugging stuff directly in 'c'.
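Reduced to a minimal sketch (generic names, not the actual UBIFS declarations), the shape of the change is: all debug-only state moves behind one pointer that exists only when debugging support is compiled in, so the main file-system description object stays small in production builds.

/* Sketch only -- the field and config names are illustrative. */
struct fs_debug_info {
	void *buf;			/* LEB-sized scratch buffer */
	int failure_mode;		/* recovery-testing state */
	long long chk_lpt_sz;		/* checker bookkeeping, etc. */
};

struct fs_info {
	/* ... regular run-time state ... */
#ifdef CONFIG_FS_DEBUG
	struct fs_debug_info *dbg;	/* allocated by the debugging init helper */
#endif
};

The debugging code then reaches its state through c->dbg (the hunks below turn c->old_zroot into d->old_zroot and so on), and ubifs_debugging_init()/ubifs_debugging_exit() own the allocation.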
Signed-off-by: Artem Bityutskiy --- fs/ubifs/commit.c | 25 +++++++++--------- fs/ubifs/debug.c | 71 +++++++++++++++++++++++++++++++++++++++------------ fs/ubifs/debug.h | 51 ++++++++++++++++++++++++++++++------ fs/ubifs/lprops.c | 2 +- fs/ubifs/lpt_commit.c | 55 +++++++++++++++++++-------------------- fs/ubifs/orphan.c | 2 +- fs/ubifs/super.c | 22 +++++----------- fs/ubifs/tnc_commit.c | 7 ++--- fs/ubifs/ubifs.h | 34 +++--------------------- 9 files changed, 156 insertions(+), 113 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index b49884c8c10e..f3a7945527fb 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -470,12 +470,12 @@ int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot) { struct ubifs_idx_node *idx; int lnum, offs, len, err = 0; + struct ubifs_debug_info *d = c->dbg; - c->old_zroot = *zroot; - - lnum = c->old_zroot.lnum; - offs = c->old_zroot.offs; - len = c->old_zroot.len; + d->old_zroot = *zroot; + lnum = d->old_zroot.lnum; + offs = d->old_zroot.offs; + len = d->old_zroot.len; idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); if (!idx) @@ -485,8 +485,8 @@ int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot) if (err) goto out; - c->old_zroot_level = le16_to_cpu(idx->level); - c->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); + d->old_zroot_level = le16_to_cpu(idx->level); + d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); out: kfree(idx); return err; @@ -509,6 +509,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) { int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt; int first = 1, iip; + struct ubifs_debug_info *d = c->dbg; union ubifs_key lower_key, upper_key, l_key, u_key; unsigned long long uninitialized_var(last_sqnum); struct ubifs_idx_node *idx; @@ -525,9 +526,9 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) UBIFS_IDX_NODE_SZ; /* Start at the old zroot */ - lnum = c->old_zroot.lnum; - offs = c->old_zroot.offs; - len = c->old_zroot.len; + lnum = d->old_zroot.lnum; + offs = d->old_zroot.offs; + len = d->old_zroot.len; iip = 0; /* @@ -560,11 +561,11 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) if (first) { first = 0; /* Check root level and sqnum */ - if (le16_to_cpu(idx->level) != c->old_zroot_level) { + if (le16_to_cpu(idx->level) != d->old_zroot_level) { err = 2; goto out_dump; } - if (le64_to_cpu(idx->ch.sqnum) != c->old_zroot_sqnum) { + if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) { err = 3; goto out_dump; } diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 510ffa0bbda4..0332a856a082 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -705,7 +705,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum); - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { ubifs_err("scan error %d", (int)PTR_ERR(sleb)); return; @@ -2097,7 +2097,7 @@ static int simple_rand(void) return (next >> 16) & 32767; } -void dbg_failure_mode_registration(struct ubifs_info *c) +static void failure_mode_init(struct ubifs_info *c) { struct failure_mode_info *fmi; @@ -2112,7 +2112,7 @@ void dbg_failure_mode_registration(struct ubifs_info *c) spin_unlock(&fmi_lock); } -void dbg_failure_mode_deregistration(struct ubifs_info *c) +static void failure_mode_exit(struct ubifs_info *c) { struct failure_mode_info *fmi, *tmp; @@ -2146,42 +2146,44 @@ static int 
in_failure_mode(struct ubi_volume_desc *desc) struct ubifs_info *c = dbg_find_info(desc); if (c && dbg_failure_mode) - return c->failure_mode; + return c->dbg->failure_mode; return 0; } static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) { struct ubifs_info *c = dbg_find_info(desc); + struct ubifs_debug_info *d; if (!c || !dbg_failure_mode) return 0; - if (c->failure_mode) + d = c->dbg; + if (d->failure_mode) return 1; - if (!c->fail_cnt) { + if (!d->fail_cnt) { /* First call - decide delay to failure */ if (chance(1, 2)) { unsigned int delay = 1 << (simple_rand() >> 11); if (chance(1, 2)) { - c->fail_delay = 1; - c->fail_timeout = jiffies + + d->fail_delay = 1; + d->fail_timeout = jiffies + msecs_to_jiffies(delay); dbg_rcvry("failing after %ums", delay); } else { - c->fail_delay = 2; - c->fail_cnt_max = delay; + d->fail_delay = 2; + d->fail_cnt_max = delay; dbg_rcvry("failing after %u calls", delay); } } - c->fail_cnt += 1; + d->fail_cnt += 1; } /* Determine if failure delay has expired */ - if (c->fail_delay == 1) { - if (time_before(jiffies, c->fail_timeout)) + if (d->fail_delay == 1) { + if (time_before(jiffies, d->fail_timeout)) return 0; - } else if (c->fail_delay == 2) - if (c->fail_cnt++ < c->fail_cnt_max) + } else if (d->fail_delay == 2) + if (d->fail_cnt++ < d->fail_cnt_max) return 0; if (lnum == UBIFS_SB_LNUM) { if (write) { @@ -2239,7 +2241,7 @@ static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) dbg_rcvry("failing in bud LEB %d commit not running", lnum); } ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum); - c->failure_mode = 1; + d->failure_mode = 1; dump_stack(); return 1; } @@ -2344,4 +2346,41 @@ int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) return 0; } +/** + * ubifs_debugging_init - initialize UBIFS debugging. + * @c: UBIFS file-system description object + * + * This function initializes debugging-related data for the file system. + * Returns zero in case of success and a negative error code in case of + * failure. + */ +int ubifs_debugging_init(struct ubifs_info *c) +{ + c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); + if (!c->dbg) + return -ENOMEM; + + c->dbg->buf = vmalloc(c->leb_size); + if (!c->dbg->buf) + goto out; + + failure_mode_init(c); + return 0; + +out: + kfree(c->dbg); + return -ENOMEM; +} + +/** + * ubifs_debugging_exit - free debugging data. + * @c: UBIFS file-system description object + */ +void ubifs_debugging_exit(struct ubifs_info *c) +{ + failure_mode_exit(c); + vfree(c->dbg->buf); + kfree(c->dbg); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 33d6b95071e4..d6ea1362d56a 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -25,7 +25,43 @@ #ifdef CONFIG_UBIFS_FS_DEBUG -#define UBIFS_DBG(op) op +/** + * ubifs_debug_info - per-FS debugging information. 
+ * @buf: a buffer of LEB size, used for various purposes + * @old_zroot: old index root - used by 'dbg_check_old_index()' + * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' + * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' + * @failure_mode: failure mode for recovery testing + * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls + * @fail_timeout: time in jiffies when delay of failure mode expires + * @fail_cnt: current number of calls to failure mode I/O functions + * @fail_cnt_max: number of calls by which to delay failure mode + * @chk_lpt_sz: used by LPT tree size checker + * @chk_lpt_sz2: used by LPT tree size checker + * @chk_lpt_wastage: used by LPT tree size checker + * @chk_lpt_lebs: used by LPT tree size checker + * @new_nhead_offs: used by LPT tree size checker + * @new_ihead_lnum: used by debugging to check ihead_lnum + * @new_ihead_offs: used by debugging to check ihead_offs + */ +struct ubifs_debug_info { + void *buf; + struct ubifs_zbranch old_zroot; + int old_zroot_level; + unsigned long long old_zroot_sqnum; + int failure_mode; + int fail_delay; + unsigned long fail_timeout; + unsigned int fail_cnt; + unsigned int fail_cnt_max; + long long chk_lpt_sz; + long long chk_lpt_sz2; + long long chk_lpt_wastage; + int chk_lpt_lebs; + int new_nhead_offs; + int new_ihead_lnum; + int new_ihead_offs; +}; #define ubifs_assert(expr) do { \ if (unlikely(!(expr))) { \ @@ -211,6 +247,9 @@ extern unsigned int ubifs_msg_flags; extern unsigned int ubifs_chk_flags; extern unsigned int ubifs_tst_flags; +int ubifs_debugging_init(struct ubifs_info *c); +void ubifs_debugging_exit(struct ubifs_info *c); + /* Dump functions */ const char *dbg_ntype(int type); @@ -274,9 +313,6 @@ int dbg_force_in_the_gaps(void); #define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY) -void dbg_failure_mode_registration(struct ubifs_info *c); -void dbg_failure_mode_deregistration(struct ubifs_info *c); - #ifndef UBIFS_DBG_PRESERVE_UBI #define ubi_leb_read dbg_leb_read @@ -320,8 +356,6 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #else /* !CONFIG_UBIFS_FS_DEBUG */ -#define UBIFS_DBG(op) - /* Use "if (0)" to make compiler check arguments even if debugging is off */ #define ubifs_assert(expr) do { \ if (0 && (expr)) \ @@ -360,6 +394,9 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) +#define ubifs_debugging_init(c) 0 +#define ubifs_debugging_exit(c) ({}) + #define dbg_ntype(type) "" #define dbg_cstate(cmt_state) "" #define dbg_get_key_dump(c, key) ({}) @@ -396,8 +433,6 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_force_in_the_gaps_enabled 0 #define dbg_force_in_the_gaps() 0 #define dbg_failure_mode 0 -#define dbg_failure_mode_registration(c) ({}) -#define dbg_failure_mode_deregistration(c) ({}) #endif /* !CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index f27176e9b70d..10ba663eb329 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -1088,7 +1088,7 @@ static int scan_check_cb(struct ubifs_info *c, } } - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { /* * After an unclean unmount, empty and freeable LEBs diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index a41434b42785..1aefab9f0b5e 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1602,7 +1602,7 @@ static 
int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) { int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len; int ret; - void *buf = c->dbg_buf; + void *buf = c->dbg->buf; dbg_lp("LEB %d", lnum); err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); @@ -1731,15 +1731,16 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) */ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) { + struct ubifs_debug_info *d = c->dbg; long long chk_lpt_sz, lpt_sz; int err = 0; switch (action) { case 0: - c->chk_lpt_sz = 0; - c->chk_lpt_sz2 = 0; - c->chk_lpt_lebs = 0; - c->chk_lpt_wastage = 0; + d->chk_lpt_sz = 0; + d->chk_lpt_sz2 = 0; + d->chk_lpt_lebs = 0; + d->chk_lpt_wastage = 0; if (c->dirty_pn_cnt > c->pnode_cnt) { dbg_err("dirty pnodes %d exceed max %d", c->dirty_pn_cnt, c->pnode_cnt); @@ -1752,35 +1753,35 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } return err; case 1: - c->chk_lpt_sz += len; + d->chk_lpt_sz += len; return 0; case 2: - c->chk_lpt_sz += len; - c->chk_lpt_wastage += len; - c->chk_lpt_lebs += 1; + d->chk_lpt_sz += len; + d->chk_lpt_wastage += len; + d->chk_lpt_lebs += 1; return 0; case 3: chk_lpt_sz = c->leb_size; - chk_lpt_sz *= c->chk_lpt_lebs; + chk_lpt_sz *= d->chk_lpt_lebs; chk_lpt_sz += len - c->nhead_offs; - if (c->chk_lpt_sz != chk_lpt_sz) { + if (d->chk_lpt_sz != chk_lpt_sz) { dbg_err("LPT wrote %lld but space used was %lld", - c->chk_lpt_sz, chk_lpt_sz); + d->chk_lpt_sz, chk_lpt_sz); err = -EINVAL; } - if (c->chk_lpt_sz > c->lpt_sz) { + if (d->chk_lpt_sz > c->lpt_sz) { dbg_err("LPT wrote %lld but lpt_sz is %lld", - c->chk_lpt_sz, c->lpt_sz); + d->chk_lpt_sz, c->lpt_sz); err = -EINVAL; } - if (c->chk_lpt_sz2 && c->chk_lpt_sz != c->chk_lpt_sz2) { + if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { dbg_err("LPT layout size %lld but wrote %lld", - c->chk_lpt_sz, c->chk_lpt_sz2); + d->chk_lpt_sz, d->chk_lpt_sz2); err = -EINVAL; } - if (c->chk_lpt_sz2 && c->new_nhead_offs != len) { + if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { dbg_err("LPT new nhead offs: expected %d was %d", - c->new_nhead_offs, len); + d->new_nhead_offs, len); err = -EINVAL; } lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; @@ -1788,22 +1789,22 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) lpt_sz += c->ltab_sz; if (c->big_lpt) lpt_sz += c->lsave_sz; - if (c->chk_lpt_sz - c->chk_lpt_wastage > lpt_sz) { + if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", - c->chk_lpt_sz, c->chk_lpt_wastage, lpt_sz); + d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); err = -EINVAL; } if (err) dbg_dump_lpt_info(c); - c->chk_lpt_sz2 = c->chk_lpt_sz; - c->chk_lpt_sz = 0; - c->chk_lpt_wastage = 0; - c->chk_lpt_lebs = 0; - c->new_nhead_offs = len; + d->chk_lpt_sz2 = d->chk_lpt_sz; + d->chk_lpt_sz = 0; + d->chk_lpt_wastage = 0; + d->chk_lpt_lebs = 0; + d->new_nhead_offs = len; return err; case 4: - c->chk_lpt_sz += len; - c->chk_lpt_wastage += len; + d->chk_lpt_sz += len; + d->chk_lpt_wastage += len; return 0; default: return -EINVAL; diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 9bd5a43d4526..9e6f403f170e 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -899,7 +899,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci) for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { struct ubifs_scan_leb *sleb; - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; diff --git a/fs/ubifs/super.c 
b/fs/ubifs/super.c index fc81022cc26d..ad44822059c7 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1069,11 +1069,9 @@ static int mount_ubifs(struct ubifs_info *c) if (err) return err; -#ifdef CONFIG_UBIFS_FS_DEBUG - c->dbg_buf = vmalloc(c->leb_size); - if (!c->dbg_buf) - return -ENOMEM; -#endif + err = ubifs_debugging_init(c); + if (err) + return err; err = check_volume_empty(c); if (err) @@ -1139,18 +1137,16 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } - dbg_failure_mode_registration(c); - err = init_constants_late(c); if (err) - goto out_dereg; + goto out_free; sz = ALIGN(c->max_idx_node_sz, c->min_io_size); sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size); c->cbuf = kmalloc(sz, GFP_NOFS); if (!c->cbuf) { err = -ENOMEM; - goto out_dereg; + goto out_free; } sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); @@ -1350,14 +1346,12 @@ out_wbufs: free_wbufs(c); out_cbuf: kfree(c->cbuf); -out_dereg: - dbg_failure_mode_deregistration(c); out_free: kfree(c->bu.buf); vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); - UBIFS_DBG(vfree(c->dbg_buf)); + ubifs_debugging_exit(c); return err; } @@ -1394,8 +1388,7 @@ static void ubifs_umount(struct ubifs_info *c) vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); - UBIFS_DBG(vfree(c->dbg_buf)); - dbg_failure_mode_deregistration(c); + ubifs_debugging_exit(c); } /** @@ -1879,7 +1872,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_iput; mutex_unlock(&c->umount_mutex); - return 0; out_iput: diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 8ac76b1c2d55..3c0af452887b 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -553,8 +553,8 @@ static int layout_in_empty_space(struct ubifs_info *c) } #ifdef CONFIG_UBIFS_FS_DEBUG - c->new_ihead_lnum = lnum; - c->new_ihead_offs = buf_offs; + c->dbg->new_ihead_lnum = lnum; + c->dbg->new_ihead_offs = buf_offs; #endif return 0; @@ -1002,7 +1002,8 @@ static int write_index(struct ubifs_info *c) } #ifdef CONFIG_UBIFS_FS_DEBUG - if (lnum != c->new_ihead_lnum || buf_offs != c->new_ihead_offs) { + if (lnum != c->dbg->new_ihead_lnum || + buf_offs != c->dbg->new_ihead_offs) { ubifs_err("inconsistent ihead"); return -EINVAL; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 16840e099eff..7e090a5e2bf6 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -910,6 +910,8 @@ struct ubifs_mount_opts { unsigned int compr_type:2; }; +struct ubifs_debug_info; + /** * struct ubifs_info - UBIFS file-system description data structure * (per-superblock). 
@@ -972,8 +974,6 @@ struct ubifs_mount_opts { * @ileb_nxt: next pre-allocated index LEBs * @old_idx: tree of index nodes obsoleted since the last commit start * @bottom_up_buf: a buffer which is used by 'dirty_cow_bottom_up()' in tnc.c - * @new_ihead_lnum: used by debugging to check ihead_lnum - * @new_ihead_offs: used by debugging to check ihead_offs * * @mst_node: master node * @mst_offs: offset of valid master node @@ -1157,15 +1157,7 @@ struct ubifs_mount_opts { * @always_chk_crc: always check CRCs (while mounting and remounting rw) * @mount_opts: UBIFS-specific mount options * - * @dbg_buf: a buffer of LEB size used for debugging purposes - * @old_zroot: old index root - used by 'dbg_check_old_index()' - * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' - * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' - * @failure_mode: failure mode for recovery testing - * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls - * @fail_timeout: time in jiffies when delay of failure mode expires - * @fail_cnt: current number of calls to failure mode I/O functions - * @fail_cnt_max: number of calls by which to delay failure mode + * @dbg: debugging-related information */ struct ubifs_info { struct super_block *vfs_sb; @@ -1221,10 +1213,6 @@ struct ubifs_info { int ileb_nxt; struct rb_root old_idx; int *bottom_up_buf; -#ifdef CONFIG_UBIFS_FS_DEBUG - int new_ihead_lnum; - int new_ihead_offs; -#endif struct ubifs_mst_node *mst_node; int mst_offs; @@ -1399,21 +1387,7 @@ struct ubifs_info { struct ubifs_mount_opts mount_opts; #ifdef CONFIG_UBIFS_FS_DEBUG - void *dbg_buf; - struct ubifs_zbranch old_zroot; - int old_zroot_level; - unsigned long long old_zroot_sqnum; - int failure_mode; - int fail_delay; - unsigned long fail_timeout; - unsigned int fail_cnt; - unsigned int fail_cnt_max; - long long chk_lpt_sz; - long long chk_lpt_sz2; - long long chk_lpt_wastage; - int chk_lpt_lebs; - int new_nhead_lnum; - int new_nhead_offs; + struct ubifs_debug_info *dbg; #endif }; -- cgit From 552ff3179d1e93a3e982357544c059f3e9a5516e Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 23 Oct 2008 11:49:28 +0300 Subject: UBIFS: add debugfs support We need to have a possibility to see various UBIFS variables and ask UBIFS to dump various information. Debugfs is what we need. 
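Concretely, each mounted volume gets a directory under the debugfs root, /sys/kernel/debug/ubifs/ubiX_Y (assuming debugfs is mounted at /sys/kernel/debug), containing write-only knob files such as dump_tnc; writing anything to a knob asks UBIFS to dump the corresponding state to the kernel log. The general pattern, shown here as a self-contained sketch rather than the UBIFS code itself, is a directory plus a file whose ->write handler performs the action:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct dentry *dfs_dir;

static ssize_t knob_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	printk(KERN_INFO "knob written, dumping state\n"); /* real code dumps here */
	*ppos += count;
	return count;		/* consume the write */
}

static const struct file_operations knob_fops = {
	.owner	= THIS_MODULE,
	.write	= knob_write,
};

static int __init knob_init(void)
{
	dfs_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("dump", S_IWUSR, dfs_dir, NULL, &knob_fops);
	return 0;
}

static void __exit knob_exit(void)
{
	debugfs_remove_recursive(dfs_dir);
}

module_init(knob_init);
module_exit(knob_exit);
MODULE_LICENSE("GPL");

Error handling is trimmed in this sketch; the patch below checks the dentries it creates and removes the whole per-volume directory recursively on failure and at unmount.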
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 173 +++++++++++++++++++++++++++++++++++++++++++++++++------ fs/ubifs/debug.h | 27 ++++++++- fs/ubifs/super.c | 12 ++++ fs/ubifs/ubifs.h | 1 + 4 files changed, 193 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 0332a856a082..56842772c804 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -32,6 +32,7 @@ #include "ubifs.h" #include #include +#include #ifdef CONFIG_UBIFS_FS_DEBUG @@ -988,22 +989,20 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, err = 1; key_read(c, &dent1->key, &key); if (keys_cmp(c, &zbr1->key, &key)) { - dbg_err("1st entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); - dbg_dump_node(c, dent1); - goto out_free; + ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + ubifs_err("but it should have key %s according to tnc", + DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); + goto out_free; } key_read(c, &dent2->key, &key); if (keys_cmp(c, &zbr2->key, &key)) { - dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); - dbg_dump_node(c, dent2); - goto out_free; + ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + ubifs_err("but it should have key %s according to tnc", + DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); + goto out_free; } nlen1 = le16_to_cpu(dent1->nlen); @@ -1015,14 +1014,14 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, goto out_free; } if (cmp == 0 && nlen1 == nlen2) - dbg_err("2 xent/dent nodes with the same name"); + ubifs_err("2 xent/dent nodes with the same name"); else - dbg_err("bad order of colliding key %s", + ubifs_err("bad order of colliding key %s", DBGKEY(&key)); - dbg_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); + ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); dbg_dump_node(c, dent1); - dbg_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); + ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); dbg_dump_node(c, dent2); out_free: @@ -2103,7 +2102,7 @@ static void failure_mode_init(struct ubifs_info *c) fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS); if (!fmi) { - dbg_err("Failed to register failure mode - no memory"); + ubifs_err("Failed to register failure mode - no memory"); return; } fmi->c = c; @@ -2383,4 +2382,144 @@ void ubifs_debugging_exit(struct ubifs_info *c) kfree(c->dbg); } +/* + * Root directory for UBIFS stuff in debugfs. Contains sub-directories which + * contain the stuff specific to particular file-system mounts. + */ +static struct dentry *debugfs_rootdir; + +/** + * dbg_debugfs_init - initialize debugfs file-system. + * + * UBIFS uses debugfs file-system to expose various debugging knobs to + * user-space. This function creates "ubifs" directory in the debugfs + * file-system. Returns zero in case of success and a negative error code in + * case of failure. + */ +int dbg_debugfs_init(void) +{ + debugfs_rootdir = debugfs_create_dir("ubifs", NULL); + if (IS_ERR(debugfs_rootdir)) { + int err = PTR_ERR(debugfs_rootdir); + ubifs_err("cannot create \"ubifs\" debugfs directory, " + "error %d\n", err); + return err; + } + + return 0; +} + +/** + * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system. 
+ */ +void dbg_debugfs_exit(void) +{ + debugfs_remove(debugfs_rootdir); +} + +static int open_debugfs_file(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t write_debugfs_file(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ubifs_info *c = file->private_data; + struct ubifs_debug_info *d = c->dbg; + + if (file->f_path.dentry == d->dump_lprops) + dbg_dump_lprops(c); + else if (file->f_path.dentry == d->dump_budg) { + spin_lock(&c->space_lock); + dbg_dump_budg(c); + spin_unlock(&c->space_lock); + } else if (file->f_path.dentry == d->dump_budg) { + mutex_lock(&c->tnc_mutex); + dbg_dump_tnc(c); + mutex_unlock(&c->tnc_mutex); + } else + return -EINVAL; + + *ppos += count; + return count; +} + +static const struct file_operations debugfs_fops = { + .open = open_debugfs_file, + .write = write_debugfs_file, + .owner = THIS_MODULE, +}; + +/** + * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance. + * @c: UBIFS file-system description object + * + * This function creates all debugfs files for this instance of UBIFS. Returns + * zero in case of success and a negative error code in case of failure. + * + * Note, the only reason we have not merged this function with the + * 'ubifs_debugging_init()' function is because it is better to initialize + * debugfs interfaces at the very end of the mount process, and remove them at + * the very beginning of the mount process. + */ +int dbg_debugfs_init_fs(struct ubifs_info *c) +{ + int err; + const char *fname; + struct dentry *dent; + struct ubifs_debug_info *d = c->dbg; + + sprintf(d->debugfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); + d->debugfs_dir = debugfs_create_dir(d->debugfs_dir_name, + debugfs_rootdir); + if (IS_ERR(d->debugfs_dir)) { + err = PTR_ERR(d->debugfs_dir); + ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", + d->debugfs_dir_name, err); + goto out; + } + + fname = "dump_lprops"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_lprops = dent; + + fname = "dump_budg"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_budg = dent; + + fname = "dump_tnc"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_tnc = dent; + + return 0; + +out_remove: + err = PTR_ERR(dent); + ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", + fname, err); + debugfs_remove_recursive(d->debugfs_dir); +out: + return err; +} + +/** + * dbg_debugfs_exit_fs - remove all debugfs files. 
+ * @c: UBIFS file-system description object + */ +void dbg_debugfs_exit_fs(struct ubifs_info *c) +{ + debugfs_remove_recursive(c->dbg->debugfs_dir); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index d6ea1362d56a..a6b70f8aac9c 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -43,6 +43,13 @@ * @new_nhead_offs: used by LPT tree size checker * @new_ihead_lnum: used by debugging to check ihead_lnum * @new_ihead_offs: used by debugging to check ihead_offs + * + * debugfs_dir_name: name of debugfs directory containing this file-system's + * files + * debugfs_dir: direntry object of the file-system debugfs directory + * dump_lprops: "dump lprops" debugfs knob + * dump_budg: "dump budgeting information" debugfs knob + * dump_tnc: "dump TNC" debugfs knob */ struct ubifs_debug_info { void *buf; @@ -61,6 +68,12 @@ struct ubifs_debug_info { int new_nhead_offs; int new_ihead_lnum; int new_ihead_offs; + + char debugfs_dir_name[100]; + struct dentry *debugfs_dir; + struct dentry *dump_lprops; + struct dentry *dump_budg; + struct dentry *dump_tnc; }; #define ubifs_assert(expr) do { \ @@ -251,7 +264,6 @@ int ubifs_debugging_init(struct ubifs_info *c); void ubifs_debugging_exit(struct ubifs_info *c); /* Dump functions */ - const char *dbg_ntype(int type); const char *dbg_cstate(int cmt_state); const char *dbg_get_key_dump(const struct ubifs_info *c, @@ -274,7 +286,6 @@ void dbg_dump_tnc(struct ubifs_info *c); void dbg_dump_index(struct ubifs_info *c); /* Checking helper functions */ - typedef int (*dbg_leaf_callback)(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *priv); typedef int (*dbg_znode_callback)(struct ubifs_info *c, @@ -354,6 +365,12 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, return dbg_leb_change(desc, lnum, buf, len, UBI_UNKNOWN); } +/* Debugfs-related stuff */ +int dbg_debugfs_init(void); +void dbg_debugfs_exit(void); +int dbg_debugfs_init_fs(struct ubifs_info *c); +void dbg_debugfs_exit_fs(struct ubifs_info *c); + #else /* !CONFIG_UBIFS_FS_DEBUG */ /* Use "if (0)" to make compiler check arguments even if debugging is off */ @@ -434,6 +451,10 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_force_in_the_gaps() 0 #define dbg_failure_mode 0 -#endif /* !CONFIG_UBIFS_FS_DEBUG */ +#define dbg_debugfs_init() 0 +#define dbg_debugfs_exit() +#define dbg_debugfs_init_fs(c) 0 +#define dbg_debugfs_exit_fs(c) 0 +#endif /* !CONFIG_UBIFS_FS_DEBUG */ #endif /* !__UBIFS_DEBUG_H__ */ diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ad44822059c7..2dbaa4fc2cbb 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1258,6 +1258,10 @@ static int mount_ubifs(struct ubifs_info *c) } } + err = dbg_debugfs_init_fs(c); + if (err) + goto out_infos; + err = dbg_check_filesystem(c); if (err) goto out_infos; @@ -1369,6 +1373,7 @@ static void ubifs_umount(struct ubifs_info *c) dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num, c->vi.vol_id); + dbg_debugfs_exit_fs(c); spin_lock(&ubifs_infos_lock); list_del(&c->infos_list); spin_unlock(&ubifs_infos_lock); @@ -2078,12 +2083,18 @@ static int __init ubifs_init(void) register_shrinker(&ubifs_shrinker_info); err = ubifs_compressors_init(); + if (err) + goto out_shrinker; + + err = dbg_debugfs_init(); if (err) goto out_compr; return 0; out_compr: + ubifs_compressors_exit(); +out_shrinker: unregister_shrinker(&ubifs_shrinker_info); kmem_cache_destroy(ubifs_inode_slab); out_reg: @@ -2098,6 +2109,7 @@ static void __exit ubifs_exit(void) 
ubifs_assert(list_empty(&ubifs_infos)); ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0); + dbg_debugfs_exit(); ubifs_compressors_exit(); unregister_shrinker(&ubifs_shrinker_info); kmem_cache_destroy(ubifs_inode_slab); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 7e090a5e2bf6..4cf28e85de78 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1158,6 +1158,7 @@ struct ubifs_debug_info; * @mount_opts: UBIFS-specific mount options * * @dbg: debugging-related information + * @dfs: debugfs support-related information */ struct ubifs_info { struct super_block *vfs_sb; -- cgit From 45e12d901fee57bccf90f6940155724954e1aac7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 11:42:18 +0200 Subject: UBIFS: run debugging checks only if they are enabled Do not forget to check whether lpt debugging is enabled before running the check functions. This commit also makes some spelling fixes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/lpt.c | 3 +-- fs/ubifs/lpt_commit.c | 9 +++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index db8bd0e518b2..93c181c742f2 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -36,7 +36,7 @@ * can be written into a single eraseblock. In that case, garbage collection * consists of just writing the whole table, which therefore makes all other * eraseblocks reusable. In the case of the big model, dirty eraseblocks are - * selected for garbage collection, which consists are marking the nodes in + * selected for garbage collection, which consists of marking the clean nodes in * that LEB as dirty, and then only the dirty nodes are written out. Also, in * the case of the big model, a table of LEB numbers is saved so that the entire * LPT does not to be scanned looking for empty eraseblocks when UBIFS is first @@ -156,7 +156,6 @@ int ubifs_calc_lpt_geom(struct ubifs_info *c) } c->check_lpt_free = c->big_lpt; - return 0; } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 1aefab9f0b5e..7bbf03518c7f 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1604,6 +1604,9 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) int ret; void *buf = c->dbg->buf; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + dbg_lp("LEB %d", lnum); err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); if (err) { @@ -1704,6 +1707,9 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) long long free = 0; int i; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + for (i = 0; i < c->lpt_lebs; i++) { if (c->ltab[i].tgc || c->ltab[i].cmt) continue; @@ -1735,6 +1741,9 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) long long chk_lpt_sz, lpt_sz; int err = 0; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + switch (action) { case 0: d->chk_lpt_sz = 0; -- cgit From 787845bdeadd368eedeace92d5bf53f5aa1450ba Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 12:17:42 +0200 Subject: UBIFS: dump stack in LPT check functions It is useful to know how we got to the checking function when hunting the bugs. 
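The pattern applied in the hunks below, as a small generic sketch (not UBIFS code): when a consistency check fails, print the details and then call dump_stack(), so the log also records the call chain that reached the checker.

#include <linux/errno.h>
#include <linux/kernel.h>

int check_size(long long expected, long long got)
{
	if (got != expected) {
		printk(KERN_ERR "size mismatch: expected %lld, got %lld\n",
		       expected, got);
		dump_stack();	/* show how we got here */
		return -EINVAL;
	}
	return 0;
}

The LPT checkers below follow exactly this shape: dbg_dump_lpt_info(c) for the details, then dump_stack().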
Signed-off-by: Artem Bityutskiy --- fs/ubifs/lpt_commit.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 7bbf03518c7f..c5c07f9cd22c 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -320,6 +320,7 @@ no_space: dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dump_stack(); return err; } @@ -548,6 +549,7 @@ no_space: dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dump_stack(); return err; } @@ -1722,6 +1724,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) dbg_err("LPT space error: free %lld lpt_sz %lld", free, c->lpt_sz); dbg_dump_lpt_info(c); + dump_stack(); return -EINVAL; } return 0; @@ -1803,8 +1806,10 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); err = -EINVAL; } - if (err) + if (err) { dbg_dump_lpt_info(c); + dump_stack(); + } d->chk_lpt_sz2 = d->chk_lpt_sz; d->chk_lpt_sz = 0; d->chk_lpt_wastage = 0; -- cgit From 2ba5f7ae8165b3f575dd3a7d8bb18f421fab8273 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 17:32:30 +0200 Subject: UBIFS: introduce LPT dump function Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 28 +++++++---- fs/ubifs/debug.h | 45 +++++++++-------- fs/ubifs/lpt.c | 27 ++++++----- fs/ubifs/lpt_commit.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++-- fs/ubifs/ubifs.h | 3 ++ 5 files changed, 186 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 56842772c804..934db1855f09 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -646,7 +646,8 @@ void dbg_dump_lprops(struct ubifs_info *c) struct ubifs_lprops lp; struct ubifs_lp_stats lst; - printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid); + printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n", + current->pid); ubifs_get_lp_stats(c, &lst); dbg_dump_lstats(&lst); @@ -657,6 +658,8 @@ void dbg_dump_lprops(struct ubifs_info *c) dbg_dump_lprop(c, &lp); } + printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n", + current->pid); } void dbg_dump_lpt_info(struct ubifs_info *c) @@ -664,6 +667,7 @@ void dbg_dump_lpt_info(struct ubifs_info *c) int i; spin_lock(&dbg_lock); + printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid); printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz); printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz); printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz); @@ -704,8 +708,8 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) if (dbg_failure_mode) return; - printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum); - + printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", + current->pid, lnum); sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { ubifs_err("scan error %d", (int)PTR_ERR(sleb)); @@ -722,6 +726,8 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) dbg_dump_node(c, snod->node); } + printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", + current->pid, lnum); ubifs_scan_destroy(sleb); return; } @@ -769,7 +775,7 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) { int i; - printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n", + printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n", current->pid, cat, 
heap->cnt); for (i = 0; i < heap->cnt; i++) { struct ubifs_lprops *lprops = heap->arr[i]; @@ -778,6 +784,7 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) "flags %d\n", i, lprops->lnum, lprops->hpos, lprops->free, lprops->dirty, lprops->flags); } + printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid); } void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, @@ -785,7 +792,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, { int i; - printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid); + printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid); printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n", (size_t)pnode, (size_t)parent, (size_t)pnode->cnext); printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n", @@ -804,7 +811,7 @@ void dbg_dump_tnc(struct ubifs_info *c) int level; printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid); + printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid); znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); level = znode->level; printk(KERN_DEBUG "== Level %d ==\n", level); @@ -816,8 +823,7 @@ void dbg_dump_tnc(struct ubifs_info *c) dbg_dump_znode(c, znode); znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); } - - printk(KERN_DEBUG "\n"); + printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid); } static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, @@ -992,7 +998,8 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, DBGKEY(&key)); ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); + DBGKEY(&zbr1->key)); + dbg_dump_node(c, dent1); goto out_free; } @@ -1001,7 +1008,8 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, DBGKEY(&key)); ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); + DBGKEY(&zbr2->key)); + dbg_dump_node(c, dent2); goto out_free; } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index a6b70f8aac9c..9820d6999f7e 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -270,6 +270,8 @@ const char *dbg_get_key_dump(const struct ubifs_info *c, const union ubifs_key *key); void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode); void dbg_dump_node(const struct ubifs_info *c, const void *node); +void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum, + int offs); void dbg_dump_budget_req(const struct ubifs_budget_req *req); void dbg_dump_lstats(const struct ubifs_lp_stats *lst); void dbg_dump_budg(struct ubifs_info *c); @@ -284,6 +286,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, struct ubifs_nnode *parent, int iip); void dbg_dump_tnc(struct ubifs_info *c); void dbg_dump_index(struct ubifs_info *c); +void dbg_dump_lpt_lebs(const struct ubifs_info *c); /* Checking helper functions */ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, @@ -411,26 +414,28 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) -#define ubifs_debugging_init(c) 0 -#define ubifs_debugging_exit(c) ({}) - -#define dbg_ntype(type) "" -#define dbg_cstate(cmt_state) "" -#define dbg_get_key_dump(c, key) ({}) -#define dbg_dump_inode(c, inode) ({}) 
-#define dbg_dump_node(c, node) ({}) -#define dbg_dump_budget_req(req) ({}) -#define dbg_dump_lstats(lst) ({}) -#define dbg_dump_budg(c) ({}) -#define dbg_dump_lprop(c, lp) ({}) -#define dbg_dump_lprops(c) ({}) -#define dbg_dump_lpt_info(c) ({}) -#define dbg_dump_leb(c, lnum) ({}) -#define dbg_dump_znode(c, znode) ({}) -#define dbg_dump_heap(c, heap, cat) ({}) -#define dbg_dump_pnode(c, pnode, parent, iip) ({}) -#define dbg_dump_tnc(c) ({}) -#define dbg_dump_index(c) ({}) +#define ubifs_debugging_init(c) 0 +#define ubifs_debugging_exit(c) ({}) + +#define dbg_ntype(type) "" +#define dbg_cstate(cmt_state) "" +#define dbg_get_key_dump(c, key) ({}) +#define dbg_dump_inode(c, inode) ({}) +#define dbg_dump_node(c, node) ({}) +#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) +#define dbg_dump_budget_req(req) ({}) +#define dbg_dump_lstats(lst) ({}) +#define dbg_dump_budg(c) ({}) +#define dbg_dump_lprop(c, lp) ({}) +#define dbg_dump_lprops(c) ({}) +#define dbg_dump_lpt_info(c) ({}) +#define dbg_dump_leb(c, lnum) ({}) +#define dbg_dump_znode(c, znode) ({}) +#define dbg_dump_heap(c, heap, cat) ({}) +#define dbg_dump_pnode(c, pnode, parent, iip) ({}) +#define dbg_dump_tnc(c) ({}) +#define dbg_dump_index(c) ({}) +#define dbg_dump_lpt_lebs(c) ({}) #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 #define dbg_old_index_check_init(c, zroot) 0 diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 93c181c742f2..6d914160ec55 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -557,7 +557,7 @@ static int calc_nnode_num(int row, int col) * This function calculates and returns the nnode number based on the parent's * nnode number and the index in parent. */ -static int calc_nnode_num_from_parent(struct ubifs_info *c, +static int calc_nnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int num, shft; @@ -582,7 +582,7 @@ static int calc_nnode_num_from_parent(struct ubifs_info *c, * This function calculates and returns the pnode number based on the parent's * nnode number and the index in parent. */ -static int calc_pnode_num_from_parent(struct ubifs_info *c, +static int calc_pnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0; @@ -965,7 +965,7 @@ static int check_lpt_type(uint8_t **addr, int *pos, int type) * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_pnode(struct ubifs_info *c, void *buf, +static int unpack_pnode(const struct ubifs_info *c, void *buf, struct ubifs_pnode *pnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; @@ -995,15 +995,15 @@ static int unpack_pnode(struct ubifs_info *c, void *buf, } /** - * unpack_nnode - unpack a nnode. + * ubifs_unpack_nnode - unpack a nnode. * @c: UBIFS file-system description object * @buf: buffer containing packed nnode to unpack * @nnode: nnode structure to fill * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_nnode(struct ubifs_info *c, void *buf, - struct ubifs_nnode *nnode) +int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, + struct ubifs_nnode *nnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1035,7 +1035,7 @@ static int unpack_nnode(struct ubifs_info *c, void *buf, * * This function returns %0 on success and a negative error code on failure. 
*/ -static int unpack_ltab(struct ubifs_info *c, void *buf) +static int unpack_ltab(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1067,7 +1067,7 @@ static int unpack_ltab(struct ubifs_info *c, void *buf) * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_lsave(struct ubifs_info *c, void *buf) +static int unpack_lsave(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1095,7 +1095,7 @@ static int unpack_lsave(struct ubifs_info *c, void *buf) * * This function returns %0 on success and a negative error code on failure. */ -static int validate_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode, +static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode, struct ubifs_nnode *parent, int iip) { int i, lvl, max_offs; @@ -1139,7 +1139,7 @@ static int validate_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode, * * This function returns %0 on success and a negative error code on failure. */ -static int validate_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, +static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode, struct ubifs_nnode *parent, int iip) { int i; @@ -1173,7 +1173,8 @@ static int validate_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, * This function calculates the LEB numbers for the LEB properties it contains * based on the pnode number. */ -static void set_pnode_lnum(struct ubifs_info *c, struct ubifs_pnode *pnode) +static void set_pnode_lnum(const struct ubifs_info *c, + struct ubifs_pnode *pnode) { int i, lnum; @@ -1226,7 +1227,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz); if (err) goto out; - err = unpack_nnode(c, buf, nnode); + err = ubifs_unpack_nnode(c, buf, nnode); if (err) goto out; } @@ -1815,7 +1816,7 @@ static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c, c->nnode_sz); if (err) return ERR_PTR(err); - err = unpack_nnode(c, buf, nnode); + err = ubifs_unpack_nnode(c, buf, nnode); if (err) return ERR_PTR(err); } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index c5c07f9cd22c..da60b5a0fab9 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -320,6 +320,7 @@ no_space: dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return err; } @@ -549,6 +550,7 @@ no_space: dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return err; } @@ -1027,7 +1029,7 @@ static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num, * @c: UBIFS file-system description object * @node_type: LPT node type */ -static int get_lpt_node_len(struct ubifs_info *c, int node_type) +static int get_lpt_node_len(const struct ubifs_info *c, int node_type) { switch (node_type) { case UBIFS_LPT_NNODE: @@ -1048,7 +1050,7 @@ static int get_lpt_node_len(struct ubifs_info *c, int node_type) * @buf: buffer * @len: length of buffer */ -static int get_pad_len(struct ubifs_info *c, uint8_t *buf, int len) +static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len) { int offs, pad_len; @@ -1065,7 +1067,8 @@ static int get_pad_len(struct ubifs_info *c, uint8_t *buf, 
int len) * @buf: buffer * @node_num: node number is returned here */ -static int get_lpt_node_type(struct ubifs_info *c, uint8_t *buf, int *node_num) +static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf, + int *node_num) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int pos = 0, node_type; @@ -1083,7 +1086,7 @@ static int get_lpt_node_type(struct ubifs_info *c, uint8_t *buf, int *node_num) * * This function returns %1 if the buffer contains a node or %0 if it does not. */ -static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) +static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int pos = 0, node_type, node_len; @@ -1107,7 +1110,6 @@ static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) return 1; } - /** * lpt_gc_lnum - garbage collect a LPT LEB. * @c: UBIFS file-system description object @@ -1724,6 +1726,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) dbg_err("LPT space error: free %lld lpt_sz %lld", free, c->lpt_sz); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return -EINVAL; } @@ -1808,6 +1811,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } if (err) { dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); } d->chk_lpt_sz2 = d->chk_lpt_sz; @@ -1825,4 +1829,121 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } } +/** + * dbg_dump_lpt_leb - dump an LPT LEB. + * @c: UBIFS file-system description object + * @lnum: LEB number to dump + * + * This function dumps an LEB from LPT area. Nodes in this area are very + * different to nodes in the main area (e.g., they do not have common headers, + * they do not have 8-byte alignments, etc), so we have a separate function to + * dump LPT area LEBs. Note, LPT has to be locked by the coller. 
+ */ +static void dump_lpt_leb(const struct ubifs_info *c, int lnum) +{ + int err, len = c->leb_size, node_type, node_num, node_len, offs; + void *buf = c->dbg->buf; + + printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", + current->pid, lnum); + err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); + if (err) { + ubifs_err("cannot read LEB %d, error %d", lnum, err); + return; + } + while (1) { + offs = c->leb_size - len; + if (!is_a_node(c, buf, len)) { + int pad_len; + + pad_len = get_pad_len(c, buf, len); + if (pad_len) { + printk(KERN_DEBUG "LEB %d:%d, pad %d bytes\n", + lnum, offs, pad_len); + buf += pad_len; + len -= pad_len; + continue; + } + if (len) + printk(KERN_DEBUG "LEB %d:%d, free %d bytes\n", + lnum, offs, len); + break; + } + + node_type = get_lpt_node_type(c, buf, &node_num); + switch (node_type) { + case UBIFS_LPT_PNODE: + { + node_len = c->pnode_sz; + if (c->big_lpt) + printk(KERN_DEBUG "LEB %d:%d, pnode num %d\n", + lnum, offs, node_num); + else + printk(KERN_DEBUG "LEB %d:%d, pnode\n", + lnum, offs); + break; + } + case UBIFS_LPT_NNODE: + { + int i; + struct ubifs_nnode nnode; + + node_len = c->nnode_sz; + if (c->big_lpt) + printk(KERN_DEBUG "LEB %d:%d, nnode num %d, ", + lnum, offs, node_num); + else + printk(KERN_DEBUG "LEB %d:%d, nnode, ", + lnum, offs); + err = ubifs_unpack_nnode(c, buf, &nnode); + for (i = 0; i < UBIFS_LPT_FANOUT; i++) { + printk("%d:%d", nnode.nbranch[i].lnum, + nnode.nbranch[i].offs); + if (i != UBIFS_LPT_FANOUT - 1) + printk(", "); + } + printk("\n"); + break; + } + case UBIFS_LPT_LTAB: + node_len = c->ltab_sz; + printk(KERN_DEBUG "LEB %d:%d, ltab\n", + lnum, offs); + break; + case UBIFS_LPT_LSAVE: + node_len = c->lsave_sz; + printk(KERN_DEBUG "LEB %d:%d, lsave len\n", lnum, offs); + break; + default: + ubifs_err("LPT node type %d not recognized", node_type); + return; + } + + buf += node_len; + len -= node_len; + } + + printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", + current->pid, lnum); +} + +/** + * dbg_dump_lpt_lebs - dump LPT lebs. + * @c: UBIFS file-system description object + * + * This function dumps all LPT LEBs. The caller has to make sure the LPT is + * locked. 
+ */ +void dbg_dump_lpt_lebs(const struct ubifs_info *c) +{ + int i; + + printk(KERN_DEBUG "(pid %d) start dumping all LPT LEBs\n", + current->pid); + for (i = 0; i < c->lpt_lebs; i++) + dump_lpt_leb(c, i + c->lpt_first); + printk(KERN_DEBUG "(pid %d) finish dumping all LPT LEBs\n", + current->pid); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4cf28e85de78..e658b06fd451 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1622,6 +1622,9 @@ void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty); void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits); struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); +/* Needed only in debugging code in lpt_commit.c */ +int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, + struct ubifs_nnode *nnode); /* lpt_commit.c */ int ubifs_lpt_start_commit(struct ubifs_info *c); -- cgit From 995be04548f62c8e6b447410cd28b0666614b461 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 4 Dec 2008 17:04:18 +0300 Subject: UBIFS: fix section mismatch This patch fixes the following section mismatch: WARNING: fs/ubifs/ubifs.o(.init.text+0xec): Section mismatch in reference from the function init_module() to the function .exit.text:ubifs_compressors_exit() Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 2 +- fs/ubifs/ubifs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 4afb3ea24d44..4c90ee2aef46 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -244,7 +244,7 @@ out_lzo: /** * ubifs_compressors_exit - de-initialize UBIFS compressors. */ -void __exit ubifs_compressors_exit(void) +void ubifs_compressors_exit(void) { compr_exit(&lzo_compr); compr_exit(&zlib_compr); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index e658b06fd451..055c6b52d2f6 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1700,7 +1700,7 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* compressor.c */ int __init ubifs_compressors_init(void); -void __exit ubifs_compressors_exit(void); +void ubifs_compressors_exit(void); void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, int *compr_type); int ubifs_decompress(const void *buf, int len, void *out, int *out_len, -- cgit From 7bbe5b5aa6d1e38af6f1fc866efc0aa461d73f19 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Dec 2008 11:02:51 -0500 Subject: UBIFS: use PAGE_CACHE_MASK correctly It has high bits set, not low bits set as the UBIFS code assumed. 
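To make the mask semantics concrete: PAGE_CACHE_MASK (like PAGE_MASK) keeps the page-aligned part of a file position, and its complement keeps the offset within the page, so an "is this position page-aligned?" test has to use the complement, as the hunks below now do. The following is only an illustrative userspace sketch, not UBIFS code; it assumes a 4 KiB page and redefines the macro locally so it can compile on its own.

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL
#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))	/* high bits set */

int main(void)
{
	unsigned long pos = 2 * PAGE_CACHE_SIZE + 100;	/* 100 bytes into the third page */

	/* page-aligned base of the position: the mask keeps the high bits */
	printf("base   %lu\n", pos & PAGE_CACHE_MASK);	/* prints 8192 */

	/* offset inside the page: the complement keeps the low bits */
	printf("offset %lu\n", pos & ~PAGE_CACHE_MASK);	/* prints 100 */

	/*
	 * The old test "!(pos & PAGE_CACHE_MASK)" is true only for positions
	 * inside the first page; the corrected "!(pos & ~PAGE_CACHE_MASK)" is
	 * true exactly when pos is page-aligned, which is what the
	 * write_begin paths below want to know before skipping do_readpage().
	 */
	return 0;
}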
Signed-off-by: Artem Bityutskiy --- fs/ubifs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 2624411d9758..7f1de98e6091 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -254,7 +254,7 @@ static int write_begin_slow(struct address_space *mapping, } if (!PageUptodate(page)) { - if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) SetPageChecked(page); else { err = do_readpage(page); @@ -444,7 +444,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* The page is not loaded from the flash */ - if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) /* * We change whole page so no need to load it. But we * have to set the @PG_checked flag to make the further -- cgit From 24fa9e9438b263600737c839b36543981d87d65b Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 17 Dec 2008 17:45:14 +0200 Subject: UBIFS: fix tnc dumping debugfs tnc dumping was broken because of an obvious typo. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 934db1855f09..367d97520d95 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2443,7 +2443,7 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf, spin_lock(&c->space_lock); dbg_dump_budg(c); spin_unlock(&c->space_lock); - } else if (file->f_path.dentry == d->dump_budg) { + } else if (file->f_path.dentry == d->dump_tnc) { mutex_lock(&c->tnc_mutex); dbg_dump_tnc(c); mutex_unlock(&c->tnc_mutex); -- cgit From 21a60258976227daaf7a4c35e96c3d77d4988b15 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 12 Dec 2008 11:13:17 -0500 Subject: UBIFS: improve budgeting dump Dump available space calculated by budgeting subsystem. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 4 ++-- fs/ubifs/debug.c | 13 +++++++++++++ fs/ubifs/ubifs.h | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 1a4973e10664..d5a65037e172 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -713,8 +713,8 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * (e.g., via the 'statfs()' call) reports that it has N bytes available, they * are able to write a file of size N. UBIFS attaches node headers to each data * node and it has to write indexind nodes as well. This introduces additional - * overhead, and UBIFS it has to report sligtly less free space to meet the - * above expectetion. + * overhead, and UBIFS has to report sligtly less free space to meet the above + * expectetions. 
* * This function assumes free space is made up of uncompressed data nodes and * full index nodes (one per data node, tripled because we always allow enough diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 367d97520d95..6ecb01a99d14 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -597,7 +597,9 @@ void dbg_dump_budg(struct ubifs_info *c) struct rb_node *rb; struct ubifs_bud *bud; struct ubifs_gced_idx_leb *idx_gc; + long long available, outstanding, free; + ubifs_assert(spin_is_locked(&c->space_lock)); spin_lock(&dbg_lock); printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, " "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid, @@ -630,6 +632,17 @@ void dbg_dump_budg(struct ubifs_info *c) printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n", idx_gc->lnum, idx_gc->unmap); printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state); + + /* Print budgeting predictions */ + available = ubifs_calc_available(c, c->min_idx_lebs); + outstanding = c->budg_data_growth + c->budg_dd_growth; + if (available > outstanding) + free = ubifs_reported_space(c, available - outstanding); + else + free = 0; + printk(KERN_DEBUG "Budgeting predictions:\n"); + printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", + available, outstanding, free); spin_unlock(&dbg_lock); } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 055c6b52d2f6..e61c08106b47 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -419,7 +419,7 @@ struct ubifs_unclean_leb { * * LPROPS_UNCAT: not categorized * LPROPS_DIRTY: dirty > 0, not index - * LPROPS_DIRTY_IDX: dirty + free > UBIFS_CH_SZ and index + * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sze and index * LPROPS_FREE: free > 0, not empty, not index * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs * LPROPS_EMPTY: LEB is empty, not taken -- cgit From d3cf502b6ccee1c52890d42cd18cbc98b7526126 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 16 Dec 2008 17:52:35 +0200 Subject: UBIFS: various comment improvements and fixes Signed-off-by: Artem Bityutskiy --- fs/ubifs/lprops.c | 12 ++++++------ fs/ubifs/ubifs.h | 34 ++++++++++++++++++---------------- 2 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 10ba663eb329..dfd2bcece27a 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -520,13 +520,13 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) * @flags: new flags * @idx_gc_cnt: change to the count of idx_gc list * - * This function changes LEB properties. This function does not change a LEB - * property (@free, @dirty or @flag) if the value passed is %LPROPS_NC. + * This function changes LEB properties (@free, @dirty or @flag). However, the + * property which has the %LPROPS_NC value is not changed. Returns a pointer to + * the updated LEB properties on success and a negative error code on failure. * - * This function returns a pointer to the updated LEB properties on success - * and a negative error code on failure. N.B. the LEB properties may have had to - * be copied (due to COW) and consequently the pointer returned may not be the - * same as the pointer passed. + * Note, the LEB properties may have had to be copied (due to COW) and + * consequently the pointer returned may not be the same as the pointer + * passed. 
*/ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, const struct ubifs_lprops *lp, diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index e61c08106b47..f8ef7c1def1f 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -482,24 +482,26 @@ struct ubifs_lpt_lprops { * @empty_lebs: number of empty LEBs * @taken_empty_lebs: number of taken LEBs * @idx_lebs: number of indexing LEBs - * @total_free: total free space in bytes - * @total_dirty: total dirty space in bytes - * @total_used: total used space in bytes (includes only data LEBs) - * @total_dead: total dead space in bytes (includes only data LEBs) - * @total_dark: total dark space in bytes (includes only data LEBs) + * @total_free: total free space in bytes (includes all LEBs) + * @total_dirty: total dirty space in bytes (includes all LEBs) + * @total_used: total used space in bytes (does not include index LEBs) + * @total_dead: total dead space in bytes (does not include index LEBs) + * @total_dark: total dark space in bytes (does not include index LEBs) * - * N.B. total_dirty and total_used are different to other total_* fields, - * because they account _all_ LEBs, not just data LEBs. + * The @taken_empty_lebs field counts the LEBs that are in the transient state + * of having been "taken" for use but not yet written to. @taken_empty_lebs is + * needed to account correctly for @gc_lnum, otherwise @empty_lebs could be + * used by itself (in which case 'unused_lebs' would be a better name). In the + * case of @gc_lnum, it is "taken" at mount time or whenever a LEB is retained + * by GC, but unlike other empty LEBs that are "taken", it may not be written + * straight away (i.e. before the next commit start or unmount), so either + * @gc_lnum must be specially accounted for, or the current approach followed + * i.e. count it under @taken_empty_lebs. * - * 'taken_empty_lebs' counts the LEBs that are in the transient state of having - * been 'taken' for use but not yet written to. 'taken_empty_lebs' is needed - * to account correctly for gc_lnum, otherwise 'empty_lebs' could be used - * by itself (in which case 'unused_lebs' would be a better name). In the case - * of gc_lnum, it is 'taken' at mount time or whenever a LEB is retained by GC, - * but unlike other empty LEBs that are 'taken', it may not be written straight - * away (i.e. before the next commit start or unmount), so either gc_lnum must - * be specially accounted for, or the current approach followed i.e. count it - * under 'taken_empty_lebs'. + * @empty_lebs includes @taken_empty_lebs. + * + * @total_used, @total_dead and @total_dark fields do not account indexing + * LEBs. */ struct ubifs_lp_stats { int empty_lebs; -- cgit From af14a1ad792621942a03e4bd0e5f17b6e177e2e0 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 19 Dec 2008 19:26:29 +0200 Subject: UBIFS: fix available blocks count Take into account that 2 eraseblocks are never available because they are reserved for the index. This gives more realistic count of FS blocks. To avoid future confusions like this, introduce a constant. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 9 ++------- fs/ubifs/super.c | 5 +++-- fs/ubifs/ubifs.h | 8 ++++++++ 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index d5a65037e172..e42342547001 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -280,13 +280,8 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) * extra LEB to compensate. 
*/ ret += 1; - /* - * At present the index needs at least 2 LEBs: one for the index head - * and one for in-the-gaps method (which currently does not cater for - * the index head and so excludes it from consideration). - */ - if (ret < 2) - ret = 2; + if (ret < MIN_INDEX_LEBS) + ret = MIN_INDEX_LEBS; return ret; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2dbaa4fc2cbb..a6a7798d020b 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -695,9 +695,10 @@ static int init_constants_late(struct ubifs_info *c) * necessary to report something for the 'statfs()' call. * * Subtract the LEB reserved for GC, the LEB which is reserved for - * deletions, and assume only one journal head is available. + * deletions, minimum LEBs for the index, and assume only one journal + * head is available. */ - tmp64 = c->main_lebs - 2 - c->jhead_cnt + 1; + tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f8ef7c1def1f..543e850022eb 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -63,6 +63,14 @@ #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL #define SQNUM_WATERMARK 0xFFFFFFFFFF000000ULL +/* + * Minimum amount of LEBs reserved for the index. At present the index needs at + * least 2 LEBs: one for the index head and one for in-the-gaps method (which + * currently does not cater for the index head and so excludes it from + * consideration). + */ +#define MIN_INDEX_LEBS 2 + /* Minimum amount of data UBIFS writes to the flash */ #define MIN_WRITE_SZ (UBIFS_DATA_NODE_SZ + 8) -- cgit From 4d61db4f87b527734ac0cc830dda8fcc4e2add2f Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 18 Dec 2008 14:06:51 +0200 Subject: UBIFS: use nicer 64-bit math Instead of using do_div(), use better primitives from linux/math64.h. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 25 +++++++++++-------------- fs/ubifs/debug.c | 1 + fs/ubifs/lpt.c | 15 ++++++--------- fs/ubifs/sb.c | 10 +++++----- fs/ubifs/super.c | 12 ++++++------ fs/ubifs/ubifs.h | 2 +- 6 files changed, 30 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index e42342547001..0bcb8031ca18 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -32,7 +32,7 @@ #include "ubifs.h" #include -#include +#include /* * When pessimistic budget calculations say that there is no enough space, @@ -258,8 +258,8 @@ static int make_free_space(struct ubifs_info *c, struct retries_info *ri) */ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) { - int ret; - uint64_t idx_size; + int idx_lebs, eff_leb_size = c->leb_size - c->max_idx_node_sz; + long long idx_size; idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx; @@ -271,18 +271,16 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) * pair, nor similarly the two variables for the new index size, so we * have to do this costly 64-bit division on fast-path. */ - if (do_div(idx_size, c->leb_size - c->max_idx_node_sz)) - ret = idx_size + 1; - else - ret = idx_size; + idx_size += eff_leb_size - 1; + idx_lebs = div_u64(idx_size, eff_leb_size); /* * The index head is not available for the in-the-gaps method, so add an * extra LEB to compensate. 
*/ - ret += 1; - if (ret < MIN_INDEX_LEBS) - ret = MIN_INDEX_LEBS; - return ret; + idx_lebs += 1; + if (idx_lebs < MIN_INDEX_LEBS) + idx_lebs = MIN_INDEX_LEBS; + return idx_lebs; } /** @@ -718,7 +716,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * Note, the calculation is pessimistic, which means that most of the time * UBIFS reports less space than it actually has. */ -long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) +long long ubifs_reported_space(const struct ubifs_info *c, long long free) { int divisor, factor, f; @@ -740,8 +738,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) divisor = UBIFS_MAX_DATA_NODE_SZ; divisor += (c->max_idx_node_sz * 3) / (f - 1); free *= factor; - do_div(free, divisor); - return free; + return div_u64(free, divisor); } /** diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 6ecb01a99d14..a2be11584ad3 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -33,6 +33,7 @@ #include #include #include +#include #ifdef CONFIG_UBIFS_FS_DEBUG diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 6d914160ec55..b2792e84d245 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -43,8 +43,9 @@ * mounted. */ -#include #include "ubifs.h" +#include +#include /** * do_calc_lpt_geom - calculate sizes for the LPT area. @@ -135,15 +136,13 @@ static void do_calc_lpt_geom(struct ubifs_info *c) int ubifs_calc_lpt_geom(struct ubifs_info *c) { int lebs_needed; - uint64_t sz; + long long sz; do_calc_lpt_geom(c); /* Verify that lpt_lebs is big enough */ sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ - sz += c->leb_size - 1; - do_div(sz, c->leb_size); - lebs_needed = sz; + lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { ubifs_err("too few LPT LEBs"); return -EINVAL; @@ -175,7 +174,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, int *big_lpt) { int i, lebs_needed; - uint64_t sz; + long long sz; /* Start by assuming the minimum number of LPT LEBs */ c->lpt_lebs = UBIFS_MIN_LPT_LEBS; @@ -202,9 +201,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, /* Now check there are enough LPT LEBs */ for (i = 0; i < 64 ; i++) { sz = c->lpt_sz * 4; /* Allow 4 times the size */ - sz += c->leb_size - 1; - do_div(sz, c->leb_size); - lebs_needed = sz; + lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { /* Not enough LPT LEBs so try again with more */ c->lpt_lebs = lebs_needed; diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index c5da201ab54f..e070c643d1bb 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -28,6 +28,7 @@ #include "ubifs.h" #include +#include /* * Default journal size in logical eraseblocks as a percent of total @@ -80,7 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c) int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first; int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; int min_leb_cnt = UBIFS_MIN_LEB_CNT; - uint64_t tmp64, main_bytes; + long long tmp64, main_bytes; __le64 tmp_le64; /* Some functions called from here depend on the @c->key_len filed */ @@ -160,7 +161,7 @@ static int create_default_filesystem(struct ubifs_info *c) if (!sup) return -ENOMEM; - tmp64 = (uint64_t)max_buds * c->leb_size; + tmp64 = (long long)max_buds * c->leb_size; if (big_lpt) sup_flags |= UBIFS_FLG_BIGLPT; @@ -187,9 +188,8 @@ static int create_default_filesystem(struct ubifs_info *c) generate_random_uuid(sup->uuid); - main_bytes = 
(uint64_t)main_lebs * c->leb_size; - tmp64 = main_bytes * DEFAULT_RP_PERCENT; - do_div(tmp64, 100); + main_bytes = (long long)main_lebs * c->leb_size; + tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100); if (tmp64 > DEFAULT_MAX_RP_SIZE) tmp64 = DEFAULT_MAX_RP_SIZE; sup->rp_size = cpu_to_le64(tmp64); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a6a7798d020b..c3cefc841374 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "ubifs.h" /* @@ -612,7 +613,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) static int init_constants_late(struct ubifs_info *c) { int tmp, err; - uint64_t tmp64; + long long tmp64; c->main_bytes = (long long)c->main_lebs * c->leb_size; c->max_znode_sz = sizeof(struct ubifs_znode) + @@ -639,9 +640,8 @@ static int init_constants_late(struct ubifs_info *c) * Make sure that the log is large enough to fit reference nodes for * all buds plus one reserved LEB. */ - tmp64 = c->max_bud_bytes; - tmp = do_div(tmp64, c->leb_size); - c->max_bud_cnt = tmp64 + !!tmp; + tmp64 = c->max_bud_bytes + c->leb_size - 1; + c->max_bud_cnt = div_u64(tmp64, c->leb_size); tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1); tmp /= c->leb_size; tmp += 1; @@ -677,7 +677,7 @@ static int init_constants_late(struct ubifs_info *c) * Consequently, if the journal is too small, UBIFS will treat it as * always full. */ - tmp64 = (uint64_t)(c->jhead_cnt + 1) * c->leb_size + 1; + tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1; if (c->bg_bud_bytes < tmp64) c->bg_bud_bytes = tmp64; if (c->max_bud_bytes < tmp64 + c->leb_size) @@ -699,7 +699,7 @@ static int init_constants_late(struct ubifs_info *c) * head is available. */ tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; - tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; + tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 543e850022eb..a17dd794ae92 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1498,7 +1498,7 @@ void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, long long ubifs_get_free_space(struct ubifs_info *c); int ubifs_calc_min_idx_lebs(struct ubifs_info *c); void ubifs_convert_page_budget(struct ubifs_info *c); -long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free); +long long ubifs_reported_space(const struct ubifs_info *c, long long free); long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); /* find.c */ -- cgit From 650ed50f4298e76007070b7ab9d640dfe7228ab3 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 22 Dec 2008 11:09:04 +0200 Subject: UBIFS: re-calculate min_idx_size after the commit When we commit, but before we try to write anything to the flash media, @c->min_idx_size is inaccurate, because we do not re-calculate it after the commit. Do not forget to do this. Signed-off-by: Artem Bityutskiy --- fs/ubifs/tnc_commit.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 3c0af452887b..fde8d127c768 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -802,8 +802,10 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot) * budgeting subsystem to assume the index is already committed, * even though it is not. 
*/ + ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); c->old_idx_sz = c->calc_idx_sz; c->budg_uncommitted_idx = 0; + c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); spin_unlock(&c->space_lock); mutex_unlock(&c->tnc_mutex); -- cgit From c8f915913afdfe1a796e312e21658b8edcf20868 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 19 Dec 2008 16:11:13 +0200 Subject: UBIFS: avoid unnecessary calculations Do not calculate min_idx_lebs, because it is available in c->min_idx_lebs Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 0bcb8031ca18..44cff803171a 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -763,7 +763,8 @@ long long ubifs_get_free_space(struct ubifs_info *c) long long available, outstanding, free; spin_lock(&c->space_lock); - min_idx_lebs = ubifs_calc_min_idx_lebs(c); + min_idx_lebs = c->min_idx_lebs; + ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c)); outstanding = c->budg_data_growth + c->budg_dd_growth; /* -- cgit From d6d7b702a3a1ca50f7ca2bebaa79c80425156bac Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 12 Nov 2008 16:49:48 -0600 Subject: dlm: fix up memory allocation flags Use ls_allocation for memory allocations, which a cluster fs sets to GFP_NOFS. Use GFP_NOFS for allocations when no lockspace struct is available. Taking dlm locks needs to avoid calling back into the cluster fs because write-out can require taking dlm locks. Cc: Christine Caulfield Signed-off-by: Steven Whitehouse Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 1 + fs/dlm/memory.c | 6 +++--- fs/dlm/midcomms.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 3962262f991a..1e720316300b 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -295,6 +295,7 @@ static int add_sock(struct socket *sock, struct connection *con) con->sock->sk->sk_write_space = lowcomms_write_space; con->sock->sk->sk_state_change = lowcomms_state_change; con->sock->sk->sk_user_data = con; + con->sock->sk->sk_allocation = GFP_NOFS; return 0; } diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index 54c14c6d06cb..c1775b84ebab 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -39,7 +39,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls) { char *p; - p = kzalloc(ls->ls_lvblen, GFP_KERNEL); + p = kzalloc(ls->ls_lvblen, ls->ls_allocation); return p; } @@ -57,7 +57,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen) DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,); - r = kzalloc(sizeof(*r) + namelen, GFP_KERNEL); + r = kzalloc(sizeof(*r) + namelen, ls->ls_allocation); return r; } @@ -72,7 +72,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) { struct dlm_lkb *lkb; - lkb = kmem_cache_zalloc(lkb_cache, GFP_KERNEL); + lkb = kmem_cache_zalloc(lkb_cache, ls->ls_allocation); return lkb; } diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index 07ac709f3ed7..f3396c622aec 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -112,7 +112,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base, ordinary messages). 
*/ if (msglen > sizeof(__tmp) && p == &__tmp.p) { - p = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); + p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS); if (p == NULL) return ret; } -- cgit From cd8e4679bdcf9b54564f2cda2389bd0f0457e12d Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 12 Nov 2008 16:28:43 -0600 Subject: dlm: trivial annotation of be16 value fs/dlm/dir.c:419:14: warning: incorrect type in assignment (different base types) fs/dlm/dir.c:419:14: expected unsigned short [unsigned] [addressable] [assigned] [usertype] be_namelen fs/dlm/dir.c:419:14: got restricted __be16 [usertype] Signed-off-by: Harvey Harrison Signed-off-by: David Teigland --- fs/dlm/dir.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 85defeb64df4..92969f879a17 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -374,7 +374,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; - uint16_t be_namelen; + __be16 be_namelen; down_read(&ls->ls_root_sem); @@ -410,15 +410,15 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) { /* Write end-of-block record */ - be_namelen = 0; - memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t)); - offset += sizeof(uint16_t); + be_namelen = cpu_to_be16(0); + memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); + offset += sizeof(__be16); goto out; } be_namelen = cpu_to_be16(r->res_length); - memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t)); - offset += sizeof(uint16_t); + memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); + offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; } @@ -430,9 +430,9 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, if ((list == &ls->ls_root_list) && (offset + sizeof(uint16_t) <= outlen)) { - be_namelen = 0xFFFF; - memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t)); - offset += sizeof(uint16_t); + be_namelen = cpu_to_be16(0xFFFF); + memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); + offset += sizeof(__be16); } out: -- cgit From 1521848cbb42935a52d11305c054b14461ad061c Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 12 Nov 2008 17:00:16 -0600 Subject: dlm: remove kmap/kunmap The pages used in lowcomms are not highmem, so kmap is not necessary. 
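Put differently, lowcomms only ever allocates lowmem pages, and for those page_address() already returns a kernel virtual address that stays valid for the lifetime of the page; kmap()/kunmap() would only matter if the pages could come from ZONE_HIGHMEM. A rough sketch of that distinction is below; the helper names are hypothetical and this is not the dlm code itself.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical helper: safe only because the page is a lowmem page. */
static void *lowmem_page_data(struct page *page, unsigned int offset)
{
	/* lowmem pages are permanently mapped, so no kmap() is needed */
	return page_address(page) + offset;
}

static int touch_lowmem_page(void)
{
	/* no __GFP_HIGHMEM in the mask, so the page cannot be highmem */
	struct page *page = alloc_page(GFP_NOFS);
	char *data;

	if (!page)
		return -ENOMEM;

	data = lowmem_page_data(page, 0);
	memset(data, 0, PAGE_SIZE);
	__free_page(page);
	return 0;
}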
Cc: Christine Caulfield Signed-off-by: Steven Whitehouse Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 1e720316300b..103a5ebd1371 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -824,7 +824,6 @@ static void sctp_init_assoc(struct connection *con) len = e->len; offset = e->offset; spin_unlock(&con->writequeue_lock); - kmap(e->page); /* Send the first block off the write queue */ iov[0].iov_base = page_address(e->page)+offset; @@ -855,7 +854,6 @@ static void sctp_init_assoc(struct connection *con) if (e->len == 0 && e->users == 0) { list_del(&e->list); - kunmap(e->page); free_entry(e); } spin_unlock(&con->writequeue_lock); @@ -1204,8 +1202,6 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) if (e) { got_one: - if (users == 0) - kmap(e->page); *ppc = page_address(e->page) + offset; return e; } @@ -1234,7 +1230,6 @@ void dlm_lowcomms_commit_buffer(void *mh) if (users) goto out; e->len = e->end - e->offset; - kunmap(e->page); spin_unlock(&con->writequeue_lock); if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) { @@ -1273,7 +1268,6 @@ static void send_to_sock(struct connection *con) offset = e->offset; BUG_ON(len == 0 && e->users == 0); spin_unlock(&con->writequeue_lock); - kmap(e->page); ret = 0; if (len) { @@ -1295,7 +1289,6 @@ static void send_to_sock(struct connection *con) if (e->len == 0 && e->users == 0) { list_del(&e->list); - kunmap(e->page); free_entry(e); continue; } -- cgit From d61e9aac96317a43c192f1faabfa95d4d675b7ce Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 10 Dec 2008 09:31:02 -0600 Subject: dlm: replace schedule with cond_resched This is a one-liner to use cond_resched() rather than schedule() in the ast delivery loop. It should not be necessary to schedule every time, so this will save some cpu time while continuing to allow scheduling when required. Signed-off-by: Steven Whitehouse Signed-off-by: David Teigland --- fs/dlm/ast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 8bf31e3fbf01..30c11f3855b3 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -101,7 +101,7 @@ static void process_asts(void) and may result in the lkb being freed */ dlm_put_lkb(lkb); - schedule(); + cond_resched(); } } -- cgit From 03339696314fffb95dafb349b84243358e945ce6 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Mon, 8 Dec 2008 17:14:10 -0600 Subject: dlm: remove extra blocking callback check Just before delivering a blocking callback (bast), the dlm_astd thread checks again that the granted mode of the lkb actually blocks the mode requested by the bast. The idea behind this was originally that the granted mode may have changed since the bast was queued, making the callback now unnecessary. 
Reasons for removing this extra check are:

- dlm_astd doesn't lock the rsb before reading the lkb grmode, so it's not
  technically safe (this removes the long standing FIXME)

- after running some tests, it doesn't appear the check ever actually
  eliminates a bast

- delivering an unnecessary blocking callback isn't a bad thing and can
  happen anyway

Signed-off-by: David Teigland
---
fs/dlm/ast.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'fs')

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 30c11f3855b3..09b167df790e 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -89,13 +89,8 @@ static void process_asts(void) if ((type & AST_COMP) && cast) cast(lkb->lkb_astparam);
- /* FIXME: Is it safe to look at lkb_grmode here
- without doing a lock_rsb() ?
- Look at other checks in v1 to avoid basts. */
- if ((type & AST_BAST) && bast)
- if (!dlm_modes_compat(lkb->lkb_grmode, bmode))
- bast(lkb->lkb_astparam, bmode);
+ bast(lkb->lkb_astparam, bmode);
/* this removes the reference added by dlm_add_ast and may result in the lkb being freed */
-- cgit
From fd22a51bcc0b7b76fc729b02316214fd979f9fe1 Mon Sep 17 00:00:00 2001
From: David Teigland
Date: Tue, 9 Dec 2008 11:55:46 -0600
Subject: dlm: improve bast mode handling

The lkb bastmode value is set in the context of processing the lock, and read
by the dlm_astd thread. Because it's accessed in these two separate contexts,
the writing/reading ought to be done under a lock. This is simple to do by
setting it and reading it when the lkb is added to and removed from
dlm_astd's callback list, which is properly locked.

Signed-off-by: David Teigland
---
fs/dlm/ast.c | 14 ++++++++------
fs/dlm/ast.h | 4 ++--
fs/dlm/lock.c | 8 +++-----
fs/dlm/user.c | 4 +++-
fs/dlm/user.h | 2 +-
5 files changed, 17 insertions(+), 15 deletions(-)

(limited to 'fs')

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 09b167df790e..fbe840d09493 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -33,10 +33,10 @@ void dlm_del_ast(struct dlm_lkb *lkb) spin_unlock(&ast_queue_lock); } -void dlm_add_ast(struct dlm_lkb *lkb, int type) +void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode) { if (lkb->lkb_flags & DLM_IFL_USER) { - dlm_user_add_ast(lkb, type); + dlm_user_add_ast(lkb, type, bastmode); return; } @@ -46,6 +46,8 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type) list_add_tail(&lkb->lkb_astqueue, &ast_queue); } lkb->lkb_ast_type |= type; + if (bastmode) + lkb->lkb_bastmode = bastmode; spin_unlock(&ast_queue_lock); set_bit(WAKE_ASTS, &astd_wakeflags); @@ -59,7 +61,7 @@ static void process_asts(void) struct dlm_lkb *lkb; void (*cast) (void *astparam); void (*bast) (void *astparam, int mode); - int type = 0, found, bmode; + int type = 0, found, bastmode; for (;;) { found = 0; @@ -74,6 +76,7 @@ static void process_asts(void) list_del(&lkb->lkb_astqueue); type = lkb->lkb_ast_type; lkb->lkb_ast_type = 0; + bastmode = lkb->lkb_bastmode; found = 1; break; } @@ -84,13 +87,12 @@ static void process_asts(void) cast = lkb->lkb_astfn; bast = lkb->lkb_bastfn; - bmode = lkb->lkb_bastmode; if ((type & AST_COMP) && cast) cast(lkb->lkb_astparam); if ((type & AST_BAST) && bast) - bast(lkb->lkb_astparam, bmode); + bast(lkb->lkb_astparam, bastmode); /* this removes the reference added by dlm_add_ast and may result in the lkb being freed */ diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h index 6ee276c74c52..1b5fc5f428fd 100644 --- a/fs/dlm/ast.h +++ b/fs/dlm/ast.h @@ -1,7 +1,7 @@ /****************************************************************************** ******************************************************************************* ** -** Copyright (C) 2005 Red Hat, Inc. All rights reserved. +** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -13,7 +13,7 @@ #ifndef __ASTD_DOT_H__ #define __ASTD_DOT_H__ -void dlm_add_ast(struct dlm_lkb *lkb, int type); +void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode); void dlm_del_ast(struct dlm_lkb *lkb); void dlm_astd_wake(void); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 724ddac91538..7b758dadbdd6 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) lkb->lkb_lksb->sb_status = rv; lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags; - dlm_add_ast(lkb, AST_COMP); + dlm_add_ast(lkb, AST_COMP, 0); } static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -320,10 +320,8 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) { if (is_master_copy(lkb)) send_bast(r, lkb, rqmode); - else { - lkb->lkb_bastmode = rqmode; - dlm_add_ast(lkb, AST_BAST); - } + else + dlm_add_ast(lkb, AST_BAST, rqmode); } /* diff --git a/fs/dlm/user.c b/fs/dlm/user.c index b3832c67194a..065149e84f42 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -175,7 +175,7 @@ static int lkb_is_endoflife(struct dlm_lkb *lkb, int sb_status, int type) /* we could possibly check if the cancel of an orphan has resulted in the lkb being removed and then remove that lkb from the orphans list and free it */ -void dlm_user_add_ast(struct dlm_lkb *lkb, int type) +void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode) { struct dlm_ls *ls; struct dlm_user_args *ua; @@ -208,6 +208,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type) ast_type = lkb->lkb_ast_type; lkb->lkb_ast_type |= type; + if (bastmode) + lkb->lkb_bastmode = bastmode; if (!ast_type) { kref_get(&lkb->lkb_ref); diff --git a/fs/dlm/user.h b/fs/dlm/user.h index 35eb6a13d616..1c9686492286 100644 --- a/fs/dlm/user.h +++ b/fs/dlm/user.h @@ -9,7 +9,7 @@ #ifndef __USER_DOT_H__ #define __USER_DOT_H__ -void dlm_user_add_ast(struct dlm_lkb *lkb, int type); +void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode); int dlm_user_init(void); void dlm_user_exit(void); int dlm_device_deregister(struct dlm_ls *ls); -- cgit From eeda418d8c2646f33f24e9ad33d86c239adc6de7 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Tue, 9 Dec 2008 14:12:21 -0600 Subject: dlm: change lock time stamping Use ktime instead of jiffies for timestamping lkb's. Also stamp the time on every lkb whenever it's added to a resource queue, instead of just stamping locks subject to timeouts. This will allow us to use timestamps more widely for debugging all locks. 
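The pattern is simply to take a ktime_t stamp whenever an lkb changes queues and to convert the difference to microseconds when it is inspected, instead of doing jiffies/HZ arithmetic. A minimal sketch of that pattern follows; the structure and helper names are hypothetical, not the dlm code.

#include <linux/ktime.h>

struct queued_item {
	ktime_t timestamp;		/* stamped on every queue change */
};

static void item_enqueue(struct queued_item *item)
{
	item->timestamp = ktime_get();	/* monotonic, independent of HZ */
}

static s64 item_wait_us(const struct queued_item *item)
{
	/* microseconds the item has spent on its current queue */
	return ktime_to_us(ktime_sub(ktime_get(), item->timestamp));
}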
Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 14 +++++++------- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 21 +++++++++++---------- fs/dlm/netlink.c | 1 - 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 8fc24f4507a3..19e4f9eb44e1 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -162,21 +162,21 @@ static int print_resource(struct dlm_rsb *res, struct seq_file *s) static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) { - unsigned int waiting = 0; - uint64_t xid = 0; + u64 xid = 0; + u64 us; if (lkb->lkb_flags & DLM_IFL_USER) { if (lkb->lkb_ua) xid = lkb->lkb_ua->xid; } - if (lkb->lkb_timestamp) - waiting = jiffies_to_msecs(jiffies - lkb->lkb_timestamp); + /* microseconds since lkb was added to current queue */ + us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_timestamp)); - /* id nodeid remid pid xid exflags flags sts grmode rqmode time_ms + /* id nodeid remid pid xid exflags flags sts grmode rqmode time_us r_nodeid r_len r_name */ - seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %u %u %d \"%s\"\n", + seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n", lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_remid, @@ -187,7 +187,7 @@ static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb * lkb->lkb_status, lkb->lkb_grmode, lkb->lkb_rqmode, - waiting, + (unsigned long long)us, r->res_nodeid, r->res_length, r->res_name); diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 868e4c9ef127..e69135c83d5d 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -245,7 +245,7 @@ struct dlm_lkb { struct list_head lkb_astqueue; /* need ast to be sent */ struct list_head lkb_ownqueue; /* list of locks for a process */ struct list_head lkb_time_list; - unsigned long lkb_timestamp; + ktime_t lkb_timestamp; unsigned long lkb_timeout_cs; char *lkb_lvbptr; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 7b758dadbdd6..dfc57ae27045 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -742,6 +742,8 @@ static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status) DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); + lkb->lkb_timestamp = ktime_get(); + lkb->lkb_status = status; switch (status) { @@ -1011,10 +1013,8 @@ static void add_timeout(struct dlm_lkb *lkb) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; - if (is_master_copy(lkb)) { - lkb->lkb_timestamp = jiffies; + if (is_master_copy(lkb)) return; - } if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { @@ -1029,7 +1029,6 @@ static void add_timeout(struct dlm_lkb *lkb) DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb);); mutex_lock(&ls->ls_timeout_mutex); hold_lkb(lkb); - lkb->lkb_timestamp = jiffies; list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout); mutex_unlock(&ls->ls_timeout_mutex); } @@ -1057,6 +1056,7 @@ void dlm_scan_timeout(struct dlm_ls *ls) struct dlm_rsb *r; struct dlm_lkb *lkb; int do_cancel, do_warn; + s64 wait_us; for (;;) { if (dlm_locking_stopped(ls)) @@ -1067,14 +1067,15 @@ void dlm_scan_timeout(struct dlm_ls *ls) mutex_lock(&ls->ls_timeout_mutex); list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) { + wait_us = ktime_to_us(ktime_sub(ktime_get(), + lkb->lkb_timestamp)); + if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) && - time_after_eq(jiffies, lkb->lkb_timestamp + - lkb->lkb_timeout_cs * HZ/100)) + wait_us >= (lkb->lkb_timeout_cs * 10000)) do_cancel = 1; if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && - 
time_after_eq(jiffies, lkb->lkb_timestamp + - dlm_config.ci_timewarn_cs * HZ/100)) + wait_us >= dlm_config.ci_timewarn_cs * 10000) do_warn = 1; if (!do_cancel && !do_warn) @@ -1120,12 +1121,12 @@ void dlm_scan_timeout(struct dlm_ls *ls) void dlm_adjust_timeouts(struct dlm_ls *ls) { struct dlm_lkb *lkb; - long adj = jiffies - ls->ls_recover_begin; + u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin); ls->ls_recover_begin = 0; mutex_lock(&ls->ls_timeout_mutex); list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) - lkb->lkb_timestamp += adj; + lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us); mutex_unlock(&ls->ls_timeout_mutex); } diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index 18bda83cc892..46e582c8d603 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -115,7 +115,6 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) data->status = lkb->lkb_status; data->grmode = lkb->lkb_grmode; data->rqmode = lkb->lkb_rqmode; - data->timestamp = lkb->lkb_timestamp; if (lkb->lkb_ua) data->xid = lkb->lkb_ua->xid; if (r) { -- cgit From e3a84ad495d1fddb542e0922160f0194a1361950 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Tue, 9 Dec 2008 14:47:29 -0600 Subject: dlm: add time stamp of blocking callback Record the time the latest blocking callback was queued for a lock. This will be used for debugging in combination with lock queue timestamp changes in the previous patch. Signed-off-by: David Teigland --- fs/dlm/dlm_internal.h | 1 + fs/dlm/lock.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index e69135c83d5d..0c4882951923 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -245,6 +245,7 @@ struct dlm_lkb { struct list_head lkb_astqueue; /* need ast to be sent */ struct list_head lkb_ownqueue; /* list of locks for a process */ struct list_head lkb_time_list; + ktime_t lkb_time_bast; /* for debugging */ ktime_t lkb_timestamp; unsigned long lkb_timeout_cs; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index dfc57ae27045..6cfe65bbf4a2 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -318,6 +318,8 @@ static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) { + lkb->lkb_time_bast = ktime_get(); + if (is_master_copy(lkb)) send_bast(r, lkb, rqmode); else -- cgit From d022509d1c54be4918e7fc8f1195ee8c392e9a57 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Tue, 16 Dec 2008 14:53:23 -0600 Subject: dlm: add new debugfs entry The new debugfs entry dumps all rsb and lkb structures, and includes a lot more information than has been available before. This includes the new timestamps added by a previous patch for debugging callback issues. Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 296 +++++++++++++++++++++++++++++++++++++++++--------- fs/dlm/dlm_internal.h | 1 + 2 files changed, 247 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 19e4f9eb44e1..2f107d1a6a45 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -1,7 +1,7 @@ /****************************************************************************** ******************************************************************************* ** -** Copyright (C) 2005 Red Hat, Inc. All rights reserved. +** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -27,7 +27,7 @@ static struct dentry *dlm_root; struct rsb_iter { int entry; - int locks; + int format; int header; struct dlm_ls *ls; struct list_head *next; @@ -60,8 +60,8 @@ static char *print_lockmode(int mode) } } -static void print_resource_lock(struct seq_file *s, struct dlm_lkb *lkb, - struct dlm_rsb *res) +static void print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb, + struct dlm_rsb *res) { seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode)); @@ -83,7 +83,7 @@ static void print_resource_lock(struct seq_file *s, struct dlm_lkb *lkb, seq_printf(s, "\n"); } -static int print_resource(struct dlm_rsb *res, struct seq_file *s) +static int print_format1(struct dlm_rsb *res, struct seq_file *s) { struct dlm_lkb *lkb; int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list; @@ -134,15 +134,15 @@ static int print_resource(struct dlm_rsb *res, struct seq_file *s) /* Print the locks attached to this resource */ seq_printf(s, "Granted Queue\n"); list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue) - print_resource_lock(s, lkb, res); + print_format1_lock(s, lkb, res); seq_printf(s, "Conversion Queue\n"); list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue) - print_resource_lock(s, lkb, res); + print_format1_lock(s, lkb, res); seq_printf(s, "Waiting Queue\n"); list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue) - print_resource_lock(s, lkb, res); + print_format1_lock(s, lkb, res); if (list_empty(&res->res_lookup)) goto out; @@ -160,7 +160,8 @@ static int print_resource(struct dlm_rsb *res, struct seq_file *s) return 0; } -static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) +static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb, + struct dlm_rsb *r) { u64 xid = 0; u64 us; @@ -193,20 +194,108 @@ static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb * r->res_name); } -static int print_locks(struct dlm_rsb *r, struct seq_file *s) +static int print_format2(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; lock_rsb(r); list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) - print_lock(s, lkb, r); + print_format2_lock(s, lkb, r); list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) - print_lock(s, lkb, r); + print_format2_lock(s, lkb, r); list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) - print_lock(s, lkb, r); + print_format2_lock(s, lkb, r); + + unlock_rsb(r); + return 0; +} + +static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, + int rsb_lookup) +{ + u64 xid = 0; + + if (lkb->lkb_flags & DLM_IFL_USER) { + if (lkb->lkb_ua) + xid = lkb->lkb_ua->xid; + } + + seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n", + lkb->lkb_id, + lkb->lkb_nodeid, + lkb->lkb_remid, + lkb->lkb_ownpid, + (unsigned long long)xid, + lkb->lkb_exflags, + lkb->lkb_flags, + lkb->lkb_status, + lkb->lkb_grmode, + lkb->lkb_rqmode, + lkb->lkb_highbast, + rsb_lookup, + lkb->lkb_wait_type, + lkb->lkb_lvbseq, + (unsigned long long)ktime_to_ns(lkb->lkb_timestamp), + (unsigned long long)ktime_to_ns(lkb->lkb_time_bast)); +} + +static int print_format3(struct dlm_rsb *r, struct seq_file *s) +{ + struct dlm_lkb *lkb; + int i, lvblen = r->res_ls->ls_lvblen; + int print_name = 1; + + lock_rsb(r); + + seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ", + r, + r->res_nodeid, + 
r->res_first_lkid, + r->res_flags, + !list_empty(&r->res_root_list), + !list_empty(&r->res_recover_list), + r->res_recover_locks_count, + r->res_length); + + for (i = 0; i < r->res_length; i++) { + if (!isascii(r->res_name[i]) || !isprint(r->res_name[i])) + print_name = 0; + } + + seq_printf(s, "%s", print_name ? "str " : "hex"); + + for (i = 0; i < r->res_length; i++) { + if (print_name) + seq_printf(s, "%c", r->res_name[i]); + else + seq_printf(s, " %02x", (unsigned char)r->res_name[i]); + } + seq_printf(s, "\n"); + + if (!r->res_lvbptr) + goto do_locks; + + seq_printf(s, "lvb %u %d", r->res_lvbseq, lvblen); + + for (i = 0; i < lvblen; i++) + seq_printf(s, " %02x", (unsigned char)r->res_lvbptr[i]); + seq_printf(s, "\n"); + + do_locks: + list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) + print_format3_lock(s, lkb, 0); + + list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) + print_format3_lock(s, lkb, 0); + + list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) + print_format3_lock(s, lkb, 0); + + list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) + print_format3_lock(s, lkb, 1); unlock_rsb(r); return 0; @@ -231,7 +320,7 @@ static int rsb_iter_next(struct rsb_iter *ri) break; } read_unlock(&ls->ls_rsbtbl[i].lock); - } + } ri->entry = i; if (ri->entry >= ls->ls_rsbtbl_size) @@ -248,7 +337,7 @@ static int rsb_iter_next(struct rsb_iter *ri) read_unlock(&ls->ls_rsbtbl[i].lock); dlm_put_rsb(old); goto top; - } + } ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain); dlm_hold_rsb(ri->rsb); read_unlock(&ls->ls_rsbtbl[i].lock); @@ -274,6 +363,7 @@ static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls) ri->ls = ls; ri->entry = 0; ri->next = NULL; + ri->format = 1; if (rsb_iter_next(ri)) { rsb_iter_free(ri); @@ -325,16 +415,26 @@ static int rsb_seq_show(struct seq_file *file, void *iter_ptr) { struct rsb_iter *ri = iter_ptr; - if (ri->locks) { + switch (ri->format) { + case 1: + print_format1(ri->rsb, file); + break; + case 2: if (ri->header) { - seq_printf(file, "id nodeid remid pid xid exflags flags " - "sts grmode rqmode time_ms r_nodeid " - "r_len r_name\n"); + seq_printf(file, "id nodeid remid pid xid exflags " + "flags sts grmode rqmode time_ms " + "r_nodeid r_len r_name\n"); ri->header = 0; } - print_locks(ri->rsb, file); - } else { - print_resource(ri->rsb, file); + print_format2(ri->rsb, file); + break; + case 3: + if (ri->header) { + seq_printf(file, "version rsb 1.1 lvb 1.1 lkb 1.1\n"); + ri->header = 0; + } + print_format3(ri->rsb, file); + break; } return 0; @@ -385,7 +485,7 @@ static struct rsb_iter *locks_iter_init(struct dlm_ls *ls, loff_t *pos) ri->ls = ls; ri->entry = 0; ri->next = NULL; - ri->locks = 1; + ri->format = 2; if (*pos == 0) ri->header = 1; @@ -447,6 +547,84 @@ static const struct file_operations locks_fops = { .release = seq_release }; +/* + * Dump all rsb/lvb/lkb state in compact listing, more complete than _locks + * This can replace both formats 1 and 2 eventually. 
+ */ + +static struct rsb_iter *all_iter_init(struct dlm_ls *ls, loff_t *pos) +{ + struct rsb_iter *ri; + + ri = kzalloc(sizeof *ri, GFP_KERNEL); + if (!ri) + return NULL; + + ri->ls = ls; + ri->entry = 0; + ri->next = NULL; + ri->format = 3; + + if (*pos == 0) + ri->header = 1; + + if (rsb_iter_next(ri)) { + rsb_iter_free(ri); + return NULL; + } + + return ri; +} + +static void *all_seq_start(struct seq_file *file, loff_t *pos) +{ + struct rsb_iter *ri; + loff_t n = *pos; + + ri = all_iter_init(file->private, pos); + if (!ri) + return NULL; + + while (n--) { + if (rsb_iter_next(ri)) { + rsb_iter_free(ri); + return NULL; + } + } + + return ri; +} + +static struct seq_operations all_seq_ops = { + .start = all_seq_start, + .next = rsb_seq_next, + .stop = rsb_seq_stop, + .show = rsb_seq_show, +}; + +static int all_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &all_seq_ops); + if (ret) + return ret; + + seq = file->private_data; + seq->private = inode->i_private; + + return 0; +} + +static const struct file_operations all_fops = { + .owner = THIS_MODULE, + .open = all_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + /* * dump lkb's on the ls_waiters list */ @@ -489,30 +667,33 @@ static const struct file_operations waiters_fops = { .read = waiters_read }; +void dlm_delete_debug_file(struct dlm_ls *ls) +{ + if (ls->ls_debug_rsb_dentry) + debugfs_remove(ls->ls_debug_rsb_dentry); + if (ls->ls_debug_waiters_dentry) + debugfs_remove(ls->ls_debug_waiters_dentry); + if (ls->ls_debug_locks_dentry) + debugfs_remove(ls->ls_debug_locks_dentry); + if (ls->ls_debug_all_dentry) + debugfs_remove(ls->ls_debug_all_dentry); +} + int dlm_create_debug_file(struct dlm_ls *ls) { char name[DLM_LOCKSPACE_LEN+8]; + /* format 1 */ + ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name, S_IFREG | S_IRUGO, dlm_root, ls, &rsb_fops); if (!ls->ls_debug_rsb_dentry) - return -ENOMEM; + goto fail; - memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name); - - ls->ls_debug_waiters_dentry = debugfs_create_file(name, - S_IFREG | S_IRUGO, - dlm_root, - ls, - &waiters_fops); - if (!ls->ls_debug_waiters_dentry) { - debugfs_remove(ls->ls_debug_rsb_dentry); - return -ENOMEM; - } + /* format 2 */ memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_locks", ls->ls_name); @@ -522,23 +703,38 @@ int dlm_create_debug_file(struct dlm_ls *ls) dlm_root, ls, &locks_fops); - if (!ls->ls_debug_locks_dentry) { - debugfs_remove(ls->ls_debug_waiters_dentry); - debugfs_remove(ls->ls_debug_rsb_dentry); - return -ENOMEM; - } + if (!ls->ls_debug_locks_dentry) + goto fail; + + /* format 3 */ + + memset(name, 0, sizeof(name)); + snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_all", ls->ls_name); + + ls->ls_debug_all_dentry = debugfs_create_file(name, + S_IFREG | S_IRUGO, + dlm_root, + ls, + &all_fops); + if (!ls->ls_debug_all_dentry) + goto fail; + + memset(name, 0, sizeof(name)); + snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name); + + ls->ls_debug_waiters_dentry = debugfs_create_file(name, + S_IFREG | S_IRUGO, + dlm_root, + ls, + &waiters_fops); + if (!ls->ls_debug_waiters_dentry) + goto fail; return 0; -} -void dlm_delete_debug_file(struct dlm_ls *ls) -{ - if (ls->ls_debug_rsb_dentry) - debugfs_remove(ls->ls_debug_rsb_dentry); - if (ls->ls_debug_waiters_dentry) - debugfs_remove(ls->ls_debug_waiters_dentry); - if (ls->ls_debug_locks_dentry) - debugfs_remove(ls->ls_debug_locks_dentry); + fail: + 
dlm_delete_debug_file(ls); + return -ENOMEM; } int __init dlm_register_debugfs(void) diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 0c4882951923..ef2f1e353966 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -482,6 +482,7 @@ struct dlm_ls { struct dentry *ls_debug_rsb_dentry; /* debugfs */ struct dentry *ls_debug_waiters_dentry; /* debugfs */ struct dentry *ls_debug_locks_dentry; /* debugfs */ + struct dentry *ls_debug_all_dentry; /* debugfs */ wait_queue_head_t ls_uevent_wait; /* user part of join/leave */ int ls_uevent_result; -- cgit From 722d74219ea21223c74e5e894b0afcc5e4ca75a7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 23 Dec 2008 10:22:56 -0600 Subject: dlm: fs/dlm/ast.c: fix warning fs/dlm/ast.c: In function 'dlm_astd': fs/dlm/ast.c:64: warning: 'bastmode' may be used uninitialized in this function Cleans code up. Signed-off-by: Andrew Morton Signed-off-by: David Teigland --- fs/dlm/ast.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index fbe840d09493..dc2ad6008b2d 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -61,30 +61,23 @@ static void process_asts(void) struct dlm_lkb *lkb; void (*cast) (void *astparam); void (*bast) (void *astparam, int mode); - int type = 0, found, bastmode; - - for (;;) { - found = 0; - spin_lock(&ast_queue_lock); - list_for_each_entry(lkb, &ast_queue, lkb_astqueue) { - r = lkb->lkb_resource; - ls = r->res_ls; - - if (dlm_locking_stopped(ls)) - continue; - - list_del(&lkb->lkb_astqueue); - type = lkb->lkb_ast_type; - lkb->lkb_ast_type = 0; - bastmode = lkb->lkb_bastmode; - found = 1; - break; - } - spin_unlock(&ast_queue_lock); + int type = 0, bastmode; + +repeat: + spin_lock(&ast_queue_lock); + list_for_each_entry(lkb, &ast_queue, lkb_astqueue) { + r = lkb->lkb_resource; + ls = r->res_ls; - if (!found) - break; + if (dlm_locking_stopped(ls)) + continue; + list_del(&lkb->lkb_astqueue); + type = lkb->lkb_ast_type; + lkb->lkb_ast_type = 0; + bastmode = lkb->lkb_bastmode; + + spin_unlock(&ast_queue_lock); cast = lkb->lkb_astfn; bast = lkb->lkb_bastfn; @@ -99,7 +92,9 @@ static void process_asts(void) dlm_put_lkb(lkb); cond_resched(); + goto repeat; } + spin_unlock(&ast_queue_lock); } static inline int no_asts(void) -- cgit From 26ddd8d5cac8a563953d5febe8c6e40909f7bce1 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Fri, 26 Dec 2008 14:24:10 +0900 Subject: proc: remove ifdef CONFIG_SPARSE_IRQ from stat.c Impact: cleanup irq_desc can be NULL when CONFIG_SPARSE_IRQ=y only. therefore, NULL checking can move into kstat_irqs_cpu() of SPARSE_IRQ version. Signed-off-by: KOSAKI Motohiro Acked-by: "Yinghai Lu" Signed-off-by: Ingo Molnar --- fs/proc/stat.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 3bb1cf1e7425..f75efa22df5e 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #ifndef arch_irq_stat_cpu @@ -45,10 +46,6 @@ static int show_stat(struct seq_file *p, void *v) steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); for_each_irq_nr(j) { -#ifdef CONFIG_SPARSE_IRQ - if (!irq_to_desc(j)) - continue; -#endif sum += kstat_irqs_cpu(j, i); } sum += arch_irq_stat_cpu(i); @@ -95,12 +92,6 @@ static int show_stat(struct seq_file *p, void *v) /* sum again ? it could be updated? 
*/ for_each_irq_nr(j) { per_irq_sum = 0; -#ifdef CONFIG_SPARSE_IRQ - if (!irq_to_desc(j)) { - seq_printf(p, " %u", per_irq_sum); - continue; - } -#endif for_each_possible_cpu(i) per_irq_sum += kstat_irqs_cpu(j, i); -- cgit From cb78a0ce69fad2026825f957e24e2d9cda1ec9f1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 30 Dec 2008 09:05:14 +1030 Subject: bitmap: fix seq_bitmap and seq_cpumask to take const pointer Impact: cleanup seq_bitmap just calls bitmap_scnprintf on the bits: that arg can be const. Similarly, seq_cpumask just calls seq_bitmap. Signed-off-by: Rusty Russell --- fs/seq_file.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index 16c211558c22..c99358a52176 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -462,7 +462,8 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc) return -1; } -int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits) { if (m->count < m->size) { int len = bitmap_scnprintf(m->buf + m->count, -- cgit From 79807d075ab8d1ca3574f5f52421e0047c1f1256 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 27 Dec 2008 19:18:00 +0200 Subject: UBIFS: fix constants initialization The c->min_idx_lebs constant depends on c->old_idx_sz, which is read from the master node. This means that we have to initialize c->min_idx_lebs only after we have read the master node. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index c3cefc841374..13097830e8bc 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -602,7 +602,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) } /* - * init_constants_late - initialize UBIFS constants. + * init_constants_sb - initialize UBIFS constants. * @c: UBIFS file-system description object * * This is a helper function which initializes various UBIFS constants after @@ -610,7 +610,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) * makes sure they are all right. Returns zero in case of success and a * negative error code in case of failure. */ -static int init_constants_late(struct ubifs_info *c) +static int init_constants_sb(struct ubifs_info *c) { int tmp, err; long long tmp64; @@ -687,6 +687,21 @@ static int init_constants_late(struct ubifs_info *c) if (err) return err; + return 0; +} + +/* + * init_constants_master - initialize UBIFS constants. + * @c: UBIFS file-system description object + * + * This is a helper function which initializes various UBIFS constants after + * the master node has been read. It also checks various UBIFS parameters and + * makes sure they are all right. 
+ */ +static void init_constants_master(struct ubifs_info *c) +{ + long long tmp64; + c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); /* @@ -702,8 +717,6 @@ static int init_constants_late(struct ubifs_info *c) tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; - - return 0; } /** @@ -1138,7 +1151,7 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } - err = init_constants_late(c); + err = init_constants_sb(c); if (err) goto out_free; @@ -1172,6 +1185,8 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_master; + init_constants_master(c); + if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; -- cgit From 304d427cd99eb645b44b08d77e70ce308e6bcd8c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:04:17 +0200 Subject: UBIFS: fix file-system synchronization Argh. The ->sync_fs call is called _before_ all inodes are flushed. This means we first sync write buffers and commit, then all inodes are synced, and we end up with unflushed write buffers! Fix this by forcing synching all indoes from 'ubifs_sync_fs()'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 13097830e8bc..471301799c52 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "ubifs.h" /* @@ -431,6 +432,23 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) struct ubifs_info *c = sb->s_fs_info; int i, ret = 0, err; long long bud_bytes; + struct writeback_control wbc = { + .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD, + .range_start = 0, + .range_end = LLONG_MAX, + .nr_to_write = LONG_MAX, + }; + + /* + * VFS calls '->sync_fs()' before synchronizing all dirty inodes and + * pages, so synchronize them first, then commit the journal. Strictly + * speaking, it is not necessary to commit the journal here, + * synchronizing write-buffers would be enough. But committing makes + * UBIFS free space predictions much more accurate, so we want to let + * the user be able to get more accurate results of 'statfs()' after + * they synchronize the file system. + */ + generic_sync_sb_inodes(sb, &wbc); if (c->jheads) { for (i = 0; i < c->jhead_cnt; i++) { -- cgit From f10383006c26b33539820759b9dc8656497b02a4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:16:32 +0200 Subject: UBIFS: always commit in sync_fs Always run commit in sync_fs, because even if the journal seems to be almost empty, there may be a deletion which removes a large file, which affects the index greatly. And because we want better free space predictions after 'sync_fs()', we have to commit. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 471301799c52..ee8e7749eae1 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -429,9 +429,8 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) static int ubifs_sync_fs(struct super_block *sb, int wait) { + int i, err; struct ubifs_info *c = sb->s_fs_info; - int i, ret = 0, err; - long long bud_bytes; struct writeback_control wbc = { .sync_mode = wait ? 
WB_SYNC_ALL : WB_SYNC_HOLD, .range_start = 0, @@ -439,6 +438,19 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) .nr_to_write = LONG_MAX, }; + if (sb->s_flags & MS_RDONLY) + return 0; + + /* + * Synchronize write buffers, because 'ubifs_run_commit()' does not + * do this if it waits for an already running commit. + */ + for (i = 0; i < c->jhead_cnt; i++) { + err = ubifs_wbuf_sync(&c->jheads[i].wbuf); + if (err) + return err; + } + /* * VFS calls '->sync_fs()' before synchronizing all dirty inodes and * pages, so synchronize them first, then commit the journal. Strictly @@ -450,30 +462,16 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) */ generic_sync_sb_inodes(sb, &wbc); - if (c->jheads) { - for (i = 0; i < c->jhead_cnt; i++) { - err = ubifs_wbuf_sync(&c->jheads[i].wbuf); - if (err && !ret) - ret = err; - } - - /* Commit the journal unless it has too little data */ - spin_lock(&c->buds_lock); - bud_bytes = c->bud_bytes; - spin_unlock(&c->buds_lock); - if (bud_bytes > c->leb_size) { - err = ubifs_run_commit(c); - if (err) - return err; - } - } + err = ubifs_run_commit(c); + if (err) + return err; /* * We ought to call sync for c->ubi but it does not have one. If it had * it would in turn call mtd->sync, however mtd operations are * synchronous anyway, so we don't lose any sleep here. */ - return ret; + return err; } /** -- cgit From cb5c6a2b2be59b480a3746c5113cb3411c053bff Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:18:43 +0200 Subject: UBIFS: use ubi_sync UBI now has (fake for now, though) synchronization call - use it. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ee8e7749eae1..a14703e0a9ad 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -466,12 +466,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) if (err) return err; - /* - * We ought to call sync for c->ubi but it does not have one. If it had - * it would in turn call mtd->sync, however mtd operations are - * synchronous anyway, so we don't lose any sleep here. - */ - return err; + return ubi_sync(c->vi.ubi_num); } /** -- cgit From 26d05777b0a23062a39e83c369c0a3583918f164 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 09:11:02 +0200 Subject: UBIFS: always commit on unmount UBIFS commits on unmount to make the next mount faster. Currently, it commits only if there is more than LEB size bytes in the journal. This is not very good, because journal size may be large (512KiB). And there may be few deletions in the journal which do not take much journal space, but which do introduce a lot of TNC changes and make mount slow. Thus, jurt remove this condition and always commit. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a14703e0a9ad..1c1bbe4135c6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1570,20 +1570,24 @@ out: * @c: UBIFS file-system description object * * This function is called during un-mounting and re-mounting, and it commits - * the journal unless the "fast unmount" mode is enabled. It also avoids - * committing the journal if it contains too few data. + * the journal unless the "fast unmount" mode is enabled. 
*/ static void commit_on_unmount(struct ubifs_info *c) { - if (!c->fast_unmount) { - long long bud_bytes; + struct super_block *sb = c->vfs_sb; + long long bud_bytes; - spin_lock(&c->buds_lock); - bud_bytes = c->bud_bytes; - spin_unlock(&c->buds_lock); - if (bud_bytes > c->leb_size) - ubifs_run_commit(c); - } + /* + * This function is called before the background thread is stopped, so + * we may race with ongoing commit, which means we have to take + * @c->bud_lock to access @c->bud_bytes. + */ + spin_lock(&c->buds_lock); + bud_bytes = c->bud_bytes; + spin_unlock(&c->buds_lock); + + if (!c->fast_unmount && !(sb->s_flags & MS_RDONLY) && bud_bytes) + ubifs_run_commit(c); } /** @@ -2009,7 +2013,7 @@ static void ubifs_kill_sb(struct super_block *sb) * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()' * in order to be outside BKL. */ - if (sb->s_root && !(sb->s_flags & MS_RDONLY)) + if (sb->s_root) commit_on_unmount(c); /* The un-mount routine is actually done in put_super() */ generic_shutdown_super(sb); -- cgit From 6edbfafda682b30ad984964cc432da6fa1c8fab5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 20:06:49 +0200 Subject: UBIFS: restore budg_uncommitted_idx UBIFS stores uncommitted index size in c->budg_uncommitted_idx, and this affect budgeting calculations. When mounting and replaying, this variable is not updated, so we may end up with "over-budgeting". This patch fixes the issue. Signed-off-by: Artem Bityutskiy --- fs/ubifs/replay.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 21f7d047c306..ce42a7b0ca5a 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -144,7 +144,7 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r) /* * If the replay order was perfect the dirty space would now be * zero. The order is not perfect because the the journal heads - * race with eachother. This is not a problem but is does mean + * race with each other. This is not a problem but is does mean * that the dirty space may temporarily exceed c->leb_size * during the replay. */ @@ -656,7 +656,7 @@ out_dump: * @dirty: amount of dirty space from padding and deletion nodes * * This function inserts a reference node to the replay tree and returns zero - * in case of success ort a negative error code in case of failure. + * in case of success or a negative error code in case of failure. */ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, unsigned long long sqnum, int free, int dirty) @@ -883,7 +883,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) * This means that we reached end of log and now * look to the older log data, which was already * committed but the eraseblock was not erased (UBIFS - * only unmaps it). So this basically means we have to + * only un-maps it). So this basically means we have to * exit with "end of log" code. */ err = 1; @@ -1062,6 +1062,15 @@ int ubifs_replay_journal(struct ubifs_info *c) if (err) goto out; + /* + * UBIFS budgeting calculations use @c->budg_uncommitted_idx variable + * to roughly estimate index growth. Things like @c->min_idx_lebs + * depend on it. This means we have to initialize it to make sure + * budgeting works properly. 
+ */ + c->budg_uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); + c->budg_uncommitted_idx *= c->max_idx_node_sz; + ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, -- cgit From 2edc2025c2583a18eafe5cdbc7deb36e320aaec5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 22 Dec 2008 11:21:03 +0200 Subject: UBIFS: do not lie about used blocks Do not force UBIFS return 0 used space when it is empty. It leads to a situation when creating any file immediately produces tens of used blocks, which looks very weird. It is better to be honest and say that some blocks are used even if the FS is empty. And ext2 does the same. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 44cff803171a..3715d0114952 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -766,16 +766,6 @@ long long ubifs_get_free_space(struct ubifs_info *c) min_idx_lebs = c->min_idx_lebs; ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c)); outstanding = c->budg_data_growth + c->budg_dd_growth; - - /* - * Force the amount available to the total size reported if the used - * space is zero. - */ - if (c->lst.total_used <= UBIFS_INO_NODE_SZ && !outstanding) { - spin_unlock(&c->space_lock); - return (long long)c->block_cnt << UBIFS_BLOCK_SHIFT; - } - available = ubifs_calc_available(c, min_idx_lebs); /* -- cgit From 2acf80675800d5e6775990d1280cca5c2ffb30e6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Dec 2008 11:04:40 -0500 Subject: UBIFS: simplify make_free_space The 'make_free_space()' function was too complex and this patch simplifies it. It also fixes a bug - the freespace test failed straight away on UBI volumes with 512 bytes LEB size. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 151 ++++++++++++++++++------------------------------------ 1 file changed, 49 insertions(+), 102 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 3715d0114952..4d270f0a8564 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -37,13 +37,10 @@ /* * When pessimistic budget calculations say that there is no enough space, * UBIFS starts writing back dirty inodes and pages, doing garbage collection, - * or committing. The below constants define maximum number of times UBIFS + * or committing. The below constant defines maximum number of times UBIFS * repeats the operations. */ -#define MAX_SHRINK_RETRIES 8 -#define MAX_GC_RETRIES 4 -#define MAX_CMT_RETRIES 2 -#define MAX_NOSPC_RETRIES 1 +#define MAX_MKSPC_RETRIES 3 /* * The below constant defines amount of dirty pages which should be written @@ -51,30 +48,6 @@ */ #define NR_TO_WRITE 16 -/** - * struct retries_info - information about re-tries while making free space. - * @prev_liability: previous liability - * @shrink_cnt: how many times the liability was shrinked - * @shrink_retries: count of liability shrink re-tries (increased when - * liability does not shrink) - * @try_gc: GC should be tried first - * @gc_retries: how many times GC was run - * @cmt_retries: how many times commit has been done - * @nospc_retries: how many times GC returned %-ENOSPC - * - * Since we consider budgeting to be the fast-path, and this structure has to - * be allocated on stack and zeroed out, we make it smaller using bit-fields. 
- */ -struct retries_info { - long long prev_liability; - unsigned int shrink_cnt; - unsigned int shrink_retries:5; - unsigned int try_gc:1; - unsigned int gc_retries:4; - unsigned int cmt_retries:3; - unsigned int nospc_retries:1; -}; - /** * shrink_liability - write-back some dirty pages/inodes. * @c: UBIFS file-system description object @@ -146,10 +119,26 @@ static int run_gc(struct ubifs_info *c) return 0; } +/** + * get_liability - calculate current liability. + * @c: UBIFS file-system description object + * + * This function calculates and returns current UBIFS liability, i.e. the + * amount of bytes UBIFS has "promised" to write to the media. + */ +static long long get_liability(struct ubifs_info *c) +{ + long long liab; + + spin_lock(&c->space_lock); + liab = c->budg_idx_growth + c->budg_data_growth + c->budg_dd_growth; + spin_unlock(&c->space_lock); + return liab; +} + /** * make_free_space - make more free space on the file-system. * @c: UBIFS file-system description object - * @ri: information about previous invocations of this function * * This function is called when an operation cannot be budgeted because there * is supposedly no free space. But in most cases there is some free space: @@ -165,87 +154,42 @@ static int run_gc(struct ubifs_info *c) * Returns %-ENOSPC if it couldn't do more free space, and other negative error * codes on failures. */ -static int make_free_space(struct ubifs_info *c, struct retries_info *ri) +static int make_free_space(struct ubifs_info *c) { - int err; - - /* - * If we have some dirty pages and inodes (liability), try to write - * them back unless this was tried too many times without effect - * already. - */ - if (ri->shrink_retries < MAX_SHRINK_RETRIES && !ri->try_gc) { - long long liability; - - spin_lock(&c->space_lock); - liability = c->budg_idx_growth + c->budg_data_growth + - c->budg_dd_growth; - spin_unlock(&c->space_lock); + int err, retries = 0; + long long liab1, liab2; - if (ri->prev_liability >= liability) { - /* Liability does not shrink, next time try GC then */ - ri->shrink_retries += 1; - if (ri->gc_retries < MAX_GC_RETRIES) - ri->try_gc = 1; - dbg_budg("liability did not shrink: retries %d of %d", - ri->shrink_retries, MAX_SHRINK_RETRIES); - } - - dbg_budg("force write-back (count %d)", ri->shrink_cnt); - shrink_liability(c, NR_TO_WRITE + ri->shrink_cnt); + do { + liab1 = get_liability(c); + /* + * We probably have some dirty pages or inodes (liability), try + * to write them back. + */ + dbg_budg("liability %lld, run write-back", liab1); + shrink_liability(c, NR_TO_WRITE); - ri->prev_liability = liability; - ri->shrink_cnt += 1; - return -EAGAIN; - } + liab2 = get_liability(c); + if (liab2 < liab1) + return -EAGAIN; - /* - * Try to run garbage collector unless it was already tried too many - * times. - */ - if (ri->gc_retries < MAX_GC_RETRIES) { - ri->gc_retries += 1; - dbg_budg("run GC, retries %d of %d", - ri->gc_retries, MAX_GC_RETRIES); + dbg_budg("new liability %lld (not shrinked)", liab2); - ri->try_gc = 0; + /* Liability did not shrink again, try GC */ + dbg_budg("Run GC"); err = run_gc(c); if (!err) return -EAGAIN; - if (err == -EAGAIN) { - dbg_budg("GC asked to commit"); - err = ubifs_run_commit(c); - if (err) - return err; - return -EAGAIN; - } - - if (err != -ENOSPC) + if (err != -EAGAIN && err != -ENOSPC) + /* Some real error happened */ return err; - /* - * GC could not make any progress. If this is the first time, - * then it makes sense to try to commit, because it might make - * some dirty space. 
- */ - dbg_budg("GC returned -ENOSPC, retries %d", - ri->nospc_retries); - if (ri->nospc_retries >= MAX_NOSPC_RETRIES) - return err; - ri->nospc_retries += 1; - } - - /* Neither GC nor write-back helped, try to commit */ - if (ri->cmt_retries < MAX_CMT_RETRIES) { - ri->cmt_retries += 1; - dbg_budg("run commit, retries %d of %d", - ri->cmt_retries, MAX_CMT_RETRIES); + dbg_budg("Run commit (retries %d)", retries); err = ubifs_run_commit(c); if (err) return err; - return -EAGAIN; - } + } while (retries++ < MAX_MKSPC_RETRIES); + return -ENOSPC; } @@ -523,8 +467,7 @@ static int calc_dd_growth(const struct ubifs_info *c, int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) { int uninitialized_var(cmt_retries), uninitialized_var(wb_retries); - int err, idx_growth, data_growth, dd_growth; - struct retries_info ri; + int err, idx_growth, data_growth, dd_growth, retried = 0; ubifs_assert(req->new_page <= 1); ubifs_assert(req->dirtied_page <= 1); @@ -542,7 +485,6 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) if (!data_growth && !dd_growth) return 0; idx_growth = calc_idx_growth(c, req); - memset(&ri, 0, sizeof(struct retries_info)); again: spin_lock(&c->space_lock); @@ -580,12 +522,17 @@ again: return err; } - err = make_free_space(c, &ri); + err = make_free_space(c); + cond_resched(); if (err == -EAGAIN) { dbg_budg("try again"); - cond_resched(); goto again; } else if (err == -ENOSPC) { + if (!retried) { + retried = 1; + dbg_budg("-ENOSPC, but anyway try once again"); + goto again; + } dbg_budg("FS is full, -ENOSPC"); c->nospace = 1; if (can_use_rp(c) || c->rp_size == 0) -- cgit From 6a4a9b438fe43397f4652853838f284cddd629b5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 11:00:55 +0200 Subject: UBIFS: fix sparse warnings fs/ubifs/compress.c:111:8: warning: incorrect type in argument 5 (different signedness) fs/ubifs/compress.c:111:8: expected unsigned int *dlen fs/ubifs/compress.c:111:8: got int *out_len fs/ubifs/compress.c:175:10: warning: incorrect type in argument 5 (different signedness) fs/ubifs/compress.c:175:10: expected unsigned int *dlen fs/ubifs/compress.c:175:10: got int *out_len Fix this by adding a cast to (unsigned int *). We guarantee that our lengths are small and no overflow is possible. Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 4c90ee2aef46..11e4132f314a 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -108,7 +108,7 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, if (compr->comp_mutex) mutex_lock(compr->comp_mutex); err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, - out_len); + (unsigned int *)out_len); if (compr->comp_mutex) mutex_unlock(compr->comp_mutex); if (unlikely(err)) { @@ -172,7 +172,7 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, if (compr->decomp_mutex) mutex_lock(compr->decomp_mutex); err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, - out_len); + (unsigned int *)out_len); if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) -- cgit From f92b982680e4b4149c559789a54e1e9db190752a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 11:34:26 +0200 Subject: UBIFS: fix checkpatch.pl warnings These are mostly long lines and wrong indentation warning fixes. 
But also there are two volatile variables and checkpatch.pl complains about them: WARNING: Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt + volatile int gc_seq; WARNING: Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt + volatile int gced_lnum; Well, we anyway use smp_wmb() for c->gc_seq and c->gced_lnum, so these 'volatile' modifiers can be just dropped. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 3 ++- fs/ubifs/file.c | 4 ++-- fs/ubifs/journal.c | 2 +- fs/ubifs/lpt_commit.c | 2 +- fs/ubifs/tnc.c | 31 +++++++++++++++---------------- fs/ubifs/ubifs.h | 8 ++++---- 6 files changed, 25 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index a2be11584ad3..350fedecc833 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -703,7 +703,8 @@ void dbg_dump_lpt_info(struct ubifs_info *c) printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); printk(KERN_DEBUG "\tLPT head is at %d:%d\n", c->nhead_lnum, c->nhead_offs); - printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); + printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", + c->ltab_lnum, c->ltab_offs); if (c->big_lpt) printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n", c->lsave_lnum, c->lsave_offs); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 7f1de98e6091..fe82d2464d46 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -72,8 +72,8 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, return err; } - ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); - + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > + ubifs_inode(inode)->creat_sqnum); len = le32_to_cpu(dn->size); if (len <= 0 || len > UBIFS_BLOCK_SIZE) goto dump; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index f91b745908ea..3b0fa704d559 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -1220,7 +1220,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, data_key_init(c, &key, inum, blk); bit = old_size & (UBIFS_BLOCK_SIZE - 1); - blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0: 1); + blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 
0 : 1); data_key_init(c, &to_key, inum, blk); err = ubifs_tnc_remove_range(c, &key, &to_key); diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index da60b5a0fab9..b8a060794239 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -548,7 +548,7 @@ static int write_cnodes(struct ubifs_info *c) no_space: ubifs_err("LPT out of space mismatch"); dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " - "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); + "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); dbg_dump_lpt_lebs(c); dump_stack(); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 6eef5344a145..f7e36f545527 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2245,12 +2245,11 @@ int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key, if (found) { /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, - znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } zbr = &znode->zbranch[n]; lnc_free(zbr); @@ -2317,11 +2316,11 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } if (found == 1) { @@ -2627,11 +2626,11 @@ int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key, /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } /* Remove all keys in range except the first */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a17dd794ae92..3275c89a3585 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -481,8 +481,8 @@ struct ubifs_lprops { struct ubifs_lpt_lprops { int free; int dirty; - unsigned tgc : 1; - unsigned cmt : 1; + unsigned tgc:1; + unsigned cmt:1; }; /** @@ -1322,8 +1322,8 @@ struct ubifs_info { void *sbuf; struct list_head idx_gc; int idx_gc_cnt; - volatile int gc_seq; - volatile int gced_lnum; + int gc_seq; + int gced_lnum; struct list_head infos_list; struct mutex umount_mutex; -- cgit From a9f2fc0e251e71a51deb8059b181c375a4a5e979 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 23 Dec 2008 14:39:14 +0200 Subject: UBIFS: fix writing uncompressed files UBIFS does not disable compression if ui->flags is non-zero, e.g. if the file has "sync" flag. This is because of the typo which is fixed by this patch. The patch also adds a couple of useful debugging prints. 
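To see why the '&&' here was a bug, consider a minimal standalone sketch of the check (the flag values below are made up for illustration and are not the real UBIFS constants):

#include <stdio.h>

#define COMPR_FL 0x01 /* hypothetical "compress this inode" bit */
#define SYNC_FL  0x08 /* hypothetical unrelated inode flag */

int main(void)
{
	unsigned int flags = SYNC_FL; /* compression bit is NOT set */

	/* Logical AND: true whenever flags is non-zero, so the
	 * "disable compression" branch is never taken. */
	if (!(flags && COMPR_FL))
		printf("&&: compression disabled\n");
	else
		printf("&&: compression stays on (the bug)\n");

	/* Bitwise AND: actually tests the bit. */
	if (!(flags & COMPR_FL))
		printf("&: compression disabled (intended behaviour)\n");
	return 0;
}

With any inode flag set (for example the "sync" flag), 'flags && COMPR_FL' evaluates to true even though the compression bit is clear, which is exactly the behaviour described above; the bitwise test does the right thing.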
Signed-off-by: Artem Bityutskiy --- fs/ubifs/ioctl.c | 2 ++ fs/ubifs/journal.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 5e82cffe9695..6db7a6be6c97 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -154,6 +154,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_GETFLAGS: flags = ubifs2ioctl(ubifs_inode(inode)->flags); + dbg_gen("get flags: %#x, i_flags %#x", flags, inode->i_flags); return put_user(flags, (int __user *) arg); case FS_IOC_SETFLAGS: { @@ -176,6 +177,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = mnt_want_write(file->f_path.mnt); if (err) return err; + dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags); err = setflags(inode, flags); mnt_drop_write(file->f_path.mnt); return err; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 3b0fa704d559..10ae25b7d1db 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -704,7 +704,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, data->size = cpu_to_le32(len); zero_data_node_unused(data); - if (!(ui->flags && UBIFS_COMPR_FL)) + if (!(ui->flags & UBIFS_COMPR_FL)) /* Compression is disabled for this inode */ compr_type = UBIFS_COMPR_NONE; else -- cgit From 57a450e95932f7798677885b8a01443aca72fdc7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 16:23:34 +0200 Subject: UBIFS: allow mounting when short of space It is fine if there is not free space - we should still allow mounting this FS. This patch relaxes the free space requirements and adds info dumps. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 40 +++++++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1c1bbe4135c6..2c91d6fa4e05 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1073,6 +1073,30 @@ again: } } +/** + * check_free_space - check if there is enough free space to mount. + * @c: UBIFS file-system description object + * + * This function makes sure UBIFS has enough free space to be mounted in + * read/write mode. UBIFS must always have some free space to allow deletions. + */ +static int check_free_space(struct ubifs_info *c) +{ + ubifs_assert(c->dark_wm > 0); + if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { + ubifs_err("insufficient free space to mount in read/write mode"); + dbg_dump_budg(c); + dbg_dump_lprops(c); + /* + * We return %-EINVAL instead of %-ENOSPC because it seems to + * be the closest error code mentioned in the mount function + * documentation. + */ + return -EINVAL; + } + return 0; +} + /** * mount_ubifs - mount UBIFS file-system. * @c: UBIFS file-system description object @@ -1154,7 +1178,7 @@ static int mount_ubifs(struct ubifs_info *c) /* * Make sure the compressor which is set as default in the superblock - * or overriden by mount options is actually compiled in. + * or overridden by mount options is actually compiled in. 
*/ if (!ubifs_compr_present(c->default_compr)) { ubifs_err("'compressor \"%s\" is not compiled in", @@ -1236,12 +1260,9 @@ static int mount_ubifs(struct ubifs_info *c) if (!mounted_read_only) { int lnum; - /* Check for enough free space */ - if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { - ubifs_err("insufficient available space"); - err = -EINVAL; + err = check_free_space(c); + if (err) goto out_orphans; - } /* Check for enough log space */ lnum = c->lhead_lnum + 1; @@ -1442,12 +1463,9 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->remounting_rw = 1; c->always_chk_crc = 1; - /* Check for enough free space */ - if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { - ubifs_err("insufficient available space"); - err = -EINVAL; + err = check_free_space(c); + if (err) goto out; - } if (c->old_leb_cnt != c->leb_cnt) { struct ubifs_sb_node *sup; -- cgit From 80736d41f895bc472b2433a1c27fa6d4afe6ca35 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 17:44:02 +0200 Subject: UBIFS: fix numerous spelling mistakes Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 14 +++++++------- fs/ubifs/lpt_commit.c | 8 ++++---- fs/ubifs/ubifs.h | 1 - 3 files changed, 11 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 4d270f0a8564..31870d8dab84 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -652,9 +652,9 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * user-space. User-space application tend to expect that if the file-system * (e.g., via the 'statfs()' call) reports that it has N bytes available, they * are able to write a file of size N. UBIFS attaches node headers to each data - * node and it has to write indexind nodes as well. This introduces additional - * overhead, and UBIFS has to report sligtly less free space to meet the above - * expectetions. + * node and it has to write indexing nodes as well. This introduces additional + * overhead, and UBIFS has to report slightly less free space to meet the above + * expectations. * * This function assumes free space is made up of uncompressed data nodes and * full index nodes (one per data node, tripled because we always allow enough @@ -677,7 +677,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) * of data nodes, f - fanout. Because effective UBIFS fanout is twice * as less than maximum fanout, we assume that each data node * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes. - * Note, the multiplier 3 is because UBIFS reseves thrice as more space + * Note, the multiplier 3 is because UBIFS reserves thrice as more space * for the index. */ f = c->fanout > 3 ? c->fanout >> 1 : 2; @@ -695,10 +695,10 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) * This function calculates amount of free space to report to user-space. * * Because UBIFS may introduce substantial overhead (the index, node headers, - * alighment, wastage at the end of eraseblocks, etc), it cannot report real + * alignment, wastage at the end of eraseblocks, etc), it cannot report real * amount of free flash space it has (well, because not all dirty space is - * reclamable, UBIFS does not actually know the real amount). If UBIFS did so, - * it would bread user expectetion about what free space is. Users seem to + * reclaimable, UBIFS does not actually know the real amount). If UBIFS did so, + * it would bread user expectations about what free space is. 
Users seem to * accustomed to assume that if the file-system reports N bytes of free space, * they would be able to fit a file of N bytes to the FS. This almost works for * traditional file-systems, because they have way less overhead than UBIFS. diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index b8a060794239..96ca95707175 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -753,7 +753,7 @@ static void lpt_tgc_start(struct ubifs_info *c) * LPT trivial garbage collection is where a LPT LEB contains only dirty and * free space and so may be reused as soon as the next commit is completed. * This function is called after the commit is completed (master node has been - * written) and unmaps LPT LEBs that were marked for trivial GC. + * written) and un-maps LPT LEBs that were marked for trivial GC. */ static int lpt_tgc_end(struct ubifs_info *c) { @@ -1467,7 +1467,7 @@ void ubifs_lpt_free(struct ubifs_info *c, int wr_only) #ifdef CONFIG_UBIFS_FS_DEBUG /** - * dbg_is_all_ff - determine if a buffer contains only 0xff bytes. + * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes. * @buf: buffer * @len: buffer length */ @@ -1492,7 +1492,7 @@ static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs) struct ubifs_nnode *nnode; int hght; - /* Entire tree is in memory so first_nnode / next_nnode are ok */ + /* Entire tree is in memory so first_nnode / next_nnode are OK */ nnode = first_nnode(c, &hght); for (; nnode; nnode = next_nnode(c, nnode, &hght)) { struct ubifs_nbranch *branch; @@ -1837,7 +1837,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) * This function dumps an LEB from LPT area. Nodes in this area are very * different to nodes in the main area (e.g., they do not have common headers, * they do not have 8-byte alignments, etc), so we have a separate function to - * dump LPT area LEBs. Note, LPT has to be locked by the coller. + * dump LPT area LEBs. Note, LPT has to be locked by the caller. */ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) { diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 3275c89a3585..fc2a4cc66d03 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1168,7 +1168,6 @@ struct ubifs_debug_info; * @mount_opts: UBIFS-specific mount options * * @dbg: debugging-related information - * @dfs: debugfs support-related information */ struct ubifs_info { struct super_block *vfs_sb; -- cgit From 5d38b3ac78e0e0e420fba134716fc3d20e6b978a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 17:58:42 +0200 Subject: UBIFS: print debugging messages properly We cannot use ubifs_err() macro with DBGKEY() and DBGKEY1(), because this is racy and holding dbg_lock is needed. Use dbg_err() instead, which does have the lock held. 
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 350fedecc833..792c5a16c182 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1010,20 +1010,20 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, err = 1; key_read(c, &dent1->key, &key); if (keys_cmp(c, &zbr1->key, &key)) { - ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); + dbg_err("1st entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + dbg_err("but it should have key %s according to tnc", + DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); goto out_free; } key_read(c, &dent2->key, &key); if (keys_cmp(c, &zbr2->key, &key)) { - ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); + dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + dbg_err("but it should have key %s according to tnc", + DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); goto out_free; } @@ -1037,9 +1037,9 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, goto out_free; } if (cmp == 0 && nlen1 == nlen2) - ubifs_err("2 xent/dent nodes with the same name"); + dbg_err("2 xent/dent nodes with the same name"); else - ubifs_err("bad order of colliding key %s", + dbg_err("bad order of colliding key %s", DBGKEY(&key)); ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); -- cgit From 8e5033adc78ff4fbeab7052134e7af1f6ff04187 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 18:37:45 +0200 Subject: UBIFS: add more useful debugging prints Print node sizes and maximum node sizes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2c91d6fa4e05..0d7564b95f8e 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1361,8 +1361,20 @@ static int mount_ubifs(struct ubifs_info *c) dbg_msg("tree fanout: %d", c->fanout); dbg_msg("reserved GC LEB: %d", c->gc_lnum); dbg_msg("first main LEB: %d", c->main_first); + dbg_msg("max. znode size %d", c->max_znode_sz); + dbg_msg("max. index node size %d", c->max_idx_node_sz); + dbg_msg("node sizes: data %zu, inode %zu, dentry %zu", + UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ); + dbg_msg("node sizes: trun %zu, sb %zu, master %zu", + UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ); + dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", + UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); + dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu", + UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, + UBIFS_MAX_DENT_NODE_SZ); dbg_msg("dead watermark: %d", c->dead_wm); dbg_msg("dark watermark: %d", c->dark_wm); + dbg_msg("LEB overhead: %d", c->leb_overhead); x = (long long)c->main_lebs * c->dark_wm; dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)", x, x >> 10, x >> 20); -- cgit From e3a2a0d4e5ace731e60e2eff4fb7056ecb34adc1 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 2 Dec 2008 11:16:03 +0100 Subject: anon_inodes: use fops->owner for module refcount There is an imbalance for anonymous inodes. 
If the fops->owner field is set, the module reference count of owner is decreases on release. ("filp_close" --> "__fput" ---> "fops_put") On the other hand, anon_inode_getfd does not increase the module reference count of owner. This causes two problems: - if owner is set, the module refcount goes negative - if owner is not set, the module can be unloaded while code is running This patch changes anon_inode_getfd to be symmetric regarding fops->owner handling. I have checked all existing users of anon_inode_getfd. Noone sets fops->owner, thats why nobody has seen the module refcount negative. The refcounting was tested with a patched and unpatched KVM module.(see patch 2/2) I also did an epoll_open/close test. Signed-off-by: Christian Borntraeger Reviewed-by: Davide Libenzi Signed-off-by: Avi Kivity --- fs/anon_inodes.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index c16d9be1b017..3bbdb9d02376 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -79,9 +79,12 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, if (IS_ERR(anon_inode_inode)) return -ENODEV; + if (fops->owner && !try_module_get(fops->owner)) + return -ENOENT; + error = get_unused_fd_flags(flags); if (error < 0) - return error; + goto err_module; fd = error; /* @@ -128,6 +131,8 @@ err_dput: dput(dentry); err_put_unused_fd: put_unused_fd(fd); +err_module: + module_put(fops->owner); return error; } EXPORT_SYMBOL_GPL(anon_inode_getfd); -- cgit From be6d3e56a6b9b3a4ee44a0685e39e595073c6f0d Mon Sep 17 00:00:00 2001 From: Kentaro Takeda Date: Wed, 17 Dec 2008 13:24:15 +0900 Subject: introduce new LSM hooks where vfsmount is available. Add new LSM hooks for path-based checks. Call them on directory-modifying operations at the points where we still know the vfsmount involved. Signed-off-by: Kentaro Takeda Signed-off-by: Tetsuo Handa Signed-off-by: Toshiharu Harada Signed-off-by: Al Viro --- fs/namei.c | 36 ++++++++++++++++++++++++++++++++++++ fs/open.c | 5 +++++ 2 files changed, 41 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index af3783fff1de..ab441af4196b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1556,6 +1556,9 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) * Refuse to truncate files with mandatory locks held on them. 
*/ error = locks_verify_locked(inode); + if (!error) + error = security_path_truncate(&nd->path, 0, + ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { DQUOT_INIT(inode); @@ -1586,7 +1589,11 @@ static int __open_namei_create(struct nameidata *nd, struct path *path, if (!IS_POSIXACL(dir->d_inode)) mode &= ~current->fs->umask; + error = security_path_mknod(&nd->path, path->dentry, mode, 0); + if (error) + goto out_unlock; error = vfs_create(dir->d_inode, path->dentry, mode, nd); +out_unlock: mutex_unlock(&dir->d_inode->i_mutex); dput(nd->path.dentry); nd->path.dentry = path->dentry; @@ -1999,6 +2006,9 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_mknod(&nd.path, dentry, mode, dev); + if (error) + goto out_drop_write; switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd); @@ -2011,6 +2021,7 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0); break; } +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2070,7 +2081,11 @@ asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode) error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_mkdir(&nd.path, dentry, mode); + if (error) + goto out_drop_write; error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2180,7 +2195,11 @@ static long do_rmdir(int dfd, const char __user *pathname) error = mnt_want_write(nd.path.mnt); if (error) goto exit3; + error = security_path_rmdir(&nd.path, dentry); + if (error) + goto exit4; error = vfs_rmdir(nd.path.dentry->d_inode, dentry); +exit4: mnt_drop_write(nd.path.mnt); exit3: dput(dentry); @@ -2265,7 +2284,11 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = mnt_want_write(nd.path.mnt); if (error) goto exit2; + error = security_path_unlink(&nd.path, dentry); + if (error) + goto exit3; error = vfs_unlink(nd.path.dentry->d_inode, dentry); +exit3: mnt_drop_write(nd.path.mnt); exit2: dput(dentry); @@ -2346,7 +2369,11 @@ asmlinkage long sys_symlinkat(const char __user *oldname, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_symlink(&nd.path, dentry, from); + if (error) + goto out_drop_write; error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2443,7 +2470,11 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_link(old_path.dentry, &nd.path, new_dentry); + if (error) + goto out_drop_write; error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(new_dentry); @@ -2679,8 +2710,13 @@ asmlinkage long sys_renameat(int olddfd, const char __user *oldname, error = mnt_want_write(oldnd.path.mnt); if (error) goto exit5; + error = security_path_rename(&oldnd.path, old_dentry, + &newnd.path, new_dentry); + if (error) + goto exit6; error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); +exit6: mnt_drop_write(oldnd.path.mnt); exit5: dput(new_dentry); diff --git a/fs/open.c b/fs/open.c index c0a426d5766c..1cd7d40e9991 100644 --- a/fs/open.c +++ b/fs/open.c @@ -272,6 +272,8 @@ static long 
do_sys_truncate(const char __user *pathname, loff_t length) goto put_write_and_out; error = locks_verify_truncate(inode, NULL, length); + if (!error) + error = security_path_truncate(&path, length, 0); if (!error) { DQUOT_INIT(inode); error = do_truncate(path.dentry, length, 0, NULL); @@ -328,6 +330,9 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) goto out_putf; error = locks_verify_truncate(inode, file, length); + if (!error) + error = security_path_truncate(&file->f_path, length, + ATTR_MTIME|ATTR_CTIME); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file); out_putf: -- cgit From e2b689d82c0394e5239a3557a217f19e2f47f1be Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Thu, 4 Dec 2008 11:17:47 +0000 Subject: fs: reorder struct inotify_device on 64bits to remove padding Reorder struct inotify_device to remove 8 bytes of padding on 64bit builds, reducing size to 128 bytes . Therefore allocating from a smaller slab & using one fewer cachelines. Signed-off-by: Richard Kennedy ---- Hi, patch against 2.6.28-rc7. built & tested on AMDX2 desktop. I've not been able to send this to the listed inotify maintainers, I just get mail failures. So I guessed filesystem was the best home for it, hope that's ok. regards Richard Signed-off-by: Al Viro --- fs/inotify_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/inotify_user.c b/fs/inotify_user.c index e2425bbd871f..400f8064a548 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -76,10 +76,10 @@ struct inotify_device { struct mutex ev_mutex; /* protects event queue */ struct mutex up_mutex; /* synchronizes watch updates */ struct list_head events; /* list of queued events */ - atomic_t count; /* reference count */ struct user_struct *user; /* user who opened this dev */ struct inotify_handle *ih; /* inotify handle */ struct fasync_struct *fa; /* async notification */ + atomic_t count; /* reference count */ unsigned int queue_size; /* size of the queue (bytes) */ unsigned int event_count; /* number of pending events */ unsigned int max_events; /* maximum number of events */ -- cgit From c2452f32786159ed85f0e4b21fec09258f822fc8 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Mon, 1 Dec 2008 09:33:43 +0100 Subject: shrink struct dentry struct dentry is one of the most critical structures in the kernel. So it's sad to see it going neglected. With CONFIG_PROFILING turned on (which is probably the common case at least for distros and kernel developers), sizeof(struct dcache) == 208 here (64-bit). This gives 19 objects per slab. I packed d_mounted into a hole, and took another 4 bytes off the inline name length to take the padding out from the end of the structure. This shinks it to 200 bytes. I could have gone the other way and increased the length to 40, but I'm aiming for a magic number, read on... I then got rid of the d_cookie pointer. This shrinks it to 192 bytes. Rant: why was this ever a good idea? The cookie system should increase its hash size or use a tree or something if lookups are a problem. Also the "fast dcookie lookups" in oprofile should be moved into the dcookie code -- how can oprofile possibly care about the dcookie_mutex? It gets dropped after get_dcookie() returns so it can't be providing any sort of protection. At 192 bytes, 21 objects fit into a 4K page, saving about 3MB on my system with ~140 000 entries allocated. 
192 is also a multiple of 64, so we get nice cacheline alignment on 64- and 32-byte line systems -- any given dentry will now require 3 cachelines to touch all fields whereas previously it would require 4. I know the inline name size was chosen quite carefully; however, with the reduction in cacheline footprint, it should actually be just about as fast to do a name lookup for a 36 character name as it was before the patch (and faster for other sizes). The memory footprint savings for names which are <= 32 or > 36 bytes long should more than make up for the memory cost for 33-36 byte names. Performance is a feature... Signed-off-by: Al Viro --- fs/dcache.c | 4 ---- fs/dcookies.c | 28 +++++++++++++++++++--------- 2 files changed, 19 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index a1d86c7f3e66..fd244c7a7cc0 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -34,7 +34,6 @@ #include #include "internal.h" - int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); @@ -948,9 +947,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) dentry->d_op = NULL; dentry->d_fsdata = NULL; dentry->d_mounted = 0; -#ifdef CONFIG_PROFILING - dentry->d_cookie = NULL; -#endif INIT_HLIST_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); diff --git a/fs/dcookies.c b/fs/dcookies.c index 855d4b1d619a..180e9fec4ad8 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -93,10 +93,15 @@ static struct dcookie_struct *alloc_dcookie(struct path *path) { struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL); + struct dentry *d; if (!dcs) return NULL; - path->dentry->d_cookie = dcs; + d = path->dentry; + spin_lock(&d->d_lock); + d->d_flags |= DCACHE_COOKIE; + spin_unlock(&d->d_lock); + dcs->path = *path; path_get(path); hash_dcookie(dcs); @@ -119,14 +124,14 @@ int get_dcookie(struct path *path, unsigned long *cookie) goto out; } - dcs = path->dentry->d_cookie; - - if (!dcs) + if (path->dentry->d_flags & DCACHE_COOKIE) { + dcs = find_dcookie((unsigned long)path->dentry); + } else { dcs = alloc_dcookie(path); - - if (!dcs) { - err = -ENOMEM; - goto out; + if (!dcs) { + err = -ENOMEM; + goto out; + } } *cookie = dcookie_value(dcs); @@ -251,7 +256,12 @@ out_kmem: static void free_dcookie(struct dcookie_struct * dcs) { - dcs->path.dentry->d_cookie = NULL; + struct dentry *d = dcs->path.dentry; + + spin_lock(&d->d_lock); + d->d_flags &= ~DCACHE_COOKIE; + spin_unlock(&d->d_lock); + path_put(&dcs->path); kmem_cache_free(dcookie_cache, dcs); } -- cgit From 5cc4a0341a1295ea56b2e62eb70d96d8fdb94ded Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 1 Dec 2008 14:34:51 -0800 Subject: fs/namespace.c: drop code after return The extra semicolon serves no purpose.
Signed-off-by: Julia Lawall Reviewed-by: Richard Genoud Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 1c09cab8f7cf..a40685d800a8 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1990,7 +1990,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, if (!new_ns->root) { up_write(&namespace_sem); kfree(new_ns); - return ERR_PTR(-ENOMEM);; + return ERR_PTR(-ENOMEM); } spin_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new_ns->root->mnt_list); -- cgit From a17d5232de7b53d34229de79ec22f4bb04adb7e4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:10 +0000 Subject: eCryptfs: check readlink result was not an error before using it The result from readlink is being used to index into the link name buffer without checking whether it is a valid length. If readlink returns an error this will fault or cause memory corruption. Cc: Tyler Hicks Cc: Dustin Kirkland Cc: ecryptfs-devel@lists.launchpad.net Signed-off-by: Duane Griffin Acked-by: Michael Halcrow Acked-by: Tyler Hicks Signed-off-by: Al Viro --- fs/ecryptfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 89209f00f9c7..5e78fc179886 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -673,10 +673,11 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) ecryptfs_printk(KERN_DEBUG, "Calling readlink w/ " "dentry->d_name.name = [%s]\n", dentry->d_name.name); rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); - buf[rc] = '\0'; set_fs(old_fs); if (rc < 0) goto out_free; + else + buf[rc] = '\0'; rc = 0; nd_set_link(nd, buf); goto out; -- cgit From ebd09abbd9699f328165aee50a070403fbf55a37 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:12 +0000 Subject: vfs: ensure page symlinks are NUL-terminated On-disk data corruption could cause a page link to have its i_size set to PAGE_SIZE (or a multiple thereof) and its contents all non-NUL. NUL-terminate the link name to ensure this doesn't cause further problems for the kernel. Cc: Al Viro Cc: Andrew Morton Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/namei.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index ab441af4196b..9ed5e2818f80 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2786,13 +2786,16 @@ int vfs_follow_link(struct nameidata *nd, const char *link) /* get the link contents into pagecache */ static char *page_getlink(struct dentry * dentry, struct page **ppage) { - struct page * page; + char *kaddr; + struct page *page; struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; *ppage = page; - return kmap(page); + kaddr = kmap(page); + nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1); + return kaddr; } int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) -- cgit From 8d6d0c4da2dbbe0a69fea3692146af39f139f8b4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:13 +0000 Subject: ext2: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. 
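(Editorial note on this run of symlink hardening patches: the nd_terminate_link() helper that the VFS and ext2/3/4 changes call is declared outside the fs/ diffs shown here, presumably alongside the other nameidata helpers, so its body does not appear above. The sketch below is an assumption about its effect, not the actual header contents: it writes a NUL at whichever is smaller, the link length or the last usable byte of the buffer.)

/* Assumed behaviour of nd_terminate_link(); the real helper is not
 * part of the diffs shown in this series. */
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
	((char *)name)[len < maxlen ? len : maxlen] = '\0';
}

(This is why the callers pass PAGE_SIZE - 1 and sizeof(ei->i_data) - 1: the terminator must land inside the buffer even when i_size is corrupted.)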
Cc: Andrew Morton Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext2/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7658b33e2653..02b39a5deb74 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "ext2.h" #include "acl.h" #include "xip.h" @@ -1286,9 +1287,11 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) else inode->i_mapping->a_ops = &ext2_aops; } else if (S_ISLNK(inode->i_mode)) { - if (ext2_inode_is_fast_symlink(inode)) + if (ext2_inode_is_fast_symlink(inode)) { inode->i_op = &ext2_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext2_symlink_inode_operations; if (test_opt(inode->i_sb, NOBH)) inode->i_mapping->a_ops = &ext2_nobh_aops; -- cgit From b5ed3112b5f74c8ec1c7aa03a76c596635e85197 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:14 +0000 Subject: ext3: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Andrew Morton Cc: Stephen Tweedie Cc: linux-ext4@vger.kernel.org Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext3/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f8424ad89971..c4bdccf976b5 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "xattr.h" #include "acl.h" @@ -2817,9 +2818,11 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino) inode->i_op = &ext3_dir_inode_operations; inode->i_fop = &ext3_dir_operations; } else if (S_ISLNK(inode->i_mode)) { - if (ext3_inode_is_fast_symlink(inode)) + if (ext3_inode_is_fast_symlink(inode)) { inode->i_op = &ext3_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext3_symlink_inode_operations; ext3_set_aops(inode); } -- cgit From e83c1397cafc4e44f868289db5e417463c0d09a4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:15 +0000 Subject: ext4: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. 
Cc: Andrew Morton Cc: Theodore Ts'o Cc: adilger@sun.com Cc: linux-ext4@vger.kernel.org Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext4/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index be21a5ae33cb..7c3325e0b005 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include "ext4_jbd2.h" @@ -4164,9 +4165,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { - if (ext4_inode_is_fast_symlink(inode)) + if (ext4_inode_is_fast_symlink(inode)) { inode->i_op = &ext4_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } -- cgit From 21acaf8e8da00235be59a3e489d5fa2a8721cafc Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:16 +0000 Subject: sysv: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Christoph Hellwig Cc: Al Viro Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/sysv/inode.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index df0d435baa48..3d81bf58dae2 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "sysv.h" @@ -163,8 +164,11 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) if (inode->i_blocks) { inode->i_op = &sysv_symlink_inode_operations; inode->i_mapping->a_ops = &sysv_aops; - } else + } else { inode->i_op = &sysv_fast_symlink_inode_operations; + nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size, + sizeof(SYSV_I(inode)->i_data) - 1); + } } else init_special_inode(inode, inode->i_mode, rdev); } -- cgit From a63d0ff31a136bdf52350c4e6c2929eaf47ea2b2 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:17 +0000 Subject: freevxfs: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Christoph Hellwig Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/freevxfs/vxfs_inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index 9f3f2ceb73f0..03a6ea5e99f7 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c @@ -325,8 +325,10 @@ vxfs_iget(struct super_block *sbp, ino_t ino) if (!VXFS_ISIMMED(vip)) { ip->i_op = &page_symlink_inode_operations; ip->i_mapping->a_ops = &vxfs_aops; - } else + } else { ip->i_op = &vxfs_immed_symlink_iops; + vip->vii_immed.vi_immed[ip->i_size] = '\0'; + } } else init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); -- cgit From 7df5fa06de89a4ac311957e0cb9c1d87552b4325 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:18 +0000 Subject: befs: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Sergey S. 
Kostyliov Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/befs/linuxvfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b6dfee37c7b7..d06cb023ad02 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -378,7 +378,8 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) inode->i_size = 0; inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE; strncpy(befs_ino->i_data.symlink, raw_inode->data.symlink, - BEFS_SYMLINK_LEN); + BEFS_SYMLINK_LEN - 1); + befs_ino->i_data.symlink[BEFS_SYMLINK_LEN - 1] = '\0'; } else { int num_blks; @@ -477,6 +478,8 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd) kfree(link); befs_error(sb, "Failed to read entire long symlink"); link = ERR_PTR(-EIO); + } else { + link[len - 1] = '\0'; } } else { link = befs_ino->i_data.symlink; -- cgit From dc711ca35f9d95a1eec02118e0c298b5e3068315 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 3 Nov 2008 15:03:50 -0500 Subject: fix switch_names() breakage in short-to-short case We want ->name.len to match the resulting name on *both* source and target Signed-off-by: Al Viro --- fs/dcache.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index fd244c7a7cc0..eeafc14c2a14 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1616,8 +1616,11 @@ static void switch_names(struct dentry *dentry, struct dentry *target) */ memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); + dentry->d_name.len = target->d_name.len; + return; } } + do_switch(dentry->d_name.len, target->d_name.len); } /* @@ -1677,7 +1680,6 @@ already_unhashed: /* Switch the names.. */ switch_names(dentry, target); - do_switch(dentry->d_name.len, target->d_name.len); do_switch(dentry->d_name.hash, target->d_name.hash); /* ... and switch the parents */ @@ -1787,7 +1789,6 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) struct dentry *dparent, *aparent; switch_names(dentry, anon); - do_switch(dentry->d_name.len, anon->d_name.len); do_switch(dentry->d_name.hash, anon->d_name.hash); dparent = dentry->d_parent; -- cgit From be42c4c433c2c0d3f1583c08908fead00d36d222 Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 1 Dec 2008 14:34:58 -0800 Subject: correct wrong function name of d_put in kernel document and source comment no function named d_put(), it should be dput(). Impact: fix document and comment, no functionality changed Signed-off-by: Zhao Lei Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/dcache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index eeafc14c2a14..c231a639c2a2 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1332,7 +1332,7 @@ err_out: * * Searches the children of the parent dentry for the name in question. If * the dentry is found its reference count is incremented and the dentry - * is returned. The caller must use d_put to free the entry when it has + * is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned on failure. * * __d_lookup is dcache_lock free. The hash list is protected using RCU. 
-- cgit From 52afeefb9dac9287429642189996426a2bfd6a25 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Dec 2008 14:35:00 -0800 Subject: expand some comments (d_path / seq_path) Explain that you really need to use the return value of d_path rather than the buffer you passed into it. Also fix the comment for seq_path(), the function arguments changed recently but the comment hadn't been updated in sync. Signed-off-by: Arjan van de Ven Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/dcache.c | 8 ++++++-- fs/seq_file.c | 10 ++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index c231a639c2a2..bdb3f50248a7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1908,7 +1908,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * - * Returns the buffer or an error code if the path was too long. + * Returns a pointer into the buffer or an error code if the + * path was too long. * * "buflen" should be positive. Caller holds the dcache_lock. * @@ -1984,7 +1985,10 @@ Elong: * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * - * Returns the buffer or an error code if the path was too long. + * Returns a pointer into the buffer or an error code if the path was + * too long. Note: Callers should use the returned pointer, not the passed + * in buffer, to use the name! The implementation often starts at an offset + * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ diff --git a/fs/seq_file.c b/fs/seq_file.c index 16c211558c22..99d8b8cfc9b7 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -389,8 +389,14 @@ char *mangle_path(char *s, char *p, char *esc) } EXPORT_SYMBOL(mangle_path); -/* - * return the absolute path of 'dentry' residing in mount 'mnt'. +/** + * seq_path - seq_file interface to print a pathname + * @m: the seq_file handle + * @path: the struct path to print + * @esc: set of characters to escape in the output + * + * return the absolute path of 'path', as represented by the + * dentry / mnt pair in the path parameter. */ int seq_path(struct seq_file *m, struct path *path, char *esc) { -- cgit From 66f221875dc10813aa2f06c83ad60d0eb1356406 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 5 Nov 2008 15:04:29 +0100 Subject: remove incorrect comment in inode_permission We now pass on all MAY_ flags to the filesystems permission routines, so remove the comment stating the contrary. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9ed5e2818f80..631cfdd45c68 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -247,7 +247,6 @@ int inode_permission(struct inode *inode, int mask) return -EACCES; } - /* Ordinary permission routines do not understand MAY_APPEND. */ if (inode->i_op && inode->i_op->permission) retval = inode->i_op->permission(inode, mask); else -- cgit From b4091d5f6fde28ab762e1094a1a26d81f3badfa5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 5 Nov 2008 15:07:21 +0100 Subject: kill walk_init_root walk_init_root is a tiny helper that is marked __always_inline, has just one caller and an unused argument. Just merge it into the caller. 
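(Editorial aside, referring back to the d_path()/seq_path() comment patch above rather than to this walk_init_root change: a minimal, hypothetical usage sketch of why callers must use d_path()'s return value and not the buffer they passed in. The helper name and buffer size below are made up for illustration.)

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* Hypothetical helper: d_path() builds the name from the end of "buf",
 * so only the returned pointer marks the start of the string. */
static void print_open_file(struct file *filp)
{
	char buf[128];
	char *name = d_path(&filp->f_path, buf, sizeof(buf));

	if (!IS_ERR(name))
		printk(KERN_DEBUG "opened %s\n", name);	/* not buf! */
}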
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 631cfdd45c68..d4d0b59ed2cc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -526,18 +526,6 @@ out_unlock: return result; } -/* SMP-safe */ -static __always_inline void -walk_init_root(const char *name, struct nameidata *nd) -{ - struct fs_struct *fs = current->fs; - - read_lock(&fs->lock); - nd->path = fs->root; - path_get(&fs->root); - read_unlock(&fs->lock); -} - /* * Wrapper to retry pathname resolution whenever the underlying * file system returns an ESTALE. @@ -575,9 +563,16 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l goto fail; if (*link == '/') { + struct fs_struct *fs = current->fs; + path_put(&nd->path); - walk_init_root(link, nd); + + read_lock(&fs->lock); + nd->path = fs->root; + path_get(&fs->root); + read_unlock(&fs->lock); } + res = link_path_walk(link, nd); if (nd->depth || res || nd->last_type!=LAST_NORM) return res; -- cgit From 3fb64190aa3c23c10e6e9fd0124ac030115c99bf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Oct 2008 09:58:10 +0200 Subject: pass a struct path * to may_open No need for the nameidata in may_open - a struct path is enough. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 14 +++++++------- fs/nfsctl.c | 5 +++-- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index d4d0b59ed2cc..5cc0dc95a7a5 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1487,9 +1487,9 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, return error; } -int may_open(struct nameidata *nd, int acc_mode, int flag) +int may_open(struct path *path, int acc_mode, int flag) { - struct dentry *dentry = nd->path.dentry; + struct dentry *dentry = path->dentry; struct inode *inode = dentry->d_inode; int error; @@ -1510,13 +1510,13 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { flag &= ~O_TRUNC; } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { - if (nd->path.mnt->mnt_flags & MNT_NODEV) + if (path->mnt->mnt_flags & MNT_NODEV) return -EACCES; flag &= ~O_TRUNC; } - error = vfs_permission(nd, acc_mode); + error = inode_permission(inode, acc_mode); if (error) return error; /* @@ -1551,7 +1551,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) */ error = locks_verify_locked(inode); if (!error) - error = security_path_truncate(&nd->path, 0, + error = security_path_truncate(path, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { DQUOT_INIT(inode); @@ -1594,7 +1594,7 @@ out_unlock: if (error) return error; /* Don't check for write permission, don't truncate */ - return may_open(nd, 0, flag & ~O_TRUNC); + return may_open(&nd->path, 0, flag & ~O_TRUNC); } /* @@ -1780,7 +1780,7 @@ ok: if (error) goto exit; } - error = may_open(&nd, acc_mode, flag); + error = may_open(&nd.path, acc_mode, flag); if (error) { if (will_write) mnt_drop_write(nd.path.mnt); diff --git a/fs/nfsctl.c b/fs/nfsctl.c index b1acbd6ab6fb..b27451909dff 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c @@ -38,9 +38,10 @@ static struct file *do_open(char *name, int flags) return ERR_PTR(error); if (flags == O_RDWR) - error = may_open(&nd,MAY_READ|MAY_WRITE,FMODE_READ|FMODE_WRITE); + error = may_open(&nd.path, MAY_READ|MAY_WRITE, + FMODE_READ|FMODE_WRITE); else - error = may_open(&nd, MAY_WRITE, 
FMODE_WRITE); + error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE); if (!error) return dentry_open(nd.path.dentry, nd.path.mnt, flags, -- cgit From cb23beb55100171646e69e248fb45f10db6e99a4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Oct 2008 09:59:29 +0200 Subject: kill vfs_permission With all the nameidata removal there's no point anymore for this helper. Of the three callers left two will go away with the next lookup series anyway. Also add proper kerneldoc to inode_permission as this is the main permission check routine now. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/exec.c | 5 +++-- fs/namei.c | 31 +++++++++++++------------------ 2 files changed, 16 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 02d2e120542d..dfbf7009fbe7 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -127,7 +127,8 @@ asmlinkage long sys_uselib(const char __user * library) if (nd.path.mnt->mnt_flags & MNT_NOEXEC) goto exit; - error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN); + error = inode_permission(nd.path.dentry->d_inode, + MAY_READ | MAY_EXEC | MAY_OPEN); if (error) goto exit; @@ -680,7 +681,7 @@ struct file *open_exec(const char *name) if (nd.path.mnt->mnt_flags & MNT_NOEXEC) goto out_path_put; - err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN); + err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN); if (err) goto out_path_put; diff --git a/fs/namei.c b/fs/namei.c index 5cc0dc95a7a5..3f88e043d459 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -226,6 +226,16 @@ int generic_permission(struct inode *inode, int mask, return -EACCES; } +/** + * inode_permission - check for access rights to a given inode + * @inode: inode to check permission on + * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) + * + * Used to check for read/write/execute permissions on an inode. + * We use "fsuid" for this, letting us set arbitrary permissions + * for filesystem access without changing the "normal" uids which + * are used for other things. + */ int inode_permission(struct inode *inode, int mask) { int retval; @@ -263,21 +273,6 @@ int inode_permission(struct inode *inode, int mask) mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND)); } -/** - * vfs_permission - check for access rights to a given path - * @nd: lookup result that describes the path - * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * - * Used to check for read/write/execute permissions on a path. - * We use "fsuid" for this, letting us set arbitrary permissions - * for filesystem access without changing the "normal" uids which - * are used for other things. - */ -int vfs_permission(struct nameidata *nd, int mask) -{ - return inode_permission(nd->path.dentry->d_inode, mask); -} - /** * file_permission - check for additional access rights to a given file * @file: file to check access rights for @@ -288,7 +283,7 @@ int vfs_permission(struct nameidata *nd, int mask) * * Note: * Do not use this function in new code. All access checks should - * be done using vfs_permission(). + * be done using inode_permission(). 
*/ int file_permission(struct file *file, int mask) { @@ -853,7 +848,8 @@ static int __link_path_walk(const char *name, struct nameidata *nd) nd->flags |= LOOKUP_CONTINUE; err = exec_permission_lite(inode); if (err == -EAGAIN) - err = vfs_permission(nd, MAY_EXEC); + err = inode_permission(nd->path.dentry->d_inode, + MAY_EXEC); if (err) break; @@ -2882,7 +2878,6 @@ EXPORT_SYMBOL(path_lookup); EXPORT_SYMBOL(kern_path); EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(inode_permission); -EXPORT_SYMBOL(vfs_permission); EXPORT_SYMBOL(file_permission); EXPORT_SYMBOL(unlock_rename); EXPORT_SYMBOL(vfs_create); -- cgit From 18d8fda7c3c9439be04d7ea2e82da2513b121acb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 26 Dec 2008 00:35:37 -0500 Subject: take init_fs to saner place Signed-off-by: Al Viro --- fs/namei.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 3f88e043d459..e203691b9d12 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2893,3 +2893,10 @@ EXPORT_SYMBOL(vfs_symlink); EXPORT_SYMBOL(vfs_unlink); EXPORT_SYMBOL(dentry_unhash); EXPORT_SYMBOL(generic_readlink); + +/* to be mentioned only in INIT_TASK */ +struct fs_struct init_fs = { + .count = ATOMIC_INIT(1), + .lock = RW_LOCK_UNLOCKED, + .umask = 0022, +}; -- cgit From 1239f26c05899f1f3c541b41e719c59d58038786 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 10 Dec 2008 18:37:28 -0500 Subject: make INIT_FS use the __RW_LOCK_UNLOCKED initialization [AV: rediffed on top of unification of init_fs] Initialization of init_fs still uses the deprecated RW_LOCK_UNLOCKED macro. This patch updates it to use the __RW_LOCK_UNLOCKED(lock) macro. Signed-off-by: Steven Rostedt Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index e203691b9d12..dd5c9f0bf829 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2897,6 +2897,6 @@ EXPORT_SYMBOL(generic_readlink); /* to be mentioned only in INIT_TASK */ struct fs_struct init_fs = { .count = ATOMIC_INIT(1), - .lock = RW_LOCK_UNLOCKED, + .lock = __RW_LOCK_UNLOCKED(init_fs.lock), .umask = 0022, }; -- cgit From b6b3fdead251d432f32f2cfce2a893ab8a658110 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 10 Dec 2008 09:35:45 -0800 Subject: filp_cachep can be static in fs/file_table.c Instead of creating the "filp" kmem_cache in vfs_caches_init(), we can do it a little bit later in files_init(), so that filp_cachep is static to fs/file_table.c Acked-by: Paul E.
McKenney Signed-off-by: Eric Dumazet Signed-off-by: Al Viro --- fs/dcache.c | 6 ------ fs/file_table.c | 10 +++++++++- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index bdb3f50248a7..e88c23b85a32 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2314,9 +2314,6 @@ static void __init dcache_init(void) /* SLAB cache for __getname() consumers */ struct kmem_cache *names_cachep __read_mostly; -/* SLAB cache for file structures */ -struct kmem_cache *filp_cachep __read_mostly; - EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) @@ -2338,9 +2335,6 @@ void __init vfs_caches_init(unsigned long mempages) names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - dcache_init(); inode_init(); files_init(mempages); diff --git a/fs/file_table.c b/fs/file_table.c index 0fbcacc3ea75..bbeeac6efa1a 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -32,6 +32,9 @@ struct files_stat_struct files_stat = { /* public. Not pretty! */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock); +/* SLAB cache for file structures */ +static struct kmem_cache *filp_cachep __read_mostly; + static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) @@ -397,7 +400,12 @@ too_bad: void __init files_init(unsigned long mempages) { int n; - /* One file with associated inode and dcache is very roughly 1K. + + filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + + /* + * One file with associated inode and dcache is very roughly 1K. * Per default don't use more than 10% of our memory for files. */ -- cgit From 6badd79bd002788aaec27b50a74ab69ef65ab8ee Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 26 Dec 2008 00:57:40 -0500 Subject: kill ->dir_notify() Remove the hopelessly misguided ->dir_notify(). The only instance (cifs) has been broken by design from the very beginning; the objects it creates are never destroyed, keep references to struct file they can outlive, nothing that could possibly evict them exists on close(2) path *and* no locking whatsoever is done to prevent races with close(), should the previous, er, deficiencies someday be dealt with. 
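(Editorial aside before the diff: the user-visible dnotify interface, fcntl(F_NOTIFY) on a directory fd with delivery via a signal, is untouched by this removal; only the per-filesystem ->dir_notify() hook goes away. A minimal, hypothetical user-space sketch of that interface, for context; the watched directory is made up and error handling is abbreviated.)

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_event;

static void on_sigio(int sig)
{
	got_event = 1;
}

int main(void)
{
	int fd = open("/tmp", O_RDONLY);	/* hypothetical directory */

	if (fd < 0)
		return 1;
	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_RENAME | DN_MULTISHOT);

	pause();	/* wait for a directory event */
	if (got_event)
		printf("directory changed\n");
	close(fd);
	return 0;
}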
Signed-off-by: Al Viro --- fs/bad_inode.c | 6 --- fs/cifs/Makefile | 2 +- fs/cifs/cifsfs.c | 7 ---- fs/cifs/cifsfs.h | 1 - fs/cifs/fcntl.c | 118 ------------------------------------------------------- fs/dnotify.c | 3 -- 6 files changed, 1 insertion(+), 136 deletions(-) delete mode 100644 fs/cifs/fcntl.c (limited to 'fs') diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 5f1538c03b1b..a05287a23f62 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -132,11 +132,6 @@ static int bad_file_check_flags(int flags) return -EIO; } -static int bad_file_dir_notify(struct file *file, unsigned long arg) -{ - return -EIO; -} - static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) { return -EIO; @@ -179,7 +174,6 @@ static const struct file_operations bad_file_ops = .sendpage = bad_file_sendpage, .get_unmapped_area = bad_file_get_unmapped_area, .check_flags = bad_file_check_flags, - .dir_notify = bad_file_dir_notify, .flock = bad_file_flock, .splice_write = bad_file_splice_write, .splice_read = bad_file_splice_read, diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 6ba43fb346fb..9948c0030e86 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_CIFS) += cifs.o cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ - md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o \ + md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o cifsacl.o cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 0005a194a75c..13ea53251dcf 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -747,7 +747,6 @@ const struct file_operations cifs_file_ops = { #endif /* CONFIG_CIFS_POSIX */ #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -768,7 +767,6 @@ const struct file_operations cifs_file_direct_ops = { #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -789,7 +787,6 @@ const struct file_operations cifs_file_nobrl_ops = { #endif /* CONFIG_CIFS_POSIX */ #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -809,7 +806,6 @@ const struct file_operations cifs_file_direct_nobrl_ops = { #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -818,9 +814,6 @@ const struct file_operations cifs_dir_ops = { .readdir = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, -#ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, -#endif /* CONFIG_CIFS_EXPERIMENTAL */ .unlocked_ioctl = cifs_ioctl, .llseek = generic_file_llseek, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 2ce04c73d74e..7ac481841f87 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -76,7 +76,6 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); -extern int cifs_dir_notify(struct file *, unsigned long arg); /* Functions related to dir entries */ extern struct 
dentry_operations cifs_dentry_ops; diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c deleted file mode 100644 index 5a57581eb4b2..000000000000 --- a/fs/cifs/fcntl.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * fs/cifs/fcntl.c - * - * vfs operations that deal with the file control API - * - * Copyright (C) International Business Machines Corp., 2003,2004 - * Author(s): Steve French (sfrench@us.ibm.com) - * - * This library is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include -#include -#include -#include "cifsglob.h" -#include "cifsproto.h" -#include "cifs_unicode.h" -#include "cifs_debug.h" -#include "cifsfs.h" - -static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags) -{ - __u32 cifs_ntfy_flags = 0; - - /* No way on Linux VFS to ask to monitor xattr - changes (and no stream support either */ - if (fcntl_notify_flags & DN_ACCESS) - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS; - if (fcntl_notify_flags & DN_MODIFY) { - /* What does this mean on directories? */ - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE | - FILE_NOTIFY_CHANGE_SIZE; - } - if (fcntl_notify_flags & DN_CREATE) { - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION | - FILE_NOTIFY_CHANGE_LAST_WRITE; - } - if (fcntl_notify_flags & DN_DELETE) - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE; - if (fcntl_notify_flags & DN_RENAME) { - /* BB review this - checking various server behaviors */ - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME | - FILE_NOTIFY_CHANGE_FILE_NAME; - } - if (fcntl_notify_flags & DN_ATTRIB) { - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_SECURITY | - FILE_NOTIFY_CHANGE_ATTRIBUTES; - } -/* if (fcntl_notify_flags & DN_MULTISHOT) { - cifs_ntfy_flags |= ; - } */ /* BB fixme - not sure how to handle this with CIFS yet */ - - return cifs_ntfy_flags; -} - -int cifs_dir_notify(struct file *file, unsigned long arg) -{ - int xid; - int rc = -EINVAL; - int oplock = 0; - struct cifs_sb_info *cifs_sb; - struct cifsTconInfo *pTcon; - char *full_path = NULL; - __u32 filter = FILE_NOTIFY_CHANGE_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES; - __u16 netfid; - - if (experimEnabled == 0) - return 0; - - xid = GetXid(); - cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); - pTcon = cifs_sb->tcon; - - full_path = build_path_from_dentry(file->f_path.dentry); - - if (full_path == NULL) { - rc = -ENOMEM; - } else { - cFYI(1, ("dir notify on file %s Arg 0x%lx", full_path, arg)); - rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, - GENERIC_READ | SYNCHRONIZE, 0 /* create options */, - &netfid, &oplock, NULL, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - /* BB fixme - add this handle to a notify handle list */ - if (rc) { - cFYI(1, ("Could not open directory for notify")); - } else { - filter = convert_to_cifs_notify_flags(arg); - if (filter != 0) { - rc = CIFSSMBNotify(xid, pTcon, - 0 /* no subdirs */, netfid, - filter, 
file, arg & DN_MULTISHOT, - cifs_sb->local_nls); - } else { - rc = -EINVAL; - } - /* BB add code to close file eventually (at unmount - it would close automatically but may be a way - to do it easily when inode freed or when - notify info is cleared/changed */ - cFYI(1, ("notify rc %d", rc)); - } - } - - FreeXid(xid); - return rc; -} diff --git a/fs/dnotify.c b/fs/dnotify.c index 676073b8dda5..b0aa2cde80bd 100644 --- a/fs/dnotify.c +++ b/fs/dnotify.c @@ -115,9 +115,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) dn->dn_next = inode->i_dnotify; inode->i_dnotify = dn; spin_unlock(&inode->i_lock); - - if (filp->f_op && filp->f_op->dir_notify) - return filp->f_op->dir_notify(filp, arg); return 0; out_free: -- cgit From c2acf7b90821785fe812cc0aa05148e5a1f84204 Mon Sep 17 00:00:00 2001 From: Denis ChengRq Date: Mon, 1 Dec 2008 14:34:56 -0800 Subject: fs/block_dev.c: __read_mostly improvement and sb_is_blkdev_sb utilization - iget5_locked in bdget really needs blockdev_superblock, instead of bd_mnt, so bd_mnt could be just a local variable; - blockdev_superblock really needs __read_mostly, while local var bd_mnt not; - make use of sb_is_blkdev_sb in bd_forget, instead of direct reference to blockdev_superblock. Signed-off-by: Denis ChengRq Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/block_dev.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 99e0ae1a4c78..349a26c10001 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -326,12 +326,13 @@ static struct file_system_type bd_type = { .kill_sb = kill_anon_super, }; -static struct vfsmount *bd_mnt __read_mostly; -struct super_block *blockdev_superblock; +struct super_block *blockdev_superblock __read_mostly; void __init bdev_cache_init(void) { int err; + struct vfsmount *bd_mnt; + bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_PANIC), @@ -373,7 +374,7 @@ struct block_device *bdget(dev_t dev) struct block_device *bdev; struct inode *inode; - inode = iget5_locked(bd_mnt->mnt_sb, hash(dev), + inode = iget5_locked(blockdev_superblock, hash(dev), bdev_test, bdev_set, &dev); if (!inode) @@ -463,7 +464,7 @@ void bd_forget(struct inode *inode) spin_lock(&bdev_lock); if (inode->i_bdev) { - if (inode->i_sb != blockdev_superblock) + if (!sb_is_blkdev_sb(inode->i_sb)) bdev = inode->i_bdev; __bd_forget(inode); } -- cgit From 272eb01485dda98e3b8910c7c1a53d597616b0a0 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 17 Dec 2008 13:59:41 -0500 Subject: filesystem notification: create fs/notify to contain all fs notification Creating a generic filesystem notification interface, fsnotify, which will be used by inotify, dnotify, and eventually fanotify is really starting to clutter the fs directory. This patch simply moves inotify and dnotify into fs/notify/inotify and fs/notify/dnotify respectively to make both current fs/ and future notification tidier. 
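(Editorial aside: since the bulk of the code being relocated here implements the kernel side of the inotify interface described in the Kconfig help text below, a minimal, hypothetical user-space sketch of that interface may help for orientation; the watched path is made up and error handling is abbreviated.)

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init();
	ssize_t len;

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
		return 1;

	len = read(fd, buf, sizeof(buf));	/* blocks until events arrive */
	for (char *p = buf; len > 0 && p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=0x%x name=%s\n",
		       ev->wd, ev->mask, ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;
	}
	close(fd);
	return 0;
}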
Signed-off-by: Eric Paris Signed-off-by: Al Viro --- fs/Kconfig | 39 +- fs/Makefile | 5 +- fs/dnotify.c | 191 -------- fs/inotify.c | 913 --------------------------------------- fs/inotify_user.c | 778 --------------------------------- fs/notify/Kconfig | 2 + fs/notify/Makefile | 2 + fs/notify/dnotify/Kconfig | 10 + fs/notify/dnotify/Makefile | 1 + fs/notify/dnotify/dnotify.c | 191 ++++++++ fs/notify/inotify/Kconfig | 27 ++ fs/notify/inotify/Makefile | 2 + fs/notify/inotify/inotify.c | 913 +++++++++++++++++++++++++++++++++++++++ fs/notify/inotify/inotify_user.c | 778 +++++++++++++++++++++++++++++++++ 14 files changed, 1928 insertions(+), 1924 deletions(-) delete mode 100644 fs/dnotify.c delete mode 100644 fs/inotify.c delete mode 100644 fs/inotify_user.c create mode 100644 fs/notify/Kconfig create mode 100644 fs/notify/Makefile create mode 100644 fs/notify/dnotify/Kconfig create mode 100644 fs/notify/dnotify/Makefile create mode 100644 fs/notify/dnotify/dnotify.c create mode 100644 fs/notify/inotify/Kconfig create mode 100644 fs/notify/inotify/Makefile create mode 100644 fs/notify/inotify/inotify.c create mode 100644 fs/notify/inotify/inotify_user.c (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index 522469a7eca3..ff0e81980207 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -270,44 +270,7 @@ config OCFS2_COMPAT_JBD endif # BLOCK -config DNOTIFY - bool "Dnotify support" - default y - help - Dnotify is a directory-based per-fd file change notification system - that uses signals to communicate events to user-space. There exist - superior alternatives, but some applications may still rely on - dnotify. - - If unsure, say Y. - -config INOTIFY - bool "Inotify file change notification support" - default y - ---help--- - Say Y here to enable inotify support. Inotify is a file change - notification system and a replacement for dnotify. Inotify fixes - numerous shortcomings in dnotify and introduces several new features - including multiple file events, one-shot support, and unmount - notification. - - For more information, see - - If unsure, say Y. - -config INOTIFY_USER - bool "Inotify support for userspace" - depends on INOTIFY - default y - ---help--- - Say Y here to enable inotify support for userspace, including the - associated system calls. Inotify allows monitoring of both files and - directories via a single open fd. Events are read from the file - descriptor, which is also select()- and poll()-able. - - For more information, see - - If unsure, say Y. +source "fs/notify/Kconfig" config QUOTA bool "Quota support" diff --git a/fs/Makefile b/fs/Makefile index d9f8afe6f0c4..e6f423d1d228 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -20,8 +20,7 @@ obj-y += no-block.o endif obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o -obj-$(CONFIG_INOTIFY) += inotify.o -obj-$(CONFIG_INOTIFY_USER) += inotify_user.o +obj-y += notify/ obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_SIGNALFD) += signalfd.o @@ -57,8 +56,6 @@ obj-$(CONFIG_QFMT_V1) += quota_v1.o obj-$(CONFIG_QFMT_V2) += quota_v2.o obj-$(CONFIG_QUOTACTL) += quota.o -obj-$(CONFIG_DNOTIFY) += dnotify.o - obj-$(CONFIG_PROC_FS) += proc/ obj-y += partitions/ obj-$(CONFIG_SYSFS) += sysfs/ diff --git a/fs/dnotify.c b/fs/dnotify.c deleted file mode 100644 index b0aa2cde80bd..000000000000 --- a/fs/dnotify.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Directory notifications for Linux. 
- * - * Copyright (C) 2000,2001,2002 Stephen Rothwell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -int dir_notify_enable __read_mostly = 1; - -static struct kmem_cache *dn_cache __read_mostly; - -static void redo_inode_mask(struct inode *inode) -{ - unsigned long new_mask; - struct dnotify_struct *dn; - - new_mask = 0; - for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) - new_mask |= dn->dn_mask & ~DN_MULTISHOT; - inode->i_dnotify_mask = new_mask; -} - -void dnotify_flush(struct file *filp, fl_owner_t id) -{ - struct dnotify_struct *dn; - struct dnotify_struct **prev; - struct inode *inode; - - inode = filp->f_path.dentry->d_inode; - if (!S_ISDIR(inode->i_mode)) - return; - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((dn = *prev) != NULL) { - if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { - *prev = dn->dn_next; - redo_inode_mask(inode); - kmem_cache_free(dn_cache, dn); - break; - } - prev = &dn->dn_next; - } - spin_unlock(&inode->i_lock); -} - -int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) -{ - struct dnotify_struct *dn; - struct dnotify_struct *odn; - struct dnotify_struct **prev; - struct inode *inode; - fl_owner_t id = current->files; - struct file *f; - int error = 0; - - if ((arg & ~DN_MULTISHOT) == 0) { - dnotify_flush(filp, id); - return 0; - } - if (!dir_notify_enable) - return -EINVAL; - inode = filp->f_path.dentry->d_inode; - if (!S_ISDIR(inode->i_mode)) - return -ENOTDIR; - dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); - if (dn == NULL) - return -ENOMEM; - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((odn = *prev) != NULL) { - if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { - odn->dn_fd = fd; - odn->dn_mask |= arg; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - goto out_free; - } - prev = &odn->dn_next; - } - - rcu_read_lock(); - f = fcheck(fd); - rcu_read_unlock(); - /* we'd lost the race with close(), sod off silently */ - /* note that inode->i_lock prevents reordering problems - * between accesses to descriptor table and ->i_dnotify */ - if (f != filp) - goto out_free; - - error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); - if (error) - goto out_free; - - dn->dn_mask = arg; - dn->dn_fd = fd; - dn->dn_filp = filp; - dn->dn_owner = id; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - dn->dn_next = inode->i_dnotify; - inode->i_dnotify = dn; - spin_unlock(&inode->i_lock); - return 0; - -out_free: - spin_unlock(&inode->i_lock); - kmem_cache_free(dn_cache, dn); - return error; -} - -void __inode_dir_notify(struct inode *inode, unsigned long event) -{ - struct dnotify_struct * dn; - struct dnotify_struct **prev; - struct fown_struct * fown; - int changed = 0; - - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((dn = *prev) != NULL) { - if ((dn->dn_mask & event) == 0) { - prev = &dn->dn_next; - continue; - } - fown = &dn->dn_filp->f_owner; - send_sigio(fown, dn->dn_fd, POLL_MSG); - if (dn->dn_mask & DN_MULTISHOT) - prev = &dn->dn_next; - else { 
- *prev = dn->dn_next; - changed = 1; - kmem_cache_free(dn_cache, dn); - } - } - if (changed) - redo_inode_mask(inode); - spin_unlock(&inode->i_lock); -} - -EXPORT_SYMBOL(__inode_dir_notify); - -/* - * This is hopelessly wrong, but unfixable without API changes. At - * least it doesn't oops the kernel... - * - * To safely access ->d_parent we need to keep d_move away from it. Use the - * dentry's d_lock for this. - */ -void dnotify_parent(struct dentry *dentry, unsigned long event) -{ - struct dentry *parent; - - if (!dir_notify_enable) - return; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - if (parent->d_inode->i_dnotify_mask & event) { - dget(parent); - spin_unlock(&dentry->d_lock); - __inode_dir_notify(parent->d_inode, event); - dput(parent); - } else { - spin_unlock(&dentry->d_lock); - } -} -EXPORT_SYMBOL_GPL(dnotify_parent); - -static int __init dnotify_init(void) -{ - dn_cache = kmem_cache_create("dnotify_cache", - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); - return 0; -} - -module_init(dnotify_init) diff --git a/fs/inotify.c b/fs/inotify.c deleted file mode 100644 index dae3f28f30d4..000000000000 --- a/fs/inotify.c +++ /dev/null @@ -1,913 +0,0 @@ -/* - * fs/inotify.c - inode-based file event notifications - * - * Authors: - * John McCutchan - * Robert Love - * - * Kernel API added by: Amy Griffis - * - * Copyright (C) 2005 John McCutchan - * Copyright 2006 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static atomic_t inotify_cookie; - -/* - * Lock ordering: - * - * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_mutex (synchronize shrink_icache_memory()) - * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) - * inotify_handle->mutex (protects inotify_handle and watches->h_list) - * - * The inode->inotify_mutex and inotify_handle->mutex and held during execution - * of a caller's event handler. Thus, the caller must not hold any locks - * taken in their event handler while calling any of the published inotify - * interfaces. - */ - -/* - * Lifetimes of the three main data structures--inotify_handle, inode, and - * inotify_watch--are managed by reference count. - * - * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). - * Additional references can bump the count via get_inotify_handle() and drop - * the count via put_inotify_handle(). - * - * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() - * to remove_watch_no_event(). Additional references can bump the count via - * get_inotify_watch() and drop the count via put_inotify_watch(). The caller - * is reponsible for the final put after receiving IN_IGNORED, or when using - * IN_ONESHOT after receiving the first event. Inotify does the final put if - * inotify_destroy() is called. 
- * - * inode: Pinned so long as the inode is associated with a watch, from - * inotify_add_watch() to the final put_inotify_watch(). - */ - -/* - * struct inotify_handle - represents an inotify instance - * - * This structure is protected by the mutex 'mutex'. - */ -struct inotify_handle { - struct idr idr; /* idr mapping wd -> watch */ - struct mutex mutex; /* protects this bad boy */ - struct list_head watches; /* list of watches */ - atomic_t count; /* reference count */ - u32 last_wd; /* the last wd allocated */ - const struct inotify_operations *in_ops; /* inotify caller operations */ -}; - -static inline void get_inotify_handle(struct inotify_handle *ih) -{ - atomic_inc(&ih->count); -} - -static inline void put_inotify_handle(struct inotify_handle *ih) -{ - if (atomic_dec_and_test(&ih->count)) { - idr_destroy(&ih->idr); - kfree(ih); - } -} - -/** - * get_inotify_watch - grab a reference to an inotify_watch - * @watch: watch to grab - */ -void get_inotify_watch(struct inotify_watch *watch) -{ - atomic_inc(&watch->count); -} -EXPORT_SYMBOL_GPL(get_inotify_watch); - -int pin_inotify_watch(struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - spin_lock(&sb_lock); - if (sb->s_count >= S_BIAS) { - atomic_inc(&sb->s_active); - spin_unlock(&sb_lock); - atomic_inc(&watch->count); - return 1; - } - spin_unlock(&sb_lock); - return 0; -} - -/** - * put_inotify_watch - decrements the ref count on a given watch. cleans up - * watch references if the count reaches zero. inotify_watch is freed by - * inotify callers via the destroy_watch() op. - * @watch: watch to release - */ -void put_inotify_watch(struct inotify_watch *watch) -{ - if (atomic_dec_and_test(&watch->count)) { - struct inotify_handle *ih = watch->ih; - - iput(watch->inode); - ih->in_ops->destroy_watch(watch); - put_inotify_handle(ih); - } -} -EXPORT_SYMBOL_GPL(put_inotify_watch); - -void unpin_inotify_watch(struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - put_inotify_watch(watch); - deactivate_super(sb); -} - -/* - * inotify_handle_get_wd - returns the next WD for use by the given handle - * - * Callers must hold ih->mutex. This function can sleep. - */ -static int inotify_handle_get_wd(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - int ret; - - do { - if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) - return -ENOSPC; - ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); - } while (ret == -EAGAIN); - - if (likely(!ret)) - ih->last_wd = watch->wd; - - return ret; -} - -/* - * inotify_inode_watched - returns nonzero if there are watches on this inode - * and zero otherwise. We call this lockless, we do not care if we race. - */ -static inline int inotify_inode_watched(struct inode *inode) -{ - return !list_empty(&inode->inotify_watches); -} - -/* - * Get child dentry flag into synch with parent inode. - * Flag should always be clear for negative dentrys. 
- */ -static void set_dentry_child_flags(struct inode *inode, int watched) -{ - struct dentry *alias; - - spin_lock(&dcache_lock); - list_for_each_entry(alias, &inode->i_dentry, d_alias) { - struct dentry *child; - - list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { - if (!child->d_inode) - continue; - - spin_lock(&child->d_lock); - if (watched) - child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - else - child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; - spin_unlock(&child->d_lock); - } - } - spin_unlock(&dcache_lock); -} - -/* - * inotify_find_handle - find the watch associated with the given inode and - * handle - * - * Callers must hold inode->inotify_mutex. - */ -static struct inotify_watch *inode_find_handle(struct inode *inode, - struct inotify_handle *ih) -{ - struct inotify_watch *watch; - - list_for_each_entry(watch, &inode->inotify_watches, i_list) { - if (watch->ih == ih) - return watch; - } - - return NULL; -} - -/* - * remove_watch_no_event - remove watch without the IN_IGNORED event. - * - * Callers must hold both inode->inotify_mutex and ih->mutex. - */ -static void remove_watch_no_event(struct inotify_watch *watch, - struct inotify_handle *ih) -{ - list_del(&watch->i_list); - list_del(&watch->h_list); - - if (!inotify_inode_watched(watch->inode)) - set_dentry_child_flags(watch->inode, 0); - - idr_remove(&ih->idr, watch->wd); -} - -/** - * inotify_remove_watch_locked - Remove a watch from both the handle and the - * inode. Sends the IN_IGNORED event signifying that the inode is no longer - * watched. May be invoked from a caller's event handler. - * @ih: inotify handle associated with watch - * @watch: watch to remove - * - * Callers must hold both inode->inotify_mutex and ih->mutex. - */ -void inotify_remove_watch_locked(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - remove_watch_no_event(watch, ih); - ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); -} -EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); - -/* Kernel API for producing events */ - -/* - * inotify_d_instantiate - instantiate dcache entry for inode - */ -void inotify_d_instantiate(struct dentry *entry, struct inode *inode) -{ - struct dentry *parent; - - if (!inode) - return; - - spin_lock(&entry->d_lock); - parent = entry->d_parent; - if (parent->d_inode && inotify_inode_watched(parent->d_inode)) - entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - spin_unlock(&entry->d_lock); -} - -/* - * inotify_d_move - dcache entry has been moved - */ -void inotify_d_move(struct dentry *entry) -{ - struct dentry *parent; - - parent = entry->d_parent; - if (inotify_inode_watched(parent->d_inode)) - entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - else - entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; -} - -/** - * inotify_inode_queue_event - queue an event to all watches on this inode - * @inode: inode event is originating from - * @mask: event mask describing this event - * @cookie: cookie for synchronization, or zero - * @name: filename, if any - * @n_inode: inode associated with name - */ -void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, - const char *name, struct inode *n_inode) -{ - struct inotify_watch *watch, *next; - - if (!inotify_inode_watched(inode)) - return; - - mutex_lock(&inode->inotify_mutex); - list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { - u32 watch_mask = watch->mask; - if (watch_mask & mask) { - struct inotify_handle *ih= watch->ih; - mutex_lock(&ih->mutex); - if (watch_mask & IN_ONESHOT) - 
remove_watch_no_event(watch, ih); - ih->in_ops->handle_event(watch, watch->wd, mask, cookie, - name, n_inode); - mutex_unlock(&ih->mutex); - } - } - mutex_unlock(&inode->inotify_mutex); -} -EXPORT_SYMBOL_GPL(inotify_inode_queue_event); - -/** - * inotify_dentry_parent_queue_event - queue an event to a dentry's parent - * @dentry: the dentry in question, we queue against this dentry's parent - * @mask: event mask describing this event - * @cookie: cookie for synchronization, or zero - * @name: filename, if any - */ -void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, - u32 cookie, const char *name) -{ - struct dentry *parent; - struct inode *inode; - - if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED)) - return; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - inode = parent->d_inode; - - if (inotify_inode_watched(inode)) { - dget(parent); - spin_unlock(&dentry->d_lock); - inotify_inode_queue_event(inode, mask, cookie, name, - dentry->d_inode); - dput(parent); - } else - spin_unlock(&dentry->d_lock); -} -EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); - -/** - * inotify_get_cookie - return a unique cookie for use in synchronizing events. - */ -u32 inotify_get_cookie(void) -{ - return atomic_inc_return(&inotify_cookie); -} -EXPORT_SYMBOL_GPL(inotify_get_cookie); - -/** - * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. - * @list: list of inodes being unmounted (sb->s_inodes) - * - * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. - * We temporarily drop inode_lock, however, and CAN block. - */ -void inotify_unmount_inodes(struct list_head *list) -{ - struct inode *inode, *next_i, *need_iput = NULL; - - list_for_each_entry_safe(inode, next_i, list, i_sb_list) { - struct inotify_watch *watch, *next_w; - struct inode *need_iput_tmp; - struct list_head *watches; - - /* - * If i_count is zero, the inode cannot have any watches and - * doing an __iget/iput with MS_ACTIVE clear would actually - * evict all inodes with zero i_count from icache which is - * unnecessarily violent and may in fact be illegal to do. - */ - if (!atomic_read(&inode->i_count)) - continue; - - /* - * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or - * I_WILL_FREE which is fine because by that point the inode - * cannot have any associated watches. - */ - if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) - continue; - - need_iput_tmp = need_iput; - need_iput = NULL; - /* In case inotify_remove_watch_locked() drops a reference. */ - if (inode != need_iput_tmp) - __iget(inode); - else - need_iput_tmp = NULL; - /* In case the dropping of a reference would nuke next_i. */ - if ((&next_i->i_sb_list != list) && - atomic_read(&next_i->i_count) && - !(next_i->i_state & (I_CLEAR | I_FREEING | - I_WILL_FREE))) { - __iget(next_i); - need_iput = next_i; - } - - /* - * We can safely drop inode_lock here because we hold - * references on both inode and next_i. Also no new inodes - * will be added since the umount has begun. Finally, - * iprune_mutex keeps shrink_icache_memory() away. 
- */ - spin_unlock(&inode_lock); - - if (need_iput_tmp) - iput(need_iput_tmp); - - /* for each watch, send IN_UNMOUNT and then remove it */ - mutex_lock(&inode->inotify_mutex); - watches = &inode->inotify_watches; - list_for_each_entry_safe(watch, next_w, watches, i_list) { - struct inotify_handle *ih= watch->ih; - get_inotify_watch(watch); - mutex_lock(&ih->mutex); - ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, - NULL, NULL); - inotify_remove_watch_locked(ih, watch); - mutex_unlock(&ih->mutex); - put_inotify_watch(watch); - } - mutex_unlock(&inode->inotify_mutex); - iput(inode); - - spin_lock(&inode_lock); - } -} -EXPORT_SYMBOL_GPL(inotify_unmount_inodes); - -/** - * inotify_inode_is_dead - an inode has been deleted, cleanup any watches - * @inode: inode that is about to be removed - */ -void inotify_inode_is_dead(struct inode *inode) -{ - struct inotify_watch *watch, *next; - - mutex_lock(&inode->inotify_mutex); - list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { - struct inotify_handle *ih = watch->ih; - mutex_lock(&ih->mutex); - inotify_remove_watch_locked(ih, watch); - mutex_unlock(&ih->mutex); - } - mutex_unlock(&inode->inotify_mutex); -} -EXPORT_SYMBOL_GPL(inotify_inode_is_dead); - -/* Kernel Consumer API */ - -/** - * inotify_init - allocate and initialize an inotify instance - * @ops: caller's inotify operations - */ -struct inotify_handle *inotify_init(const struct inotify_operations *ops) -{ - struct inotify_handle *ih; - - ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); - if (unlikely(!ih)) - return ERR_PTR(-ENOMEM); - - idr_init(&ih->idr); - INIT_LIST_HEAD(&ih->watches); - mutex_init(&ih->mutex); - ih->last_wd = 0; - ih->in_ops = ops; - atomic_set(&ih->count, 0); - get_inotify_handle(ih); - - return ih; -} -EXPORT_SYMBOL_GPL(inotify_init); - -/** - * inotify_init_watch - initialize an inotify watch - * @watch: watch to initialize - */ -void inotify_init_watch(struct inotify_watch *watch) -{ - INIT_LIST_HEAD(&watch->h_list); - INIT_LIST_HEAD(&watch->i_list); - atomic_set(&watch->count, 0); - get_inotify_watch(watch); /* initial get */ -} -EXPORT_SYMBOL_GPL(inotify_init_watch); - -/* - * Watch removals suck violently. To kick the watch out we need (in this - * order) inode->inotify_mutex and ih->mutex. That's fine if we have - * a hold on inode; however, for all other cases we need to make damn sure - * we don't race with umount. We can *NOT* just grab a reference to a - * watch - inotify_unmount_inodes() will happily sail past it and we'll end - * with reference to inode potentially outliving its superblock. Ideally - * we just want to grab an active reference to superblock if we can; that - * will make sure we won't go into inotify_umount_inodes() until we are - * done. Cleanup is just deactivate_super(). However, that leaves a messy - * case - what if we *are* racing with umount() and active references to - * superblock can't be acquired anymore? We can bump ->s_count, grab - * ->s_umount, which will almost certainly wait until the superblock is shut - * down and the watch in question is pining for fjords. That's fine, but - * there is a problem - we might have hit the window between ->s_active - * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock - * is past the point of no return and is heading for shutdown) and the - * moment when deactivate_super() acquires ->s_umount. We could just do - * drop_super() yield() and retry, but that's rather antisocial and this - * stuff is luser-triggerable. 
OTOH, having grabbed ->s_umount and having - * found that we'd got there first (i.e. that ->s_root is non-NULL) we know - * that we won't race with inotify_umount_inodes(). So we could grab a - * reference to watch and do the rest as above, just with drop_super() instead - * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we - * could grab ->s_umount. So the watch could've been gone already. - * - * That still can be dealt with - we need to save watch->wd, do idr_find() - * and compare its result with our pointer. If they match, we either have - * the damn thing still alive or we'd lost not one but two races at once, - * the watch had been killed and a new one got created with the same ->wd - * at the same address. That couldn't have happened in inotify_destroy(), - * but inotify_rm_wd() could run into that. Still, "new one got created" - * is not a problem - we have every right to kill it or leave it alone, - * whatever's more convenient. - * - * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as - * "grab it and kill it" check. If it's been our original watch, we are - * fine, if it's a newcomer - nevermind, just pretend that we'd won the - * race and kill the fscker anyway; we are safe since we know that its - * superblock won't be going away. - * - * And yes, this is far beyond mere "not very pretty"; so's the entire - * concept of inotify to start with. - */ - -/** - * pin_to_kill - pin the watch down for removal - * @ih: inotify handle - * @watch: watch to kill - * - * Called with ih->mutex held, drops it. Possible return values: - * 0 - nothing to do, it has died - * 1 - remove it, drop the reference and deactivate_super() - * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid - * that variant, since it involved a lot of PITA, but that's the best that - * could've been done. - */ -static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - s32 wd = watch->wd; - - spin_lock(&sb_lock); - if (sb->s_count >= S_BIAS) { - atomic_inc(&sb->s_active); - spin_unlock(&sb_lock); - get_inotify_watch(watch); - mutex_unlock(&ih->mutex); - return 1; /* the best outcome */ - } - sb->s_count++; - spin_unlock(&sb_lock); - mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */ - down_read(&sb->s_umount); - if (likely(!sb->s_root)) { - /* fs is already shut down; the watch is dead */ - drop_super(sb); - return 0; - } - /* raced with the final deactivate_super() */ - mutex_lock(&ih->mutex); - if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) { - /* the watch is dead */ - mutex_unlock(&ih->mutex); - drop_super(sb); - return 0; - } - /* still alive or freed and reused with the same sb and wd; kill */ - get_inotify_watch(watch); - mutex_unlock(&ih->mutex); - return 2; -} - -static void unpin_and_kill(struct inotify_watch *watch, int how) -{ - struct super_block *sb = watch->inode->i_sb; - put_inotify_watch(watch); - switch (how) { - case 1: - deactivate_super(sb); - break; - case 2: - drop_super(sb); - } -} - -/** - * inotify_destroy - clean up and destroy an inotify instance - * @ih: inotify handle - */ -void inotify_destroy(struct inotify_handle *ih) -{ - /* - * Destroy all of the watches for this handle. Unfortunately, not very - * pretty. We cannot do a simple iteration over the list, because we - * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_mutex before ih->mutex. The following works. 
- * - * AV: it had to become even uglier to start working ;-/ - */ - while (1) { - struct inotify_watch *watch; - struct list_head *watches; - struct super_block *sb; - struct inode *inode; - int how; - - mutex_lock(&ih->mutex); - watches = &ih->watches; - if (list_empty(watches)) { - mutex_unlock(&ih->mutex); - break; - } - watch = list_first_entry(watches, struct inotify_watch, h_list); - sb = watch->inode->i_sb; - how = pin_to_kill(ih, watch); - if (!how) - continue; - - inode = watch->inode; - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* make sure we didn't race with another list removal */ - if (likely(idr_find(&ih->idr, watch->wd))) { - remove_watch_no_event(watch, ih); - put_inotify_watch(watch); - } - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - unpin_and_kill(watch, how); - } - - /* free this handle: the put matching the get in inotify_init() */ - put_inotify_handle(ih); -} -EXPORT_SYMBOL_GPL(inotify_destroy); - -/** - * inotify_find_watch - find an existing watch for an (ih,inode) pair - * @ih: inotify handle - * @inode: inode to watch - * @watchp: pointer to existing inotify_watch - * - * Caller must pin given inode (via nameidata). - */ -s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, - struct inotify_watch **watchp) -{ - struct inotify_watch *old; - int ret = -ENOENT; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - old = inode_find_handle(inode, ih); - if (unlikely(old)) { - get_inotify_watch(old); /* caller must put watch */ - *watchp = old; - ret = old->wd; - } - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(inotify_find_watch); - -/** - * inotify_find_update_watch - find and update the mask of an existing watch - * @ih: inotify handle - * @inode: inode's watch to update - * @mask: mask of events to watch - * - * Caller must pin given inode (via nameidata). - */ -s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, - u32 mask) -{ - struct inotify_watch *old; - int mask_add = 0; - int ret; - - if (mask & IN_MASK_ADD) - mask_add = 1; - - /* don't allow invalid bits: we don't want flags set */ - mask &= IN_ALL_EVENTS | IN_ONESHOT; - if (unlikely(!mask)) - return -EINVAL; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* - * Handle the case of re-adding a watch on an (inode,ih) pair that we - * are already watching. We just update the mask and return its wd. - */ - old = inode_find_handle(inode, ih); - if (unlikely(!old)) { - ret = -ENOENT; - goto out; - } - - if (mask_add) - old->mask |= mask; - else - old->mask = mask; - ret = old->wd; -out: - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(inotify_find_update_watch); - -/** - * inotify_add_watch - add a watch to an inotify instance - * @ih: inotify handle - * @watch: caller allocated watch structure - * @inode: inode to watch - * @mask: mask of events to watch - * - * Caller must pin given inode (via nameidata). - * Caller must ensure it only calls inotify_add_watch() once per watch. - * Calls inotify_handle_get_wd() so may sleep. 
- */ -s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, - struct inode *inode, u32 mask) -{ - int ret = 0; - int newly_watched; - - /* don't allow invalid bits: we don't want flags set */ - mask &= IN_ALL_EVENTS | IN_ONESHOT; - if (unlikely(!mask)) - return -EINVAL; - watch->mask = mask; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* Initialize a new watch */ - ret = inotify_handle_get_wd(ih, watch); - if (unlikely(ret)) - goto out; - ret = watch->wd; - - /* save a reference to handle and bump the count to make it official */ - get_inotify_handle(ih); - watch->ih = ih; - - /* - * Save a reference to the inode and bump the ref count to make it - * official. We hold a reference to nameidata, which makes this safe. - */ - watch->inode = igrab(inode); - - /* Add the watch to the handle's and the inode's list */ - newly_watched = !inotify_inode_watched(inode); - list_add(&watch->h_list, &ih->watches); - list_add(&watch->i_list, &inode->inotify_watches); - /* - * Set child flags _after_ adding the watch, so there is no race - * windows where newly instantiated children could miss their parent's - * watched flag. - */ - if (newly_watched) - set_dentry_child_flags(inode, 1); - -out: - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(inotify_add_watch); - -/** - * inotify_clone_watch - put the watch next to existing one - * @old: already installed watch - * @new: new watch - * - * Caller must hold the inotify_mutex of inode we are dealing with; - * it is expected to remove the old watch before unlocking the inode. - */ -s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new) -{ - struct inotify_handle *ih = old->ih; - int ret = 0; - - new->mask = old->mask; - new->ih = ih; - - mutex_lock(&ih->mutex); - - /* Initialize a new watch */ - ret = inotify_handle_get_wd(ih, new); - if (unlikely(ret)) - goto out; - ret = new->wd; - - get_inotify_handle(ih); - - new->inode = igrab(old->inode); - - list_add(&new->h_list, &ih->watches); - list_add(&new->i_list, &old->inode->inotify_watches); -out: - mutex_unlock(&ih->mutex); - return ret; -} - -void inotify_evict_watch(struct inotify_watch *watch) -{ - get_inotify_watch(watch); - mutex_lock(&watch->ih->mutex); - inotify_remove_watch_locked(watch->ih, watch); - mutex_unlock(&watch->ih->mutex); -} - -/** - * inotify_rm_wd - remove a watch from an inotify instance - * @ih: inotify handle - * @wd: watch descriptor to remove - * - * Can sleep. - */ -int inotify_rm_wd(struct inotify_handle *ih, u32 wd) -{ - struct inotify_watch *watch; - struct super_block *sb; - struct inode *inode; - int how; - - mutex_lock(&ih->mutex); - watch = idr_find(&ih->idr, wd); - if (unlikely(!watch)) { - mutex_unlock(&ih->mutex); - return -EINVAL; - } - sb = watch->inode->i_sb; - how = pin_to_kill(ih, watch); - if (!how) - return 0; - - inode = watch->inode; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* make sure that we did not race */ - if (likely(idr_find(&ih->idr, wd) == watch)) - inotify_remove_watch_locked(ih, watch); - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - unpin_and_kill(watch, how); - - return 0; -} -EXPORT_SYMBOL_GPL(inotify_rm_wd); - -/** - * inotify_rm_watch - remove a watch from an inotify instance - * @ih: inotify handle - * @watch: watch to remove - * - * Can sleep. 
- */ -int inotify_rm_watch(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - return inotify_rm_wd(ih, watch->wd); -} -EXPORT_SYMBOL_GPL(inotify_rm_watch); - -/* - * inotify_setup - core initialization function - */ -static int __init inotify_setup(void) -{ - atomic_set(&inotify_cookie, 0); - - return 0; -} - -module_init(inotify_setup); diff --git a/fs/inotify_user.c b/fs/inotify_user.c deleted file mode 100644 index 400f8064a548..000000000000 --- a/fs/inotify_user.c +++ /dev/null @@ -1,778 +0,0 @@ -/* - * fs/inotify_user.c - inotify support for userspace - * - * Authors: - * John McCutchan - * Robert Love - * - * Copyright (C) 2005 John McCutchan - * Copyright 2006 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static struct kmem_cache *watch_cachep __read_mostly; -static struct kmem_cache *event_cachep __read_mostly; - -static struct vfsmount *inotify_mnt __read_mostly; - -/* these are configurable via /proc/sys/fs/inotify/ */ -static int inotify_max_user_instances __read_mostly; -static int inotify_max_user_watches __read_mostly; -static int inotify_max_queued_events __read_mostly; - -/* - * Lock ordering: - * - * inotify_dev->up_mutex (ensures we don't re-add the same watch) - * inode->inotify_mutex (protects inode's watch list) - * inotify_handle->mutex (protects inotify_handle's watch list) - * inotify_dev->ev_mutex (protects device's event queue) - */ - -/* - * Lifetimes of the main data structures: - * - * inotify_device: Lifetime is managed by reference count, from - * sys_inotify_init() until release. Additional references can bump the count - * via get_inotify_dev() and drop the count via put_inotify_dev(). - * - * inotify_user_watch: Lifetime is from create_watch() to the receipt of an - * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the - * first event, or to inotify_destroy(). - */ - -/* - * struct inotify_device - represents an inotify instance - * - * This structure is protected by the mutex 'mutex'. - */ -struct inotify_device { - wait_queue_head_t wq; /* wait queue for i/o */ - struct mutex ev_mutex; /* protects event queue */ - struct mutex up_mutex; /* synchronizes watch updates */ - struct list_head events; /* list of queued events */ - struct user_struct *user; /* user who opened this dev */ - struct inotify_handle *ih; /* inotify handle */ - struct fasync_struct *fa; /* async notification */ - atomic_t count; /* reference count */ - unsigned int queue_size; /* size of the queue (bytes) */ - unsigned int event_count; /* number of pending events */ - unsigned int max_events; /* maximum number of events */ -}; - -/* - * struct inotify_kernel_event - An inotify event, originating from a watch and - * queued for user-space. A list of these is attached to each instance of the - * device. In read(), this list is walked and all events that can fit in the - * buffer are returned. 
- * - * Protected by dev->ev_mutex of the device in which we are queued. - */ -struct inotify_kernel_event { - struct inotify_event event; /* the user-space event */ - struct list_head list; /* entry in inotify_device's list */ - char *name; /* filename, if any */ -}; - -/* - * struct inotify_user_watch - our version of an inotify_watch, we add - * a reference to the associated inotify_device. - */ -struct inotify_user_watch { - struct inotify_device *dev; /* associated device */ - struct inotify_watch wdata; /* inotify watch data */ -}; - -#ifdef CONFIG_SYSCTL - -#include - -static int zero; - -ctl_table inotify_table[] = { - { - .ctl_name = INOTIFY_MAX_USER_INSTANCES, - .procname = "max_user_instances", - .data = &inotify_max_user_instances, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - { - .ctl_name = INOTIFY_MAX_USER_WATCHES, - .procname = "max_user_watches", - .data = &inotify_max_user_watches, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - { - .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, - .procname = "max_queued_events", - .data = &inotify_max_queued_events, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero - }, - { .ctl_name = 0 } -}; -#endif /* CONFIG_SYSCTL */ - -static inline void get_inotify_dev(struct inotify_device *dev) -{ - atomic_inc(&dev->count); -} - -static inline void put_inotify_dev(struct inotify_device *dev) -{ - if (atomic_dec_and_test(&dev->count)) { - atomic_dec(&dev->user->inotify_devs); - free_uid(dev->user); - kfree(dev); - } -} - -/* - * free_inotify_user_watch - cleans up the watch and its references - */ -static void free_inotify_user_watch(struct inotify_watch *w) -{ - struct inotify_user_watch *watch; - struct inotify_device *dev; - - watch = container_of(w, struct inotify_user_watch, wdata); - dev = watch->dev; - - atomic_dec(&dev->user->inotify_watches); - put_inotify_dev(dev); - kmem_cache_free(watch_cachep, watch); -} - -/* - * kernel_event - create a new kernel event with the given parameters - * - * This function can sleep. - */ -static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, - const char *name) -{ - struct inotify_kernel_event *kevent; - - kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); - if (unlikely(!kevent)) - return NULL; - - /* we hand this out to user-space, so zero it just in case */ - memset(&kevent->event, 0, sizeof(struct inotify_event)); - - kevent->event.wd = wd; - kevent->event.mask = mask; - kevent->event.cookie = cookie; - - INIT_LIST_HEAD(&kevent->list); - - if (name) { - size_t len, rem, event_size = sizeof(struct inotify_event); - - /* - * We need to pad the filename so as to properly align an - * array of inotify_event structures. Because the structure is - * small and the common case is a small filename, we just round - * up to the next multiple of the structure's sizeof. This is - * simple and safe for all architectures. 
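The padding rule spelled out above boils down to rounding the NUL-terminated name length up to the next multiple of sizeof(struct inotify_event) (16 bytes with the usual field layout). As a sanity check on the two branches in kernel_event(), here is a small self-contained sketch of the same arithmetic; EVENT_SIZE and padded_name_len() are illustrative names, not part of the patch.

/* Hedged sketch: mirrors the name-padding arithmetic in kernel_event().
 * Assumes sizeof(struct inotify_event) == 16, as on common ABIs. */
#include <stdio.h>
#include <string.h>

#define EVENT_SIZE 16u		/* stand-in for sizeof(struct inotify_event) */

static unsigned int padded_name_len(const char *name)
{
	unsigned int len = strlen(name) + 1;	/* include the trailing NUL */
	unsigned int rem = EVENT_SIZE - len;	/* short-name case */

	if (len > EVENT_SIZE) {			/* long-name case */
		rem = EVENT_SIZE - (len % EVENT_SIZE);
		if (len % EVENT_SIZE == 0)
			rem = 0;
	}
	return len + rem;			/* becomes event.len */
}

int main(void)
{
	/* "a.txt" -> len 6, padded to 16; a 20-char name -> len 21, padded to 32 */
	printf("%u %u\n", padded_name_len("a.txt"),
	       padded_name_len("12345678901234567890"));
	return 0;
}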
- */ - len = strlen(name) + 1; - rem = event_size - len; - if (len > event_size) { - rem = event_size - (len % event_size); - if (len % event_size == 0) - rem = 0; - } - - kevent->name = kmalloc(len + rem, GFP_KERNEL); - if (unlikely(!kevent->name)) { - kmem_cache_free(event_cachep, kevent); - return NULL; - } - memcpy(kevent->name, name, len); - if (rem) - memset(kevent->name + len, 0, rem); - kevent->event.len = len + rem; - } else { - kevent->event.len = 0; - kevent->name = NULL; - } - - return kevent; -} - -/* - * inotify_dev_get_event - return the next event in the given dev's queue - * - * Caller must hold dev->ev_mutex. - */ -static inline struct inotify_kernel_event * -inotify_dev_get_event(struct inotify_device *dev) -{ - return list_entry(dev->events.next, struct inotify_kernel_event, list); -} - -/* - * inotify_dev_get_last_event - return the last event in the given dev's queue - * - * Caller must hold dev->ev_mutex. - */ -static inline struct inotify_kernel_event * -inotify_dev_get_last_event(struct inotify_device *dev) -{ - if (list_empty(&dev->events)) - return NULL; - return list_entry(dev->events.prev, struct inotify_kernel_event, list); -} - -/* - * inotify_dev_queue_event - event handler registered with core inotify, adds - * a new event to the given device - * - * Can sleep (calls kernel_event()). - */ -static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, - u32 cookie, const char *name, - struct inode *ignored) -{ - struct inotify_user_watch *watch; - struct inotify_device *dev; - struct inotify_kernel_event *kevent, *last; - - watch = container_of(w, struct inotify_user_watch, wdata); - dev = watch->dev; - - mutex_lock(&dev->ev_mutex); - - /* we can safely put the watch as we don't reference it while - * generating the event - */ - if (mask & IN_IGNORED || w->mask & IN_ONESHOT) - put_inotify_watch(w); /* final put */ - - /* coalescing: drop this event if it is a dupe of the previous */ - last = inotify_dev_get_last_event(dev); - if (last && last->event.mask == mask && last->event.wd == wd && - last->event.cookie == cookie) { - const char *lastname = last->name; - - if (!name && !lastname) - goto out; - if (name && lastname && !strcmp(lastname, name)) - goto out; - } - - /* the queue overflowed and we already sent the Q_OVERFLOW event */ - if (unlikely(dev->event_count > dev->max_events)) - goto out; - - /* if the queue overflows, we need to notify user space */ - if (unlikely(dev->event_count == dev->max_events)) - kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); - else - kevent = kernel_event(wd, mask, cookie, name); - - if (unlikely(!kevent)) - goto out; - - /* queue the event and wake up anyone waiting */ - dev->event_count++; - dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; - list_add_tail(&kevent->list, &dev->events); - wake_up_interruptible(&dev->wq); - kill_fasync(&dev->fa, SIGIO, POLL_IN); - -out: - mutex_unlock(&dev->ev_mutex); -} - -/* - * remove_kevent - cleans up the given kevent - * - * Caller must hold dev->ev_mutex. - */ -static void remove_kevent(struct inotify_device *dev, - struct inotify_kernel_event *kevent) -{ - list_del(&kevent->list); - - dev->event_count--; - dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; -} - -/* - * free_kevent - frees the given kevent. 
- */ -static void free_kevent(struct inotify_kernel_event *kevent) -{ - kfree(kevent->name); - kmem_cache_free(event_cachep, kevent); -} - -/* - * inotify_dev_event_dequeue - destroy an event on the given device - * - * Caller must hold dev->ev_mutex. - */ -static void inotify_dev_event_dequeue(struct inotify_device *dev) -{ - if (!list_empty(&dev->events)) { - struct inotify_kernel_event *kevent; - kevent = inotify_dev_get_event(dev); - remove_kevent(dev, kevent); - free_kevent(kevent); - } -} - -/* - * find_inode - resolve a user-given path to a specific inode - */ -static int find_inode(const char __user *dirname, struct path *path, - unsigned flags) -{ - int error; - - error = user_path_at(AT_FDCWD, dirname, flags, path); - if (error) - return error; - /* you can only watch an inode if you have read permissions on it */ - error = inode_permission(path->dentry->d_inode, MAY_READ); - if (error) - path_put(path); - return error; -} - -/* - * create_watch - creates a watch on the given device. - * - * Callers must hold dev->up_mutex. - */ -static int create_watch(struct inotify_device *dev, struct inode *inode, - u32 mask) -{ - struct inotify_user_watch *watch; - int ret; - - if (atomic_read(&dev->user->inotify_watches) >= - inotify_max_user_watches) - return -ENOSPC; - - watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); - if (unlikely(!watch)) - return -ENOMEM; - - /* save a reference to device and bump the count to make it official */ - get_inotify_dev(dev); - watch->dev = dev; - - atomic_inc(&dev->user->inotify_watches); - - inotify_init_watch(&watch->wdata); - ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask); - if (ret < 0) - free_inotify_user_watch(&watch->wdata); - - return ret; -} - -/* Device Interface */ - -static unsigned int inotify_poll(struct file *file, poll_table *wait) -{ - struct inotify_device *dev = file->private_data; - int ret = 0; - - poll_wait(file, &dev->wq, wait); - mutex_lock(&dev->ev_mutex); - if (!list_empty(&dev->events)) - ret = POLLIN | POLLRDNORM; - mutex_unlock(&dev->ev_mutex); - - return ret; -} - -static ssize_t inotify_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - size_t event_size = sizeof (struct inotify_event); - struct inotify_device *dev; - char __user *start; - int ret; - DEFINE_WAIT(wait); - - start = buf; - dev = file->private_data; - - while (1) { - - prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - - mutex_lock(&dev->ev_mutex); - if (!list_empty(&dev->events)) { - ret = 0; - break; - } - mutex_unlock(&dev->ev_mutex); - - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - - if (signal_pending(current)) { - ret = -EINTR; - break; - } - - schedule(); - } - - finish_wait(&dev->wq, &wait); - if (ret) - return ret; - - while (1) { - struct inotify_kernel_event *kevent; - - ret = buf - start; - if (list_empty(&dev->events)) - break; - - kevent = inotify_dev_get_event(dev); - if (event_size + kevent->event.len > count) { - if (ret == 0 && count > 0) { - /* - * could not get a single event because we - * didn't have enough buffer space. - */ - ret = -EINVAL; - } - break; - } - remove_kevent(dev, kevent); - - /* - * Must perform the copy_to_user outside the mutex in order - * to avoid a lock order reversal with mmap_sem. 
- */ - mutex_unlock(&dev->ev_mutex); - - if (copy_to_user(buf, &kevent->event, event_size)) { - ret = -EFAULT; - break; - } - buf += event_size; - count -= event_size; - - if (kevent->name) { - if (copy_to_user(buf, kevent->name, kevent->event.len)){ - ret = -EFAULT; - break; - } - buf += kevent->event.len; - count -= kevent->event.len; - } - - free_kevent(kevent); - - mutex_lock(&dev->ev_mutex); - } - mutex_unlock(&dev->ev_mutex); - - return ret; -} - -static int inotify_fasync(int fd, struct file *file, int on) -{ - struct inotify_device *dev = file->private_data; - - return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; -} - -static int inotify_release(struct inode *ignored, struct file *file) -{ - struct inotify_device *dev = file->private_data; - - inotify_destroy(dev->ih); - - /* destroy all of the events on this device */ - mutex_lock(&dev->ev_mutex); - while (!list_empty(&dev->events)) - inotify_dev_event_dequeue(dev); - mutex_unlock(&dev->ev_mutex); - - /* free this device: the put matching the get in inotify_init() */ - put_inotify_dev(dev); - - return 0; -} - -static long inotify_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct inotify_device *dev; - void __user *p; - int ret = -ENOTTY; - - dev = file->private_data; - p = (void __user *) arg; - - switch (cmd) { - case FIONREAD: - ret = put_user(dev->queue_size, (int __user *) p); - break; - } - - return ret; -} - -static const struct file_operations inotify_fops = { - .poll = inotify_poll, - .read = inotify_read, - .fasync = inotify_fasync, - .release = inotify_release, - .unlocked_ioctl = inotify_ioctl, - .compat_ioctl = inotify_ioctl, -}; - -static const struct inotify_operations inotify_user_ops = { - .handle_event = inotify_dev_queue_event, - .destroy_watch = free_inotify_user_watch, -}; - -asmlinkage long sys_inotify_init1(int flags) -{ - struct inotify_device *dev; - struct inotify_handle *ih; - struct user_struct *user; - struct file *filp; - int fd, ret; - - /* Check the IN_* constants for consistency. 
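inotify_read() above returns a packed stream of variable-length records: a fixed-size struct inotify_event followed by event.len bytes of NUL-padded name. For reference, a minimal userspace consumer of that format, written against the public syscalls rather than anything internal to this file, could look like the sketch below; the buffer size and the watch on "." are illustrative choices.

/* Hedged sketch: parse the record stream produced by inotify_read().
 * Watches the current directory for create/delete events. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init1(0);

	if (fd < 0 || inotify_add_watch(fd, ".", IN_CREATE | IN_DELETE) < 0) {
		perror("inotify");
		return 1;
	}

	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));
		ssize_t off = 0;

		if (len <= 0)
			break;
		while (off < len) {
			/* each record is sizeof(*ev) + ev->len bytes */
			struct inotify_event *ev =
				(struct inotify_event *)(buf + off);

			printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
			       ev->len ? ev->name : "");
			off += sizeof(*ev) + ev->len;
		}
	}
	return 0;
}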
*/ - BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); - BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); - - if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) - return -EINVAL; - - fd = get_unused_fd_flags(flags & O_CLOEXEC); - if (fd < 0) - return fd; - - filp = get_empty_filp(); - if (!filp) { - ret = -ENFILE; - goto out_put_fd; - } - - user = get_current_user(); - if (unlikely(atomic_read(&user->inotify_devs) >= - inotify_max_user_instances)) { - ret = -EMFILE; - goto out_free_uid; - } - - dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); - if (unlikely(!dev)) { - ret = -ENOMEM; - goto out_free_uid; - } - - ih = inotify_init(&inotify_user_ops); - if (IS_ERR(ih)) { - ret = PTR_ERR(ih); - goto out_free_dev; - } - dev->ih = ih; - dev->fa = NULL; - - filp->f_op = &inotify_fops; - filp->f_path.mnt = mntget(inotify_mnt); - filp->f_path.dentry = dget(inotify_mnt->mnt_root); - filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; - filp->f_mode = FMODE_READ; - filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); - filp->private_data = dev; - - INIT_LIST_HEAD(&dev->events); - init_waitqueue_head(&dev->wq); - mutex_init(&dev->ev_mutex); - mutex_init(&dev->up_mutex); - dev->event_count = 0; - dev->queue_size = 0; - dev->max_events = inotify_max_queued_events; - dev->user = user; - atomic_set(&dev->count, 0); - - get_inotify_dev(dev); - atomic_inc(&user->inotify_devs); - fd_install(fd, filp); - - return fd; -out_free_dev: - kfree(dev); -out_free_uid: - free_uid(user); - put_filp(filp); -out_put_fd: - put_unused_fd(fd); - return ret; -} - -asmlinkage long sys_inotify_init(void) -{ - return sys_inotify_init1(0); -} - -asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask) -{ - struct inode *inode; - struct inotify_device *dev; - struct path path; - struct file *filp; - int ret, fput_needed; - unsigned flags = 0; - - filp = fget_light(fd, &fput_needed); - if (unlikely(!filp)) - return -EBADF; - - /* verify that this is indeed an inotify instance */ - if (unlikely(filp->f_op != &inotify_fops)) { - ret = -EINVAL; - goto fput_and_out; - } - - if (!(mask & IN_DONT_FOLLOW)) - flags |= LOOKUP_FOLLOW; - if (mask & IN_ONLYDIR) - flags |= LOOKUP_DIRECTORY; - - ret = find_inode(pathname, &path, flags); - if (unlikely(ret)) - goto fput_and_out; - - /* inode held in place by reference to path; dev by fget on fd */ - inode = path.dentry->d_inode; - dev = filp->private_data; - - mutex_lock(&dev->up_mutex); - ret = inotify_find_update_watch(dev->ih, inode, mask); - if (ret == -ENOENT) - ret = create_watch(dev, inode, mask); - mutex_unlock(&dev->up_mutex); - - path_put(&path); -fput_and_out: - fput_light(filp, fput_needed); - return ret; -} - -asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) -{ - struct file *filp; - struct inotify_device *dev; - int ret, fput_needed; - - filp = fget_light(fd, &fput_needed); - if (unlikely(!filp)) - return -EBADF; - - /* verify that this is indeed an inotify instance */ - if (unlikely(filp->f_op != &inotify_fops)) { - ret = -EINVAL; - goto out; - } - - dev = filp->private_data; - - /* we free our watch data when we get IN_IGNORED */ - ret = inotify_rm_wd(dev->ih, wd); - -out: - fput_light(filp, fput_needed); - return ret; -} - -static int -inotify_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, struct vfsmount *mnt) -{ - return get_sb_pseudo(fs_type, "inotify", NULL, - INOTIFYFS_SUPER_MAGIC, mnt); -} - -static struct file_system_type inotify_fs_type = { - .name = "inotifyfs", - .get_sb = inotify_get_sb, - .kill_sb = 
kill_anon_super, -}; - -/* - * inotify_user_setup - Our initialization function. Note that we cannnot return - * error because we have compiled-in VFS hooks. So an (unlikely) failure here - * must result in panic(). - */ -static int __init inotify_user_setup(void) -{ - int ret; - - ret = register_filesystem(&inotify_fs_type); - if (unlikely(ret)) - panic("inotify: register_filesystem returned %d!\n", ret); - - inotify_mnt = kern_mount(&inotify_fs_type); - if (IS_ERR(inotify_mnt)) - panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); - - inotify_max_queued_events = 16384; - inotify_max_user_instances = 128; - inotify_max_user_watches = 8192; - - watch_cachep = kmem_cache_create("inotify_watch_cache", - sizeof(struct inotify_user_watch), - 0, SLAB_PANIC, NULL); - event_cachep = kmem_cache_create("inotify_event_cache", - sizeof(struct inotify_kernel_event), - 0, SLAB_PANIC, NULL); - - return 0; -} - -module_init(inotify_user_setup); diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig new file mode 100644 index 000000000000..50914d7303c6 --- /dev/null +++ b/fs/notify/Kconfig @@ -0,0 +1,2 @@ +source "fs/notify/dnotify/Kconfig" +source "fs/notify/inotify/Kconfig" diff --git a/fs/notify/Makefile b/fs/notify/Makefile new file mode 100644 index 000000000000..5a95b6010ce7 --- /dev/null +++ b/fs/notify/Makefile @@ -0,0 +1,2 @@ +obj-y += dnotify/ +obj-y += inotify/ diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig new file mode 100644 index 000000000000..26adf5dfa646 --- /dev/null +++ b/fs/notify/dnotify/Kconfig @@ -0,0 +1,10 @@ +config DNOTIFY + bool "Dnotify support" + default y + help + Dnotify is a directory-based per-fd file change notification system + that uses signals to communicate events to user-space. There exist + superior alternatives, but some applications may still rely on + dnotify. + + If unsure, say Y. diff --git a/fs/notify/dnotify/Makefile b/fs/notify/dnotify/Makefile new file mode 100644 index 000000000000..f145251dcadb --- /dev/null +++ b/fs/notify/dnotify/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DNOTIFY) += dnotify.o diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c new file mode 100644 index 000000000000..b0aa2cde80bd --- /dev/null +++ b/fs/notify/dnotify/dnotify.c @@ -0,0 +1,191 @@ +/* + * Directory notifications for Linux. + * + * Copyright (C) 2000,2001,2002 Stephen Rothwell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +int dir_notify_enable __read_mostly = 1; + +static struct kmem_cache *dn_cache __read_mostly; + +static void redo_inode_mask(struct inode *inode) +{ + unsigned long new_mask; + struct dnotify_struct *dn; + + new_mask = 0; + for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) + new_mask |= dn->dn_mask & ~DN_MULTISHOT; + inode->i_dnotify_mask = new_mask; +} + +void dnotify_flush(struct file *filp, fl_owner_t id) +{ + struct dnotify_struct *dn; + struct dnotify_struct **prev; + struct inode *inode; + + inode = filp->f_path.dentry->d_inode; + if (!S_ISDIR(inode->i_mode)) + return; + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((dn = *prev) != NULL) { + if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { + *prev = dn->dn_next; + redo_inode_mask(inode); + kmem_cache_free(dn_cache, dn); + break; + } + prev = &dn->dn_next; + } + spin_unlock(&inode->i_lock); +} + +int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) +{ + struct dnotify_struct *dn; + struct dnotify_struct *odn; + struct dnotify_struct **prev; + struct inode *inode; + fl_owner_t id = current->files; + struct file *f; + int error = 0; + + if ((arg & ~DN_MULTISHOT) == 0) { + dnotify_flush(filp, id); + return 0; + } + if (!dir_notify_enable) + return -EINVAL; + inode = filp->f_path.dentry->d_inode; + if (!S_ISDIR(inode->i_mode)) + return -ENOTDIR; + dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); + if (dn == NULL) + return -ENOMEM; + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((odn = *prev) != NULL) { + if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { + odn->dn_fd = fd; + odn->dn_mask |= arg; + inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; + goto out_free; + } + prev = &odn->dn_next; + } + + rcu_read_lock(); + f = fcheck(fd); + rcu_read_unlock(); + /* we'd lost the race with close(), sod off silently */ + /* note that inode->i_lock prevents reordering problems + * between accesses to descriptor table and ->i_dnotify */ + if (f != filp) + goto out_free; + + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); + if (error) + goto out_free; + + dn->dn_mask = arg; + dn->dn_fd = fd; + dn->dn_filp = filp; + dn->dn_owner = id; + inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; + dn->dn_next = inode->i_dnotify; + inode->i_dnotify = dn; + spin_unlock(&inode->i_lock); + return 0; + +out_free: + spin_unlock(&inode->i_lock); + kmem_cache_free(dn_cache, dn); + return error; +} + +void __inode_dir_notify(struct inode *inode, unsigned long event) +{ + struct dnotify_struct * dn; + struct dnotify_struct **prev; + struct fown_struct * fown; + int changed = 0; + + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((dn = *prev) != NULL) { + if ((dn->dn_mask & event) == 0) { + prev = &dn->dn_next; + continue; + } + fown = &dn->dn_filp->f_owner; + send_sigio(fown, dn->dn_fd, POLL_MSG); + if (dn->dn_mask & DN_MULTISHOT) + prev = &dn->dn_next; + else { + *prev = dn->dn_next; + changed = 1; + kmem_cache_free(dn_cache, dn); + } + } + if (changed) + redo_inode_mask(inode); + spin_unlock(&inode->i_lock); +} + +EXPORT_SYMBOL(__inode_dir_notify); + +/* + * This is hopelessly wrong, but unfixable without API changes. At + * least it doesn't oops the kernel... + * + * To safely access ->d_parent we need to keep d_move away from it. Use the + * dentry's d_lock for this. 
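For context, fcntl_dirnotify() above is reached from userspace via fcntl(fd, F_NOTIFY, mask), and events arrive as a signal to the owner installed by __f_setown(). A minimal, hedged sketch of that userspace side (plain SIGIO and DN_MULTISHOT so the registration survives the first event; the watched directory is an illustrative choice):

/* Hedged sketch of the userspace dnotify interface implemented above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hits;

static void on_sigio(int sig)
{
	(void)sig;
	hits++;				/* async-signal-safe bookkeeping only */
}

int main(void)
{
	int dirfd = open(".", O_RDONLY);

	signal(SIGIO, on_sigio);
	/* DN_MULTISHOT keeps the watch armed after the first event */
	if (dirfd < 0 ||
	    fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT) < 0) {
		perror("dnotify");
		return 1;
	}

	for (;;) {
		pause();
		printf("directory changed (%d signals so far)\n", (int)hits);
	}
}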
+ */ +void dnotify_parent(struct dentry *dentry, unsigned long event) +{ + struct dentry *parent; + + if (!dir_notify_enable) + return; + + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + if (parent->d_inode->i_dnotify_mask & event) { + dget(parent); + spin_unlock(&dentry->d_lock); + __inode_dir_notify(parent->d_inode, event); + dput(parent); + } else { + spin_unlock(&dentry->d_lock); + } +} +EXPORT_SYMBOL_GPL(dnotify_parent); + +static int __init dnotify_init(void) +{ + dn_cache = kmem_cache_create("dnotify_cache", + sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); + return 0; +} + +module_init(dnotify_init) diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig new file mode 100644 index 000000000000..446792841023 --- /dev/null +++ b/fs/notify/inotify/Kconfig @@ -0,0 +1,27 @@ +config INOTIFY + bool "Inotify file change notification support" + default y + ---help--- + Say Y here to enable inotify support. Inotify is a file change + notification system and a replacement for dnotify. Inotify fixes + numerous shortcomings in dnotify and introduces several new features + including multiple file events, one-shot support, and unmount + notification. + + For more information, see + + If unsure, say Y. + +config INOTIFY_USER + bool "Inotify support for userspace" + depends on INOTIFY + default y + ---help--- + Say Y here to enable inotify support for userspace, including the + associated system calls. Inotify allows monitoring of both files and + directories via a single open fd. Events are read from the file + descriptor, which is also select()- and poll()-able. + + For more information, see + + If unsure, say Y. diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile new file mode 100644 index 000000000000..e290f3bb9d8d --- /dev/null +++ b/fs/notify/inotify/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INOTIFY) += inotify.o +obj-$(CONFIG_INOTIFY_USER) += inotify_user.o diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c new file mode 100644 index 000000000000..dae3f28f30d4 --- /dev/null +++ b/fs/notify/inotify/inotify.c @@ -0,0 +1,913 @@ +/* + * fs/inotify.c - inode-based file event notifications + * + * Authors: + * John McCutchan + * Robert Love + * + * Kernel API added by: Amy Griffis + * + * Copyright (C) 2005 John McCutchan + * Copyright 2006 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static atomic_t inotify_cookie; + +/* + * Lock ordering: + * + * dentry->d_lock (used to keep d_move() away from dentry->d_parent) + * iprune_mutex (synchronize shrink_icache_memory()) + * inode_lock (protects the super_block->s_inodes list) + * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) + * inotify_handle->mutex (protects inotify_handle and watches->h_list) + * + * The inode->inotify_mutex and inotify_handle->mutex and held during execution + * of a caller's event handler. 
Thus, the caller must not hold any locks + * taken in their event handler while calling any of the published inotify + * interfaces. + */ + +/* + * Lifetimes of the three main data structures--inotify_handle, inode, and + * inotify_watch--are managed by reference count. + * + * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). + * Additional references can bump the count via get_inotify_handle() and drop + * the count via put_inotify_handle(). + * + * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() + * to remove_watch_no_event(). Additional references can bump the count via + * get_inotify_watch() and drop the count via put_inotify_watch(). The caller + * is reponsible for the final put after receiving IN_IGNORED, or when using + * IN_ONESHOT after receiving the first event. Inotify does the final put if + * inotify_destroy() is called. + * + * inode: Pinned so long as the inode is associated with a watch, from + * inotify_add_watch() to the final put_inotify_watch(). + */ + +/* + * struct inotify_handle - represents an inotify instance + * + * This structure is protected by the mutex 'mutex'. + */ +struct inotify_handle { + struct idr idr; /* idr mapping wd -> watch */ + struct mutex mutex; /* protects this bad boy */ + struct list_head watches; /* list of watches */ + atomic_t count; /* reference count */ + u32 last_wd; /* the last wd allocated */ + const struct inotify_operations *in_ops; /* inotify caller operations */ +}; + +static inline void get_inotify_handle(struct inotify_handle *ih) +{ + atomic_inc(&ih->count); +} + +static inline void put_inotify_handle(struct inotify_handle *ih) +{ + if (atomic_dec_and_test(&ih->count)) { + idr_destroy(&ih->idr); + kfree(ih); + } +} + +/** + * get_inotify_watch - grab a reference to an inotify_watch + * @watch: watch to grab + */ +void get_inotify_watch(struct inotify_watch *watch) +{ + atomic_inc(&watch->count); +} +EXPORT_SYMBOL_GPL(get_inotify_watch); + +int pin_inotify_watch(struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + spin_lock(&sb_lock); + if (sb->s_count >= S_BIAS) { + atomic_inc(&sb->s_active); + spin_unlock(&sb_lock); + atomic_inc(&watch->count); + return 1; + } + spin_unlock(&sb_lock); + return 0; +} + +/** + * put_inotify_watch - decrements the ref count on a given watch. cleans up + * watch references if the count reaches zero. inotify_watch is freed by + * inotify callers via the destroy_watch() op. + * @watch: watch to release + */ +void put_inotify_watch(struct inotify_watch *watch) +{ + if (atomic_dec_and_test(&watch->count)) { + struct inotify_handle *ih = watch->ih; + + iput(watch->inode); + ih->in_ops->destroy_watch(watch); + put_inotify_handle(ih); + } +} +EXPORT_SYMBOL_GPL(put_inotify_watch); + +void unpin_inotify_watch(struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + put_inotify_watch(watch); + deactivate_super(sb); +} + +/* + * inotify_handle_get_wd - returns the next WD for use by the given handle + * + * Callers must hold ih->mutex. This function can sleep. 
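Those lifetime rules are the contract an in-kernel consumer has to honour: embed a struct inotify_watch in its own object, supply handle_event/destroy_watch callbacks, and issue the final put itself once IN_IGNORED (or, under IN_ONESHOT, the first event) arrives. Below is a compressed, hedged sketch of such a consumer, modelled on inotify_user.c; the my_* names are illustrative and error handling is pared down.

/* Hedged sketch of an in-kernel inotify consumer. */
#include <linux/inotify.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct my_watch {
	struct inotify_watch wdata;	/* embedded, as inotify_user_watch does */
};

static void my_handle_event(struct inotify_watch *w, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	if (mask & IN_IGNORED)
		put_inotify_watch(w);	/* the final put is our responsibility */
}

static void my_destroy_watch(struct inotify_watch *w)
{
	kfree(container_of(w, struct my_watch, wdata));
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

/* caller must pin @inode, as the inotify_add_watch() kernel-doc requires */
static s32 my_watch_inode(struct inotify_handle *ih, struct inode *inode)
{
	struct my_watch *watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	s32 wd;

	if (!watch)
		return -ENOMEM;
	inotify_init_watch(&watch->wdata);
	wd = inotify_add_watch(ih, &watch->wdata, inode, IN_MODIFY);
	if (wd < 0)
		my_destroy_watch(&watch->wdata);	/* mirrors create_watch() */
	return wd;
}

The handle these helpers operate on would come from inotify_init(&my_ops) and eventually be torn down with inotify_destroy(), just as inotify_user.c does per device.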
+ */ +static int inotify_handle_get_wd(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + int ret; + + do { + if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) + return -ENOSPC; + ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); + } while (ret == -EAGAIN); + + if (likely(!ret)) + ih->last_wd = watch->wd; + + return ret; +} + +/* + * inotify_inode_watched - returns nonzero if there are watches on this inode + * and zero otherwise. We call this lockless, we do not care if we race. + */ +static inline int inotify_inode_watched(struct inode *inode) +{ + return !list_empty(&inode->inotify_watches); +} + +/* + * Get child dentry flag into synch with parent inode. + * Flag should always be clear for negative dentrys. + */ +static void set_dentry_child_flags(struct inode *inode, int watched) +{ + struct dentry *alias; + + spin_lock(&dcache_lock); + list_for_each_entry(alias, &inode->i_dentry, d_alias) { + struct dentry *child; + + list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { + if (!child->d_inode) + continue; + + spin_lock(&child->d_lock); + if (watched) + child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + else + child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; + spin_unlock(&child->d_lock); + } + } + spin_unlock(&dcache_lock); +} + +/* + * inotify_find_handle - find the watch associated with the given inode and + * handle + * + * Callers must hold inode->inotify_mutex. + */ +static struct inotify_watch *inode_find_handle(struct inode *inode, + struct inotify_handle *ih) +{ + struct inotify_watch *watch; + + list_for_each_entry(watch, &inode->inotify_watches, i_list) { + if (watch->ih == ih) + return watch; + } + + return NULL; +} + +/* + * remove_watch_no_event - remove watch without the IN_IGNORED event. + * + * Callers must hold both inode->inotify_mutex and ih->mutex. + */ +static void remove_watch_no_event(struct inotify_watch *watch, + struct inotify_handle *ih) +{ + list_del(&watch->i_list); + list_del(&watch->h_list); + + if (!inotify_inode_watched(watch->inode)) + set_dentry_child_flags(watch->inode, 0); + + idr_remove(&ih->idr, watch->wd); +} + +/** + * inotify_remove_watch_locked - Remove a watch from both the handle and the + * inode. Sends the IN_IGNORED event signifying that the inode is no longer + * watched. May be invoked from a caller's event handler. + * @ih: inotify handle associated with watch + * @watch: watch to remove + * + * Callers must hold both inode->inotify_mutex and ih->mutex. 
+ */ +void inotify_remove_watch_locked(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + remove_watch_no_event(watch, ih); + ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); +} +EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); + +/* Kernel API for producing events */ + +/* + * inotify_d_instantiate - instantiate dcache entry for inode + */ +void inotify_d_instantiate(struct dentry *entry, struct inode *inode) +{ + struct dentry *parent; + + if (!inode) + return; + + spin_lock(&entry->d_lock); + parent = entry->d_parent; + if (parent->d_inode && inotify_inode_watched(parent->d_inode)) + entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + spin_unlock(&entry->d_lock); +} + +/* + * inotify_d_move - dcache entry has been moved + */ +void inotify_d_move(struct dentry *entry) +{ + struct dentry *parent; + + parent = entry->d_parent; + if (inotify_inode_watched(parent->d_inode)) + entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + else + entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; +} + +/** + * inotify_inode_queue_event - queue an event to all watches on this inode + * @inode: inode event is originating from + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + * @n_inode: inode associated with name + */ +void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, + const char *name, struct inode *n_inode) +{ + struct inotify_watch *watch, *next; + + if (!inotify_inode_watched(inode)) + return; + + mutex_lock(&inode->inotify_mutex); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + u32 watch_mask = watch->mask; + if (watch_mask & mask) { + struct inotify_handle *ih= watch->ih; + mutex_lock(&ih->mutex); + if (watch_mask & IN_ONESHOT) + remove_watch_no_event(watch, ih); + ih->in_ops->handle_event(watch, watch->wd, mask, cookie, + name, n_inode); + mutex_unlock(&ih->mutex); + } + } + mutex_unlock(&inode->inotify_mutex); +} +EXPORT_SYMBOL_GPL(inotify_inode_queue_event); + +/** + * inotify_dentry_parent_queue_event - queue an event to a dentry's parent + * @dentry: the dentry in question, we queue against this dentry's parent + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + */ +void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, + u32 cookie, const char *name) +{ + struct dentry *parent; + struct inode *inode; + + if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED)) + return; + + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + inode = parent->d_inode; + + if (inotify_inode_watched(inode)) { + dget(parent); + spin_unlock(&dentry->d_lock); + inotify_inode_queue_event(inode, mask, cookie, name, + dentry->d_inode); + dput(parent); + } else + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); + +/** + * inotify_get_cookie - return a unique cookie for use in synchronizing events. + */ +u32 inotify_get_cookie(void) +{ + return atomic_inc_return(&inotify_cookie); +} +EXPORT_SYMBOL_GPL(inotify_get_cookie); + +/** + * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. + * @list: list of inodes being unmounted (sb->s_inodes) + * + * Called with inode_lock held, protecting the unmounting super block's list + * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. + * We temporarily drop inode_lock, however, and CAN block. 
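The producer entry points above (inotify_inode_queue_event(), inotify_dentry_parent_queue_event(), inotify_get_cookie()) are driven from VFS hooks; the cookie is what lets userspace pair the two halves of a rename. The following is a hedged, simplified sketch of such a hook, a stand-in for the real fsnotify helpers rather than a copy of them (IN_ISDIR handling and the parent-dentry events are omitted):

/* Hedged sketch: pair IN_MOVED_FROM/IN_MOVED_TO with one cookie, the way
 * a rename hook uses the producer API above.  my_notify_move() is an
 * illustrative name, not a function from this patch. */
#include <linux/fs.h>
#include <linux/inotify.h>

static void my_notify_move(struct inode *old_dir, struct inode *new_dir,
			   const char *old_name, const char *new_name,
			   struct inode *moved)
{
	u32 cookie = inotify_get_cookie();	/* same cookie on both events */

	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie,
				  old_name, moved);
	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie,
				  new_name, moved);
	/* the moved inode itself also hears about it */
	inotify_inode_queue_event(moved, IN_MOVE_SELF, 0, NULL, NULL);
}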
+ */ +void inotify_unmount_inodes(struct list_head *list) +{ + struct inode *inode, *next_i, *need_iput = NULL; + + list_for_each_entry_safe(inode, next_i, list, i_sb_list) { + struct inotify_watch *watch, *next_w; + struct inode *need_iput_tmp; + struct list_head *watches; + + /* + * If i_count is zero, the inode cannot have any watches and + * doing an __iget/iput with MS_ACTIVE clear would actually + * evict all inodes with zero i_count from icache which is + * unnecessarily violent and may in fact be illegal to do. + */ + if (!atomic_read(&inode->i_count)) + continue; + + /* + * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or + * I_WILL_FREE which is fine because by that point the inode + * cannot have any associated watches. + */ + if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) + continue; + + need_iput_tmp = need_iput; + need_iput = NULL; + /* In case inotify_remove_watch_locked() drops a reference. */ + if (inode != need_iput_tmp) + __iget(inode); + else + need_iput_tmp = NULL; + /* In case the dropping of a reference would nuke next_i. */ + if ((&next_i->i_sb_list != list) && + atomic_read(&next_i->i_count) && + !(next_i->i_state & (I_CLEAR | I_FREEING | + I_WILL_FREE))) { + __iget(next_i); + need_iput = next_i; + } + + /* + * We can safely drop inode_lock here because we hold + * references on both inode and next_i. Also no new inodes + * will be added since the umount has begun. Finally, + * iprune_mutex keeps shrink_icache_memory() away. + */ + spin_unlock(&inode_lock); + + if (need_iput_tmp) + iput(need_iput_tmp); + + /* for each watch, send IN_UNMOUNT and then remove it */ + mutex_lock(&inode->inotify_mutex); + watches = &inode->inotify_watches; + list_for_each_entry_safe(watch, next_w, watches, i_list) { + struct inotify_handle *ih= watch->ih; + get_inotify_watch(watch); + mutex_lock(&ih->mutex); + ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, + NULL, NULL); + inotify_remove_watch_locked(ih, watch); + mutex_unlock(&ih->mutex); + put_inotify_watch(watch); + } + mutex_unlock(&inode->inotify_mutex); + iput(inode); + + spin_lock(&inode_lock); + } +} +EXPORT_SYMBOL_GPL(inotify_unmount_inodes); + +/** + * inotify_inode_is_dead - an inode has been deleted, cleanup any watches + * @inode: inode that is about to be removed + */ +void inotify_inode_is_dead(struct inode *inode) +{ + struct inotify_watch *watch, *next; + + mutex_lock(&inode->inotify_mutex); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + struct inotify_handle *ih = watch->ih; + mutex_lock(&ih->mutex); + inotify_remove_watch_locked(ih, watch); + mutex_unlock(&ih->mutex); + } + mutex_unlock(&inode->inotify_mutex); +} +EXPORT_SYMBOL_GPL(inotify_inode_is_dead); + +/* Kernel Consumer API */ + +/** + * inotify_init - allocate and initialize an inotify instance + * @ops: caller's inotify operations + */ +struct inotify_handle *inotify_init(const struct inotify_operations *ops) +{ + struct inotify_handle *ih; + + ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); + if (unlikely(!ih)) + return ERR_PTR(-ENOMEM); + + idr_init(&ih->idr); + INIT_LIST_HEAD(&ih->watches); + mutex_init(&ih->mutex); + ih->last_wd = 0; + ih->in_ops = ops; + atomic_set(&ih->count, 0); + get_inotify_handle(ih); + + return ih; +} +EXPORT_SYMBOL_GPL(inotify_init); + +/** + * inotify_init_watch - initialize an inotify watch + * @watch: watch to initialize + */ +void inotify_init_watch(struct inotify_watch *watch) +{ + INIT_LIST_HEAD(&watch->h_list); + INIT_LIST_HEAD(&watch->i_list); + 
atomic_set(&watch->count, 0); + get_inotify_watch(watch); /* initial get */ +} +EXPORT_SYMBOL_GPL(inotify_init_watch); + +/* + * Watch removals suck violently. To kick the watch out we need (in this + * order) inode->inotify_mutex and ih->mutex. That's fine if we have + * a hold on inode; however, for all other cases we need to make damn sure + * we don't race with umount. We can *NOT* just grab a reference to a + * watch - inotify_unmount_inodes() will happily sail past it and we'll end + * with reference to inode potentially outliving its superblock. Ideally + * we just want to grab an active reference to superblock if we can; that + * will make sure we won't go into inotify_umount_inodes() until we are + * done. Cleanup is just deactivate_super(). However, that leaves a messy + * case - what if we *are* racing with umount() and active references to + * superblock can't be acquired anymore? We can bump ->s_count, grab + * ->s_umount, which will almost certainly wait until the superblock is shut + * down and the watch in question is pining for fjords. That's fine, but + * there is a problem - we might have hit the window between ->s_active + * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock + * is past the point of no return and is heading for shutdown) and the + * moment when deactivate_super() acquires ->s_umount. We could just do + * drop_super() yield() and retry, but that's rather antisocial and this + * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having + * found that we'd got there first (i.e. that ->s_root is non-NULL) we know + * that we won't race with inotify_umount_inodes(). So we could grab a + * reference to watch and do the rest as above, just with drop_super() instead + * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we + * could grab ->s_umount. So the watch could've been gone already. + * + * That still can be dealt with - we need to save watch->wd, do idr_find() + * and compare its result with our pointer. If they match, we either have + * the damn thing still alive or we'd lost not one but two races at once, + * the watch had been killed and a new one got created with the same ->wd + * at the same address. That couldn't have happened in inotify_destroy(), + * but inotify_rm_wd() could run into that. Still, "new one got created" + * is not a problem - we have every right to kill it or leave it alone, + * whatever's more convenient. + * + * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as + * "grab it and kill it" check. If it's been our original watch, we are + * fine, if it's a newcomer - nevermind, just pretend that we'd won the + * race and kill the fscker anyway; we are safe since we know that its + * superblock won't be going away. + * + * And yes, this is far beyond mere "not very pretty"; so's the entire + * concept of inotify to start with. + */ + +/** + * pin_to_kill - pin the watch down for removal + * @ih: inotify handle + * @watch: watch to kill + * + * Called with ih->mutex held, drops it. Possible return values: + * 0 - nothing to do, it has died + * 1 - remove it, drop the reference and deactivate_super() + * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid + * that variant, since it involved a lot of PITA, but that's the best that + * could've been done. 
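Condensed, the caller pattern these return values imply looks like the sketch below. It mirrors inotify_rm_wd() further down and is illustrative only, not additional patch code:

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	how = pin_to_kill(ih, watch);            /* always drops ih->mutex */
	if (!how)
		return 0;                        /* watch already died with its sb */

	inode = watch->inode;
	mutex_lock(&inode->inotify_mutex);       /* safe now: the superblock is pinned */
	mutex_lock(&ih->mutex);
	if (idr_find(&ih->idr, wd) == watch)     /* re-check after dropping ih->mutex */
		inotify_remove_watch_locked(ih, watch);
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	unpin_and_kill(watch, how);              /* deactivate_super() or drop_super() */
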
+ */ +static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + s32 wd = watch->wd; + + spin_lock(&sb_lock); + if (sb->s_count >= S_BIAS) { + atomic_inc(&sb->s_active); + spin_unlock(&sb_lock); + get_inotify_watch(watch); + mutex_unlock(&ih->mutex); + return 1; /* the best outcome */ + } + sb->s_count++; + spin_unlock(&sb_lock); + mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */ + down_read(&sb->s_umount); + if (likely(!sb->s_root)) { + /* fs is already shut down; the watch is dead */ + drop_super(sb); + return 0; + } + /* raced with the final deactivate_super() */ + mutex_lock(&ih->mutex); + if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) { + /* the watch is dead */ + mutex_unlock(&ih->mutex); + drop_super(sb); + return 0; + } + /* still alive or freed and reused with the same sb and wd; kill */ + get_inotify_watch(watch); + mutex_unlock(&ih->mutex); + return 2; +} + +static void unpin_and_kill(struct inotify_watch *watch, int how) +{ + struct super_block *sb = watch->inode->i_sb; + put_inotify_watch(watch); + switch (how) { + case 1: + deactivate_super(sb); + break; + case 2: + drop_super(sb); + } +} + +/** + * inotify_destroy - clean up and destroy an inotify instance + * @ih: inotify handle + */ +void inotify_destroy(struct inotify_handle *ih) +{ + /* + * Destroy all of the watches for this handle. Unfortunately, not very + * pretty. We cannot do a simple iteration over the list, because we + * do not know the inode until we iterate to the watch. But we need to + * hold inode->inotify_mutex before ih->mutex. The following works. + * + * AV: it had to become even uglier to start working ;-/ + */ + while (1) { + struct inotify_watch *watch; + struct list_head *watches; + struct super_block *sb; + struct inode *inode; + int how; + + mutex_lock(&ih->mutex); + watches = &ih->watches; + if (list_empty(watches)) { + mutex_unlock(&ih->mutex); + break; + } + watch = list_first_entry(watches, struct inotify_watch, h_list); + sb = watch->inode->i_sb; + how = pin_to_kill(ih, watch); + if (!how) + continue; + + inode = watch->inode; + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* make sure we didn't race with another list removal */ + if (likely(idr_find(&ih->idr, watch->wd))) { + remove_watch_no_event(watch, ih); + put_inotify_watch(watch); + } + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + unpin_and_kill(watch, how); + } + + /* free this handle: the put matching the get in inotify_init() */ + put_inotify_handle(ih); +} +EXPORT_SYMBOL_GPL(inotify_destroy); + +/** + * inotify_find_watch - find an existing watch for an (ih,inode) pair + * @ih: inotify handle + * @inode: inode to watch + * @watchp: pointer to existing inotify_watch + * + * Caller must pin given inode (via nameidata). 
+ */ +s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, + struct inotify_watch **watchp) +{ + struct inotify_watch *old; + int ret = -ENOENT; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + old = inode_find_handle(inode, ih); + if (unlikely(old)) { + get_inotify_watch(old); /* caller must put watch */ + *watchp = old; + ret = old->wd; + } + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(inotify_find_watch); + +/** + * inotify_find_update_watch - find and update the mask of an existing watch + * @ih: inotify handle + * @inode: inode's watch to update + * @mask: mask of events to watch + * + * Caller must pin given inode (via nameidata). + */ +s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, + u32 mask) +{ + struct inotify_watch *old; + int mask_add = 0; + int ret; + + if (mask & IN_MASK_ADD) + mask_add = 1; + + /* don't allow invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS | IN_ONESHOT; + if (unlikely(!mask)) + return -EINVAL; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* + * Handle the case of re-adding a watch on an (inode,ih) pair that we + * are already watching. We just update the mask and return its wd. + */ + old = inode_find_handle(inode, ih); + if (unlikely(!old)) { + ret = -ENOENT; + goto out; + } + + if (mask_add) + old->mask |= mask; + else + old->mask = mask; + ret = old->wd; +out: + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(inotify_find_update_watch); + +/** + * inotify_add_watch - add a watch to an inotify instance + * @ih: inotify handle + * @watch: caller allocated watch structure + * @inode: inode to watch + * @mask: mask of events to watch + * + * Caller must pin given inode (via nameidata). + * Caller must ensure it only calls inotify_add_watch() once per watch. + * Calls inotify_handle_get_wd() so may sleep. + */ +s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, + struct inode *inode, u32 mask) +{ + int ret = 0; + int newly_watched; + + /* don't allow invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS | IN_ONESHOT; + if (unlikely(!mask)) + return -EINVAL; + watch->mask = mask; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* Initialize a new watch */ + ret = inotify_handle_get_wd(ih, watch); + if (unlikely(ret)) + goto out; + ret = watch->wd; + + /* save a reference to handle and bump the count to make it official */ + get_inotify_handle(ih); + watch->ih = ih; + + /* + * Save a reference to the inode and bump the ref count to make it + * official. We hold a reference to nameidata, which makes this safe. + */ + watch->inode = igrab(inode); + + /* Add the watch to the handle's and the inode's list */ + newly_watched = !inotify_inode_watched(inode); + list_add(&watch->h_list, &ih->watches); + list_add(&watch->i_list, &inode->inotify_watches); + /* + * Set child flags _after_ adding the watch, so there is no race + * windows where newly instantiated children could miss their parent's + * watched flag. 
+ */ + if (newly_watched) + set_dentry_child_flags(inode, 1); + +out: + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(inotify_add_watch); + +/** + * inotify_clone_watch - put the watch next to existing one + * @old: already installed watch + * @new: new watch + * + * Caller must hold the inotify_mutex of inode we are dealing with; + * it is expected to remove the old watch before unlocking the inode. + */ +s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new) +{ + struct inotify_handle *ih = old->ih; + int ret = 0; + + new->mask = old->mask; + new->ih = ih; + + mutex_lock(&ih->mutex); + + /* Initialize a new watch */ + ret = inotify_handle_get_wd(ih, new); + if (unlikely(ret)) + goto out; + ret = new->wd; + + get_inotify_handle(ih); + + new->inode = igrab(old->inode); + + list_add(&new->h_list, &ih->watches); + list_add(&new->i_list, &old->inode->inotify_watches); +out: + mutex_unlock(&ih->mutex); + return ret; +} + +void inotify_evict_watch(struct inotify_watch *watch) +{ + get_inotify_watch(watch); + mutex_lock(&watch->ih->mutex); + inotify_remove_watch_locked(watch->ih, watch); + mutex_unlock(&watch->ih->mutex); +} + +/** + * inotify_rm_wd - remove a watch from an inotify instance + * @ih: inotify handle + * @wd: watch descriptor to remove + * + * Can sleep. + */ +int inotify_rm_wd(struct inotify_handle *ih, u32 wd) +{ + struct inotify_watch *watch; + struct super_block *sb; + struct inode *inode; + int how; + + mutex_lock(&ih->mutex); + watch = idr_find(&ih->idr, wd); + if (unlikely(!watch)) { + mutex_unlock(&ih->mutex); + return -EINVAL; + } + sb = watch->inode->i_sb; + how = pin_to_kill(ih, watch); + if (!how) + return 0; + + inode = watch->inode; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* make sure that we did not race */ + if (likely(idr_find(&ih->idr, wd) == watch)) + inotify_remove_watch_locked(ih, watch); + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + unpin_and_kill(watch, how); + + return 0; +} +EXPORT_SYMBOL_GPL(inotify_rm_wd); + +/** + * inotify_rm_watch - remove a watch from an inotify instance + * @ih: inotify handle + * @watch: watch to remove + * + * Can sleep. + */ +int inotify_rm_watch(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + return inotify_rm_wd(ih, watch->wd); +} +EXPORT_SYMBOL_GPL(inotify_rm_watch); + +/* + * inotify_setup - core initialization function + */ +static int __init inotify_setup(void) +{ + atomic_set(&inotify_cookie, 0); + + return 0; +} + +module_init(inotify_setup); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c new file mode 100644 index 000000000000..400f8064a548 --- /dev/null +++ b/fs/notify/inotify/inotify_user.c @@ -0,0 +1,778 @@ +/* + * fs/inotify_user.c - inotify support for userspace + * + * Authors: + * John McCutchan + * Robert Love + * + * Copyright (C) 2005 John McCutchan + * Copyright 2006 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct kmem_cache *watch_cachep __read_mostly; +static struct kmem_cache *event_cachep __read_mostly; + +static struct vfsmount *inotify_mnt __read_mostly; + +/* these are configurable via /proc/sys/fs/inotify/ */ +static int inotify_max_user_instances __read_mostly; +static int inotify_max_user_watches __read_mostly; +static int inotify_max_queued_events __read_mostly; + +/* + * Lock ordering: + * + * inotify_dev->up_mutex (ensures we don't re-add the same watch) + * inode->inotify_mutex (protects inode's watch list) + * inotify_handle->mutex (protects inotify_handle's watch list) + * inotify_dev->ev_mutex (protects device's event queue) + */ + +/* + * Lifetimes of the main data structures: + * + * inotify_device: Lifetime is managed by reference count, from + * sys_inotify_init() until release. Additional references can bump the count + * via get_inotify_dev() and drop the count via put_inotify_dev(). + * + * inotify_user_watch: Lifetime is from create_watch() to the receipt of an + * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the + * first event, or to inotify_destroy(). + */ + +/* + * struct inotify_device - represents an inotify instance + * + * This structure is protected by the mutex 'mutex'. + */ +struct inotify_device { + wait_queue_head_t wq; /* wait queue for i/o */ + struct mutex ev_mutex; /* protects event queue */ + struct mutex up_mutex; /* synchronizes watch updates */ + struct list_head events; /* list of queued events */ + struct user_struct *user; /* user who opened this dev */ + struct inotify_handle *ih; /* inotify handle */ + struct fasync_struct *fa; /* async notification */ + atomic_t count; /* reference count */ + unsigned int queue_size; /* size of the queue (bytes) */ + unsigned int event_count; /* number of pending events */ + unsigned int max_events; /* maximum number of events */ +}; + +/* + * struct inotify_kernel_event - An inotify event, originating from a watch and + * queued for user-space. A list of these is attached to each instance of the + * device. In read(), this list is walked and all events that can fit in the + * buffer are returned. + * + * Protected by dev->ev_mutex of the device in which we are queued. + */ +struct inotify_kernel_event { + struct inotify_event event; /* the user-space event */ + struct list_head list; /* entry in inotify_device's list */ + char *name; /* filename, if any */ +}; + +/* + * struct inotify_user_watch - our version of an inotify_watch, we add + * a reference to the associated inotify_device. 
+ */ +struct inotify_user_watch { + struct inotify_device *dev; /* associated device */ + struct inotify_watch wdata; /* inotify watch data */ +}; + +#ifdef CONFIG_SYSCTL + +#include + +static int zero; + +ctl_table inotify_table[] = { + { + .ctl_name = INOTIFY_MAX_USER_INSTANCES, + .procname = "max_user_instances", + .data = &inotify_max_user_instances, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_USER_WATCHES, + .procname = "max_user_watches", + .data = &inotify_max_user_watches, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, + .procname = "max_queued_events", + .data = &inotify_max_queued_events, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero + }, + { .ctl_name = 0 } +}; +#endif /* CONFIG_SYSCTL */ + +static inline void get_inotify_dev(struct inotify_device *dev) +{ + atomic_inc(&dev->count); +} + +static inline void put_inotify_dev(struct inotify_device *dev) +{ + if (atomic_dec_and_test(&dev->count)) { + atomic_dec(&dev->user->inotify_devs); + free_uid(dev->user); + kfree(dev); + } +} + +/* + * free_inotify_user_watch - cleans up the watch and its references + */ +static void free_inotify_user_watch(struct inotify_watch *w) +{ + struct inotify_user_watch *watch; + struct inotify_device *dev; + + watch = container_of(w, struct inotify_user_watch, wdata); + dev = watch->dev; + + atomic_dec(&dev->user->inotify_watches); + put_inotify_dev(dev); + kmem_cache_free(watch_cachep, watch); +} + +/* + * kernel_event - create a new kernel event with the given parameters + * + * This function can sleep. + */ +static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, + const char *name) +{ + struct inotify_kernel_event *kevent; + + kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); + if (unlikely(!kevent)) + return NULL; + + /* we hand this out to user-space, so zero it just in case */ + memset(&kevent->event, 0, sizeof(struct inotify_event)); + + kevent->event.wd = wd; + kevent->event.mask = mask; + kevent->event.cookie = cookie; + + INIT_LIST_HEAD(&kevent->list); + + if (name) { + size_t len, rem, event_size = sizeof(struct inotify_event); + + /* + * We need to pad the filename so as to properly align an + * array of inotify_event structures. Because the structure is + * small and the common case is a small filename, we just round + * up to the next multiple of the structure's sizeof. This is + * simple and safe for all architectures. + */ + len = strlen(name) + 1; + rem = event_size - len; + if (len > event_size) { + rem = event_size - (len % event_size); + if (len % event_size == 0) + rem = 0; + } + + kevent->name = kmalloc(len + rem, GFP_KERNEL); + if (unlikely(!kevent->name)) { + kmem_cache_free(event_cachep, kevent); + return NULL; + } + memcpy(kevent->name, name, len); + if (rem) + memset(kevent->name + len, 0, rem); + kevent->event.len = len + rem; + } else { + kevent->event.len = 0; + kevent->name = NULL; + } + + return kevent; +} + +/* + * inotify_dev_get_event - return the next event in the given dev's queue + * + * Caller must hold dev->ev_mutex. 
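The padding arithmetic in kernel_event() above amounts to rounding the NUL-terminated name length up to the next multiple of sizeof(struct inotify_event) (16 bytes with the usual layout), so every record handed to read() starts on a structure boundary. Written out as a hypothetical helper, not code from the patch:

	/* equivalent of kernel_event()'s padding rule; roundup() is from <linux/kernel.h> */
	static size_t inotify_padded_name_len(const char *name)
	{
		size_t event_size = sizeof(struct inotify_event);

		if (!name)
			return 0;			/* event.len = 0, no name copied */
		/* e.g. "foo" -> len 4 -> 16; a 20-byte name -> 32 */
		return roundup(strlen(name) + 1, event_size);
	}
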
+ */ +static inline struct inotify_kernel_event * +inotify_dev_get_event(struct inotify_device *dev) +{ + return list_entry(dev->events.next, struct inotify_kernel_event, list); +} + +/* + * inotify_dev_get_last_event - return the last event in the given dev's queue + * + * Caller must hold dev->ev_mutex. + */ +static inline struct inotify_kernel_event * +inotify_dev_get_last_event(struct inotify_device *dev) +{ + if (list_empty(&dev->events)) + return NULL; + return list_entry(dev->events.prev, struct inotify_kernel_event, list); +} + +/* + * inotify_dev_queue_event - event handler registered with core inotify, adds + * a new event to the given device + * + * Can sleep (calls kernel_event()). + */ +static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, + u32 cookie, const char *name, + struct inode *ignored) +{ + struct inotify_user_watch *watch; + struct inotify_device *dev; + struct inotify_kernel_event *kevent, *last; + + watch = container_of(w, struct inotify_user_watch, wdata); + dev = watch->dev; + + mutex_lock(&dev->ev_mutex); + + /* we can safely put the watch as we don't reference it while + * generating the event + */ + if (mask & IN_IGNORED || w->mask & IN_ONESHOT) + put_inotify_watch(w); /* final put */ + + /* coalescing: drop this event if it is a dupe of the previous */ + last = inotify_dev_get_last_event(dev); + if (last && last->event.mask == mask && last->event.wd == wd && + last->event.cookie == cookie) { + const char *lastname = last->name; + + if (!name && !lastname) + goto out; + if (name && lastname && !strcmp(lastname, name)) + goto out; + } + + /* the queue overflowed and we already sent the Q_OVERFLOW event */ + if (unlikely(dev->event_count > dev->max_events)) + goto out; + + /* if the queue overflows, we need to notify user space */ + if (unlikely(dev->event_count == dev->max_events)) + kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); + else + kevent = kernel_event(wd, mask, cookie, name); + + if (unlikely(!kevent)) + goto out; + + /* queue the event and wake up anyone waiting */ + dev->event_count++; + dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; + list_add_tail(&kevent->list, &dev->events); + wake_up_interruptible(&dev->wq); + kill_fasync(&dev->fa, SIGIO, POLL_IN); + +out: + mutex_unlock(&dev->ev_mutex); +} + +/* + * remove_kevent - cleans up the given kevent + * + * Caller must hold dev->ev_mutex. + */ +static void remove_kevent(struct inotify_device *dev, + struct inotify_kernel_event *kevent) +{ + list_del(&kevent->list); + + dev->event_count--; + dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; +} + +/* + * free_kevent - frees the given kevent. + */ +static void free_kevent(struct inotify_kernel_event *kevent) +{ + kfree(kevent->name); + kmem_cache_free(event_cachep, kevent); +} + +/* + * inotify_dev_event_dequeue - destroy an event on the given device + * + * Caller must hold dev->ev_mutex. 
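From user space, the queueing policy above (duplicate events coalesced, a single IN_Q_OVERFLOW record once max_queued_events is exceeded) is observed through plain read() on the inotify fd. A minimal consumer sketch using the public inotify API; the watched path "/tmp" is purely an example and error handling is kept minimal:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/inotify.h>

	int main(void)
	{
		char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
		int fd = inotify_init();		/* sys_inotify_init() above */

		if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
			return 1;

		for (;;) {
			ssize_t len = read(fd, buf, sizeof(buf));
			char *p = buf;

			if (len <= 0)
				break;
			while (p < buf + len) {
				struct inotify_event *ev = (struct inotify_event *)p;

				if (ev->mask & IN_Q_OVERFLOW)
					printf("overflow: events were dropped\n");
				else
					printf("wd=%d mask=%#x name=%s\n", ev->wd,
					       ev->mask, ev->len ? ev->name : "");
				p += sizeof(*ev) + ev->len;	/* len includes padding */
			}
		}
		return 0;
	}
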
+ */ +static void inotify_dev_event_dequeue(struct inotify_device *dev) +{ + if (!list_empty(&dev->events)) { + struct inotify_kernel_event *kevent; + kevent = inotify_dev_get_event(dev); + remove_kevent(dev, kevent); + free_kevent(kevent); + } +} + +/* + * find_inode - resolve a user-given path to a specific inode + */ +static int find_inode(const char __user *dirname, struct path *path, + unsigned flags) +{ + int error; + + error = user_path_at(AT_FDCWD, dirname, flags, path); + if (error) + return error; + /* you can only watch an inode if you have read permissions on it */ + error = inode_permission(path->dentry->d_inode, MAY_READ); + if (error) + path_put(path); + return error; +} + +/* + * create_watch - creates a watch on the given device. + * + * Callers must hold dev->up_mutex. + */ +static int create_watch(struct inotify_device *dev, struct inode *inode, + u32 mask) +{ + struct inotify_user_watch *watch; + int ret; + + if (atomic_read(&dev->user->inotify_watches) >= + inotify_max_user_watches) + return -ENOSPC; + + watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); + if (unlikely(!watch)) + return -ENOMEM; + + /* save a reference to device and bump the count to make it official */ + get_inotify_dev(dev); + watch->dev = dev; + + atomic_inc(&dev->user->inotify_watches); + + inotify_init_watch(&watch->wdata); + ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask); + if (ret < 0) + free_inotify_user_watch(&watch->wdata); + + return ret; +} + +/* Device Interface */ + +static unsigned int inotify_poll(struct file *file, poll_table *wait) +{ + struct inotify_device *dev = file->private_data; + int ret = 0; + + poll_wait(file, &dev->wq, wait); + mutex_lock(&dev->ev_mutex); + if (!list_empty(&dev->events)) + ret = POLLIN | POLLRDNORM; + mutex_unlock(&dev->ev_mutex); + + return ret; +} + +static ssize_t inotify_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + size_t event_size = sizeof (struct inotify_event); + struct inotify_device *dev; + char __user *start; + int ret; + DEFINE_WAIT(wait); + + start = buf; + dev = file->private_data; + + while (1) { + + prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); + + mutex_lock(&dev->ev_mutex); + if (!list_empty(&dev->events)) { + ret = 0; + break; + } + mutex_unlock(&dev->ev_mutex); + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + schedule(); + } + + finish_wait(&dev->wq, &wait); + if (ret) + return ret; + + while (1) { + struct inotify_kernel_event *kevent; + + ret = buf - start; + if (list_empty(&dev->events)) + break; + + kevent = inotify_dev_get_event(dev); + if (event_size + kevent->event.len > count) { + if (ret == 0 && count > 0) { + /* + * could not get a single event because we + * didn't have enough buffer space. + */ + ret = -EINVAL; + } + break; + } + remove_kevent(dev, kevent); + + /* + * Must perform the copy_to_user outside the mutex in order + * to avoid a lock order reversal with mmap_sem. 
+ */ + mutex_unlock(&dev->ev_mutex); + + if (copy_to_user(buf, &kevent->event, event_size)) { + ret = -EFAULT; + break; + } + buf += event_size; + count -= event_size; + + if (kevent->name) { + if (copy_to_user(buf, kevent->name, kevent->event.len)){ + ret = -EFAULT; + break; + } + buf += kevent->event.len; + count -= kevent->event.len; + } + + free_kevent(kevent); + + mutex_lock(&dev->ev_mutex); + } + mutex_unlock(&dev->ev_mutex); + + return ret; +} + +static int inotify_fasync(int fd, struct file *file, int on) +{ + struct inotify_device *dev = file->private_data; + + return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; +} + +static int inotify_release(struct inode *ignored, struct file *file) +{ + struct inotify_device *dev = file->private_data; + + inotify_destroy(dev->ih); + + /* destroy all of the events on this device */ + mutex_lock(&dev->ev_mutex); + while (!list_empty(&dev->events)) + inotify_dev_event_dequeue(dev); + mutex_unlock(&dev->ev_mutex); + + /* free this device: the put matching the get in inotify_init() */ + put_inotify_dev(dev); + + return 0; +} + +static long inotify_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct inotify_device *dev; + void __user *p; + int ret = -ENOTTY; + + dev = file->private_data; + p = (void __user *) arg; + + switch (cmd) { + case FIONREAD: + ret = put_user(dev->queue_size, (int __user *) p); + break; + } + + return ret; +} + +static const struct file_operations inotify_fops = { + .poll = inotify_poll, + .read = inotify_read, + .fasync = inotify_fasync, + .release = inotify_release, + .unlocked_ioctl = inotify_ioctl, + .compat_ioctl = inotify_ioctl, +}; + +static const struct inotify_operations inotify_user_ops = { + .handle_event = inotify_dev_queue_event, + .destroy_watch = free_inotify_user_watch, +}; + +asmlinkage long sys_inotify_init1(int flags) +{ + struct inotify_device *dev; + struct inotify_handle *ih; + struct user_struct *user; + struct file *filp; + int fd, ret; + + /* Check the IN_* constants for consistency. 
*/ + BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); + BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); + + if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) + return -EINVAL; + + fd = get_unused_fd_flags(flags & O_CLOEXEC); + if (fd < 0) + return fd; + + filp = get_empty_filp(); + if (!filp) { + ret = -ENFILE; + goto out_put_fd; + } + + user = get_current_user(); + if (unlikely(atomic_read(&user->inotify_devs) >= + inotify_max_user_instances)) { + ret = -EMFILE; + goto out_free_uid; + } + + dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); + if (unlikely(!dev)) { + ret = -ENOMEM; + goto out_free_uid; + } + + ih = inotify_init(&inotify_user_ops); + if (IS_ERR(ih)) { + ret = PTR_ERR(ih); + goto out_free_dev; + } + dev->ih = ih; + dev->fa = NULL; + + filp->f_op = &inotify_fops; + filp->f_path.mnt = mntget(inotify_mnt); + filp->f_path.dentry = dget(inotify_mnt->mnt_root); + filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; + filp->f_mode = FMODE_READ; + filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); + filp->private_data = dev; + + INIT_LIST_HEAD(&dev->events); + init_waitqueue_head(&dev->wq); + mutex_init(&dev->ev_mutex); + mutex_init(&dev->up_mutex); + dev->event_count = 0; + dev->queue_size = 0; + dev->max_events = inotify_max_queued_events; + dev->user = user; + atomic_set(&dev->count, 0); + + get_inotify_dev(dev); + atomic_inc(&user->inotify_devs); + fd_install(fd, filp); + + return fd; +out_free_dev: + kfree(dev); +out_free_uid: + free_uid(user); + put_filp(filp); +out_put_fd: + put_unused_fd(fd); + return ret; +} + +asmlinkage long sys_inotify_init(void) +{ + return sys_inotify_init1(0); +} + +asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask) +{ + struct inode *inode; + struct inotify_device *dev; + struct path path; + struct file *filp; + int ret, fput_needed; + unsigned flags = 0; + + filp = fget_light(fd, &fput_needed); + if (unlikely(!filp)) + return -EBADF; + + /* verify that this is indeed an inotify instance */ + if (unlikely(filp->f_op != &inotify_fops)) { + ret = -EINVAL; + goto fput_and_out; + } + + if (!(mask & IN_DONT_FOLLOW)) + flags |= LOOKUP_FOLLOW; + if (mask & IN_ONLYDIR) + flags |= LOOKUP_DIRECTORY; + + ret = find_inode(pathname, &path, flags); + if (unlikely(ret)) + goto fput_and_out; + + /* inode held in place by reference to path; dev by fget on fd */ + inode = path.dentry->d_inode; + dev = filp->private_data; + + mutex_lock(&dev->up_mutex); + ret = inotify_find_update_watch(dev->ih, inode, mask); + if (ret == -ENOENT) + ret = create_watch(dev, inode, mask); + mutex_unlock(&dev->up_mutex); + + path_put(&path); +fput_and_out: + fput_light(filp, fput_needed); + return ret; +} + +asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) +{ + struct file *filp; + struct inotify_device *dev; + int ret, fput_needed; + + filp = fget_light(fd, &fput_needed); + if (unlikely(!filp)) + return -EBADF; + + /* verify that this is indeed an inotify instance */ + if (unlikely(filp->f_op != &inotify_fops)) { + ret = -EINVAL; + goto out; + } + + dev = filp->private_data; + + /* we free our watch data when we get IN_IGNORED */ + ret = inotify_rm_wd(dev->ih, wd); + +out: + fput_light(filp, fput_needed); + return ret; +} + +static int +inotify_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "inotify", NULL, + INOTIFYFS_SUPER_MAGIC, mnt); +} + +static struct file_system_type inotify_fs_type = { + .name = "inotifyfs", + .get_sb = inotify_get_sb, + .kill_sb = 
kill_anon_super, +}; + +/* + * inotify_user_setup - Our initialization function. Note that we cannnot return + * error because we have compiled-in VFS hooks. So an (unlikely) failure here + * must result in panic(). + */ +static int __init inotify_user_setup(void) +{ + int ret; + + ret = register_filesystem(&inotify_fs_type); + if (unlikely(ret)) + panic("inotify: register_filesystem returned %d!\n", ret); + + inotify_mnt = kern_mount(&inotify_fs_type); + if (IS_ERR(inotify_mnt)) + panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); + + inotify_max_queued_events = 16384; + inotify_max_user_instances = 128; + inotify_max_user_watches = 8192; + + watch_cachep = kmem_cache_create("inotify_watch_cache", + sizeof(struct inotify_user_watch), + 0, SLAB_PANIC, NULL); + event_cachep = kmem_cache_create("inotify_event_cache", + sizeof(struct inotify_kernel_event), + 0, SLAB_PANIC, NULL); + + return 0; +} + +module_init(inotify_user_setup); -- cgit From 261bca86ed4f7f391d1938167624e78da61dcc6b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 01:48:21 -0500 Subject: nfsd/create race fixes, infrastructure new helpers - insert_inode_locked() and insert_inode_locked4(). Hash new inode, making sure that there's no such inode in icache already. If there is and it does not end up unhashed (as would happen if we have nfsd trying to resolve a bogus fhandle), fail. Otherwise insert our inode into hash and succeed. In either case have i_state set to new+locked; cleanup ends up being simpler with such calling conventions. Signed-off-by: Al Viro --- fs/inode.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 098a2443196f..7de1cda92489 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1032,6 +1032,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino) EXPORT_SYMBOL(iget_locked); +int insert_inode_locked(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + ino_t ino = inode->i_ino; + struct hlist_head *head = inode_hashtable + hash(sb, ino); + struct inode *old; + + inode->i_state |= I_LOCK|I_NEW; + while (1) { + spin_lock(&inode_lock); + old = find_inode_fast(sb, head, ino); + if (likely(!old)) { + hlist_add_head(&inode->i_hash, head); + spin_unlock(&inode_lock); + return 0; + } + __iget(old); + spin_unlock(&inode_lock); + wait_on_inode(old); + if (unlikely(!hlist_unhashed(&old->i_hash))) { + iput(old); + return -EBUSY; + } + iput(old); + } +} + +EXPORT_SYMBOL(insert_inode_locked); + +int insert_inode_locked4(struct inode *inode, unsigned long hashval, + int (*test)(struct inode *, void *), void *data) +{ + struct super_block *sb = inode->i_sb; + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + struct inode *old; + + inode->i_state |= I_LOCK|I_NEW; + + while (1) { + spin_lock(&inode_lock); + old = find_inode(sb, head, test, data); + if (likely(!old)) { + hlist_add_head(&inode->i_hash, head); + spin_unlock(&inode_lock); + return 0; + } + __iget(old); + spin_unlock(&inode_lock); + wait_on_inode(old); + if (unlikely(!hlist_unhashed(&old->i_hash))) { + iput(old); + return -EBUSY; + } + iput(old); + } +} + +EXPORT_SYMBOL(insert_inode_locked4); + /** * __insert_inode_hash - hash an inode * @inode: unhashed inode -- cgit From 41080b5a240113328c607f22b849f653373db0ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 01:52:35 -0500 Subject: nfsd race fixes: ext2 * make ext2_new_inode() put the inode into icache in locked state * do not 
unlock until the inode is fully set up; otherwise nfsd might pick it in half-baked state. * make sure that ext2_new_inode() does *not* lead to two inodes with the same inumber hashed at the same time; otherwise a bogus fhandle coming from nfsd might race with inode creation: nfsd: iget_locked() creates inode nfsd: try to read from disk, block on that. ext2_new_inode(): allocate inode with that inumber ext2_new_inode(): insert it into icache, set it up and dirty ext2_write_inode(): get the relevant part of inode table in cache, set the entry for our inode (and start writing to disk) nfsd: get CPU again, look into inode table, see nice and sane on-disk inode, set the in-core inode from it oops - we have two in-core inodes with the same inumber live in icache, both used for IO. Welcome to fs corruption... Signed-off-by: Al Viro --- fs/ext2/ialloc.c | 6 +++++- fs/ext2/namei.c | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 8d0add625870..c454d5db28a5 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -585,7 +585,10 @@ got: spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } if (DQUOT_ALLOC_INODE(inode)) { err = -EDQUOT; @@ -612,6 +615,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); return ERR_PTR(err); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2a747252ec12..90ea17998a73 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -41,9 +41,11 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) int err = ext2_add_link(dentry, inode); if (!err) { d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } inode_dec_link_count(inode); + unlock_new_inode(inode); iput(inode); return err; } @@ -170,6 +172,7 @@ out: out_fail: inode_dec_link_count(inode); + unlock_new_inode(inode); iput (inode); goto out; } @@ -178,6 +181,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; + int err; if (inode->i_nlink >= EXT2_LINK_MAX) return -EMLINK; @@ -186,7 +190,14 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, inode_inc_link_count(inode); atomic_inc(&inode->i_count); - return ext2_add_nondir(dentry, inode); + err = ext2_add_link(dentry, inode); + if (!err) { + d_instantiate(dentry, inode); + return 0; + } + inode_dec_link_count(inode); + iput(inode); + return err; } static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) @@ -222,12 +233,14 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) goto out_fail; d_instantiate(dentry, inode); + unlock_new_inode(inode); out: return err; out_fail: inode_dec_link_count(inode); inode_dec_link_count(inode); + unlock_new_inode(inode); iput(inode); out_dir: inode_dec_link_count(dir); -- cgit From c38012daa7ad902a39a4213ba2b3fe50e81157ea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:02:50 -0500 Subject: nfsd race fixes: ext3 ext3 analog of the previous patch Signed-off-by: Al Viro --- fs/ext3/ialloc.c | 6 +++++- fs/ext3/namei.c | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 490bd0ed7896..5655fbcbd11f 100644 --- 
a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -579,7 +579,10 @@ got: ext3_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); @@ -627,6 +630,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); brelse(bitmap_bh); return ERR_PTR(err); diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 3e5edc92aa0b..297ea8dfac7c 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1652,9 +1652,11 @@ static int ext3_add_nondir(handle_t *handle, if (!err) { ext3_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); + unlock_new_inode(inode); iput(inode); return err; } @@ -1765,6 +1767,7 @@ retry: dir_block = ext3_bread (handle, inode, 0, 1, &err); if (!dir_block) { drop_nlink(inode); /* is this nlink == 0? */ + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -1792,6 +1795,7 @@ retry: err = ext3_add_entry (handle, dentry, inode); if (err) { inode->i_nlink = 0; + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -1800,6 +1804,7 @@ retry: ext3_update_dx_flag(dir); ext3_mark_inode_dirty(handle, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: ext3_journal_stop(handle); if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) @@ -2174,6 +2179,7 @@ retry: mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); if (err) { drop_nlink(inode); + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -2221,7 +2227,14 @@ retry: inc_nlink(inode); atomic_inc(&inode->i_count); - err = ext3_add_nondir(handle, dentry, inode); + err = ext3_add_entry(handle, dentry, inode); + if (!err) { + ext3_mark_inode_dirty(handle, inode); + d_instantiate(dentry, inode); + } else { + drop_nlink(inode); + iput(inode); + } ext3_journal_stop(handle); if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit From 6b38e842bb832a3dbeb17e382404aef3c40ac5f9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:03:31 -0500 Subject: nfsd race fixes: ext4 Signed-off-by: Al Viro --- fs/ext4/ialloc.c | 6 +++++- fs/ext4/namei.c | 14 +++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 08cac9fcace2..6e6052879aa2 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -826,7 +826,10 @@ got: ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); @@ -881,6 +884,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); brelse(bitmap_bh); return ERR_PTR(err); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 63adcb792988..da98a9012fa5 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1693,9 +1693,11 @@ static int ext4_add_nondir(handle_t *handle, if (!err) { ext4_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); + 
unlock_new_inode(inode); iput(inode); return err; } @@ -1830,6 +1832,7 @@ retry: if (err) { out_clear_inode: clear_nlink(inode); + unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; @@ -1838,6 +1841,7 @@ out_clear_inode: ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) @@ -2212,6 +2216,7 @@ retry: mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); if (err) { clear_nlink(inode); + unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; @@ -2262,7 +2267,14 @@ retry: ext4_inc_count(handle, inode); atomic_inc(&inode->i_count); - err = ext4_add_nondir(handle, dentry, inode); + err = ext4_add_entry(handle, dentry, inode); + if (!err) { + ext4_mark_inode_dirty(handle, inode); + d_instantiate(dentry, inode); + } else { + drop_nlink(inode); + iput(inode); + } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit From c1eaa26b671299b3ec01d40c6c71ee19a4f81517 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:03:58 -0500 Subject: nfsd race fixes: reiserfs ... and the same for reiserfs. The difference here is that we need insert_inode_locked4() to match iget5_locked(). Signed-off-by: Al Viro --- fs/reiserfs/inode.c | 15 ++++++++++----- fs/reiserfs/namei.c | 8 ++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6c4c2c69449f..145c2d3e5e01 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1753,6 +1753,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, struct inode *inode) { struct super_block *sb; + struct reiserfs_iget_args args; INITIALIZE_PATH(path_to_key); struct cpu_key key; struct item_head ih; @@ -1780,6 +1781,14 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, err = -ENOMEM; goto out_bad_inode; } + args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); + memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); + args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); + if (insert_inode_locked4(inode, args.objectid, + reiserfs_find_actor, &args) < 0) { + err = -EINVAL; + goto out_bad_inode; + } if (old_format_only(sb)) /* not a perfect generation count, as object ids can be reused, but ** this is as good as reiserfs can do right now. 
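Across ext2, ext3, ext4, reiserfs and the jfs patch that follows, these fixes converge on the same creation-path shape. A condensed sketch with invented helper names (fs_new_inode, fs_add_link are placeholders, not real functions):

	inode = fs_new_inode(dir, mode);	/* hashes via insert_inode_locked(),
						   comes back with I_NEW|I_LOCK set */
	err = fs_add_link(dentry, inode);	/* directory update; may block on I/O,
						   nfsd lookups finding the inode wait */
	if (err) {
		inode_dec_link_count(inode);
		unlock_new_inode(inode);	/* wake anyone stuck in iget on us */
		iput(inode);
		return err;
	}
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);		/* only now is the inode visible as set up */
	return 0;
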
@@ -1859,13 +1868,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, } else { inode2sd(&sd, inode, inode->i_size); } - // these do not go to on-disk stat data - inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); - // store in in-core inode the key of stat data and version all // object items will have (directory items will have old offset // format, other new objects will consist of new items) - memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode)) set_inode_item_key_version(inode, KEY_FORMAT_3_5); else @@ -1929,7 +1934,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, reiserfs_mark_inode_private(inode); } - insert_inode_hash(inode); reiserfs_update_sd(th, inode); reiserfs_check_path(&path_to_key); @@ -1956,6 +1960,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, out_inserted_sd: inode->i_nlink = 0; th->t_trans_id = 0; /* so the caller can't use this handle later */ + unlock_new_inode(inode); /* OK to do even if we hadn't locked it */ /* If we were inheriting an ACL, we need to release the lock so that * iput doesn't deadlock in reiserfs_delete_xattrs. The locking diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 4f322e5ed840..738967f6c8ee 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -646,6 +646,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } @@ -653,6 +654,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, reiserfs_update_inode_transaction(dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -727,11 +729,13 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode, err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -812,6 +816,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } @@ -819,6 +824,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) reiserfs_update_sd(&th, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: if (locked) @@ -1096,11 +1102,13 @@ static int reiserfs_symlink(struct inode *parent_dir, err = journal_end(&th, parent_dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, parent_dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(parent_dir->i_sb); -- cgit From 1f3403fa640f9f7b135dee79f2d39d01c8ad4a08 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Tue, 30 Dec 2008 22:08:37 -0600 Subject: nfsd race fixes: jfs jfs version of Al Viro's nfsd race patches Signed-off-by: Dave Kleikamp Signed-off-by: Al Viro --- fs/jfs/jfs_inode.c | 29 +++++++++++++++++++++-------- fs/jfs/namei.c | 24 ++++++++++++++++-------- 2 files changed, 37 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index 
70022fd1c539..d4d142c2edd4 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -79,7 +79,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode) inode = new_inode(sb); if (!inode) { jfs_warn("ialloc: new_inode returned NULL!"); - return ERR_PTR(-ENOMEM); + rc = -ENOMEM; + goto fail; } jfs_inode = JFS_IP(inode); @@ -89,8 +90,12 @@ struct inode *ialloc(struct inode *parent, umode_t mode) jfs_warn("ialloc: diAlloc returned %d!", rc); if (rc == -EIO) make_bad_inode(inode); - iput(inode); - return ERR_PTR(rc); + goto fail_put; + } + + if (insert_inode_locked(inode) < 0) { + rc = -EINVAL; + goto fail_unlock; } inode->i_uid = current_fsuid(); @@ -112,11 +117,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode) * Allocate inode to quota. */ if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); - inode->i_flags |= S_NOQUOTA; - inode->i_nlink = 0; - iput(inode); - return ERR_PTR(-EDQUOT); + rc = -EDQUOT; + goto fail_drop; } inode->i_mode = mode; @@ -158,4 +160,15 @@ struct inode *ialloc(struct inode *parent, umode_t mode) jfs_info("ialloc returns inode = 0x%p\n", inode); return inode; + +fail_drop: + DQUOT_DROP(inode); + inode->i_flags |= S_NOQUOTA; +fail_unlock: + inode->i_nlink = 0; + unlock_new_inode(inode); +fail_put: + iput(inode); +fail: + return ERR_PTR(rc); } diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index cc3cedffbfa1..b4de56b851e4 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -155,7 +155,6 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, ip->i_fop = &jfs_file_operations; ip->i_mapping->a_ops = &jfs_aops; - insert_inode_hash(ip); mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; @@ -171,9 +170,12 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -289,7 +291,6 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) ip->i_op = &jfs_dir_inode_operations; ip->i_fop = &jfs_dir_operations; - insert_inode_hash(ip); mark_inode_dirty(ip); /* update parent directory inode */ @@ -306,9 +307,12 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -1019,7 +1023,6 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, goto out3; } - insert_inode_hash(ip); mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; @@ -1039,9 +1042,12 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -1399,7 +1405,6 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, jfs_ip->dev = new_encode_dev(rdev); init_special_inode(ip, ip->i_mode, rdev); - insert_inode_hash(ip); mark_inode_dirty(ip); dir->i_ctime = dir->i_mtime = CURRENT_TIME; @@ -1417,9 +1422,12 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out1: free_UCSname(&dname); -- cgit From 59e55e6cf86eb472e8373831c4234252916c53ef Mon Sep 17 00:00:00 2001 From: 
Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:11 +0000 Subject: Remove devpts_root global Remove the 'devpts_root' global variable and find the root dentry using the super_block. The super-block can be found from the device inode, using the new wrapper, pts_sb_from_inode(). Changelog: This patch is based on an earlier patchset from Serge Hallyn and Matt Helsley. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 5d61b7c06e13..f96e10a109fe 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -34,7 +34,6 @@ static DEFINE_IDA(allocated_ptys); static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; -static struct dentry *devpts_root; static struct { int setuid; @@ -56,6 +55,14 @@ static const match_table_t tokens = { {Opt_err, NULL} }; +static inline struct super_block *pts_sb_from_inode(struct inode *inode) +{ + if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) + return inode->i_sb; + + return devpts_mnt->mnt_sb; +} + static int devpts_remount(struct super_block *sb, int *flags, char *data) { char *p; @@ -142,7 +149,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) inode->i_fop = &simple_dir_operations; inode->i_nlink = 2; - devpts_root = s->s_root = d_alloc_root(inode); + s->s_root = d_alloc_root(inode); if (s->s_root) return 0; @@ -211,7 +218,9 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) struct tty_driver *driver = tty->driver; dev_t device = MKDEV(driver->major, driver->minor_start+number); struct dentry *dentry; - struct inode *inode = new_inode(devpts_mnt->mnt_sb); + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct inode *inode = new_inode(sb); + struct dentry *root = sb->s_root; char s[12]; /* We're supposed to be given the slave end of a pty */ @@ -231,15 +240,15 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) sprintf(s, "%d", number); - mutex_lock(&devpts_root->d_inode->i_mutex); + mutex_lock(&root->d_inode->i_mutex); - dentry = d_alloc_name(devpts_root, s); + dentry = d_alloc_name(root, s); if (!IS_ERR(dentry)) { d_add(dentry, inode); - fsnotify_create(devpts_root->d_inode, dentry); + fsnotify_create(root->d_inode, dentry); } - mutex_unlock(&devpts_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); return 0; } @@ -256,11 +265,13 @@ struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number) void devpts_pty_kill(struct tty_struct *tty) { struct inode *inode = tty->driver_data; + struct super_block *sb = pts_sb_from_inode(inode); + struct dentry *root = sb->s_root; struct dentry *dentry; BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - mutex_lock(&devpts_root->d_inode->i_mutex); + mutex_lock(&root->d_inode->i_mutex); dentry = d_find_alias(inode); if (dentry && !IS_ERR(dentry)) { @@ -269,7 +280,7 @@ void devpts_pty_kill(struct tty_struct *tty) dput(dentry); } - mutex_unlock(&devpts_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); } static int __init init_devpts_fs(void) -- cgit From e76b7c01e598d2d14ddfdb6ae5c6afe45245d0de Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:21 +0000 Subject: Per-mount allocated_ptys To enable multiple mounts of devpts, 'allocated_ptys' must be a per-mount variable rather than a global variable. 
Move 'allocated_ptys' into the super_block's s_fs_info. Changelog[v2]: Define and use DEVPTS_SB() wrapper. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index f96e10a109fe..49d879d911b1 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -30,7 +30,6 @@ #define PTMX_MINOR 2 extern int pty_limit; /* Config limit on Unix98 ptys */ -static DEFINE_IDA(allocated_ptys); static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; @@ -55,6 +54,15 @@ static const match_table_t tokens = { {Opt_err, NULL} }; +struct pts_fs_info { + struct ida allocated_ptys; +}; + +static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + static inline struct super_block *pts_sb_from_inode(struct inode *inode) { if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) @@ -126,6 +134,19 @@ static const struct super_operations devpts_sops = { .show_options = devpts_show_options, }; +static void *new_pts_fs_info(void) +{ + struct pts_fs_info *fsi; + + fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL); + if (!fsi) + return NULL; + + ida_init(&fsi->allocated_ptys); + + return fsi; +} + static int devpts_fill_super(struct super_block *s, void *data, int silent) { @@ -137,9 +158,13 @@ devpts_fill_super(struct super_block *s, void *data, int silent) s->s_op = &devpts_sops; s->s_time_gran = 1; + s->s_fs_info = new_pts_fs_info(); + if (!s->s_fs_info) + goto fail; + inode = new_inode(s); if (!inode) - goto fail; + goto free_fsi; inode->i_ino = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_blocks = 0; @@ -155,6 +180,9 @@ devpts_fill_super(struct super_block *s, void *data, int silent) printk("devpts: get root dentry failed\n"); iput(inode); + +free_fsi: + kfree(s->s_fs_info); fail: return -ENOMEM; } @@ -165,11 +193,19 @@ static int devpts_get_sb(struct file_system_type *fs_type, return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); } +static void devpts_kill_sb(struct super_block *sb) +{ + struct pts_fs_info *fsi = DEVPTS_SB(sb); + + kfree(fsi); + kill_anon_super(sb); +} + static struct file_system_type devpts_fs_type = { .owner = THIS_MODULE, .name = "devpts", .get_sb = devpts_get_sb, - .kill_sb = kill_anon_super, + .kill_sb = devpts_kill_sb, }; /* @@ -179,16 +215,18 @@ static struct file_system_type devpts_fs_type = { int devpts_new_index(struct inode *ptmx_inode) { + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct pts_fs_info *fsi = DEVPTS_SB(sb); int index; int ida_ret; retry: - if (!ida_pre_get(&allocated_ptys, GFP_KERNEL)) { + if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) { return -ENOMEM; } mutex_lock(&allocated_ptys_lock); - ida_ret = ida_get_new(&allocated_ptys, &index); + ida_ret = ida_get_new(&fsi->allocated_ptys, &index); if (ida_ret < 0) { mutex_unlock(&allocated_ptys_lock); if (ida_ret == -EAGAIN) @@ -197,7 +235,7 @@ retry: } if (index >= pty_limit) { - ida_remove(&allocated_ptys, index); + ida_remove(&fsi->allocated_ptys, index); mutex_unlock(&allocated_ptys_lock); return -EIO; } @@ -207,8 +245,11 @@ retry: void devpts_kill_index(struct inode *ptmx_inode, int idx) { + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct pts_fs_info *fsi = DEVPTS_SB(sb); + mutex_lock(&allocated_ptys_lock); - ida_remove(&allocated_ptys, idx); + 
ida_remove(&fsi->allocated_ptys, idx); mutex_unlock(&allocated_ptys_lock); } -- cgit From 31af0abbdafb66ad8e27e3df878faec2ebe1132e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:33 +0000 Subject: Per-mount 'config' object With support for multiple mounts of devpts, the 'config' structure really represents per-mount options rather than config parameters. Rename 'config' structure to 'pts_mount_opts' and store it in the super-block. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 45 +++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 49d879d911b1..b793e6e3c21e 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -34,13 +34,13 @@ static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; -static struct { +struct pts_mount_opts { int setuid; int setgid; uid_t uid; gid_t gid; umode_t mode; -} config = {.mode = DEVPTS_DEFAULT_MODE}; +}; enum { Opt_uid, Opt_gid, Opt_mode, @@ -56,6 +56,7 @@ static const match_table_t tokens = { struct pts_fs_info { struct ida allocated_ptys; + struct pts_mount_opts mount_opts; }; static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) @@ -74,12 +75,14 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) static int devpts_remount(struct super_block *sb, int *flags, char *data) { char *p; + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; - config.setuid = 0; - config.setgid = 0; - config.uid = 0; - config.gid = 0; - config.mode = DEVPTS_DEFAULT_MODE; + opts->setuid = 0; + opts->setgid = 0; + opts->uid = 0; + opts->gid = 0; + opts->mode = DEVPTS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -94,19 +97,19 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; - config.uid = option; - config.setuid = 1; + opts->uid = option; + opts->setuid = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; - config.gid = option; - config.setgid = 1; + opts->gid = option; + opts->setgid = 1; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; - config.mode = option & S_IALLUGO; + opts->mode = option & S_IALLUGO; break; default: printk(KERN_ERR "devpts: called with bogus options\n"); @@ -119,11 +122,14 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) { - if (config.setuid) - seq_printf(seq, ",uid=%u", config.uid); - if (config.setgid) - seq_printf(seq, ",gid=%u", config.gid); - seq_printf(seq, ",mode=%03o", config.mode); + struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + if (opts->setuid) + seq_printf(seq, ",uid=%u", opts->uid); + if (opts->setgid) + seq_printf(seq, ",gid=%u", opts->gid); + seq_printf(seq, ",mode=%03o", opts->mode); return 0; } @@ -143,6 +149,7 @@ static void *new_pts_fs_info(void) return NULL; ida_init(&fsi->allocated_ptys); + fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; return fsi; } @@ -262,6 +269,8 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) struct super_block *sb = pts_sb_from_inode(ptmx_inode); struct inode *inode = new_inode(sb); struct dentry *root = sb->s_root; + struct pts_fs_info *fsi = 
DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; char s[12]; /* We're supposed to be given the slave end of a pty */ @@ -275,7 +284,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) inode->i_uid = config.setuid ? config.uid : current_fsuid(); inode->i_gid = config.setgid ? config.gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - init_special_inode(inode, S_IFCHR|config.mode, device); + init_special_inode(inode, S_IFCHR|opts->mode, device); inode->i_private = tty; tty->driver_data = inode; -- cgit From 53af8ee4094d80ddaac7efefb572b1c22ae49367 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:47 +0000 Subject: Extract option parsing to new function Move code to parse mount options into a separate function so it can (later) be shared between mount and remount operations. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b793e6e3c21e..00530e82673e 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -72,11 +72,9 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) return devpts_mnt->mnt_sb; } -static int devpts_remount(struct super_block *sb, int *flags, char *data) +static int parse_mount_options(char *data, struct pts_mount_opts *opts) { char *p; - struct pts_fs_info *fsi = DEVPTS_SB(sb); - struct pts_mount_opts *opts = &fsi->mount_opts; opts->setuid = 0; opts->setgid = 0; @@ -120,6 +118,14 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) return 0; } +static int devpts_remount(struct super_block *sb, int *flags, char *data) +{ + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + return parse_mount_options(data, opts); +} + static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) { struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb); -- cgit From 1f8f1e296583f9f832c2fe7b5a219675b74bf43e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:02 +0000 Subject: Define mknod_ptmx() /dev/ptmx is closely tied to the devpts filesystem. An open of /dev/ptmx, allocates the next pty index and the associated device shows up in the devpts fs as /dev/pts/n. Wih multiple instancs of devpts filesystem, during an open of /dev/ptmx we would be unable to determine which instance of the devpts is being accessed. So we move the 'ptmx' node into /dev/pts and use the inode of the 'ptmx' node to identify the superblock and hence the devpts instance. This patch adds ability for the kernel to internally create the [ptmx, c, 5:2] device when mounting devpts filesystem. Since the ptmx node in devpts is new and may surprise some userspace scripts, the default permissions for the new node is 0000. These permissions can be changed either using chmod or by remounting with the new '-o ptmxmode=0666' mount option. Changelog[v5]: - [Serge Hallyn bugfix]: Letting new_inode() assign inode number to ptmx can collide with hand-assigning inode numbers to ptys. So, hand-assign specific inode number to ptmx node also. - [Serge Hallyn]: Maybe safer to grab root dentry mutex while creating ptmx node - [Bugfix with Serge Hallyn] Replace lookup_one_len() in mknod_ptmx() wih d_alloc_name() (lookup during ->get_sb() locks up system). To simplify patchset, fold the ptmx_dentry patch into this. 
Changelog[v4]: - Change default permissions of pts/ptmx node to 0000. - Move code for ptmxmode under #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES. Changelog[v3]: - Rename ptmx_mode to ptmxmode (for consistency with 'newinstance') Changelog[v2]: - [H. Peter Anvin] Remove mknod() system call support and create the ptmx node internally. Changelog[v1]: - Earlier version of this patch enabled creating /dev/pts/tty as well. As pointed out by Al Viro and H. Peter Anvin, that is not really necessary. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 110 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 00530e82673e..8ee9dc2f9e48 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -27,6 +27,13 @@ #define DEVPTS_SUPER_MAGIC 0x1cd1 #define DEVPTS_DEFAULT_MODE 0600 +/* + * ptmx is a new node in /dev/pts and will be unused in legacy (single- + * instance) mode. To prevent surprises in user space, set permissions of + * ptmx to 0. Use 'chmod' or remount with '-o ptmxmode' to set meaningful + * permissions. + */ +#define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 extern int pty_limit; /* Config limit on Unix98 ptys */ @@ -40,10 +47,11 @@ struct pts_mount_opts { uid_t uid; gid_t gid; umode_t mode; + umode_t ptmxmode; }; enum { - Opt_uid, Opt_gid, Opt_mode, + Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_err }; @@ -51,12 +59,16 @@ static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + {Opt_ptmxmode, "ptmxmode=%o"}, +#endif {Opt_err, NULL} }; struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; + struct dentry *ptmx_dentry; }; static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) @@ -81,6 +93,7 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) opts->uid = 0; opts->gid = 0; opts->mode = DEVPTS_DEFAULT_MODE; + opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -109,6 +122,13 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return -EINVAL; opts->mode = option & S_IALLUGO; break; +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + case Opt_ptmxmode: + if (match_octal(&args[0], &option)) + return -EINVAL; + opts->ptmxmode = option & S_IALLUGO; + break; +#endif default: printk(KERN_ERR "devpts: called with bogus options\n"); return -EINVAL; @@ -118,12 +138,93 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return 0; } +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES +static int mknod_ptmx(struct super_block *sb) +{ + int mode; + int rc = -ENOMEM; + struct dentry *dentry; + struct inode *inode; + struct dentry *root = sb->s_root; + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + mutex_lock(&root->d_inode->i_mutex); + + /* If we have already created ptmx node, return */ + if (fsi->ptmx_dentry) { + rc = 0; + goto out; + } + + dentry = d_alloc_name(root, "ptmx"); + if (!dentry) { + printk(KERN_NOTICE "Unable to alloc dentry for ptmx node\n"); + goto out; + } + + /* + * Create a new 'ptmx' node in this mount of devpts. 
+ */ + inode = new_inode(sb); + if (!inode) { + printk(KERN_ERR "Unable to alloc inode for ptmx node\n"); + dput(dentry); + goto out; + } + + inode->i_ino = 2; + inode->i_uid = inode->i_gid = 0; + inode->i_blocks = 0; + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + mode = S_IFCHR|opts->ptmxmode; + init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2)); + + d_add(dentry, inode); + + fsi->ptmx_dentry = dentry; + rc = 0; + + printk(KERN_DEBUG "Created ptmx node in devpts ino %lu\n", + inode->i_ino); +out: + mutex_unlock(&root->d_inode->i_mutex); + return rc; +} + +static void update_ptmx_mode(struct pts_fs_info *fsi) +{ + struct inode *inode; + if (fsi->ptmx_dentry) { + inode = fsi->ptmx_dentry->d_inode; + inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; + } +} +#else +static inline void update_ptmx_mode(struct pts_fs_info *fsi) +{ + return; +} +#endif + static int devpts_remount(struct super_block *sb, int *flags, char *data) { + int err; struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; - return parse_mount_options(data, opts); + err = parse_mount_options(data, opts); + + /* + * parse_mount_options() restores options to default values + * before parsing and may have changed ptmxmode. So, update the + * mode in the inode too. Bogus options don't fail the remount, + * so do this even on error return. + */ + update_ptmx_mode(fsi); + + return err; } static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) @@ -136,6 +237,9 @@ static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) if (opts->setgid) seq_printf(seq, ",gid=%u", opts->gid); seq_printf(seq, ",mode=%03o", opts->mode); +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); +#endif return 0; } @@ -156,6 +260,7 @@ static void *new_pts_fs_info(void) ida_init(&fsi->allocated_ptys); fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; + fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; return fsi; } @@ -163,7 +268,7 @@ static void *new_pts_fs_info(void) static int devpts_fill_super(struct super_block *s, void *data, int silent) { - struct inode * inode; + struct inode *inode; s->s_blocksize = 1024; s->s_blocksize_bits = 10; @@ -190,7 +295,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) s->s_root = d_alloc_root(inode); if (s->s_root) return 0; - + printk("devpts: get root dentry failed\n"); iput(inode); @@ -211,7 +316,7 @@ static void devpts_kill_sb(struct super_block *sb) struct pts_fs_info *fsi = DEVPTS_SB(sb); kfree(fsi); - kill_anon_super(sb); + kill_litter_super(sb); } static struct file_system_type devpts_fs_type = { -- cgit From d4076ac55bf8755ce6c5706478631c1726cf0179 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:19 +0000 Subject: Define get_init_pts_sb() See comments in the function header for details. The new interface will be used in a follow-on patch. 
Changelog [v2]: [Dave Hansen] Replace get_sb_ref() in fs/super.c with get_init_pts_sb() and make the new interface private to devpts Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 8ee9dc2f9e48..2d0eb2cf99e6 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -305,10 +305,63 @@ fail: return -ENOMEM; } +static int compare_init_pts_sb(struct super_block *s, void *p) +{ + if (devpts_mnt) + return devpts_mnt->mnt_sb == s; + + return 0; +} + +/* + * get_init_pts_sb() + * + * This interface is needed to support multiple namespace semantics in + * devpts while preserving backward compatibility of the current 'single- + * namespace' semantics. i.e all mounts of devpts without the 'newinstance' + * mount option should bind to the initial kernel mount, like + * get_sb_single(). + * + * Mounts with 'newinstance' option create a new private namespace. + * + * But for single-mount semantics, devpts cannot use get_sb_single(), + * because get_sb_single()/sget() find and use the super-block from + * the most recent mount of devpts. But that recent mount may be a + * 'newinstance' mount and get_sb_single() would pick the newinstance + * super-block instead of the initial super-block. + * + * This interface is identical to get_sb_single() except that it + * consistently selects the 'single-namespace' superblock even in the + * presence of the private namespace (i.e 'newinstance') super-blocks. + */ +static int get_init_pts_sb(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + struct super_block *s; + int error; + + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); + if (IS_ERR(s)) + return PTR_ERR(s); + + if (!s->s_root) { + s->s_flags = flags; + error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + up_write(&s->s_umount); + deactivate_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + } + do_remount_sb(s, flags, data, 0); + return simple_set_mnt(mnt, s); +} + static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); + return get_init_pts_sb(fs_type, flags, data, mnt); } static void devpts_kill_sb(struct super_block *sb) -- cgit From 2a1b2dc0c83bbfc24d72cafd5e69810a149b44e4 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:27 +0000 Subject: Enable multiple instances of devpts To support containers, allow multiple instances of devpts filesystem, such that indices of ptys allocated in one instance are independent of ptys allocated in other instances of devpts. But to preserve backward compatibility, enable this support for multiple instances only if: - CONFIG_DEVPTS_MULTIPLE_INSTANCES is set to Y, and - '-o newinstance' mount option is specified while mounting devpts To use multi-instance mount, a container startup script could: $ ns_exec -cm /bin/bash $ umount /dev/pts $ mount -t devpts -o newinstance lxcpts /dev/pts $ mount -o bind /dev/pts/ptmx /dev/ptmx $ /usr/sbin/sshd -p 1234 where 'ns_exec -cm /bin/bash' is calls clone() with CLONE_NEWNS flag and execs /bin/bash in the child process. A pty created by the sshd is not visible in the original mount of /dev/pts. 
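For illustration only (this is not part of the patch), the startup script above corresponds roughly to the following user-space C sketch; error handling and the final sshd exec are omitted, and the 'lxcpts' source name and the ptmxmode value are simply the ones used in the examples above:

#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>

static int setup_private_pts(void)
{
	/* leave the parent's mount namespace, as clone(CLONE_NEWNS) would */
	if (unshare(CLONE_NEWNS) < 0)
		return -1;

	/* drop the inherited devpts mount and create a private instance */
	umount("/dev/pts");
	if (mount("lxcpts", "/dev/pts", "devpts", 0,
		  "newinstance,ptmxmode=0666") < 0)
		return -1;

	/* make a plain open of /dev/ptmx reach this instance's ptmx node */
	return mount("/dev/pts/ptmx", "/dev/ptmx", NULL, MS_BIND, NULL);
}

The bind mount in the last step is what lets the kernel derive the correct devpts superblock from the inode of the ptmx node that was actually opened.
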
USER-SPACE-IMPACT: - See Documentation/fs/devpts.txt (included in next patch) for user- space impact in multi-instance and mixed-mode operation. TODO: - Update mount(8), pts(4) man pages. Highlight impact of not redirecting /dev/ptmx to /dev/pts/ptmx after a multi-instance mount. Changelog[v6]: - [Dave Hansen] Use new get_init_pts_sb() interface - [Serge Hallyn] Don't bother displaying 'newinstance' in show_options - [Serge Hallyn] Use macros (PARSE_REMOUNT/PARSE_MOUNT) instead of 0/1. - [Serge Hallyn] Check error return from get_sb_single() (now get_init_pts_sb()) - devpts_pty_kill(): don't dput error dentries Changelog[v5]: - Move get_sb_ref() definition to earlier patch - Move usage info to Documentation/filesystems/devpts.txt (next patch) - Make ptmx node even in init_pts_ns, now that default mode is 0000 (defined in earlier patch, enabled here). - Cache ptmx dentry and use to update mode during remount (defined in earlier patch, enabled here). - Bugfix: explicitly ignore newinstance on remount (if newinstance was specified on remount of initial mount, it would be ignored but /proc/mounts would imply that the option was set) Changelog[v4]: - Update patch description to address H. Peter Anvin's comments - Consolidate multi-instance mode code under new config token, CONFIG_DEVPTS_MULTIPLE_INSTANCE. - Move usage-details from patch description to Documentation/fs/devpts.txt Changelog[v3]: - Rename new mount option to 'newinstance' - Create ptmx nodes only in 'newinstance' mounts - Bugfix: parse_mount_options() modifies @data but since we need to parse the @data twice (once in devpts_get_sb() and once during do_remount_sb()), parse a local copy of @data in devpts_get_sb(). (restructured code in devpts_get_sb() to fix this) Changelog[v2]: - Support both single-mount and multiple-mount semantics and provide '-onewmnt' option to select the semantics. 
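To make the independent index allocation described above concrete, here is a second illustrative sketch (again not part of the patch; the helper name is invented). A process inside such a container allocates a pty through the per-instance ptmx node, and the slave index comes from that instance's own 'allocated_ptys' IDA:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int open_pty_master(void)
{
	/* after the bind mount, /dev/ptmx is this instance's ptmx node */
	int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);

	if (master < 0)
		return -1;
	if (grantpt(master) < 0 || unlockpt(master) < 0) {
		close(master);
		return -1;
	}
	/* e.g. "/dev/pts/0"; numbering restarts in every new instance */
	printf("slave: %s\n", ptsname(master));
	return master;
}

Two containers doing this concurrently can both be handed /dev/pts/0, and neither slave is visible in the other's (or in the initial) mount of /dev/pts.
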
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 170 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 163 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 2d0eb2cf99e6..b4a89fa21673 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -48,10 +48,11 @@ struct pts_mount_opts { gid_t gid; umode_t mode; umode_t ptmxmode; + int newinstance; }; enum { - Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, + Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, Opt_err }; @@ -61,6 +62,7 @@ static const match_table_t tokens = { {Opt_mode, "mode=%o"}, #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES {Opt_ptmxmode, "ptmxmode=%o"}, + {Opt_newinstance, "newinstance"}, #endif {Opt_err, NULL} }; @@ -78,13 +80,17 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) static inline struct super_block *pts_sb_from_inode(struct inode *inode) { +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) return inode->i_sb; - +#endif return devpts_mnt->mnt_sb; } -static int parse_mount_options(char *data, struct pts_mount_opts *opts) +#define PARSE_MOUNT 0 +#define PARSE_REMOUNT 1 + +static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) { char *p; @@ -95,6 +101,10 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) opts->mode = DEVPTS_DEFAULT_MODE; opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; + /* newinstance makes sense only on initial mount */ + if (op == PARSE_MOUNT) + opts->newinstance = 0; + while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; @@ -128,6 +138,11 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return -EINVAL; opts->ptmxmode = option & S_IALLUGO; break; + case Opt_newinstance: + /* newinstance makes sense only on initial mount */ + if (op == PARSE_MOUNT) + opts->newinstance = 1; + break; #endif default: printk(KERN_ERR "devpts: called with bogus options\n"); @@ -214,7 +229,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; - err = parse_mount_options(data, opts); + err = parse_mount_options(data, PARSE_REMOUNT, opts); /* * parse_mount_options() restores options to default values @@ -309,8 +324,100 @@ static int compare_init_pts_sb(struct super_block *s, void *p) { if (devpts_mnt) return devpts_mnt->mnt_sb == s; + return 0; +} + +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES +/* + * Safely parse the mount options in @data and update @opts. + * + * devpts ends up parsing options two times during mount, due to the + * two modes of operation it supports. The first parse occurs in + * devpts_get_sb() when determining the mode (single-instance or + * multi-instance mode). The second parse happens in devpts_remount() + * or new_pts_mount() depending on the mode. + * + * Parsing of options modifies the @data making subsequent parsing + * incorrect. So make a local copy of @data and parse it. + * + * Return: 0 On success, -errno on error + */ +static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) +{ + int rc; + void *datacp; + + if (!data) + return 0; + + /* Use kstrdup() ? 
*/ + datacp = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!datacp) + return -ENOMEM; + + memcpy(datacp, data, PAGE_SIZE); + rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts); + kfree(datacp); + + return rc; +} + +/* + * Mount a new (private) instance of devpts. PTYs created in this + * instance are independent of the PTYs in other devpts instances. + */ +static int new_pts_mount(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + int err; + struct pts_fs_info *fsi; + struct pts_mount_opts *opts; + + printk(KERN_NOTICE "devpts: newinstance mount\n"); + + err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt); + if (err) + return err; + + fsi = DEVPTS_SB(mnt->mnt_sb); + opts = &fsi->mount_opts; + + err = parse_mount_options(data, PARSE_MOUNT, opts); + if (err) + goto fail; + + err = mknod_ptmx(mnt->mnt_sb); + if (err) + goto fail; return 0; + +fail: + dput(mnt->mnt_sb->s_root); + deactivate_super(mnt->mnt_sb); + return err; +} + +/* + * Check if 'newinstance' mount option was specified in @data. + * + * Return: -errno on error (eg: invalid mount options specified) + * : 1 if 'newinstance' mount option was specified + * : 0 if 'newinstance' mount option was NOT specified + */ +static int is_new_instance_mount(void *data) +{ + int rc; + struct pts_mount_opts opts; + + if (!data) + return 0; + + rc = safe_parse_mount_options(data, &opts); + if (!rc) + rc = opts.newinstance; + + return rc; } /* @@ -358,11 +465,54 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, return simple_set_mnt(mnt, s); } +/* + * Mount or remount the initial kernel mount of devpts. This type of + * mount maintains the legacy, single-instance semantics, while the + * kernel still allows multiple-instances. + */ +static int init_pts_mount(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + int err; + + err = get_init_pts_sb(fs_type, flags, data, mnt); + if (err) + return err; + + err = mknod_ptmx(mnt->mnt_sb); + if (err) { + dput(mnt->mnt_sb->s_root); + deactivate_super(mnt->mnt_sb); + } + + return err; +} + static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_init_pts_sb(fs_type, flags, data, mnt); + int new; + + new = is_new_instance_mount(data); + if (new < 0) + return new; + + if (new) + return new_pts_mount(fs_type, flags, data, mnt); + + return init_pts_mount(fs_type, flags, data, mnt); } +#else +/* + * This supports only the legacy single-instance semantics (no + * multiple-instance semantics) + */ +static int devpts_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); +} +#endif static void devpts_kill_sb(struct super_block *sb) { @@ -488,12 +638,18 @@ void devpts_pty_kill(struct tty_struct *tty) mutex_lock(&root->d_inode->i_mutex); dentry = d_find_alias(inode); - if (dentry && !IS_ERR(dentry)) { + if (IS_ERR(dentry)) + goto out; + + if (dentry) { inode->i_nlink--; d_delete(dentry); - dput(dentry); + dput(dentry); // d_alloc_name() in devpts_pty_new() } + dput(dentry); // d_find_alias above + +out: mutex_unlock(&root->d_inode->i_mutex); } -- cgit From 835aa440f1c3fe16a622015bc1b52dffedf6d90e Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 2 Jan 2009 13:42:48 +0000 Subject: devpts: Coding style clean up Just nail the oddments now while this code is being touched Signed-off-by: Alan Cox 
Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 53 ++++++++++++++++++++++++++--------------------------- 1 file changed, 26 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b4a89fa21673..b02c24313d5c 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -311,7 +311,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) if (s->s_root) return 0; - printk("devpts: get root dentry failed\n"); + printk(KERN_ERR "devpts: get root dentry failed\n"); iput(inode); free_fsi: @@ -444,25 +444,25 @@ static int is_new_instance_mount(void *data) static int get_init_pts_sb(struct file_system_type *fs_type, int flags, void *data, struct vfsmount *mnt) { - struct super_block *s; - int error; - - s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); - if (IS_ERR(s)) - return PTR_ERR(s); - - if (!s->s_root) { - s->s_flags = flags; - error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); - if (error) { - up_write(&s->s_umount); - deactivate_super(s); - return error; - } - s->s_flags |= MS_ACTIVE; - } - do_remount_sb(s, flags, data, 0); - return simple_set_mnt(mnt, s); + struct super_block *s; + int error; + + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); + if (IS_ERR(s)) + return PTR_ERR(s); + + if (!s->s_root) { + s->s_flags = flags; + error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + up_write(&s->s_umount); + deactivate_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + } + do_remount_sb(s, flags, data, 0); + return simple_set_mnt(mnt, s); } /* @@ -477,7 +477,7 @@ static int init_pts_mount(struct file_system_type *fs_type, int flags, err = get_init_pts_sb(fs_type, flags, data, mnt); if (err) - return err; + return err; err = mknod_ptmx(mnt->mnt_sb); if (err) { @@ -542,9 +542,8 @@ int devpts_new_index(struct inode *ptmx_inode) int ida_ret; retry: - if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) { + if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) return -ENOMEM; - } mutex_lock(&allocated_ptys_lock); ida_ret = ida_get_new(&fsi->allocated_ptys, &index); @@ -576,7 +575,8 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) { - int number = tty->index; /* tty layer puts index from devpts_new_index() in here */ + /* tty layer puts index from devpts_new_index() in here */ + int number = tty->index; struct tty_driver *driver = tty->driver; dev_t device = MKDEV(driver->major, driver->minor_start+number); struct dentry *dentry; @@ -644,11 +644,10 @@ void devpts_pty_kill(struct tty_struct *tty) if (dentry) { inode->i_nlink--; d_delete(dentry); - dput(dentry); // d_alloc_name() in devpts_pty_new() + dput(dentry); /* d_alloc_name() in devpts_pty_new() */ } - dput(dentry); // d_find_alias above - + dput(dentry); /* d_find_alias above */ out: mutex_unlock(&root->d_inode->i_mutex); } -- cgit From 8c056e5b148498192832678cf2957760945e8c71 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 2 Jan 2009 13:44:12 +0000 Subject: devpts: fix unused function warning fs/devpts/inode.c:324: warning: 'compare_init_pts_sb' defined but not used Signed-off-by: Andrew Morton Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b02c24313d5c..3f309f181de8 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -320,6 +320,7 @@ fail: return -ENOMEM; } 
+#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES static int compare_init_pts_sb(struct super_block *s, void *p) { if (devpts_mnt) @@ -327,7 +328,6 @@ static int compare_init_pts_sb(struct super_block *s, void *p) return 0; } -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES /* * Safely parse the mount options in @data and update @opts. * -- cgit From d0eafc7db8f170d534a16b5f04617e98ae2025de Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Jan 2009 13:44:49 +0000 Subject: CRED: Wrap task credential accesses in the devpts filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 3f309f181de8..fff96e152c0c 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -594,9 +594,9 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) if (!inode) return -ENOMEM; - inode->i_ino = number+2; - inode->i_uid = config.setuid ? config.uid : current_fsuid(); - inode->i_gid = config.setgid ? config.gid : current_fsgid(); + inode->i_ino = number + 3; + inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); + inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|opts->mode, device); inode->i_private = tty; -- cgit From fe30af971d896c144ef4708f97cf9d3186303c42 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:13 +0000 Subject: remove the rudiment of a.out for sparc it's been used only in sunos compat Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/binfmt_aout.c | 65 +------------------------------------------------------- 1 file changed, 1 insertion(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index f1f3f4192a60..8a3b32f5b781 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -99,88 +99,53 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u # define START_DATA(u) (u.start_data) #elif defined(__arm__) # define START_DATA(u) ((u.u_tsize << PAGE_SHIFT) + u.start_code) -#elif defined(__sparc__) -# define START_DATA(u) (u.u_tsize) #elif defined(__i386__) || defined(__mc68000__) || defined(__arch_um__) # define START_DATA(u) (u.u_tsize << PAGE_SHIFT) #endif -#ifdef __sparc__ -# define START_STACK(u) ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1)) -#else # define START_STACK(u) (u.start_stack) -#endif fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; current->flags |= PF_DUMPCORE; strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); -#ifndef __sparc__ dump.u_ar0 = offsetof(struct user, regs); -#endif dump.signal = signr; aout_dump_thread(regs, &dump); /* If the size of the dump file exceeds the rlimit, then see what would happen if we wrote the stack, but not the data area. */ -#ifdef __sparc__ - if ((dump.u_dsize + dump.u_ssize) > limit) - dump.u_dsize = 0; -#else if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit) dump.u_dsize = 0; -#endif /* Make sure we have enough room to write the stack and data areas. 
*/ -#ifdef __sparc__ - if (dump.u_ssize > limit) - dump.u_ssize = 0; -#else if ((dump.u_ssize + 1) * PAGE_SIZE > limit) dump.u_ssize = 0; -#endif /* make sure we actually have a data and stack area to dump */ set_fs(USER_DS); -#ifdef __sparc__ - if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize)) - dump.u_dsize = 0; - if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize)) - dump.u_ssize = 0; -#else if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) dump.u_dsize = 0; if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) dump.u_ssize = 0; -#endif set_fs(KERNEL_DS); /* struct user */ DUMP_WRITE(&dump,sizeof(dump)); /* Now dump all of the user data. Include malloced stuff as well */ -#ifndef __sparc__ DUMP_SEEK(PAGE_SIZE); -#endif /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ if (dump.u_dsize != 0) { dump_start = START_DATA(dump); -#ifdef __sparc__ - dump_size = dump.u_dsize; -#else dump_size = dump.u_dsize << PAGE_SHIFT; -#endif DUMP_WRITE(dump_start,dump_size); } /* Now prepare to dump the stack area */ if (dump.u_ssize != 0) { dump_start = START_STACK(dump); -#ifdef __sparc__ - dump_size = dump.u_ssize; -#else dump_size = dump.u_ssize << PAGE_SHIFT; -#endif DUMP_WRITE(dump_start,dump_size); } /* Finally dump the task struct. Not be used by gdb, but could be useful */ @@ -205,11 +170,6 @@ static unsigned long __user *create_aout_tables(char __user *p, struct linux_bin int envc = bprm->envc; sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p); -#ifdef __sparc__ - /* This imposes the proper stack alignment for a new process. */ - sp = (void __user *) (((unsigned long) sp) & ~7); - if ((envc+argc+3)&1) --sp; -#endif #ifdef __alpha__ /* whee.. test-programs are so much fun. */ put_user(0, --sp); @@ -302,11 +262,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) /* OK, This is the point of no return */ #if defined(__alpha__) SET_AOUT_PERSONALITY(bprm, ex); -#elif defined(__sparc__) - set_personality(PER_SUNOS); -#if !defined(__sparc_v9__) - memcpy(¤t->thread.core_exec, &ex, sizeof(struct exec)); -#endif #else set_personality(PER_LINUX); #endif @@ -322,24 +277,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; -#ifdef __sparc__ - if (N_MAGIC(ex) == NMAGIC) { - loff_t pos = fd_offset; - /* Fuck me plenty... */ - /* */ - down_write(¤t->mm->mmap_sem); - error = do_brk(N_TXTADDR(ex), ex.a_text); - up_write(¤t->mm->mmap_sem); - bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex), - ex.a_text, &pos); - down_write(¤t->mm->mmap_sem); - error = do_brk(N_DATADDR(ex), ex.a_data); - up_write(¤t->mm->mmap_sem); - bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex), - ex.a_data, &pos); - goto beyond_if; - } -#endif if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; @@ -347,7 +284,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) text_addr = N_TXTADDR(ex); -#if defined(__alpha__) || defined(__sparc__) +#ifdef __alpha__ pos = fd_offset; map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1; #else -- cgit From 17580d7f2f632ff8c9786d609508c35c9f56e1f3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:23 +0000 Subject: sanitize ifdefs in binfmt_aout They are actually alpha vs. i386/arm/m68k i.e. ecoff vs. aout. 
In the only place where we actually tried to handle arm and i386/m68k in different ways (START_DATA() in coredump handling), the arm variant works for all of them (i386 and m68k have u.start_code set to 0). Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/binfmt_aout.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 8a3b32f5b781..b639dcf7c778 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -95,12 +95,10 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u int has_dumped = 0; unsigned long dump_start, dump_size; struct user dump; -#if defined(__alpha__) +#ifdef __alpha__ # define START_DATA(u) (u.start_data) -#elif defined(__arm__) +#else # define START_DATA(u) ((u.u_tsize << PAGE_SHIFT) + u.start_code) -#elif defined(__i386__) || defined(__mc68000__) || defined(__arch_um__) -# define START_DATA(u) (u.u_tsize << PAGE_SHIFT) #endif # define START_STACK(u) (u.start_stack) @@ -176,18 +174,18 @@ static unsigned long __user *create_aout_tables(char __user *p, struct linux_bin put_user(0, --sp); if (bprm->loader) { put_user(0, --sp); - put_user(0x3eb, --sp); + put_user(1003, --sp); put_user(bprm->loader, --sp); - put_user(0x3ea, --sp); + put_user(1002, --sp); } put_user(bprm->exec, --sp); - put_user(0x3e9, --sp); + put_user(1001, --sp); #endif sp -= envc+1; envp = (char __user * __user *) sp; sp -= argc+1; argv = (char __user * __user *) sp; -#if defined(__i386__) || defined(__mc68000__) || defined(__arm__) || defined(__arch_um__) +#ifndef __alpha__ put_user((unsigned long) envp,--sp); put_user((unsigned long) argv,--sp); #endif @@ -260,7 +258,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) return retval; /* OK, This is the point of no return */ -#if defined(__alpha__) +#ifdef __alpha__ SET_AOUT_PERSONALITY(bprm, ex); #else set_personality(PER_LINUX); -- cgit From 3bfacef412b4bc993a8992217e50f1245f2fd3a6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:33 +0000 Subject: get rid of special-casing the /sbin/loader on alpha ... just make it a binfmt handler like #! one. Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/exec.c | 39 --------------------------------------- 1 file changed, 39 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index dfbf7009fbe7..3ef9cf9b1871 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -57,11 +57,6 @@ #include #include "internal.h" -#ifdef __alpha__ -/* for /sbin/loader handling in search_binary_handler() */ -#include -#endif - int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; int suid_dumpable = 0; @@ -1172,41 +1167,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; -#ifdef __alpha__ - /* handle /sbin/loader.. */ - { - struct exec * eh = (struct exec *) bprm->buf; - if (!bprm->loader && eh->fh.f_magic == 0x183 && - (eh->fh.f_flags & 0x3000) == 0x3000) - { - struct file * file; - unsigned long loader; - - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - - loader = bprm->vma->vm_end - sizeof(void *); - - file = open_exec("/sbin/loader"); - retval = PTR_ERR(file); - if (IS_ERR(file)) - return retval; - - /* Remember if the application is TASO. 
*/ - bprm->taso = eh->ah.entry < 0x100000000UL; - - bprm->file = file; - bprm->loader = loader; - retval = prepare_binprm(bprm); - if (retval<0) - return retval; - /* should call search_binary_handler recursively here, - but it does not matter */ - } - } -#endif retval = security_bprm_check(bprm); if (retval) return retval; -- cgit From 157cf649a735a2f7e8dba0ed08e6e38b6c30d886 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 14 Dec 2008 04:57:47 -0500 Subject: sanitize audit_fd_pair() * no allocations * return void Signed-off-by: Al Viro --- fs/pipe.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/pipe.c b/fs/pipe.c index aaf797bd57b9..891697112f66 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1016,10 +1016,7 @@ int do_pipe_flags(int *fd, int flags) goto err_fdr; fdw = error; - error = audit_fd_pair(fdr, fdw); - if (error < 0) - goto err_fdw; - + audit_fd_pair(fdr, fdw); fd_install(fdr, fr); fd_install(fdw, fw); fd[0] = fdr; @@ -1027,8 +1024,6 @@ int do_pipe_flags(int *fd, int flags) return 0; - err_fdw: - put_unused_fd(fdw); err_fdr: put_unused_fd(fdr); err_read_pipe: -- cgit From c644f0e4b56f9a2fc066cd0d75a18074d130e4a3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 4 Jan 2009 12:00:48 -0800 Subject: fs: introduce bgl_lock_ptr() As suggested by Andreas Dilger, introduce a bgl_lock_ptr() helper in and add separate sb_bgl_lock() helpers to filesystem specific header files to break the hidden dependency to struct ext[234]_sb_info. Also, while at it, convert the macros to static inlines to try make up for all the times I broke Andrew Morton's tree. Acked-by: Andreas Dilger Signed-off-by: Pekka Enberg Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext4/ext4_sb.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h index 445fde603df8..b21f16713db0 100644 --- a/fs/ext4/ext4_sb.h +++ b/fs/ext4/ext4_sb.h @@ -146,4 +146,10 @@ struct ext4_sb_info { struct flex_groups *s_flex_groups; }; +static inline spinlock_t * +sb_bgl_lock(struct ext4_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _EXT4_SB */ -- cgit From 54566b2c1594c2326a645a3551f9d989f7ba3c5e Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 4 Jan 2009 12:00:53 -0800 Subject: fs: symlink write_begin allocation context fix With the write_begin/write_end aops, page_symlink was broken because it could no longer pass a GFP_NOFS type mask into the point where the allocations happened. They are done in write_begin, which would always assume that the filesystem can be entered from reclaim. This bug could cause filesystem deadlocks. The funny thing with having a gfp_t mask there is that it doesn't really allow the caller to arbitrarily tinker with the context in which it can be called. It couldn't ever be GFP_ATOMIC, for example, because it needs to take the page lock. The only thing any callers care about is __GFP_FS anyway, so turn that into a single flag. Add a new flag for write_begin, AOP_FLAG_NOFS. Filesystems can now act on this flag in their write_begin function. Change __grab_cache_page to accept a nofs argument as well, to honour that flag (while we're there, change the name to grab_cache_page_write_begin which is more instructive and does away with random leading underscores). 
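To make the new hook concrete, a minimal sketch of a ->write_begin that acts on the flag (illustration only; the examplefs_* names and the private context structure are invented for this example):

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	gfp_t gfp = (flags & AOP_FLAG_NOFS) ? GFP_NOFS : GFP_KERNEL;
	struct examplefs_write_ctxt *ctxt;

	/* the filesystem's own allocations honour AOP_FLAG_NOFS ... */
	ctxt = kmalloc(sizeof(*ctxt), gfp);
	if (!ctxt)
		return -ENOMEM;

	/* ... and the pagecache allocation gets the aop flags untouched */
	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep) {
		kfree(ctxt);
		return -ENOMEM;
	}
	*fsdata = ctxt;
	return 0;
}
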
This is really a more flexible way to go in the end anyway -- if a filesystem happens to want any extra allocations aside from the pagecache ones in ints write_begin function, it may now use GFP_KERNEL (rather than GFP_NOFS) for common case allocations (eg. ocfs2_alloc_write_ctxt, for a random example). [kosaki.motohiro@jp.fujitsu.com: fix ubifs] [kosaki.motohiro@jp.fujitsu.com: fix fuse] Signed-off-by: Nick Piggin Reviewed-by: KOSAKI Motohiro Cc: [2.6.28.x] Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton [ Cleaned up the calling convention: just pass in the AOP flags untouched to the grab_cache_page_write_begin() function. That just simplifies everybody, and may even allow future expansion of the logic. - Linus ] Signed-off-by: Linus Torvalds --- fs/affs/file.c | 2 +- fs/afs/write.c | 2 +- fs/buffer.c | 4 ++-- fs/cifs/file.c | 2 +- fs/ecryptfs/mmap.c | 2 +- fs/ext3/inode.c | 2 +- fs/ext3/namei.c | 3 +-- fs/ext4/inode.c | 4 ++-- fs/ext4/namei.c | 3 +-- fs/fuse/file.c | 4 ++-- fs/gfs2/ops_address.c | 2 +- fs/hostfs/hostfs_kern.c | 2 +- fs/jffs2/file.c | 2 +- fs/libfs.c | 2 +- fs/namei.c | 13 +++++++++---- fs/nfs/file.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/smbfs/file.c | 2 +- fs/ubifs/file.c | 9 +++++---- 19 files changed, 34 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/affs/file.c b/fs/affs/file.c index 1377b1240b6e..9246cb4aa018 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -628,7 +628,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping } index = pos >> PAGE_CACHE_SHIFT; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/afs/write.c b/fs/afs/write.c index d6b85dab35fc..3fb36d433621 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -144,7 +144,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, candidate->state = AFS_WBACK_PENDING; init_waitqueue_head(&candidate->waitq); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { kfree(candidate); return -ENOMEM; diff --git a/fs/buffer.c b/fs/buffer.c index 776ae091d3b0..a13f09b696f7 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1996,7 +1996,7 @@ int block_write_begin(struct file *file, struct address_space *mapping, page = *pagep; if (page == NULL) { ownpage = 1; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { status = -ENOMEM; goto out; @@ -2502,7 +2502,7 @@ int nobh_write_begin(struct file *file, struct address_space *mapping, from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index b1e1fc6a6e6a..12bb656fbe75 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2074,7 +2074,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, cFYI(1, ("write_begin from %lld len %d", (long long)pos, len)); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { rc = -ENOMEM; goto out; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 04d7b3fa1ac6..46cec2b69796 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -288,7 +288,7 @@ static int ecryptfs_write_begin(struct file *file, loff_t prev_page_end_size; int rc = 0; - page = 
__grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index c4bdccf976b5..5fa453b49a64 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1161,7 +1161,7 @@ static int ext3_write_begin(struct file *file, struct address_space *mapping, to = from + len; retry: - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 297ea8dfac7c..1dd2abe6313e 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2175,8 +2175,7 @@ retry: * We have a transaction open. All is sweetness. It also sets * i_size in generic_commit_write(). */ - err = __page_symlink(inode, symname, l, - mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + err = __page_symlink(inode, symname, l, 1); if (err) { drop_nlink(inode); unlock_new_inode(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7c3325e0b005..6702a49992a6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1346,7 +1346,7 @@ retry: goto out; } - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; @@ -2550,7 +2550,7 @@ retry: goto out; } - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index da98a9012fa5..9fd2a5e1be4d 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2212,8 +2212,7 @@ retry: * We have a transaction open. All is sweetness. It also sets * i_size in generic_commit_write(). 
*/ - err = __page_symlink(inode, symname, l, - mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + err = __page_symlink(inode, symname, l, 1); if (err) { clear_nlink(inode); unlock_new_inode(inode); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 34930a964b82..4c9ee7011265 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -646,7 +646,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; @@ -779,7 +779,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, break; err = -ENOMEM; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, 0); if (!page) break; diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 27563816e1c5..15f710f2d4da 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -675,7 +675,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, goto out_trans_fail; error = -ENOMEM; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 3a31451ac170..5c538e0ec14b 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -501,7 +501,7 @@ int hostfs_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 5a98aa87c853..5edc2bf20581 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -132,7 +132,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; - pg = __grab_cache_page(mapping, index); + pg = grab_cache_page_write_begin(mapping, index, flags); if (!pg) return -ENOMEM; *pagep = pg; diff --git a/fs/libfs.c b/fs/libfs.c index e960a8321902..bdaec17fa388 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -360,7 +360,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping, index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; diff --git a/fs/namei.c b/fs/namei.c index dd5c9f0bf829..df2d3df4f049 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2817,18 +2817,23 @@ void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) } } -int __page_symlink(struct inode *inode, const char *symname, int len, - gfp_t gfp_mask) +/* + * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS + */ +int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; char *kaddr; + unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; + if (nofs) + flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, - AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); + flags, &page, &fsdata); if (err) goto fail; @@ -2852,7 +2857,7 @@ fail: int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, - 
mapping_gfp_mask(inode->i_mapping)); + !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); } const struct inode_operations page_symlink_inode_operations = { diff --git a/fs/nfs/file.c b/fs/nfs/file.c index d319b49f8f06..90f292b520d2 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -354,7 +354,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, file->f_path.dentry->d_name.name, mapping->host->i_ino, len, (long long) pos); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 145c2d3e5e01..ed04f47007f8 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2561,7 +2561,7 @@ static int reiserfs_write_begin(struct file *file, } index = pos >> PAGE_CACHE_SHIFT; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c index e4f8d51a5553..92d5e8ffb639 100644 --- a/fs/smbfs/file.c +++ b/fs/smbfs/file.c @@ -297,7 +297,7 @@ static int smb_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index fe82d2464d46..bf37374567fa 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -219,7 +219,8 @@ static void release_existing_page_budget(struct ubifs_info *c) } static int write_begin_slow(struct address_space *mapping, - loff_t pos, unsigned len, struct page **pagep) + loff_t pos, unsigned len, struct page **pagep, + unsigned flags) { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; @@ -247,7 +248,7 @@ static int write_begin_slow(struct address_space *mapping, if (unlikely(err)) return err; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) { ubifs_release_budget(c, &req); return -ENOMEM; @@ -438,7 +439,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, return -EROFS; /* Try out the fast-path part first */ - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) return -ENOMEM; @@ -483,7 +484,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, unlock_page(page); page_cache_release(page); - return write_begin_slow(mapping, pos, len, pagep); + return write_begin_slow(mapping, pos, len, pagep, flags); } /* -- cgit From e9079cce201784632aed4b1a3121ee38c1ced0b6 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 14 Oct 2008 14:43:29 +0100 Subject: GFS2: Support for FIEMAP ioctl This patch implements the FIEMAP ioctl for GFS2. We can use the generic code (aside from a lock order issue, solved as per Ted Tso's suggestion) for which I've introduced a new variant of the generic function. We also have one exception to deal with, namely stuffed files, so we do that "by hand", setting all the required flags. This has been tested with a modified (I could only find an old version) of Eric's test program, and appears to work correctly. This patch does not currently support FIEMAP of xattrs, but the plan is to add that feature at some future point. 
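For reference, a minimal user-space check (this is not Eric's test program; error handling is mostly omitted) that requests the first extent and looks for the DATA_INLINE flag which the stuffed-inode case above sets:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	/* room for the header plus one returned extent */
	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 1;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 1;

	if (fm->fm_mapped_extents &&
	    (fm->fm_extents[0].fe_flags & FIEMAP_EXTENT_DATA_INLINE))
		printf("%s: data is stuffed in the dinode\n", argv[1]);
	return 0;
}
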
Signed-off-by: Steven Whitehouse Cc: Theodore Tso Cc: Eric Sandeen --- fs/gfs2/ops_inode.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ fs/ioctl.c | 44 ++++++++++++++++++++++++++++++++++---------- 2 files changed, 80 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index d232991b9046..1e24b65e1d23 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "gfs2.h" @@ -1212,6 +1213,48 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name) return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er); } +static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 len) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + int ret; + + ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); + if (ret) + return ret; + + mutex_lock(&inode->i_mutex); + + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); + if (ret) + goto out; + + if (gfs2_is_stuffed(ip)) { + u64 phys = ip->i_no_addr << inode->i_blkbits; + u64 size = i_size_read(inode); + u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED| + FIEMAP_EXTENT_DATA_INLINE; + phys += sizeof(struct gfs2_dinode); + phys += start; + if (start + len > size) + len = size - start; + if (start < size) + ret = fiemap_fill_next_extent(fieinfo, start, phys, + len, flags); + if (ret == 1) + ret = 0; + } else { + ret = __generic_block_fiemap(inode, fieinfo, start, len, + gfs2_block_map); + } + + gfs2_glock_dq_uninit(&gh); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + const struct inode_operations gfs2_file_iops = { .permission = gfs2_permission, .setattr = gfs2_setattr, @@ -1220,6 +1263,7 @@ const struct inode_operations gfs2_file_iops = { .getxattr = gfs2_getxattr, .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, + .fiemap = gfs2_fiemap, }; const struct inode_operations gfs2_dir_iops = { @@ -1239,6 +1283,7 @@ const struct inode_operations gfs2_dir_iops = { .getxattr = gfs2_getxattr, .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, + .fiemap = gfs2_fiemap, }; const struct inode_operations gfs2_symlink_iops = { @@ -1251,5 +1296,6 @@ const struct inode_operations gfs2_symlink_iops = { .getxattr = gfs2_getxattr, .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, + .fiemap = gfs2_fiemap, }; diff --git a/fs/ioctl.c b/fs/ioctl.c index 43e8b2c0664b..cc3f1aa1cf7b 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -231,7 +231,8 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) #define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits) #define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits); -/* +/** + * __generic_block_fiemap - FIEMAP for block based inodes (no locking) * @inode - the inode to map * @arg - the pointer to userspace where we copy everything to * @get_block - the fs's get_block function @@ -242,11 +243,15 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) * * If it is possible to have data blocks beyond a hole past @inode->i_size, then * please do not use this function, it will stop at the first unmapped block - * beyond i_size + * beyond i_size. + * + * If you use this function directly, you need to do your own locking. Use + * generic_block_fiemap if you want the locking done for you. 
*/ -int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, - u64 len, get_block_t *get_block) + +int __generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block) { struct buffer_head tmp; unsigned int start_blk; @@ -260,9 +265,6 @@ int generic_block_fiemap(struct inode *inode, start_blk = logical_to_blk(inode, start); - /* guard against change */ - mutex_lock(&inode->i_mutex); - length = (long long)min_t(u64, len, i_size_read(inode)); map_len = length; @@ -334,14 +336,36 @@ int generic_block_fiemap(struct inode *inode, cond_resched(); } while (1); - mutex_unlock(&inode->i_mutex); - /* if ret is 1 then we just hit the end of the extent array */ if (ret == 1) ret = 0; return ret; } +EXPORT_SYMBOL(__generic_block_fiemap); + +/** + * generic_block_fiemap - FIEMAP for block based inodes + * @inode: The inode to map + * @fieinfo: The mapping information + * @start: The initial block to map + * @len: The length of the extect to attempt to map + * @get_block: The block mapping function for the fs + * + * Calls __generic_block_fiemap to map the inode, after taking + * the inode's mutex lock. + */ + +int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block) +{ + int ret; + mutex_lock(&inode->i_mutex); + ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); + mutex_unlock(&inode->i_mutex); + return ret; +} EXPORT_SYMBOL(generic_block_fiemap); #endif /* CONFIG_BLOCK */ -- cgit From b276058371f5c2ad92f9f27373a72b219ed580ed Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 14 Oct 2008 16:05:55 +0100 Subject: GFS2: Rationalise header files Move the contents of some headers which contained very little into more sensible places, and remove the original header files. This should make it easier to find things. 
Signed-off-by: Steven Whitehouse --- fs/gfs2/inode.c | 1 - fs/gfs2/inode.h | 11 +++++++++++ fs/gfs2/main.c | 2 +- fs/gfs2/ops_dentry.c | 2 +- fs/gfs2/ops_dentry.h | 17 ----------------- fs/gfs2/ops_export.c | 3 +-- fs/gfs2/ops_file.c | 1 - fs/gfs2/ops_fstype.c | 3 --- fs/gfs2/ops_fstype.h | 19 ------------------- fs/gfs2/ops_inode.c | 3 +-- fs/gfs2/ops_inode.h | 25 ------------------------- fs/gfs2/ops_super.c | 1 - fs/gfs2/ops_super.h | 17 ----------------- fs/gfs2/super.h | 8 ++++++++ 14 files changed, 23 insertions(+), 90 deletions(-) delete mode 100644 fs/gfs2/ops_dentry.h delete mode 100644 fs/gfs2/ops_fstype.h delete mode 100644 fs/gfs2/ops_inode.h delete mode 100644 fs/gfs2/ops_super.h (limited to 'fs') diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index d57616840e89..bf4676d7acd5 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -32,7 +32,6 @@ #include "log.h" #include "meta_io.h" #include "ops_address.h" -#include "ops_inode.h" #include "quota.h" #include "rgrp.h" #include "trans.h" diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 2d43f69610a0..c3577906f0ad 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -10,6 +10,7 @@ #ifndef __INODE_DOT_H__ #define __INODE_DOT_H__ +#include #include "util.h" static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) @@ -97,5 +98,15 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); void gfs2_dinode_print(const struct gfs2_inode *ip); +extern const struct inode_operations gfs2_file_iops; +extern const struct inode_operations gfs2_dir_iops; +extern const struct inode_operations gfs2_symlink_iops; +extern const struct file_operations gfs2_file_fops; +extern const struct file_operations gfs2_dir_fops; +extern const struct file_operations gfs2_file_fops_nolock; +extern const struct file_operations gfs2_dir_fops_nolock; + +extern void gfs2_set_inode_flags(struct inode *inode); + #endif /* __INODE_DOT_H__ */ diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index bb2cc303ac29..3eea03c78534 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -19,7 +19,7 @@ #include "gfs2.h" #include "incore.h" -#include "ops_fstype.h" +#include "super.h" #include "sys.h" #include "util.h" #include "glock.h" diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c index 4a5e676b4420..c2ad36330ca3 100644 --- a/fs/gfs2/ops_dentry.c +++ b/fs/gfs2/ops_dentry.c @@ -19,7 +19,7 @@ #include "incore.h" #include "dir.h" #include "glock.h" -#include "ops_dentry.h" +#include "super.h" #include "util.h" #include "inode.h" diff --git a/fs/gfs2/ops_dentry.h b/fs/gfs2/ops_dentry.h deleted file mode 100644 index 5caa3db4d3f5..000000000000 --- a/fs/gfs2/ops_dentry.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. 
- */ - -#ifndef __OPS_DENTRY_DOT_H__ -#define __OPS_DENTRY_DOT_H__ - -#include - -extern struct dentry_operations gfs2_dops; - -#endif /* __OPS_DENTRY_DOT_H__ */ diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c index bbb8c36403a9..3a9b9b438344 100644 --- a/fs/gfs2/ops_export.c +++ b/fs/gfs2/ops_export.c @@ -22,8 +22,7 @@ #include "glock.h" #include "glops.h" #include "inode.h" -#include "ops_dentry.h" -#include "ops_fstype.h" +#include "super.h" #include "rgrp.h" #include "util.h" diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index 3a747f8e2188..fcfaaefc92fb 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -39,7 +39,6 @@ #include "util.h" #include "eaops.h" #include "ops_address.h" -#include "ops_inode.h" /** * gfs2_llseek - seek to a location in a file diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index b117fcf2c4f5..ca463a450eb9 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -27,9 +27,6 @@ #include "glops.h" #include "inode.h" #include "mount.h" -#include "ops_fstype.h" -#include "ops_dentry.h" -#include "ops_super.h" #include "recovery.h" #include "rgrp.h" #include "super.h" diff --git a/fs/gfs2/ops_fstype.h b/fs/gfs2/ops_fstype.h deleted file mode 100644 index da8490511836..000000000000 --- a/fs/gfs2/ops_fstype.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. - */ - -#ifndef __OPS_FSTYPE_DOT_H__ -#define __OPS_FSTYPE_DOT_H__ - -#include - -extern struct file_system_type gfs2_fs_type; -extern struct file_system_type gfs2meta_fs_type; -extern const struct export_operations gfs2_export_ops; - -#endif /* __OPS_FSTYPE_DOT_H__ */ diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 1e24b65e1d23..98440fef01cf 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -32,12 +32,11 @@ #include "glock.h" #include "inode.h" #include "meta_io.h" -#include "ops_dentry.h" -#include "ops_inode.h" #include "quota.h" #include "rgrp.h" #include "trans.h" #include "util.h" +#include "super.h" /** * gfs2_create - Create a file diff --git a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h deleted file mode 100644 index 14b4b797622a..000000000000 --- a/fs/gfs2/ops_inode.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. 
- */ - -#ifndef __OPS_INODE_DOT_H__ -#define __OPS_INODE_DOT_H__ - -#include - -extern const struct inode_operations gfs2_file_iops; -extern const struct inode_operations gfs2_dir_iops; -extern const struct inode_operations gfs2_symlink_iops; -extern const struct file_operations gfs2_file_fops; -extern const struct file_operations gfs2_dir_fops; -extern const struct file_operations gfs2_file_fops_nolock; -extern const struct file_operations gfs2_dir_fops_nolock; - -extern void gfs2_set_inode_flags(struct inode *inode); - -#endif /* __OPS_INODE_DOT_H__ */ diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index d5355d9b5926..9c7678db08fb 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -28,7 +28,6 @@ #include "inode.h" #include "log.h" #include "mount.h" -#include "ops_super.h" #include "quota.h" #include "recovery.h" #include "rgrp.h" diff --git a/fs/gfs2/ops_super.h b/fs/gfs2/ops_super.h deleted file mode 100644 index 442a274c6272..000000000000 --- a/fs/gfs2/ops_super.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. - */ - -#ifndef __OPS_SUPER_DOT_H__ -#define __OPS_SUPER_DOT_H__ - -#include - -extern const struct super_operations gfs2_super_ops; - -#endif /* __OPS_SUPER_DOT_H__ */ diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index 50a4c9b1215e..1848dad3ecba 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -10,6 +10,8 @@ #ifndef __SUPER_DOT_H__ #define __SUPER_DOT_H__ +#include +#include #include "incore.h" void gfs2_lm_unmount(struct gfs2_sbd *sdp); @@ -46,5 +48,11 @@ int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); int gfs2_freeze_fs(struct gfs2_sbd *sdp); void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); +extern struct file_system_type gfs2_fs_type; +extern struct file_system_type gfs2meta_fs_type; +extern const struct export_operations gfs2_export_ops; +extern const struct super_operations gfs2_super_ops; +extern struct dentry_operations gfs2_dops; + #endif /* __SUPER_DOT_H__ */ -- cgit From 1bb7322fd0d5abdce396de51cbc5dbc489523018 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 15 Oct 2008 09:46:39 +0100 Subject: GFS2: Fix up jdata writepage/delete_inode There is a bug in writepage and delete_inode which allows jdata files to invalidate pages from the address space without being in a transaction at the time. This causes problems in case the pages are in the journal. This patch fixes that case and prevents the resulting oops. 
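In outline, the fix moves the transaction open in front of the common writepage path, so that any page invalidation it triggers happens with a transaction already running. A simplified sketch of the patched gfs2_jdata_writepage(), matching the diff below with the error paths trimmed to the essentials:

    static int gfs2_jdata_writepage(struct page *page,
                                    struct writeback_control *wbc)
    {
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int ret, done_trans = 0;

        if (PageChecked(page)) {            /* page holds journaled data */
            if (wbc->sync_mode != WB_SYNC_ALL)
                goto out_ignore;
            ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
            if (ret)
                goto out_ignore;
            done_trans = 1;
        }

        /* The common checks (and any resulting invalidation) now run
           inside the transaction. */
        ret = gfs2_writepage_common(page, wbc);
        if (ret > 0)
            ret = __gfs2_jdata_writepage(page, wbc);

        if (done_trans)
            gfs2_trans_end(sdp);
        return ret;

    out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
    }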
Signed-off-by: Steven Whitehouse --- fs/gfs2/ops_address.c | 16 +++++++--------- fs/gfs2/ops_super.c | 7 ++++--- 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 15f710f2d4da..574b222feefc 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -210,25 +210,23 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc { struct inode *inode = page->mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); - int error; + int ret; int done_trans = 0; - error = gfs2_writepage_common(page, wbc); - if (error <= 0) - return error; - if (PageChecked(page)) { if (wbc->sync_mode != WB_SYNC_ALL) goto out_ignore; - error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); - if (error) + ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); + if (ret) goto out_ignore; done_trans = 1; } - error = __gfs2_jdata_writepage(page, wbc); + ret = gfs2_writepage_common(page, wbc); + if (ret > 0) + ret = __gfs2_jdata_writepage(page, wbc); if (done_trans) gfs2_trans_end(sdp); - return error; + return ret; out_ignore: redirty_page_for_writepage(wbc, page); diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 9c7678db08fb..2cb744ba3b77 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -493,7 +493,7 @@ static void gfs2_delete_inode(struct inode *inode) gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); error = gfs2_glock_nq(&ip->i_iopen_gh); if (error) - goto out_uninit; + goto out_truncate; if (S_ISDIR(inode->i_mode) && (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { @@ -518,6 +518,7 @@ static void gfs2_delete_inode(struct inode *inode) if (error) goto out_unlock; +out_truncate: error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); if (error) goto out_unlock; @@ -526,8 +527,8 @@ static void gfs2_delete_inode(struct inode *inode) gfs2_trans_end(sdp); out_unlock: - gfs2_glock_dq(&ip->i_iopen_gh); -out_uninit: + if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) + gfs2_glock_dq(&ip->i_iopen_gh); gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_glock_dq_uninit(&gh); if (error && error != GLR_TRYFAILED) -- cgit From 55ba474daed9763b2f6fe26ad762ee373554d65e Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 24 Oct 2008 11:31:12 -0700 Subject: GFS2: sparse annotation of gl->gl_spin fs/gfs2/glock.c:308:5: warning: context problem in 'do_promote': '_spin_unlock' expected different context fs/gfs2/glock.c:308:5: context '*gl+28': wanted >= 1, got 0 fs/gfs2/glock.c:529:2: warning: context problem in 'do_xmote': '_spin_unlock' expected different context fs/gfs2/glock.c:529:2: context '*gl+28': wanted >= 1, got 0 fs/gfs2/glock.c:925:3: warning: context problem in 'add_to_queue': '_spin_unlock' expected different context fs/gfs2/glock.c:925:3: context '*gl+28': wanted >= 1, got 0 Signed-off-by: Harvey Harrison Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index c962283d4e7f..27cb9cca9c08 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -293,6 +293,8 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) */ static int do_promote(struct gfs2_glock *gl) +__releases(&gl->gl_spin) +__acquires(&gl->gl_spin) { const struct gfs2_glock_operations *glops = gl->gl_ops; struct gfs2_holder *gh, *tmp; @@ -511,6 +513,8 @@ static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, */ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int 
target) +__releases(&gl->gl_spin) +__acquires(&gl->gl_spin) { const struct gfs2_glock_operations *glops = gl->gl_ops; struct gfs2_sbd *sdp = gl->gl_sbd; @@ -576,6 +580,8 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) */ static void run_queue(struct gfs2_glock *gl, const int nonblock) +__releases(&gl->gl_spin) +__acquires(&gl->gl_spin) { struct gfs2_holder *gh = NULL; @@ -877,6 +883,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) */ static inline void add_to_queue(struct gfs2_holder *gh) +__releases(&gl->gl_spin) +__acquires(&gl->gl_spin) { struct gfs2_glock *gl = gh->gh_gl; struct gfs2_sbd *sdp = gl->gl_sbd; -- cgit From bcf0b5b348a1f49c2c878ffdb78e68c930baabb8 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 3 Nov 2008 13:39:46 +0000 Subject: GFS2: Move generation number into "proper" part of inode This moves the generation number from the gfs2_dinode_host into the gfs2_inode structure. Eventually the plan is to get rid of the gfs2_dinode_host structure completely. Signed-off-by: Steven Whitehouse --- fs/gfs2/incore.h | 2 +- fs/gfs2/inode.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index f566ec1b4e8e..4ff1d7ecd98a 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -235,7 +235,6 @@ enum { struct gfs2_dinode_host { u64 di_size; /* number of bytes in file */ - u64 di_generation; /* generation number for NFS */ u32 di_flags; /* GFS2_DIF_... */ /* These only apply to directories */ u32 di_entries; /* The number of entries in the directory */ @@ -246,6 +245,7 @@ struct gfs2_inode { struct inode i_inode; u64 i_no_addr; u64 i_no_formal_ino; + u64 i_generation; unsigned long i_flags; /* GIF_... */ struct gfs2_dinode_host i_di; /* To be replaced by ref to block */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index bf4676d7acd5..9d97f699c81a 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -286,7 +286,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); ip->i_goal = be64_to_cpu(str->di_goal_meta); - di->di_generation = be64_to_cpu(str->di_generation); + ip->i_generation = be64_to_cpu(str->di_generation); di->di_flags = be32_to_cpu(str->di_flags); gfs2_set_inode_flags(&ip->i_inode); @@ -1263,7 +1263,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) str->di_goal_meta = cpu_to_be64(ip->i_goal); str->di_goal_data = cpu_to_be64(ip->i_goal); - str->di_generation = cpu_to_be64(di->di_generation); + str->di_generation = cpu_to_be64(ip->i_generation); str->di_flags = cpu_to_be32(di->di_flags); str->di_height = cpu_to_be16(ip->i_height); -- cgit From ad6203f2b46c2217f74b2e88299640eef5889e72 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 3 Nov 2008 13:59:19 +0000 Subject: GFS2: Move "entries" into "proper" inode This moves the directory entry count into the proper inode. Potentially we could get this to share the space used by something else in the future, but this is one more step on the way to removing the gfs2_dinode_host structure. 
Signed-off-by: Steven Whitehouse --- fs/gfs2/dir.c | 20 ++++++++++---------- fs/gfs2/incore.h | 3 +-- fs/gfs2/inode.c | 10 +++++----- fs/gfs2/ops_inode.c | 14 +++++++------- 4 files changed, 23 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index eed040d8ba3a..830cf48184e3 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -858,8 +858,8 @@ static int dir_make_exhash(struct inode *inode) return -ENOSPC; bn = bh->b_blocknr; - gfs2_assert(sdp, dip->i_di.di_entries < (1 << 16)); - leaf->lf_entries = cpu_to_be16(dip->i_di.di_entries); + gfs2_assert(sdp, dip->i_entries < (1 << 16)); + leaf->lf_entries = cpu_to_be16(dip->i_entries); /* Copy dirents */ @@ -1426,7 +1426,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, int copied = 0; int error; - if (!dip->i_di.di_entries) + if (!dip->i_entries) return 0; if (dip->i_di.di_flags & GFS2_DIF_EXHASH) @@ -1453,17 +1453,17 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, error = PTR_ERR(dent); goto out; } - if (dip->i_di.di_entries != g.offset) { + if (dip->i_entries != g.offset) { fs_warn(sdp, "Number of entries corrupt in dir %llu, " - "ip->i_di.di_entries (%u) != g.offset (%u)\n", + "ip->i_entries (%u) != g.offset (%u)\n", (unsigned long long)dip->i_no_addr, - dip->i_di.di_entries, + dip->i_entries, g.offset); error = -EIO; goto out; } error = do_filldir_main(dip, offset, opaque, filldir, darr, - dip->i_di.di_entries, &copied); + dip->i_entries, &copied); out: kfree(darr); } @@ -1621,7 +1621,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name, if (error) break; gfs2_trans_add_bh(ip->i_gl, bh, 1); - ip->i_di.di_entries++; + ip->i_entries++; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, bh->b_data); brelse(bh); @@ -1704,10 +1704,10 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name) if (error) return error; - if (!dip->i_di.di_entries) + if (!dip->i_entries) gfs2_consist_inode(dip); gfs2_trans_add_bh(dip->i_gl, bh, 1); - dip->i_di.di_entries--; + dip->i_entries--; dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(dip, bh->b_data); brelse(bh); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 4ff1d7ecd98a..15ca3a75cf12 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -236,8 +236,6 @@ enum { struct gfs2_dinode_host { u64 di_size; /* number of bytes in file */ u32 di_flags; /* GFS2_DIF_... 
*/ - /* These only apply to directories */ - u32 di_entries; /* The number of entries in the directory */ u64 di_eattr; /* extended attribute block number */ }; @@ -256,6 +254,7 @@ struct gfs2_inode { struct gfs2_alloc *i_alloc; u64 i_goal; /* goal block for allocations */ struct rw_semaphore i_rw_mutex; + u32 i_entries; u8 i_height; u8 i_depth; }; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 9d97f699c81a..015d4c007086 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -299,7 +299,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) goto corrupt; ip->i_depth = (u8)depth; - di->di_entries = be32_to_cpu(str->di_entries); + ip->i_entries = be32_to_cpu(str->di_entries); di->di_eattr = be64_to_cpu(str->di_eattr); if (S_ISREG(ip->i_inode.i_mode)) @@ -689,7 +689,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, return error; } - if (dip->i_di.di_entries == (u32)-1) + if (dip->i_entries == (u32)-1) return -EFBIG; if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) return -EMLINK; @@ -1067,7 +1067,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, struct qstr dotname; int error; - if (ip->i_di.di_entries != 2) { + if (ip->i_entries != 2) { if (gfs2_consist_inode(ip)) gfs2_dinode_print(ip); return -EIO; @@ -1271,7 +1271,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? GFS2_FORMAT_DE : 0); str->di_depth = cpu_to_be16(ip->i_depth); - str->di_entries = cpu_to_be32(di->di_entries); + str->di_entries = cpu_to_be32(ip->i_entries); str->di_eattr = cpu_to_be64(di->di_eattr); str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); @@ -1295,7 +1295,7 @@ void gfs2_dinode_print(const struct gfs2_inode *ip) printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags); printk(KERN_INFO " i_height = %u\n", ip->i_height); printk(KERN_INFO " i_depth = %u\n", ip->i_depth); - printk(KERN_INFO " di_entries = %u\n", di->di_entries); + printk(KERN_INFO " i_entries = %u\n", ip->i_entries); printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr); } diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 98440fef01cf..48468f48d7bf 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -185,7 +185,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; - if (dip->i_di.di_entries == (u32)-1) + if (dip->i_entries == (u32)-1) goto out_gunlock; error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) @@ -427,7 +427,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) ip->i_inode.i_nlink = 2; ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); ip->i_di.di_flags |= GFS2_DIF_JDATA; - ip->i_di.di_entries = 2; + ip->i_entries = 2; error = gfs2_meta_inode_buffer(ip, &dibh); @@ -517,13 +517,13 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry) if (error) goto out_gunlock; - if (ip->i_di.di_entries < 2) { + if (ip->i_entries < 2) { if (gfs2_consist_inode(ip)) gfs2_dinode_print(ip); error = -EIO; goto out_gunlock; } - if (ip->i_di.di_entries > 2) { + if (ip->i_entries > 2) { error = -ENOTEMPTY; goto out_gunlock; } @@ -726,13 +726,13 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, goto out_gunlock; if (S_ISDIR(nip->i_inode.i_mode)) { - if (nip->i_di.di_entries < 2) { + if (nip->i_entries < 2) { if (gfs2_consist_inode(nip)) gfs2_dinode_print(nip); error = -EIO; goto out_gunlock; 
} - if (nip->i_di.di_entries > 2) { + if (nip->i_entries > 2) { error = -ENOTEMPTY; goto out_gunlock; } @@ -758,7 +758,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, error = -EINVAL; goto out_gunlock; } - if (ndip->i_di.di_entries == (u32)-1) { + if (ndip->i_entries == (u32)-1) { error = -EFBIG; goto out_gunlock; } -- cgit From 3767ac21f471fe669a7d9f6abef682ddac8fc3d8 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 3 Nov 2008 14:28:42 +0000 Subject: GFS2: Move di_eattr into "proper" inode This moves the di_eattr field out of gfs2_inode_host and into the inode proper. Signed-off-by: Steven Whitehouse --- fs/gfs2/acl.c | 2 +- fs/gfs2/eattr.c | 26 +++++++++++++------------- fs/gfs2/incore.h | 2 +- fs/gfs2/inode.c | 8 ++++---- fs/gfs2/ops_super.c | 2 +- 5 files changed, 20 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 3e9bd46f27e3..e335dceb6a4f 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -91,7 +91,7 @@ static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl, struct gfs2_ea_location el_this; int error; - if (!ip->i_di.di_eattr) + if (!ip->i_eattr) return 0; memset(&er, 0, sizeof(struct gfs2_ea_request)); diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c index e3f76f451b0a..1c1e06136aaa 100644 --- a/fs/gfs2/eattr.c +++ b/fs/gfs2/eattr.c @@ -114,7 +114,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) __be64 *eablk, *end; int error; - error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh); + error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh); if (error) return error; @@ -414,7 +414,7 @@ int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er) if (error) return error; - if (ip->i_di.di_eattr) { + if (ip->i_eattr) { struct ea_list ei = { .ei_er = er, .ei_size = 0 }; error = ea_foreach(ip, ea_list_i, &ei); @@ -514,7 +514,7 @@ int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er) struct gfs2_ea_location el; int error; - if (!ip->i_di.di_eattr) + if (!ip->i_eattr) return -ENODATA; error = gfs2_ea_find(ip, er, &el); @@ -741,7 +741,7 @@ static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (error) return error; - ip->i_di.di_eattr = bh->b_blocknr; + ip->i_eattr = bh->b_blocknr; error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er); brelse(bh); @@ -938,7 +938,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { __be64 *end; - error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, + error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh); if (error) return error; @@ -972,8 +972,8 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, gfs2_buffer_clear_tail(indbh, mh_size); eablk = (__be64 *)(indbh->b_data + mh_size); - *eablk = cpu_to_be64(ip->i_di.di_eattr); - ip->i_di.di_eattr = blk; + *eablk = cpu_to_be64(ip->i_eattr); + ip->i_eattr = blk; ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; gfs2_add_inode_blocks(&ip->i_inode, 1); @@ -1040,7 +1040,7 @@ int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er) struct gfs2_ea_location el; int error; - if (!ip->i_di.di_eattr) { + if (!ip->i_eattr) { if (er->er_flags & XATTR_REPLACE) return -ENODATA; return ea_init(ip, er); @@ -1145,7 +1145,7 @@ int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er) struct gfs2_ea_location el; int error; - if (!ip->i_di.di_eattr) + if (!ip->i_eattr) return -ENODATA; error = gfs2_ea_find(ip, 
er, &el); @@ -1309,7 +1309,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); - error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh); + error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh); if (error) return error; @@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) struct buffer_head *dibh; int error; - rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr); + rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr); if (!rgd) { gfs2_consist_inode(ip); return -EIO; @@ -1432,9 +1432,9 @@ static int ea_dealloc_block(struct gfs2_inode *ip) if (error) goto out_gunlock; - gfs2_free_meta(ip, ip->i_di.di_eattr, 1); + gfs2_free_meta(ip, ip->i_eattr, 1); - ip->i_di.di_eattr = 0; + ip->i_eattr = 0; gfs2_add_inode_blocks(&ip->i_inode, -1); error = gfs2_meta_inode_buffer(ip, &dibh); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 15ca3a75cf12..fb2fd4adaae4 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -236,7 +236,6 @@ enum { struct gfs2_dinode_host { u64 di_size; /* number of bytes in file */ u32 di_flags; /* GFS2_DIF_... */ - u64 di_eattr; /* extended attribute block number */ }; struct gfs2_inode { @@ -244,6 +243,7 @@ struct gfs2_inode { u64 i_no_addr; u64 i_no_formal_ino; u64 i_generation; + u64 i_eattr; unsigned long i_flags; /* GIF_... */ struct gfs2_dinode_host i_di; /* To be replaced by ref to block */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 015d4c007086..91735b8cecd8 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -301,7 +301,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); - di->di_eattr = be64_to_cpu(str->di_eattr); + ip->i_eattr = be64_to_cpu(str->di_eattr); if (S_ISREG(ip->i_inode.i_mode)) gfs2_set_aops(&ip->i_inode); @@ -1273,7 +1273,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) str->di_depth = cpu_to_be16(ip->i_depth); str->di_entries = cpu_to_be32(ip->i_entries); - str->di_eattr = cpu_to_be64(di->di_eattr); + str->di_eattr = cpu_to_be64(ip->i_eattr); str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec); str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec); @@ -1296,7 +1296,7 @@ void gfs2_dinode_print(const struct gfs2_inode *ip) printk(KERN_INFO " i_height = %u\n", ip->i_height); printk(KERN_INFO " i_depth = %u\n", ip->i_depth); printk(KERN_INFO " i_entries = %u\n", ip->i_entries); - printk(KERN_INFO " di_eattr = %llu\n", - (unsigned long long)di->di_eattr); + printk(KERN_INFO " i_eattr = %llu\n", + (unsigned long long)ip->i_eattr); } diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 2cb744ba3b77..aee6cbaf58df 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -502,7 +502,7 @@ static void gfs2_delete_inode(struct inode *inode) goto out_unlock; } - if (ip->i_di.di_eattr) { + if (ip->i_eattr) { error = gfs2_ea_dealloc(ip); if (error) goto out_unlock; -- cgit From c9e98886776386f1f7828d9685e78cd341849867 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 4 Nov 2008 09:47:33 +0000 Subject: GFS2: Move i_size from gfs2_dinode_host and rename it to i_disksize This patch moved the i_size field from the gfs2_dinode_host and following the ext3 convention renames it i_disksize. 
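The name follows ext3, which keeps the size recorded in the on-disk inode separate from the in-core VFS i_size. After this patch the two are kept in step when the dinode is read in and written back, roughly as in these lines from gfs2_dinode_in()/gfs2_dinode_out() in the diff below:

    /* dinode read-in: the on-disk size seeds both fields */
    ip->i_disksize = be64_to_cpu(str->di_size);
    i_size_write(&ip->i_inode, ip->i_disksize);

    /* dinode write-out: i_disksize is what goes back to the media */
    str->di_size = cpu_to_be64(ip->i_disksize);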
Signed-off-by: Steven Whitehouse --- fs/gfs2/bmap.c | 26 +++++++++++++------------- fs/gfs2/dir.c | 26 +++++++++++++------------- fs/gfs2/incore.h | 2 +- fs/gfs2/inode.c | 13 +++++++------ fs/gfs2/ops_address.c | 10 +++++----- fs/gfs2/ops_file.c | 2 +- fs/gfs2/ops_fstype.c | 2 +- fs/gfs2/ops_inode.c | 10 +++++----- fs/gfs2/quota.c | 6 +++--- fs/gfs2/rgrp.c | 6 +++--- fs/gfs2/super.c | 8 ++++---- 11 files changed, 56 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index bec76b1c2bb0..b43aee75d3c2 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -75,9 +75,9 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, void *kaddr = kmap(page); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), - ip->i_di.di_size); - memset(kaddr + ip->i_di.di_size, 0, - PAGE_CACHE_SIZE - ip->i_di.di_size); + ip->i_disksize); + memset(kaddr + ip->i_disksize, 0, + PAGE_CACHE_SIZE - ip->i_disksize); kunmap(page); SetPageUptodate(page); @@ -132,7 +132,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) if (error) goto out; - if (ip->i_di.di_size) { + if (ip->i_disksize) { /* Get a free block, fill it with the stuffed data, and write it out to disk */ @@ -159,7 +159,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) di = (struct gfs2_dinode *)dibh->b_data; gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); - if (ip->i_di.di_size) { + if (ip->i_disksize) { *(__be64 *)(di + 1) = cpu_to_be64(block); gfs2_add_inode_blocks(&ip->i_inode, 1); di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); @@ -926,7 +926,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size) } } - ip->i_di.di_size = size; + ip->i_disksize = size; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); @@ -1033,7 +1033,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) goto out; if (gfs2_is_stuffed(ip)) { - ip->i_di.di_size = size; + ip->i_disksize = size; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); @@ -1045,7 +1045,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) error = gfs2_block_truncate_page(ip->i_inode.i_mapping); if (!error) { - ip->i_di.di_size = size; + ip->i_disksize = size; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); @@ -1114,7 +1114,7 @@ static int trunc_end(struct gfs2_inode *ip) if (error) goto out; - if (!ip->i_di.di_size) { + if (!ip->i_disksize) { ip->i_height = 0; ip->i_goal = ip->i_no_addr; gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); @@ -1205,9 +1205,9 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size) if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode))) return -EINVAL; - if (size > ip->i_di.di_size) + if (size > ip->i_disksize) error = do_grow(ip, size); - else if (size < ip->i_di.di_size) + else if (size < ip->i_disksize) error = do_shrink(ip, size); else /* update time stamps */ @@ -1219,7 +1219,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size) int gfs2_truncatei_resume(struct gfs2_inode *ip) { int error; - error = trunc_dealloc(ip, ip->i_di.di_size); + error = trunc_dealloc(ip, ip->i_disksize); if (!error) error = trunc_end(ip); return error; @@ -1298,7 +1298,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, lblock_stop = offset + len + bsize - 1; 
do_div(lblock_stop, bsize); } else { - u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift; + u64 end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; lblock = offset >> shift; lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; if (lblock_stop > end_of_file) diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 830cf48184e3..d8d823240542 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -128,8 +128,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf, gfs2_trans_add_bh(ip->i_gl, dibh, 1); memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); - if (ip->i_di.di_size < offset + size) - ip->i_di.di_size = offset + size; + if (ip->i_disksize < offset + size) + ip->i_disksize = offset + size; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, dibh->b_data); @@ -226,8 +226,8 @@ out: if (error) return error; - if (ip->i_di.di_size < offset + copied) - ip->i_di.di_size = offset + copied; + if (ip->i_disksize < offset + copied) + ip->i_disksize = offset + copied; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); @@ -277,11 +277,11 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset, int copied = 0; int error = 0; - if (offset >= ip->i_di.di_size) + if (offset >= ip->i_disksize) return 0; - if (offset + size > ip->i_di.di_size) - size = ip->i_di.di_size - offset; + if (offset + size > ip->i_disksize) + size = ip->i_disksize - offset; if (!size) return 0; @@ -760,7 +760,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode, unsigned hsize = 1 << ip->i_depth; unsigned index; u64 ln; - if (hsize * sizeof(u64) != ip->i_di.di_size) { + if (hsize * sizeof(u64) != ip->i_disksize) { gfs2_consist_inode(ip); return ERR_PTR(-EIO); } @@ -905,7 +905,7 @@ static int dir_make_exhash(struct inode *inode) for (x = sdp->sd_hash_ptrs; x--; lp++) *lp = cpu_to_be64(bn); - dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2; + dip->i_disksize = sdp->sd_sb.sb_bsize / 2; gfs2_add_inode_blocks(&dip->i_inode, 1); dip->i_di.di_flags |= GFS2_DIF_EXHASH; @@ -1082,7 +1082,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) int error = 0; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != dip->i_di.di_size) { + if (hsize * sizeof(u64) != dip->i_disksize) { gfs2_consist_inode(dip); return -EIO; } @@ -1091,7 +1091,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL); - for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) { + for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { error = gfs2_dir_read_data(dip, (char *)buf, block * sdp->sd_hash_bsize, sdp->sd_hash_bsize, 1); @@ -1370,7 +1370,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, unsigned depth = 0; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != dip->i_di.di_size) { + if (hsize * sizeof(u64) != dip->i_disksize) { gfs2_consist_inode(dip); return -EIO; } @@ -1784,7 +1784,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data) int error = 0; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != dip->i_di.di_size) { + if (hsize * sizeof(u64) != dip->i_disksize) { gfs2_consist_inode(dip); return -EIO; } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index fb2fd4adaae4..4596cd254be6 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -234,7 +234,6 @@ enum { }; struct gfs2_dinode_host { - u64 di_size; /* number of 
bytes in file */ u32 di_flags; /* GFS2_DIF_... */ }; @@ -244,6 +243,7 @@ struct gfs2_inode { u64 i_no_formal_ino; u64 i_generation; u64 i_eattr; + loff_t i_disksize; unsigned long i_flags; /* GIF_... */ struct gfs2_dinode_host i_di; /* To be replaced by ref to block */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 91735b8cecd8..baf8b24b2de7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -273,8 +273,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) * to do that. */ ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); - di->di_size = be64_to_cpu(str->di_size); - i_size_write(&ip->i_inode, di->di_size); + ip->i_disksize = be64_to_cpu(str->di_size); + i_size_write(&ip->i_inode, ip->i_disksize); gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); atime.tv_sec = be64_to_cpu(str->di_atime); atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); @@ -1167,7 +1167,7 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) return error; } - if (!ip->i_di.di_size) { + if (!ip->i_disksize) { gfs2_consist_inode(ip); error = -EIO; goto out; @@ -1177,7 +1177,7 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) if (error) goto out; - x = ip->i_di.di_size + 1; + x = ip->i_disksize + 1; if (x > *len) { *buf = kmalloc(x, GFP_NOFS); if (!*buf) { @@ -1255,7 +1255,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) str->di_uid = cpu_to_be32(ip->i_inode.i_uid); str->di_gid = cpu_to_be32(ip->i_inode.i_gid); str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); - str->di_size = cpu_to_be64(di->di_size); + str->di_size = cpu_to_be64(ip->i_disksize); str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); @@ -1287,7 +1287,8 @@ void gfs2_dinode_print(const struct gfs2_inode *ip) (unsigned long long)ip->i_no_formal_ino); printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)ip->i_no_addr); - printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); + printk(KERN_INFO " i_disksize = %llu\n", + (unsigned long long)ip->i_disksize); printk(KERN_INFO " blocks = %llu\n", (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); printk(KERN_INFO " i_goal = %llu\n", diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 574b222feefc..0df560f4269a 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -451,8 +451,8 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) kaddr = kmap_atomic(page, KM_USER0); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), - ip->i_di.di_size); - memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size); + ip->i_disksize); + memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize); kunmap_atomic(kaddr, KM_USER0); flush_dcache_page(page); brelse(dibh); @@ -780,7 +780,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, if (inode->i_size < to) { i_size_write(inode, to); - ip->i_di.di_size = inode->i_size; + ip->i_disksize = inode->i_size; di->di_size = cpu_to_be64(inode->i_size); mark_inode_dirty(inode); } @@ -845,9 +845,9 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); - if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) { + if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) { di = (struct gfs2_dinode *)dibh->b_data; - ip->i_di.di_size = 
inode->i_size; + ip->i_disksize = inode->i_size; di->di_size = cpu_to_be64(inode->i_size); mark_inode_dirty(inode); } diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index fcfaaefc92fb..d7e649ed62f2 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -478,7 +478,7 @@ static int gfs2_open(struct inode *inode, struct file *file) goto fail; if (!(file->f_flags & O_LARGEFILE) && - ip->i_di.di_size > MAX_NON_LFS) { + ip->i_disksize > MAX_NON_LFS) { error = -EOVERFLOW; goto fail_gunlock; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index ca463a450eb9..dd83e8322350 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -617,7 +617,7 @@ static int map_journal_extents(struct gfs2_sbd *sdp) prev_db = 0; - for (lb = 0; lb < ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; lb++) { + for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) { bh.b_state = 0; bh.b_blocknr = 0; bh.b_size = 1 << ip->i_inode.i_blkbits; diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 48468f48d7bf..b932d72b5f5b 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -371,7 +371,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, ip = ghs[1].gh_gl->gl_object; - ip->i_di.di_size = size; + ip->i_disksize = size; error = gfs2_meta_inode_buffer(ip, &dibh); @@ -425,7 +425,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) ip = ghs[1].gh_gl->gl_object; ip->i_inode.i_nlink = 2; - ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); + ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); ip->i_di.di_flags |= GFS2_DIF_JDATA; ip->i_entries = 2; @@ -990,7 +990,7 @@ static int setattr_size(struct inode *inode, struct iattr *attr) struct gfs2_sbd *sdp = GFS2_SB(inode); int error; - if (attr->ia_size != ip->i_di.di_size) { + if (attr->ia_size != ip->i_disksize) { error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); if (error) return error; @@ -1001,8 +1001,8 @@ static int setattr_size(struct inode *inode, struct iattr *attr) } error = gfs2_truncatei(ip, attr->ia_size); - if (error && (inode->i_size != ip->i_di.di_size)) - i_size_write(inode, ip->i_di.di_size); + if (error && (inode->i_size != ip->i_disksize)) + i_size_write(inode, ip->i_disksize); return error; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 3e073f5144fa..188d0a277fa3 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1100,15 +1100,15 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void * int gfs2_quota_init(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); - unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; + unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; unsigned int x, slot = 0; unsigned int found = 0; u64 dblock; u32 extlen = 0; int error; - if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) || - ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) { + if (!ip->i_disksize || ip->i_disksize > (64 << 20) || + ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) { gfs2_consist_inode(ip); return -EIO; } diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 2d90fb253505..bdad0dffc6b4 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -501,7 +501,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) for (rgrps = 0;; rgrps++) { loff_t pos = rgrps * sizeof(struct gfs2_rindex); - if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size) + if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize) break; error = gfs2_internal_read(ip, &ra_state, buf, &pos, 
sizeof(struct gfs2_rindex)); @@ -590,7 +590,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct inode *inode = &ip->i_inode; struct file_ra_state ra_state; - u64 rgrp_count = ip->i_di.di_size; + u64 rgrp_count = ip->i_disksize; int error; if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) { @@ -634,7 +634,7 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip) for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) { /* Ignore partials */ if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) > - ip->i_di.di_size) + ip->i_disksize) break; error = read_rindex_entry(ip, &ra_state); if (error) { diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index c3ba3d9d0aac..f5cef2ad7ae1 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -206,14 +206,14 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) int ar; int error; - if (ip->i_di.di_size < (8 << 20) || ip->i_di.di_size > (1 << 30) || - (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) { + if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || + (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { gfs2_consist_inode(ip); return -EIO; } - jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; + jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; - error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar); + error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); if (!error && ar) { gfs2_consist_inode(ip); error = -EIO; -- cgit From 383f01fbf4a701b73f5e35ea805ed1700b4b4db9 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 4 Nov 2008 10:05:22 +0000 Subject: GFS2: Banish struct gfs2_dinode_host The final field in gfs2_dinode_host was the i_flags field. Thats renamed to i_diskflags in order to avoid confusion with the existing inode flags, and moved into the inode proper at a suitable location to avoid creating a "hole". At that point struct gfs2_dinode_host is no longer needed and as promised (quite some time ago!) it can now be removed completely. Signed-off-by: Steven Whitehouse --- fs/gfs2/bmap.c | 4 ++-- fs/gfs2/dir.c | 16 ++++++++-------- fs/gfs2/eattr.c | 14 +++++++------- fs/gfs2/glops.c | 2 +- fs/gfs2/incore.h | 7 +------ fs/gfs2/inode.c | 16 ++++++---------- fs/gfs2/inode.h | 2 +- fs/gfs2/ops_export.c | 2 +- fs/gfs2/ops_file.c | 17 ++++++++--------- fs/gfs2/ops_inode.c | 2 +- fs/gfs2/ops_super.c | 2 +- fs/gfs2/quota.c | 2 +- 12 files changed, 38 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index b43aee75d3c2..789f28cfdc20 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1047,7 +1047,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) if (!error) { ip->i_disksize = size; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; - ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; + ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); } @@ -1120,7 +1120,7 @@ static int trunc_end(struct gfs2_inode *ip) gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); } ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; - ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; + ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index d8d823240542..b7c8e5c70791 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -36,7 +36,7 @@ * the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the * beginning of the leaf block. 
The dirents reside in leaves when * - * dip->i_di.di_flags & GFS2_DIF_EXHASH is true + * dip->i_diskflags & GFS2_DIF_EXHASH is true * * Otherwise, the dirents are "linear", within a single stuffed dinode block. * @@ -755,7 +755,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode, struct gfs2_inode *ip = GFS2_I(inode); int error; - if (ip->i_di.di_flags & GFS2_DIF_EXHASH) { + if (ip->i_diskflags & GFS2_DIF_EXHASH) { struct gfs2_leaf *leaf; unsigned hsize = 1 << ip->i_depth; unsigned index; @@ -907,7 +907,7 @@ static int dir_make_exhash(struct inode *inode) dip->i_disksize = sdp->sd_sb.sb_bsize / 2; gfs2_add_inode_blocks(&dip->i_inode, 1); - dip->i_di.di_flags |= GFS2_DIF_EXHASH; + dip->i_diskflags |= GFS2_DIF_EXHASH; for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; dip->i_depth = y; @@ -1429,7 +1429,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, if (!dip->i_entries) return 0; - if (dip->i_di.di_flags & GFS2_DIF_EXHASH) + if (dip->i_diskflags & GFS2_DIF_EXHASH) return dir_e_read(inode, offset, opaque, filldir); if (!gfs2_is_stuffed(dip)) { @@ -1612,7 +1612,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name, dent = gfs2_init_dirent(inode, dent, name, bh); gfs2_inum_out(nip, dent); dent->de_type = cpu_to_be16(type); - if (ip->i_di.di_flags & GFS2_DIF_EXHASH) { + if (ip->i_diskflags & GFS2_DIF_EXHASH) { leaf = (struct gfs2_leaf *)bh->b_data; be16_add_cpu(&leaf->lf_entries, 1); } @@ -1628,7 +1628,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name, error = 0; break; } - if (!(ip->i_di.di_flags & GFS2_DIF_EXHASH)) { + if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) { error = dir_make_exhash(inode); if (error) break; @@ -1691,7 +1691,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name) } dirent_del(dip, bh, prev, dent); - if (dip->i_di.di_flags & GFS2_DIF_EXHASH) { + if (dip->i_diskflags & GFS2_DIF_EXHASH) { struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data; u16 entries = be16_to_cpu(leaf->lf_entries); if (!entries) @@ -1748,7 +1748,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, gfs2_inum_out(nip, dent); dent->de_type = cpu_to_be16(new_type); - if (dip->i_di.di_flags & GFS2_DIF_EXHASH) { + if (dip->i_diskflags & GFS2_DIF_EXHASH) { brelse(bh); error = gfs2_meta_inode_buffer(dip, &bh); if (error) diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c index 1c1e06136aaa..0d1c76d906ae 100644 --- a/fs/gfs2/eattr.c +++ b/fs/gfs2/eattr.c @@ -118,7 +118,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) if (error) return error; - if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) { + if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) { error = ea_foreach_i(ip, bh, ea_call, data); goto out; } @@ -935,7 +935,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, int error; int mh_size = sizeof(struct gfs2_meta_header); - if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { + if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { __be64 *end; error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, @@ -974,7 +974,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, eablk = (__be64 *)(indbh->b_data + mh_size); *eablk = cpu_to_be64(ip->i_eattr); ip->i_eattr = blk; - ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; + ip->i_diskflags |= GFS2_DIF_EA_INDIRECT; gfs2_add_inode_blocks(&ip->i_inode, 1); eablk++; @@ -1015,7 +1015,7 @@ static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (error) return error; - if 
(!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) + if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) blks++; if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize) blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize); @@ -1051,7 +1051,7 @@ int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er) return error; if (el.el_ea) { - if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) { + if (ip->i_diskflags & GFS2_DIF_APPENDONLY) { brelse(el.el_bh); return -EPERM; } @@ -1388,7 +1388,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) if (bstart) gfs2_free_meta(ip, bstart, blen); - ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT; + ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT; error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { @@ -1479,7 +1479,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) if (error) goto out_rindex; - if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { + if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { error = ea_dealloc_indirect(ip); if (error) goto out_rindex; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index c6c318c2a0f6..848d64c8b62d 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -239,7 +239,7 @@ static int inode_go_lock(struct gfs2_holder *gh) return error; } - if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && + if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && (gl->gl_state == LM_ST_EXCLUSIVE) && (gh->gh_state == LM_ST_EXCLUSIVE)) error = gfs2_truncatei_resume(ip); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 4596cd254be6..6f67e753f883 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -233,9 +233,6 @@ enum { GIF_USER = 4, /* user inode, not metadata addr space */ }; -struct gfs2_dinode_host { - u32 di_flags; /* GFS2_DIF_... */ -}; struct gfs2_inode { struct inode i_inode; @@ -245,9 +242,6 @@ struct gfs2_inode { u64 i_eattr; loff_t i_disksize; unsigned long i_flags; /* GIF_... */ - - struct gfs2_dinode_host i_di; /* To be replaced by ref to block */ - struct gfs2_glock *i_gl; /* Move into i_gh? 
*/ struct gfs2_holder i_iopen_gh; struct gfs2_holder i_gh; /* for prepare/commit_write only */ @@ -255,6 +249,7 @@ struct gfs2_inode { u64 i_goal; /* goal block for allocations */ struct rw_semaphore i_rw_mutex; u32 i_entries; + u32 i_diskflags; u8 i_height; u8 i_depth; }; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index baf8b24b2de7..97d3ce65e26f 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -247,7 +247,6 @@ fail: static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) { - struct gfs2_dinode_host *di = &ip->i_di; const struct gfs2_dinode *str = buf; struct timespec atime; u16 height, depth; @@ -288,7 +287,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_goal = be64_to_cpu(str->di_goal_meta); ip->i_generation = be64_to_cpu(str->di_generation); - di->di_flags = be32_to_cpu(str->di_flags); + ip->i_diskflags = be32_to_cpu(str->di_flags); gfs2_set_inode_flags(&ip->i_inode); height = be16_to_cpu(str->di_height); if (unlikely(height > GFS2_MAX_META_HEIGHT)) @@ -789,11 +788,11 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, di->di_flags = 0; if (S_ISREG(mode)) { - if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || + if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) || gfs2_tune_get(sdp, gt_new_files_jdata)) di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA); } else if (S_ISDIR(mode)) { - di->di_flags |= cpu_to_be32(dip->i_di.di_flags & + di->di_flags |= cpu_to_be32(dip->i_diskflags & GFS2_DIF_INHERIT_JDATA); } @@ -1241,7 +1240,6 @@ int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) { - const struct gfs2_dinode_host *di = &ip->i_di; struct gfs2_dinode *str = buf; str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); @@ -1265,10 +1263,10 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) str->di_goal_data = cpu_to_be64(ip->i_goal); str->di_generation = cpu_to_be64(ip->i_generation); - str->di_flags = cpu_to_be32(di->di_flags); + str->di_flags = cpu_to_be32(ip->i_diskflags); str->di_height = cpu_to_be16(ip->i_height); str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && - !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? + !(ip->i_diskflags & GFS2_DIF_EXHASH) ? 
GFS2_FORMAT_DE : 0); str->di_depth = cpu_to_be16(ip->i_depth); str->di_entries = cpu_to_be32(ip->i_entries); @@ -1281,8 +1279,6 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) void gfs2_dinode_print(const struct gfs2_inode *ip) { - const struct gfs2_dinode_host *di = &ip->i_di; - printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)ip->i_no_formal_ino); printk(KERN_INFO " no_addr = %llu\n", @@ -1293,7 +1289,7 @@ void gfs2_dinode_print(const struct gfs2_inode *ip) (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); printk(KERN_INFO " i_goal = %llu\n", (unsigned long long)ip->i_goal); - printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags); + printk(KERN_INFO " i_diskflags = 0x%.8X\n", ip->i_diskflags); printk(KERN_INFO " i_height = %u\n", ip->i_height); printk(KERN_INFO " i_depth = %u\n", ip->i_depth); printk(KERN_INFO " i_entries = %u\n", ip->i_entries); diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index c3577906f0ad..d5329364cdff 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -20,7 +20,7 @@ static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) static inline int gfs2_is_jdata(const struct gfs2_inode *ip) { - return ip->i_di.di_flags & GFS2_DIF_JDATA; + return ip->i_diskflags & GFS2_DIF_JDATA; } static inline int gfs2_is_writeback(const struct gfs2_inode *ip) diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c index 3a9b9b438344..7fdeb14ddd1a 100644 --- a/fs/gfs2/ops_export.c +++ b/fs/gfs2/ops_export.c @@ -213,7 +213,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, } error = -EIO; - if (GFS2_I(inode)->i_di.di_flags & GFS2_DIF_SYSTEM) { + if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) { iput(inode); goto fail; } diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index d7e649ed62f2..a6b7a733fd4d 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -157,8 +157,8 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) if (error) return error; - fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags); - if (!S_ISDIR(inode->i_mode) && ip->i_di.di_flags & GFS2_DIF_JDATA) + fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); + if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) fsflags |= FS_JOURNAL_DATA_FL; if (put_user(fsflags, ptr)) error = -EFAULT; @@ -171,17 +171,16 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) void gfs2_set_inode_flags(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_dinode_host *di = &ip->i_di; unsigned int flags = inode->i_flags; flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); - if (di->di_flags & GFS2_DIF_IMMUTABLE) + if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) flags |= S_IMMUTABLE; - if (di->di_flags & GFS2_DIF_APPENDONLY) + if (ip->i_diskflags & GFS2_DIF_APPENDONLY) flags |= S_APPEND; - if (di->di_flags & GFS2_DIF_NOATIME) + if (ip->i_diskflags & GFS2_DIF_NOATIME) flags |= S_NOATIME; - if (di->di_flags & GFS2_DIF_SYNC) + if (ip->i_diskflags & GFS2_DIF_SYNC) flags |= S_SYNC; inode->i_flags = flags; } @@ -220,7 +219,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) if (error) goto out_drop_write; - flags = ip->i_di.di_flags; + flags = ip->i_diskflags; new_flags = (flags & ~mask) | (reqflags & mask); if ((new_flags ^ flags) == 0) goto out; @@ -259,7 +258,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) if (error) goto out_trans_end; gfs2_trans_add_bh(ip->i_gl, bh, 1); - ip->i_di.di_flags = new_flags; + ip->i_diskflags = new_flags; gfs2_dinode_out(ip, 
bh->b_data); brelse(bh); gfs2_set_inode_flags(inode); diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index b932d72b5f5b..49877546beb9 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -426,7 +426,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) ip->i_inode.i_nlink = 2; ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); - ip->i_di.di_flags |= GFS2_DIF_JDATA; + ip->i_diskflags |= GFS2_DIF_JDATA; ip->i_entries = 2; error = gfs2_meta_inode_buffer(ip, &dibh); diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index aee6cbaf58df..ad36af254fee 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -496,7 +496,7 @@ static void gfs2_delete_inode(struct inode *inode) goto out_truncate; if (S_ISDIR(inode->i_mode) && - (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { + (ip->i_diskflags & GFS2_DIF_EXHASH)) { error = gfs2_dir_exhash_dealloc(ip); if (error) goto out_unlock; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 188d0a277fa3..228a46596188 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1013,7 +1013,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change)) return; - if (ip->i_di.di_flags & GFS2_DIF_SYSTEM) + if (ip->i_diskflags & GFS2_DIF_SYSTEM) return; for (x = 0; x < al->al_qd_num; x++) { -- cgit From d8b71f7381769177998acb2f59ddc73465a60fe0 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 4 Nov 2008 10:19:03 +0000 Subject: GFS2: Move rg_igeneration into struct gfs2_rgrpd This moves one of the fields of struct gfs2_rgrpd_host into the struct gfs2_rgrpd with the eventual aim of removing the struct rgrpd_host completely. Signed-off-by: Steven Whitehouse --- fs/gfs2/incore.h | 2 +- fs/gfs2/rgrp.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 6f67e753f883..869ac83297e6 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -71,7 +71,6 @@ struct gfs2_bitmap { struct gfs2_rgrp_host { u32 rg_free; u32 rg_dinodes; - u64 rg_igeneration; }; struct gfs2_rgrpd { @@ -84,6 +83,7 @@ struct gfs2_rgrpd { u32 rd_data; /* num of data blocks in rgrp */ u32 rd_bitbytes; /* number of bytes in data bitmaps */ struct gfs2_rgrp_host rd_rg; + u64 rd_igeneration; struct gfs2_bitmap *rd_bits; unsigned int rd_bh_count; struct mutex rd_mutex; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index bdad0dffc6b4..8e93d62991cc 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -702,7 +702,7 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) rgd->rd_flags &= ~GFS2_RDF_NOALLOC; rg->rg_free = be32_to_cpu(str->rg_free); rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); - rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); + rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); } static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) @@ -717,7 +717,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) str->rg_free = cpu_to_be32(rg->rg_free); str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); str->__pad = cpu_to_be32(0); - str->rg_igeneration = cpu_to_be64(rg->rg_igeneration); + str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); } @@ -1448,7 +1448,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); rgd->rd_rg.rg_free--; rgd->rd_rg.rg_dinodes++; - *generation = rgd->rd_rg.rg_igeneration++; + *generation = rgd->rd_igeneration++; 
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); -- cgit From cfc8b54922db7b647b6d88914dc7ef8c63b6671d Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 4 Nov 2008 10:25:13 +0000 Subject: GFS2: Move rg_free from gfs2_rgrpd_host to gfs2_rgrpd The second of three fields which need to move, in order to remove the struct gfs2_rgrpd_host. Signed-off-by: Steven Whitehouse --- fs/gfs2/incore.h | 2 +- fs/gfs2/rgrp.c | 28 ++++++++++++++-------------- fs/gfs2/super.c | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 869ac83297e6..f8d977362515 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -69,7 +69,6 @@ struct gfs2_bitmap { }; struct gfs2_rgrp_host { - u32 rg_free; u32 rg_dinodes; }; @@ -82,6 +81,7 @@ struct gfs2_rgrpd { u32 rd_length; /* length of rgrp header in fs blocks */ u32 rd_data; /* num of data blocks in rgrp */ u32 rd_bitbytes; /* number of bytes in data bitmaps */ + u32 rd_free; struct gfs2_rgrp_host rd_rg; u64 rd_igeneration; struct gfs2_bitmap *rd_bits; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 8e93d62991cc..bab9cfab34c7 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -269,15 +269,15 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) bi->bi_len, x); } - if (count[0] != rgd->rd_rg.rg_free) { + if (count[0] != rgd->rd_free) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "free data mismatch: %u != %u\n", - count[0], rgd->rd_rg.rg_free); + count[0], rgd->rd_free); return; } tmp = rgd->rd_data - - rgd->rd_rg.rg_free - + rgd->rd_free - rgd->rd_rg.rg_dinodes; if (count[1] + count[2] != tmp) { if (gfs2_consist_rgrpd(rgd)) @@ -700,7 +700,7 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) rgd->rd_flags |= GFS2_RDF_NOALLOC; else rgd->rd_flags &= ~GFS2_RDF_NOALLOC; - rg->rg_free = be32_to_cpu(str->rg_free); + rgd->rd_free = be32_to_cpu(str->rg_free); rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); } @@ -714,7 +714,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) if (rgd->rd_flags & GFS2_RDF_NOALLOC) rg_flags |= GFS2_RGF_NOALLOC; str->rg_flags = cpu_to_be32(rg_flags); - str->rg_free = cpu_to_be32(rg->rg_free); + str->rg_free = cpu_to_be32(rgd->rd_free); str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); str->__pad = cpu_to_be32(0); str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); @@ -776,7 +776,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) } spin_lock(&sdp->sd_rindex_spin); - rgd->rd_free_clone = rgd->rd_rg.rg_free; + rgd->rd_free_clone = rgd->rd_free; rgd->rd_bh_count++; spin_unlock(&sdp->sd_rindex_spin); @@ -850,7 +850,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) } spin_lock(&sdp->sd_rindex_spin); - rgd->rd_free_clone = rgd->rd_rg.rg_free; + rgd->rd_free_clone = rgd->rd_free; spin_unlock(&sdp->sd_rindex_spin); } @@ -1403,8 +1403,8 @@ u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n) block = rgd->rd_data0 + blk; ip->i_goal = block; - gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n); - rgd->rd_rg.rg_free -= *n; + gfs2_assert_withdraw(sdp, rgd->rd_free >= *n); + rgd->rd_free -= *n; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -1445,8 +1445,8 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) block = rgd->rd_data0 + blk; - gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); - rgd->rd_rg.rg_free--; + gfs2_assert_withdraw(sdp, rgd->rd_free); + 
rgd->rd_free--; rgd->rd_rg.rg_dinodes++; *generation = rgd->rd_igeneration++; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); @@ -1481,7 +1481,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) if (!rgd) return; - rgd->rd_rg.rg_free += blen; + rgd->rd_free += blen; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -1509,7 +1509,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) if (!rgd) return; - rgd->rd_rg.rg_free += blen; + rgd->rd_free += blen; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -1549,7 +1549,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) if (!rgd->rd_rg.rg_dinodes) gfs2_consist_rgrpd(rgd); rgd->rd_rg.rg_dinodes--; - rgd->rd_rg.rg_free++; + rgd->rd_free++; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index f5cef2ad7ae1..e76907691ad9 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -468,7 +468,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd, { gfs2_rgrp_verify(rgd); sc->sc_total += rgd->rd_data; - sc->sc_free += rgd->rd_rg.rg_free; + sc->sc_free += rgd->rd_free; sc->sc_dinodes += rgd->rd_rg.rg_dinodes; return 0; } -- cgit From 73f749483ed18f3b5759909cc4187b1741f54b10 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 4 Nov 2008 10:32:57 +0000 Subject: GFS2: Banish struct gfs2_rgrpd_host This patch moves the final field so that we can get rid of struct gfs2_rgrpd_host, as promised some time ago. Also by rearranging the fields slightly, we are able to reduce the size of the gfs2_rgrpd structure at the same time. Signed-off-by: Steven Whitehouse --- fs/gfs2/incore.h | 12 ++++-------- fs/gfs2/rgrp.c | 20 ++++++++------------ fs/gfs2/super.c | 2 +- 3 files changed, 13 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index f8d977362515..9e3b613d0bac 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -68,10 +68,6 @@ struct gfs2_bitmap { u32 bi_len; }; -struct gfs2_rgrp_host { - u32 rg_dinodes; -}; - struct gfs2_rgrpd { struct list_head rd_list; /* Link with superblock */ struct list_head rd_list_mru; @@ -82,15 +78,15 @@ struct gfs2_rgrpd { u32 rd_data; /* num of data blocks in rgrp */ u32 rd_bitbytes; /* number of bytes in data bitmaps */ u32 rd_free; - struct gfs2_rgrp_host rd_rg; + u32 rd_free_clone; + u32 rd_dinodes; u64 rd_igeneration; struct gfs2_bitmap *rd_bits; - unsigned int rd_bh_count; struct mutex rd_mutex; - u32 rd_free_clone; struct gfs2_log_element rd_le; - u32 rd_last_alloc; struct gfs2_sbd *rd_sbd; + unsigned int rd_bh_count; + u32 rd_last_alloc; unsigned char rd_flags; #define GFS2_RDF_CHECK 0x01 /* Need to check for unlinked inodes */ #define GFS2_RDF_NOALLOC 0x02 /* rg prohibits allocation */ diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index bab9cfab34c7..8b01c635d925 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -276,9 +276,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) return; } - tmp = rgd->rd_data - - rgd->rd_free - - rgd->rd_rg.rg_dinodes; + tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; if (count[1] + count[2] != tmp) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used data mismatch: %u != %u\n", @@ -286,10 +284,10 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) return; } - if (count[3] != rgd->rd_rg.rg_dinodes) { + if (count[3] != rgd->rd_dinodes) { if 
(gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used metadata mismatch: %u != %u\n", - count[3], rgd->rd_rg.rg_dinodes); + count[3], rgd->rd_dinodes); return; } @@ -692,7 +690,6 @@ int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh) static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) { const struct gfs2_rgrp *str = buf; - struct gfs2_rgrp_host *rg = &rgd->rd_rg; u32 rg_flags; rg_flags = be32_to_cpu(str->rg_flags); @@ -701,21 +698,20 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) else rgd->rd_flags &= ~GFS2_RDF_NOALLOC; rgd->rd_free = be32_to_cpu(str->rg_free); - rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); + rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes); rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); } static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) { struct gfs2_rgrp *str = buf; - struct gfs2_rgrp_host *rg = &rgd->rd_rg; u32 rg_flags = 0; if (rgd->rd_flags & GFS2_RDF_NOALLOC) rg_flags |= GFS2_RGF_NOALLOC; str->rg_flags = cpu_to_be32(rg_flags); str->rg_free = cpu_to_be32(rgd->rd_free); - str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); + str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes); str->__pad = cpu_to_be32(0); str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); @@ -1447,7 +1443,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) gfs2_assert_withdraw(sdp, rgd->rd_free); rgd->rd_free--; - rgd->rd_rg.rg_dinodes++; + rgd->rd_dinodes++; *generation = rgd->rd_igeneration++; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -1546,9 +1542,9 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) return; gfs2_assert_withdraw(sdp, rgd == tmp_rgd); - if (!rgd->rd_rg.rg_dinodes) + if (!rgd->rd_dinodes) gfs2_consist_rgrpd(rgd); - rgd->rd_rg.rg_dinodes--; + rgd->rd_dinodes--; rgd->rd_free++; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e76907691ad9..b85877062a48 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -469,7 +469,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd, gfs2_rgrp_verify(rgd); sc->sc_total += rgd->rd_data; sc->sc_free += rgd->rd_free; - sc->sc_dinodes += rgd->rd_rg.rg_dinodes; + sc->sc_dinodes += rgd->rd_dinodes; return 0; } -- cgit From fa75cedc3da5923b8ea3877be9d5bc09b02e3860 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 10 Nov 2008 10:10:12 +0000 Subject: GFS2: Add more detail to debugfs glock dumps Although the glock dumps print quite a lot of information about the glocks themselves, there are more things which can be usefully added to the dump relating to the objects themselves. This patch adds a few more fields to the inode and resource group lines, which should be useful for debugging.
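A quick legend for the single-letter keys in the extended dump lines, inferred from the gfs2_print_dbg() arguments in the glops.c hunk that follows (a gloss added here for readers, not text from the patch):

/*
 * Inode line:  I: n:<no_formal_ino>/<no_addr> t:<DT_* type from i_mode>
 *                 f:<ip->i_flags> d:<ip->i_diskflags>
 *                 s:<i_inode.i_size>/<i_disksize>
 * Rgrp line:   R: n:<rd_addr> f:<rd_flags> b:<rd_free>/<rd_free_clone>
 *                 i:<rd_dinodes>
 */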
Signed-off-by: Steven Whitehouse --- fs/gfs2/glops.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 848d64c8b62d..68ee66552d19 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -260,10 +260,13 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) const struct gfs2_inode *ip = gl->gl_object; if (ip == NULL) return 0; - gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n", + gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n", (unsigned long long)ip->i_no_formal_ino, (unsigned long long)ip->i_no_addr, - IF2DT(ip->i_inode.i_mode), ip->i_flags); + IF2DT(ip->i_inode.i_mode), ip->i_flags, + (unsigned int)ip->i_diskflags, + (unsigned long long)ip->i_inode.i_size, + (unsigned long long)ip->i_disksize); return 0; } @@ -318,7 +321,9 @@ static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) const struct gfs2_rgrpd *rgd = gl->gl_object; if (rgd == NULL) return 0; - gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr); + gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n", + (unsigned long long)rgd->rd_addr, rgd->rd_flags, + rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes); return 0; } -- cgit From 37b2c8377c98acb60cf4d0126e385ef2153bded9 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 17 Nov 2008 14:25:37 +0000 Subject: GFS2: Clean up & move gfs2_quotad This patch is a clean up of gfs2_quotad prior to giving it an extra job to do in addition to the current portfolio of updating the quota and statfs information from time to time. As a result it has been moved into quota.c allowing one of the functions it calls to be made static. Also the clean up allows the two existing functions to have separate timeouts and also to coexist with its future role of dealing with the "truncate in progress" inode flag. The (pointless) setting of gfs2_quotad_secs is removed since we arrange to only wake up quotad when one of the two timers expires. In addition the struct gfs2_quota_data is moved into a slab cache, mainly for easier debugging. It should also be possible to use a shrinker in the future, rather than the current scheme of scanning the quota data entries from time to time. 
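A minimal sketch of the per-job timeout handling described above, for illustration only; the patch's real helper is quotad_check_timeo() in the quota.c hunk below, and this simplified version drops the gfs2_sbd plumbing and error reporting:

/* 't' is how long the daemon actually slept.  Each periodic job keeps
 * its own countdown and reloads it from its own tunable once it fires,
 * so statfs and quota syncing no longer share a single interval. */
static void check_timeo(unsigned long t, unsigned long *timeo,
                        unsigned long quantum_jiffies, int (*job)(void))
{
        if (t >= *timeo) {
                job();                     /* e.g. sync statfs or quota data */
                *timeo = quantum_jiffies;  /* restart this job's countdown */
        } else {
                *timeo -= t;               /* not due yet; keep counting down */
        }
}

The daemon then sleeps for the min() of the two countdowns, so it wakes only when the earlier job is next due rather than on a fixed quotad interval.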
Signed-off-by: Steven Whitehouse --- fs/gfs2/daemon.c | 53 ------------------------------------ fs/gfs2/incore.h | 4 +-- fs/gfs2/main.c | 10 +++++++ fs/gfs2/ops_fstype.c | 5 +--- fs/gfs2/quota.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++----- fs/gfs2/quota.h | 1 - fs/gfs2/sys.c | 2 -- fs/gfs2/util.c | 1 + fs/gfs2/util.h | 1 + 9 files changed, 84 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c index e51991947d2c..5668aa77b95a 100644 --- a/fs/gfs2/daemon.c +++ b/fs/gfs2/daemon.c @@ -23,7 +23,6 @@ #include "daemon.h" #include "glock.h" #include "log.h" -#include "quota.h" #include "recovery.h" #include "super.h" #include "util.h" @@ -82,55 +81,3 @@ int gfs2_recoverd(void *data) return 0; } -/** - * gfs2_quotad - Write cached quota changes into the quota file - * @sdp: Pointer to GFS2 superblock - * - */ - -int gfs2_quotad(void *data) -{ - struct gfs2_sbd *sdp = data; - unsigned long t; - int error; - - while (!kthread_should_stop()) { - /* Update the master statfs file */ - - t = sdp->sd_statfs_sync_time + - gfs2_tune_get(sdp, gt_statfs_quantum) * HZ; - - if (time_after_eq(jiffies, t)) { - error = gfs2_statfs_sync(sdp); - if (error && - error != -EROFS && - !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) - fs_err(sdp, "quotad: (1) error=%d\n", error); - sdp->sd_statfs_sync_time = jiffies; - } - - /* Update quota file */ - - t = sdp->sd_quota_sync_time + - gfs2_tune_get(sdp, gt_quota_quantum) * HZ; - - if (time_after_eq(jiffies, t)) { - error = gfs2_quota_sync(sdp); - if (error && - error != -EROFS && - !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) - fs_err(sdp, "quotad: (2) error=%d\n", error); - sdp->sd_quota_sync_time = jiffies; - } - - gfs2_quota_scan(sdp); - - t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ; - if (freezing(current)) - refrigerator(); - schedule_timeout_interruptible(t); - } - - return 0; -} - diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 9e3b613d0bac..cfebc1793574 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -402,7 +402,6 @@ struct gfs2_tune { unsigned int gt_recoverd_secs; unsigned int gt_logd_secs; - unsigned int gt_quotad_secs; unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */ unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */ @@ -509,7 +508,6 @@ struct gfs2_sbd { spinlock_t sd_statfs_spin; struct gfs2_statfs_change_host sd_statfs_master; struct gfs2_statfs_change_host sd_statfs_local; - unsigned long sd_statfs_sync_time; /* Resource group stuff */ @@ -551,13 +549,13 @@ struct gfs2_sbd { atomic_t sd_quota_count; spinlock_t sd_quota_spin; struct mutex sd_quota_mutex; + wait_queue_head_t sd_quota_wait; unsigned int sd_quota_slots; unsigned int sd_quota_chunks; unsigned char **sd_quota_bitmap; u64 sd_quota_sync_gen; - unsigned long sd_quota_sync_time; /* Log stuff */ diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 3eea03c78534..e3f6f1844a21 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -93,6 +93,12 @@ static int __init init_gfs2_fs(void) if (!gfs2_rgrpd_cachep) goto fail; + gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad", + sizeof(struct gfs2_quota_data), + 0, 0, NULL); + if (!gfs2_quotad_cachep) + goto fail; + error = register_filesystem(&gfs2_fs_type); if (error) goto fail; @@ -112,6 +118,9 @@ fail_unregister: fail: gfs2_glock_exit(); + if (gfs2_quotad_cachep) + kmem_cache_destroy(gfs2_quotad_cachep); + if (gfs2_rgrpd_cachep) kmem_cache_destroy(gfs2_rgrpd_cachep); @@ -140,6 +149,7 @@ static void __exit exit_gfs2_fs(void) 
unregister_filesystem(&gfs2_fs_type); unregister_filesystem(&gfs2meta_fs_type); + kmem_cache_destroy(gfs2_quotad_cachep); kmem_cache_destroy(gfs2_rgrpd_cachep); kmem_cache_destroy(gfs2_bufdata_cachep); kmem_cache_destroy(gfs2_inode_cachep); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index dd83e8322350..5d137063b679 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -60,7 +60,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt) gt->gt_log_flush_secs = 60; gt->gt_recoverd_secs = 60; gt->gt_logd_secs = 1; - gt->gt_quotad_secs = 5; gt->gt_quota_simul_sync = 64; gt->gt_quota_warn_period = 10; gt->gt_quota_scale_num = 1; @@ -107,6 +106,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) INIT_LIST_HEAD(&sdp->sd_quota_list); spin_lock_init(&sdp->sd_quota_spin); mutex_init(&sdp->sd_quota_mutex); + init_waitqueue_head(&sdp->sd_quota_wait); spin_lock_init(&sdp->sd_log_lock); @@ -970,9 +970,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo) } sdp->sd_logd_process = p; - sdp->sd_statfs_sync_time = jiffies; - sdp->sd_quota_sync_time = jiffies; - p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); error = IS_ERR(p); if (error) { diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 228a46596188..0cfe44f0b6ab 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -46,6 +46,8 @@ #include #include #include +#include +#include #include "gfs2.h" #include "incore.h" @@ -94,7 +96,7 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id, struct gfs2_quota_data *qd; int error; - qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS); + qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS); if (!qd) return -ENOMEM; @@ -119,7 +121,7 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id, return 0; fail: - kfree(qd); + kmem_cache_free(gfs2_quotad_cachep, qd); return error; } @@ -158,7 +160,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, if (qd || !create) { if (new_qd) { gfs2_lvb_unhold(new_qd->qd_gl); - kfree(new_qd); + kmem_cache_free(gfs2_quotad_cachep, new_qd); } *qdp = qd; return 0; @@ -1195,7 +1197,7 @@ fail: return error; } -void gfs2_quota_scan(struct gfs2_sbd *sdp) +static void gfs2_quota_scan(struct gfs2_sbd *sdp) { struct gfs2_quota_data *qd, *safe; LIST_HEAD(dead); @@ -1222,7 +1224,7 @@ void gfs2_quota_scan(struct gfs2_sbd *sdp) gfs2_assert_warn(sdp, !qd->qd_bh_count); gfs2_lvb_unhold(qd->qd_gl); - kfree(qd); + kmem_cache_free(gfs2_quotad_cachep, qd); } } @@ -1257,7 +1259,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp) gfs2_assert_warn(sdp, !qd->qd_bh_count); gfs2_lvb_unhold(qd->qd_gl); - kfree(qd); + kmem_cache_free(gfs2_quotad_cachep, qd); spin_lock(&sdp->sd_quota_spin); } @@ -1272,3 +1274,65 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp) } } +static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error) +{ + if (error == 0 || error == -EROFS) + return; + if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) + fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error); +} + +static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, + int (*fxn)(struct gfs2_sbd *sdp), + unsigned long t, unsigned long *timeo, + unsigned int *new_timeo) +{ + if (t >= *timeo) { + int error = fxn(sdp); + quotad_error(sdp, msg, error); + *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; + } else { + *timeo -= t; + } +} + +/** + * gfs2_quotad - Write cached quota changes into the quota file + * @sdp: Pointer to GFS2 superblock + * + */ + +int gfs2_quotad(void *data) +{ + struct gfs2_sbd *sdp = data; + struct gfs2_tune 
*tune = &sdp->sd_tune; + unsigned long statfs_timeo = 0; + unsigned long quotad_timeo = 0; + unsigned long t = 0; + DEFINE_WAIT(wait); + + while (!kthread_should_stop()) { + + /* Update the master statfs file */ + quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, + &statfs_timeo, &tune->gt_statfs_quantum); + + /* Update quota file */ + quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, + &quotad_timeo, &tune->gt_quota_quantum); + + /* FIXME: This should be turned into a shrinker */ + gfs2_quota_scan(sdp); + + if (freezing(current)) + refrigerator(); + t = min(quotad_timeo, statfs_timeo); + + prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE); + t -= schedule_timeout(t); + finish_wait(&sdp->sd_quota_wait, &wait); + } + + return 0; +} + diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 3b7f4b0e5dfe..1d08aeef07e6 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -29,7 +29,6 @@ int gfs2_quota_sync(struct gfs2_sbd *sdp); int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); int gfs2_quota_init(struct gfs2_sbd *sdp); -void gfs2_quota_scan(struct gfs2_sbd *sdp); void gfs2_quota_cleanup(struct gfs2_sbd *sdp); static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 7e1879f1a02c..59e36fd80903 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -408,7 +408,6 @@ TUNE_ATTR(stall_secs, 1); TUNE_ATTR(statfs_quantum, 1); TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process); TUNE_ATTR_DAEMON(logd_secs, logd_process); -TUNE_ATTR_DAEMON(quotad_secs, quotad_process); TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); static struct attribute *tune_attrs[] = { @@ -426,7 +425,6 @@ static struct attribute *tune_attrs[] = { &tune_attr_statfs_quantum.attr, &tune_attr_recoverd_secs.attr, &tune_attr_logd_secs.attr, - &tune_attr_quotad_secs.attr, &tune_attr_quota_scale.attr, &tune_attr_new_files_jdata.attr, NULL, diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index d31e355c61fb..374f50e95496 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -25,6 +25,7 @@ struct kmem_cache *gfs2_glock_cachep __read_mostly; struct kmem_cache *gfs2_inode_cachep __read_mostly; struct kmem_cache *gfs2_bufdata_cachep __read_mostly; struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; +struct kmem_cache *gfs2_quotad_cachep __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index 7f48576289c9..33e96b0ce9ab 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -148,6 +148,7 @@ extern struct kmem_cache *gfs2_glock_cachep; extern struct kmem_cache *gfs2_inode_cachep; extern struct kmem_cache *gfs2_bufdata_cachep; extern struct kmem_cache *gfs2_rgrpd_cachep; +extern struct kmem_cache *gfs2_quotad_cachep; static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, unsigned int *p) -- cgit From 813e0c46c9e2a0c6f0b6e774faac82afd7a2e812 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 18 Nov 2008 13:38:48 +0000 Subject: GFS2: Fix "truncate in progress" hang Following on from the recent clean up of gfs2_quotad, this patch moves the processing of "truncate in progress" inodes from the glock workqueue into gfs2_quotad. This fixes a hang due to the "truncate in progress" processing requiring glocks in order to complete. It might seem odd to use gfs2_quotad for this particular item, but we have to use a pre-existing thread since creating a thread implies a GFP_KERNEL memory allocation which is not allowed from the glock workqueue context.
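A simplified sketch of the consumer side of this hand-off (an illustration only; the real function is quotad_check_trunc_list() in the quota.c hunk further down, and the locking shown here is reduced): the glock code merely queues the inode on sd_trunc_list and wakes sd_quota_wait, and gfs2_quotad later drains the list in a context where taking glocks and sleeping are allowed.

static void drain_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        for (;;) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                /* safe here: quotad may block and take glocks */
                gfs2_glock_finish_truncate(ip);
        }
}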
Of the existing threads, gfs2_logd and gfs2_recoverd may deadlock if used for this operation. gfs2_scand and gfs2_glockd are both scheduled for removal at some (hopefully not too distant) future point. That leaves only gfs2_quotad whose workload is generally fairly light and is easily adapted for this extra task. Also, as a result of this change, it opens the way for a future patch to make the reading of the inode's information asynchronous with respect to the glock workqueue, which is another improvement that has been on the list for some time now. Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 33 +++++++++++++++++++++++++++++---- fs/gfs2/glock.h | 1 + fs/gfs2/glops.c | 11 +++++++++-- fs/gfs2/incore.h | 3 +++ fs/gfs2/main.c | 1 + fs/gfs2/ops_fstype.c | 2 ++ fs/gfs2/quota.c | 31 ++++++++++++++++++++++++++++++- 7 files changed, 75 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 27cb9cca9c08..4ddf3bd55dda 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -40,6 +40,7 @@ #include "quota.h" #include "super.h" #include "util.h" +#include "bmap.h" struct gfs2_gl_hash_bucket { struct hlist_head hb_list; @@ -289,7 +290,8 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) * do_promote - promote as many requests as possible on the current queue * @gl: The glock * - * Returns: true if there is a blocked holder at the head of the list + * Returns: 1 if there is a blocked holder at the head of the list, or 2 + * if a type specific operation is underway. */ static int do_promote(struct gfs2_glock *gl) @@ -312,6 +314,8 @@ restart: ret = glops->go_lock(gh); spin_lock(&gl->gl_spin); if (ret) { + if (ret == 1) + return 2; gh->gh_error = ret; list_del_init(&gh->gh_list); gfs2_holder_wake(gh); @@ -416,6 +420,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) const struct gfs2_glock_operations *glops = gl->gl_ops; struct gfs2_holder *gh; unsigned state = ret & LM_OUT_ST_MASK; + int rv; spin_lock(&gl->gl_spin); state_change(gl, state); @@ -470,7 +475,6 @@ retry: gfs2_demote_wake(gl); if (state != LM_ST_UNLOCKED) { if (glops->go_xmote_bh) { - int rv; spin_unlock(&gl->gl_spin); rv = glops->go_xmote_bh(gl, gh); if (rv == -EAGAIN) @@ -481,10 +485,13 @@ retry: goto out; } } - do_promote(gl); + rv = do_promote(gl); + if (rv == 2) + goto out_locked; } out: clear_bit(GLF_LOCK, &gl->gl_flags); +out_locked: spin_unlock(&gl->gl_spin); gfs2_glock_put(gl); } @@ -584,6 +591,7 @@ __releases(&gl->gl_spin) __acquires(&gl->gl_spin) { struct gfs2_holder *gh = NULL; + int ret; if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) return; @@ -602,8 +610,11 @@ __acquires(&gl->gl_spin) } else { if (test_bit(GLF_DEMOTE, &gl->gl_flags)) gfs2_demote_wake(gl); - if (do_promote(gl) == 0) + ret = do_promote(gl); + if (ret == 0) goto out; + if (ret == 2) + return; gh = find_first_waiter(gl); gl->gl_target = gh->gh_state; if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) @@ -1556,6 +1567,20 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) } } +void gfs2_glock_finish_truncate(struct gfs2_inode *ip) +{ + struct gfs2_glock *gl = ip->i_gl; + int ret; + + ret = gfs2_truncatei_resume(ip); + gfs2_assert_withdraw(gl->gl_sbd, ret == 0); + + spin_lock(&gl->gl_spin); + clear_bit(GLF_LOCK, &gl->gl_flags); + run_queue(gl, 1); + spin_unlock(&gl->gl_spin); +} + static const char *state2str(unsigned state) { switch(state) { diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 695c6b193611..13a64ee6523b 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -132,6 +132,7 @@ 
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); void gfs2_reclaim_glock(struct gfs2_sbd *sdp); void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); +void gfs2_glock_finish_truncate(struct gfs2_inode *ip); int __init gfs2_glock_init(void); void gfs2_glock_exit(void); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 68ee66552d19..8ebff8ebae20 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -227,6 +227,7 @@ static int inode_go_demote_ok(struct gfs2_glock *gl) static int inode_go_lock(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; + struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_inode *ip = gl->gl_object; int error = 0; @@ -241,8 +242,14 @@ static int inode_go_lock(struct gfs2_holder *gh) if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && (gl->gl_state == LM_ST_EXCLUSIVE) && - (gh->gh_state == LM_ST_EXCLUSIVE)) - error = gfs2_truncatei_resume(ip); + (gh->gh_state == LM_ST_EXCLUSIVE)) { + spin_lock(&sdp->sd_trunc_lock); + if (list_empty(&ip->i_trunc_list)) + list_add(&sdp->sd_trunc_list, &ip->i_trunc_list); + spin_unlock(&sdp->sd_trunc_lock); + wake_up(&sdp->sd_quota_wait); + return 1; + } return error; } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index cfebc1793574..dd7d0f8f3575 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -244,6 +244,7 @@ struct gfs2_inode { struct gfs2_alloc *i_alloc; u64 i_goal; /* goal block for allocations */ struct rw_semaphore i_rw_mutex; + struct list_head i_trunc_list; u32 i_entries; u32 i_diskflags; u8 i_height; @@ -550,6 +551,8 @@ struct gfs2_sbd { spinlock_t sd_quota_spin; struct mutex sd_quota_mutex; wait_queue_head_t sd_quota_wait; + struct list_head sd_trunc_list; + spinlock_t sd_trunc_lock; unsigned int sd_quota_slots; unsigned int sd_quota_chunks; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index e3f6f1844a21..cf39295ccb90 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -30,6 +30,7 @@ static void gfs2_init_inode_once(void *foo) inode_init_once(&ip->i_inode); init_rwsem(&ip->i_rw_mutex); + INIT_LIST_HEAD(&ip->i_trunc_list); ip->i_alloc = NULL; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 5d137063b679..a9a83804eea7 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -107,6 +107,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) spin_lock_init(&sdp->sd_quota_spin); mutex_init(&sdp->sd_quota_mutex); init_waitqueue_head(&sdp->sd_quota_wait); + INIT_LIST_HEAD(&sdp->sd_trunc_list); + spin_lock_init(&sdp->sd_trunc_lock); spin_lock_init(&sdp->sd_log_lock); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 0cfe44f0b6ab..b08d09696b3e 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1296,6 +1296,25 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, } } +static void quotad_check_trunc_list(struct gfs2_sbd *sdp) +{ + struct gfs2_inode *ip; + + while(1) { + ip = NULL; + spin_lock(&sdp->sd_trunc_lock); + if (!list_empty(&sdp->sd_trunc_list)) { + ip = list_entry(sdp->sd_trunc_list.next, + struct gfs2_inode, i_trunc_list); + list_del_init(&ip->i_trunc_list); + } + spin_unlock(&sdp->sd_trunc_lock); + if (ip == NULL) + return; + gfs2_glock_finish_truncate(ip); + } +} + /** * gfs2_quotad - Write cached quota changes into the quota file * @sdp: Pointer to GFS2 superblock @@ -1310,6 +1329,7 @@ int gfs2_quotad(void *data) unsigned long quotad_timeo = 0; unsigned long t = 0; DEFINE_WAIT(wait); + int empty; while (!kthread_should_stop()) { @@ -1324,12 +1344,21 @@ int gfs2_quotad(void *data) /* 
FIXME: This should be turned into a shrinker */ gfs2_quota_scan(sdp); + /* Check for & recover partially truncated inodes */ + quotad_check_trunc_list(sdp); + if (freezing(current)) refrigerator(); t = min(quotad_timeo, statfs_timeo); prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE); - t -= schedule_timeout(t); + spin_lock(&sdp->sd_trunc_lock); + empty = list_empty(&sdp->sd_trunc_list); + spin_unlock(&sdp->sd_trunc_lock); + if (empty) + t -= schedule_timeout(t); + else + t = 0; finish_wait(&sdp->sd_quota_wait, &wait); } -- cgit From 9ac1b4d9b6f885ccd7d8f56bceb609003a920ff7 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 19 Nov 2008 10:08:22 +0000 Subject: GFS2: Move gfs2_recoverd into recovery.c By moving gfs2_recoverd, we can make an additional function static and it also leaves only (the already scheduled for removal) gfs2_glockd in daemon.c. At the same time the declaration of gfs2_quotad is moved to quota.h to reflect the new location of gfs2_quotad in a previous patch. Also the recovery.h and quota.h headers are cleaned up. Signed-off-by: Steven Whitehouse --- fs/gfs2/daemon.c | 22 ---------------------- fs/gfs2/daemon.h | 2 -- fs/gfs2/ops_fstype.c | 1 + fs/gfs2/quota.h | 23 ++++++++++++----------- fs/gfs2/recovery.c | 26 +++++++++++++++++++++++++- fs/gfs2/recovery.h | 14 +++++++------- 6 files changed, 45 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c index 5668aa77b95a..2662df0d5b93 100644 --- a/fs/gfs2/daemon.c +++ b/fs/gfs2/daemon.c @@ -59,25 +59,3 @@ int gfs2_glockd(void *data) return 0; } -/** - * gfs2_recoverd - Recover dead machine's journals - * @sdp: Pointer to GFS2 superblock - * - */ - -int gfs2_recoverd(void *data) -{ - struct gfs2_sbd *sdp = data; - unsigned long t; - - while (!kthread_should_stop()) { - gfs2_check_journals(sdp); - t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ; - if (freezing(current)) - refrigerator(); - schedule_timeout_interruptible(t); - } - - return 0; -} - diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h index 4be084fb6a62..5258954a234f 100644 --- a/fs/gfs2/daemon.h +++ b/fs/gfs2/daemon.h @@ -11,7 +11,5 @@ #define __DAEMON_DOT_H__ int gfs2_glockd(void *data); -int gfs2_recoverd(void *data); -int gfs2_quotad(void *data); #endif /* __DAEMON_DOT_H__ */ diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index a9a83804eea7..d159e7e72722 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -33,6 +33,7 @@ #include "sys.h" #include "util.h" #include "log.h" +#include "quota.h" #define DO 0 #define UNDO 1 diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 1d08aeef07e6..cec9032be97d 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -15,21 +15,22 @@ struct gfs2_sbd; #define NO_QUOTA_CHANGE ((u32)-1) -int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid); -void gfs2_quota_unhold(struct gfs2_inode *ip); +extern int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid); +extern void gfs2_quota_unhold(struct gfs2_inode *ip); -int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid); -void gfs2_quota_unlock(struct gfs2_inode *ip); +extern int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid); +extern void gfs2_quota_unlock(struct gfs2_inode *ip); -int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); -void gfs2_quota_change(struct gfs2_inode *ip, s64 change, - u32 uid, u32 gid); +extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); +extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, + u32 uid, u32 gid); 
-int gfs2_quota_sync(struct gfs2_sbd *sdp); -int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); +extern int gfs2_quota_sync(struct gfs2_sbd *sdp); +extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); -int gfs2_quota_init(struct gfs2_sbd *sdp); -void gfs2_quota_cleanup(struct gfs2_sbd *sdp); +extern int gfs2_quota_init(struct gfs2_sbd *sdp); +extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); +extern int gfs2_quotad(void *data); static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) { diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index d5e91f4f6a0b..b56ba3db7771 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include "gfs2.h" #include "incore.h" @@ -589,7 +591,7 @@ fail: * */ -void gfs2_check_journals(struct gfs2_sbd *sdp) +static void gfs2_check_journals(struct gfs2_sbd *sdp) { struct gfs2_jdesc *jd; @@ -603,3 +605,25 @@ void gfs2_check_journals(struct gfs2_sbd *sdp) } } +/** + * gfs2_recoverd - Recover dead machine's journals + * @sdp: Pointer to GFS2 superblock + * + */ + +int gfs2_recoverd(void *data) +{ + struct gfs2_sbd *sdp = data; + unsigned long t; + + while (!kthread_should_stop()) { + gfs2_check_journals(sdp); + t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ; + if (freezing(current)) + refrigerator(); + schedule_timeout_interruptible(t); + } + + return 0; +} + diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index f7235e61c723..a8218ea15b57 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -18,17 +18,17 @@ static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk) *blk = 0; } -int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, +extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, struct buffer_head **bh); -int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); -int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); -void gfs2_revoke_clean(struct gfs2_sbd *sdp); +extern int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); +extern int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); +extern void gfs2_revoke_clean(struct gfs2_sbd *sdp); -int gfs2_find_jhead(struct gfs2_jdesc *jd, +extern int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head); -int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); -void gfs2_check_journals(struct gfs2_sbd *sdp); +extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); +extern int gfs2_recoverd(void *data); #endif /* __RECOVERY_DOT_H__ */ -- cgit From 97cc1025b1a91c52e84f12478dcf0f853abc6564 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 20 Nov 2008 13:39:47 +0000 Subject: GFS2: Kill two daemons with one patch This patch removes the two daemons, gfs2_scand and gfs2_glockd and replaces them with a shrinker which is called from the VM. The net result is that GFS2 responds better when there is memory pressure, since it shrinks the glock cache at the same rate as the VFS shrinks the dcache and icache. There are no longer any time based criteria for shrinking glocks, they are kept until such time as the VM asks for more memory and then we demote just as many glocks as required. There are potential future changes to this code, including the possibility of sorting the glocks which are to be written back into inode number order, to get a better I/O ordering. 
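For readers unfamiliar with the 2008-era shrinker interface being adopted here, a rough sketch of its shape follows; it is an illustration only, with trim_lru() and cached_count standing in as assumed placeholders, while the real callback is gfs2_shrink_glock_memory() in the glock.c hunk below.

static atomic_t cached_count = ATOMIC_INIT(0);  /* placeholder object count */

static int example_shrink(int nr, gfp_t gfp_mask)
{
        if (nr) {
                /* refuse to recurse into the fs from a non-__GFP_FS context */
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                trim_lru(nr);   /* placeholder: demote up to nr cached glocks */
        }
        /* report roughly how much remains, scaled as the VM expects */
        return (atomic_read(&cached_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker example_shrinker = {
        .shrink = example_shrink,
        .seeks  = DEFAULT_SEEKS,
};

register_shrinker(&example_shrinker) at module init and unregister_shrinker() at exit mirror what the patch does for glocks, replacing the time-based gfs2_scand/gfs2_glockd pair with demand-driven reclaim.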
It would be very useful to have an elevator based workqueue implementation for this, as that would automatically deal with the read I/O cases at the same time. This patch is my answer to Andrew Morton's remark, made during the initial review of GFS2, asking why GFS2 needs so many kernel threads, the answer being that it doesn't :-) This patch is a net loss of about 200 lines of code. Signed-off-by: Steven Whitehouse --- fs/gfs2/Makefile | 2 +- fs/gfs2/daemon.c | 61 ------------- fs/gfs2/daemon.h | 15 ---- fs/gfs2/glock.c | 248 ++++++++++++++++++++++----------------------------- fs/gfs2/glock.h | 1 - fs/gfs2/glops.c | 32 ++++--- fs/gfs2/incore.h | 16 +--- fs/gfs2/inode.c | 1 - fs/gfs2/main.c | 2 +- fs/gfs2/mount.c | 21 +---- fs/gfs2/ops_fstype.c | 25 ------ fs/gfs2/ops_super.c | 5 -- fs/gfs2/sys.c | 42 +-------- 13 files changed, 130 insertions(+), 341 deletions(-) delete mode 100644 fs/gfs2/daemon.c delete mode 100644 fs/gfs2/daemon.h (limited to 'fs') diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile index ec65851ec80a..c1b4ec6a9650 100644 --- a/fs/gfs2/Makefile +++ b/fs/gfs2/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_GFS2_FS) += gfs2.o -gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \ +gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \ glops.o inode.o log.o lops.o locking.o main.o meta_io.o \ mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \ ops_fstype.o ops_inode.o ops_super.o quota.o \ diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c deleted file mode 100644 index 2662df0d5b93..000000000000 --- a/fs/gfs2/daemon.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "gfs2.h" -#include "incore.h" -#include "daemon.h" -#include "glock.h" -#include "log.h" -#include "recovery.h" -#include "super.h" -#include "util.h" - -/* This uses schedule_timeout() instead of msleep() because it's good for - the daemons to wake up more often than the timeout when unmounting so - the user's unmount doesn't sit there forever. - - The kthread functions used to start these daemons block and flush signals. */ - -/** - * gfs2_glockd - Reclaim unused glock structures - * @sdp: Pointer to GFS2 superblock - * - * One or more of these daemons run, reclaiming glocks on sd_reclaim_list. - * Number of daemons can be set by user, with num_glockd mount option. - */ - -int gfs2_glockd(void *data) -{ - struct gfs2_sbd *sdp = data; - - while (!kthread_should_stop()) { - while (atomic_read(&sdp->sd_reclaim_count)) - gfs2_reclaim_glock(sdp); - - wait_event_interruptible(sdp->sd_reclaim_wq, - (atomic_read(&sdp->sd_reclaim_count) || - kthread_should_stop())); - if (freezing(current)) - refrigerator(); - } - - return 0; -} - diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h deleted file mode 100644 index 5258954a234f..000000000000 --- a/fs/gfs2/daemon.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 
- * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. - */ - -#ifndef __DAEMON_DOT_H__ -#define __DAEMON_DOT_H__ - -int gfs2_glockd(void *data); - -#endif /* __DAEMON_DOT_H__ */ diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 4ddf3bd55dda..07ffc8123d74 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -62,9 +62,10 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int static DECLARE_RWSEM(gfs2_umount_flush_sem); static struct dentry *gfs2_root; -static struct task_struct *scand_process; -static unsigned int scand_secs = 5; static struct workqueue_struct *glock_workqueue; +static LIST_HEAD(lru_list); +static atomic_t lru_count = ATOMIC_INIT(0); +static spinlock_t lru_lock = SPIN_LOCK_UNLOCKED; #define GFS2_GL_HASH_SHIFT 15 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) @@ -174,6 +175,22 @@ static void gfs2_glock_hold(struct gfs2_glock *gl) atomic_inc(&gl->gl_ref); } +/** + * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list + * @gl: the glock + * + */ + +static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) +{ + spin_lock(&lru_lock); + if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { + list_add_tail(&gl->gl_lru, &lru_list); + atomic_inc(&lru_count); + } + spin_unlock(&lru_lock); +} + /** * gfs2_glock_put() - Decrement reference count on glock * @gl: The glock to put @@ -188,14 +205,23 @@ int gfs2_glock_put(struct gfs2_glock *gl) if (atomic_dec_and_test(&gl->gl_ref)) { hlist_del(&gl->gl_list); write_unlock(gl_lock_addr(gl->gl_hash)); + spin_lock(&lru_lock); + if (!list_empty(&gl->gl_lru)) { + list_del_init(&gl->gl_lru); + atomic_dec(&lru_count); + } + spin_unlock(&lru_lock); GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED); - GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim)); + GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru)); GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); glock_free(gl); rv = 1; goto out; } write_unlock(gl_lock_addr(gl->gl_hash)); + /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */ + if (atomic_read(&gl->gl_ref) == 2) + gfs2_glock_schedule_for_reclaim(gl); out: return rv; } @@ -837,7 +863,7 @@ static void wait_on_demote(struct gfs2_glock *gl) */ static void handle_callback(struct gfs2_glock *gl, unsigned int state, - int remote, unsigned long delay) + unsigned long delay) { int bit = delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE; @@ -845,9 +871,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { gl->gl_demote_state = state; gl->gl_demote_time = jiffies; - if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && - gl->gl_object) - gfs2_glock_schedule_for_reclaim(gl); } else if (gl->gl_demote_state != LM_ST_UNLOCKED && gl->gl_demote_state != state) { gl->gl_demote_state = LM_ST_UNLOCKED; @@ -1017,7 +1040,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh) spin_lock(&gl->gl_spin); if (gh->gh_flags & GL_NOCACHE) - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); + handle_callback(gl, LM_ST_UNLOCKED, 0); list_del_init(&gh->gh_list); if (find_first_holder(gl) == NULL) { @@ -1288,7 +1311,7 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, delay = gl->gl_ops->go_min_hold_time; spin_lock(&gl->gl_spin); - handle_callback(gl, state, 1, delay); + handle_callback(gl, state, delay); spin_unlock(&gl->gl_spin); if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) gfs2_glock_put(gl); @@ -1357,80 +1380,83 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) * Returns: 1 if it's ok */ -static int demote_ok(struct gfs2_glock *gl) +static int demote_ok(const struct gfs2_glock *gl) { const struct gfs2_glock_operations *glops = gl->gl_ops; - int demote = 1; - - if (test_bit(GLF_STICKY, &gl->gl_flags)) - demote = 0; - else if (glops->go_demote_ok) - demote = glops->go_demote_ok(gl); - - return demote; -} -/** - * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list - * @gl: the glock - * - */ - -void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) -{ - struct gfs2_sbd *sdp = gl->gl_sbd; - - spin_lock(&sdp->sd_reclaim_lock); - if (list_empty(&gl->gl_reclaim)) { - gfs2_glock_hold(gl); - list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); - atomic_inc(&sdp->sd_reclaim_count); - spin_unlock(&sdp->sd_reclaim_lock); - wake_up(&sdp->sd_reclaim_wq); - } else - spin_unlock(&sdp->sd_reclaim_lock); + if (gl->gl_state == LM_ST_UNLOCKED) + return 0; + if (!list_empty(&gl->gl_holders)) + return 0; + if (glops->go_demote_ok) + return glops->go_demote_ok(gl); + return 1; } -/** - * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list - * @sdp: the filesystem - * - * Called from gfs2_glockd() glock reclaim daemon, or when promoting a - * different glock and we notice that there are a lot of glocks in the - * reclaim list. 
- * - */ -void gfs2_reclaim_glock(struct gfs2_sbd *sdp) +static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) { struct gfs2_glock *gl; - int done_callback = 0; + int may_demote; + int nr_skipped = 0; + int got_ref = 0; + LIST_HEAD(skipped); - spin_lock(&sdp->sd_reclaim_lock); - if (list_empty(&sdp->sd_reclaim_list)) { - spin_unlock(&sdp->sd_reclaim_lock); - return; - } - gl = list_entry(sdp->sd_reclaim_list.next, - struct gfs2_glock, gl_reclaim); - list_del_init(&gl->gl_reclaim); - spin_unlock(&sdp->sd_reclaim_lock); + if (nr == 0) + goto out; - atomic_dec(&sdp->sd_reclaim_count); - atomic_inc(&sdp->sd_reclaimed); + if (!(gfp_mask & __GFP_FS)) + return -1; - spin_lock(&gl->gl_spin); - if (find_first_holder(gl) == NULL && - gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); - done_callback = 1; + spin_lock(&lru_lock); + while(nr && !list_empty(&lru_list)) { + gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); + list_del_init(&gl->gl_lru); + atomic_dec(&lru_count); + + /* Test for being demotable */ + if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { + gfs2_glock_hold(gl); + got_ref = 1; + spin_unlock(&lru_lock); + spin_lock(&gl->gl_spin); + may_demote = demote_ok(gl); + spin_unlock(&gl->gl_spin); + clear_bit(GLF_LOCK, &gl->gl_flags); + if (may_demote) { + handle_callback(gl, LM_ST_UNLOCKED, 0); + nr--; + if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) + gfs2_glock_put(gl); + } + spin_lock(&lru_lock); + if (may_demote) + continue; + } + if (list_empty(&gl->gl_lru) && + (atomic_read(&gl->gl_ref) <= (2 + got_ref))) { + nr_skipped++; + list_add(&gl->gl_lru, &skipped); + } + if (got_ref) { + spin_unlock(&lru_lock); + gfs2_glock_put(gl); + spin_lock(&lru_lock); + got_ref = 0; + } } - spin_unlock(&gl->gl_spin); - if (!done_callback || - queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) - gfs2_glock_put(gl); + list_splice(&skipped, &lru_list); + atomic_add(nr_skipped, &lru_count); + spin_unlock(&lru_lock); +out: + return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure; } +static struct shrinker glock_shrinker = { + .shrink = gfs2_shrink_glock_memory, + .seeks = DEFAULT_SEEKS, +}; + /** * examine_bucket - Call a function for glock in a hash bucket * @examiner: the function @@ -1475,26 +1501,6 @@ out: return has_entries; } -/** - * scan_glock - look at a glock and see if we can reclaim it - * @gl: the glock to look at - * - */ - -static void scan_glock(struct gfs2_glock *gl) -{ - if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) - return; - if (test_bit(GLF_LOCK, &gl->gl_flags)) - return; - - spin_lock(&gl->gl_spin); - if (find_first_holder(gl) == NULL && - gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) - gfs2_glock_schedule_for_reclaim(gl); - spin_unlock(&gl->gl_spin); -} - /** * clear_glock - look at a glock and see if we can free it from glock cache * @gl: the glock to look at @@ -1503,23 +1509,16 @@ static void scan_glock(struct gfs2_glock *gl) static void clear_glock(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_sbd; - int released; - - spin_lock(&sdp->sd_reclaim_lock); - if (!list_empty(&gl->gl_reclaim)) { - list_del_init(&gl->gl_reclaim); - atomic_dec(&sdp->sd_reclaim_count); - spin_unlock(&sdp->sd_reclaim_lock); - released = gfs2_glock_put(gl); - gfs2_assert(sdp, !released); - } else { - spin_unlock(&sdp->sd_reclaim_lock); + spin_lock(&lru_lock); + if (!list_empty(&gl->gl_lru)) { + list_del_init(&gl->gl_lru); + atomic_dec(&lru_count); } + spin_unlock(&lru_lock); 
spin_lock(&gl->gl_spin); if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); + handle_callback(gl, LM_ST_UNLOCKED, 0); spin_unlock(&gl->gl_spin); gfs2_glock_hold(gl); if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) @@ -1656,8 +1655,6 @@ static const char *gflags2str(char *buf, const unsigned long *gflags) char *p = buf; if (test_bit(GLF_LOCK, gflags)) *p++ = 'l'; - if (test_bit(GLF_STICKY, gflags)) - *p++ = 's'; if (test_bit(GLF_DEMOTE, gflags)) *p++ = 'D'; if (test_bit(GLF_PENDING_DEMOTE, gflags)) @@ -1776,34 +1773,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp) return error; } -/** - * gfs2_scand - Look for cached glocks and inodes to toss from memory - * @sdp: Pointer to GFS2 superblock - * - * One of these daemons runs, finding candidates to add to sd_reclaim_list. - * See gfs2_glockd() - */ - -static int gfs2_scand(void *data) -{ - unsigned x; - unsigned delay; - - while (!kthread_should_stop()) { - for (x = 0; x < GFS2_GL_HASH_SIZE; x++) - examine_bucket(scan_glock, NULL, x); - if (freezing(current)) - refrigerator(); - delay = scand_secs; - if (delay < 1) - delay = 1; - schedule_timeout_interruptible(delay * HZ); - } - - return 0; -} - - int __init gfs2_glock_init(void) { @@ -1817,28 +1786,21 @@ int __init gfs2_glock_init(void) } #endif - scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand"); - if (IS_ERR(scand_process)) - return PTR_ERR(scand_process); - glock_workqueue = create_workqueue("glock_workqueue"); - if (IS_ERR(glock_workqueue)) { - kthread_stop(scand_process); + if (IS_ERR(glock_workqueue)) return PTR_ERR(glock_workqueue); - } + + register_shrinker(&glock_shrinker); return 0; } void gfs2_glock_exit(void) { + unregister_shrinker(&glock_shrinker); destroy_workqueue(glock_workqueue); - kthread_stop(scand_process); } -module_param(scand_secs, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); - static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) { struct gfs2_glock *gl; diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 13a64ee6523b..543ec7ecfbda 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -129,7 +129,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl); void gfs2_lvb_unhold(struct gfs2_glock *gl); void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); -void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); void gfs2_reclaim_glock(struct gfs2_sbd *sdp); void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); void gfs2_glock_finish_truncate(struct gfs2_inode *ip); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 8ebff8ebae20..8522d3aa64fc 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -201,19 +201,12 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) * Returns: 1 if it's ok */ -static int inode_go_demote_ok(struct gfs2_glock *gl) +static int inode_go_demote_ok(const struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; - int demote = 0; - - if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages) - demote = 1; - else if (!sdp->sd_args.ar_localcaching && - time_after_eq(jiffies, gl->gl_stamp + - gfs2_tune_get(sdp, gt_demote_secs) * HZ)) - demote = 1; - - return demote; + if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) + return 0; + return 1; } /** @@ -284,7 +277,7 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) * Returns: 1 if it's ok */ -static int rgrp_go_demote_ok(struct gfs2_glock *gl) +static int rgrp_go_demote_ok(const struct 
gfs2_glock *gl) { return !gl->gl_aspace->i_mapping->nrpages; } @@ -385,6 +378,18 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) return 0; } +/** + * trans_go_demote_ok + * @gl: the glock + * + * Always returns 0 + */ + +static int trans_go_demote_ok(const struct gfs2_glock *gl) +{ + return 0; +} + /** * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock * @gl: the glock @@ -392,7 +397,7 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) * Returns: 1 if it's ok */ -static int quota_go_demote_ok(struct gfs2_glock *gl) +static int quota_go_demote_ok(const struct gfs2_glock *gl) { return !atomic_read(&gl->gl_lvb_count); } @@ -426,6 +431,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = { const struct gfs2_glock_operations gfs2_trans_glops = { .go_xmote_th = trans_go_sync, .go_xmote_bh = trans_go_xmote_bh, + .go_demote_ok = trans_go_demote_ok, .go_type = LM_TYPE_NONDISK, }; diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index dd7d0f8f3575..608849d00021 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -125,7 +125,7 @@ struct gfs2_glock_operations { void (*go_xmote_th) (struct gfs2_glock *gl); int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); void (*go_inval) (struct gfs2_glock *gl, int flags); - int (*go_demote_ok) (struct gfs2_glock *gl); + int (*go_demote_ok) (const struct gfs2_glock *gl); int (*go_lock) (struct gfs2_holder *gh); void (*go_unlock) (struct gfs2_holder *gh); int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); @@ -155,7 +155,6 @@ struct gfs2_holder { enum { GLF_LOCK = 1, - GLF_STICKY = 2, GLF_DEMOTE = 3, GLF_PENDING_DEMOTE = 4, GLF_DEMOTE_IN_PROGRESS = 5, @@ -190,7 +189,7 @@ struct gfs2_glock { unsigned long gl_tchange; void *gl_object; - struct list_head gl_reclaim; + struct list_head gl_lru; struct gfs2_sbd *gl_sbd; @@ -397,7 +396,6 @@ struct gfs2_args { struct gfs2_tune { spinlock_t gt_spin; - unsigned int gt_demote_secs; /* Cache retention for unheld glock */ unsigned int gt_incore_log_blocks; unsigned int gt_log_flush_secs; @@ -478,10 +476,6 @@ struct gfs2_sbd { /* Lock Stuff */ struct lm_lockstruct sd_lockstruct; - struct list_head sd_reclaim_list; - spinlock_t sd_reclaim_lock; - wait_queue_head_t sd_reclaim_wq; - atomic_t sd_reclaim_count; struct gfs2_holder sd_live_gh; struct gfs2_glock *sd_rename_gl; struct gfs2_glock *sd_trans_gl; @@ -541,8 +535,6 @@ struct gfs2_sbd { struct task_struct *sd_recoverd_process; struct task_struct *sd_logd_process; struct task_struct *sd_quotad_process; - struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX]; - unsigned int sd_glockd_num; /* Quota stuff */ @@ -615,10 +607,6 @@ struct gfs2_sbd { struct mutex sd_freeze_lock; unsigned int sd_freeze_count; - /* Counters */ - - atomic_t sd_reclaimed; - char sd_fsname[GFS2_FSNAME_LEN]; char sd_table_name[GFS2_FSNAME_LEN]; char sd_proto_name[GFS2_FSNAME_LEN]; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 97d3ce65e26f..3b87c188da41 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -386,7 +386,6 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip) gfs2_free_di(rgd, ip); gfs2_trans_end(sdp); - clear_bit(GLF_STICKY, &ip->i_gl->gl_flags); out_rg_gunlock: gfs2_glock_dq_uninit(&al->al_rgd_gh); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index cf39295ccb90..7cacfde32194 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -43,7 +43,7 @@ static void gfs2_init_glock_once(void *foo) INIT_LIST_HEAD(&gl->gl_holders); gl->gl_lvb = NULL; atomic_set(&gl->gl_lvb_count, 0); - 
INIT_LIST_HEAD(&gl->gl_reclaim); + INIT_LIST_HEAD(&gl->gl_lru); INIT_LIST_HEAD(&gl->gl_ail_list); atomic_set(&gl->gl_ail_count, 0); } diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c index f96eb90a2cfa..8c0f16e301f6 100644 --- a/fs/gfs2/mount.c +++ b/fs/gfs2/mount.c @@ -32,7 +32,6 @@ enum { Opt_debug, Opt_nodebug, Opt_upgrade, - Opt_num_glockd, Opt_acl, Opt_noacl, Opt_quota_off, @@ -57,7 +56,6 @@ static const match_table_t tokens = { {Opt_debug, "debug"}, {Opt_nodebug, "nodebug"}, {Opt_upgrade, "upgrade"}, - {Opt_num_glockd, "num_glockd=%d"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_quota_off, "quota=off"}, @@ -96,7 +94,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) spin_unlock(&gfs2_sys_margs_lock); /* Set some defaults */ - args->ar_num_glockd = GFS2_GLOCKD_DEFAULT; args->ar_quota = GFS2_QUOTA_DEFAULT; args->ar_data = GFS2_DATA_DEFAULT; } @@ -105,7 +102,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) process them */ for (options = data; (o = strsep(&options, ",")); ) { - int token, option; + int token; substring_t tmp[MAX_OPT_ARGS]; if (!*o) @@ -196,22 +193,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) goto cant_remount; args->ar_upgrade = 1; break; - case Opt_num_glockd: - if ((error = match_int(&tmp[0], &option))) { - fs_info(sdp, "problem getting num_glockd\n"); - goto out_error; - } - - if (remount && option != args->ar_num_glockd) - goto cant_remount; - if (!option || option > GFS2_GLOCKD_MAX) { - fs_info(sdp, "0 < num_glockd <= %u (not %u)\n", - GFS2_GLOCKD_MAX, option); - error = -EINVAL; - goto out_error; - } - args->ar_num_glockd = option; - break; case Opt_acl: args->ar_posix_acl = 1; sdp->sd_vfs->s_flags |= MS_POSIXACL; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index d159e7e72722..fc300eafda84 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -22,7 +22,6 @@ #include "gfs2.h" #include "incore.h" #include "bmap.h" -#include "daemon.h" #include "glock.h" #include "glops.h" #include "inode.h" @@ -56,7 +55,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt) { spin_lock_init(>->gt_spin); - gt->gt_demote_secs = 300; gt->gt_incore_log_blocks = 1024; gt->gt_log_flush_secs = 60; gt->gt_recoverd_secs = 60; @@ -88,10 +86,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) gfs2_tune_init(&sdp->sd_tune); - INIT_LIST_HEAD(&sdp->sd_reclaim_list); - spin_lock_init(&sdp->sd_reclaim_lock); - init_waitqueue_head(&sdp->sd_reclaim_wq); - mutex_init(&sdp->sd_inum_mutex); spin_lock_init(&sdp->sd_statfs_spin); @@ -443,24 +437,11 @@ out: static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, int undo) { - struct task_struct *p; int error = 0; if (undo) goto fail_trans; - for (sdp->sd_glockd_num = 0; - sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd; - sdp->sd_glockd_num++) { - p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd"); - error = IS_ERR(p); - if (error) { - fs_err(sdp, "can't start glockd thread: %d\n", error); - goto fail; - } - sdp->sd_glockd_process[sdp->sd_glockd_num] = p; - } - error = gfs2_glock_nq_num(sdp, GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE, @@ -493,7 +474,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, fs_err(sdp, "can't create transaction glock: %d\n", error); goto fail_rename; } - set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags); return 0; @@ -506,9 +486,6 @@ fail_live: fail_mount: gfs2_glock_dq_uninit(mount_gh); fail: - while (sdp->sd_glockd_num--) - 
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); - return error; } @@ -681,7 +658,6 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) return PTR_ERR(sdp->sd_jindex); } ip = GFS2_I(sdp->sd_jindex); - set_bit(GLF_STICKY, &ip->i_gl->gl_flags); /* Load in the journal index special file */ @@ -832,7 +808,6 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) goto fail_statfs; } ip = GFS2_I(sdp->sd_rindex); - set_bit(GLF_STICKY, &ip->i_gl->gl_flags); sdp->sd_rindex_uptodate = 0; /* Read in the quota inode */ diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index ad36af254fee..29f8a5c0b45b 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -142,8 +142,6 @@ static void gfs2_put_super(struct super_block *sb) kthread_stop(sdp->sd_quotad_process); kthread_stop(sdp->sd_logd_process); kthread_stop(sdp->sd_recoverd_process); - while (sdp->sd_glockd_num--) - kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); if (!(sb->s_flags & MS_RDONLY)) { error = gfs2_make_fs_ro(sdp); @@ -369,7 +367,6 @@ static void gfs2_clear_inode(struct inode *inode) */ if (test_bit(GIF_USER, &ip->i_flags)) { ip->i_gl->gl_object = NULL; - gfs2_glock_schedule_for_reclaim(ip->i_gl); gfs2_glock_put(ip->i_gl); ip->i_gl = NULL; if (ip->i_iopen_gh.gh_gl) { @@ -422,8 +419,6 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) seq_printf(s, ",debug"); if (args->ar_upgrade) seq_printf(s, ",upgrade"); - if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT) - seq_printf(s, ",num_glockd=%u", args->ar_num_glockd); if (args->ar_posix_acl) seq_printf(s, ",acl"); if (args->ar_quota != GFS2_QUOTA_DEFAULT) { diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 59e36fd80903..67ba5b7b759b 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -263,7 +263,6 @@ ARGS_ATTR(localcaching, "%d\n"); ARGS_ATTR(localflocks, "%d\n"); ARGS_ATTR(debug, "%d\n"); ARGS_ATTR(upgrade, "%d\n"); -ARGS_ATTR(num_glockd, "%u\n"); ARGS_ATTR(posix_acl, "%d\n"); ARGS_ATTR(quota, "%u\n"); ARGS_ATTR(suiddir, "%d\n"); @@ -279,7 +278,6 @@ static struct attribute *args_attrs[] = { &args_attr_localflocks.attr, &args_attr_debug.attr, &args_attr_upgrade.attr, - &args_attr_num_glockd.attr, &args_attr_posix_acl.attr, &args_attr_quota.attr, &args_attr_suiddir.attr, @@ -287,30 +285,6 @@ static struct attribute *args_attrs[] = { NULL, }; -/* - * display counters from superblock - */ - -struct counters_attr { - struct attribute attr; - ssize_t (*show)(struct gfs2_sbd *, char *); -}; - -#define COUNTERS_ATTR(name, fmt) \ -static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \ -{ \ - return snprintf(buf, PAGE_SIZE, fmt, \ - (unsigned int)atomic_read(&sdp->sd_##name)); \ -} \ -static struct counters_attr counters_attr_##name = __ATTR_RO(name) - -COUNTERS_ATTR(reclaimed, "%u\n"); - -static struct attribute *counters_attrs[] = { - &counters_attr_reclaimed.attr, - NULL, -}; - /* * get and set struct gfs2_tune fields */ @@ -393,7 +367,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\ } \ TUNE_ATTR_2(name, name##_store) -TUNE_ATTR(demote_secs, 0); TUNE_ATTR(incore_log_blocks, 0); TUNE_ATTR(log_flush_secs, 0); TUNE_ATTR(quota_warn_period, 0); @@ -411,7 +384,6 @@ TUNE_ATTR_DAEMON(logd_secs, logd_process); TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); static struct attribute *tune_attrs[] = { - &tune_attr_demote_secs.attr, &tune_attr_incore_log_blocks.attr, &tune_attr_log_flush_secs.attr, &tune_attr_quota_warn_period.attr, @@ -435,11 +407,6 @@ static struct attribute_group lockstruct_group = 
{ .attrs = lockstruct_attrs, }; -static struct attribute_group counters_group = { - .name = "counters", - .attrs = counters_attrs, -}; - static struct attribute_group args_group = { .name = "args", .attrs = args_attrs, @@ -464,13 +431,9 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) if (error) goto fail_reg; - error = sysfs_create_group(&sdp->sd_kobj, &counters_group); - if (error) - goto fail_lockstruct; - error = sysfs_create_group(&sdp->sd_kobj, &args_group); if (error) - goto fail_counters; + goto fail_lockstruct; error = sysfs_create_group(&sdp->sd_kobj, &tune_group); if (error) @@ -481,8 +444,6 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) fail_args: sysfs_remove_group(&sdp->sd_kobj, &args_group); -fail_counters: - sysfs_remove_group(&sdp->sd_kobj, &counters_group); fail_lockstruct: sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); fail_reg: @@ -496,7 +457,6 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp) { sysfs_remove_group(&sdp->sd_kobj, &tune_group); sysfs_remove_group(&sdp->sd_kobj, &args_group); - sysfs_remove_group(&sdp->sd_kobj, &counters_group); sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); kobject_put(&sdp->sd_kobj); } -- cgit From fdd1062ebaa422c5684f97fa91da06f91167d76b Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 26 Nov 2008 10:26:38 +0000 Subject: GFS2: Send some sensible sysfs stuff We ought to inform the user of the locktable and lockproto for each uevent we generate. Signed-off-by: Steven Whitehouse --- fs/gfs2/locking/dlm/sysfs.c | 16 +++++++++++++++- fs/gfs2/sys.c | 16 +++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c index 4ec571c3d8a9..9b7edcf7bd49 100644 --- a/fs/gfs2/locking/dlm/sysfs.c +++ b/fs/gfs2/locking/dlm/sysfs.c @@ -195,9 +195,23 @@ void gdlm_kobject_release(struct gdlm_ls *ls) kobject_put(&ls->kobj); } +static int gdlm_uevent(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env) +{ + struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj); + add_uevent_var(env, "LOCKTABLE=%s:%s", ls->clustername, ls->fsname); + add_uevent_var(env, "LOCKPROTO=lock_dlm"); + return 0; +} + +static struct kset_uevent_ops gdlm_uevent_ops = { + .uevent = gdlm_uevent, +}; + + int gdlm_sysfs_init(void) { - gdlm_kset = kset_create_and_add("lock_dlm", NULL, kernel_kobj); + gdlm_kset = kset_create_and_add("lock_dlm", &gdlm_uevent_ops, kernel_kobj); if (!gdlm_kset) { printk(KERN_WARNING "%s: can not create kset\n", __func__); return -ENOMEM; diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 67ba5b7b759b..298bcb6c2713 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -461,11 +461,25 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp) kobject_put(&sdp->sd_kobj); } +static int gfs2_uevent(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env) +{ + struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); + add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); + add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); + return 0; +} + +static struct kset_uevent_ops gfs2_uevent_ops = { + .uevent = gfs2_uevent, +}; + + int gfs2_sys_init(void) { gfs2_sys_margs = NULL; spin_lock_init(&gfs2_sys_margs_lock); - gfs2_kset = kset_create_and_add("gfs2", NULL, fs_kobj); + gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj); if (!gfs2_kset) return -ENOMEM; return 0; -- cgit From b52896813c2f16bcc5c5b67bb3c3f75bc084439b Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 26 Nov 2008 12:49:26 +0000 
Subject: GFS2: Fix bug in gfs2_lock_fs_check_clean() gfs2_lock_fs_check_clean() should not be calling gfs2_jindex_hold() since it doesn't work like rindex hold, despite the comment. That allows gfs2_jindex_hold() to be moved into ops_fstype.c where it can be made static. Signed-off-by: Steven Whitehouse --- fs/gfs2/dir.h | 1 + fs/gfs2/ops_fstype.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++ fs/gfs2/super.c | 74 ---------------------------------------------------- fs/gfs2/super.h | 1 - 4 files changed, 68 insertions(+), 75 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h index 8a468cac9328..4f919440c3be 100644 --- a/fs/gfs2/dir.h +++ b/fs/gfs2/dir.h @@ -11,6 +11,7 @@ #define __DIR_DOT_H__ #include +#include struct inode; struct gfs2_inode; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index fc300eafda84..4cae60f4a175 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -33,6 +33,7 @@ #include "util.h" #include "log.h" #include "quota.h" +#include "dir.h" #define DO 0 #define UNDO 1 @@ -638,6 +639,72 @@ static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp) sdp->sd_lockstruct.ls_lockspace); } +/** + * gfs2_jindex_hold - Grab a lock on the jindex + * @sdp: The GFS2 superblock + * @ji_gh: the holder for the jindex glock + * + * Returns: errno + */ + +static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) +{ + struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); + struct qstr name; + char buf[20]; + struct gfs2_jdesc *jd; + int error; + + name.name = buf; + + mutex_lock(&sdp->sd_jindex_mutex); + + for (;;) { + error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); + if (error) + break; + + name.len = sprintf(buf, "journal%u", sdp->sd_journals); + name.hash = gfs2_disk_hash(name.name, name.len); + + error = gfs2_dir_check(sdp->sd_jindex, &name, NULL); + if (error == -ENOENT) { + error = 0; + break; + } + + gfs2_glock_dq_uninit(ji_gh); + + if (error) + break; + + error = -ENOMEM; + jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); + if (!jd) + break; + + INIT_LIST_HEAD(&jd->extent_list); + jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); + if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { + if (!jd->jd_inode) + error = -ENOENT; + else + error = PTR_ERR(jd->jd_inode); + kfree(jd); + break; + } + + spin_lock(&sdp->sd_jindex_spin); + jd->jd_jid = sdp->sd_journals++; + list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); + spin_unlock(&sdp->sd_jindex_spin); + } + + mutex_unlock(&sdp->sd_jindex_mutex); + + return error; +} + static int init_journal(struct gfs2_sbd *sdp, int undo) { struct inode *master = sdp->sd_master_dir->d_inode; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index b85877062a48..3dd9f5788cb0 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -33,76 +33,6 @@ #include "trans.h" #include "util.h" -/** - * gfs2_jindex_hold - Grab a lock on the jindex - * @sdp: The GFS2 superblock - * @ji_gh: the holder for the jindex glock - * - * This is very similar to the gfs2_rindex_hold() function, except that - * in general we hold the jindex lock for longer periods of time and - * we grab it far less frequently (in general) then the rgrp lock. 
- * - * Returns: errno - */ - -int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) -{ - struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); - struct qstr name; - char buf[20]; - struct gfs2_jdesc *jd; - int error; - - name.name = buf; - - mutex_lock(&sdp->sd_jindex_mutex); - - for (;;) { - error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); - if (error) - break; - - name.len = sprintf(buf, "journal%u", sdp->sd_journals); - name.hash = gfs2_disk_hash(name.name, name.len); - - error = gfs2_dir_check(sdp->sd_jindex, &name, NULL); - if (error == -ENOENT) { - error = 0; - break; - } - - gfs2_glock_dq_uninit(ji_gh); - - if (error) - break; - - error = -ENOMEM; - jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); - if (!jd) - break; - - INIT_LIST_HEAD(&jd->extent_list); - jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); - if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { - if (!jd->jd_inode) - error = -ENOENT; - else - error = PTR_ERR(jd->jd_inode); - kfree(jd); - break; - } - - spin_lock(&sdp->sd_jindex_spin); - jd->jd_jid = sdp->sd_journals++; - list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); - spin_unlock(&sdp->sd_jindex_spin); - } - - mutex_unlock(&sdp->sd_jindex_mutex); - - return error; -} - /** * gfs2_jindex_free - Clear all the journal index information * @sdp: The GFS2 superblock @@ -580,10 +510,6 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_log_header_host lh; int error; - error = gfs2_jindex_hold(sdp, &ji_gh); - if (error) - return error; - list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL); if (!lfcc) { diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index 1848dad3ecba..c6254596713a 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -25,7 +25,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) return x; } -int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh); void gfs2_jindex_free(struct gfs2_sbd *sdp); struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); -- cgit From 2bfb6449b7a1f29a2a63e1d869103b5811c3b69f Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 26 Nov 2008 13:30:49 +0000 Subject: GFS2: Move four functions from super.c The functions which are being moved can all be marked static in their new locations, since they only have a single caller each. Their new locations are more logical than before and some of the functions are small enough that the compiler might well inline them. 
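[Editor's note: the inlining argument here rests on internal linkage. Once a helper is static and sits next to its only caller, the compiler may inline it and drop the standalone copy altogether, something it cannot do while the symbol still has external linkage and lives behind a shared header. A minimal, hypothetical C sketch of the pattern follows; the names helper/caller are invented for illustration and are not taken from the GFS2 code:

static int helper(int x)
{
        /* Small, single-caller, internal linkage: a prime inlining
         * candidate at the usual optimisation levels. */
        return 2 * x + 1;
}

int caller(int x)
{
        return helper(x);       /* typically folded into caller() */
}

This is what happens to gfs2_jdesc_make_dirty() and gfs2_jdesc_find_dirty() in the hunks below: each becomes static in the one file that still calls it.]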
Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 14 +++++ fs/gfs2/ops_super.c | 131 +++++++++++++++++++++++++++++++++++++++++ fs/gfs2/recovery.c | 22 +++++++ fs/gfs2/super.c | 164 ---------------------------------------------------- fs/gfs2/super.h | 4 -- 5 files changed, 167 insertions(+), 168 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 07ffc8123d74..6e298b070117 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1317,6 +1317,20 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, gfs2_glock_put(gl); } +static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid) +{ + struct gfs2_jdesc *jd; + + spin_lock(&sdp->sd_jindex_spin); + list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { + if (jd->jd_jid != jid) + continue; + jd->jd_dirty = 1; + break; + } + spin_unlock(&sdp->sd_jindex_spin); +} + /** * gfs2_glock_cb - Callback used by locking module * @sdp: Pointer to the superblock diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 29f8a5c0b45b..08837a728635 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -256,6 +256,137 @@ static void gfs2_unlockfs(struct super_block *sb) gfs2_unfreeze_fs(sb->s_fs_info); } +/** + * statfs_fill - fill in the sg for a given RG + * @rgd: the RG + * @sc: the sc structure + * + * Returns: 0 on success, -ESTALE if the LVB is invalid + */ + +static int statfs_slow_fill(struct gfs2_rgrpd *rgd, + struct gfs2_statfs_change_host *sc) +{ + gfs2_rgrp_verify(rgd); + sc->sc_total += rgd->rd_data; + sc->sc_free += rgd->rd_free; + sc->sc_dinodes += rgd->rd_dinodes; + return 0; +} + +/** + * gfs2_statfs_slow - Stat a filesystem using asynchronous locking + * @sdp: the filesystem + * @sc: the sc info that will be returned + * + * Any error (other than a signal) will cause this routine to fall back + * to the synchronous version. + * + * FIXME: This really shouldn't busy wait like this. 
+ * + * Returns: errno + */ + +static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) +{ + struct gfs2_holder ri_gh; + struct gfs2_rgrpd *rgd_next; + struct gfs2_holder *gha, *gh; + unsigned int slots = 64; + unsigned int x; + int done; + int error = 0, err; + + memset(sc, 0, sizeof(struct gfs2_statfs_change_host)); + gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); + if (!gha) + return -ENOMEM; + + error = gfs2_rindex_hold(sdp, &ri_gh); + if (error) + goto out; + + rgd_next = gfs2_rgrpd_get_first(sdp); + + for (;;) { + done = 1; + + for (x = 0; x < slots; x++) { + gh = gha + x; + + if (gh->gh_gl && gfs2_glock_poll(gh)) { + err = gfs2_glock_wait(gh); + if (err) { + gfs2_holder_uninit(gh); + error = err; + } else { + if (!error) + error = statfs_slow_fill( + gh->gh_gl->gl_object, sc); + gfs2_glock_dq_uninit(gh); + } + } + + if (gh->gh_gl) + done = 0; + else if (rgd_next && !error) { + error = gfs2_glock_nq_init(rgd_next->rd_gl, + LM_ST_SHARED, + GL_ASYNC, + gh); + rgd_next = gfs2_rgrpd_get_next(rgd_next); + done = 0; + } + + if (signal_pending(current)) + error = -ERESTARTSYS; + } + + if (done) + break; + + yield(); + } + + gfs2_glock_dq_uninit(&ri_gh); + +out: + kfree(gha); + return error; +} + +/** + * gfs2_statfs_i - Do a statfs + * @sdp: the filesystem + * @sg: the sg structure + * + * Returns: errno + */ + +static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) +{ + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; + + spin_lock(&sdp->sd_statfs_spin); + + *sc = *m_sc; + sc->sc_total += l_sc->sc_total; + sc->sc_free += l_sc->sc_free; + sc->sc_dinodes += l_sc->sc_dinodes; + + spin_unlock(&sdp->sd_statfs_spin); + + if (sc->sc_free < 0) + sc->sc_free = 0; + if (sc->sc_free > sc->sc_total) + sc->sc_free = sc->sc_total; + if (sc->sc_dinodes < 0) + sc->sc_dinodes = 0; + + return 0; +} + /** * gfs2_statfs - Gather and return stats about the filesystem * @sb: The superblock diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index b56ba3db7771..efd09c3d2b26 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -585,6 +585,28 @@ fail: return error; } +static struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp) +{ + struct gfs2_jdesc *jd; + int found = 0; + + spin_lock(&sdp->sd_jindex_spin); + + list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { + if (jd->jd_dirty) { + jd->jd_dirty = 0; + found = 1; + break; + } + } + spin_unlock(&sdp->sd_jindex_spin); + + if (!found) + jd = NULL; + + return jd; +} + /** * gfs2_check_journals - Recover any dirty journals * @sdp: the filesystem diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 3dd9f5788cb0..141b781f2fcc 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -96,39 +96,6 @@ struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid) return jd; } -void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid) -{ - struct gfs2_jdesc *jd; - - spin_lock(&sdp->sd_jindex_spin); - jd = jdesc_find_i(&sdp->sd_jindex_list, jid); - if (jd) - jd->jd_dirty = 1; - spin_unlock(&sdp->sd_jindex_spin); -} - -struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp) -{ - struct gfs2_jdesc *jd; - int found = 0; - - spin_lock(&sdp->sd_jindex_spin); - - list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { - if (jd->jd_dirty) { - jd->jd_dirty = 0; - found = 1; - break; - } - } - spin_unlock(&sdp->sd_jindex_spin); - - if (!found) - jd = NULL; - - return jd; -} - int 
gfs2_jdesc_check(struct gfs2_jdesc *jd) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); @@ -353,137 +320,6 @@ out: return error; } -/** - * gfs2_statfs_i - Do a statfs - * @sdp: the filesystem - * @sg: the sg structure - * - * Returns: errno - */ - -int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) -{ - struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; - struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; - - spin_lock(&sdp->sd_statfs_spin); - - *sc = *m_sc; - sc->sc_total += l_sc->sc_total; - sc->sc_free += l_sc->sc_free; - sc->sc_dinodes += l_sc->sc_dinodes; - - spin_unlock(&sdp->sd_statfs_spin); - - if (sc->sc_free < 0) - sc->sc_free = 0; - if (sc->sc_free > sc->sc_total) - sc->sc_free = sc->sc_total; - if (sc->sc_dinodes < 0) - sc->sc_dinodes = 0; - - return 0; -} - -/** - * statfs_fill - fill in the sg for a given RG - * @rgd: the RG - * @sc: the sc structure - * - * Returns: 0 on success, -ESTALE if the LVB is invalid - */ - -static int statfs_slow_fill(struct gfs2_rgrpd *rgd, - struct gfs2_statfs_change_host *sc) -{ - gfs2_rgrp_verify(rgd); - sc->sc_total += rgd->rd_data; - sc->sc_free += rgd->rd_free; - sc->sc_dinodes += rgd->rd_dinodes; - return 0; -} - -/** - * gfs2_statfs_slow - Stat a filesystem using asynchronous locking - * @sdp: the filesystem - * @sc: the sc info that will be returned - * - * Any error (other than a signal) will cause this routine to fall back - * to the synchronous version. - * - * FIXME: This really shouldn't busy wait like this. - * - * Returns: errno - */ - -int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) -{ - struct gfs2_holder ri_gh; - struct gfs2_rgrpd *rgd_next; - struct gfs2_holder *gha, *gh; - unsigned int slots = 64; - unsigned int x; - int done; - int error = 0, err; - - memset(sc, 0, sizeof(struct gfs2_statfs_change_host)); - gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); - if (!gha) - return -ENOMEM; - - error = gfs2_rindex_hold(sdp, &ri_gh); - if (error) - goto out; - - rgd_next = gfs2_rgrpd_get_first(sdp); - - for (;;) { - done = 1; - - for (x = 0; x < slots; x++) { - gh = gha + x; - - if (gh->gh_gl && gfs2_glock_poll(gh)) { - err = gfs2_glock_wait(gh); - if (err) { - gfs2_holder_uninit(gh); - error = err; - } else { - if (!error) - error = statfs_slow_fill( - gh->gh_gl->gl_object, sc); - gfs2_glock_dq_uninit(gh); - } - } - - if (gh->gh_gl) - done = 0; - else if (rgd_next && !error) { - error = gfs2_glock_nq_init(rgd_next->rd_gl, - LM_ST_SHARED, - GL_ASYNC, - gh); - rgd_next = gfs2_rgrpd_get_next(rgd_next); - done = 0; - } - - if (signal_pending(current)) - error = -ERESTARTSYS; - } - - if (done) - break; - - yield(); - } - - gfs2_glock_dq_uninit(&ri_gh); - -out: - kfree(gha); - return error; -} - struct lfcc { struct list_head list; struct gfs2_holder gh; diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index c6254596713a..f6b8b00ad881 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -28,8 +28,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) void gfs2_jindex_free(struct gfs2_sbd *sdp); struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); -void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid); -struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp); int gfs2_jdesc_check(struct gfs2_jdesc *jd); int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename, @@ -41,8 +39,6 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp); void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 
free, s64 dinodes); int gfs2_statfs_sync(struct gfs2_sbd *sdp); -int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); -int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); int gfs2_freeze_fs(struct gfs2_sbd *sdp); void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); -- cgit From 2e204703a1161e9bae38ba0d3d0df04a679e6f4f Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 26 Nov 2008 14:01:26 +0000 Subject: GFS2: Remove ancient, unused code Remove code that used to have something to do with initrd but has been unused for a long time. Signed-off-by: Steven Whitehouse --- fs/gfs2/mount.c | 8 -------- fs/gfs2/sys.c | 6 ------ fs/gfs2/sys.h | 4 ---- 3 files changed, 18 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c index 8c0f16e301f6..3cb0a44ba023 100644 --- a/fs/gfs2/mount.c +++ b/fs/gfs2/mount.c @@ -85,14 +85,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) int error = 0; if (!remount) { - /* If someone preloaded options, use those instead */ - spin_lock(&gfs2_sys_margs_lock); - if (gfs2_sys_margs) { - data = gfs2_sys_margs; - gfs2_sys_margs = NULL; - } - spin_unlock(&gfs2_sys_margs_lock); - /* Set some defaults */ args->ar_quota = GFS2_QUOTA_DEFAULT; args->ar_data = GFS2_DATA_DEFAULT; diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 298bcb6c2713..26c1fa777a95 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -26,9 +26,6 @@ #include "quota.h" #include "util.h" -char *gfs2_sys_margs; -spinlock_t gfs2_sys_margs_lock; - static ssize_t id_show(struct gfs2_sbd *sdp, char *buf) { return snprintf(buf, PAGE_SIZE, "%u:%u\n", @@ -477,8 +474,6 @@ static struct kset_uevent_ops gfs2_uevent_ops = { int gfs2_sys_init(void) { - gfs2_sys_margs = NULL; - spin_lock_init(&gfs2_sys_margs_lock); gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj); if (!gfs2_kset) return -ENOMEM; @@ -487,7 +482,6 @@ int gfs2_sys_init(void) void gfs2_sys_uninit(void) { - kfree(gfs2_sys_margs); kset_unregister(gfs2_kset); } diff --git a/fs/gfs2/sys.h b/fs/gfs2/sys.h index 1ca8cdac5304..e94560e836d7 100644 --- a/fs/gfs2/sys.h +++ b/fs/gfs2/sys.h @@ -13,10 +13,6 @@ #include struct gfs2_sbd; -/* Allow args to be passed to GFS2 when using an initial ram disk */ -extern char *gfs2_sys_margs; -extern spinlock_t gfs2_sys_margs_lock; - int gfs2_sys_fs_add(struct gfs2_sbd *sdp); void gfs2_sys_fs_del(struct gfs2_sbd *sdp); -- cgit From 3af165ac4d099385b12e3e75a9ee3ffd02da33e0 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 27 Nov 2008 08:27:28 +0000 Subject: GFS2: Fix use-after-free bug on umount There was a use-after-free with the GFS2 super block during umount. This patch moves almost all of the umount code from ->put_super into ->kill_sb, the only bit that cannot be moved being the glock hash clearing which has to remain as ->put_super due to umount ordering requirements. As a result its now obvious that the kfree is the final operation, whereas before it was hidden in ->put_super. Also gfs2_jindex_free is then only referenced from a single file so thats moved and marked static too. 
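[Editor's note: the ordering this patch leans on is that ->put_super is called from generic_shutdown_super(), which kill_block_super() invokes before releasing the block device, so the per-mount private data must stay allocated until kill_block_super() returns. Below is a stripped-down, hypothetical ->kill_sb sketch of that shape; the example_* names and the placeholder struct are invented, and GFS2's real teardown is the diff that follows:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_sbd {
        /* per-mount state: journals, daemon threads, glock hash, ... */
        int placeholder;
};

static void example_kill_sb(struct super_block *sb)
{
        struct example_sbd *sdp = sb->s_fs_info;

        if (!sdp) {
                /* fill_super failed before attaching private data */
                kill_block_super(sb);
                return;
        }

        /*
         * ->put_super (and anything else that dereferences sdp during
         * generic shutdown) runs from inside kill_block_super(), so
         * sdp must remain valid across this call.
         */
        kill_block_super(sb);

        /* Nothing can reach sdp any more; freeing it last is safe. */
        kfree(sdp);
}

static struct file_system_type example_fs_type = {
        .owner   = THIS_MODULE,
        .name    = "examplefs",
        .kill_sb = example_kill_sb,
};

As the later revert and the "try #2" patch in this series show, the awkward part is deciding how much of the teardown can move ahead of kill_block_super() without upsetting the jdata writeback ordering; in the end the bulk of it stays in ->put_super and essentially only the final kfree() lands after kill_block_super().]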
Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 3 +- fs/gfs2/glock.h | 2 +- fs/gfs2/ops_fstype.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++------ fs/gfs2/ops_super.c | 68 ++---------------------------------- fs/gfs2/super.c | 34 ------------------ fs/gfs2/super.h | 3 +- 6 files changed, 94 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 6e298b070117..5eae62e7f778 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1547,8 +1547,9 @@ static void clear_glock(struct gfs2_glock *gl) * Called when unmounting the filesystem. */ -void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) +void gfs2_gl_hash_clear(struct super_block *sb) { + struct gfs2_sbd *sdp = sb->s_fs_info; unsigned long t; unsigned int x; int cont; diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 543ec7ecfbda..ce54f338cff9 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -130,7 +130,7 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl); void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); void gfs2_reclaim_glock(struct gfs2_sbd *sdp); -void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); +void gfs2_gl_hash_clear(struct super_block *sb); void gfs2_glock_finish_truncate(struct gfs2_inode *ip); int __init gfs2_glock_init(void); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 4cae60f4a175..2e735bece6b9 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -705,6 +705,40 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) return error; } +/** + * gfs2_jindex_free - Clear all the journal index information + * @sdp: The GFS2 superblock + * + */ + +static void gfs2_jindex_free(struct gfs2_sbd *sdp) +{ + struct list_head list, *head; + struct gfs2_jdesc *jd; + struct gfs2_journal_extent *jext; + + spin_lock(&sdp->sd_jindex_spin); + list_add(&list, &sdp->sd_jindex_list); + list_del_init(&sdp->sd_jindex_list); + sdp->sd_journals = 0; + spin_unlock(&sdp->sd_jindex_spin); + + while (!list_empty(&list)) { + jd = list_entry(list.next, struct gfs2_jdesc, jd_list); + head = &jd->extent_list; + while (!list_empty(head)) { + jext = list_entry(head->next, + struct gfs2_journal_extent, + extent_list); + list_del(&jext->extent_list); + kfree(jext); + } + list_del(&jd->jd_list); + iput(jd->jd_inode); + kfree(jd); + } +} + static int init_journal(struct gfs2_sbd *sdp, int undo) { struct inode *master = sdp->sd_master_dir->d_inode; @@ -1203,7 +1237,7 @@ fail_sb: fail_locking: init_locking(sdp, &mount_gh, UNDO); fail_lm: - gfs2_gl_hash_clear(sdp); + gfs2_gl_hash_clear(sb); gfs2_lm_unmount(sdp); while (invalidate_inodes(sb)) yield(); @@ -1263,17 +1297,61 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, static void gfs2_kill_sb(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; - if (sdp) { - gfs2_meta_syncfs(sdp); - dput(sdp->sd_root_dir); - dput(sdp->sd_master_dir); - sdp->sd_root_dir = NULL; - sdp->sd_master_dir = NULL; + + if (sdp == NULL) { + kill_block_super(sb); + return; } - shrink_dcache_sb(sb); + gfs2_meta_syncfs(sdp); + dput(sdp->sd_root_dir); + dput(sdp->sd_master_dir); + sdp->sd_root_dir = NULL; + sdp->sd_master_dir = NULL; + + /* Unfreeze the filesystem, if we need to */ + mutex_lock(&sdp->sd_freeze_lock); + if (sdp->sd_freeze_count) + gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); + mutex_unlock(&sdp->sd_freeze_lock); + + kthread_stop(sdp->sd_quotad_process); + kthread_stop(sdp->sd_logd_process); + kthread_stop(sdp->sd_recoverd_process); + + if (!(sb->s_flags & MS_RDONLY)) { + int 
error = gfs2_make_fs_ro(sdp); + if (error) + gfs2_io_error(sdp); + } + + /* At this point, we're through modifying the disk */ + gfs2_jindex_free(sdp); + gfs2_clear_rgrpd(sdp); + iput(sdp->sd_jindex); + iput(sdp->sd_inum_inode); + iput(sdp->sd_statfs_inode); + iput(sdp->sd_rindex); + iput(sdp->sd_quota_inode); + + gfs2_glock_put(sdp->sd_rename_gl); + gfs2_glock_put(sdp->sd_trans_gl); + + if (!sdp->sd_args.ar_spectator) { + gfs2_glock_dq_uninit(&sdp->sd_journal_gh); + gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); + gfs2_glock_dq_uninit(&sdp->sd_ir_gh); + gfs2_glock_dq_uninit(&sdp->sd_sc_gh); + gfs2_glock_dq_uninit(&sdp->sd_qc_gh); + iput(sdp->sd_ir_inode); + iput(sdp->sd_sc_inode); + iput(sdp->sd_qc_inode); + } + gfs2_glock_dq_uninit(&sdp->sd_live_gh); kill_block_super(sb); - if (sdp) - gfs2_delete_debugfs_file(sdp); + gfs2_lm_unmount(sdp); + gfs2_sys_fs_del(sdp); + gfs2_delete_debugfs_file(sdp); + kfree(sdp); } struct file_system_type gfs2_fs_type = { diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 08837a728635..bd08a0a8d9bf 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -95,7 +95,7 @@ do_flush: * Returns: errno */ -static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) +int gfs2_make_fs_ro(struct gfs2_sbd *sdp) { struct gfs2_holder t_gh; int error; @@ -121,70 +121,6 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) return error; } -/** - * gfs2_put_super - Unmount the filesystem - * @sb: The VFS superblock - * - */ - -static void gfs2_put_super(struct super_block *sb) -{ - struct gfs2_sbd *sdp = sb->s_fs_info; - int error; - - /* Unfreeze the filesystem, if we need to */ - - mutex_lock(&sdp->sd_freeze_lock); - if (sdp->sd_freeze_count) - gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); - mutex_unlock(&sdp->sd_freeze_lock); - - kthread_stop(sdp->sd_quotad_process); - kthread_stop(sdp->sd_logd_process); - kthread_stop(sdp->sd_recoverd_process); - - if (!(sb->s_flags & MS_RDONLY)) { - error = gfs2_make_fs_ro(sdp); - if (error) - gfs2_io_error(sdp); - } - /* At this point, we're through modifying the disk */ - - /* Release stuff */ - - iput(sdp->sd_jindex); - iput(sdp->sd_inum_inode); - iput(sdp->sd_statfs_inode); - iput(sdp->sd_rindex); - iput(sdp->sd_quota_inode); - - gfs2_glock_put(sdp->sd_rename_gl); - gfs2_glock_put(sdp->sd_trans_gl); - - if (!sdp->sd_args.ar_spectator) { - gfs2_glock_dq_uninit(&sdp->sd_journal_gh); - gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); - gfs2_glock_dq_uninit(&sdp->sd_ir_gh); - gfs2_glock_dq_uninit(&sdp->sd_sc_gh); - gfs2_glock_dq_uninit(&sdp->sd_qc_gh); - iput(sdp->sd_ir_inode); - iput(sdp->sd_sc_inode); - iput(sdp->sd_qc_inode); - } - - gfs2_glock_dq_uninit(&sdp->sd_live_gh); - gfs2_clear_rgrpd(sdp); - gfs2_jindex_free(sdp); - /* Take apart glock structures and buffer lists */ - gfs2_gl_hash_clear(sdp); - /* Unmount the locking protocol */ - gfs2_lm_unmount(sdp); - - /* At this point, we're through participating in the lockspace */ - gfs2_sys_fs_del(sdp); - kfree(sdp); -} - /** * gfs2_write_super * @sb: the superblock @@ -686,7 +622,7 @@ const struct super_operations gfs2_super_ops = { .destroy_inode = gfs2_destroy_inode, .write_inode = gfs2_write_inode, .delete_inode = gfs2_delete_inode, - .put_super = gfs2_put_super, + .put_super = gfs2_gl_hash_clear, .write_super = gfs2_write_super, .sync_fs = gfs2_sync_fs, .write_super_lockfs = gfs2_write_super_lockfs, diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 141b781f2fcc..f14658b20204 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -33,40 +33,6 @@ #include "trans.h" #include "util.h" -/** - * 
gfs2_jindex_free - Clear all the journal index information - * @sdp: The GFS2 superblock - * - */ - -void gfs2_jindex_free(struct gfs2_sbd *sdp) -{ - struct list_head list, *head; - struct gfs2_jdesc *jd; - struct gfs2_journal_extent *jext; - - spin_lock(&sdp->sd_jindex_spin); - list_add(&list, &sdp->sd_jindex_list); - list_del_init(&sdp->sd_jindex_list); - sdp->sd_journals = 0; - spin_unlock(&sdp->sd_jindex_spin); - - while (!list_empty(&list)) { - jd = list_entry(list.next, struct gfs2_jdesc, jd_list); - head = &jd->extent_list; - while (!list_empty(head)) { - jext = list_entry(head->next, - struct gfs2_journal_extent, - extent_list); - list_del(&jext->extent_list); - kfree(jext); - } - list_del(&jd->jd_list); - iput(jd->jd_inode); - kfree(jd); - } -} - static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) { struct gfs2_jdesc *jd; diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index f6b8b00ad881..4d2492b3e7ef 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -25,8 +25,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) return x; } -void gfs2_jindex_free(struct gfs2_sbd *sdp); - struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); int gfs2_jdesc_check(struct gfs2_jdesc *jd); @@ -34,6 +32,7 @@ int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename, struct gfs2_inode **ipp); int gfs2_make_fs_rw(struct gfs2_sbd *sdp); +int gfs2_make_fs_ro(struct gfs2_sbd *sdp); int gfs2_statfs_init(struct gfs2_sbd *sdp); void gfs2_statfs_change(struct gfs2_sbd *sdp, -- cgit From 9a776db7371b9c77a8f4f0d2ac6374d78ac7db7d Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 27 Nov 2008 09:42:51 +0000 Subject: GFS2: Send useful information with uevent messages In order to distinguish between two differing uevent messages and to avoid using the (racy) method of reading status from sysfs in future, this adds some status information to our uevent messages. Btw, before anybody says "sysfs isn't racy", I'm aware of that, but the way that GFS2 was using it (send an ambiugous uevent and then expect the receiver to read sysfs to find out the status of the reported operation) was. The additional benefit of using the new interface is that it should be possible for a node to recover multiple journals at the same time, since there is no longer any confusion as to which journal the status belongs to. At some future stage, when all the userland programs have been converted, I intend to remove the old interface. Signed-off-by: Steven Whitehouse --- fs/gfs2/locking/dlm/mount.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c index 0c4cbe6c8285..1aa7eb6a0226 100644 --- a/fs/gfs2/locking/dlm/mount.c +++ b/fs/gfs2/locking/dlm/mount.c @@ -194,17 +194,25 @@ out: static void gdlm_recovery_done(void *lockspace, unsigned int jid, unsigned int message) { + char env_jid[20]; + char env_status[20]; + char *envp[] = { env_jid, env_status, NULL }; struct gdlm_ls *ls = lockspace; ls->recover_jid_done = jid; ls->recover_jid_status = message; - kobject_uevent(&ls->kobj, KOBJ_CHANGE); + sprintf(env_jid, "JID=%d", jid); + sprintf(env_status, "RECOVERY=%s", + message == LM_RD_SUCCESS ? 
"Done" : "Failed"); + kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp); } static void gdlm_others_may_mount(void *lockspace) { + char *message = "FIRSTMOUNT=Done"; + char *envp[] = { message, NULL }; struct gdlm_ls *ls = lockspace; ls->first_done = 1; - kobject_uevent(&ls->kobj, KOBJ_CHANGE); + kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp); } /* Userspace gets the offline uevent, blocks new gfs locks on -- cgit From 7ed122e42c72b3e4531f8b4a9f72159e8303ac15 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 10 Dec 2008 10:28:10 +0000 Subject: GFS2: Streamline alloc calculations for writes This patch removes some unused code, and make the calculation of the number of blocks required conditional in order to reduce the number of times this (potentially expensive) calculation is done. Signed-off-by: Steven Whitehouse --- fs/gfs2/bmap.c | 49 +++++++------------------------------------------ fs/gfs2/bmap.h | 34 ++++++++++++++++++++++++++++++---- fs/gfs2/ops_address.c | 6 ++++-- fs/gfs2/ops_file.c | 2 +- 4 files changed, 42 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 789f28cfdc20..11ffc56f1f81 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1230,35 +1230,6 @@ int gfs2_file_dealloc(struct gfs2_inode *ip) return trunc_dealloc(ip, 0); } -/** - * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file - * @ip: the file - * @len: the number of bytes to be written to the file - * @data_blocks: returns the number of data blocks required - * @ind_blocks: returns the number of indirect blocks required - * - */ - -void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len, - unsigned int *data_blocks, unsigned int *ind_blocks) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - unsigned int tmp; - - if (gfs2_is_dir(ip)) { - *data_blocks = DIV_ROUND_UP(len, sdp->sd_jbsize) + 2; - *ind_blocks = 3 * (sdp->sd_max_jheight - 1); - } else { - *data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3; - *ind_blocks = 3 * (sdp->sd_max_height - 1); - } - - for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) { - tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); - *ind_blocks += tmp; - } -} - /** * gfs2_write_alloc_required - figure out if a write will require an allocation * @ip: the file being written to @@ -1276,6 +1247,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, struct buffer_head bh; unsigned int shift; u64 lblock, lblock_stop, size; + u64 end_of_file; *alloc_required = 0; @@ -1291,19 +1263,12 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, *alloc_required = 1; shift = sdp->sd_sb.sb_bsize_shift; - if (gfs2_is_dir(ip)) { - unsigned int bsize = sdp->sd_jbsize; - lblock = offset; - do_div(lblock, bsize); - lblock_stop = offset + len + bsize - 1; - do_div(lblock_stop, bsize); - } else { - u64 end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; - lblock = offset >> shift; - lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; - if (lblock_stop > end_of_file) - return 0; - } + BUG_ON(gfs2_is_dir(ip)); + end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; + lblock = offset >> shift; + lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; + if (lblock_stop > end_of_file) + return 0; size = (lblock_stop - lblock) << shift; do { diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h index 4e6cde2943bd..c983177e05ac 100644 --- a/fs/gfs2/bmap.h +++ b/fs/gfs2/bmap.h @@ -10,10 +10,40 @@ #ifndef __BMAP_DOT_H__ #define __BMAP_DOT_H__ +#include "inode.h" + 
struct inode; struct gfs2_inode; struct page; + +/** + * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file + * @ip: the file + * @len: the number of bytes to be written to the file + * @data_blocks: returns the number of data blocks required + * @ind_blocks: returns the number of indirect blocks required + * + */ + +static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip, + unsigned int len, + unsigned int *data_blocks, + unsigned int *ind_blocks) +{ + const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + unsigned int tmp; + + BUG_ON(gfs2_is_dir(ip)); + *data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3; + *ind_blocks = 3 * (sdp->sd_max_height - 1); + + for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) { + tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); + *ind_blocks += tmp; + } +} + int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page); int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create); int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen); @@ -21,10 +51,6 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi int gfs2_truncatei(struct gfs2_inode *ip, u64 size); int gfs2_truncatei_resume(struct gfs2_inode *ip); int gfs2_file_dealloc(struct gfs2_inode *ip); - -void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len, - unsigned int *data_blocks, - unsigned int *ind_blocks); int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, unsigned int len, int *alloc_required); diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 0df560f4269a..6e4ea36c6605 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -625,7 +625,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); - unsigned int data_blocks, ind_blocks, rblocks; + unsigned int data_blocks = 0, ind_blocks = 0, rblocks; int alloc_required; int error = 0; struct gfs2_alloc *al; @@ -639,11 +639,13 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, if (unlikely(error)) goto out_uninit; - gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); if (error) goto out_unlock; + if (alloc_required || gfs2_is_jdata(ip)) + gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); + if (alloc_required) { al = gfs2_alloc_get(ip); if (!al) { diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index a6b7a733fd4d..289c5f54ba53 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -355,7 +355,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page) goto out; set_bit(GIF_SW_PAGED, &ip->i_flags); - gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); if (ret || !alloc_required) goto out_unlock; @@ -367,6 +366,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page) ret = gfs2_quota_lock_check(ip); if (ret) goto out_alloc_put; + gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); al->al_requested = data_blocks + ind_blocks; ret = gfs2_inplace_reserve(ip); if (ret) -- cgit From fefc03bfedeff2002f14e848ecb7c0cd77ee0b15 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 19 Dec 2008 15:32:06 +0000 Subject: Revert "GFS2: Fix use-after-free bug on umount" This reverts 
commit 78802499912f1ba31ce83a94c55b5a980f250a43. The original patch is causing problems in relation to order of operations at umount in relation to jdata files. I need to fix this a different way. Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 3 +- fs/gfs2/glock.h | 2 +- fs/gfs2/ops_fstype.c | 98 ++++++---------------------------------------------- fs/gfs2/ops_super.c | 68 ++++++++++++++++++++++++++++++++++-- fs/gfs2/super.c | 34 ++++++++++++++++++ fs/gfs2/super.h | 3 +- 6 files changed, 114 insertions(+), 94 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 5eae62e7f778..6e298b070117 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1547,9 +1547,8 @@ static void clear_glock(struct gfs2_glock *gl) * Called when unmounting the filesystem. */ -void gfs2_gl_hash_clear(struct super_block *sb) +void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) { - struct gfs2_sbd *sdp = sb->s_fs_info; unsigned long t; unsigned int x; int cont; diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index ce54f338cff9..543ec7ecfbda 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -130,7 +130,7 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl); void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); void gfs2_reclaim_glock(struct gfs2_sbd *sdp); -void gfs2_gl_hash_clear(struct super_block *sb); +void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); void gfs2_glock_finish_truncate(struct gfs2_inode *ip); int __init gfs2_glock_init(void); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 2e735bece6b9..4cae60f4a175 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -705,40 +705,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) return error; } -/** - * gfs2_jindex_free - Clear all the journal index information - * @sdp: The GFS2 superblock - * - */ - -static void gfs2_jindex_free(struct gfs2_sbd *sdp) -{ - struct list_head list, *head; - struct gfs2_jdesc *jd; - struct gfs2_journal_extent *jext; - - spin_lock(&sdp->sd_jindex_spin); - list_add(&list, &sdp->sd_jindex_list); - list_del_init(&sdp->sd_jindex_list); - sdp->sd_journals = 0; - spin_unlock(&sdp->sd_jindex_spin); - - while (!list_empty(&list)) { - jd = list_entry(list.next, struct gfs2_jdesc, jd_list); - head = &jd->extent_list; - while (!list_empty(head)) { - jext = list_entry(head->next, - struct gfs2_journal_extent, - extent_list); - list_del(&jext->extent_list); - kfree(jext); - } - list_del(&jd->jd_list); - iput(jd->jd_inode); - kfree(jd); - } -} - static int init_journal(struct gfs2_sbd *sdp, int undo) { struct inode *master = sdp->sd_master_dir->d_inode; @@ -1237,7 +1203,7 @@ fail_sb: fail_locking: init_locking(sdp, &mount_gh, UNDO); fail_lm: - gfs2_gl_hash_clear(sb); + gfs2_gl_hash_clear(sdp); gfs2_lm_unmount(sdp); while (invalidate_inodes(sb)) yield(); @@ -1297,61 +1263,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, static void gfs2_kill_sb(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; - - if (sdp == NULL) { - kill_block_super(sb); - return; - } - gfs2_meta_syncfs(sdp); - dput(sdp->sd_root_dir); - dput(sdp->sd_master_dir); - sdp->sd_root_dir = NULL; - sdp->sd_master_dir = NULL; - - /* Unfreeze the filesystem, if we need to */ - mutex_lock(&sdp->sd_freeze_lock); - if (sdp->sd_freeze_count) - gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); - mutex_unlock(&sdp->sd_freeze_lock); - - kthread_stop(sdp->sd_quotad_process); - kthread_stop(sdp->sd_logd_process); - kthread_stop(sdp->sd_recoverd_process); - - if 
(!(sb->s_flags & MS_RDONLY)) { - int error = gfs2_make_fs_ro(sdp); - if (error) - gfs2_io_error(sdp); - } - - /* At this point, we're through modifying the disk */ - gfs2_jindex_free(sdp); - gfs2_clear_rgrpd(sdp); - iput(sdp->sd_jindex); - iput(sdp->sd_inum_inode); - iput(sdp->sd_statfs_inode); - iput(sdp->sd_rindex); - iput(sdp->sd_quota_inode); - - gfs2_glock_put(sdp->sd_rename_gl); - gfs2_glock_put(sdp->sd_trans_gl); - - if (!sdp->sd_args.ar_spectator) { - gfs2_glock_dq_uninit(&sdp->sd_journal_gh); - gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); - gfs2_glock_dq_uninit(&sdp->sd_ir_gh); - gfs2_glock_dq_uninit(&sdp->sd_sc_gh); - gfs2_glock_dq_uninit(&sdp->sd_qc_gh); - iput(sdp->sd_ir_inode); - iput(sdp->sd_sc_inode); - iput(sdp->sd_qc_inode); + if (sdp) { + gfs2_meta_syncfs(sdp); + dput(sdp->sd_root_dir); + dput(sdp->sd_master_dir); + sdp->sd_root_dir = NULL; + sdp->sd_master_dir = NULL; } - gfs2_glock_dq_uninit(&sdp->sd_live_gh); + shrink_dcache_sb(sb); kill_block_super(sb); - gfs2_lm_unmount(sdp); - gfs2_sys_fs_del(sdp); - gfs2_delete_debugfs_file(sdp); - kfree(sdp); + if (sdp) + gfs2_delete_debugfs_file(sdp); } struct file_system_type gfs2_fs_type = { diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index bd08a0a8d9bf..08837a728635 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -95,7 +95,7 @@ do_flush: * Returns: errno */ -int gfs2_make_fs_ro(struct gfs2_sbd *sdp) +static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) { struct gfs2_holder t_gh; int error; @@ -121,6 +121,70 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp) return error; } +/** + * gfs2_put_super - Unmount the filesystem + * @sb: The VFS superblock + * + */ + +static void gfs2_put_super(struct super_block *sb) +{ + struct gfs2_sbd *sdp = sb->s_fs_info; + int error; + + /* Unfreeze the filesystem, if we need to */ + + mutex_lock(&sdp->sd_freeze_lock); + if (sdp->sd_freeze_count) + gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); + mutex_unlock(&sdp->sd_freeze_lock); + + kthread_stop(sdp->sd_quotad_process); + kthread_stop(sdp->sd_logd_process); + kthread_stop(sdp->sd_recoverd_process); + + if (!(sb->s_flags & MS_RDONLY)) { + error = gfs2_make_fs_ro(sdp); + if (error) + gfs2_io_error(sdp); + } + /* At this point, we're through modifying the disk */ + + /* Release stuff */ + + iput(sdp->sd_jindex); + iput(sdp->sd_inum_inode); + iput(sdp->sd_statfs_inode); + iput(sdp->sd_rindex); + iput(sdp->sd_quota_inode); + + gfs2_glock_put(sdp->sd_rename_gl); + gfs2_glock_put(sdp->sd_trans_gl); + + if (!sdp->sd_args.ar_spectator) { + gfs2_glock_dq_uninit(&sdp->sd_journal_gh); + gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); + gfs2_glock_dq_uninit(&sdp->sd_ir_gh); + gfs2_glock_dq_uninit(&sdp->sd_sc_gh); + gfs2_glock_dq_uninit(&sdp->sd_qc_gh); + iput(sdp->sd_ir_inode); + iput(sdp->sd_sc_inode); + iput(sdp->sd_qc_inode); + } + + gfs2_glock_dq_uninit(&sdp->sd_live_gh); + gfs2_clear_rgrpd(sdp); + gfs2_jindex_free(sdp); + /* Take apart glock structures and buffer lists */ + gfs2_gl_hash_clear(sdp); + /* Unmount the locking protocol */ + gfs2_lm_unmount(sdp); + + /* At this point, we're through participating in the lockspace */ + gfs2_sys_fs_del(sdp); + kfree(sdp); +} + /** * gfs2_write_super * @sb: the superblock @@ -622,7 +686,7 @@ const struct super_operations gfs2_super_ops = { .destroy_inode = gfs2_destroy_inode, .write_inode = gfs2_write_inode, .delete_inode = gfs2_delete_inode, - .put_super = gfs2_gl_hash_clear, + .put_super = gfs2_put_super, .write_super = gfs2_write_super, .sync_fs = gfs2_sync_fs, .write_super_lockfs = 
gfs2_write_super_lockfs, diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index f14658b20204..141b781f2fcc 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -33,6 +33,40 @@ #include "trans.h" #include "util.h" +/** + * gfs2_jindex_free - Clear all the journal index information + * @sdp: The GFS2 superblock + * + */ + +void gfs2_jindex_free(struct gfs2_sbd *sdp) +{ + struct list_head list, *head; + struct gfs2_jdesc *jd; + struct gfs2_journal_extent *jext; + + spin_lock(&sdp->sd_jindex_spin); + list_add(&list, &sdp->sd_jindex_list); + list_del_init(&sdp->sd_jindex_list); + sdp->sd_journals = 0; + spin_unlock(&sdp->sd_jindex_spin); + + while (!list_empty(&list)) { + jd = list_entry(list.next, struct gfs2_jdesc, jd_list); + head = &jd->extent_list; + while (!list_empty(head)) { + jext = list_entry(head->next, + struct gfs2_journal_extent, + extent_list); + list_del(&jext->extent_list); + kfree(jext); + } + list_del(&jd->jd_list); + iput(jd->jd_inode); + kfree(jd); + } +} + static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) { struct gfs2_jdesc *jd; diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index 4d2492b3e7ef..f6b8b00ad881 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -25,6 +25,8 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) return x; } +void gfs2_jindex_free(struct gfs2_sbd *sdp); + struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); int gfs2_jdesc_check(struct gfs2_jdesc *jd); @@ -32,7 +34,6 @@ int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename, struct gfs2_inode **ipp); int gfs2_make_fs_rw(struct gfs2_sbd *sdp); -int gfs2_make_fs_ro(struct gfs2_sbd *sdp); int gfs2_statfs_init(struct gfs2_sbd *sdp); void gfs2_statfs_change(struct gfs2_sbd *sdp, -- cgit From 88a19ad066c1aab2f9713beb670525fcc06e1c09 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 19 Dec 2008 15:43:05 +0000 Subject: GFS2: Fix use-after-free bug on umount (try #2) This should solve the issue with the previous attempt at fixing this. 
Signed-off-by: Steven Whitehouse --- fs/gfs2/ops_fstype.c | 20 ++++++++++++-------- fs/gfs2/ops_super.c | 1 - 2 files changed, 12 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 4cae60f4a175..f91eebdde581 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1263,17 +1263,21 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, static void gfs2_kill_sb(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; - if (sdp) { - gfs2_meta_syncfs(sdp); - dput(sdp->sd_root_dir); - dput(sdp->sd_master_dir); - sdp->sd_root_dir = NULL; - sdp->sd_master_dir = NULL; + + if (sdp == NULL) { + kill_block_super(sb); + return; } + + gfs2_meta_syncfs(sdp); + dput(sdp->sd_root_dir); + dput(sdp->sd_master_dir); + sdp->sd_root_dir = NULL; + sdp->sd_master_dir = NULL; shrink_dcache_sb(sb); kill_block_super(sb); - if (sdp) - gfs2_delete_debugfs_file(sdp); + gfs2_delete_debugfs_file(sdp); + kfree(sdp); } struct file_system_type gfs2_fs_type = { diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 08837a728635..777783deddcb 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -182,7 +182,6 @@ static void gfs2_put_super(struct super_block *sb) /* At this point, we're through participating in the lockspace */ gfs2_sys_fs_del(sdp); - kfree(sdp); } /** -- cgit From eb8374e71f941a1b3c2ed6ea19dc809e7124dc5d Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 25 Dec 2008 15:35:27 +0100 Subject: GFS2: Use DEFINE_SPINLOCK SPIN_LOCK_UNLOCKED is deprecated. The following makes the change suggested in Documentation/spinlocks.txt The semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) // @@ declarer name DEFINE_SPINLOCK; identifier xxx_lock; @@ - spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED; + DEFINE_SPINLOCK(xxx_lock); // Signed-off-by: Julia Lawall Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 6e298b070117..6b983aef785d 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -65,7 +65,7 @@ static struct dentry *gfs2_root; static struct workqueue_struct *glock_workqueue; static LIST_HEAD(lru_list); static atomic_t lru_count = ATOMIC_INIT(0); -static spinlock_t lru_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(lru_lock); #define GFS2_GL_HASH_SHIFT 15 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) -- cgit From 4ac6032d6c92f0ac65cf5bc56b68557b3f099b66 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Sat, 18 Oct 2008 19:11:42 -0700 Subject: ocfs2: Field prefixes for the xattr_bucket structure The ocfs2_xattr_bucket structure keeps track of the buffers for one xattr bucket. Let's prefix the fields for easier code navigation. 
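[Editor's note: the rename is purely mechanical. As a navigation aid, here is a side-by-side sketch of the structure before and after, copied from the hunk below; the two definitions are alternatives and are not meant to coexist in one file:

/* Before: field names too generic to grep for. */
struct ocfs2_xattr_bucket {
        struct buffer_head *bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];
        struct ocfs2_xattr_header *xh;
};

/* After: the bu_ prefix ties each use back to its bucket, e.g.
 * bucket->bu_bhs[0]->b_data or bucket->bu_xh->xh_count. */
struct ocfs2_xattr_bucket {
        struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];
        struct ocfs2_xattr_header *bu_xh;
};

Every hunk in the patch is then just this substitution applied at the existing use sites.]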
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 100 +++++++++++++++++++++++++++---------------------------- 1 file changed, 50 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 74d7367ade13..9c0ee42eb931 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -61,8 +61,8 @@ struct ocfs2_xattr_def_value_root { }; struct ocfs2_xattr_bucket { - struct buffer_head *bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET]; - struct ocfs2_xattr_header *xh; + struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET]; + struct ocfs2_xattr_header *bu_xh; }; #define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) @@ -795,11 +795,11 @@ static int ocfs2_xattr_block_get(struct inode *inode, if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode, - xs->bucket.xh, + xs->bucket.bu_xh, i, &block_off, &name_offset); - xs->base = xs->bucket.bhs[block_off]->b_data; + xs->base = xs->bucket.bu_bhs[block_off]->b_data; } if (ocfs2_xattr_is_local(xs->here)) { memcpy(buffer, (void *)xs->base + @@ -818,7 +818,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, ret = size; cleanup: for (i = 0; i < OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET; i++) - brelse(xs->bucket.bhs[i]); + brelse(xs->bucket.bu_bhs[i]); memset(&xs->bucket, 0, sizeof(xs->bucket)); brelse(xs->xattr_bh); @@ -2032,7 +2032,7 @@ cleanup: brelse(di_bh); brelse(xbs.xattr_bh); for (i = 0; i < blk_per_bucket; i++) - brelse(xbs.bucket.bhs[i]); + brelse(xbs.bucket.bu_bhs[i]); return ret; } @@ -2276,13 +2276,13 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, lower_bh = bh; bh = NULL; } - xs->bucket.bhs[0] = lower_bh; - xs->bucket.xh = (struct ocfs2_xattr_header *) - xs->bucket.bhs[0]->b_data; + xs->bucket.bu_bhs[0] = lower_bh; + xs->bucket.bu_xh = (struct ocfs2_xattr_header *) + xs->bucket.bu_bhs[0]->b_data; lower_bh = NULL; - xs->header = xs->bucket.xh; - xs->base = xs->bucket.bhs[0]->b_data; + xs->header = xs->bucket.bu_xh; + xs->base = xs->bucket.bu_bhs[0]->b_data; xs->end = xs->base + inode->i_sb->s_blocksize; if (found) { @@ -2290,8 +2290,8 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, * If we have found the xattr enty, read all the blocks in * this bucket. */ - ret = ocfs2_read_blocks(inode, xs->bucket.bhs[0]->b_blocknr + 1, - blk_per_bucket - 1, &xs->bucket.bhs[1], + ret = ocfs2_read_blocks(inode, xs->bucket.bu_bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -2300,7 +2300,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, xs->here = &xs->header->xh_entries[index]; mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, - (unsigned long long)xs->bucket.bhs[0]->b_blocknr, index); + (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, index); } else ret = -ENODATA; @@ -2370,23 +2370,23 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, for (i = 0; i < num_buckets; i++, blkno += blk_per_bucket) { ret = ocfs2_read_blocks(inode, blkno, blk_per_bucket, - bucket.bhs, 0); + bucket.bu_bhs, 0); if (ret) { mlog_errno(ret); goto out; } - bucket.xh = (struct ocfs2_xattr_header *)bucket.bhs[0]->b_data; + bucket.bu_xh = (struct ocfs2_xattr_header *)bucket.bu_bhs[0]->b_data; /* * The real bucket num in this series of blocks is stored * in the 1st bucket. 
*/ if (i == 0) - num_buckets = le16_to_cpu(bucket.xh->xh_num_buckets); + num_buckets = le16_to_cpu(bucket.bu_xh->xh_num_buckets); mlog(0, "iterating xattr bucket %llu, first hash %u\n", (unsigned long long)blkno, - le32_to_cpu(bucket.xh->xh_entries[0].xe_name_hash)); + le32_to_cpu(bucket.bu_xh->xh_entries[0].xe_name_hash)); if (func) { ret = func(inode, &bucket, para); if (ret) { @@ -2396,13 +2396,13 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, } for (j = 0; j < blk_per_bucket; j++) - brelse(bucket.bhs[j]); + brelse(bucket.bu_bhs[j]); memset(&bucket, 0, sizeof(bucket)); } out: for (j = 0; j < blk_per_bucket; j++) - brelse(bucket.bhs[j]); + brelse(bucket.bu_bhs[j]); return ret; } @@ -2441,21 +2441,21 @@ static int ocfs2_list_xattr_bucket(struct inode *inode, int i, block_off, new_offset; const char *prefix, *name; - for (i = 0 ; i < le16_to_cpu(bucket->xh->xh_count); i++) { - struct ocfs2_xattr_entry *entry = &bucket->xh->xh_entries[i]; + for (i = 0 ; i < le16_to_cpu(bucket->bu_xh->xh_count); i++) { + struct ocfs2_xattr_entry *entry = &bucket->bu_xh->xh_entries[i]; type = ocfs2_xattr_get_type(entry); prefix = ocfs2_xattr_prefix(type); if (prefix) { ret = ocfs2_xattr_bucket_get_name_value(inode, - bucket->xh, + bucket->bu_xh, i, &block_off, &new_offset); if (ret) break; - name = (const char *)bucket->bhs[block_off]->b_data + + name = (const char *)bucket->bu_bhs[block_off]->b_data + new_offset; ret = ocfs2_xattr_list_entry(xl->buffer, xl->buffer_size, @@ -2626,10 +2626,10 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, int i, blocksize = inode->i_sb->s_blocksize; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - xs->bucket.bhs[0] = new_bh; + xs->bucket.bu_bhs[0] = new_bh; get_bh(new_bh); - xs->bucket.xh = (struct ocfs2_xattr_header *)xs->bucket.bhs[0]->b_data; - xs->header = xs->bucket.xh; + xs->bucket.bu_xh = (struct ocfs2_xattr_header *)xs->bucket.bu_bhs[0]->b_data; + xs->header = xs->bucket.bu_xh; xs->base = new_bh->b_data; xs->end = xs->base + inode->i_sb->s_blocksize; @@ -2637,8 +2637,8 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, if (!xs->not_found) { if (OCFS2_XATTR_BUCKET_SIZE != blocksize) { ret = ocfs2_read_blocks(inode, - xs->bucket.bhs[0]->b_blocknr + 1, - blk_per_bucket - 1, &xs->bucket.bhs[1], + xs->bucket.bu_bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -2835,7 +2835,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, size_t end, offset, len, value_len; struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; - u64 blkno = bucket->bhs[0]->b_blocknr; + u64 blkno = bucket->bu_bhs[0]->b_blocknr; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u16 xh_free_start; size_t blocksize = inode->i_sb->s_blocksize; @@ -3929,7 +3929,7 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode, int block_off = offs >> inode->i_sb->s_blocksize_bits; offs = offs % inode->i_sb->s_blocksize; - return bucket->bhs[block_off]->b_data + offs; + return bucket->bu_bhs[block_off]->b_data + offs; } /* @@ -4124,12 +4124,12 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", (unsigned long)xi->value_len, xi->name_index, - (unsigned long long)xs->bucket.bhs[0]->b_blocknr); + (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr); - if (!xs->bucket.bhs[1]) { + if (!xs->bucket.bu_bhs[1]) { ret = ocfs2_read_blocks(inode, - 
xs->bucket.bhs[0]->b_blocknr + 1, - blk_per_bucket - 1, &xs->bucket.bhs[1], + xs->bucket.bu_bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -4146,7 +4146,7 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, } for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, xs->bucket.bhs[i], + ret = ocfs2_journal_access(handle, inode, xs->bucket.bu_bhs[i], OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -4158,7 +4158,7 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, /*Only dirty the blocks we have touched in set xattr. */ ret = ocfs2_xattr_bucket_handle_journal(inode, handle, xs, - xs->bucket.bhs, blk_per_bucket); + xs->bucket.bu_bhs, blk_per_bucket); if (ret) mlog_errno(ret); out: @@ -4272,10 +4272,10 @@ static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, struct ocfs2_xattr_entry *xe = xs->here; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base; - BUG_ON(!xs->bucket.bhs[0] || !xe || ocfs2_xattr_is_local(xe)); + BUG_ON(!xs->bucket.bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); offset = xe - xh->xh_entries; - ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket.bhs[0], + ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket.bu_bhs[0], offset, len); if (ret) mlog_errno(ret); @@ -4395,7 +4395,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, struct ocfs2_xattr_search *xs) { handle_t *handle = NULL; - struct ocfs2_xattr_header *xh = xs->bucket.xh; + struct ocfs2_xattr_header *xh = xs->bucket.bu_xh; struct ocfs2_xattr_entry *last = &xh->xh_entries[ le16_to_cpu(xh->xh_count) - 1]; int ret = 0; @@ -4407,7 +4407,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, return; } - ret = ocfs2_journal_access(handle, inode, xs->bucket.bhs[0], + ret = ocfs2_journal_access(handle, inode, xs->bucket.bu_bhs[0], OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4420,7 +4420,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, memset(last, 0, sizeof(struct ocfs2_xattr_entry)); le16_add_cpu(&xh->xh_count, -1); - ret = ocfs2_journal_dirty(handle, xs->bucket.bhs[0]); + ret = ocfs2_journal_dirty(handle, xs->bucket.bu_bhs[0]); if (ret < 0) mlog_errno(ret); out_commit: @@ -4530,7 +4530,7 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, struct ocfs2_xattr_bucket *bucket, const char *name) { - struct ocfs2_xattr_header *xh = bucket->xh; + struct ocfs2_xattr_header *xh = bucket->bu_xh; u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash)) @@ -4540,7 +4540,7 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, xh->xh_entries[0].xe_name_hash) { mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, " "hash = %u\n", - (unsigned long long)bucket->bhs[0]->b_blocknr, + (unsigned long long)bucket->bu_bhs[0]->b_blocknr, le32_to_cpu(xh->xh_entries[0].xe_name_hash)); return -ENOSPC; } @@ -4574,7 +4574,7 @@ try_again: mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " "of %u which exceed block size\n", - (unsigned long long)xs->bucket.bhs[0]->b_blocknr, + (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, header_size); if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) @@ -4614,7 +4614,7 @@ try_again: mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" " %u\n", xs->not_found, - 
(unsigned long long)xs->bucket.bhs[0]->b_blocknr, + (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, free, need, max_free, le16_to_cpu(xh->xh_free_start), le16_to_cpu(xh->xh_name_value_len)); @@ -4667,14 +4667,14 @@ try_again: ret = ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, - xs->bucket.bhs[0]); + xs->bucket.bu_bhs[0]); if (ret) { mlog_errno(ret); goto out; } for (i = 0; i < blk_per_bucket; i++) - brelse(xs->bucket.bhs[i]); + brelse(xs->bucket.bu_bhs[i]); memset(&xs->bucket, 0, sizeof(xs->bucket)); @@ -4700,7 +4700,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, void *para) { int ret = 0; - struct ocfs2_xattr_header *xh = bucket->xh; + struct ocfs2_xattr_header *xh = bucket->bu_xh; u16 i; struct ocfs2_xattr_entry *xe; @@ -4710,7 +4710,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, continue; ret = ocfs2_xattr_bucket_value_truncate(inode, - bucket->bhs[0], + bucket->bu_bhs[0], i, 0); if (ret) { mlog_errno(ret); -- cgit From 9c7759aa670918a48f0c6e06779cd20f2781a2ac Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 16:21:03 -0700 Subject: ocfs2: Convenient access to an xattr bucket's block number. The xattr code often wants to know the block number of an xattr bucket. This is usually found by dereferencing the first bh hanging off of the ocfs2_xattr_bucket structure. Rather than do this all the time, let's provide a nice little macro. The idea is ripped from the ocfs2_path code. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 9c0ee42eb931..3cf8e80b2b6c 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -154,6 +154,8 @@ static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) return len / sizeof(struct ocfs2_xattr_entry); } +#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) + static inline const char *ocfs2_xattr_prefix(int name_index) { struct xattr_handler *handler = NULL; @@ -2290,7 +2292,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, * If we have found the xattr enty, read all the blocks in * this bucket. 
*/ - ret = ocfs2_read_blocks(inode, xs->bucket.bu_bhs[0]->b_blocknr + 1, + ret = ocfs2_read_blocks(inode, bucket_blkno(&xs->bucket) + 1, blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { @@ -2300,7 +2302,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, xs->here = &xs->header->xh_entries[index]; mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, - (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, index); + (unsigned long long)bucket_blkno(&xs->bucket), index); } else ret = -ENODATA; @@ -2637,7 +2639,7 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, if (!xs->not_found) { if (OCFS2_XATTR_BUCKET_SIZE != blocksize) { ret = ocfs2_read_blocks(inode, - xs->bucket.bu_bhs[0]->b_blocknr + 1, + bucket_blkno(&xs->bucket) + 1, blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { @@ -2835,7 +2837,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, size_t end, offset, len, value_len; struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; - u64 blkno = bucket->bu_bhs[0]->b_blocknr; + u64 blkno = bucket_blkno(bucket); u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u16 xh_free_start; size_t blocksize = inode->i_sb->s_blocksize; @@ -4124,11 +4126,11 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", (unsigned long)xi->value_len, xi->name_index, - (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr); + (unsigned long long)bucket_blkno(&xs->bucket)); if (!xs->bucket.bu_bhs[1]) { ret = ocfs2_read_blocks(inode, - xs->bucket.bu_bhs[0]->b_blocknr + 1, + bucket_blkno(&xs->bucket) + 1, blk_per_bucket - 1, &xs->bucket.bu_bhs[1], 0); if (ret) { @@ -4540,7 +4542,7 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, xh->xh_entries[0].xe_name_hash) { mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, " "hash = %u\n", - (unsigned long long)bucket->bu_bhs[0]->b_blocknr, + (unsigned long long)bucket_blkno(bucket), le32_to_cpu(xh->xh_entries[0].xe_name_hash)); return -ENOSPC; } @@ -4574,7 +4576,7 @@ try_again: mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " "of %u which exceed block size\n", - (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, + (unsigned long long)bucket_blkno(&xs->bucket), header_size); if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) @@ -4614,7 +4616,7 @@ try_again: mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" " %u\n", xs->not_found, - (unsigned long long)xs->bucket.bu_bhs[0]->b_blocknr, + (unsigned long long)bucket_blkno(&xs->bucket), free, need, max_free, le16_to_cpu(xh->xh_free_start), le16_to_cpu(xh->xh_name_value_len)); -- cgit From 51def39f0cabd46131c7c4df08751cb0cb9433d1 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 16:57:21 -0700 Subject: ocfs2: Convenient access to xattr bucket data blocks. The xattr code often wants to access the data pointer for blocks in an xattr bucket. This is usually found by dereferencing the bh array hanging off of the ocfs2_xattr_bucket structure. Rather than do this all the time, let's provide a nice little macro. The idea is ripped from the ocfs2_path code. 
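A tiny stand-alone illustration of the offset arithmetic that bucket_block() supports (see the ocfs2_xattr_bucket_get_val hunk below): an xattr entry records a name/value offset relative to the bucket, and the caller splits it into a block index and an in-block offset. The sizes and the offset here are made-up example values, not anything read from a real superblock:

#include <stdio.h>

#define BLOCK_SIZE		1024	/* example filesystem block size */
#define BLOCKS_PER_BUCKET	4	/* example bucket geometry       */

int main(void)
{
	int offs = 2600;			/* hypothetical bucket-relative offset */
	int block_off = offs / BLOCK_SIZE;	/* which bu_bhs[] slot to use   */
	int in_block  = offs % BLOCK_SIZE;	/* offset inside that block     */

	if (block_off >= BLOCKS_PER_BUCKET) {
		fprintf(stderr, "offset past the end of the bucket\n");
		return 1;
	}

	printf("bucket offset %d -> bucket_block(bucket, %d) + %d\n",
	       offs, block_off, in_block);
	return 0;
}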
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3cf8e80b2b6c..8594df36640e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -155,6 +155,7 @@ static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) } #define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) +#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) static inline const char *ocfs2_xattr_prefix(int name_index) { @@ -801,7 +802,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, i, &block_off, &name_offset); - xs->base = xs->bucket.bu_bhs[block_off]->b_data; + xs->base = bucket_block(&xs->bucket, block_off); } if (ocfs2_xattr_is_local(xs->here)) { memcpy(buffer, (void *)xs->base + @@ -2280,11 +2281,11 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, } xs->bucket.bu_bhs[0] = lower_bh; xs->bucket.bu_xh = (struct ocfs2_xattr_header *) - xs->bucket.bu_bhs[0]->b_data; + bucket_block(&xs->bucket, 0); lower_bh = NULL; xs->header = xs->bucket.bu_xh; - xs->base = xs->bucket.bu_bhs[0]->b_data; + xs->base = bucket_block(&xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; if (found) { @@ -2378,7 +2379,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, goto out; } - bucket.bu_xh = (struct ocfs2_xattr_header *)bucket.bu_bhs[0]->b_data; + bucket.bu_xh = (struct ocfs2_xattr_header *)bucket_block(&bucket, 0); /* * The real bucket num in this series of blocks is stored * in the 1st bucket. @@ -2457,7 +2458,7 @@ static int ocfs2_list_xattr_bucket(struct inode *inode, if (ret) break; - name = (const char *)bucket->bu_bhs[block_off]->b_data + + name = (const char *)bucket_block(bucket, block_off) + new_offset; ret = ocfs2_xattr_list_entry(xl->buffer, xl->buffer_size, @@ -2630,7 +2631,7 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, xs->bucket.bu_bhs[0] = new_bh; get_bh(new_bh); - xs->bucket.bu_xh = (struct ocfs2_xattr_header *)xs->bucket.bu_bhs[0]->b_data; + xs->bucket.bu_xh = (struct ocfs2_xattr_header *)bucket_block(&xs->bucket, 0); xs->header = xs->bucket.bu_xh; xs->base = new_bh->b_data; @@ -3931,7 +3932,7 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode, int block_off = offs >> inode->i_sb->s_blocksize_bits; offs = offs % inode->i_sb->s_blocksize; - return bucket->bu_bhs[block_off]->b_data + offs; + return bucket_block(bucket, block_off) + offs; } /* -- cgit From 3e6329463e3a5c311e1d607ff3db735a18b6d67a Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 17:04:49 -0700 Subject: ocfs2: Convenient access to an xattr bucket's header. The xattr code often wants to access the ocfs2_xattr_header at the start of an bucket. Rather than walk the pointer chains, let's just create another nice macro. As a side benefit, we can get rid of the mostly spurious ->bu_xh element on the bucket structure. The idea is ripped from the ocfs2_path code. 
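Taken together with bucket_blkno() and bucket_block() from the two preceding patches, the accessor set collected below (copied from the surrounding hunks) lets callers reach everything they need in a bucket without dereferencing bu_bhs directly:

#define bucket_blkno(_b)	((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n)	((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b)		((struct ocfs2_xattr_header *)bucket_block((_b), 0))

/* Typical use from the hunks: le16_to_cpu(bucket_xh(bucket)->xh_count)
 * replaces walking bu_bhs[0]->b_data by hand, so the cached bu_xh
 * pointer can go away. */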
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 8594df36640e..1b77302b54ff 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -62,7 +62,6 @@ struct ocfs2_xattr_def_value_root { struct ocfs2_xattr_bucket { struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET]; - struct ocfs2_xattr_header *bu_xh; }; #define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) @@ -156,6 +155,7 @@ static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) #define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) +#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) static inline const char *ocfs2_xattr_prefix(int name_index) { @@ -798,7 +798,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode, - xs->bucket.bu_xh, + bucket_xh(&xs->bucket), i, &block_off, &name_offset); @@ -2280,11 +2280,9 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, bh = NULL; } xs->bucket.bu_bhs[0] = lower_bh; - xs->bucket.bu_xh = (struct ocfs2_xattr_header *) - bucket_block(&xs->bucket, 0); lower_bh = NULL; - xs->header = xs->bucket.bu_xh; + xs->header = bucket_xh(&xs->bucket); xs->base = bucket_block(&xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; @@ -2379,17 +2377,16 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, goto out; } - bucket.bu_xh = (struct ocfs2_xattr_header *)bucket_block(&bucket, 0); /* * The real bucket num in this series of blocks is stored * in the 1st bucket. 
*/ if (i == 0) - num_buckets = le16_to_cpu(bucket.bu_xh->xh_num_buckets); + num_buckets = le16_to_cpu(bucket_xh(&bucket)->xh_num_buckets); mlog(0, "iterating xattr bucket %llu, first hash %u\n", (unsigned long long)blkno, - le32_to_cpu(bucket.bu_xh->xh_entries[0].xe_name_hash)); + le32_to_cpu(bucket_xh(&bucket)->xh_entries[0].xe_name_hash)); if (func) { ret = func(inode, &bucket, para); if (ret) { @@ -2444,14 +2441,14 @@ static int ocfs2_list_xattr_bucket(struct inode *inode, int i, block_off, new_offset; const char *prefix, *name; - for (i = 0 ; i < le16_to_cpu(bucket->bu_xh->xh_count); i++) { - struct ocfs2_xattr_entry *entry = &bucket->bu_xh->xh_entries[i]; + for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) { + struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i]; type = ocfs2_xattr_get_type(entry); prefix = ocfs2_xattr_prefix(type); if (prefix) { ret = ocfs2_xattr_bucket_get_name_value(inode, - bucket->bu_xh, + bucket_xh(bucket), i, &block_off, &new_offset); @@ -2631,8 +2628,7 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, xs->bucket.bu_bhs[0] = new_bh; get_bh(new_bh); - xs->bucket.bu_xh = (struct ocfs2_xattr_header *)bucket_block(&xs->bucket, 0); - xs->header = xs->bucket.bu_xh; + xs->header = bucket_xh(&xs->bucket); xs->base = new_bh->b_data; xs->end = xs->base + inode->i_sb->s_blocksize; @@ -4398,7 +4394,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, struct ocfs2_xattr_search *xs) { handle_t *handle = NULL; - struct ocfs2_xattr_header *xh = xs->bucket.bu_xh; + struct ocfs2_xattr_header *xh = bucket_xh(&xs->bucket); struct ocfs2_xattr_entry *last = &xh->xh_entries[ le16_to_cpu(xh->xh_count) - 1]; int ret = 0; @@ -4533,7 +4529,7 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, struct ocfs2_xattr_bucket *bucket, const char *name) { - struct ocfs2_xattr_header *xh = bucket->bu_xh; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash)) @@ -4703,7 +4699,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, void *para) { int ret = 0; - struct ocfs2_xattr_header *xh = bucket->bu_xh; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 i; struct ocfs2_xattr_entry *xe; -- cgit From 6dde41d9e7ba62f84cd7e91c0e993500af32ceb6 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 17:16:48 -0700 Subject: ocfs2: Provide a wrapper to brelse() xattr bucket buffers. A common theme is walking all the buffer heads on an ocfs2_xattr_bucket and releasing them. Let's wrap that. 
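For reference, the helper this patch adds (copied from the hunk below), with a note on why each slot is cleared: brelse() on a NULL buffer head is a no-op, so a bucket can safely be released both inside a loop and again on the error path:

static void ocfs2_xattr_bucket_relse(struct inode *inode,
				     struct ocfs2_xattr_bucket *bucket)
{
	int i, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);

	for (i = 0; i < blks; i++) {
		brelse(bucket->bu_bhs[i]);	/* drop our reference, if any  */
		bucket->bu_bhs[i] = NULL;	/* make a repeat call harmless */
	}
}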
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 1b77302b54ff..3478ad177b7f 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -157,6 +157,17 @@ static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) #define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) +static void ocfs2_xattr_bucket_relse(struct inode *inode, + struct ocfs2_xattr_bucket *bucket) +{ + int i, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + for (i = 0; i < blks; i++) { + brelse(bucket->bu_bhs[i]); + bucket->bu_bhs[i] = NULL; + } +} + static inline const char *ocfs2_xattr_prefix(int name_index) { struct xattr_handler *handler = NULL; @@ -820,8 +831,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, } ret = size; cleanup: - for (i = 0; i < OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET; i++) - brelse(xs->bucket.bu_bhs[i]); + ocfs2_xattr_bucket_relse(inode, &xs->bucket); memset(&xs->bucket, 0, sizeof(xs->bucket)); brelse(xs->xattr_bh); @@ -1932,7 +1942,6 @@ int ocfs2_xattr_set(struct inode *inode, struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; int ret; - u16 i, blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); struct ocfs2_xattr_info xi = { .name_index = name_index, @@ -2034,8 +2043,7 @@ cleanup: ocfs2_inode_unlock(inode, 1); brelse(di_bh); brelse(xbs.xattr_bh); - for (i = 0; i < blk_per_bucket; i++) - brelse(xbs.bucket.bu_bhs[i]); + ocfs2_xattr_bucket_relse(inode, &xbs.bucket); return ret; } @@ -2358,7 +2366,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, xattr_bucket_func *func, void *para) { - int i, j, ret = 0; + int i, ret = 0; int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); u32 num_buckets = clusters * bpc; @@ -2395,14 +2403,12 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, } } - for (j = 0; j < blk_per_bucket; j++) - brelse(bucket.bu_bhs[j]); + ocfs2_xattr_bucket_relse(inode, &bucket); memset(&bucket, 0, sizeof(bucket)); } out: - for (j = 0; j < blk_per_bucket; j++) - brelse(bucket.bu_bhs[j]); + ocfs2_xattr_bucket_relse(inode, &bucket); return ret; } @@ -4554,11 +4560,10 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; u16 count, header_size, xh_free_start; - int i, free, max_free, need, old; + int free, max_free, need, old; size_t value_size = 0, name_len = strlen(xi->name); size_t blocksize = inode->i_sb->s_blocksize; int ret, allocation = 0; - u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); mlog_entry("Set xattr %s in xattr index block\n", xi->name); @@ -4672,9 +4677,7 @@ try_again: goto out; } - for (i = 0; i < blk_per_bucket; i++) - brelse(xs->bucket.bu_bhs[i]); - + ocfs2_xattr_bucket_relse(inode, &xs->bucket); memset(&xs->bucket, 0, sizeof(xs->bucket)); ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, -- cgit From 784b816a9198dc3782c97cde8ddcf52fecdf1797 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 17:33:40 -0700 Subject: ocfs2: Improve ocfs2_read_xattr_bucket(). The ocfs2_read_xattr_bucket() function would read an xattr bucket into a list of buffer heads. However, we have a nice ocfs2_xattr_bucket structure. Let's have it fill that out instead. 
In addition, ocfs2_read_xattr_bucket() would initialize buffer heads for a bucket that's never been on disk before. That's confusing. Let's call that functionality ocfs2_init_xattr_bucket(). The functions ocfs2_cp_xattr_bucket() and ocfs2_half_xattr_bucket() are updated to use the ocfs2_xattr_bucket structure rather than raw bh lists. That way they can use the new read/init calls. In addition, they drop the wasted read of an existing target bucket. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 165 ++++++++++++++++++++++++++----------------------------- 1 file changed, 79 insertions(+), 86 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3478ad177b7f..fa13fa488786 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -168,6 +168,48 @@ static void ocfs2_xattr_bucket_relse(struct inode *inode, } } +/* + * A bucket that has never been written to disk doesn't need to be + * read. We just need the buffer_heads. Don't call this for + * buckets that are already on disk. ocfs2_read_xattr_bucket() initializes + * them fully. + */ +static int ocfs2_init_xattr_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + u64 xb_blkno) +{ + int i, rc = 0; + int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + for (i = 0; i < blks; i++) { + bucket->bu_bhs[i] = sb_getblk(inode->i_sb, xb_blkno + i); + if (!bucket->bu_bhs[i]) { + rc = -EIO; + mlog_errno(rc); + break; + } + + ocfs2_set_new_buffer_uptodate(inode, bucket->bu_bhs[i]); + } + + if (rc) + ocfs2_xattr_bucket_relse(inode, bucket); + return rc; +} + +/* Read the xattr bucket at xb_blkno */ +static int ocfs2_read_xattr_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + u64 xb_blkno) +{ + int rc, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + rc = ocfs2_read_blocks(inode, xb_blkno, blks, bucket->bu_bhs, 0); + if (rc) + ocfs2_xattr_bucket_relse(inode, bucket); + return rc; +} + static inline const char *ocfs2_xattr_prefix(int name_index) { struct xattr_handler *handler = NULL; @@ -3097,31 +3139,6 @@ out: return ret; } -static int ocfs2_read_xattr_bucket(struct inode *inode, - u64 blkno, - struct buffer_head **bhs, - int new) -{ - int ret = 0; - u16 i, blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - - if (!new) - return ocfs2_read_blocks(inode, blkno, - blk_per_bucket, bhs, 0); - - for (i = 0; i < blk_per_bucket; i++) { - bhs[i] = sb_getblk(inode->i_sb, blkno + i); - if (bhs[i] == NULL) { - ret = -EIO; - mlog_errno(ret); - break; - } - ocfs2_set_new_buffer_uptodate(inode, bhs[i]); - } - - return ret; -} - /* * Find the suitable pos when we divide a bucket into 2. 
* We have to make sure the xattrs with the same hash value exist @@ -3184,7 +3201,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, int ret, i; int count, start, len, name_value_len = 0, xe_len, name_offset = 0; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - struct buffer_head **s_bhs, **t_bhs = NULL; + struct ocfs2_xattr_bucket s_bucket, t_bucket; struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; int blocksize = inode->i_sb->s_blocksize; @@ -3192,37 +3209,34 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, mlog(0, "move some of xattrs from bucket %llu to %llu\n", (unsigned long long)blk, (unsigned long long)new_blk); - s_bhs = kcalloc(blk_per_bucket, sizeof(struct buffer_head *), GFP_NOFS); - if (!s_bhs) - return -ENOMEM; + memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); + memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); - ret = ocfs2_read_xattr_bucket(inode, blk, s_bhs, 0); + ret = ocfs2_read_xattr_bucket(inode, &s_bucket, blk); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, s_bhs[0], + ret = ocfs2_journal_access(handle, inode, s_bucket.bu_bhs[0], OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - t_bhs = kcalloc(blk_per_bucket, sizeof(struct buffer_head *), GFP_NOFS); - if (!t_bhs) { - ret = -ENOMEM; - goto out; - } - - ret = ocfs2_read_xattr_bucket(inode, new_blk, t_bhs, new_bucket_head); + /* + * Even if !new_bucket_head, we're overwriting t_bucket. Thus, + * there's no need to read it. + */ + ret = ocfs2_init_xattr_bucket(inode, &t_bucket, new_blk); if (ret) { mlog_errno(ret); goto out; } for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, t_bhs[i], + ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i], new_bucket_head ? OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); @@ -3232,7 +3246,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, } } - xh = (struct ocfs2_xattr_header *)s_bhs[0]->b_data; + xh = bucket_xh(&s_bucket); count = le16_to_cpu(xh->xh_count); start = ocfs2_xattr_find_divide_pos(xh); @@ -3245,9 +3259,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, * that of the last entry in the previous bucket. */ for (i = 0; i < blk_per_bucket; i++) - memset(t_bhs[i]->b_data, 0, blocksize); + memset(bucket_block(&t_bucket, i), 0, blocksize); - xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data; + xh = bucket_xh(&t_bucket); xh->xh_free_start = cpu_to_le16(blocksize); xh->xh_entries[0].xe_name_hash = xe->xe_name_hash; le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1); @@ -3257,10 +3271,11 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, /* copy the whole bucket to the new first. */ for (i = 0; i < blk_per_bucket; i++) - memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize); + memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i), + blocksize); /* update the new bucket. 
*/ - xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data; + xh = bucket_xh(&t_bucket); /* * Calculate the total name/value len and xh_free_start for @@ -3325,7 +3340,7 @@ set_num_buckets: xh->xh_num_buckets = 0; for (i = 0; i < blk_per_bucket; i++) { - ocfs2_journal_dirty(handle, t_bhs[i]); + ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]); if (ret) mlog_errno(ret); } @@ -3342,29 +3357,20 @@ set_num_buckets: if (start == count) goto out; - xh = (struct ocfs2_xattr_header *)s_bhs[0]->b_data; + xh = bucket_xh(&s_bucket); memset(&xh->xh_entries[start], 0, sizeof(struct ocfs2_xattr_entry) * (count - start)); xh->xh_count = cpu_to_le16(start); xh->xh_free_start = cpu_to_le16(name_offset); xh->xh_name_value_len = cpu_to_le16(name_value_len); - ocfs2_journal_dirty(handle, s_bhs[0]); + ocfs2_journal_dirty(handle, s_bucket.bu_bhs[0]); if (ret) mlog_errno(ret); out: - if (s_bhs) { - for (i = 0; i < blk_per_bucket; i++) - brelse(s_bhs[i]); - } - kfree(s_bhs); - - if (t_bhs) { - for (i = 0; i < blk_per_bucket; i++) - brelse(t_bhs[i]); - } - kfree(t_bhs); + ocfs2_xattr_bucket_relse(inode, &s_bucket); + ocfs2_xattr_bucket_relse(inode, &t_bucket); return ret; } @@ -3384,7 +3390,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, int ret, i; int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int blocksize = inode->i_sb->s_blocksize; - struct buffer_head **s_bhs, **t_bhs = NULL; + struct ocfs2_xattr_bucket s_bucket, t_bucket; BUG_ON(s_blkno == t_blkno); @@ -3392,28 +3398,23 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, (unsigned long long)s_blkno, (unsigned long long)t_blkno, t_is_new); - s_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, - GFP_NOFS); - if (!s_bhs) - return -ENOMEM; + memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); + memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); - ret = ocfs2_read_xattr_bucket(inode, s_blkno, s_bhs, 0); + ret = ocfs2_read_xattr_bucket(inode, &s_bucket, s_blkno); if (ret) goto out; - t_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, - GFP_NOFS); - if (!t_bhs) { - ret = -ENOMEM; - goto out; - } - - ret = ocfs2_read_xattr_bucket(inode, t_blkno, t_bhs, t_is_new); + /* + * Even if !t_is_new, we're overwriting t_bucket. Thus, + * there's no need to read it. + */ + ret = ocfs2_init_xattr_bucket(inode, &t_bucket, t_blkno); if (ret) goto out; for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, t_bhs[i], + ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i], t_is_new ? OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); @@ -3422,22 +3423,14 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, } for (i = 0; i < blk_per_bucket; i++) { - memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize); - ocfs2_journal_dirty(handle, t_bhs[i]); + memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i), + blocksize); + ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]); } out: - if (s_bhs) { - for (i = 0; i < blk_per_bucket; i++) - brelse(s_bhs[i]); - } - kfree(s_bhs); - - if (t_bhs) { - for (i = 0; i < blk_per_bucket; i++) - brelse(t_bhs[i]); - } - kfree(t_bhs); + ocfs2_xattr_bucket_relse(inode, &s_bucket); + ocfs2_xattr_bucket_relse(inode, &t_bucket); return ret; } -- cgit From 1224be020f62ada3e19822feeac3840abf80de3e Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 18:47:33 -0700 Subject: ocfs2: Wrap journal_access/journal_dirty for xattr buckets. 
A common action is to call ocfs2_journal_access() and ocfs2_journal_dirty() on the buffer heads of an xattr bucket. Let's create nice wrappers. While we're there, let's drop the places that try to be smart by writing only the first and last blocks of a bucket. A bucket is contiguous, so writing the whole thing is actually more efficient. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 140 +++++++++++++++++++++++++------------------------------ 1 file changed, 64 insertions(+), 76 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index fa13fa488786..99aefe4ea750 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -210,6 +210,37 @@ static int ocfs2_read_xattr_bucket(struct inode *inode, return rc; } +static int ocfs2_xattr_bucket_journal_access(handle_t *handle, + struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + int type) +{ + int i, rc = 0; + int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + for (i = 0; i < blks; i++) { + rc = ocfs2_journal_access(handle, inode, + bucket->bu_bhs[i], type); + if (rc) { + mlog_errno(rc); + break; + } + } + + return rc; +} + +static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle, + struct inode *inode, + struct ocfs2_xattr_bucket *bucket) +{ + int i, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + for (i = 0; i < blks; i++) + ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); +} + + static inline const char *ocfs2_xattr_prefix(int name_index) { struct xattr_handler *handler = NULL; @@ -3218,8 +3249,8 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, s_bucket.bu_bhs[0], - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, inode, &s_bucket, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -3235,15 +3266,13 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, goto out; } - for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i], - new_bucket_head ? - OCFS2_JOURNAL_ACCESS_CREATE : - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } + ret = ocfs2_xattr_bucket_journal_access(handle, inode, &t_bucket, + new_bucket_head ? + OCFS2_JOURNAL_ACCESS_CREATE : + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; } xh = bucket_xh(&s_bucket); @@ -3339,11 +3368,7 @@ set_num_buckets: else xh->xh_num_buckets = 0; - for (i = 0; i < blk_per_bucket; i++) { - ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]); - if (ret) - mlog_errno(ret); - } + ocfs2_xattr_bucket_journal_dirty(handle, inode, &t_bucket); /* store the first_hash of the new bucket. */ if (first_hash) @@ -3364,9 +3389,7 @@ set_num_buckets: xh->xh_free_start = cpu_to_le16(name_offset); xh->xh_name_value_len = cpu_to_le16(name_value_len); - ocfs2_journal_dirty(handle, s_bucket.bu_bhs[0]); - if (ret) - mlog_errno(ret); + ocfs2_xattr_bucket_journal_dirty(handle, inode, &s_bucket); out: ocfs2_xattr_bucket_relse(inode, &s_bucket); @@ -3413,20 +3436,18 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, if (ret) goto out; - for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i], - t_is_new ? - OCFS2_JOURNAL_ACCESS_CREATE : - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) - goto out; - } + ret = ocfs2_xattr_bucket_journal_access(handle, inode, &t_bucket, + t_is_new ? 
+ OCFS2_JOURNAL_ACCESS_CREATE : + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) + goto out; for (i = 0; i < blk_per_bucket; i++) { memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i), blocksize); - ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]); } + ocfs2_xattr_bucket_journal_dirty(handle, inode, &t_bucket); out: ocfs2_xattr_bucket_relse(inode, &s_bucket); @@ -3799,9 +3820,9 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, /* * We will touch all the buckets after the start_bh(include it). - * Add one more bucket and modify the first_bh. + * Then we add one more bucket. */ - credits = end_blk - start_blk + 2 * blk_per_bucket + 1; + credits = end_blk - start_blk + 3 * blk_per_bucket + 1; handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -4077,33 +4098,6 @@ set_new_name_value: return; } -static int ocfs2_xattr_bucket_handle_journal(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_search *xs, - struct buffer_head **bhs, - u16 bh_num) -{ - int ret = 0, off, block_off; - struct ocfs2_xattr_entry *xe = xs->here; - - /* - * First calculate all the blocks we should journal_access - * and journal_dirty. The first block should always be touched. - */ - ret = ocfs2_journal_dirty(handle, bhs[0]); - if (ret) - mlog_errno(ret); - - /* calc the data. */ - off = le16_to_cpu(xe->xe_name_offset); - block_off = off >> inode->i_sb->s_blocksize_bits; - ret = ocfs2_journal_dirty(handle, bhs[block_off]); - if (ret) - mlog_errno(ret); - - return ret; -} - /* * Set the xattr entry in the specified bucket. * The bucket is indicated by xs->bucket and it should have the enough @@ -4115,7 +4109,7 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, u32 name_hash, int local) { - int i, ret; + int ret; handle_t *handle = NULL; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -4143,22 +4137,16 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, goto out; } - for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, xs->bucket.bu_bhs[i], - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out; - } + ret = ocfs2_xattr_bucket_journal_access(handle, inode, &xs->bucket, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; } ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local); + ocfs2_xattr_bucket_journal_dirty(handle, inode, &xs->bucket); - /*Only dirty the blocks we have touched in set xattr. 
*/ - ret = ocfs2_xattr_bucket_handle_journal(inode, handle, xs, - xs->bucket.bu_bhs, blk_per_bucket); - if (ret) - mlog_errno(ret); out: ocfs2_commit_trans(osb, handle); @@ -4398,15 +4386,16 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, le16_to_cpu(xh->xh_count) - 1]; int ret = 0; - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), 1); + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), + ocfs2_blocks_per_xattr_bucket(inode->i_sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); return; } - ret = ocfs2_journal_access(handle, inode, xs->bucket.bu_bhs[0], - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, inode, &xs->bucket, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; @@ -4418,9 +4407,8 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, memset(last, 0, sizeof(struct ocfs2_xattr_entry)); le16_add_cpu(&xh->xh_count, -1); - ret = ocfs2_journal_dirty(handle, xs->bucket.bu_bhs[0]); - if (ret < 0) - mlog_errno(ret); + ocfs2_xattr_bucket_journal_dirty(handle, inode, &xs->bucket); + out_commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); } -- cgit From 4980c6daba967124ed6420032960abd2b48412e2 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 18:54:43 -0700 Subject: ocfs2: Copy xattr buckets with a dedicated function. Now that the places that copy whole buckets are using struct ocfs2_xattr_bucket, we can do the copy in a dedicated function. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 99aefe4ea750..71d9e7bdd30a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -240,6 +240,19 @@ static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle, ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); } +static void ocfs2_xattr_bucket_copy_data(struct inode *inode, + struct ocfs2_xattr_bucket *dest, + struct ocfs2_xattr_bucket *src) +{ + int i; + int blocksize = inode->i_sb->s_blocksize; + int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + for (i = 0; i < blks; i++) { + memcpy(bucket_block(dest, i), bucket_block(src, i), + blocksize); + } +} static inline const char *ocfs2_xattr_prefix(int name_index) { @@ -3299,9 +3312,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, } /* copy the whole bucket to the new first. */ - for (i = 0; i < blk_per_bucket; i++) - memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i), - blocksize); + ocfs2_xattr_bucket_copy_data(inode, &t_bucket, &s_bucket); /* update the new bucket. 
*/ xh = bucket_xh(&t_bucket); @@ -3410,9 +3421,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, u64 t_blkno, int t_is_new) { - int ret, i; - int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - int blocksize = inode->i_sb->s_blocksize; + int ret; struct ocfs2_xattr_bucket s_bucket, t_bucket; BUG_ON(s_blkno == t_blkno); @@ -3443,10 +3452,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, if (ret) goto out; - for (i = 0; i < blk_per_bucket; i++) { - memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i), - blocksize); - } + ocfs2_xattr_bucket_copy_data(inode, &t_bucket, &s_bucket); ocfs2_xattr_bucket_journal_dirty(handle, inode, &t_bucket); out: -- cgit From ba937127596ec2c61437006741f7d29999284de4 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 24 Oct 2008 19:13:20 -0700 Subject: ocfs2: Take ocfs2_xattr_bucket structures off of the stack. The ocfs2_xattr_bucket structure is a nice abstraction, but it is a bit large to have on the stack. Just like ocfs2_path, let's allocate it with a ocfs2_xattr_bucket_new() function. We can now store the inode on the bucket, cleaning up all the other bucket functions. While we're here, we catch another place or two that wasn't using ocfs2_read_xattr_bucket(). Updates: - No longer allocating xis.bucket, as it will never be used. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 281 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 166 insertions(+), 115 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 71d9e7bdd30a..766494ed6e11 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -61,7 +61,14 @@ struct ocfs2_xattr_def_value_root { }; struct ocfs2_xattr_bucket { + /* The inode these xattrs are associated with */ + struct inode *bu_inode; + + /* The actual buffers that make up the bucket */ struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET]; + + /* How many blocks make up one bucket for this filesystem */ + int bu_blocks; }; #define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) @@ -97,7 +104,7 @@ struct ocfs2_xattr_search { */ struct buffer_head *xattr_bh; struct ocfs2_xattr_header *header; - struct ocfs2_xattr_bucket bucket; + struct ocfs2_xattr_bucket *bucket; void *base; void *end; struct ocfs2_xattr_entry *here; @@ -157,69 +164,91 @@ static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) #define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) -static void ocfs2_xattr_bucket_relse(struct inode *inode, - struct ocfs2_xattr_bucket *bucket) +static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode) { - int i, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + struct ocfs2_xattr_bucket *bucket; + int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - for (i = 0; i < blks; i++) { + BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET); + + bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); + if (bucket) { + bucket->bu_inode = inode; + bucket->bu_blocks = blks; + } + + return bucket; +} + +static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) +{ + int i; + + for (i = 0; i < bucket->bu_blocks; i++) { brelse(bucket->bu_bhs[i]); bucket->bu_bhs[i] = NULL; } } +static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket) +{ + if (bucket) { + ocfs2_xattr_bucket_relse(bucket); + bucket->bu_inode = NULL; + kfree(bucket); + } +} + /* * A bucket that has never 
been written to disk doesn't need to be * read. We just need the buffer_heads. Don't call this for * buckets that are already on disk. ocfs2_read_xattr_bucket() initializes * them fully. */ -static int ocfs2_init_xattr_bucket(struct inode *inode, - struct ocfs2_xattr_bucket *bucket, +static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, u64 xb_blkno) { int i, rc = 0; - int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - for (i = 0; i < blks; i++) { - bucket->bu_bhs[i] = sb_getblk(inode->i_sb, xb_blkno + i); + for (i = 0; i < bucket->bu_blocks; i++) { + bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb, + xb_blkno + i); if (!bucket->bu_bhs[i]) { rc = -EIO; mlog_errno(rc); break; } - ocfs2_set_new_buffer_uptodate(inode, bucket->bu_bhs[i]); + ocfs2_set_new_buffer_uptodate(bucket->bu_inode, + bucket->bu_bhs[i]); } if (rc) - ocfs2_xattr_bucket_relse(inode, bucket); + ocfs2_xattr_bucket_relse(bucket); return rc; } /* Read the xattr bucket at xb_blkno */ -static int ocfs2_read_xattr_bucket(struct inode *inode, - struct ocfs2_xattr_bucket *bucket, +static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, u64 xb_blkno) { - int rc, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int rc; - rc = ocfs2_read_blocks(inode, xb_blkno, blks, bucket->bu_bhs, 0); + rc = ocfs2_read_blocks(bucket->bu_inode, xb_blkno, + bucket->bu_blocks, bucket->bu_bhs, 0); if (rc) - ocfs2_xattr_bucket_relse(inode, bucket); + ocfs2_xattr_bucket_relse(bucket); return rc; } static int ocfs2_xattr_bucket_journal_access(handle_t *handle, - struct inode *inode, struct ocfs2_xattr_bucket *bucket, int type) { int i, rc = 0; - int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - for (i = 0; i < blks; i++) { - rc = ocfs2_journal_access(handle, inode, + for (i = 0; i < bucket->bu_blocks; i++) { + rc = ocfs2_journal_access(handle, bucket->bu_inode, bucket->bu_bhs[i], type); if (rc) { mlog_errno(rc); @@ -231,24 +260,24 @@ static int ocfs2_xattr_bucket_journal_access(handle_t *handle, } static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle, - struct inode *inode, struct ocfs2_xattr_bucket *bucket) { - int i, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int i; - for (i = 0; i < blks; i++) + for (i = 0; i < bucket->bu_blocks; i++) ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); } -static void ocfs2_xattr_bucket_copy_data(struct inode *inode, - struct ocfs2_xattr_bucket *dest, +static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest, struct ocfs2_xattr_bucket *src) { int i; - int blocksize = inode->i_sb->s_blocksize; - int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int blocksize = src->bu_inode->i_sb->s_blocksize; + + BUG_ON(dest->bu_blocks != src->bu_blocks); + BUG_ON(dest->bu_inode != src->bu_inode); - for (i = 0; i < blks; i++) { + for (i = 0; i < src->bu_blocks; i++) { memcpy(bucket_block(dest, i), bucket_block(src, i), blocksize); } @@ -869,7 +898,12 @@ static int ocfs2_xattr_block_get(struct inode *inode, size_t size; int ret = -ENODATA, name_offset, name_len, block_off, i; - memset(&xs->bucket, 0, sizeof(xs->bucket)); + xs->bucket = ocfs2_xattr_bucket_new(inode); + if (!xs->bucket) { + ret = -ENOMEM; + mlog_errno(ret); + goto cleanup; + } ret = ocfs2_xattr_block_find(inode, name_index, name, xs); if (ret) { @@ -895,11 +929,11 @@ static int ocfs2_xattr_block_get(struct inode *inode, if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode, - bucket_xh(&xs->bucket), + bucket_xh(xs->bucket), i, &block_off, 
&name_offset); - xs->base = bucket_block(&xs->bucket, block_off); + xs->base = bucket_block(xs->bucket, block_off); } if (ocfs2_xattr_is_local(xs->here)) { memcpy(buffer, (void *)xs->base + @@ -917,8 +951,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, } ret = size; cleanup: - ocfs2_xattr_bucket_relse(inode, &xs->bucket); - memset(&xs->bucket, 0, sizeof(xs->bucket)); + ocfs2_xattr_bucket_free(xs->bucket); brelse(xs->xattr_bh); xs->xattr_bh = NULL; @@ -2047,10 +2080,20 @@ int ocfs2_xattr_set(struct inode *inode, if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; + /* + * Only xbs will be used on indexed trees. xis doesn't need a + * bucket. + */ + xbs.bucket = ocfs2_xattr_bucket_new(inode); + if (!xbs.bucket) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } + ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); - return ret; + goto cleanup_nolock; } xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; @@ -2127,9 +2170,10 @@ int ocfs2_xattr_set(struct inode *inode, cleanup: up_write(&OCFS2_I(inode)->ip_xattr_sem); ocfs2_inode_unlock(inode, 1); +cleanup_nolock: brelse(di_bh); brelse(xbs.xattr_bh); - ocfs2_xattr_bucket_relse(inode, &xbs.bucket); + ocfs2_xattr_bucket_free(xbs.bucket); return ret; } @@ -2373,11 +2417,11 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, lower_bh = bh; bh = NULL; } - xs->bucket.bu_bhs[0] = lower_bh; + xs->bucket->bu_bhs[0] = lower_bh; lower_bh = NULL; - xs->header = bucket_xh(&xs->bucket); - xs->base = bucket_block(&xs->bucket, 0); + xs->header = bucket_xh(xs->bucket); + xs->base = bucket_block(xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; if (found) { @@ -2385,8 +2429,8 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, * If we have found the xattr enty, read all the blocks in * this bucket. */ - ret = ocfs2_read_blocks(inode, bucket_blkno(&xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket.bu_bhs[1], + ret = ocfs2_read_blocks(inode, bucket_blkno(xs->bucket) + 1, + blk_per_bucket - 1, &xs->bucket->bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -2395,7 +2439,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, xs->here = &xs->header->xh_entries[index]; mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, - (unsigned long long)bucket_blkno(&xs->bucket), index); + (unsigned long long)bucket_blkno(xs->bucket), index); } else ret = -ENODATA; @@ -2453,22 +2497,24 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, void *para) { int i, ret = 0; - int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); u32 num_buckets = clusters * bpc; - struct ocfs2_xattr_bucket bucket; + struct ocfs2_xattr_bucket *bucket; - memset(&bucket, 0, sizeof(bucket)); + bucket = ocfs2_xattr_bucket_new(inode); + if (!bucket) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n", clusters, (unsigned long long)blkno); - for (i = 0; i < num_buckets; i++, blkno += blk_per_bucket) { - ret = ocfs2_read_blocks(inode, blkno, blk_per_bucket, - bucket.bu_bhs, 0); + for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { + ret = ocfs2_read_xattr_bucket(bucket, blkno); if (ret) { mlog_errno(ret); - goto out; + break; } /* @@ -2476,26 +2522,24 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, * in the 1st bucket. 
*/ if (i == 0) - num_buckets = le16_to_cpu(bucket_xh(&bucket)->xh_num_buckets); + num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); mlog(0, "iterating xattr bucket %llu, first hash %u\n", (unsigned long long)blkno, - le32_to_cpu(bucket_xh(&bucket)->xh_entries[0].xe_name_hash)); + le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); if (func) { - ret = func(inode, &bucket, para); - if (ret) { + ret = func(inode, bucket, para); + if (ret) mlog_errno(ret); - break; - } + /* Fall through to bucket_relse() */ } - ocfs2_xattr_bucket_relse(inode, &bucket); - memset(&bucket, 0, sizeof(bucket)); + ocfs2_xattr_bucket_relse(bucket); + if (ret) + break; } -out: - ocfs2_xattr_bucket_relse(inode, &bucket); - + ocfs2_xattr_bucket_free(bucket); return ret; } @@ -2718,9 +2762,9 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, int i, blocksize = inode->i_sb->s_blocksize; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - xs->bucket.bu_bhs[0] = new_bh; + xs->bucket->bu_bhs[0] = new_bh; get_bh(new_bh); - xs->header = bucket_xh(&xs->bucket); + xs->header = bucket_xh(xs->bucket); xs->base = new_bh->b_data; xs->end = xs->base + inode->i_sb->s_blocksize; @@ -2728,8 +2772,8 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, if (!xs->not_found) { if (OCFS2_XATTR_BUCKET_SIZE != blocksize) { ret = ocfs2_read_blocks(inode, - bucket_blkno(&xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket.bu_bhs[1], + bucket_blkno(xs->bucket) + 1, + blk_per_bucket - 1, &xs->bucket->bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -3244,8 +3288,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, { int ret, i; int count, start, len, name_value_len = 0, xe_len, name_offset = 0; - u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - struct ocfs2_xattr_bucket s_bucket, t_bucket; + struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; int blocksize = inode->i_sb->s_blocksize; @@ -3253,16 +3296,21 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, mlog(0, "move some of xattrs from bucket %llu to %llu\n", (unsigned long long)blk, (unsigned long long)new_blk); - memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); - memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); + s_bucket = ocfs2_xattr_bucket_new(inode); + t_bucket = ocfs2_xattr_bucket_new(inode); + if (!s_bucket || !t_bucket) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } - ret = ocfs2_read_xattr_bucket(inode, &s_bucket, blk); + ret = ocfs2_read_xattr_bucket(s_bucket, blk); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_xattr_bucket_journal_access(handle, inode, &s_bucket, + ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -3273,13 +3321,13 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, * Even if !new_bucket_head, we're overwriting t_bucket. Thus, * there's no need to read it. */ - ret = ocfs2_init_xattr_bucket(inode, &t_bucket, new_blk); + ret = ocfs2_init_xattr_bucket(t_bucket, new_blk); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_xattr_bucket_journal_access(handle, inode, &t_bucket, + ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, new_bucket_head ? 
OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); @@ -3288,7 +3336,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, goto out; } - xh = bucket_xh(&s_bucket); + xh = bucket_xh(s_bucket); count = le16_to_cpu(xh->xh_count); start = ocfs2_xattr_find_divide_pos(xh); @@ -3300,10 +3348,10 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, * The hash value is set as one larger than * that of the last entry in the previous bucket. */ - for (i = 0; i < blk_per_bucket; i++) - memset(bucket_block(&t_bucket, i), 0, blocksize); + for (i = 0; i < t_bucket->bu_blocks; i++) + memset(bucket_block(t_bucket, i), 0, blocksize); - xh = bucket_xh(&t_bucket); + xh = bucket_xh(t_bucket); xh->xh_free_start = cpu_to_le16(blocksize); xh->xh_entries[0].xe_name_hash = xe->xe_name_hash; le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1); @@ -3312,10 +3360,10 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, } /* copy the whole bucket to the new first. */ - ocfs2_xattr_bucket_copy_data(inode, &t_bucket, &s_bucket); + ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket); /* update the new bucket. */ - xh = bucket_xh(&t_bucket); + xh = bucket_xh(t_bucket); /* * Calculate the total name/value len and xh_free_start for @@ -3379,7 +3427,7 @@ set_num_buckets: else xh->xh_num_buckets = 0; - ocfs2_xattr_bucket_journal_dirty(handle, inode, &t_bucket); + ocfs2_xattr_bucket_journal_dirty(handle, t_bucket); /* store the first_hash of the new bucket. */ if (first_hash) @@ -3393,18 +3441,18 @@ set_num_buckets: if (start == count) goto out; - xh = bucket_xh(&s_bucket); + xh = bucket_xh(s_bucket); memset(&xh->xh_entries[start], 0, sizeof(struct ocfs2_xattr_entry) * (count - start)); xh->xh_count = cpu_to_le16(start); xh->xh_free_start = cpu_to_le16(name_offset); xh->xh_name_value_len = cpu_to_le16(name_value_len); - ocfs2_xattr_bucket_journal_dirty(handle, inode, &s_bucket); + ocfs2_xattr_bucket_journal_dirty(handle, s_bucket); out: - ocfs2_xattr_bucket_relse(inode, &s_bucket); - ocfs2_xattr_bucket_relse(inode, &t_bucket); + ocfs2_xattr_bucket_free(s_bucket); + ocfs2_xattr_bucket_free(t_bucket); return ret; } @@ -3422,7 +3470,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, int t_is_new) { int ret; - struct ocfs2_xattr_bucket s_bucket, t_bucket; + struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; BUG_ON(s_blkno == t_blkno); @@ -3430,10 +3478,15 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, (unsigned long long)s_blkno, (unsigned long long)t_blkno, t_is_new); - memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); - memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket)); - - ret = ocfs2_read_xattr_bucket(inode, &s_bucket, s_blkno); + s_bucket = ocfs2_xattr_bucket_new(inode); + t_bucket = ocfs2_xattr_bucket_new(inode); + if (!s_bucket || !t_bucket) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno); if (ret) goto out; @@ -3441,23 +3494,23 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, * Even if !t_is_new, we're overwriting t_bucket. Thus, * there's no need to read it. */ - ret = ocfs2_init_xattr_bucket(inode, &t_bucket, t_blkno); + ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno); if (ret) goto out; - ret = ocfs2_xattr_bucket_journal_access(handle, inode, &t_bucket, + ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, t_is_new ? 
OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); if (ret) goto out; - ocfs2_xattr_bucket_copy_data(inode, &t_bucket, &s_bucket); - ocfs2_xattr_bucket_journal_dirty(handle, inode, &t_bucket); + ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket); + ocfs2_xattr_bucket_journal_dirty(handle, t_bucket); out: - ocfs2_xattr_bucket_relse(inode, &s_bucket); - ocfs2_xattr_bucket_relse(inode, &t_bucket); + ocfs2_xattr_bucket_free(t_bucket); + ocfs2_xattr_bucket_free(s_bucket); return ret; } @@ -4009,7 +4062,7 @@ static void ocfs2_xattr_set_entry_normal(struct inode *inode, xe->xe_value_size = 0; val = ocfs2_xattr_bucket_get_val(inode, - &xs->bucket, offs); + xs->bucket, offs); memset(val + OCFS2_XATTR_SIZE(name_len), 0, size - OCFS2_XATTR_SIZE(name_len)); if (OCFS2_XATTR_SIZE(xi->value_len) > 0) @@ -4087,8 +4140,7 @@ set_new_name_value: xh->xh_free_start = cpu_to_le16(offs); } - val = ocfs2_xattr_bucket_get_val(inode, - &xs->bucket, offs - size); + val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size); xe->xe_name_offset = cpu_to_le16(offs - size); memset(val, 0, size); @@ -4122,12 +4174,12 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", (unsigned long)xi->value_len, xi->name_index, - (unsigned long long)bucket_blkno(&xs->bucket)); + (unsigned long long)bucket_blkno(xs->bucket)); - if (!xs->bucket.bu_bhs[1]) { + if (!xs->bucket->bu_bhs[1]) { ret = ocfs2_read_blocks(inode, - bucket_blkno(&xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket.bu_bhs[1], + bucket_blkno(xs->bucket) + 1, + blk_per_bucket - 1, &xs->bucket->bu_bhs[1], 0); if (ret) { mlog_errno(ret); @@ -4143,7 +4195,7 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, goto out; } - ret = ocfs2_xattr_bucket_journal_access(handle, inode, &xs->bucket, + ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -4151,7 +4203,7 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, } ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local); - ocfs2_xattr_bucket_journal_dirty(handle, inode, &xs->bucket); + ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); out: ocfs2_commit_trans(osb, handle); @@ -4264,10 +4316,10 @@ static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, struct ocfs2_xattr_entry *xe = xs->here; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base; - BUG_ON(!xs->bucket.bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); + BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); offset = xe - xh->xh_entries; - ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket.bu_bhs[0], + ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket->bu_bhs[0], offset, len); if (ret) mlog_errno(ret); @@ -4387,7 +4439,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, struct ocfs2_xattr_search *xs) { handle_t *handle = NULL; - struct ocfs2_xattr_header *xh = bucket_xh(&xs->bucket); + struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); struct ocfs2_xattr_entry *last = &xh->xh_entries[ le16_to_cpu(xh->xh_count) - 1]; int ret = 0; @@ -4400,7 +4452,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, return; } - ret = ocfs2_xattr_bucket_journal_access(handle, inode, &xs->bucket, + ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4413,7 +4465,7 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, 
memset(last, 0, sizeof(struct ocfs2_xattr_entry)); le16_add_cpu(&xh->xh_count, -1); - ocfs2_xattr_bucket_journal_dirty(handle, inode, &xs->bucket); + ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); out_commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); @@ -4565,7 +4617,7 @@ try_again: mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " "of %u which exceed block size\n", - (unsigned long long)bucket_blkno(&xs->bucket), + (unsigned long long)bucket_blkno(xs->bucket), header_size); if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) @@ -4605,7 +4657,7 @@ try_again: mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" " %u\n", xs->not_found, - (unsigned long long)bucket_blkno(&xs->bucket), + (unsigned long long)bucket_blkno(xs->bucket), free, need, max_free, le16_to_cpu(xh->xh_free_start), le16_to_cpu(xh->xh_name_value_len)); @@ -4617,7 +4669,7 @@ try_again: * name/value will be moved, the xe shouldn't be changed * in xs. */ - ret = ocfs2_defrag_xattr_bucket(inode, &xs->bucket); + ret = ocfs2_defrag_xattr_bucket(inode, xs->bucket); if (ret) { mlog_errno(ret); goto out; @@ -4649,7 +4701,7 @@ try_again: * add a new bucket for the insert. */ ret = ocfs2_check_xattr_bucket_collision(inode, - &xs->bucket, + xs->bucket, xi->name); if (ret) { mlog_errno(ret); @@ -4658,14 +4710,13 @@ try_again: ret = ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, - xs->bucket.bu_bhs[0]); + xs->bucket->bu_bhs[0]); if (ret) { mlog_errno(ret); goto out; } - ocfs2_xattr_bucket_relse(inode, &xs->bucket); - memset(&xs->bucket, 0, sizeof(xs->bucket)); + ocfs2_xattr_bucket_relse(xs->bucket); ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, xi->name_index, -- cgit From e2356a3f02cfdbce735465a2b40b6dc72a764c26 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 27 Oct 2008 15:01:54 -0700 Subject: ocfs2: Use buckets in ocfs2_xattr_bucket_find(). Change the ocfs2_xattr_bucket_find() function to use ocfs2_xattr_bucket as its abstraction. This makes for more efficient reads, as buckets are linear blocks, and also has improved caching characteristics. It also reads better. 
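As an aside for anyone following the conversion: the search itself is an ordinary binary search over buckets that are sorted by the hash of their first entry, remembering the last bucket whose first hash is not larger than the hash being looked up. The user-space sketch below shows only that shape; all demo_* names are hypothetical stand-ins, not the kernel helpers touched by this patch.

/*
 * Illustration only: buckets are ordered by the hash of their first
 * entry, each probe "reads" one bucket, and the last bucket whose
 * first hash is <= the search hash is kept as the candidate.
 */
#include <stdio.h>

struct demo_bucket {
	unsigned int first_hash;	/* hash of the bucket's first entry */
};

/* Stand-in for reading bucket i; real code would go to disk or cache. */
static const struct demo_bucket *demo_read_bucket(const struct demo_bucket *tbl,
						  int i)
{
	return &tbl[i];
}

/* Return the index of the bucket that should hold name_hash. */
static int demo_find_bucket(const struct demo_bucket *tbl, int nr,
			    unsigned int name_hash)
{
	int low = 0, high = nr - 1, candidate = 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		const struct demo_bucket *b = demo_read_bucket(tbl, mid);

		if (name_hash < b->first_hash) {
			high = mid - 1;
		} else {
			candidate = mid;	/* may still move right */
			low = mid + 1;
		}
	}
	return candidate;
}

int main(void)
{
	struct demo_bucket tbl[] = { { 10 }, { 40 }, { 90 }, { 200 } };

	/* 95 falls between buckets 2 and 3, so bucket 2 is chosen. */
	printf("hash 95 -> bucket %d\n", demo_find_bucket(tbl, 4, 95));
	return 0;
}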
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 89 ++++++++++++++++++++------------------------------------ 1 file changed, 31 insertions(+), 58 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 766494ed6e11..46986c635eb8 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2248,7 +2248,7 @@ typedef int (xattr_bucket_func)(struct inode *inode, void *para); static int ocfs2_find_xe_in_bucket(struct inode *inode, - struct buffer_head *header_bh, + struct ocfs2_xattr_bucket *bucket, int name_index, const char *name, u32 name_hash, @@ -2256,11 +2256,9 @@ static int ocfs2_find_xe_in_bucket(struct inode *inode, int *found) { int i, ret = 0, cmp = 1, block_off, new_offset; - struct ocfs2_xattr_header *xh = - (struct ocfs2_xattr_header *)header_bh->b_data; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); size_t name_len = strlen(name); struct ocfs2_xattr_entry *xe = NULL; - struct buffer_head *name_bh = NULL; char *xe_name; /* @@ -2291,19 +2289,8 @@ static int ocfs2_find_xe_in_bucket(struct inode *inode, break; } - ret = ocfs2_read_block(inode, header_bh->b_blocknr + block_off, - &name_bh); - if (ret) { - mlog_errno(ret); - break; - } - xe_name = name_bh->b_data + new_offset; - - cmp = memcmp(name, xe_name, name_len); - brelse(name_bh); - name_bh = NULL; - - if (cmp == 0) { + xe_name = bucket_block(bucket, block_off) + new_offset; + if (!memcmp(name, xe_name, name_len)) { *xe_index = i; *found = 1; ret = 0; @@ -2333,39 +2320,42 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, struct ocfs2_xattr_search *xs) { int ret, found = 0; - struct buffer_head *bh = NULL; - struct buffer_head *lower_bh = NULL; struct ocfs2_xattr_header *xh = NULL; struct ocfs2_xattr_entry *xe = NULL; u16 index = 0; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int low_bucket = 0, bucket, high_bucket; + struct ocfs2_xattr_bucket *search; u32 last_hash; - u64 blkno; + u64 blkno, lower_blkno = 0; - ret = ocfs2_read_block(inode, p_blkno, &bh); + search = ocfs2_xattr_bucket_new(inode); + if (!search) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_xattr_bucket(search, p_blkno); if (ret) { mlog_errno(ret); goto out; } - xh = (struct ocfs2_xattr_header *)bh->b_data; + xh = bucket_xh(search); high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1; - while (low_bucket <= high_bucket) { - brelse(bh); - bh = NULL; - bucket = (low_bucket + high_bucket) / 2; + ocfs2_xattr_bucket_relse(search); + bucket = (low_bucket + high_bucket) / 2; blkno = p_blkno + bucket * blk_per_bucket; - - ret = ocfs2_read_block(inode, blkno, &bh); + ret = ocfs2_read_xattr_bucket(search, blkno); if (ret) { mlog_errno(ret); goto out; } - xh = (struct ocfs2_xattr_header *)bh->b_data; + xh = bucket_xh(search); xe = &xh->xh_entries[0]; if (name_hash < le32_to_cpu(xe->xe_name_hash)) { high_bucket = bucket - 1; @@ -2382,10 +2372,8 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, last_hash = le32_to_cpu(xe->xe_name_hash); - /* record lower_bh which may be the insert place. */ - brelse(lower_bh); - lower_bh = bh; - bh = NULL; + /* record lower_blkno which may be the insert place. */ + lower_blkno = blkno; if (name_hash > le32_to_cpu(xe->xe_name_hash)) { low_bucket = bucket + 1; @@ -2393,7 +2381,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, } /* the searched xattr should reside in this bucket if exists. 
*/ - ret = ocfs2_find_xe_in_bucket(inode, lower_bh, + ret = ocfs2_find_xe_in_bucket(inode, search, name_index, name, name_hash, &index, &found); if (ret) { @@ -2408,35 +2396,21 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, * When the xattr's hash value is in the gap of 2 buckets, we will * always set it to the previous bucket. */ - if (!lower_bh) { - /* - * We can't find any bucket whose first name_hash is less - * than the find name_hash. - */ - BUG_ON(bh->b_blocknr != p_blkno); - lower_bh = bh; - bh = NULL; + if (!lower_blkno) + lower_blkno = p_blkno; + + /* This should be in cache - we just read it during the search */ + ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno); + if (ret) { + mlog_errno(ret); + goto out; } - xs->bucket->bu_bhs[0] = lower_bh; - lower_bh = NULL; xs->header = bucket_xh(xs->bucket); xs->base = bucket_block(xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; if (found) { - /* - * If we have found the xattr enty, read all the blocks in - * this bucket. - */ - ret = ocfs2_read_blocks(inode, bucket_blkno(xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket->bu_bhs[1], - 0); - if (ret) { - mlog_errno(ret); - goto out; - } - xs->here = &xs->header->xh_entries[index]; mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, (unsigned long long)bucket_blkno(xs->bucket), index); @@ -2444,8 +2418,7 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, ret = -ENODATA; out: - brelse(bh); - brelse(lower_bh); + ocfs2_xattr_bucket_free(search); return ret; } -- cgit From 178eeac354ea28828d5e94a3a7b51368c171d6a5 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 27 Oct 2008 15:18:29 -0700 Subject: ocfs2: Use buckets in ocfs2_xattr_create_index_block(). Use the ocfs2_xattr_bucket abstraction in ocfs2_xattr_create_index_block() and its helpers. We get more efficient reads, a lot less buffer_head munging, and nicer code to boot. While we're at it, ocfs2_xattr_update_xattr_search() becomes void. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 114 ++++++++++++++++--------------------------------------- 1 file changed, 32 insertions(+), 82 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 46986c635eb8..76969b922002 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2649,32 +2649,34 @@ static void swap_xe(void *a, void *b, int size) /* * When the ocfs2_xattr_block is filled up, new bucket will be created * and all the xattr entries will be moved to the new bucket. + * The header goes at the start of the bucket, and the names+values are + * filled from the end. This is why *target starts as the last buffer. * Note: we need to sort the entries since they are not saved in order * in the ocfs2_xattr_block. 
*/ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, struct buffer_head *xb_bh, - struct buffer_head *xh_bh, - struct buffer_head *data_bh) + struct ocfs2_xattr_bucket *bucket) { int i, blocksize = inode->i_sb->s_blocksize; + int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u16 offset, size, off_change; struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header; - struct ocfs2_xattr_header *xh = - (struct ocfs2_xattr_header *)xh_bh->b_data; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 count = le16_to_cpu(xb_xh->xh_count); - char *target = xh_bh->b_data, *src = xb_bh->b_data; + char *src = xb_bh->b_data; + char *target = bucket_block(bucket, blks - 1); mlog(0, "cp xattr from block %llu to bucket %llu\n", (unsigned long long)xb_bh->b_blocknr, - (unsigned long long)xh_bh->b_blocknr); + (unsigned long long)bucket_blkno(bucket)); + + for (i = 0; i < blks; i++) + memset(bucket_block(bucket, i), 0, blocksize); - memset(xh_bh->b_data, 0, blocksize); - if (data_bh) - memset(data_bh->b_data, 0, blocksize); /* * Since the xe_name_offset is based on ocfs2_xattr_header, * there is a offset change corresponding to the change of @@ -2686,8 +2688,6 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, size = blocksize - offset; /* copy all the names and values. */ - if (data_bh) - target = data_bh->b_data; memcpy(target + offset, src + offset, size); /* Init new header now. */ @@ -2697,7 +2697,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size); /* copy all the entries. */ - target = xh_bh->b_data; + target = bucket_block(bucket, 0); offset = offsetof(struct ocfs2_xattr_header, xh_entries); size = count * sizeof(struct ocfs2_xattr_entry); memcpy(target + offset, (char *)xb_xh + offset, size); @@ -2723,42 +2723,24 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, * While if the entry is in index b-tree, "bucket" indicates the * real place of the xattr. 
*/ -static int ocfs2_xattr_update_xattr_search(struct inode *inode, - struct ocfs2_xattr_search *xs, - struct buffer_head *old_bh, - struct buffer_head *new_bh) +static void ocfs2_xattr_update_xattr_search(struct inode *inode, + struct ocfs2_xattr_search *xs, + struct buffer_head *old_bh) { - int ret = 0; char *buf = old_bh->b_data; struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf; struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header; - int i, blocksize = inode->i_sb->s_blocksize; - u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int i; - xs->bucket->bu_bhs[0] = new_bh; - get_bh(new_bh); xs->header = bucket_xh(xs->bucket); - - xs->base = new_bh->b_data; + xs->base = bucket_block(xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; - if (!xs->not_found) { - if (OCFS2_XATTR_BUCKET_SIZE != blocksize) { - ret = ocfs2_read_blocks(inode, - bucket_blkno(xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket->bu_bhs[1], - 0); - if (ret) { - mlog_errno(ret); - return ret; - } - - } - i = xs->here - old_xh->xh_entries; - xs->here = &xs->header->xh_entries[i]; - } + if (xs->not_found) + return; - return ret; + i = xs->here - old_xh->xh_entries; + xs->here = &xs->header->xh_entries[i]; } static int ocfs2_xattr_create_index_block(struct inode *inode, @@ -2771,18 +2753,17 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_alloc_context *data_ac; - struct buffer_head *xh_bh = NULL, *data_bh = NULL; struct buffer_head *xb_bh = xs->xattr_bh; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_tree_root *xr; u16 xb_flags = le16_to_cpu(xb->xb_flags); - u16 bpb = ocfs2_blocks_per_xattr_bucket(inode->i_sb); mlog(0, "create xattr index block for %llu\n", (unsigned long long)xb_bh->b_blocknr); BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); + BUG_ON(!xs->bucket); ret = ocfs2_reserve_clusters(osb, 1, &data_ac); if (ret) { @@ -2798,10 +2779,10 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, down_write(&oi->ip_alloc_sem); /* - * 3 more credits, one for xattr block update, one for the 1st block - * of the new xattr bucket and one for the value/data. + * We need more credits. One for the xattr block update and one + * for each block of the new xattr bucket. 
*/ - credits += 3; + credits += 1 + ocfs2_blocks_per_xattr_bucket(inode->i_sb); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -2832,51 +2813,23 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, mlog(0, "allocate 1 cluster from %llu to xattr block\n", (unsigned long long)blkno); - xh_bh = sb_getblk(inode->i_sb, blkno); - if (!xh_bh) { - ret = -EIO; + ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); + if (ret) { mlog_errno(ret); goto out_commit; } - ocfs2_set_new_buffer_uptodate(inode, xh_bh); - - ret = ocfs2_journal_access(handle, inode, xh_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, + OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; } - if (bpb > 1) { - data_bh = sb_getblk(inode->i_sb, blkno + bpb - 1); - if (!data_bh) { - ret = -EIO; - mlog_errno(ret); - goto out_commit; - } - - ocfs2_set_new_buffer_uptodate(inode, data_bh); - - ret = ocfs2_journal_access(handle, inode, data_bh, - OCFS2_JOURNAL_ACCESS_CREATE); - if (ret) { - mlog_errno(ret); - goto out_commit; - } - } - - ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xh_bh, data_bh); - - ocfs2_journal_dirty(handle, xh_bh); - if (data_bh) - ocfs2_journal_dirty(handle, data_bh); + ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket); + ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); - ret = ocfs2_xattr_update_xattr_search(inode, xs, xb_bh, xh_bh); - if (ret) { - mlog_errno(ret); - goto out_commit; - } + ocfs2_xattr_update_xattr_search(inode, xs, xb_bh); /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */ memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize - @@ -2911,9 +2864,6 @@ out: if (data_ac) ocfs2_free_alloc_context(data_ac); - brelse(xh_bh); - brelse(data_bh); - return ret; } -- cgit From 161d6f30f18c4a7e2b24705b6690cce3ff276eb9 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 27 Oct 2008 15:25:18 -0700 Subject: ocfs2: Use buckets in ocfs2_defrag_xattr_bucket(). Use the ocfs2_xattr_bucket abstraction for reading and writing the bucket in ocfs2_defrag_xattr_bucket(). 
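For context, the pattern this patch keeps is a simple gather/rework/scatter: copy every block of the bucket into one contiguous buffer, compact and re-sort the entries there, then copy the buffer back block by block. The sketch below is a minimal user-space illustration of just that I/O shape under an assumed fixed block size; the demo_* names are invented for the example and are not the ocfs2 helpers.

/*
 * Illustration only: gather a multi-block bucket into one linear
 * buffer, rework it, and scatter it back one block at a time.
 */
#include <stdlib.h>
#include <string.h>

#define DEMO_BLOCKSIZE	512
#define DEMO_NR_BLOCKS	4

struct demo_bucket {
	int nr_blocks;
	char blocks[DEMO_NR_BLOCKS][DEMO_BLOCKSIZE];
};

/* Copy every block into a single linear work buffer. */
static char *demo_gather(const struct demo_bucket *b)
{
	char *buf = malloc((size_t)b->nr_blocks * DEMO_BLOCKSIZE);
	int i;

	if (!buf)
		return NULL;
	for (i = 0; i < b->nr_blocks; i++)
		memcpy(buf + i * DEMO_BLOCKSIZE, b->blocks[i], DEMO_BLOCKSIZE);
	return buf;
}

/* Write the reworked buffer back, one block-sized chunk at a time. */
static void demo_scatter(struct demo_bucket *b, const char *buf)
{
	int i;

	for (i = 0; i < b->nr_blocks; i++)
		memcpy(b->blocks[i], buf + i * DEMO_BLOCKSIZE, DEMO_BLOCKSIZE);
}

int main(void)
{
	static struct demo_bucket b = { .nr_blocks = DEMO_NR_BLOCKS };
	char *work = demo_gather(&b);

	if (work) {
		/* ...defragment and sort the entries inside 'work' here... */
		demo_scatter(&b, work);
		free(work);
	}
	return 0;
}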
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 55 +++++++++++++++++++++++-------------------------------- 1 file changed, 23 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 76969b922002..127a6285078a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2894,21 +2894,11 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; u64 blkno = bucket_blkno(bucket); - u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u16 xh_free_start; size_t blocksize = inode->i_sb->s_blocksize; handle_t *handle; - struct buffer_head **bhs; struct ocfs2_xattr_entry *xe; - - bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, - GFP_NOFS); - if (!bhs) - return -ENOMEM; - - ret = ocfs2_read_blocks(inode, blkno, blk_per_bucket, bhs, 0); - if (ret) - goto out; + struct ocfs2_xattr_bucket *wb = NULL; /* * In order to make the operation more efficient and generic, @@ -2922,11 +2912,21 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, goto out; } + wb = ocfs2_xattr_bucket_new(inode); + if (!wb) { + ret = -ENOMEM; + goto out; + } + + ret = ocfs2_read_xattr_bucket(wb, blkno); + if (ret) + goto out; + buf = bucket_buf; - for (i = 0; i < blk_per_bucket; i++, buf += blocksize) - memcpy(buf, bhs[i]->b_data, blocksize); + for (i = 0; i < wb->bu_blocks; i++, buf += blocksize) + memcpy(buf, bucket_block(wb, i), blocksize); - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), blk_per_bucket); + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), wb->bu_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); handle = NULL; @@ -2934,13 +2934,11 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, goto out; } - for (i = 0; i < blk_per_bucket; i++) { - ret = ocfs2_journal_access(handle, inode, bhs[i], - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto commit; - } + ret = ocfs2_xattr_bucket_journal_access(handle, wb, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto commit; } xh = (struct ocfs2_xattr_header *)bucket_buf; @@ -3009,21 +3007,14 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, cmp_xe, swap_xe); buf = bucket_buf; - for (i = 0; i < blk_per_bucket; i++, buf += blocksize) { - memcpy(bhs[i]->b_data, buf, blocksize); - ocfs2_journal_dirty(handle, bhs[i]); - } + for (i = 0; i < wb->bu_blocks; i++, buf += blocksize) + memcpy(bucket_block(wb, i), buf, blocksize); + ocfs2_xattr_bucket_journal_dirty(handle, wb); commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: - - if (bhs) { - for (i = 0; i < blk_per_bucket; i++) - brelse(bhs[i]); - } - kfree(bhs); - + ocfs2_xattr_bucket_free(wb); kfree(bucket_buf); return ret; } -- cgit From 02dbf38d19c19016f558fe0dc0c44f8041d3eb8e Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 27 Oct 2008 18:07:45 -0700 Subject: ocfs2: Use buckets in ocfs2_xattr_set_entry_in_bucket(). The ocfs2_xattr_set_entry_in_bucket() function is already working on an ocfs2_xattr_bucket structure, so let's use the bucket API. 
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 127a6285078a..029a9f4559f1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4083,25 +4083,24 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, { int ret; handle_t *handle = NULL; - u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u64 blkno; mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", (unsigned long)xi->value_len, xi->name_index, (unsigned long long)bucket_blkno(xs->bucket)); if (!xs->bucket->bu_bhs[1]) { - ret = ocfs2_read_blocks(inode, - bucket_blkno(xs->bucket) + 1, - blk_per_bucket - 1, &xs->bucket->bu_bhs[1], - 0); + blkno = bucket_blkno(xs->bucket); + ocfs2_xattr_bucket_relse(xs->bucket); + ret = ocfs2_read_xattr_bucket(xs->bucket, blkno); if (ret) { mlog_errno(ret); goto out; } } - handle = ocfs2_start_trans(osb, blk_per_bucket); + handle = ocfs2_start_trans(osb, xs->bucket->bu_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); handle = NULL; -- cgit From 1c32a2fd46ddc01bd86bff56a8f5d98c815750f4 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Thu, 6 Nov 2008 08:10:47 +0800 Subject: ocfs2/xattr: Remove additional bucket allocation in bucket defragment. Joel has refactored xattr bucket and make xattr bucket a general wrapper. So in ocfs2_defrag_xattr_bucket, we have already passed the bucket in, so there is no need to allocate a new one and read it. Signed-off-by: Tao Ma Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 029a9f4559f1..87cf39ddfe5b 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2898,7 +2898,6 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, size_t blocksize = inode->i_sb->s_blocksize; handle_t *handle; struct ocfs2_xattr_entry *xe; - struct ocfs2_xattr_bucket *wb = NULL; /* * In order to make the operation more efficient and generic, @@ -2912,21 +2911,11 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, goto out; } - wb = ocfs2_xattr_bucket_new(inode); - if (!wb) { - ret = -ENOMEM; - goto out; - } - - ret = ocfs2_read_xattr_bucket(wb, blkno); - if (ret) - goto out; - buf = bucket_buf; - for (i = 0; i < wb->bu_blocks; i++, buf += blocksize) - memcpy(buf, bucket_block(wb, i), blocksize); + for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) + memcpy(buf, bucket_block(bucket, i), blocksize); - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), wb->bu_blocks); + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), bucket->bu_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); handle = NULL; @@ -2934,7 +2923,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_xattr_bucket_journal_access(handle, wb, + ret = ocfs2_xattr_bucket_journal_access(handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -3007,14 +2996,13 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, cmp_xe, swap_xe); buf = bucket_buf; - for (i = 0; i < wb->bu_blocks; i++, buf += blocksize) - memcpy(bucket_block(wb, i), buf, blocksize); - ocfs2_xattr_bucket_journal_dirty(handle, wb); + for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) + memcpy(bucket_block(bucket, i), buf, blocksize); + 
ocfs2_xattr_bucket_journal_dirty(handle, bucket); commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: - ocfs2_xattr_bucket_free(wb); kfree(bucket_buf); return ret; } -- cgit From 757055adc5d41b910bdead925060f077dd2d9169 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Thu, 6 Nov 2008 08:10:48 +0800 Subject: ocfs2/xattr: Only set buffer update if it doesn't exist in cache. When we call ocfs2_init_xattr_bucket, we deem that the new buffer head will be written to disk immediately, so we just use sb_getblk. But in some cases the buffer may have already been in ocfs2 uptodate cache, so we only call ocfs2_set_buffer_uptodate if the buffer head isn't in the cache. Signed-off-by: Tao Ma Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 87cf39ddfe5b..d8fc714e9415 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -219,8 +219,10 @@ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, break; } - ocfs2_set_new_buffer_uptodate(bucket->bu_inode, - bucket->bu_bhs[i]); + if (!ocfs2_buffer_uptodate(bucket->bu_inode, + bucket->bu_bhs[i])) + ocfs2_set_new_buffer_uptodate(bucket->bu_inode, + bucket->bu_bhs[i]); } if (rc) -- cgit From 976331d8789d4d84a11b45b87c520ade83715343 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 12 Nov 2008 08:26:57 +0800 Subject: ocfs2/xattr: Only extend xattr bucket in need. When the first block of a bucket is filled up with xattr entries, we normally extend the bucket. But if we are just replace one xattr with small length, we don't need to extend it. This is important since we will calculate what we need before the transaction and in this situation no resources will be allocated. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d8fc714e9415..4501c63193df 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4564,7 +4564,9 @@ try_again: free, need, max_free, le16_to_cpu(xh->xh_free_start), le16_to_cpu(xh->xh_name_value_len)); - if (free < need || count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { + if (free < need || + (xs->not_found && + count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) { if (need <= max_free && count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { /* -- cgit From 2891d290aa6eee0821f7e4ad0b1c4ae4d964b0f1 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 12 Nov 2008 08:26:58 +0800 Subject: ocfs2: Add clusters free in dealloc_ctxt. Now in ocfs2 xattr set, the whole process are divided into many small parts and they are wrapped into diffrent transactions and it make the set doesn't look like a real transaction. So we want to integrate it into a real one. In some cases we will allocate some clusters and free some in just one transaction. e.g, one xattr is larger than inline size, so it and its value root is stored within the inode while the value is outside in a cluster. Then we try to update it with a smaller value(larger than the size of root but smaller than inline size), we may need to free the outside cluster while allocate a new bucket(one cluster) since now the inode may be full. The old solution will lock the global_bitmap(if the local alloc failed in stress test) and then the truncate log. This will cause a ABBA lock with truncate log flush. 
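To make that lock-ordering problem concrete, the idea is to defer the actual frees: while the allocator locks are held, freed cluster ranges are only queued on a per-operation list; once those locks are dropped, the list is walked and the clusters are really released. The sketch below is a minimal user-space model of such a deferred-free list, with invented demo_* names rather than the real ocfs2 structures.

/*
 * Illustration only: record frees while the locks are held, perform
 * them after the locks are dropped, so the two lock classes are never
 * taken in opposite orders.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_deferred_free {
	unsigned long long blkno;	/* first block of the range */
	unsigned int bits;		/* number of clusters */
	struct demo_deferred_free *next;
};

struct demo_dealloc_ctxt {
	struct demo_deferred_free *head;
};

/* Called while the transaction/allocator locks are held: only record. */
static int demo_cache_dealloc(struct demo_dealloc_ctxt *c,
			      unsigned long long blkno, unsigned int bits)
{
	struct demo_deferred_free *item = malloc(sizeof(*item));

	if (!item)
		return -1;
	item->blkno = blkno;
	item->bits = bits;
	item->next = c->head;
	c->head = item;
	return 0;
}

/* Called after the locks are released: do the real frees. */
static void demo_run_deallocs(struct demo_dealloc_ctxt *c)
{
	while (c->head) {
		struct demo_deferred_free *item = c->head;

		c->head = item->next;
		/* The real code would log the range for freeing here. */
		printf("free %u clusters at block %llu\n",
		       item->bits, item->blkno);
		free(item);
	}
}

int main(void)
{
	struct demo_dealloc_ctxt ctxt = { NULL };

	if (demo_cache_dealloc(&ctxt, 4096, 1) ||	/* inside the transaction */
	    demo_cache_dealloc(&ctxt, 8192, 2))
		return 1;
	demo_run_deallocs(&ctxt);			/* after the locks are dropped */
	return 0;
}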
This patch add the clusters free in dealloc_ctxt, so that we can record the free clusters during the transaction and then free it after we release the global_bitmap in xattr set. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++---- fs/ocfs2/alloc.h | 4 +++ 2 files changed, 103 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 0cc2deb9394c..4614614084dd 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5800,7 +5800,10 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) */ /* - * Describes a single block free from a suballocator + * Describe a single bit freed from a suballocator. For the block + * suballocators, it represents one block. For the global cluster + * allocator, it represents some clusters and free_bit indicates + * clusters number. */ struct ocfs2_cached_block_free { struct ocfs2_cached_block_free *free_next; @@ -5815,10 +5818,10 @@ struct ocfs2_per_slot_free_list { struct ocfs2_cached_block_free *f_first; }; -static int ocfs2_free_cached_items(struct ocfs2_super *osb, - int sysfile_type, - int slot, - struct ocfs2_cached_block_free *head) +static int ocfs2_free_cached_blocks(struct ocfs2_super *osb, + int sysfile_type, + int slot, + struct ocfs2_cached_block_free *head) { int ret; u64 bg_blkno; @@ -5893,6 +5896,82 @@ out: return ret; } +int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, + u64 blkno, unsigned int bit) +{ + int ret = 0; + struct ocfs2_cached_block_free *item; + + item = kmalloc(sizeof(*item), GFP_NOFS); + if (item == NULL) { + ret = -ENOMEM; + mlog_errno(ret); + return ret; + } + + mlog(0, "Insert clusters: (bit %u, blk %llu)\n", + bit, (unsigned long long)blkno); + + item->free_blk = blkno; + item->free_bit = bit; + item->free_next = ctxt->c_global_allocator; + + ctxt->c_global_allocator = item; + return ret; +} + +static int ocfs2_free_cached_clusters(struct ocfs2_super *osb, + struct ocfs2_cached_block_free *head) +{ + struct ocfs2_cached_block_free *tmp; + struct inode *tl_inode = osb->osb_tl_inode; + handle_t *handle; + int ret = 0; + + mutex_lock(&tl_inode->i_mutex); + + while (head) { + if (ocfs2_truncate_log_needs_flush(osb)) { + ret = __ocfs2_flush_truncate_log(osb); + if (ret < 0) { + mlog_errno(ret); + break; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + break; + } + + ret = ocfs2_truncate_log_append(osb, handle, head->free_blk, + head->free_bit); + + ocfs2_commit_trans(osb, handle); + tmp = head; + head = head->free_next; + kfree(tmp); + + if (ret < 0) { + mlog_errno(ret); + break; + } + } + + mutex_unlock(&tl_inode->i_mutex); + + while (head) { + /* Premature exit may have left some dangling items. 
*/ + tmp = head; + head = head->free_next; + kfree(tmp); + } + + return ret; +} + int ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt) { @@ -5908,8 +5987,10 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb, if (fl->f_first) { mlog(0, "Free items: (type %u, slot %d)\n", fl->f_inode_type, fl->f_slot); - ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type, - fl->f_slot, fl->f_first); + ret2 = ocfs2_free_cached_blocks(osb, + fl->f_inode_type, + fl->f_slot, + fl->f_first); if (ret2) mlog_errno(ret2); if (!ret) @@ -5920,6 +6001,17 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb, kfree(fl); } + if (ctxt->c_global_allocator) { + ret2 = ocfs2_free_cached_clusters(osb, + ctxt->c_global_allocator); + if (ret2) + mlog_errno(ret2); + if (!ret) + ret = ret2; + + ctxt->c_global_allocator = NULL; + } + return ret; } diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 70257c84cfbe..c301cf225f0b 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -167,11 +167,15 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb); */ struct ocfs2_cached_dealloc_ctxt { struct ocfs2_per_slot_free_list *c_first_suballocator; + struct ocfs2_cached_block_free *c_global_allocator; }; static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c) { c->c_first_suballocator = NULL; + c->c_global_allocator = NULL; } +int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, + u64 blkno, unsigned int bit); int ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt); -- cgit From c73f60f900ddf73ec4ea2a143829ab97242c4e8c Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 12 Nov 2008 08:26:59 +0800 Subject: ocfs2/xattr: Move clusters free into dealloc. Move clusters free process into dealloc context so that they can be freed after the transaction. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 4501c63193df..f1da381a44f6 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -457,7 +457,6 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, int ret; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_extent_tree et; @@ -470,16 +469,6 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, return ret; } - mutex_lock(&tl_inode->i_mutex); - - if (ocfs2_truncate_log_needs_flush(osb)) { - ret = __ocfs2_flush_truncate_log(osb); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - } - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -509,14 +498,13 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out_commit; } - ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); + ret = ocfs2_cache_cluster_dealloc(dealloc, phys_blkno, len); if (ret) mlog_errno(ret); out_commit: ocfs2_commit_trans(osb, handle); out: - mutex_unlock(&tl_inode->i_mutex); if (meta_ac) ocfs2_free_alloc_context(meta_ac); -- cgit From 78f30c314a74b9dc5d7368d96fe4be883d9a3a04 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 12 Nov 2008 08:27:00 +0800 Subject: ocfs2/xattr: Reserve meta/data at the beginning of ocfs2_xattr_set. In ocfs2 xattr set, we reserve metadata and clusters in any place they are needed. 
It is time-consuming and ineffective, so this patch try to reserve metadata and clusters at the beginning of ocfs2_xattr_set. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.h | 4 + fs/ocfs2/xattr.c | 483 ++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 361 insertions(+), 126 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index c301cf225f0b..3eb735eedae6 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -176,6 +176,10 @@ static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c) } int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, u64 blkno, unsigned int bit); +static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c) +{ + return c->c_global_allocator != NULL; +} int ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index f1da381a44f6..4fd201a54c72 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -71,6 +71,12 @@ struct ocfs2_xattr_bucket { int bu_blocks; }; +struct ocfs2_xattr_set_ctxt { + struct ocfs2_alloc_context *meta_ac; + struct ocfs2_alloc_context *data_ac; + struct ocfs2_cached_dealloc_ctxt dealloc; +}; + #define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) #define OCFS2_XATTR_INLINE_SIZE 80 @@ -133,11 +139,13 @@ static int ocfs2_xattr_tree_list_index_block(struct inode *inode, size_t buffer_size); static int ocfs2_xattr_create_index_block(struct inode *inode, - struct ocfs2_xattr_search *xs); + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt); static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs); + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt); static int ocfs2_delete_xattr_index_block(struct inode *inode, struct buffer_head *xb_bh); @@ -334,14 +342,13 @@ static void ocfs2_xattr_hash_entry(struct inode *inode, static int ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, struct buffer_head *xattr_bh, - struct ocfs2_xattr_value_root *xv) + struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_set_ctxt *ctxt) { int status = 0; int restart_func = 0; int credits = 0; handle_t *handle = NULL; - struct ocfs2_alloc_context *data_ac = NULL; - struct ocfs2_alloc_context *meta_ac = NULL; enum ocfs2_alloc_restarted why; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u32 prev_clusters, logical_start = le32_to_cpu(xv->xr_clusters); @@ -353,13 +360,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, restart_all: - status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, - &data_ac, &meta_ac); - if (status) { - mlog_errno(status); - goto leave; - } - credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, clusters_to_add); handle = ocfs2_start_trans(osb, credits); @@ -386,8 +386,8 @@ restarted_transaction: 0, &et, handle, - data_ac, - meta_ac, + ctxt->data_ac, + ctxt->meta_ac, &why); if ((status < 0) && (status != -EAGAIN)) { if (status != -ENOSPC) @@ -432,14 +432,6 @@ leave: ocfs2_commit_trans(osb, handle); handle = NULL; } - if (data_ac) { - ocfs2_free_alloc_context(data_ac); - data_ac = NULL; - } - if (meta_ac) { - ocfs2_free_alloc_context(meta_ac); - meta_ac = NULL; - } if ((!status) && restart_func) { restart_func = 0; goto restart_all; @@ -452,23 +444,16 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, struct buffer_head *root_bh, struct 
ocfs2_xattr_value_root *xv, u32 cpos, u32 phys_cpos, u32 len, - struct ocfs2_cached_dealloc_ctxt *dealloc) + struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); handle_t *handle; - struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_extent_tree et; ocfs2_init_xattr_value_extent_tree(&et, inode, root_bh, xv); - ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); - if (ret) { - mlog_errno(ret); - return ret; - } - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -483,8 +468,8 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out_commit; } - ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac, - dealloc); + ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, ctxt->meta_ac, + &ctxt->dealloc); if (ret) { mlog_errno(ret); goto out_commit; @@ -498,17 +483,13 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out_commit; } - ret = ocfs2_cache_cluster_dealloc(dealloc, phys_blkno, len); + ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, phys_blkno, len); if (ret) mlog_errno(ret); out_commit: ocfs2_commit_trans(osb, handle); out: - - if (meta_ac) - ocfs2_free_alloc_context(meta_ac); - return ret; } @@ -516,15 +497,12 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, u32 old_clusters, u32 new_clusters, struct buffer_head *root_bh, - struct ocfs2_xattr_value_root *xv) + struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret = 0; u32 trunc_len, cpos, phys_cpos, alloc_size; u64 block; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_cached_dealloc_ctxt dealloc; - - ocfs2_init_dealloc_ctxt(&dealloc); if (old_clusters <= new_clusters) return 0; @@ -544,7 +522,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, ret = __ocfs2_remove_xattr_range(inode, root_bh, xv, cpos, phys_cpos, alloc_size, - &dealloc); + ctxt); if (ret) { mlog_errno(ret); goto out; @@ -558,16 +536,14 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, } out: - ocfs2_schedule_truncate_log_flush(osb, 1); - ocfs2_run_deallocs(osb, &dealloc); - return ret; } static int ocfs2_xattr_value_truncate(struct inode *inode, struct buffer_head *root_bh, struct ocfs2_xattr_value_root *xv, - int len) + int len, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len); @@ -579,11 +555,11 @@ static int ocfs2_xattr_value_truncate(struct inode *inode, if (new_clusters > old_clusters) ret = ocfs2_xattr_extend_allocation(inode, new_clusters - old_clusters, - root_bh, xv); + root_bh, xv, ctxt); else ret = ocfs2_xattr_shrink_size(inode, old_clusters, new_clusters, - root_bh, xv); + root_bh, xv, ctxt); return ret; } @@ -1167,6 +1143,7 @@ out: static int ocfs2_xattr_set_value_outside(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt, size_t offs) { size_t name_len = strlen(xi->name); @@ -1186,7 +1163,7 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, xv->xr_list.l_next_free_rec = 0; ret = ocfs2_xattr_value_truncate(inode, xs->xattr_bh, xv, - xi->value_len); + xi->value_len, ctxt); if (ret < 0) { mlog_errno(ret); return ret; @@ -1317,6 +1294,7 @@ static void ocfs2_xattr_set_entry_local(struct inode *inode, static int ocfs2_xattr_set_entry(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, + struct 
ocfs2_xattr_set_ctxt *ctxt, int flag) { struct ocfs2_xattr_entry *last; @@ -1387,7 +1365,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, if (ocfs2_xattr_is_local(xs->here) && size == size_l) { /* Replace existing local xattr with tree root */ ret = ocfs2_xattr_set_value_outside(inode, xi, xs, - offs); + ctxt, offs); if (ret < 0) mlog_errno(ret); goto out; @@ -1406,7 +1384,8 @@ static int ocfs2_xattr_set_entry(struct inode *inode, ret = ocfs2_xattr_value_truncate(inode, xs->xattr_bh, xv, - xi->value_len); + xi->value_len, + ctxt); if (ret < 0) { mlog_errno(ret); goto out; @@ -1436,7 +1415,8 @@ static int ocfs2_xattr_set_entry(struct inode *inode, ret = ocfs2_xattr_value_truncate(inode, xs->xattr_bh, xv, - 0); + 0, + ctxt); if (ret < 0) mlog_errno(ret); } @@ -1531,7 +1511,7 @@ out_commit: * This is the second step for value size > INLINE_SIZE. */ size_t offs = le16_to_cpu(xs->here->xe_name_offset); - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, offs); + ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt, offs); if (ret < 0) { int ret2; @@ -1555,6 +1535,10 @@ static int ocfs2_remove_value_outside(struct inode*inode, struct ocfs2_xattr_header *header) { int ret = 0, i; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; + + ocfs2_init_dealloc_ctxt(&ctxt.dealloc); for (i = 0; i < le16_to_cpu(header->xh_count); i++) { struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; @@ -1567,14 +1551,17 @@ static int ocfs2_remove_value_outside(struct inode*inode, le16_to_cpu(entry->xe_name_offset); xv = (struct ocfs2_xattr_value_root *) (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); - ret = ocfs2_xattr_value_truncate(inode, bh, xv, 0); + ret = ocfs2_xattr_value_truncate(inode, bh, xv, + 0, &ctxt); if (ret < 0) { mlog_errno(ret); - return ret; + break; } } } + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &ctxt.dealloc); return ret; } @@ -1836,7 +1823,8 @@ static int ocfs2_xattr_ibody_find(struct inode *inode, */ static int ocfs2_xattr_ibody_set(struct inode *inode, struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs) + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; @@ -1853,7 +1841,7 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, } } - ret = ocfs2_xattr_set_entry(inode, xi, xs, + ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL)); out: up_write(&oi->ip_alloc_sem); @@ -1926,12 +1914,12 @@ cleanup: */ static int ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs) + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { struct buffer_head *new_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; struct ocfs2_xattr_block *xblk = NULL; u16 suballoc_bit_start; @@ -1940,15 +1928,6 @@ static int ocfs2_xattr_block_set(struct inode *inode, int ret; if (!xs->xattr_bh) { - /* - * Alloc one external block for extended attribute - * outside of inode. 
- */ - ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); - if (ret < 0) { - mlog_errno(ret); - goto out; - } handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); if (IS_ERR(handle)) { @@ -1963,7 +1942,7 @@ static int ocfs2_xattr_block_set(struct inode *inode, goto out_commit; } - ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + ret = ocfs2_claim_metadata(osb, handle, ctxt->meta_ac, 1, &suballoc_bit_start, &num_got, &first_blkno); if (ret < 0) { @@ -1996,7 +1975,6 @@ static int ocfs2_xattr_block_set(struct inode *inode, xs->end = (void *)xblk + inode->i_sb->s_blocksize; xs->here = xs->header->xh_entries; - ret = ocfs2_journal_dirty(handle, new_bh); if (ret < 0) { mlog_errno(ret); @@ -2009,8 +1987,6 @@ static int ocfs2_xattr_block_set(struct inode *inode, out_commit: ocfs2_commit_trans(osb, handle); out: - if (meta_ac) - ocfs2_free_alloc_context(meta_ac); if (ret < 0) return ret; } else @@ -2018,22 +1994,266 @@ out: if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { /* Set extended attribute into external block */ - ret = ocfs2_xattr_set_entry(inode, xi, xs, OCFS2_HAS_XATTR_FL); + ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, + OCFS2_HAS_XATTR_FL); if (!ret || ret != -ENOSPC) goto end; - ret = ocfs2_xattr_create_index_block(inode, xs); + ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); if (ret) goto end; } - ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs); + ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); end: return ret; } +/* Check whether the new xattr can be inserted into the inode. */ +static int ocfs2_xattr_can_be_in_inode(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs) +{ + u64 value_size; + struct ocfs2_xattr_entry *last; + int free, i; + size_t min_offs = xs->end - xs->base; + + if (!xs->header) + return 0; + + last = xs->header->xh_entries; + + for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { + size_t offs = le16_to_cpu(last->xe_name_offset); + if (offs < min_offs) + min_offs = offs; + last += 1; + } + + free = min_offs - ((void *)last - xs->base) - sizeof(__u32); + if (free < 0) + return 0; + + BUG_ON(!xs->not_found); + + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + value_size = OCFS2_XATTR_ROOT_SIZE; + else + value_size = OCFS2_XATTR_SIZE(xi->value_len); + + if (free >= sizeof(struct ocfs2_xattr_entry) + + OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size) + return 1; + + return 0; +} + +static int ocfs2_calc_xattr_set_need(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xis, + struct ocfs2_xattr_search *xbs, + int *clusters_need, + int *meta_need) +{ + int ret = 0, old_in_xb = 0; + int clusters_add = 0, meta_add = 0; + struct buffer_head *bh = NULL; + struct ocfs2_xattr_block *xb = NULL; + struct ocfs2_xattr_entry *xe = NULL; + struct ocfs2_xattr_value_root *xv = NULL; + char *base = NULL; + int name_offset, name_len = 0; + u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, + xi->value_len); + u64 value_size; + + /* + * delete a xattr doesn't need metadata and cluster allocation. + * so return. 
+ */ + if (!xi->value) + goto out; + + if (xis->not_found && xbs->not_found) { + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + clusters_add += new_clusters; + + goto meta_guess; + } + + if (!xis->not_found) { + xe = xis->here; + name_offset = le16_to_cpu(xe->xe_name_offset); + name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + base = xis->base; + } else { + int i, block_off; + xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; + xe = xbs->here; + name_offset = le16_to_cpu(xe->xe_name_offset); + name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + i = xbs->here - xbs->header->xh_entries; + old_in_xb = 1; + + if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { + ret = ocfs2_xattr_bucket_get_name_value(inode, + bucket_xh(xbs->bucket), + i, &block_off, + &name_offset); + base = bucket_block(xbs->bucket, block_off); + } else + base = xbs->base; + } + + /* do cluster allocation guess first. */ + value_size = le64_to_cpu(xe->xe_value_size); + + if (old_in_xb) { + /* + * In xattr set, we always try to set the xe in inode first, + * so if it can be inserted into inode successfully, the old + * one will be removed from the xattr block, and this xattr + * will be inserted into inode as a new xattr in inode. + */ + if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) { + clusters_add += new_clusters; + goto out; + } + } + + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + /* the new values will be stored outside. */ + u32 old_clusters = 0; + + if (!ocfs2_xattr_is_local(xe)) { + old_clusters = ocfs2_clusters_for_bytes(inode->i_sb, + value_size); + xv = (struct ocfs2_xattr_value_root *) + (base + name_offset + name_len); + } else + xv = &def_xv.xv; + + if (old_clusters >= new_clusters) + goto out; + else { + meta_add += ocfs2_extend_meta_needed(&xv->xr_list); + clusters_add += new_clusters - old_clusters; + goto out; + } + } else { + /* + * Now the new value will be stored inside. So if the new + * value is smaller than the size of value root or the old + * value, we don't need any allocation, otherwise we have + * to guess metadata allocation. + */ + if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) || + (!ocfs2_xattr_is_local(xe) && + OCFS2_XATTR_ROOT_SIZE >= xi->value_len)) + goto out; + } + +meta_guess: + /* calculate metadata allocation. */ + if (di->i_xattr_loc) { + if (!xbs->xattr_bh) { + ret = ocfs2_read_block(inode, + le64_to_cpu(di->i_xattr_loc), + &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + xb = (struct ocfs2_xattr_block *)bh->b_data; + } else + xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; + + if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { + struct ocfs2_extent_list *el = + &xb->xb_attrs.xb_root.xt_list; + meta_add += ocfs2_extend_meta_needed(el); + } + + /* + * This cluster will be used either for new bucket or for + * new xattr block. + * If the cluster size is the same as the bucket size, one + * more is needed since we may need to extend the bucket + * also. 
+ */ + clusters_add += 1; + if (OCFS2_XATTR_BUCKET_SIZE == + OCFS2_SB(inode->i_sb)->s_clustersize) + clusters_add += 1; + } else + meta_add += 1; +out: + if (clusters_need) + *clusters_need = clusters_add; + if (meta_need) + *meta_need = meta_add; + brelse(bh); + return ret; +} + +static int ocfs2_init_xattr_set_ctxt(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xis, + struct ocfs2_xattr_search *xbs, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int clusters_add, meta_add, ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt)); + + ocfs2_init_dealloc_ctxt(&ctxt->dealloc); + + ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, + &clusters_add, &meta_add); + if (ret) { + mlog_errno(ret); + return ret; + } + + mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d\n", + xi->name, meta_add, clusters_add); + + if (meta_add) { + ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, + &ctxt->meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + if (clusters_add) { + ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac); + if (ret) + mlog_errno(ret); + } +out: + if (ret) { + if (ctxt->meta_ac) { + ocfs2_free_alloc_context(ctxt->meta_ac); + ctxt->meta_ac = NULL; + } + + /* + * We cannot have an error and a non null ctxt->data_ac. + */ + } + + return ret; +} + /* * ocfs2_xattr_set() * @@ -2051,6 +2271,8 @@ int ocfs2_xattr_set(struct inode *inode, struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; struct ocfs2_xattr_info xi = { .name_index = name_index, @@ -2115,15 +2337,21 @@ int ocfs2_xattr_set(struct inode *inode, goto cleanup; } + ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis, &xbs, &ctxt); + if (ret) { + mlog_errno(ret); + goto cleanup; + } + if (!value) { /* Remove existing extended attribute */ if (!xis.not_found) - ret = ocfs2_xattr_ibody_set(inode, &xi, &xis); + ret = ocfs2_xattr_ibody_set(inode, &xi, &xis, &ctxt); else if (!xbs.not_found) - ret = ocfs2_xattr_block_set(inode, &xi, &xbs); + ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); } else { /* We always try to set extended attribute into inode first*/ - ret = ocfs2_xattr_ibody_set(inode, &xi, &xis); + ret = ocfs2_xattr_ibody_set(inode, &xi, &xis, &ctxt); if (!ret && !xbs.not_found) { /* * If succeed and that extended attribute existing in @@ -2131,7 +2359,7 @@ int ocfs2_xattr_set(struct inode *inode, */ xi.value = NULL; xi.value_len = 0; - ret = ocfs2_xattr_block_set(inode, &xi, &xbs); + ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); } else if (ret == -ENOSPC) { if (di->i_xattr_loc && !xbs.xattr_bh) { ret = ocfs2_xattr_block_find(inode, name_index, @@ -2143,9 +2371,9 @@ int ocfs2_xattr_set(struct inode *inode, * If no space in inode, we will set extended attribute * into external block. 
*/ - ret = ocfs2_xattr_block_set(inode, &xi, &xbs); + ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); if (ret) - goto cleanup; + goto free; if (!xis.not_found) { /* * If succeed and that extended attribute @@ -2153,10 +2381,19 @@ int ocfs2_xattr_set(struct inode *inode, */ xi.value = NULL; xi.value_len = 0; - ret = ocfs2_xattr_ibody_set(inode, &xi, &xis); + ret = ocfs2_xattr_ibody_set(inode, &xi, + &xis, &ctxt); } } } +free: + if (ctxt.data_ac) + ocfs2_free_alloc_context(ctxt.data_ac); + if (ctxt.meta_ac) + ocfs2_free_alloc_context(ctxt.meta_ac); + if (ocfs2_dealloc_has_cluster(&ctxt.dealloc)) + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &ctxt.dealloc); cleanup: up_write(&OCFS2_I(inode)->ip_xattr_sem); ocfs2_inode_unlock(inode, 1); @@ -2734,7 +2971,8 @@ static void ocfs2_xattr_update_xattr_search(struct inode *inode, } static int ocfs2_xattr_create_index_block(struct inode *inode, - struct ocfs2_xattr_search *xs) + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret, credits = OCFS2_SUBALLOC_ALLOC; u32 bit_off, len; @@ -2742,7 +2980,6 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_alloc_context *data_ac; struct buffer_head *xb_bh = xs->xattr_bh; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; @@ -2755,12 +2992,6 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); BUG_ON(!xs->bucket); - ret = ocfs2_reserve_clusters(osb, 1, &data_ac); - if (ret) { - mlog_errno(ret); - goto out; - } - /* * XXX: * We can use this lock for now, and maybe move to a dedicated mutex @@ -2787,7 +3018,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, goto out_commit; } - ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len); + ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, + 1, 1, &bit_off, &len); if (ret) { mlog_errno(ret); goto out_commit; @@ -2850,10 +3082,6 @@ out_commit: out_sem: up_write(&oi->ip_alloc_sem); -out: - if (data_ac) - ocfs2_free_alloc_context(data_ac); - return ret; } @@ -3614,7 +3842,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, u32 *num_clusters, u32 prev_cpos, u64 prev_blkno, - int *extend) + int *extend, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret, credits; u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); @@ -3622,8 +3851,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0; u64 block; handle_t *handle = NULL; - struct ocfs2_alloc_context *data_ac = NULL; - struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; @@ -3634,13 +3861,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); - ret = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, - &data_ac, &meta_ac); - if (ret) { - mlog_errno(ret); - goto leave; - } - credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, clusters_to_add); handle = ocfs2_start_trans(osb, credits); @@ -3658,7 +3878,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, goto leave; } - ret = __ocfs2_claim_clusters(osb, handle, data_ac, 1, + ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, 1, clusters_to_add, &bit_off, &num_bits); if (ret < 0) { if (ret != -ENOSPC) @@ -3719,7 +3939,7 @@ static int 
ocfs2_add_new_xattr_cluster(struct inode *inode, mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", num_bits, (unsigned long long)block, v_start); ret = ocfs2_insert_extent(osb, handle, inode, &et, v_start, block, - num_bits, 0, meta_ac); + num_bits, 0, ctxt->meta_ac); if (ret < 0) { mlog_errno(ret); goto leave; @@ -3734,10 +3954,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, leave: if (handle) ocfs2_commit_trans(osb, handle); - if (data_ac) - ocfs2_free_alloc_context(data_ac); - if (meta_ac) - ocfs2_free_alloc_context(meta_ac); return ret; } @@ -3821,7 +4037,8 @@ out: */ static int ocfs2_add_new_xattr_bucket(struct inode *inode, struct buffer_head *xb_bh, - struct buffer_head *header_bh) + struct buffer_head *header_bh, + struct ocfs2_xattr_set_ctxt *ctxt) { struct ocfs2_xattr_header *first_xh = NULL; struct buffer_head *first_bh = NULL; @@ -3872,7 +4089,8 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, &num_clusters, e_cpos, p_blkno, - &extend); + &extend, + ctxt); if (ret) { mlog_errno(ret); goto out; @@ -4147,7 +4365,8 @@ out: static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, struct buffer_head *header_bh, int xe_off, - int len) + int len, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret, offset; u64 value_blk; @@ -4182,7 +4401,7 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", xe_off, (unsigned long long)header_bh->b_blocknr, len); - ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len); + ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len, ctxt); if (ret) { mlog_errno(ret); goto out; @@ -4200,8 +4419,9 @@ out: } static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, - struct ocfs2_xattr_search *xs, - int len) + struct ocfs2_xattr_search *xs, + int len, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret, offset; struct ocfs2_xattr_entry *xe = xs->here; @@ -4211,7 +4431,7 @@ static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, offset = xe - xh->xh_entries; ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket->bu_bhs[0], - offset, len); + offset, len, ctxt); if (ret) mlog_errno(ret); @@ -4375,7 +4595,8 @@ out_commit: */ static int ocfs2_xattr_set_in_bucket(struct inode *inode, struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs) + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { int ret, local = 1; size_t value_len; @@ -4403,7 +4624,8 @@ static int ocfs2_xattr_set_in_bucket(struct inode *inode, value_len = 0; ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len); + value_len, + ctxt); if (ret) goto out; @@ -4434,7 +4656,7 @@ static int ocfs2_xattr_set_in_bucket(struct inode *inode, /* allocate the space now for the outside block storage. 
*/ ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len); + value_len, ctxt); if (ret) { mlog_errno(ret); @@ -4485,7 +4707,8 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs) + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; @@ -4603,7 +4826,8 @@ try_again: ret = ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, - xs->bucket->bu_bhs[0]); + xs->bucket->bu_bhs[0], + ctxt); if (ret) { mlog_errno(ret); goto out; @@ -4622,7 +4846,7 @@ try_again: } xattr_set: - ret = ocfs2_xattr_set_in_bucket(inode, xi, xs); + ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt); out: mlog_exit(ret); return ret; @@ -4636,6 +4860,10 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 i; struct ocfs2_xattr_entry *xe; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,}; + + ocfs2_init_dealloc_ctxt(&ctxt.dealloc); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; @@ -4644,13 +4872,16 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, ret = ocfs2_xattr_bucket_value_truncate(inode, bucket->bu_bhs[0], - i, 0); + i, 0, &ctxt); if (ret) { mlog_errno(ret); break; } } + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &ctxt.dealloc); + return ret; } -- cgit From 85db90e77806d48a19fda77dabe8897d369a1710 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 12 Nov 2008 08:27:01 +0800 Subject: ocfs2/xattr: Merge xattr set transaction. In current ocfs2/xattr, the whole xattr set is divided into many steps are many transaction are used, this make the xattr set process isn't like a real transaction, so this patch try to merge all the transaction into one. Another benefit is that acl can use it easily now. I don't merge the transaction of deleting xattr when we remove an inode. The reason is that if we have a large number of xattrs and every xattrs has large values(large enough for outside storage), the whole transaction will be very huge and it looks like jbd can't handle it(I meet with a jbd complain once). And the old inode removal is also divided into many steps, so I'd like to leave as it is. Note: In xattr set, I try to avoid ocfs2_extend_trans since if the credits aren't enough for the extension, it will commit all the dirty blocks and create a new transaction which may lead to inconsistency in metadata. All ocfs2_extend_trans remained are safe now. 
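As a reading aid, the single-transaction flow introduced by this patch can be condensed into the sketch below. The helper name is made up for illustration; error handling, the truncate-log flush and the cluster lock are trimmed, and the diff that follows is the authoritative version.

/*
 * Condensed sketch of the post-patch ocfs2_xattr_set() flow: reserve
 * credits and allocators up front, run every journalled update under
 * one handle, commit once.  Illustrative only.
 */
static int xattr_set_one_transaction(struct inode *inode,
				     struct ocfs2_dinode *di,
				     struct ocfs2_xattr_info *xi,
				     struct ocfs2_xattr_search *xis,
				     struct ocfs2_xattr_search *xbs)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
	int ret, credits;

	/* Guess cluster/metadata needs and reserve allocators + credits. */
	ret = ocfs2_init_xattr_set_ctxt(inode, di, xi, xis, xbs,
					&ctxt, &credits);
	if (ret)
		return ret;

	/* One handle covers the whole set operation. */
	ctxt.handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(ctxt.handle)) {
		ret = PTR_ERR(ctxt.handle);
		goto free;
	}

	/* ibody, block and bucket updates all reuse ctxt.handle. */
	ret = __ocfs2_xattr_set_handle(inode, di, xi, xis, xbs, &ctxt);

	ocfs2_commit_trans(osb, ctxt.handle);
free:
	if (ctxt.data_ac)
		ocfs2_free_alloc_context(ctxt.data_ac);
	if (ctxt.meta_ac)
		ocfs2_free_alloc_context(ctxt.meta_ac);
	ocfs2_run_deallocs(osb, &ctxt.dealloc);
	return ret;
}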
Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 673 +++++++++++++++++++++++++++---------------------------- 1 file changed, 325 insertions(+), 348 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 4fd201a54c72..7a9089255a87 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -72,6 +72,7 @@ struct ocfs2_xattr_bucket { }; struct ocfs2_xattr_set_ctxt { + handle_t *handle; struct ocfs2_alloc_context *meta_ac; struct ocfs2_alloc_context *data_ac; struct ocfs2_cached_dealloc_ctxt dealloc; @@ -346,9 +347,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, struct ocfs2_xattr_set_ctxt *ctxt) { int status = 0; - int restart_func = 0; - int credits = 0; - handle_t *handle = NULL; + handle_t *handle = ctxt->handle; enum ocfs2_alloc_restarted why; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u32 prev_clusters, logical_start = le32_to_cpu(xv->xr_clusters); @@ -358,19 +357,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, ocfs2_init_xattr_value_extent_tree(&et, inode, xattr_bh, xv); -restart_all: - - credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, - clusters_to_add); - handle = ocfs2_start_trans(osb, credits); - if (IS_ERR(handle)) { - status = PTR_ERR(handle); - handle = NULL; - mlog_errno(status); - goto leave; - } - -restarted_transaction: status = ocfs2_journal_access(handle, inode, xattr_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { @@ -389,9 +375,8 @@ restarted_transaction: ctxt->data_ac, ctxt->meta_ac, &why); - if ((status < 0) && (status != -EAGAIN)) { - if (status != -ENOSPC) - mlog_errno(status); + if (status < 0) { + mlog_errno(status); goto leave; } @@ -403,39 +388,13 @@ restarted_transaction: clusters_to_add -= le32_to_cpu(xv->xr_clusters) - prev_clusters; - if (why != RESTART_NONE && clusters_to_add) { - if (why == RESTART_META) { - mlog(0, "restarting function.\n"); - restart_func = 1; - } else { - BUG_ON(why != RESTART_TRANS); - - mlog(0, "restarting transaction.\n"); - /* TODO: This can be more intelligent. */ - credits = ocfs2_calc_extend_credits(osb->sb, - et.et_root_el, - clusters_to_add); - status = ocfs2_extend_trans(handle, credits); - if (status < 0) { - /* handle still has to be committed at - * this point. */ - status = -ENOMEM; - mlog_errno(status); - goto leave; - } - goto restarted_transaction; - } - } + /* + * We should have already allocated enough space before the transaction, + * so no need to restart. 
+ */ + BUG_ON(why != RESTART_NONE || clusters_to_add); leave: - if (handle) { - ocfs2_commit_trans(osb, handle); - handle = NULL; - } - if ((!status) && restart_func) { - restart_func = 0; - goto restart_all; - } return status; } @@ -448,31 +407,23 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, { int ret; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - handle_t *handle; + handle_t *handle = ctxt->handle; struct ocfs2_extent_tree et; ocfs2_init_xattr_value_extent_tree(&et, inode, root_bh, xv); - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } - ret = ocfs2_journal_access(handle, inode, root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, ctxt->meta_ac, &ctxt->dealloc); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } le32_add_cpu(&xv->xr_clusters, -len); @@ -480,15 +431,13 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, ret = ocfs2_journal_dirty(handle, root_bh); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, phys_blkno, len); if (ret) mlog_errno(ret); -out_commit: - ocfs2_commit_trans(osb, handle); out: return ret; } @@ -975,6 +924,7 @@ static int ocfs2_xattr_get(struct inode *inode, } static int __ocfs2_xattr_set_value_outside(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_value_root *xv, const void *value, int value_len) @@ -986,14 +936,17 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); u64 blkno; struct buffer_head *bh = NULL; - handle_t *handle; BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); + /* + * In __ocfs2_xattr_set_value_outside has already been dirtied, + * so we don't need to worry about whether ocfs2_extend_trans + * will create a new transactio for us or not. + */ credits = clusters * bpc; - handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); + ret = ocfs2_extend_trans(handle, credits); + if (ret) { mlog_errno(ret); goto out; } @@ -1003,7 +956,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, &num_clusters, &xv->xr_list); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); @@ -1012,7 +965,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, ret = ocfs2_read_block(inode, blkno, &bh); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ret = ocfs2_journal_access(handle, @@ -1021,7 +974,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto out; } cp_len = value_len > blocksize ? 
blocksize : value_len; @@ -1035,7 +988,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, ret = ocfs2_journal_dirty(handle, bh); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto out; } brelse(bh); bh = NULL; @@ -1049,8 +1002,6 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, } cpos += num_clusters; } -out_commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: brelse(bh); @@ -1058,28 +1009,21 @@ out: } static int ocfs2_xattr_cleanup(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, size_t offs) { - handle_t *handle = NULL; int ret = 0; size_t name_len = strlen(xi->name); void *val = xs->base + offs; size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), - OCFS2_XATTR_BLOCK_UPDATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } /* Decrease xattr count */ le16_add_cpu(&xs->header->xh_count, -1); @@ -1090,32 +1034,23 @@ static int ocfs2_xattr_cleanup(struct inode *inode, ret = ocfs2_journal_dirty(handle, xs->xattr_bh); if (ret < 0) mlog_errno(ret); -out_commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: return ret; } static int ocfs2_xattr_update_entry(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, size_t offs) { - handle_t *handle = NULL; - int ret = 0; + int ret; - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), - OCFS2_XATTR_BLOCK_UPDATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } xs->here->xe_name_offset = cpu_to_le16(offs); @@ -1129,8 +1064,6 @@ static int ocfs2_xattr_update_entry(struct inode *inode, ret = ocfs2_journal_dirty(handle, xs->xattr_bh); if (ret < 0) mlog_errno(ret); -out_commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: return ret; } @@ -1168,13 +1101,13 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, mlog_errno(ret); return ret; } - ret = __ocfs2_xattr_set_value_outside(inode, xv, xi->value, - xi->value_len); + ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, offs); if (ret < 0) { mlog_errno(ret); return ret; } - ret = ocfs2_xattr_update_entry(inode, xi, xs, offs); + ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, xv, + xi->value, xi->value_len); if (ret < 0) mlog_errno(ret); @@ -1302,7 +1235,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name); size_t size_l = 0; - handle_t *handle = NULL; + handle_t *handle = ctxt->handle; int free, i, ret; struct ocfs2_xattr_info xi_l = { .name_index = xi->name_index, @@ -1391,19 +1324,21 @@ static int ocfs2_xattr_set_entry(struct inode *inode, goto out; } - ret = __ocfs2_xattr_set_value_outside(inode, - xv, - xi->value, - xi->value_len); + ret = ocfs2_xattr_update_entry(inode, + handle, + xi, + xs, + offs); if (ret < 0) { mlog_errno(ret); goto out; } - ret = ocfs2_xattr_update_entry(inode, - xi, - xs, - offs); + ret = __ocfs2_xattr_set_value_outside(inode, + handle, + xv, + xi->value, + xi->value_len); if (ret < 0) 
mlog_errno(ret); goto out; @@ -1413,45 +1348,29 @@ static int ocfs2_xattr_set_entry(struct inode *inode, * just trucate old value to zero. */ ret = ocfs2_xattr_value_truncate(inode, - xs->xattr_bh, - xv, - 0, - ctxt); + xs->xattr_bh, + xv, + 0, + ctxt); if (ret < 0) mlog_errno(ret); } } } - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), - OCFS2_INODE_UPDATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } - ret = ocfs2_journal_access(handle, inode, xs->inode_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } if (!(flag & OCFS2_INLINE_XATTR_FL)) { - /* set extended attribute in external block. */ - ret = ocfs2_extend_trans(handle, - OCFS2_INODE_UPDATE_CREDITS + - OCFS2_XATTR_BLOCK_UPDATE_CREDITS); - if (ret) { - mlog_errno(ret); - goto out_commit; - } ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } } @@ -1465,7 +1384,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, ret = ocfs2_journal_dirty(handle, xs->xattr_bh); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto out; } } @@ -1502,9 +1421,6 @@ static int ocfs2_xattr_set_entry(struct inode *inode, if (ret < 0) mlog_errno(ret); -out_commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); - if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { /* * Set value outside in B tree. @@ -1520,14 +1436,14 @@ out_commit: * If set value outside failed, we have to clean * the junk tree root we have already set in local. */ - ret2 = ocfs2_xattr_cleanup(inode, xi, xs, offs); + ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle, + xi, xs, offs); if (ret2 < 0) mlog_errno(ret2); } } out: return ret; - } static int ocfs2_remove_value_outside(struct inode*inode, @@ -1540,6 +1456,13 @@ static int ocfs2_remove_value_outside(struct inode*inode, ocfs2_init_dealloc_ctxt(&ctxt.dealloc); + ctxt.handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + if (IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); + mlog_errno(ret); + goto out; + } + for (i = 0; i < le16_to_cpu(header->xh_count); i++) { struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; @@ -1560,8 +1483,10 @@ static int ocfs2_remove_value_outside(struct inode*inode, } } + ocfs2_commit_trans(osb, ctxt.handle); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); +out: return ret; } @@ -1920,7 +1845,7 @@ static int ocfs2_xattr_block_set(struct inode *inode, struct buffer_head *new_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - handle_t *handle = NULL; + handle_t *handle = ctxt->handle; struct ocfs2_xattr_block *xblk = NULL; u16 suballoc_bit_start; u32 num_got; @@ -1928,18 +1853,11 @@ static int ocfs2_xattr_block_set(struct inode *inode, int ret; if (!xs->xattr_bh) { - handle = ocfs2_start_trans(osb, - OCFS2_XATTR_BLOCK_CREATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access(handle, inode, xs->inode_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto end; } ret = ocfs2_claim_metadata(osb, handle, ctxt->meta_ac, 1, @@ -1947,7 +1865,7 @@ static int ocfs2_xattr_block_set(struct inode *inode, &first_blkno); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto end; } new_bh = sb_getblk(inode->i_sb, first_blkno); @@ -1957,7 +1875,7 @@ static int 
ocfs2_xattr_block_set(struct inode *inode, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto end; } /* Initialize ocfs2_xattr_block */ @@ -1978,17 +1896,10 @@ static int ocfs2_xattr_block_set(struct inode *inode, ret = ocfs2_journal_dirty(handle, new_bh); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto end; } di->i_xattr_loc = cpu_to_le64(first_blkno); - ret = ocfs2_journal_dirty(handle, xs->inode_bh); - if (ret < 0) - mlog_errno(ret); -out_commit: - ocfs2_commit_trans(osb, handle); -out: - if (ret < 0) - return ret; + ocfs2_journal_dirty(handle, xs->inode_bh); } else xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; @@ -2057,10 +1968,11 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, int *clusters_need, - int *meta_need) + int *meta_need, + int *credits_need) { int ret = 0, old_in_xb = 0; - int clusters_add = 0, meta_add = 0; + int clusters_add = 0, meta_add = 0, credits = 0; struct buffer_head *bh = NULL; struct ocfs2_xattr_block *xb = NULL; struct ocfs2_xattr_entry *xe = NULL; @@ -2071,16 +1983,15 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, xi->value_len); u64 value_size; - /* - * delete a xattr doesn't need metadata and cluster allocation. - * so return. - */ - if (!xi->value) - goto out; - if (xis->not_found && xbs->not_found) { - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { clusters_add += new_clusters; + credits += ocfs2_calc_extend_credits(inode->i_sb, + &def_xv.xv.xr_list, + new_clusters); + } goto meta_guess; } @@ -2090,6 +2001,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, name_offset = le16_to_cpu(xe->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); base = xis->base; + credits += OCFS2_INODE_UPDATE_CREDITS; } else { int i, block_off; xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; @@ -2105,8 +2017,25 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, i, &block_off, &name_offset); base = bucket_block(xbs->bucket, block_off); - } else + credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); + } else { base = xbs->base; + credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS; + } + } + + /* + * delete a xattr doesn't need metadata and cluster allocation. + * so just calculate the credits and return. + * + * The credits for removing the value tree will be extended + * by ocfs2_remove_extent itself. + */ + if (!xi->value) { + if (!ocfs2_xattr_is_local(xe)) + credits += OCFS2_REMOVE_EXTENT_CREDITS; + + goto out; } /* do cluster allocation guess first. 
*/ @@ -2121,6 +2050,13 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, */ if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) { clusters_add += new_clusters; + credits += OCFS2_REMOVE_EXTENT_CREDITS + + OCFS2_INODE_UPDATE_CREDITS; + if (!ocfs2_xattr_is_local(xe)) + credits += ocfs2_calc_extend_credits( + inode->i_sb, + &def_xv.xv.xr_list, + new_clusters); goto out; } } @@ -2137,11 +2073,16 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, } else xv = &def_xv.xv; - if (old_clusters >= new_clusters) + if (old_clusters >= new_clusters) { + credits += OCFS2_REMOVE_EXTENT_CREDITS; goto out; - else { + } else { meta_add += ocfs2_extend_meta_needed(&xv->xr_list); clusters_add += new_clusters - old_clusters; + credits += ocfs2_calc_extend_credits(inode->i_sb, + &xv->xr_list, + new_clusters - + old_clusters); goto out; } } else { @@ -2177,6 +2118,8 @@ meta_guess: struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; meta_add += ocfs2_extend_meta_needed(el); + credits += ocfs2_calc_extend_credits(inode->i_sb, + el, 1); } /* @@ -2187,16 +2130,23 @@ meta_guess: * also. */ clusters_add += 1; + credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); if (OCFS2_XATTR_BUCKET_SIZE == - OCFS2_SB(inode->i_sb)->s_clustersize) + OCFS2_SB(inode->i_sb)->s_clustersize) { + credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); clusters_add += 1; - } else + } + } else { meta_add += 1; + credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; + } out: if (clusters_need) *clusters_need = clusters_add; if (meta_need) *meta_need = meta_add; + if (credits_need) + *credits_need = credits; brelse(bh); return ret; } @@ -2206,7 +2156,8 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, - struct ocfs2_xattr_set_ctxt *ctxt) + struct ocfs2_xattr_set_ctxt *ctxt, + int *credits) { int clusters_add, meta_add, ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -2216,14 +2167,14 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, ocfs2_init_dealloc_ctxt(&ctxt->dealloc); ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, - &clusters_add, &meta_add); + &clusters_add, &meta_add, credits); if (ret) { mlog_errno(ret); return ret; } - mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d\n", - xi->name, meta_add, clusters_add); + mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " + "credits = %d\n", xi->name, meta_add, clusters_add, *credits); if (meta_add) { ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, @@ -2254,6 +2205,126 @@ out: return ret; } +static int __ocfs2_xattr_set_handle(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xis, + struct ocfs2_xattr_search *xbs, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret = 0, credits; + + if (!xi->value) { + /* Remove existing extended attribute */ + if (!xis->not_found) + ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); + else if (!xbs->not_found) + ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); + } else { + /* We always try to set extended attribute into inode first*/ + ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); + if (!ret && !xbs->not_found) { + /* + * If succeed and that extended attribute existing in + * external block, then we will remove it. 
+ */ + xi->value = NULL; + xi->value_len = 0; + + xis->not_found = -ENODATA; + ret = ocfs2_calc_xattr_set_need(inode, + di, + xi, + xis, + xbs, + NULL, + NULL, + &credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_extend_trans(ctxt->handle, credits + + ctxt->handle->h_buffer_credits); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); + } else if (ret == -ENOSPC) { + if (di->i_xattr_loc && !xbs->xattr_bh) { + ret = ocfs2_xattr_block_find(inode, + xi->name_index, + xi->name, xbs); + if (ret) + goto out; + + xis->not_found = -ENODATA; + ret = ocfs2_calc_xattr_set_need(inode, + di, + xi, + xis, + xbs, + NULL, + NULL, + &credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_extend_trans(ctxt->handle, credits + + ctxt->handle->h_buffer_credits); + if (ret) { + mlog_errno(ret); + goto out; + } + } + /* + * If no space in inode, we will set extended attribute + * into external block. + */ + ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); + if (ret) + goto out; + if (!xis->not_found) { + /* + * If succeed and that extended attribute + * existing in inode, we will remove it. + */ + xi->value = NULL; + xi->value_len = 0; + xbs->not_found = -ENODATA; + ret = ocfs2_calc_xattr_set_need(inode, + di, + xi, + xis, + xbs, + NULL, + NULL, + &credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_extend_trans(ctxt->handle, credits + + ctxt->handle->h_buffer_credits); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_xattr_ibody_set(inode, xi, + xis, ctxt); + } + } + } + +out: + return ret; +} + /* * ocfs2_xattr_set() * @@ -2270,8 +2341,9 @@ int ocfs2_xattr_set(struct inode *inode, { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; - int ret; + int ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; struct ocfs2_xattr_info xi = { @@ -2337,56 +2409,37 @@ int ocfs2_xattr_set(struct inode *inode, goto cleanup; } - ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis, &xbs, &ctxt); + + mutex_lock(&tl_inode->i_mutex); + + if (ocfs2_truncate_log_needs_flush(osb)) { + ret = __ocfs2_flush_truncate_log(osb); + if (ret < 0) { + mutex_unlock(&tl_inode->i_mutex); + mlog_errno(ret); + goto cleanup; + } + } + mutex_unlock(&tl_inode->i_mutex); + + ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis, + &xbs, &ctxt, &credits); if (ret) { mlog_errno(ret); goto cleanup; } - if (!value) { - /* Remove existing extended attribute */ - if (!xis.not_found) - ret = ocfs2_xattr_ibody_set(inode, &xi, &xis, &ctxt); - else if (!xbs.not_found) - ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); - } else { - /* We always try to set extended attribute into inode first*/ - ret = ocfs2_xattr_ibody_set(inode, &xi, &xis, &ctxt); - if (!ret && !xbs.not_found) { - /* - * If succeed and that extended attribute existing in - * external block, then we will remove it. - */ - xi.value = NULL; - xi.value_len = 0; - ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); - } else if (ret == -ENOSPC) { - if (di->i_xattr_loc && !xbs.xattr_bh) { - ret = ocfs2_xattr_block_find(inode, name_index, - name, &xbs); - if (ret) - goto cleanup; - } - /* - * If no space in inode, we will set extended attribute - * into external block. 
- */ - ret = ocfs2_xattr_block_set(inode, &xi, &xbs, &ctxt); - if (ret) - goto free; - if (!xis.not_found) { - /* - * If succeed and that extended attribute - * existing in inode, we will remove it. - */ - xi.value = NULL; - xi.value_len = 0; - ret = ocfs2_xattr_ibody_set(inode, &xi, - &xis, &ctxt); - } - } + ctxt.handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); + mlog_errno(ret); + goto cleanup; } -free: + + ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); + + ocfs2_commit_trans(osb, ctxt.handle); + if (ctxt.data_ac) ocfs2_free_alloc_context(ctxt.data_ac); if (ctxt.meta_ac) @@ -2974,10 +3027,10 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { - int ret, credits = OCFS2_SUBALLOC_ALLOC; + int ret; u32 bit_off, len; u64 blkno; - handle_t *handle; + handle_t *handle = ctxt->handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct buffer_head *xb_bh = xs->xattr_bh; @@ -2999,30 +3052,18 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, */ down_write(&oi->ip_alloc_sem); - /* - * We need more credits. One for the xattr block update and one - * for each block of the new xattr bucket. - */ - credits += 1 + ocfs2_blocks_per_xattr_bucket(inode->i_sb); - handle = ocfs2_start_trans(osb, credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out_sem; - } - ret = ocfs2_journal_access(handle, inode, xb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, 1, 1, &bit_off, &len); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } /* @@ -3038,14 +3079,14 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out; } ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket); @@ -3070,16 +3111,9 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED); - ret = ocfs2_journal_dirty(handle, xb_bh); - if (ret) { - mlog_errno(ret); - goto out_commit; - } + ocfs2_journal_dirty(handle, xb_bh); -out_commit: - ocfs2_commit_trans(osb, handle); - -out_sem: +out: up_write(&oi->ip_alloc_sem); return ret; @@ -3105,6 +3139,7 @@ static int cmp_xe_offset(const void *a, const void *b) * so that we can spare some space for insertion. 
*/ static int ocfs2_defrag_xattr_bucket(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_bucket *bucket) { int ret, i; @@ -3114,7 +3149,6 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, u64 blkno = bucket_blkno(bucket); u16 xh_free_start; size_t blocksize = inode->i_sb->s_blocksize; - handle_t *handle; struct ocfs2_xattr_entry *xe; /* @@ -3133,19 +3167,11 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) memcpy(buf, bucket_block(bucket, i), blocksize); - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), bucket->bu_blocks); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - mlog_errno(ret); - goto out; - } - ret = ocfs2_xattr_bucket_journal_access(handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); - goto commit; + goto out; } xh = (struct ocfs2_xattr_header *)bucket_buf; @@ -3203,7 +3229,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, "bucket %llu\n", (unsigned long long)blkno); if (xh_free_start == end) - goto commit; + goto out; memset(bucket_buf + xh_free_start, 0, end - xh_free_start); xh->xh_free_start = cpu_to_le16(end); @@ -3218,8 +3244,6 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, memcpy(bucket_block(bucket, i), buf, blocksize); ocfs2_xattr_bucket_journal_dirty(handle, bucket); -commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: kfree(bucket_buf); return ret; @@ -3270,7 +3294,7 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, * 1 more for the update of the 1st bucket of the previous * extent record. */ - credits = bpc / 2 + 1; + credits = bpc / 2 + 1 + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); @@ -3662,7 +3686,7 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, * We need to update the new cluster and 1 more for the update of * the 1st bucket of the previous extent rec. 
*/ - credits = bpc + 1; + credits = bpc + 1 + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); @@ -3732,7 +3756,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode, u32 *first_hash) { u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - int ret, credits = 2 * blk_per_bucket; + int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits; BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize); @@ -3845,12 +3869,12 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, int *extend, struct ocfs2_xattr_set_ctxt *ctxt) { - int ret, credits; + int ret; u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); u32 prev_clusters = *num_clusters; u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0; u64 block; - handle_t *handle = NULL; + handle_t *handle = ctxt->handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; @@ -3861,16 +3885,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); - credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, - clusters_to_add); - handle = ocfs2_start_trans(osb, credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - mlog_errno(ret); - goto leave; - } - ret = ocfs2_journal_access(handle, inode, root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { @@ -3924,18 +3938,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, } } - if (handle->h_buffer_credits < credits) { - /* - * The journal has been restarted before, and don't - * have enough space for the insertion, so extend it - * here. - */ - ret = ocfs2_extend_trans(handle, credits); - if (ret) { - mlog_errno(ret); - goto leave; - } - } mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", num_bits, (unsigned long long)block, v_start); ret = ocfs2_insert_extent(osb, handle, inode, &et, v_start, block, @@ -3946,15 +3948,10 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, } ret = ocfs2_journal_dirty(handle, root_bh); - if (ret < 0) { + if (ret < 0) mlog_errno(ret); - goto leave; - } leave: - if (handle) - ocfs2_commit_trans(osb, handle); - return ret; } @@ -3963,6 +3960,7 @@ leave: * We meet with start_bh. Only move half of the xattrs to the bucket after it. */ static int ocfs2_extend_xattr_bucket(struct inode *inode, + handle_t *handle, struct buffer_head *first_bh, struct buffer_head *start_bh, u32 num_clusters) @@ -3972,7 +3970,6 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u64 start_blk = start_bh->b_blocknr, end_blk; u32 num_buckets = num_clusters * ocfs2_xattr_buckets_per_cluster(osb); - handle_t *handle; struct ocfs2_xattr_header *first_xh = (struct ocfs2_xattr_header *)first_bh->b_data; u16 bucket = le16_to_cpu(first_xh->xh_num_buckets); @@ -3989,11 +3986,10 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, * We will touch all the buckets after the start_bh(include it). * Then we add one more bucket. 
*/ - credits = end_blk - start_blk + 3 * blk_per_bucket + 1; - handle = ocfs2_start_trans(osb, credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; + credits = end_blk - start_blk + 3 * blk_per_bucket + 1 + + handle->h_buffer_credits; + ret = ocfs2_extend_trans(handle, credits); + if (ret) { mlog_errno(ret); goto out; } @@ -4002,14 +3998,14 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto commit; + goto out; } while (end_blk != start_blk) { ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk, end_blk + blk_per_bucket, 0); if (ret) - goto commit; + goto out; end_blk -= blk_per_bucket; } @@ -4020,8 +4016,6 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, le16_add_cpu(&first_xh->xh_num_buckets, 1); ocfs2_journal_dirty(handle, first_bh); -commit: - ocfs2_commit_trans(osb, handle); out: return ret; } @@ -4099,6 +4093,7 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, if (extend) ret = ocfs2_extend_xattr_bucket(inode, + ctxt->handle, first_bh, header_bh, num_clusters); @@ -4272,14 +4267,13 @@ set_new_name_value: * space for the xattr insertion. */ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, u32 name_hash, int local) { int ret; - handle_t *handle = NULL; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u64 blkno; mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", @@ -4296,14 +4290,6 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, } } - handle = ocfs2_start_trans(osb, xs->bucket->bu_blocks); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - mlog_errno(ret); - goto out; - } - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { @@ -4315,32 +4301,22 @@ static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); out: - ocfs2_commit_trans(osb, handle); - return ret; } static int ocfs2_xattr_value_update_size(struct inode *inode, + handle_t *handle, struct buffer_head *xe_bh, struct ocfs2_xattr_entry *xe, u64 new_size) { int ret; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - handle_t *handle = NULL; - - handle = ocfs2_start_trans(osb, 1); - if (IS_ERR(handle)) { - ret = -ENOMEM; - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access(handle, inode, xe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); - goto out_commit; + goto out; } xe->xe_value_size = cpu_to_le64(new_size); @@ -4349,8 +4325,6 @@ static int ocfs2_xattr_value_update_size(struct inode *inode, if (ret < 0) mlog_errno(ret); -out_commit: - ocfs2_commit_trans(osb, handle); out: return ret; } @@ -4407,7 +4381,8 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, goto out; } - ret = ocfs2_xattr_value_update_size(inode, header_bh, xe, len); + ret = ocfs2_xattr_value_update_size(inode, ctxt->handle, + header_bh, xe, len); if (ret) { mlog_errno(ret); goto out; @@ -4439,6 +4414,7 @@ static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, } static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_search *xs, char *val, int value_len) @@ -4454,7 +4430,8 @@ static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, xv = (struct ocfs2_xattr_value_root *)(xs->base + offset); - return __ocfs2_xattr_set_value_outside(inode, xv, val, value_len); + return 
__ocfs2_xattr_set_value_outside(inode, handle, + xv, val, value_len); } static int ocfs2_rm_xattr_cluster(struct inode *inode, @@ -4547,27 +4524,19 @@ out: } static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, + handle_t *handle, struct ocfs2_xattr_search *xs) { - handle_t *handle = NULL; struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); struct ocfs2_xattr_entry *last = &xh->xh_entries[ le16_to_cpu(xh->xh_count) - 1]; int ret = 0; - handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), - ocfs2_blocks_per_xattr_bucket(inode->i_sb)); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - return; - } - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + return; } /* Remove the old entry. */ @@ -4577,9 +4546,6 @@ static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, le16_add_cpu(&xh->xh_count, -1); ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); - -out_commit: - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); } /* @@ -4645,7 +4611,8 @@ static int ocfs2_xattr_set_in_bucket(struct inode *inode, xi->value_len = OCFS2_XATTR_ROOT_SIZE; } - ret = ocfs2_xattr_set_entry_in_bucket(inode, xi, xs, name_hash, local); + ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs, + name_hash, local); if (ret) { mlog_errno(ret); goto out; @@ -4666,13 +4633,14 @@ static int ocfs2_xattr_set_in_bucket(struct inode *inode, * storage and we have allocated xattr already, * so need to remove it. */ - ocfs2_xattr_bucket_remove_xs(inode, xs); + ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs); } goto out; } set_value_outside: - ret = ocfs2_xattr_bucket_set_value_outside(inode, xs, val, value_len); + ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle, + xs, val, value_len); out: return ret; } @@ -4785,7 +4753,8 @@ try_again: * name/value will be moved, the xe shouldn't be changed * in xs. */ - ret = ocfs2_defrag_xattr_bucket(inode, xs->bucket); + ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, + xs->bucket); if (ret) { mlog_errno(ret); goto out; @@ -4865,6 +4834,13 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, ocfs2_init_dealloc_ctxt(&ctxt.dealloc); + ctxt.handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + if (IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); + mlog_errno(ret); + goto out; + } + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) @@ -4879,9 +4855,10 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, } } + ret = ocfs2_commit_trans(osb, ctxt.handle); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); - +out: return ret; } -- cgit From fecc01126d7a244b7e9b563c80663ffdca35343b Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Wed, 12 Nov 2008 15:16:38 -0800 Subject: ocfs2: turn __ocfs2_remove_inode_range() into ocfs2_remove_btree_range() This patch genericizes the high level handling of extent removal. ocfs2_remove_btree_range() is nearly identical to __ocfs2_remove_inode_range(), except that extent tree operations have been used where necessary. We update ocfs2_remove_inode_range() to use the generic helper. Now extent tree based structures have an easy way to truncate ranges. 
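As an illustration of the intended call pattern (a trimmed sketch mirroring the ocfs2_remove_inode_range() conversion in the diff below, not additional patched code):

/*
 * Trimmed usage sketch: remove one allocated range from a
 * dinode-rooted extent tree via the new generic helper.
 */
static int remove_range_example(struct inode *inode,
				struct buffer_head *di_bh,
				u32 cpos, u32 phys_cpos, u32 len)
{
	struct ocfs2_extent_tree et;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	int ret;

	ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	/* The helper handles allocators, truncate log and the transaction. */
	ret = ocfs2_remove_btree_range(inode, &et, cpos, phys_cpos, len,
				       &dealloc);
	if (ret)
		mlog_errno(ret);

	ocfs2_run_deallocs(OCFS2_SB(inode->i_sb), &dealloc);
	return ret;
}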
Signed-off-by: Mark Fasheh Acked-by: Joel Becker --- fs/ocfs2/alloc.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/alloc.h | 5 ++++ fs/ocfs2/file.c | 85 ++++---------------------------------------------------- 3 files changed, 82 insertions(+), 80 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 4614614084dd..5592a2f6335b 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5255,6 +5255,78 @@ out: return ret; } +int ocfs2_remove_btree_range(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 cpos, u32 phys_cpos, u32 len, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct inode *tl_inode = osb->osb_tl_inode; + handle_t *handle; + struct ocfs2_alloc_context *meta_ac = NULL; + + ret = ocfs2_lock_allocators(inode, et, 0, 1, NULL, &meta_ac); + if (ret) { + mlog_errno(ret); + return ret; + } + + mutex_lock(&tl_inode->i_mutex); + + if (ocfs2_truncate_log_needs_flush(osb)) { + ret = __ocfs2_flush_truncate_log(osb); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, et->et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac, + dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ocfs2_et_update_clusters(inode, et, -len); + + ret = ocfs2_journal_dirty(handle, et->et_root_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); + if (ret) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + mutex_unlock(&tl_inode->i_mutex); + + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + return ret; +} + int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) { struct buffer_head *tl_bh = osb->osb_tl_bh; diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 3eb735eedae6..0fbf8fc55a49 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -110,6 +110,11 @@ int ocfs2_remove_extent(struct inode *inode, u32 cpos, u32 len, handle_t *handle, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc); +int ocfs2_remove_btree_range(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 cpos, u32 phys_cpos, u32 len, + struct ocfs2_cached_dealloc_ctxt *dealloc); + int ocfs2_num_free_extents(struct ocfs2_super *osb, struct inode *inode, struct ocfs2_extent_tree *et); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e2570a3bc2b2..360549161e20 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1226,83 +1226,6 @@ out: return ret; } -static int __ocfs2_remove_inode_range(struct inode *inode, - struct buffer_head *di_bh, - u32 cpos, u32 phys_cpos, u32 len, - struct ocfs2_cached_dealloc_ctxt *dealloc) -{ - int ret; - u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct inode *tl_inode = osb->osb_tl_inode; - handle_t *handle; - struct ocfs2_alloc_context *meta_ac = NULL; - struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; - struct ocfs2_extent_tree et; - - ocfs2_init_dinode_extent_tree(&et, inode, di_bh); - - ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); - if (ret) { - 
mlog_errno(ret); - return ret; - } - - mutex_lock(&tl_inode->i_mutex); - - if (ocfs2_truncate_log_needs_flush(osb)) { - ret = __ocfs2_flush_truncate_log(osb); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - } - - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } - - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - - ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac, - dealloc); - if (ret) { - mlog_errno(ret); - goto out_commit; - } - - OCFS2_I(inode)->ip_clusters -= len; - di->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters); - - ret = ocfs2_journal_dirty(handle, di_bh); - if (ret) { - mlog_errno(ret); - goto out_commit; - } - - ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); - if (ret) - mlog_errno(ret); - -out_commit: - ocfs2_commit_trans(osb, handle); -out: - mutex_unlock(&tl_inode->i_mutex); - - if (meta_ac) - ocfs2_free_alloc_context(meta_ac); - - return ret; -} - /* * Truncate a byte range, avoiding pages within partial clusters. This * preserves those pages for the zeroing code to write to. @@ -1402,7 +1325,9 @@ static int ocfs2_remove_inode_range(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_cached_dealloc_ctxt dealloc; struct address_space *mapping = inode->i_mapping; + struct ocfs2_extent_tree et; + ocfs2_init_dinode_extent_tree(&et, inode, di_bh); ocfs2_init_dealloc_ctxt(&dealloc); if (byte_len == 0) @@ -1458,9 +1383,9 @@ static int ocfs2_remove_inode_range(struct inode *inode, /* Only do work for non-holes */ if (phys_cpos != 0) { - ret = __ocfs2_remove_inode_range(inode, di_bh, cpos, - phys_cpos, alloc_size, - &dealloc); + ret = ocfs2_remove_btree_range(inode, &et, cpos, + phys_cpos, alloc_size, + &dealloc); if (ret) { mlog_errno(ret); goto out; -- cgit From f5d362022a947e84b0a3dd656d09c6b2322e234f Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:15:44 +0800 Subject: ocfs2: move new inode allocation out of the transaction Move out inode allocation from ocfs2_mknod_locked() because vfs_dq_init() must be called outside of a transaction. Signed-off-by: Jan Kara Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/namei.c | 108 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 64 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 2545e7402efe..e8ff0bae179d 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -66,12 +66,12 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, struct inode *dir, - struct dentry *dentry, int mode, + struct inode *inode, + struct dentry *dentry, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, handle_t *handle, - struct inode **ret_inode, struct ocfs2_alloc_context *inode_ac); static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, @@ -186,6 +186,34 @@ bail: return ret; } +static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode) +{ + struct inode *inode; + + inode = new_inode(dir->i_sb); + if (!inode) { + mlog(ML_ERROR, "new_inode failed!\n"); + return NULL; + } + + /* populate as many fields early on as possible - many of + * these are used by the support functions here and in + * callers. 
*/ + if (S_ISDIR(mode)) + inode->i_nlink = 2; + else + inode->i_nlink = 1; + inode->i_uid = current_fsuid(); + if (dir->i_mode & S_ISGID) { + inode->i_gid = dir->i_gid; + if (S_ISDIR(mode)) + mode |= S_ISGID; + } else + inode->i_gid = current_fsgid(); + inode->i_mode = mode; + return inode; +} + static int ocfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, @@ -250,6 +278,13 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + inode = ocfs2_get_init_inode(dir, mode); + if (!inode) { + status = -ENOMEM; + mlog_errno(status); + goto leave; + } + /* Reserve a cluster if creating an extent based directory. */ if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) { status = ocfs2_reserve_clusters(osb, 1, &data_ac); @@ -269,9 +304,9 @@ static int ocfs2_mknod(struct inode *dir, } /* do the real work now. */ - status = ocfs2_mknod_locked(osb, dir, dentry, mode, dev, + status = ocfs2_mknod_locked(osb, dir, inode, dentry, dev, &new_fe_bh, parent_fe_bh, handle, - &inode, inode_ac); + inode_ac); if (status < 0) { mlog_errno(status); goto leave; @@ -332,8 +367,10 @@ leave: brelse(de_bh); brelse(parent_fe_bh); - if ((status < 0) && inode) + if ((status < 0) && inode) { + clear_nlink(inode); iput(inode); + } if (inode_ac) ocfs2_free_alloc_context(inode_ac); @@ -348,12 +385,12 @@ leave: static int ocfs2_mknod_locked(struct ocfs2_super *osb, struct inode *dir, - struct dentry *dentry, int mode, + struct inode *inode, + struct dentry *dentry, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, handle_t *handle, - struct inode **ret_inode, struct ocfs2_alloc_context *inode_ac) { int status = 0; @@ -361,14 +398,12 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, struct ocfs2_extent_list *fel; u64 fe_blkno = 0; u16 suballoc_bit; - struct inode *inode = NULL; - mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, - (unsigned long)dev, dentry->d_name.len, + mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, + inode->i_mode, (unsigned long)dev, dentry->d_name.len, dentry->d_name.name); *new_fe_bh = NULL; - *ret_inode = NULL; status = ocfs2_claim_new_inode(osb, handle, inode_ac, &suballoc_bit, &fe_blkno); @@ -377,23 +412,11 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, goto leave; } - inode = new_inode(dir->i_sb); - if (!inode) { - status = -ENOMEM; - mlog(ML_ERROR, "new_inode failed!\n"); - goto leave; - } - /* populate as many fields early on as possible - many of * these are used by the support functions here and in * callers. 
*/ inode->i_ino = ino_from_blkno(osb->sb, fe_blkno); OCFS2_I(inode)->ip_blkno = fe_blkno; - if (S_ISDIR(mode)) - inode->i_nlink = 2; - else - inode->i_nlink = 1; - inode->i_mode = mode; spin_lock(&osb->osb_lock); inode->i_generation = osb->s_next_generation++; spin_unlock(&osb->osb_lock); @@ -421,17 +444,11 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, fe->i_blkno = cpu_to_le64(fe_blkno); fe->i_suballoc_bit = cpu_to_le16(suballoc_bit); fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot); - fe->i_uid = cpu_to_le32(current_fsuid()); - if (dir->i_mode & S_ISGID) { - fe->i_gid = cpu_to_le32(dir->i_gid); - if (S_ISDIR(mode)) - mode |= S_ISGID; - } else - fe->i_gid = cpu_to_le32(current_fsgid()); - fe->i_mode = cpu_to_le16(mode); - if (S_ISCHR(mode) || S_ISBLK(mode)) + fe->i_uid = cpu_to_le32(inode->i_uid); + fe->i_gid = cpu_to_le32(inode->i_gid); + fe->i_mode = cpu_to_le16(inode->i_mode); + if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev)); - fe->i_links_count = cpu_to_le16(inode->i_nlink); fe->i_last_eb_blk = 0; @@ -446,7 +463,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, /* * If supported, directories start with inline data. */ - if (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) { + if (S_ISDIR(inode->i_mode) && ocfs2_supports_inline_data(osb)) { u16 feat = le16_to_cpu(fe->i_dyn_features); fe->i_dyn_features = cpu_to_le16(feat | OCFS2_INLINE_DATA_FL); @@ -484,17 +501,12 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, status = 0; /* error in ocfs2_create_new_inode_locks is not * critical */ - *ret_inode = inode; leave: if (status < 0) { if (*new_fe_bh) { brelse(*new_fe_bh); *new_fe_bh = NULL; } - if (inode) { - clear_nlink(inode); - iput(inode); - } } mlog_exit(status); @@ -1542,6 +1554,13 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } + inode = ocfs2_get_init_inode(dir, S_IFLNK | S_IRWXUGO); + if (!inode) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + /* don't reserve bitmap space for fast symlinks. */ if (l > ocfs2_fast_symlink_chars(sb)) { status = ocfs2_reserve_clusters(osb, 1, &data_ac); @@ -1560,10 +1579,9 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } - status = ocfs2_mknod_locked(osb, dir, dentry, - S_IFLNK | S_IRWXUGO, 0, - &new_fe_bh, parent_fe_bh, handle, - &inode, inode_ac); + status = ocfs2_mknod_locked(osb, dir, inode, dentry, + 0, &new_fe_bh, parent_fe_bh, handle, + inode_ac); if (status < 0) { mlog_errno(status); goto bail; @@ -1644,8 +1662,10 @@ bail: ocfs2_free_alloc_context(inode_ac); if (data_ac) ocfs2_free_alloc_context(data_ac); - if ((status < 0) && inode) + if ((status < 0) && inode) { + clear_nlink(inode); iput(inode); + } mlog_exit(status); -- cgit From 6c3faba4421e230d77a181c260972229c542dec9 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:16:03 +0800 Subject: ocfs2: add ocfs2_xattr_set_handle This function is used to set xattr's in a started transaction. It is only called during inode creation inode for initial security/acl xattrs of the new inode. These xattrs could be put into ibody or extent block, so xattr bucket would not be use in this case. 
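A hypothetical caller sketch of the new entry point follows; the wrapper name is illustrative, and the real callers for security/ACL initialization are added by later patches in this series.

/*
 * Illustrative only: set one initial xattr from inside an already
 * running inode-creation transaction, using allocators reserved by
 * the caller.  Bucket/index-block paths are never reached for a
 * freshly created inode.
 */
static int set_initial_xattr(handle_t *handle, struct inode *inode,
			     struct buffer_head *di_bh, int name_index,
			     const char *name, const void *value,
			     size_t value_len,
			     struct ocfs2_alloc_context *meta_ac,
			     struct ocfs2_alloc_context *data_ac)
{
	return ocfs2_xattr_set_handle(handle, inode, di_bh, name_index,
				      name, value, value_len, 0,
				      meta_ac, data_ac);
}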
Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/xattr.h | 4 ++++ 2 files changed, 72 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 7a9089255a87..6480254fe396 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2325,6 +2325,74 @@ out: return ret; } +/* + * This function only called duing creating inode + * for init security/acl xattrs of the new inode. + * The xattrs could be put into ibody or extent block, + * xattr bucket would not be use in this case. + * transanction credits also be reserved in here. + */ +int ocfs2_xattr_set_handle(handle_t *handle, + struct inode *inode, + struct buffer_head *di_bh, + int name_index, + const char *name, + const void *value, + size_t value_len, + int flags, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_alloc_context *data_ac) +{ + struct ocfs2_dinode *di; + int ret; + + struct ocfs2_xattr_info xi = { + .name_index = name_index, + .name = name, + .value = value, + .value_len = value_len, + }; + + struct ocfs2_xattr_search xis = { + .not_found = -ENODATA, + }; + + struct ocfs2_xattr_search xbs = { + .not_found = -ENODATA, + }; + + struct ocfs2_xattr_set_ctxt ctxt = { + .handle = handle, + .meta_ac = meta_ac, + .data_ac = data_ac, + }; + + if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) + return -EOPNOTSUPP; + + xis.inode_bh = xbs.inode_bh = di_bh; + di = (struct ocfs2_dinode *)di_bh->b_data; + + down_write(&OCFS2_I(inode)->ip_xattr_sem); + + ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis); + if (ret) + goto cleanup; + if (xis.not_found) { + ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs); + if (ret) + goto cleanup; + } + + ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); + +cleanup: + up_write(&OCFS2_I(inode)->ip_xattr_sem); + brelse(xbs.xattr_bh); + + return ret; +} + /* * ocfs2_xattr_set() * diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 1d8314c7656d..8fbdc163c839 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -37,6 +37,10 @@ extern struct xattr_handler *ocfs2_xattr_handlers[]; ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); int ocfs2_xattr_set(struct inode *, int, const char *, const void *, size_t, int); +int ocfs2_xattr_set_handle(handle_t *, struct inode *, struct buffer_head *, + int, const char *, const void *, size_t, int, + struct ocfs2_alloc_context *, + struct ocfs2_alloc_context *); int ocfs2_xattr_remove(struct inode *, struct buffer_head *); #endif /* OCFS2_XATTR_H */ -- cgit From 923f7f3102b80403152e05aee3d55ecfce240440 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:16:27 +0800 Subject: ocfs2: add security xattr API This patch add security xattr set/get/list APIs to support security attributes in Ocfs2. 
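As a worked example of the list handler's sizing: with the "security." prefix (XATTR_SECURITY_PREFIX, 9 bytes), an attribute whose remaining name is "selinux" contributes 9 + 7 + 1 = 17 bytes (prefix, suffix and trailing NUL) to the listxattr buffer, and ocfs2_xattr_security_list() reports that total_len whether or not a destination buffer was supplied ("selinux" is just an illustrative name here).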
Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/xattr.h | 1 + 2 files changed, 48 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 6480254fe396..db03162914cc 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -35,6 +35,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_XATTR #include @@ -88,12 +89,14 @@ static struct ocfs2_xattr_def_value_root def_xv = { struct xattr_handler *ocfs2_xattr_handlers[] = { &ocfs2_xattr_user_handler, &ocfs2_xattr_trusted_handler, + &ocfs2_xattr_security_handler, NULL }; static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { [OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler, [OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler, + [OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler, }; struct ocfs2_xattr_info { @@ -4976,6 +4979,50 @@ out: return ret; } +/* + * 'security' attributes support + */ +static size_t ocfs2_xattr_security_list(struct inode *inode, char *list, + size_t list_size, const char *name, + size_t name_len) +{ + const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN; + const size_t total_len = prefix_len + name_len + 1; + + if (list && total_len <= list_size) { + memcpy(list, XATTR_SECURITY_PREFIX, prefix_len); + memcpy(list + prefix_len, name, name_len); + list[prefix_len + name_len] = '\0'; + } + return total_len; +} + +static int ocfs2_xattr_security_get(struct inode *inode, const char *name, + void *buffer, size_t size) +{ + if (strcmp(name, "") == 0) + return -EINVAL; + return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY, name, + buffer, size); +} + +static int ocfs2_xattr_security_set(struct inode *inode, const char *name, + const void *value, size_t size, int flags) +{ + if (strcmp(name, "") == 0) + return -EINVAL; + + return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, name, value, + size, flags); +} + +struct xattr_handler ocfs2_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .list = ocfs2_xattr_security_list, + .get = ocfs2_xattr_security_get, + .set = ocfs2_xattr_security_set, +}; + /* * 'trusted' attributes support */ diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 8fbdc163c839..55c5256ff563 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -32,6 +32,7 @@ enum ocfs2_xattr_type { extern struct xattr_handler ocfs2_xattr_user_handler; extern struct xattr_handler ocfs2_xattr_trusted_handler; +extern struct xattr_handler ocfs2_xattr_security_handler; extern struct xattr_handler *ocfs2_xattr_handlers[]; ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); -- cgit From 534eadddc1de8754a227202c0e747af4973f82ce Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:16:41 +0800 Subject: ocfs2: add ocfs2_init_security in during file create Security attributes must be set when creating a new inode. We do this in three steps. 
- First, get security xattr's name and value by security_operation - Calculate and reserve the meta data and clusters needed by this security xattr before starting transaction - Finally, we set it before add_entry Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/namei.c | 107 ++++++++++++++++++++++++++++++++++++++++++++++++------- fs/ocfs2/xattr.c | 70 ++++++++++++++++++++++++++++++++++++ fs/ocfs2/xattr.h | 17 +++++++++ 3 files changed, 182 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index e8ff0bae179d..40da46b907fb 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -229,6 +229,12 @@ static int ocfs2_mknod(struct inode *dir, struct inode *inode = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *xattr_ac = NULL; + int want_clusters = 0; + int xattr_credits = 0; + struct ocfs2_security_xattr_info si = { + .enable = 1, + }; mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, (unsigned long)dev, dentry->d_name.len, @@ -285,17 +291,39 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } - /* Reserve a cluster if creating an extent based directory. */ - if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) { - status = ocfs2_reserve_clusters(osb, 1, &data_ac); + /* get security xattr */ + status = ocfs2_init_security_get(inode, dir, &si); + if (status) { + if (status == -EOPNOTSUPP) + si.enable = 0; + else { + mlog_errno(status); + goto leave; + } + } + + /* calculate meta data/clusters for setting security xattr */ + if (si.enable) { + status = ocfs2_calc_security_init(dir, &si, &want_clusters, + &xattr_credits, &xattr_ac); if (status < 0) { - if (status != -ENOSPC) - mlog_errno(status); + mlog_errno(status); goto leave; } } - handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS); + /* Reserve a cluster if creating an extent based directory. 
*/ + if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) + want_clusters += 1; + + status = ocfs2_reserve_clusters(osb, want_clusters, &data_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS + xattr_credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -335,6 +363,15 @@ static int ocfs2_mknod(struct inode *dir, inc_nlink(dir); } + if (si.enable) { + status = ocfs2_init_security_set(handle, inode, new_fe_bh, &si, + xattr_ac, data_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + } + status = ocfs2_add_entry(handle, dentry, inode, OCFS2_I(inode)->ip_blkno, parent_fe_bh, de_bh); @@ -366,6 +403,8 @@ leave: brelse(new_fe_bh); brelse(de_bh); brelse(parent_fe_bh); + kfree(si.name); + kfree(si.value); if ((status < 0) && inode) { clear_nlink(inode); @@ -378,6 +417,9 @@ leave: if (data_ac) ocfs2_free_alloc_context(data_ac); + if (xattr_ac) + ocfs2_free_alloc_context(xattr_ac); + mlog_exit(status); return status; @@ -1508,6 +1550,12 @@ static int ocfs2_symlink(struct inode *dir, handle_t *handle = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *xattr_ac = NULL; + int want_clusters = 0; + int xattr_credits = 0; + struct ocfs2_security_xattr_info si = { + .enable = 1, + }; mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, dentry, symname, dentry->d_name.len, dentry->d_name.name); @@ -1561,17 +1609,39 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } - /* don't reserve bitmap space for fast symlinks. */ - if (l > ocfs2_fast_symlink_chars(sb)) { - status = ocfs2_reserve_clusters(osb, 1, &data_ac); + /* get security xattr */ + status = ocfs2_init_security_get(inode, dir, &si); + if (status) { + if (status == -EOPNOTSUPP) + si.enable = 0; + else { + mlog_errno(status); + goto bail; + } + } + + /* calculate meta data/clusters for setting security xattr */ + if (si.enable) { + status = ocfs2_calc_security_init(dir, &si, &want_clusters, + &xattr_credits, &xattr_ac); if (status < 0) { - if (status != -ENOSPC) - mlog_errno(status); + mlog_errno(status); goto bail; } } - handle = ocfs2_start_trans(osb, credits); + /* don't reserve bitmap space for fast symlinks. 
*/ + if (l > ocfs2_fast_symlink_chars(sb)) + want_clusters += 1; + + status = ocfs2_reserve_clusters(osb, want_clusters, &data_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + handle = ocfs2_start_trans(osb, credits + xattr_credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1632,6 +1702,15 @@ static int ocfs2_symlink(struct inode *dir, } } + if (si.enable) { + status = ocfs2_init_security_set(handle, inode, new_fe_bh, &si, + xattr_ac, data_ac); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + status = ocfs2_add_entry(handle, dentry, inode, le64_to_cpu(fe->i_blkno), parent_fe_bh, de_bh); @@ -1658,10 +1737,14 @@ bail: brelse(new_fe_bh); brelse(parent_fe_bh); brelse(de_bh); + kfree(si.name); + kfree(si.value); if (inode_ac) ocfs2_free_alloc_context(inode_ac); if (data_ac) ocfs2_free_alloc_context(data_ac); + if (xattr_ac) + ocfs2_free_alloc_context(xattr_ac); if ((status < 0) && inode) { clear_nlink(inode); iput(inode); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index db03162914cc..2cab0d6615f9 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -81,6 +81,9 @@ struct ocfs2_xattr_set_ctxt { #define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) #define OCFS2_XATTR_INLINE_SIZE 80 +#define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \ + - sizeof(struct ocfs2_xattr_header) \ + - sizeof(__u32)) static struct ocfs2_xattr_def_value_root def_xv = { .xv.xr_list.l_count = cpu_to_le16(1), @@ -343,6 +346,52 @@ static void ocfs2_xattr_hash_entry(struct inode *inode, return; } +static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) +{ + int size = 0; + + if (value_len <= OCFS2_XATTR_INLINE_SIZE) + size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); + else + size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + size += sizeof(struct ocfs2_xattr_entry); + + return size; +} + +int ocfs2_calc_security_init(struct inode *dir, + struct ocfs2_security_xattr_info *si, + int *want_clusters, + int *xattr_credits, + struct ocfs2_alloc_context **xattr_ac) +{ + int ret = 0; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + int s_size = ocfs2_xattr_entry_real_size(strlen(si->name), + si->value_len); + + /* + * The max space of security xattr taken inline is + * 256(name) + 80(value) + 16(entry) = 352 bytes, + * So reserve one metadata block for it is ok. 
+ */ + if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE || + s_size > OCFS2_XATTR_FREE_IN_IBODY) { + ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac); + if (ret) { + mlog_errno(ret); + return ret; + } + *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; + } + + /* reserve clusters for xattr value which will be set in B tree*/ + if (si->value_len > OCFS2_XATTR_INLINE_SIZE) + *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, + si->value_len); + return ret; +} + static int ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, struct buffer_head *xattr_bh, @@ -5016,6 +5065,27 @@ static int ocfs2_xattr_security_set(struct inode *inode, const char *name, size, flags); } +int ocfs2_init_security_get(struct inode *inode, + struct inode *dir, + struct ocfs2_security_xattr_info *si) +{ + return security_inode_init_security(inode, dir, &si->name, &si->value, + &si->value_len); +} + +int ocfs2_init_security_set(handle_t *handle, + struct inode *inode, + struct buffer_head *di_bh, + struct ocfs2_security_xattr_info *si, + struct ocfs2_alloc_context *xattr_ac, + struct ocfs2_alloc_context *data_ac) +{ + return ocfs2_xattr_set_handle(handle, inode, di_bh, + OCFS2_XATTR_INDEX_SECURITY, + si->name, si->value, si->value_len, 0, + xattr_ac, data_ac); +} + struct xattr_handler ocfs2_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .list = ocfs2_xattr_security_list, diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 55c5256ff563..188ef6ba6836 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -30,6 +30,13 @@ enum ocfs2_xattr_type { OCFS2_XATTR_MAX }; +struct ocfs2_security_xattr_info { + int enable; + char *name; + void *value; + size_t value_len; +}; + extern struct xattr_handler ocfs2_xattr_user_handler; extern struct xattr_handler ocfs2_xattr_trusted_handler; extern struct xattr_handler ocfs2_xattr_security_handler; @@ -43,5 +50,15 @@ int ocfs2_xattr_set_handle(handle_t *, struct inode *, struct buffer_head *, struct ocfs2_alloc_context *, struct ocfs2_alloc_context *); int ocfs2_xattr_remove(struct inode *, struct buffer_head *); +int ocfs2_init_security_get(struct inode *, struct inode *, + struct ocfs2_security_xattr_info *); +int ocfs2_init_security_set(handle_t *, struct inode *, + struct buffer_head *, + struct ocfs2_security_xattr_info *, + struct ocfs2_alloc_context *, + struct ocfs2_alloc_context *); +int ocfs2_calc_security_init(struct inode *, + struct ocfs2_security_xattr_info *, + int *, int *, struct ocfs2_alloc_context **); #endif /* OCFS2_XATTR_H */ -- cgit From 4e3e9d027f63488e676bf7700ec515a192e54f69 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:16:53 +0800 Subject: ocfs2: add ocfs2_xattr_get_nolock This function does the work of ocfs2_xattr_get under an open lock. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 40 ++++++++++++++++++++++++++++------------ fs/ocfs2/xattr.h | 2 ++ 2 files changed, 30 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2cab0d6615f9..ba9b870a5dda 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -925,12 +925,8 @@ cleanup: return ret; } -/* ocfs2_xattr_get() - * - * Copy an extended attribute into the buffer provided. - * Buffer is NULL to compute the size of buffer required. 
- */ -static int ocfs2_xattr_get(struct inode *inode, +int ocfs2_xattr_get_nolock(struct inode *inode, + struct buffer_head *di_bh, int name_index, const char *name, void *buffer, @@ -938,7 +934,6 @@ static int ocfs2_xattr_get(struct inode *inode, { int ret; struct ocfs2_dinode *di = NULL; - struct buffer_head *di_bh = NULL; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_xattr_search xis = { .not_found = -ENODATA, @@ -953,11 +948,6 @@ static int ocfs2_xattr_get(struct inode *inode, if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) ret = -ENODATA; - ret = ocfs2_inode_lock(inode, &di_bh, 0); - if (ret < 0) { - mlog_errno(ret); - return ret; - } xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; @@ -968,6 +958,32 @@ static int ocfs2_xattr_get(struct inode *inode, ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, buffer_size, &xbs); up_read(&oi->ip_xattr_sem); + + return ret; +} + +/* ocfs2_xattr_get() + * + * Copy an extended attribute into the buffer provided. + * Buffer is NULL to compute the size of buffer required. + */ +static int ocfs2_xattr_get(struct inode *inode, + int name_index, + const char *name, + void *buffer, + size_t buffer_size) +{ + int ret; + struct buffer_head *di_bh = NULL; + + ret = ocfs2_inode_lock(inode, &di_bh, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, + name, buffer, buffer_size); + ocfs2_inode_unlock(inode, 0); brelse(di_bh); diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 188ef6ba6836..86aa10ffe3f3 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -43,6 +43,8 @@ extern struct xattr_handler ocfs2_xattr_security_handler; extern struct xattr_handler *ocfs2_xattr_handlers[]; ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); +int ocfs2_xattr_get_nolock(struct inode *, struct buffer_head *, int, + const char *, void *, size_t); int ocfs2_xattr_set(struct inode *, int, const char *, const void *, size_t, int); int ocfs2_xattr_set_handle(handle_t *, struct inode *, struct buffer_head *, -- cgit From 929fb014e041c6572c5e8c3686f1e32742b5b953 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:17:04 +0800 Subject: ocfs2: add POSIX ACL API This patch adds POSIX ACL(access control lists) APIs in ocfs2. We convert struct posix_acl to many ocfs2_acl_entry and regard them as an extended attribute entry. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/Makefile | 4 + fs/ocfs2/acl.c | 378 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/acl.h | 29 +++++ fs/ocfs2/ocfs2.h | 1 + fs/ocfs2/xattr.c | 10 ++ fs/ocfs2/xattr.h | 4 + 6 files changed, 426 insertions(+) create mode 100644 fs/ocfs2/acl.c create mode 100644 fs/ocfs2/acl.h (limited to 'fs') diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 589dcdfdfe3c..e9ef5d162db1 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -37,6 +37,10 @@ ocfs2-objs := \ ver.o \ xattr.o +ifeq ($(CONFIG_OCFS2_FS_POSIX_ACL),y) +ocfs2-objs += acl.o +endif + ocfs2_stackglue-objs := stackglue.o ocfs2_stack_o2cb-objs := stack_o2cb.o ocfs2_stack_user-objs := stack_user.o diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c new file mode 100644 index 000000000000..62d0faad600b --- /dev/null +++ b/fs/ocfs2/acl.c @@ -0,0 +1,378 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * acl.c + * + * Copyright (C) 2004, 2008 Oracle. All rights reserved. + * + * CREDITS: + * Lots of code in this file is copy from linux/fs/ext3/acl.c. 
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include + +#define MLOG_MASK_PREFIX ML_INODE +#include + +#include "ocfs2.h" +#include "alloc.h" +#include "dlmglue.h" +#include "file.h" +#include "ocfs2_fs.h" + +#include "xattr.h" +#include "acl.h" + +/* + * Convert from xattr value to acl struct. + */ +static struct posix_acl *ocfs2_acl_from_xattr(const void *value, size_t size) +{ + int n, count; + struct posix_acl *acl; + + if (!value) + return NULL; + if (size < sizeof(struct posix_acl_entry)) + return ERR_PTR(-EINVAL); + + count = size / sizeof(struct posix_acl_entry); + if (count < 0) + return ERR_PTR(-EINVAL); + if (count == 0) + return NULL; + + acl = posix_acl_alloc(count, GFP_NOFS); + if (!acl) + return ERR_PTR(-ENOMEM); + for (n = 0; n < count; n++) { + struct ocfs2_acl_entry *entry = + (struct ocfs2_acl_entry *)value; + + acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); + acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); + acl->a_entries[n].e_id = le32_to_cpu(entry->e_id); + value += sizeof(struct posix_acl_entry); + + } + return acl; +} + +/* + * Convert acl struct to xattr value. + */ +static void *ocfs2_acl_to_xattr(const struct posix_acl *acl, size_t *size) +{ + struct ocfs2_acl_entry *entry = NULL; + char *ocfs2_acl; + size_t n; + + *size = acl->a_count * sizeof(struct posix_acl_entry); + + ocfs2_acl = kmalloc(*size, GFP_NOFS); + if (!ocfs2_acl) + return ERR_PTR(-ENOMEM); + + entry = (struct ocfs2_acl_entry *)ocfs2_acl; + for (n = 0; n < acl->a_count; n++, entry++) { + entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); + entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); + entry->e_id = cpu_to_le32(acl->a_entries[n].e_id); + } + return ocfs2_acl; +} + +static struct posix_acl *ocfs2_get_acl_nolock(struct inode *inode, + int type, + struct buffer_head *di_bh) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int name_index; + char *value = NULL; + struct posix_acl *acl; + int retval; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + + retval = ocfs2_xattr_get_nolock(inode, di_bh, name_index, "", NULL, 0); + if (retval > 0) { + value = kmalloc(retval, GFP_NOFS); + if (!value) + return ERR_PTR(-ENOMEM); + retval = ocfs2_xattr_get_nolock(inode, di_bh, name_index, + "", value, retval); + } + + if (retval > 0) + acl = ocfs2_acl_from_xattr(value, retval); + else if (retval == -ENODATA || retval == 0) + acl = NULL; + else + acl = ERR_PTR(retval); + + kfree(value); + + return acl; +} + + +/* + * Get posix acl. 
+ */ +static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + struct posix_acl *acl; + int ret; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return NULL; + + ret = ocfs2_inode_lock(inode, &di_bh, 0); + if (ret < 0) { + mlog_errno(ret); + acl = ERR_PTR(ret); + return acl; + } + + acl = ocfs2_get_acl_nolock(inode, type, di_bh); + + ocfs2_inode_unlock(inode, 0); + + brelse(di_bh); + + return acl; +} + +/* + * Set the access or default ACL of an inode. + */ +static int ocfs2_set_acl(handle_t *handle, + struct inode *inode, + struct buffer_head *di_bh, + int type, + struct posix_acl *acl, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_alloc_context *data_ac) +{ + int name_index; + void *value = NULL; + size_t size = 0; + int ret; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + switch (type) { + case ACL_TYPE_ACCESS: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { + mode_t mode = inode->i_mode; + ret = posix_acl_equiv_mode(acl, &mode); + if (ret < 0) + return ret; + else { + inode->i_mode = mode; + if (ret == 0) + acl = NULL; + } + } + break; + case ACL_TYPE_DEFAULT: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT; + if (!S_ISDIR(inode->i_mode)) + return acl ? -EACCES : 0; + break; + default: + return -EINVAL; + } + + if (acl) { + value = ocfs2_acl_to_xattr(acl, &size); + if (IS_ERR(value)) + return (int)PTR_ERR(value); + } + + if (handle) + ret = ocfs2_xattr_set_handle(handle, inode, di_bh, name_index, + "", value, size, 0, + meta_ac, data_ac); + else + ret = ocfs2_xattr_set(inode, name_index, "", value, size, 0); + + kfree(value); + + return ret; +} + +static size_t ocfs2_xattr_list_acl_access(struct inode *inode, + char *list, + size_t list_len, + const char *name, + size_t name_len) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS); + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return 0; + + if (list && size <= list_len) + memcpy(list, POSIX_ACL_XATTR_ACCESS, size); + return size; +} + +static size_t ocfs2_xattr_list_acl_default(struct inode *inode, + char *list, + size_t list_len, + const char *name, + size_t name_len) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT); + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return 0; + + if (list && size <= list_len) + memcpy(list, POSIX_ACL_XATTR_DEFAULT, size); + return size; +} + +static int ocfs2_xattr_get_acl(struct inode *inode, + int type, + void *buffer, + size_t size) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl; + int ret; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return -EOPNOTSUPP; + + acl = ocfs2_get_acl(inode, type); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl == NULL) + return -ENODATA; + ret = posix_acl_to_xattr(acl, buffer, size); + posix_acl_release(acl); + + return ret; +} + +static int ocfs2_xattr_get_acl_access(struct inode *inode, + const char *name, + void *buffer, + size_t size) +{ + if (strcmp(name, "") != 0) + return -EINVAL; + return ocfs2_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size); +} + +static int ocfs2_xattr_get_acl_default(struct inode *inode, + const char *name, + void *buffer, + size_t size) +{ + if (strcmp(name, "") != 0) + return -EINVAL; + return ocfs2_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size); +} + +static int ocfs2_xattr_set_acl(struct inode *inode, + int type, 
+ const void *value, + size_t size) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl; + int ret = 0; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return -EOPNOTSUPP; + + if (!is_owner_or_cap(inode)) + return -EPERM; + + if (value) { + acl = posix_acl_from_xattr(value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + else if (acl) { + ret = posix_acl_valid(acl); + if (ret) + goto cleanup; + } + } else + acl = NULL; + + ret = ocfs2_set_acl(NULL, inode, NULL, type, acl, NULL, NULL); + +cleanup: + posix_acl_release(acl); + return ret; +} + +static int ocfs2_xattr_set_acl_access(struct inode *inode, + const char *name, + const void *value, + size_t size, + int flags) +{ + if (strcmp(name, "") != 0) + return -EINVAL; + return ocfs2_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size); +} + +static int ocfs2_xattr_set_acl_default(struct inode *inode, + const char *name, + const void *value, + size_t size, + int flags) +{ + if (strcmp(name, "") != 0) + return -EINVAL; + return ocfs2_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size); +} + +struct xattr_handler ocfs2_xattr_acl_access_handler = { + .prefix = POSIX_ACL_XATTR_ACCESS, + .list = ocfs2_xattr_list_acl_access, + .get = ocfs2_xattr_get_acl_access, + .set = ocfs2_xattr_set_acl_access, +}; + +struct xattr_handler ocfs2_xattr_acl_default_handler = { + .prefix = POSIX_ACL_XATTR_DEFAULT, + .list = ocfs2_xattr_list_acl_default, + .get = ocfs2_xattr_get_acl_default, + .set = ocfs2_xattr_set_acl_default, +}; diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h new file mode 100644 index 000000000000..1b39f3e14c1b --- /dev/null +++ b/fs/ocfs2/acl.h @@ -0,0 +1,29 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * acl.h + * + * Copyright (C) 2004, 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef OCFS2_ACL_H +#define OCFS2_ACL_H + +#include + +struct ocfs2_acl_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +#endif /* OCFS2_ACL_H */ diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 3fed9e3d8992..25d07ff1d3cd 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -195,6 +195,7 @@ enum ocfs2_mount_options OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */ OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */ OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */ + OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* POSIX access control lists */ }; #define OCFS2_OSB_SOFT_RO 0x0001 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index ba9b870a5dda..2e273c2cb831 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -91,6 +91,10 @@ static struct ocfs2_xattr_def_value_root def_xv = { struct xattr_handler *ocfs2_xattr_handlers[] = { &ocfs2_xattr_user_handler, +#ifdef CONFIG_OCFS2_FS_POSIX_ACL + &ocfs2_xattr_acl_access_handler, + &ocfs2_xattr_acl_default_handler, +#endif &ocfs2_xattr_trusted_handler, &ocfs2_xattr_security_handler, NULL @@ -98,6 +102,12 @@ struct xattr_handler *ocfs2_xattr_handlers[] = { static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { [OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler, +#ifdef CONFIG_OCFS2_FS_POSIX_ACL + [OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] + = &ocfs2_xattr_acl_access_handler, + [OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] + = &ocfs2_xattr_acl_default_handler, +#endif [OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler, [OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler, }; diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 86aa10ffe3f3..6163df336d8c 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -40,6 +40,10 @@ struct ocfs2_security_xattr_info { extern struct xattr_handler ocfs2_xattr_user_handler; extern struct xattr_handler ocfs2_xattr_trusted_handler; extern struct xattr_handler ocfs2_xattr_security_handler; +#ifdef CONFIG_OCFS2_FS_POSIX_ACL +extern struct xattr_handler ocfs2_xattr_acl_access_handler; +extern struct xattr_handler ocfs2_xattr_acl_default_handler; +#endif extern struct xattr_handler *ocfs2_xattr_handlers[]; ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); -- cgit From 23fc2702bea686569281708ad519b41a11d0a2f4 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:17:18 +0800 Subject: ocfs2: add ocfs2_check_acl This function is used to enhance permission checking with POSIX ACLs. 
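The callback plugs straight into the VFS permission helper; the fs/ocfs2/file.c hunk below is the whole wiring:

	ret = generic_permission(inode, mask, ocfs2_check_acl);

Returning -EAGAIN when no access ACL exists is the conventional signal for generic_permission() to fall back to the ordinary mode-bit check.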
Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/acl.c | 15 +++++++++++++++ fs/ocfs2/acl.h | 10 ++++++++++ fs/ocfs2/file.c | 3 ++- 3 files changed, 27 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 62d0faad600b..a6a2bf6d6845 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -230,6 +230,21 @@ static int ocfs2_set_acl(handle_t *handle, return ret; } +int ocfs2_check_acl(struct inode *inode, int mask) +{ + struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS); + + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { + int ret = posix_acl_permission(inode, acl, mask); + posix_acl_release(acl); + return ret; + } + + return -EAGAIN; +} + static size_t ocfs2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len, diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 1b39f3e14c1b..fef10f1b782b 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -26,4 +26,14 @@ struct ocfs2_acl_entry { __le32 e_id; }; +#ifdef CONFIG_OCFS2_FS_POSIX_ACL + +extern int ocfs2_check_acl(struct inode *, int); + +#else /* CONFIG_OCFS2_FS_POSIX_ACL*/ + +#define ocfs2_check_acl NULL + +#endif /* CONFIG_OCFS2_FS_POSIX_ACL*/ + #endif /* OCFS2_ACL_H */ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 360549161e20..7bad7d9b9a2c 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -56,6 +56,7 @@ #include "suballoc.h" #include "super.h" #include "xattr.h" +#include "acl.h" #include "buffer_head_io.h" @@ -1035,7 +1036,7 @@ int ocfs2_permission(struct inode *inode, int mask) goto out; } - ret = generic_permission(inode, mask, NULL); + ret = generic_permission(inode, mask, ocfs2_check_acl); ocfs2_inode_unlock(inode, 0); out: -- cgit From 060bc66dd5017460076d9e808e2198cd532c943d Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:17:29 +0800 Subject: ocfs2: add ocfs2_acl_chmod This function is used to update acl xattrs during file mode changes. 
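A minimal sketch of the call site, matching the fs/ocfs2/file.c hunk below (invoked from the setattr path once the mode update has succeeded):

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode);
		if (status < 0)
			mlog_errno(status);
	}

so a chmod() rewrites the stored access ACL via posix_acl_chmod_masq() to keep it consistent with the new i_mode.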
Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/acl.c | 27 +++++++++++++++++++++++++++ fs/ocfs2/acl.h | 5 +++++ fs/ocfs2/file.c | 6 ++++++ 3 files changed, 38 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index a6a2bf6d6845..df72256c4422 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -245,6 +245,33 @@ int ocfs2_check_acl(struct inode *inode, int mask) return -EAGAIN; } +int ocfs2_acl_chmod(struct inode *inode) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl, *clone; + int ret; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return 0; + + acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS); + if (IS_ERR(acl) || !acl) + return PTR_ERR(acl); + clone = posix_acl_clone(acl, GFP_KERNEL); + posix_acl_release(acl); + if (!clone) + return -ENOMEM; + ret = posix_acl_chmod_masq(clone, inode->i_mode); + if (!ret) + ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS, + clone, NULL, NULL); + posix_acl_release(clone); + return ret; +} + static size_t ocfs2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len, diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index fef10f1b782b..68ffd6436c50 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -29,10 +29,15 @@ struct ocfs2_acl_entry { #ifdef CONFIG_OCFS2_FS_POSIX_ACL extern int ocfs2_check_acl(struct inode *, int); +extern int ocfs2_acl_chmod(struct inode *); #else /* CONFIG_OCFS2_FS_POSIX_ACL*/ #define ocfs2_check_acl NULL +static inline int ocfs2_acl_chmod(struct inode *inode) +{ + return 0; +} #endif /* CONFIG_OCFS2_FS_POSIX_ACL*/ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7bad7d9b9a2c..4636aa6b0117 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -990,6 +990,12 @@ bail_unlock_rw: bail: brelse(bh); + if (!status && attr->ia_valid & ATTR_MODE) { + status = ocfs2_acl_chmod(inode); + if (status < 0) + mlog_errno(status); + } + mlog_exit(status); return status; } -- cgit From 89c38bd0ade3c567707ed8fce088b253b0369c50 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:17:41 +0800 Subject: ocfs2: add ocfs2_init_acl in mknod We need to get the parent directories acls and let the new child inherit it. To this, we add additional calculations for data/metadata allocation. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/acl.c | 59 ++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/acl.h | 14 ++++++++++ fs/ocfs2/namei.c | 23 +++++++++++------ fs/ocfs2/xattr.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/xattr.h | 3 +++ 5 files changed, 170 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index df72256c4422..12dfb44c22e5 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -272,6 +272,65 @@ int ocfs2_acl_chmod(struct inode *inode) return ret; } +/* + * Initialize the ACLs of a new inode. If parent directory has default ACL, + * then clone to new inode. Called from ocfs2_mknod. 
+ */ +int ocfs2_init_acl(handle_t *handle, + struct inode *inode, + struct inode *dir, + struct buffer_head *di_bh, + struct buffer_head *dir_bh, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_alloc_context *data_ac) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl = NULL; + int ret = 0; + + if (!S_ISLNK(inode->i_mode)) { + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { + acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT, + dir_bh); + if (IS_ERR(acl)) + return PTR_ERR(acl); + } + if (!acl) + inode->i_mode &= ~current->fs->umask; + } + if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { + struct posix_acl *clone; + mode_t mode; + + if (S_ISDIR(inode->i_mode)) { + ret = ocfs2_set_acl(handle, inode, di_bh, + ACL_TYPE_DEFAULT, acl, + meta_ac, data_ac); + if (ret) + goto cleanup; + } + clone = posix_acl_clone(acl, GFP_NOFS); + ret = -ENOMEM; + if (!clone) + goto cleanup; + + mode = inode->i_mode; + ret = posix_acl_create_masq(clone, &mode); + if (ret >= 0) { + inode->i_mode = mode; + if (ret > 0) { + ret = ocfs2_set_acl(handle, inode, + di_bh, ACL_TYPE_ACCESS, + clone, meta_ac, data_ac); + } + } + posix_acl_release(clone); + } +cleanup: + posix_acl_release(acl); + return ret; +} + static size_t ocfs2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len, diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 68ffd6436c50..8f6389ed4da5 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -30,6 +30,10 @@ struct ocfs2_acl_entry { extern int ocfs2_check_acl(struct inode *, int); extern int ocfs2_acl_chmod(struct inode *); +extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, + struct buffer_head *, struct buffer_head *, + struct ocfs2_alloc_context *, + struct ocfs2_alloc_context *); #else /* CONFIG_OCFS2_FS_POSIX_ACL*/ @@ -38,6 +42,16 @@ static inline int ocfs2_acl_chmod(struct inode *inode) { return 0; } +static inline int ocfs2_init_acl(handle_t *handle, + struct inode *inode, + struct inode *dir, + struct buffer_head *di_bh, + struct buffer_head *dir_bh, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_alloc_context *data_ac) +{ + return 0; +} #endif /* CONFIG_OCFS2_FS_POSIX_ACL*/ diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 40da46b907fb..765514512096 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -61,6 +61,7 @@ #include "sysfile.h" #include "uptodate.h" #include "xattr.h" +#include "acl.h" #include "buffer_head_io.h" @@ -302,14 +303,13 @@ static int ocfs2_mknod(struct inode *dir, } } - /* calculate meta data/clusters for setting security xattr */ - if (si.enable) { - status = ocfs2_calc_security_init(dir, &si, &want_clusters, - &xattr_credits, &xattr_ac); - if (status < 0) { - mlog_errno(status); - goto leave; - } + /* calculate meta data/clusters for setting security and acl xattr */ + status = ocfs2_calc_xattr_init(dir, parent_fe_bh, mode, + &si, &want_clusters, + &xattr_credits, &xattr_ac); + if (status < 0) { + mlog_errno(status); + goto leave; } /* Reserve a cluster if creating an extent based directory. 
*/ @@ -363,6 +363,13 @@ static int ocfs2_mknod(struct inode *dir, inc_nlink(dir); } + status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh, + xattr_ac, data_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + if (si.enable) { status = ocfs2_init_security_set(handle, inode, new_fe_bh, &si, xattr_ac, data_ac); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2e273c2cb831..3cc8385f9738 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -84,6 +84,10 @@ struct ocfs2_xattr_set_ctxt { #define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \ - sizeof(struct ocfs2_xattr_header) \ - sizeof(__u32)) +#define OCFS2_XATTR_FREE_IN_BLOCK(ptr) ((ptr)->i_sb->s_blocksize \ + - sizeof(struct ocfs2_xattr_block) \ + - sizeof(struct ocfs2_xattr_header) \ + - sizeof(__u32)) static struct ocfs2_xattr_def_value_root def_xv = { .xv.xr_list.l_count = cpu_to_le16(1), @@ -402,6 +406,81 @@ int ocfs2_calc_security_init(struct inode *dir, return ret; } +int ocfs2_calc_xattr_init(struct inode *dir, + struct buffer_head *dir_bh, + int mode, + struct ocfs2_security_xattr_info *si, + int *want_clusters, + int *xattr_credits, + struct ocfs2_alloc_context **xattr_ac) +{ + int ret = 0; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + int s_size = 0; + int a_size = 0; + int acl_len = 0; + + if (si->enable) + s_size = ocfs2_xattr_entry_real_size(strlen(si->name), + si->value_len); + + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { + acl_len = ocfs2_xattr_get_nolock(dir, dir_bh, + OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT, + "", NULL, 0); + if (acl_len > 0) { + a_size = ocfs2_xattr_entry_real_size(0, acl_len); + if (S_ISDIR(mode)) + a_size <<= 1; + } else if (acl_len != 0 && acl_len != -ENODATA) { + mlog_errno(ret); + return ret; + } + } + + if (!(s_size + a_size)) + return ret; + + /* + * The max space of security xattr taken inline is + * 256(name) + 80(value) + 16(entry) = 352 bytes, + * The max space of acl xattr taken inline is + * 80(value) + 16(entry) * 2(if directory) = 192 bytes, + * when blocksize = 512, may reserve one more cluser for + * xattr bucket, otherwise reserve one metadata block + * for them is ok. 
+ */ + if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE || + (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) { + ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac); + if (ret) { + mlog_errno(ret); + return ret; + } + *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; + } + + if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE && + (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) { + *want_clusters += 1; + *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb); + } + + /* reserve clusters for xattr value which will be set in B tree*/ + if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) + *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, + si->value_len); + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL && + acl_len > OCFS2_XATTR_INLINE_SIZE) { + *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, acl_len); + if (S_ISDIR(mode)) + *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, + acl_len); + } + + return ret; +} + static int ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, struct buffer_head *xattr_bh, diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 6163df336d8c..9a67e7d8f812 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -66,5 +66,8 @@ int ocfs2_init_security_set(handle_t *, struct inode *, int ocfs2_calc_security_init(struct inode *, struct ocfs2_security_xattr_info *, int *, int *, struct ocfs2_alloc_context **); +int ocfs2_calc_xattr_init(struct inode *, struct buffer_head *, + int, struct ocfs2_security_xattr_info *, + int *, int *, struct ocfs2_alloc_context **); #endif /* OCFS2_XATTR_H */ -- cgit From a68979b857283daf4acc405e476dcc8812a3ff2b Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Fri, 14 Nov 2008 11:17:52 +0800 Subject: ocfs2: add mount option and Kconfig option for acl This patch adds the Kconfig option "CONFIG_OCFS2_FS_POSIX_ACL" and mount options "acl" to enable acls in Ocfs2. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/Kconfig | 9 +++++++++ fs/ocfs2/super.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index ff0e81980207..e8a47f74a839 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -268,6 +268,15 @@ config OCFS2_COMPAT_JBD is backwards compatible with JBD. It is safe to say N here. However, if you really want to use the original JBD, say Y here. +config OCFS2_FS_POSIX_ACL + bool "OCFS2 POSIX Access Control Lists" + depends on OCFS2_FS + select FS_POSIX_ACL + default n + help + Posix Access Control Lists (ACLs) support permissions for users and + groups beyond the owner/group/world scheme. + endif # BLOCK source "fs/notify/Kconfig" diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 304b63ac78cf..9e7accc68b4b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -158,6 +158,8 @@ enum { Opt_user_xattr, Opt_nouser_xattr, Opt_inode64, + Opt_acl, + Opt_noacl, Opt_err, }; @@ -180,6 +182,8 @@ static const match_table_t tokens = { {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_inode64, "inode64"}, + {Opt_acl, "acl"}, + {Opt_noacl, "noacl"}, {Opt_err, NULL} }; @@ -466,6 +470,8 @@ unlock_osb: if (!ret) { /* Only save off the new mount options in case of a successful * remount. 
*/ + if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)) + parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; osb->s_mount_opt = parsed_options.mount_opt; osb->s_atime_quantum = parsed_options.atime_quantum; osb->preferred_slot = parsed_options.slot; @@ -651,6 +657,10 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) } brelse(bh); bh = NULL; + + if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)) + parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; + osb->s_mount_opt = parsed_options.mount_opt; osb->s_atime_quantum = parsed_options.atime_quantum; osb->preferred_slot = parsed_options.slot; @@ -664,6 +674,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = OCFS2_SUPER_MAGIC; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); + /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, * heartbeat=none */ if (bdev_read_only(sb->s_bdev)) { @@ -945,6 +958,19 @@ static int ocfs2_parse_options(struct super_block *sb, case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; break; +#ifdef CONFIG_OCFS2_FS_POSIX_ACL + case Opt_acl: + mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; + break; + case Opt_noacl: + mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; + break; +#else + case Opt_acl: + case Opt_noacl: + printk(KERN_INFO "ocfs2 (no)acl options not supported\n"); + break; +#endif default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " @@ -1017,6 +1043,13 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) if (opts & OCFS2_MOUNT_INODE64) seq_printf(s, ",inode64"); +#ifdef CONFIG_OCFS2_FS_POSIX_ACL + if (opts & OCFS2_MOUNT_POSIX_ACL) + seq_printf(s, ",acl"); + else + seq_printf(s, ",noacl"); +#endif + return 0; } -- cgit From b657c95c11088d77fc1bfc9c84d940f778bf9d12 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:11 -0800 Subject: ocfs2: Wrap inode block reads in a dedicated function. The ocfs2 code currently reads inodes off disk with a simple ocfs2_read_block() call. Each place that does this has a different set of sanity checks it performs. Some check only the signature. A couple validate the block number (the block read vs di->i_blkno). A couple others check for VALID_FL. Only one place validates i_fs_generation. A couple check nothing. Even when an error is found, they don't all do the same thing. We wrap inode reading into ocfs2_read_inode_block(). This will validate all the above fields, going readonly if they are invalid (they never should be). ocfs2_read_inode_block_full() is provided for the places that want to pass read_block flags. Every caller is passing a struct inode with a valid ip_blkno, so we don't need a separate blkno argument either. We will remove the validation checks from the rest of the code in a later commit, as they are no longer necessary. 
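The conversion itself is mechanical; callers that previously did

	status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh);
	/* ...followed by an ad-hoc OCFS2_IS_VALID_DINODE() check, or none at all... */

now simply call

	status = ocfs2_read_inode_block(inode, &bh);

and rely on ocfs2_validate_inode_block() for the signature, i_blkno, OCFS2_VALID_FL and i_fs_generation checks, as the hunks below show.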
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 2 +- fs/ocfs2/aops.c | 11 +--- fs/ocfs2/dir.c | 6 +-- fs/ocfs2/dlmglue.c | 12 ++--- fs/ocfs2/extent_map.c | 2 +- fs/ocfs2/file.c | 21 ++------ fs/ocfs2/inode.c | 136 ++++++++++++++++++++++++++++++++++++-------------- fs/ocfs2/inode.h | 16 +++++- fs/ocfs2/journal.c | 3 +- fs/ocfs2/localalloc.c | 8 +-- fs/ocfs2/namei.c | 14 +----- fs/ocfs2/symlink.c | 2 +- 12 files changed, 136 insertions(+), 97 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 5592a2f6335b..9c598adc9475 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5658,7 +5658,7 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, goto bail; } - status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { iput(inode); mlog_errno(status); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c22543b33420..e219f8b546ac 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -68,20 +68,13 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, goto bail; } - status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { mlog_errno(status); goto bail; } fe = (struct ocfs2_dinode *) bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n", - (unsigned long long)le64_to_cpu(fe->i_blkno), 7, - fe->i_signature); - goto bail; - } - if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb, le32_to_cpu(fe->i_clusters))) { mlog(ML_ERROR, "block offset is outside the allocated size: " @@ -262,7 +255,7 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page) BUG_ON(!PageLocked(page)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); - ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 026e6eb85187..5777045f1a67 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -231,7 +231,7 @@ static struct buffer_head *ocfs2_find_entry_id(const char *name, struct ocfs2_dinode *di; struct ocfs2_inline_data *data; - ret = ocfs2_read_block(dir, OCFS2_I(dir)->ip_blkno, &di_bh); + ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; @@ -458,7 +458,7 @@ static inline int ocfs2_delete_entry_id(handle_t *handle, struct ocfs2_dinode *di; struct ocfs2_inline_data *data; - ret = ocfs2_read_block(dir, OCFS2_I(dir)->ip_blkno, &di_bh); + ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; @@ -636,7 +636,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode, struct ocfs2_inline_data *data; struct ocfs2_dir_entry *de; - ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog(ML_ERROR, "Unable to read inode block for dir %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 6e6cc0a2e5f7..9f2a7f75d1b3 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2024,7 +2024,7 @@ static int ocfs2_inode_lock_update(struct inode *inode, } else { /* Boo, we have to go to disk. 
*/ /* read bh, cast, ocfs2_refresh_inode */ - status = ocfs2_read_block(inode, oi->ip_blkno, bh); + status = ocfs2_read_inode_block(inode, bh); if (status < 0) { mlog_errno(status); goto bail_refresh; @@ -2032,18 +2032,14 @@ static int ocfs2_inode_lock_update(struct inode *inode, fe = (struct ocfs2_dinode *) (*bh)->b_data; /* This is a good chance to make sure we're not - * locking an invalid object. + * locking an invalid object. ocfs2_read_inode_block() + * already checked that the inode block is sane. * * We bug on a stale inode here because we checked * above whether it was wiped from disk. The wiping * node provides a guarantee that we receive that * message and can mark the inode before dropping any * locks associated with it. */ - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); - status = -EIO; - goto bail_refresh; - } mlog_bug_on_msg(inode->i_generation != le32_to_cpu(fe->i_generation), "Invalid dinode %llu disk generation: %u " @@ -2085,7 +2081,7 @@ static int ocfs2_assign_bh(struct inode *inode, return 0; } - status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, ret_bh); + status = ocfs2_read_inode_block(inode, ret_bh); if (status < 0) mlog_errno(status); diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 2baedac58234..b686b31cf49c 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -630,7 +630,7 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, if (ret == 0) goto out; - ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 4636aa6b0117..41001d515fae 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -402,12 +402,9 @@ static int ocfs2_truncate_file(struct inode *inode, (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)new_i_size); + /* We trust di_bh because it comes from ocfs2_inode_lock(), which + * already validated it */ fe = (struct ocfs2_dinode *) di_bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); - status = -EIO; - goto bail; - } mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), "Inode %llu, inode i_size = %lld != di " @@ -546,18 +543,12 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start, */ BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb)); - status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { mlog_errno(status); goto leave; } - fe = (struct ocfs2_dinode *) bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); - status = -EIO; - goto leave; - } restart_all: BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); @@ -1135,9 +1126,8 @@ static int ocfs2_write_remove_suid(struct inode *inode) { int ret; struct buffer_head *bh = NULL; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - ret = ocfs2_read_block(inode, oi->ip_blkno, &bh); + ret = ocfs2_read_inode_block(inode, &bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -1163,8 +1153,7 @@ static int ocfs2_allocate_unwritten_extents(struct inode *inode, struct buffer_head *di_bh = NULL; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, - &di_bh); + ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 
7aa00d511874..9eb701b86466 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -214,12 +214,11 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) return 0; } -int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, - int create_ino) +void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, + int create_ino) { struct super_block *sb; struct ocfs2_super *osb; - int status = -EINVAL; int use_plocks = 1; mlog_entry("(0x%p, size:%llu)\n", inode, @@ -232,25 +231,17 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks()) use_plocks = 0; - /* this means that read_inode cannot create a superblock inode - * today. change if needed. */ - if (!OCFS2_IS_VALID_DINODE(fe) || - !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { - mlog(0, "Invalid dinode: i_ino=%lu, i_blkno=%llu, " - "signature = %.*s, flags = 0x%x\n", - inode->i_ino, - (unsigned long long)le64_to_cpu(fe->i_blkno), 7, - fe->i_signature, le32_to_cpu(fe->i_flags)); - goto bail; - } + /* + * These have all been checked by ocfs2_read_inode_block() or set + * by ocfs2_mknod_locked(), so a failure is a code bug. + */ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); /* This means that read_inode + cannot create a superblock + inode today. change if + that is needed. */ + BUG_ON(!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))); + BUG_ON(le32_to_cpu(fe->i_fs_generation) != osb->fs_generation); - if (le32_to_cpu(fe->i_fs_generation) != osb->fs_generation) { - mlog(ML_ERROR, "file entry generation does not match " - "superblock! osb->fs_generation=%x, " - "fe->i_fs_generation=%x\n", - osb->fs_generation, le32_to_cpu(fe->i_fs_generation)); - goto bail; - } OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr); @@ -354,10 +345,7 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, ocfs2_set_inode_flags(inode); - status = 0; -bail: - mlog_exit(status); - return status; + mlog_exit_void(); } static int ocfs2_read_locked_inode(struct inode *inode, @@ -460,11 +448,14 @@ static int ocfs2_read_locked_inode(struct inode *inode, } } - if (can_lock) - status = ocfs2_read_blocks(inode, args->fi_blkno, 1, &bh, - OCFS2_BH_IGNORE_CACHE); - else + if (can_lock) { + status = ocfs2_read_inode_block_full(inode, &bh, + OCFS2_BH_IGNORE_CACHE); + } else { status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); + if (!status) + status = ocfs2_validate_inode_block(osb->sb, bh); + } if (status < 0) { mlog_errno(status); goto bail; @@ -472,12 +463,6 @@ static int ocfs2_read_locked_inode(struct inode *inode, status = -EINVAL; fe = (struct ocfs2_dinode *) bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - mlog(0, "Invalid dinode #%llu: signature = %.*s\n", - (unsigned long long)args->fi_blkno, 7, - fe->i_signature); - goto bail; - } /* * This is a code bug. 
Right now the caller needs to @@ -491,10 +476,9 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (S_ISCHR(le16_to_cpu(fe->i_mode)) || S_ISBLK(le16_to_cpu(fe->i_mode))) - inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); + inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); - if (ocfs2_populate_inode(inode, fe, 0) < 0) - goto bail; + ocfs2_populate_inode(inode, fe, 0); BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno)); @@ -1264,3 +1248,79 @@ void ocfs2_refresh_inode(struct inode *inode, spin_unlock(&OCFS2_I(inode)->ip_lock); } + +int ocfs2_validate_inode_block(struct super_block *sb, + struct buffer_head *bh) +{ + int rc = -EINVAL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + + BUG_ON(!buffer_uptodate(bh)); + + if (!OCFS2_IS_VALID_DINODE(di)) { + ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n", + (unsigned long long)bh->b_blocknr, 7, + di->i_signature); + goto bail; + } + + if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) { + ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(di->i_blkno)); + goto bail; + } + + if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { + ocfs2_error(sb, + "Invalid dinode #%llu: OCFS2_VALID_FL not set\n", + (unsigned long long)bh->b_blocknr); + goto bail; + } + + if (le32_to_cpu(di->i_fs_generation) != + OCFS2_SB(sb)->fs_generation) { + ocfs2_error(sb, + "Invalid dinode #%llu: fs_generation is %u\n", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(di->i_fs_generation)); + goto bail; + } + + rc = 0; + +bail: + return rc; +} + +int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, + int flags) +{ + int rc; + struct buffer_head *tmp = *bh; + + rc = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, &tmp, + flags); + if (rc) + goto out; + + if (!(flags & OCFS2_BH_READAHEAD)) { + rc = ocfs2_validate_inode_block(inode->i_sb, tmp); + if (rc) { + brelse(tmp); + goto out; + } + } + + /* If ocfs2_read_blocks() got us a new bh, pass it up. */ + if (!*bh) + *bh = tmp; + +out: + return rc; +} + +int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh) +{ + return ocfs2_read_inode_block_full(inode, bh, 0); +} diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 2f37af9bcc4a..b79c371a9d27 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -128,8 +128,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags, int sysfile_type); int ocfs2_inode_init_private(struct inode *inode); int ocfs2_inode_revalidate(struct dentry *dentry); -int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, - int create_ino); +void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, + int create_ino); void ocfs2_read_inode(struct inode *inode); void ocfs2_read_inode2(struct inode *inode, void *opaque); ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf, @@ -153,4 +153,16 @@ static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode) return (blkcnt_t)(OCFS2_I(inode)->ip_clusters << c_to_s_bits); } +/* Validate that a bh contains a valid inode */ +int ocfs2_validate_inode_block(struct super_block *sb, + struct buffer_head *bh); +/* + * Read an inode block into *bh. If *bh is NULL, a bh will be allocated. + * This is a cached read. The inode will be validated with + * ocfs2_validate_inode_block(). 
+ */ +int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh); +/* The same, but can be passed OCFS2_BH_* flags */ +int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, + int flags); #endif /* OCFS2_INODE_H */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 99fe9d584f3c..877aaa05e199 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1135,8 +1135,7 @@ static int ocfs2_read_journal_inode(struct ocfs2_super *osb, } SET_INODE_JOURNAL(inode); - status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, bh, - OCFS2_BH_IGNORE_CACHE); + status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 687b28713c32..19cfb1b9ce09 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -248,8 +248,8 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb) goto bail; } - status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, - &alloc_bh, OCFS2_BH_IGNORE_CACHE); + status = ocfs2_read_inode_block_full(inode, &alloc_bh, + OCFS2_BH_IGNORE_CACHE); if (status < 0) { mlog_errno(status); goto bail; @@ -459,8 +459,8 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, mutex_lock(&inode->i_mutex); - status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, - &alloc_bh, OCFS2_BH_IGNORE_CACHE); + status = ocfs2_read_inode_block_full(inode, &alloc_bh, + OCFS2_BH_IGNORE_CACHE); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 765514512096..0134bafdab9e 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -531,15 +531,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, goto leave; } - if (ocfs2_populate_inode(inode, fe, 1) < 0) { - mlog(ML_ERROR, "populate inode failed! bh->b_blocknr=%llu, " - "i_blkno=%llu, i_ino=%lu\n", - (unsigned long long)(*new_fe_bh)->b_blocknr, - (unsigned long long)le64_to_cpu(fe->i_blkno), - inode->i_ino); - BUG(); - } - + ocfs2_populate_inode(inode, fe, 1); ocfs2_inode_set_new(osb, inode); if (!ocfs2_mount_local(osb)) { status = ocfs2_create_new_inode_locks(inode); @@ -1864,9 +1856,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); - status = ocfs2_read_block(orphan_dir_inode, - OCFS2_I(orphan_dir_inode)->ip_blkno, - &orphan_dir_bh); + status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh); if (status < 0) { mlog_errno(status); goto leave; diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index cbd03dfdc7b9..ed0a0cfd68d2 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -84,7 +84,7 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode, mlog_entry_void(); - status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, bh); + status = ocfs2_read_inode_block(inode, bh); if (status < 0) { mlog_errno(status); link = ERR_PTR(status); -- cgit From 10995aa2451afa20b721cc7de856cae1a13dba57 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:12 -0800 Subject: ocfs2: Morph the haphazard OCFS2_IS_VALID_DINODE() checks. Random places in the code would check a dinode bh to see if it was valid. Not only did they do different levels of validation, they handled errors in different ways. The previous commit unified inode block reads, validating all block reads in the same place. Thus, these haphazard checks are no longer necessary. Rather than eliminate them, however, we change them to BUG_ON() checks. 
This ensures the assumptions remain true. All of the code paths to these checks have been audited to ensure they come from a validated inode read. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 50 +++++++++++++++++++++----------------------------- fs/ocfs2/journal.c | 17 +++++------------ fs/ocfs2/ocfs2.h | 8 -------- fs/ocfs2/resize.c | 10 ++++------ fs/ocfs2/suballoc.c | 36 ++++++++++++++++-------------------- 5 files changed, 46 insertions(+), 75 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 9c598adc9475..320545b9fe12 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -187,20 +187,12 @@ static int ocfs2_dinode_insert_check(struct inode *inode, static int ocfs2_dinode_sanity_check(struct inode *inode, struct ocfs2_extent_tree *et) { - int ret = 0; - struct ocfs2_dinode *di; + struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); + BUG_ON(!OCFS2_IS_VALID_DINODE(di)); - di = et->et_object; - if (!OCFS2_IS_VALID_DINODE(di)) { - ret = -EIO; - ocfs2_error(inode->i_sb, - "Inode %llu has invalid path root", - (unsigned long long)OCFS2_I(inode)->ip_blkno); - } - - return ret; + return 0; } static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et) @@ -5380,13 +5372,13 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); di = (struct ocfs2_dinode *) tl_bh->b_data; - tl = &di->id2.i_dealloc; - if (!OCFS2_IS_VALID_DINODE(di)) { - OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); - status = -EIO; - goto bail; - } + /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated + * by the underlying call to ocfs2_read_inode_block(), so any + * corruption is a code bug */ + BUG_ON(!OCFS2_IS_VALID_DINODE(di)); + + tl = &di->id2.i_dealloc; tl_count = le16_to_cpu(tl->tl_count); mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || tl_count == 0, @@ -5536,13 +5528,13 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) BUG_ON(mutex_trylock(&tl_inode->i_mutex)); di = (struct ocfs2_dinode *) tl_bh->b_data; - tl = &di->id2.i_dealloc; - if (!OCFS2_IS_VALID_DINODE(di)) { - OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); - status = -EIO; - goto out; - } + /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated + * by the underlying call to ocfs2_read_inode_block(), so any + * corruption is a code bug */ + BUG_ON(!OCFS2_IS_VALID_DINODE(di)); + + tl = &di->id2.i_dealloc; num_to_flush = le16_to_cpu(tl->tl_used); mlog(0, "Flush %u records from truncate log #%llu\n", num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); @@ -5697,13 +5689,13 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, } di = (struct ocfs2_dinode *) tl_bh->b_data; - tl = &di->id2.i_dealloc; - if (!OCFS2_IS_VALID_DINODE(di)) { - OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di); - status = -EIO; - goto bail; - } + /* tl_bh is loaded from ocfs2_get_truncate_log_info(). 
It's + * validated by the underlying call to ocfs2_read_inode_block(), + * so any corruption is a code bug */ + BUG_ON(!OCFS2_IS_VALID_DINODE(di)); + + tl = &di->id2.i_dealloc; if (le16_to_cpu(tl->tl_used)) { mlog(0, "We'll have %u logs to recover\n", le16_to_cpu(tl->tl_used)); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 877aaa05e199..9223bfcca3ba 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -587,17 +587,11 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, mlog_entry_void(); fe = (struct ocfs2_dinode *)bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - /* This is called from startup/shutdown which will - * handle the errors in a specific manner, so no need - * to call ocfs2_error() here. */ - mlog(ML_ERROR, "Journal dinode %llu has invalid " - "signature: %.*s", - (unsigned long long)le64_to_cpu(fe->i_blkno), 7, - fe->i_signature); - status = -EIO; - goto out; - } + + /* The journal bh on the osb always comes from ocfs2_journal_init() + * and was validated there inside ocfs2_inode_lock_full(). It's a + * code bug if we mess it up. */ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); flags = le32_to_cpu(fe->id1.journal1.ij_flags); if (dirty) @@ -613,7 +607,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, if (status < 0) mlog_errno(status); -out: mlog_exit(status); return status; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 25d07ff1d3cd..467bdb6f71e1 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -444,14 +444,6 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) #define OCFS2_IS_VALID_DINODE(ptr) \ (!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE)) -#define OCFS2_RO_ON_INVALID_DINODE(__sb, __di) do { \ - typeof(__di) ____di = (__di); \ - ocfs2_error((__sb), \ - "Dinode # %llu has bad signature %.*s", \ - (unsigned long long)le64_to_cpu((____di)->i_blkno), 7, \ - (____di)->i_signature); \ -} while (0) - #define OCFS2_IS_VALID_EXTENT_BLOCK(ptr) \ (!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE)) diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index ffd48db229a7..739d452f6174 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -314,6 +314,10 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) fe = (struct ocfs2_dinode *)main_bm_bh->b_data; + /* main_bm_bh is validated by inode read inside ocfs2_inode_lock(), + * so any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); + if (le16_to_cpu(fe->id2.i_chain.cl_cpg) != ocfs2_group_bitmap_size(osb->sb) * 8) { mlog(ML_ERROR, "The disk is too old and small. " @@ -322,12 +326,6 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) goto out_unlock; } - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(main_bm_inode->i_sb, fe); - ret = -EIO; - goto out_unlock; - } - first_new_cluster = le32_to_cpu(fe->i_clusters); lgd_blkno = ocfs2_which_cluster_group(main_bm_inode, first_new_cluster - 1); diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index c5ff18b46b57..95d432b694e4 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -441,11 +441,11 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, ac->ac_alloc_slot = slot; fe = (struct ocfs2_dinode *) bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); - status = -EIO; - goto bail; - } + + /* The bh was validated by the inode read inside + * ocfs2_inode_lock(). Any corruption is a code bug. 
*/ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); + if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) { ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu", (unsigned long long)le64_to_cpu(fe->i_blkno)); @@ -931,11 +931,6 @@ static int ocfs2_relink_block_group(handle_t *handle, struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); - status = -EIO; - goto out; - } if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); status = -EIO; @@ -1392,11 +1387,11 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, BUG_ON(!ac->ac_bh); fe = (struct ocfs2_dinode *) ac->ac_bh->b_data; - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe); - status = -EIO; - goto bail; - } + + /* The bh was validated by the inode read during + * ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); + if (le32_to_cpu(fe->id1.bitmap1.i_used) >= le32_to_cpu(fe->id1.bitmap1.i_total)) { ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used " @@ -1782,11 +1777,12 @@ int ocfs2_free_suballoc_bits(handle_t *handle, mlog_entry_void(); - if (!OCFS2_IS_VALID_DINODE(fe)) { - OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); - status = -EIO; - goto bail; - } + /* The alloc_bh comes from ocfs2_free_dinode() or + * ocfs2_free_clusters(). The callers have all locked the + * allocator and gotten alloc_bh from the lock call. This + * validates the dinode buffer. Any corruption that has happended + * is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n", -- cgit From 57e3e7971136003c96766346049aa73b82cab079 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:13 -0800 Subject: ocfs2: Consolidate validation of group descriptors. Currently the validation of group descriptors is directly duplicated so that one version can error the filesystem and the other (resize) can just report the problem. Consolidate to one function that takes a boolean. Wrap that function with the old call for the old users. This is in preparation for lifting the read+validate step into a single function. 
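As a side note for readers of this series: the shape of the consolidation is easy to show outside the diff. What follows is a stand-alone C model of the pattern only, with invented struct and function names rather than the real ocfs2 ones: a single validator takes a flag that decides whether a problem is merely logged or reported as fatal, and the old entry point survives as a thin wrapper that preserves the fatal behaviour for existing callers.

    #include <stdio.h>

    struct group_desc {
        unsigned long long blkno;
        unsigned short bits;
        unsigned short free_bits;
    };

    /* Stand-in for the mlog(ML_ERROR, ...) vs. ocfs2_error() choice: the
     * first only complains, the second would also go read-only. */
    static void report(int clean_error, unsigned long long blkno,
                       const char *msg)
    {
        if (clean_error)
            fprintf(stderr, "warning: group %llu: %s\n", blkno, msg);
        else
            fprintf(stderr, "fatal (would go read-only): group %llu: %s\n",
                    blkno, msg);
    }

    /* One validator, one flag deciding how loudly to fail. */
    static int validate_group_desc(const struct group_desc *gd,
                                   int clean_error)
    {
        if (gd->free_bits > gd->bits) {
            report(clean_error, gd->blkno,
                   "claims more free bits than it has bits");
            return -1;    /* the kernel code returns -EINVAL here */
        }
        return 0;
    }

    /* The old entry point becomes a wrapper keeping the fatal behaviour. */
    static int check_group_desc(const struct group_desc *gd)
    {
        return validate_group_desc(gd, 0);
    }

    int main(void)
    {
        struct group_desc bad = { .blkno = 42, .bits = 8, .free_bits = 9 };

        validate_group_desc(&bad, 1);    /* resize-style check: log only */
        check_group_desc(&bad);          /* regular callers: fatal report */
        return 0;
    }

The actual patch expresses the same choice with a local do_error() macro that picks between mlog(ML_ERROR, ...) and ocfs2_error(), so each check is written only once.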
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/resize.c | 40 ++++++----------------------- fs/ocfs2/suballoc.c | 74 +++++++++++++++++++++++++++++++---------------------- fs/ocfs2/suballoc.h | 20 ++++++++++++--- 3 files changed, 68 insertions(+), 66 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 739d452f6174..a2de32a317ad 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -396,41 +396,16 @@ static int ocfs2_check_new_group(struct inode *inode, struct buffer_head *group_bh) { int ret; - struct ocfs2_group_desc *gd; + struct ocfs2_group_desc *gd = + (struct ocfs2_group_desc *)group_bh->b_data; u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc); - unsigned int max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * - le16_to_cpu(di->id2.i_chain.cl_bpc); + ret = ocfs2_validate_group_descriptor(inode->i_sb, di, gd, 1); + if (ret) + goto out; - gd = (struct ocfs2_group_desc *)group_bh->b_data; - - ret = -EIO; - if (!OCFS2_IS_VALID_GROUP_DESC(gd)) - mlog(ML_ERROR, "Group descriptor # %llu isn't valid.\n", - (unsigned long long)le64_to_cpu(gd->bg_blkno)); - else if (di->i_blkno != gd->bg_parent_dinode) - mlog(ML_ERROR, "Group descriptor # %llu has bad parent " - "pointer (%llu, expected %llu)\n", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), - (unsigned long long)le64_to_cpu(di->i_blkno)); - else if (le16_to_cpu(gd->bg_bits) > max_bits) - mlog(ML_ERROR, "Group descriptor # %llu has bit count of %u\n", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits)); - else if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) - mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " - "claims that %u are free\n", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits), - le16_to_cpu(gd->bg_free_bits_count)); - else if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) - mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " - "max bitmap bits of %u\n", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits), - 8 * le16_to_cpu(gd->bg_size)); - else if (le16_to_cpu(gd->bg_chain) != input->chain) + ret = -EINVAL; + if (le16_to_cpu(gd->bg_chain) != input->chain) mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u " "while input has %u set.\n", (unsigned long long)le64_to_cpu(gd->bg_blkno), @@ -449,6 +424,7 @@ static int ocfs2_check_new_group(struct inode *inode, else ret = 0; +out: return ret; } diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 95d432b694e4..ddba97dc06a0 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -146,59 +146,71 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) } /* somewhat more expensive than our other checks, so use sparingly. */ -int ocfs2_check_group_descriptor(struct super_block *sb, - struct ocfs2_dinode *di, - struct ocfs2_group_desc *gd) +int ocfs2_validate_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd, + int clean_error) { unsigned int max_bits; +#define do_error(fmt, ...) 
\ + do{ \ + if (clean_error) \ + mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \ + else \ + ocfs2_error(sb, fmt, ##__VA_ARGS__); \ + } while (0) + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); - return -EIO; + do_error("Group Descriptor #%llu has bad signature %.*s", + (unsigned long long)le64_to_cpu(gd->bg_blkno), 7, + gd->bg_signature); + return -EINVAL; } if (di->i_blkno != gd->bg_parent_dinode) { - ocfs2_error(sb, "Group descriptor # %llu has bad parent " - "pointer (%llu, expected %llu)", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), - (unsigned long long)le64_to_cpu(di->i_blkno)); - return -EIO; + do_error("Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + return -EINVAL; } max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); if (le16_to_cpu(gd->bg_bits) > max_bits) { - ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits)); - return -EIO; + do_error("Group descriptor # %llu has bit count of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + return -EINVAL; } if (le16_to_cpu(gd->bg_chain) >= le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { - ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_chain)); - return -EIO; + do_error("Group descriptor # %llu has bad chain %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain)); + return -EINVAL; } if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { - ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " - "claims that %u are free", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits), - le16_to_cpu(gd->bg_free_bits_count)); - return -EIO; + do_error("Group descriptor # %llu has bit count %u but " + "claims that %u are free", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EINVAL; } if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { - ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " - "max bitmap bits of %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), - le16_to_cpu(gd->bg_bits), - 8 * le16_to_cpu(gd->bg_size)); - return -EIO; + do_error("Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EINVAL; } +#undef do_error return 0; } diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 4df159d8f450..7adfcc478bdb 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -165,9 +165,23 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac); u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster); /* somewhat more expensive than our other checks, so use sparingly. */ -int ocfs2_check_group_descriptor(struct super_block *sb, - struct ocfs2_dinode *di, - struct ocfs2_group_desc *gd); +/* + * By default, ocfs2_validate_group_descriptor() calls ocfs2_error() when it + * finds a problem. A caller that wants to check a group descriptor + * without going readonly passes a nonzero clean_error. 
This is only + * resize, really. + */ +int ocfs2_validate_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd, + int clean_error); +static inline int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd) +{ + return ocfs2_validate_group_descriptor(sb, di, gd, 0); +} + int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et, u32 clusters_to_add, u32 extents_to_split, struct ocfs2_alloc_context **data_ac, -- cgit From 68f64d471be38631d7196b938d9809802dd467fa Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:14 -0800 Subject: ocfs2: Wrap group descriptor reads in a dedicated function. We have a clean call for validating group descriptors, but every place that wants the always does a read_block()+validate() call pair. Create a toplevel ocfs2_read_group_descriptor() that does the right thing. This allows us to leverage the single call point later for fancier handling. We also add validation of gd->bg_generation against the superblock and gd->bg_blkno against the block we thought we read. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/resize.c | 12 ++---- fs/ocfs2/suballoc.c | 108 +++++++++++++++++++++++++++++++--------------------- fs/ocfs2/suballoc.h | 19 ++++----- 3 files changed, 78 insertions(+), 61 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index a2de32a317ad..252baff5eb84 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -330,20 +330,14 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) lgd_blkno = ocfs2_which_cluster_group(main_bm_inode, first_new_cluster - 1); - ret = ocfs2_read_block(main_bm_inode, lgd_blkno, &group_bh); + ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno, + &group_bh); if (ret < 0) { mlog_errno(ret); goto out_unlock; } - group = (struct ocfs2_group_desc *)group_bh->b_data; - ret = ocfs2_check_group_descriptor(inode->i_sb, fe, group); - if (ret) { - mlog_errno(ret); - goto out_unlock; - } - cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc); if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters > le16_to_cpu(fe->id2.i_chain.cl_cpg)) { @@ -400,7 +394,7 @@ static int ocfs2_check_new_group(struct inode *inode, (struct ocfs2_group_desc *)group_bh->b_data; u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc); - ret = ocfs2_validate_group_descriptor(inode->i_sb, di, gd, 1); + ret = ocfs2_validate_group_descriptor(inode->i_sb, di, group_bh, 1); if (ret) goto out; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index ddba97dc06a0..797f509d7250 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -145,13 +145,13 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); } -/* somewhat more expensive than our other checks, so use sparingly. */ int ocfs2_validate_group_descriptor(struct super_block *sb, struct ocfs2_dinode *di, - struct ocfs2_group_desc *gd, + struct buffer_head *bh, int clean_error) { unsigned int max_bits; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; #define do_error(fmt, ...) 
\ do{ \ @@ -162,16 +162,32 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, } while (0) if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { - do_error("Group Descriptor #%llu has bad signature %.*s", - (unsigned long long)le64_to_cpu(gd->bg_blkno), 7, + do_error("Group descriptor #%llu has bad signature %.*s", + (unsigned long long)bh->b_blocknr, 7, gd->bg_signature); return -EINVAL; } + if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) { + do_error("Group descriptor #%llu has an invalid bg_blkno " + "of %llu", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(gd->bg_blkno)); + return -EINVAL; + } + + if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) { + do_error("Group descriptor #%llu has an invalid " + "fs_generation of #%u", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(gd->bg_generation)); + return -EINVAL; + } + if (di->i_blkno != gd->bg_parent_dinode) { - do_error("Group descriptor # %llu has bad parent " + do_error("Group descriptor #%llu has bad parent " "pointer (%llu, expected %llu)", - (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), (unsigned long long)le64_to_cpu(di->i_blkno)); return -EINVAL; @@ -179,33 +195,33 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); if (le16_to_cpu(gd->bg_bits) > max_bits) { - do_error("Group descriptor # %llu has bit count of %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), + do_error("Group descriptor #%llu has bit count of %u", + (unsigned long long)bh->b_blocknr, le16_to_cpu(gd->bg_bits)); return -EINVAL; } if (le16_to_cpu(gd->bg_chain) >= le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { - do_error("Group descriptor # %llu has bad chain %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), + do_error("Group descriptor #%llu has bad chain %u", + (unsigned long long)bh->b_blocknr, le16_to_cpu(gd->bg_chain)); return -EINVAL; } if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { - do_error("Group descriptor # %llu has bit count %u but " + do_error("Group descriptor #%llu has bit count %u but " "claims that %u are free", - (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)bh->b_blocknr, le16_to_cpu(gd->bg_bits), le16_to_cpu(gd->bg_free_bits_count)); return -EINVAL; } if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { - do_error("Group descriptor # %llu has bit count %u but " + do_error("Group descriptor #%llu has bit count %u but " "max bitmap bits of %u", - (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)bh->b_blocknr, le16_to_cpu(gd->bg_bits), 8 * le16_to_cpu(gd->bg_size)); return -EINVAL; @@ -215,6 +231,30 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, return 0; } +int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di, + u64 gd_blkno, struct buffer_head **bh) +{ + int rc; + struct buffer_head *tmp = *bh; + + rc = ocfs2_read_block(inode, gd_blkno, &tmp); + if (rc) + goto out; + + rc = ocfs2_validate_group_descriptor(inode->i_sb, di, tmp, 0); + if (rc) { + brelse(tmp); + goto out; + } + + /* If ocfs2_read_block() got us a new bh, pass it up. 
*/ + if (!*bh) + *bh = tmp; + +out: + return rc; +} + static int ocfs2_block_group_fill(handle_t *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, @@ -1177,21 +1217,17 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, u16 found; struct buffer_head *group_bh = NULL; struct ocfs2_group_desc *gd; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data; struct inode *alloc_inode = ac->ac_inode; - ret = ocfs2_read_block(alloc_inode, gd_blkno, &group_bh); + ret = ocfs2_read_group_descriptor(alloc_inode, di, gd_blkno, + &group_bh); if (ret < 0) { mlog_errno(ret); return ret; } gd = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); - ret = -EIO; - goto out; - } - ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, ac->ac_max_block, bit_off, &found); if (ret < 0) { @@ -1248,19 +1284,14 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, bits_wanted, chain, (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno); - status = ocfs2_read_block(alloc_inode, - le64_to_cpu(cl->cl_recs[chain].c_blkno), - &group_bh); + status = ocfs2_read_group_descriptor(alloc_inode, fe, + le64_to_cpu(cl->cl_recs[chain].c_blkno), + &group_bh); if (status < 0) { mlog_errno(status); goto bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); - if (status) { - mlog_errno(status); - goto bail; - } status = -ENOSPC; /* for now, the chain search is a bit simplistic. We just use @@ -1278,18 +1309,13 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, next_group = le64_to_cpu(bg->bg_next_group); prev_group_bh = group_bh; group_bh = NULL; - status = ocfs2_read_block(alloc_inode, - next_group, &group_bh); + status = ocfs2_read_group_descriptor(alloc_inode, fe, + next_group, &group_bh); if (status < 0) { mlog_errno(status); goto bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); - if (status) { - mlog_errno(status); - goto bail; - } } if (status < 0) { if (status != -ENOSPC) @@ -1801,18 +1827,14 @@ int ocfs2_free_suballoc_bits(handle_t *handle, (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count, (unsigned long long)bg_blkno, start_bit); - status = ocfs2_read_block(alloc_inode, bg_blkno, &group_bh); + status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno, + &group_bh); if (status < 0) { mlog_errno(status); goto bail; } - group = (struct ocfs2_group_desc *) group_bh->b_data; - status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); - if (status) { - mlog_errno(status); - goto bail; - } + BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); status = ocfs2_block_group_clear_bits(handle, alloc_inode, diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 7adfcc478bdb..43de4fd826d3 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -164,23 +164,24 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac); * and return that block offset. */ u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster); -/* somewhat more expensive than our other checks, so use sparingly. */ /* * By default, ocfs2_validate_group_descriptor() calls ocfs2_error() when it * finds a problem. A caller that wants to check a group descriptor * without going readonly passes a nonzero clean_error. This is only - * resize, really. + * resize, really. 
Everyone else should be using + * ocfs2_read_group_descriptor(). */ int ocfs2_validate_group_descriptor(struct super_block *sb, struct ocfs2_dinode *di, - struct ocfs2_group_desc *gd, + struct buffer_head *bh, int clean_error); -static inline int ocfs2_check_group_descriptor(struct super_block *sb, - struct ocfs2_dinode *di, - struct ocfs2_group_desc *gd) -{ - return ocfs2_validate_group_descriptor(sb, di, gd, 0); -} +/* + * Read a group descriptor block into *bh. If *bh is NULL, a bh will be + * allocated. This is a cached read. The descriptor will be validated with + * ocfs2_validate_group_descriptor(). + */ +int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di, + u64 gd_blkno, struct buffer_head **bh); int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et, u32 clusters_to_add, u32 extents_to_split, -- cgit From 4203530613280281868b3ca36c817530bca3825c Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:15 -0800 Subject: ocfs2: Morph the haphazard OCFS2_IS_VALID_GROUP_DESC() checks. Random places in the code would check a group descriptor bh to see if it was valid. The previous commit unified descriptor block reads, validating all block reads in the same place. Thus, these checks are no longer necessary. Rather than eliminate them, however, we change them to BUG_ON() checks. This ensures the assumptions remain true. All of the code paths to these checks have been audited to ensure they come from a validated descriptor read. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/ocfs2.h | 7 ------- fs/ocfs2/suballoc.c | 39 ++++++++++++++------------------------- 2 files changed, 14 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 467bdb6f71e1..82ba887afa0d 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -458,13 +458,6 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) #define OCFS2_IS_VALID_GROUP_DESC(ptr) \ (!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE)) -#define OCFS2_RO_ON_INVALID_GROUP_DESC(__sb, __gd) do { \ - typeof(__gd) ____gd = (__gd); \ - ocfs2_error((__sb), \ - "Group Descriptor # %llu has bad signature %.*s", \ - (unsigned long long)le64_to_cpu((____gd)->bg_blkno), 7, \ - (____gd)->bg_signature); \ -} while (0) #define OCFS2_IS_VALID_XATTR_BLOCK(ptr) \ (!strcmp((ptr)->xb_signature, OCFS2_XATTR_BLOCK_SIGNATURE)) diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 797f509d7250..766a00b26441 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -842,10 +842,9 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, int offset, start, found, status = 0; struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg); - return -EIO; - } + /* Callers got this descriptor from + * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); found = start = best_offset = best_size = 0; bitmap = bg->bg_bitmap; @@ -910,11 +909,9 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, mlog_entry_void(); - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; - goto bail; - } + /* All callers get the descriptor via + * ocfs2_read_group_descriptor(). Any corruption is a code bug. 
*/ + BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, @@ -983,16 +980,10 @@ static int ocfs2_relink_block_group(handle_t *handle, struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; - goto out; - } - if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg); - status = -EIO; - goto out; - } + /* The caller got these descriptors from + * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); + BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg)); mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n", (unsigned long long)le64_to_cpu(fe->i_blkno), chain, @@ -1055,7 +1046,7 @@ out_rollback: bg->bg_next_group = cpu_to_le64(bg_ptr); prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); } -out: + mlog_exit(status); return status; } @@ -1758,11 +1749,9 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle, mlog_entry_void(); - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; - goto bail; - } + /* The caller got this descriptor from + * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); mlog(0, "off = %u, num = %u\n", bit_off, num_bits); -- cgit From 5e96581a377fc6bd76e9b112da9aeb8a7ae8bf22 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:16 -0800 Subject: ocfs2: Wrap extent block reads in a dedicated function. We weren't consistently checking extent blocks after we read them. Most places checked the signature, but none checked h_blkno or h_fs_signature. Create a toplevel ocfs2_read_extent_block() that does the read and the validation. 
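For illustration, here is a stand-alone C sketch of the read-then-validate wrapper pattern this patch (and the ones that follow) introduces. The types, names and the fake in-memory "disk" are invented for the sketch; only the calling convention is meant to mirror the diff: the caller may pass an existing buffer or NULL, a buffer the wrapper allocated itself is dropped if validation fails, and the caller's pointer is filled in only on success (the "if (!*bh) *bh = tmp" convention).

    #include <stdlib.h>
    #include <string.h>

    struct block_buf {
        unsigned long long blkno;
        char data[32];
    };

    /* Toy "disk": only block 7 carries the expected signature. */
    static int read_raw_block(unsigned long long blkno, struct block_buf **buf)
    {
        struct block_buf *b = *buf ? *buf : malloc(sizeof(*b));

        if (!b)
            return -1;
        b->blkno = blkno;
        memset(b->data, 0, sizeof(b->data));
        if (blkno == 7)
            memcpy(b->data, "EXTBLK1", 8);
        *buf = b;
        return 0;
    }

    static int validate_extent_block(const struct block_buf *b)
    {
        return memcmp(b->data, "EXTBLK1", 8) ? -1 : 0;
    }

    /* The wrapper: read, validate, hand a new buffer up only on success. */
    static int read_extent_block(unsigned long long blkno,
                                 struct block_buf **buf)
    {
        struct block_buf *tmp = *buf;    /* NULL means "allocate one for me" */
        int rc = read_raw_block(blkno, &tmp);

        if (rc)
            return rc;
        rc = validate_extent_block(tmp);
        if (rc) {
            if (!*buf)
                free(tmp);    /* drop a buffer this call allocated itself */
            return rc;
        }
        if (!*buf)            /* mirrors "if (!*bh) *bh = tmp" in the diff */
            *buf = tmp;
        return 0;
    }

    int main(void)
    {
        struct block_buf *bh = NULL;

        if (read_extent_block(8, &bh) == 0)    /* bad block: bh stays NULL */
            return 1;
        if (read_extent_block(7, &bh) != 0)    /* good block: bh allocated */
            return 1;
        free(bh);
        return 0;
    }

ocfs2_read_inode_block() and ocfs2_read_group_descriptor() in the earlier patches follow the same shape.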
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 151 ++++++++++++++++++++++++++++++++------------------ fs/ocfs2/alloc.h | 8 +++ fs/ocfs2/extent_map.c | 23 ++------ fs/ocfs2/ocfs2.h | 8 --- 4 files changed, 111 insertions(+), 79 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 320545b9fe12..f430cc6e0f35 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -678,6 +678,66 @@ struct ocfs2_merge_ctxt { int c_split_covers_rec; }; +static int ocfs2_validate_extent_block(struct super_block *sb, + struct buffer_head *bh) +{ + struct ocfs2_extent_block *eb = + (struct ocfs2_extent_block *)bh->b_data; + + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + ocfs2_error(sb, + "Extent block #%llu has bad signature %.*s", + (unsigned long long)bh->b_blocknr, 7, + eb->h_signature); + return -EINVAL; + } + + if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) { + ocfs2_error(sb, + "Extent block #%llu has an invalid h_blkno " + "of %llu", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(eb->h_blkno)); + return -EINVAL; + } + + if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) { + ocfs2_error(sb, + "Extent block #%llu has an invalid " + "h_fs_generation of #%u", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(eb->h_fs_generation)); + return -EINVAL; + } + + return 0; +} + +int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, + struct buffer_head **bh) +{ + int rc; + struct buffer_head *tmp = *bh; + + rc = ocfs2_read_block(inode, eb_blkno, &tmp); + if (rc) + goto out; + + rc = ocfs2_validate_extent_block(inode->i_sb, tmp); + if (rc) { + brelse(tmp); + goto out; + } + + /* If ocfs2_read_block() got us a new bh, pass it up. */ + if (!*bh) + *bh = tmp; + +out: + return rc; +} + + /* * How many free extents have we got before we need more meta data? */ @@ -697,8 +757,7 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb, last_eb_blk = ocfs2_et_get_last_eb_blk(et); if (last_eb_blk) { - retval = ocfs2_read_block(inode, last_eb_blk, - &eb_bh); + retval = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh); if (retval < 0) { mlog_errno(retval); goto bail; @@ -900,11 +959,8 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, for(i = 0; i < new_blocks; i++) { bh = new_eb_bhs[i]; eb = (struct ocfs2_extent_block *) bh->b_data; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - status = -EIO; - goto bail; - } + /* ocfs2_create_new_meta_bhs() should create it right! */ + BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; status = ocfs2_journal_access(handle, inode, bh, @@ -1044,11 +1100,8 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, } eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - status = -EIO; - goto bail; - } + /* ocfs2_create_new_meta_bhs() should create it right! 
*/ + BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; root_el = et->et_root_el; @@ -1168,18 +1221,13 @@ static int ocfs2_find_branch_target(struct ocfs2_super *osb, brelse(bh); bh = NULL; - status = ocfs2_read_block(inode, blkno, &bh); + status = ocfs2_read_extent_block(inode, blkno, &bh); if (status < 0) { mlog_errno(status); goto bail; } eb = (struct ocfs2_extent_block *) bh->b_data; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - status = -EIO; - goto bail; - } el = &eb->h_list; if (le16_to_cpu(el->l_next_free_rec) < @@ -1532,7 +1580,7 @@ static int __ocfs2_find_path(struct inode *inode, brelse(bh); bh = NULL; - ret = ocfs2_read_block(inode, blkno, &bh); + ret = ocfs2_read_extent_block(inode, blkno, &bh); if (ret) { mlog_errno(ret); goto out; @@ -1540,11 +1588,6 @@ static int __ocfs2_find_path(struct inode *inode, eb = (struct ocfs2_extent_block *) bh->b_data; el = &eb->h_list; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - ret = -EIO; - goto out; - } if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { @@ -4089,8 +4132,15 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, le16_to_cpu(new_el->l_count)) { bh = path_leaf_bh(left_path); eb = (struct ocfs2_extent_block *)bh->b_data; - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, - eb); + ocfs2_error(inode->i_sb, + "Extent block #%llu has an " + "invalid l_next_free_rec of " + "%d. It should have " + "matched the l_count of %d", + (unsigned long long)le64_to_cpu(eb->h_blkno), + le16_to_cpu(new_el->l_next_free_rec), + le16_to_cpu(new_el->l_count)); + status = -EINVAL; goto out; } rec = &new_el->l_recs[ @@ -4139,8 +4189,12 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (le16_to_cpu(new_el->l_next_free_rec) <= 1) { bh = path_leaf_bh(right_path); eb = (struct ocfs2_extent_block *)bh->b_data; - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, - eb); + ocfs2_error(inode->i_sb, + "Extent block #%llu has an " + "invalid l_next_free_rec of %d", + (unsigned long long)le64_to_cpu(eb->h_blkno), + le16_to_cpu(new_el->l_next_free_rec)); + status = -EINVAL; goto out; } rec = &new_el->l_recs[1]; @@ -4286,7 +4340,9 @@ static int ocfs2_figure_insert_type(struct inode *inode, * ocfs2_figure_insert_type() and ocfs2_add_branch() * may want it later. 
*/ - ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), &bh); + ret = ocfs2_read_extent_block(inode, + ocfs2_et_get_last_eb_blk(et), + &bh); if (ret) { mlog_exit(ret); goto out; @@ -4752,20 +4808,15 @@ static int __ocfs2_mark_extent_written(struct inode *inode, if (path->p_tree_depth) { struct ocfs2_extent_block *eb; - ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), - &last_eb_bh); + ret = ocfs2_read_extent_block(inode, + ocfs2_et_get_last_eb_blk(et), + &last_eb_bh); if (ret) { mlog_exit(ret); goto out; } eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - ret = -EROFS; - goto out; - } - rightmost_el = &eb->h_list; } else rightmost_el = path_root_el(path); @@ -4910,8 +4961,9 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, depth = path->p_tree_depth; if (depth > 0) { - ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), - &last_eb_bh); + ret = ocfs2_read_extent_block(inode, + ocfs2_et_get_last_eb_blk(et), + &last_eb_bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -6231,11 +6283,10 @@ static int ocfs2_find_new_last_ext_blk(struct inode *inode, eb = (struct ocfs2_extent_block *) bh->b_data; el = &eb->h_list; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - ret = -EROFS; - goto out; - } + + /* ocfs2_find_leaf() gets the eb from ocfs2_read_extent_block(). + * Any corruption is a code bug. */ + BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); *new_last_eb = bh; get_bh(*new_last_eb); @@ -7140,20 +7191,14 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb, ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); if (fe->id2.i_list.l_tree_depth) { - status = ocfs2_read_block(inode, le64_to_cpu(fe->i_last_eb_blk), - &last_eb_bh); + status = ocfs2_read_extent_block(inode, + le64_to_cpu(fe->i_last_eb_blk), + &last_eb_bh); if (status < 0) { mlog_errno(status); goto bail; } eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - - brelse(last_eb_bh); - status = -EIO; - goto bail; - } } (*tc)->tc_last_eb_bh = last_eb_bh; diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 0fbf8fc55a49..59d37d1b7d4c 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -73,6 +73,14 @@ void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, struct buffer_head *bh, struct ocfs2_xattr_value_root *xv); +/* + * Read an extent block into *bh. If *bh is NULL, a bh will be + * allocated. This is a cached read. The extent block will be validated + * with ocfs2_validate_extent_block(). 
+ */ +int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, + struct buffer_head **bh); + struct ocfs2_alloc_context; int ocfs2_insert_extent(struct ocfs2_super *osb, handle_t *handle, diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index b686b31cf49c..0bd9d9698a24 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -293,7 +293,7 @@ static int ocfs2_last_eb_is_empty(struct inode *inode, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; - ret = ocfs2_read_block(inode, last_eb_blk, &eb_bh); + ret = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -302,12 +302,6 @@ static int ocfs2_last_eb_is_empty(struct inode *inode, eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; - if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { - ret = -EROFS; - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); - goto out; - } - if (el->l_tree_depth) { ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in " @@ -381,23 +375,16 @@ static int ocfs2_figure_hole_clusters(struct inode *inode, if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) goto no_more_extents; - ret = ocfs2_read_block(inode, - le64_to_cpu(eb->h_next_leaf_blk), - &next_eb_bh); + ret = ocfs2_read_extent_block(inode, + le64_to_cpu(eb->h_next_leaf_blk), + &next_eb_bh); if (ret) { mlog_errno(ret); goto out; } - next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data; - - if (!OCFS2_IS_VALID_EXTENT_BLOCK(next_eb)) { - ret = -EROFS; - OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, next_eb); - goto out; - } + next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data; el = &next_eb->h_list; - i = ocfs2_search_for_hole_index(el, v_cluster); } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 82ba887afa0d..f04b229fc757 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -447,14 +447,6 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) #define OCFS2_IS_VALID_EXTENT_BLOCK(ptr) \ (!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE)) -#define OCFS2_RO_ON_INVALID_EXTENT_BLOCK(__sb, __eb) do { \ - typeof(__eb) ____eb = (__eb); \ - ocfs2_error((__sb), \ - "Extent Block # %llu has bad signature %.*s", \ - (unsigned long long)le64_to_cpu((____eb)->h_blkno), 7, \ - (____eb)->h_signature); \ -} while (0) - #define OCFS2_IS_VALID_GROUP_DESC(ptr) \ (!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE)) -- cgit From a22305cc693254a2aa651e797875669112ef8635 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:17 -0800 Subject: ocfs2: Wrap dirblock reads in a dedicated function. We have ocfs2_bread() as a vestige of the original ext-based dir code. It's only used by directories, though. Turn it into ocfs2_read_dir_block(), with a prototype matching the other metadata read functions. It's set up to validate dirblocks when the time comes. 
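A rough stand-alone C model of what the new helper has to do conceptually, with invented names and a toy mapping standing in for ocfs2_extent_map_get_blocks(): translate the directory-relative (virtual) block number to a physical block, treat a hole as corruption, perform the read, and squash every failure to -EIO so callers keep seeing the error the old ocfs2_bread() gave them.

    #include <errno.h>
    #include <stdio.h>

    /* Toy extent map: virtual block i of the directory lives at physical
     * block i + 100, except block 3, which is a hole. */
    static int map_virtual_block(unsigned long long v_block,
                                 unsigned long long *p_block)
    {
        *p_block = (v_block == 3) ? 0 : v_block + 100;
        return 0;
    }

    static int read_physical_block(unsigned long long p_block)
    {
        (void)p_block;    /* a real implementation would read the device */
        return 0;
    }

    static int read_dir_block(unsigned long long v_block)
    {
        unsigned long long p_block;
        int rc = map_virtual_block(v_block, &p_block);

        if (!rc && p_block == 0) {
            fprintf(stderr, "directory has a hole at virtual block %llu\n",
                    v_block);
            rc = -EIO;
        }
        if (!rc)
            rc = read_physical_block(p_block);

        /* The old helper only ever reported -EIO, so keep that contract. */
        return rc ? -EIO : 0;
    }

    int main(void)
    {
        if (read_dir_block(0) != 0)    /* mapped block reads fine */
            return 1;
        return read_dir_block(3) == -EIO ? 0 : 1;    /* hole is an I/O error */
    }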
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/dir.c | 150 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 88 insertions(+), 62 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 5777045f1a67..c2f3fd93be5c 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -82,49 +82,6 @@ static int ocfs2_do_extend_dir(struct super_block *sb, struct ocfs2_alloc_context *meta_ac, struct buffer_head **new_bh); -static struct buffer_head *ocfs2_bread(struct inode *inode, - int block, int *err, int reada) -{ - struct buffer_head *bh = NULL; - int tmperr; - u64 p_blkno; - int readflags = 0; - - if (reada) - readflags |= OCFS2_BH_READAHEAD; - - if (((u64)block << inode->i_sb->s_blocksize_bits) >= - i_size_read(inode)) { - BUG_ON(!reada); - return NULL; - } - - down_read(&OCFS2_I(inode)->ip_alloc_sem); - tmperr = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, - NULL); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - if (tmperr < 0) { - mlog_errno(tmperr); - goto fail; - } - - tmperr = ocfs2_read_blocks(inode, p_blkno, 1, &bh, readflags); - if (tmperr < 0) - goto fail; - - tmperr = 0; - - *err = 0; - return bh; - -fail: - brelse(bh); - bh = NULL; - - *err = -EIO; - return NULL; -} - /* * bh passed here can be an inode block or a dir data block, depending * on the inode inline data flag. @@ -250,6 +207,76 @@ out: return NULL; } +static int ocfs2_validate_dir_block(struct super_block *sb, + struct buffer_head *bh) +{ + /* + * Nothing yet. We don't validate dirents here, that's handled + * in-place when the code walks them. + */ + + return 0; +} + +/* + * This function forces all errors to -EIO for consistency with its + * predecessor, ocfs2_bread(). We haven't audited what returning the + * real error codes would do to callers. We log the real codes with + * mlog_errno() before we squash them. + */ +static int ocfs2_read_dir_block(struct inode *inode, u64 v_block, + struct buffer_head **bh, int flags) +{ + int rc = 0; + struct buffer_head *tmp = *bh; + u64 p_blkno; + + if (((u64)v_block << inode->i_sb->s_blocksize_bits) >= + i_size_read(inode)) { + BUG_ON(!(flags & OCFS2_BH_READAHEAD)); + goto out; + } + + down_read(&OCFS2_I(inode)->ip_alloc_sem); + rc = ocfs2_extent_map_get_blocks(inode, v_block, &p_blkno, NULL, + NULL); + up_read(&OCFS2_I(inode)->ip_alloc_sem); + if (rc) { + mlog_errno(rc); + goto out; + } + + if (!p_blkno) { + rc = -EIO; + mlog(ML_ERROR, + "Directory #%llu contains a hole at offset %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)v_block << inode->i_sb->s_blocksize_bits); + goto out; + } + + rc = ocfs2_read_blocks(inode, p_blkno, 1, &tmp, flags); + if (rc) { + mlog_errno(rc); + goto out; + } + + if (!(flags & OCFS2_BH_READAHEAD)) { + rc = ocfs2_validate_dir_block(inode->i_sb, tmp); + if (rc) { + brelse(tmp); + goto out; + } + } + + /* If ocfs2_read_blocks() got us a new bh, pass it up. */ + if (!*bh) + *bh = tmp; + +out: + return rc ? -EIO : 0; +} + static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_entry **res_dir) @@ -296,15 +323,17 @@ restart: } num++; - bh = ocfs2_bread(dir, b++, &err, 1); + bh = NULL; + err = ocfs2_read_dir_block(dir, b++, &bh, + OCFS2_BH_READAHEAD); bh_use[ra_max] = bh; } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; - if (ocfs2_read_block(dir, block, &bh)) { + if (ocfs2_read_dir_block(dir, block, &bh, 0)) { /* read error, skip block & hope for the best. 
- * ocfs2_read_block() has released the bh. */ + * ocfs2_read_dir_block() has released the bh. */ ocfs2_error(dir->i_sb, "reading directory %llu, " "offset %lu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, @@ -724,7 +753,6 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, int i, stored; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; - int err; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; @@ -735,12 +763,8 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, while (!error && !stored && *f_pos < i_size_read(inode)) { blk = (*f_pos) >> sb->s_blocksize_bits; - bh = ocfs2_bread(inode, blk, &err, 0); - if (!bh) { - mlog(ML_ERROR, - "directory #%llu contains a hole at offset %lld\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, - *f_pos); + if (ocfs2_read_dir_block(inode, blk, &bh, 0)) { + /* Skip the corrupt dirblock and keep trying */ *f_pos += sb->s_blocksize - offset; continue; } @@ -754,8 +778,10 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) { for (i = ra_sectors >> (sb->s_blocksize_bits - 9); i > 0; i--) { - tmp = ocfs2_bread(inode, ++blk, &err, 1); - brelse(tmp); + tmp = NULL; + if (!ocfs2_read_dir_block(inode, ++blk, &tmp, + OCFS2_BH_READAHEAD)) + brelse(tmp); } last_ra_blk = blk; ra_sectors = 8; @@ -828,6 +854,7 @@ revalidate: } offset = 0; brelse(bh); + bh = NULL; } stored = 0; @@ -1680,8 +1707,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, struct super_block *sb = dir->i_sb; int status; - bh = ocfs2_bread(dir, 0, &status, 0); - if (!bh) { + status = ocfs2_read_dir_block(dir, 0, &bh, 0); + if (status) { mlog_errno(status); goto bail; } @@ -1702,11 +1729,10 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, status = -ENOSPC; goto bail; } - bh = ocfs2_bread(dir, - offset >> sb->s_blocksize_bits, - &status, - 0); - if (!bh) { + status = ocfs2_read_dir_block(dir, + offset >> sb->s_blocksize_bits, + &bh, 0); + if (status) { mlog_errno(status); goto bail; } -- cgit From 4ae1d69bedc8d174cb8a558694607e013157cde1 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:18 -0800 Subject: ocfs2: Wrap xattr block reads in a dedicated function We weren't consistently checking xattr blocks after we read them. Most places checked the signature, but none checked xb_blkno or xb_fs_signature. Create a toplevel ocfs2_read_xattr_block() that does the read and the validation. 
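To make the new checks concrete, here is a tiny self-contained C model, again with toy types and an invented signature value rather than the real on-disk format. It mirrors the three tests the validator in the diff performs: the signature, the block number recorded inside the block against the block that was actually read, and the filesystem generation (xb_fs_generation in the diff).

    #include <string.h>

    struct xattr_block {
        char signature[8];
        unsigned long long blkno;
        unsigned int fs_generation;
    };

    static int validate_xattr_block(const struct xattr_block *xb,
                                    unsigned long long read_blkno,
                                    unsigned int sb_generation)
    {
        if (memcmp(xb->signature, "XB_SIG", 7))
            return -1;    /* wrong signature: not an xattr block at all */
        if (xb->blkno != read_blkno)
            return -1;    /* block claims to live at a different offset */
        if (xb->fs_generation != sb_generation)
            return -1;    /* left over from an older mkfs of this device */
        return 0;
    }

    int main(void)
    {
        struct xattr_block xb = { "XB_SIG", 200, 0xabcd };

        if (validate_xattr_block(&xb, 200, 0xabcd))
            return 1;                                       /* should pass */
        return validate_xattr_block(&xb, 201, 0xabcd) ? 0 : 1; /* must fail */
    }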
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 94 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 70 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3cc8385f9738..ef4aa5482d01 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -314,6 +314,65 @@ static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest, } } +static int ocfs2_validate_xattr_block(struct super_block *sb, + struct buffer_head *bh) +{ + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)bh->b_data; + + mlog(0, "Validating xattr block %llu\n", + (unsigned long long)bh->b_blocknr); + + if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { + ocfs2_error(sb, + "Extended attribute block #%llu has bad " + "signature %.*s", + (unsigned long long)bh->b_blocknr, 7, + xb->xb_signature); + return -EINVAL; + } + + if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) { + ocfs2_error(sb, + "Extended attribute block #%llu has an " + "invalid xb_blkno of %llu", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(xb->xb_blkno)); + return -EINVAL; + } + + if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) { + ocfs2_error(sb, + "Extended attribute block #%llu has an invalid " + "xb_fs_generation of #%u", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(xb->xb_fs_generation)); + return -EINVAL; + } + + return 0; +} + +static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno, + struct buffer_head **bh) +{ + int rc; + struct buffer_head *tmp = *bh; + + rc = ocfs2_read_block(inode, xb_blkno, &tmp); + if (!rc) { + rc = ocfs2_validate_xattr_block(inode->i_sb, tmp); + if (rc) + brelse(tmp); + } + + /* If ocfs2_read_block() got us a new bh, pass it up. 
*/ + if (!rc && !*bh) + *bh = tmp; + + return rc; +} + static inline const char *ocfs2_xattr_prefix(int name_index) { struct xattr_handler *handler = NULL; @@ -739,18 +798,14 @@ static int ocfs2_xattr_block_list(struct inode *inode, if (!di->i_xattr_loc) return ret; - ret = ocfs2_read_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); + ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), + &blk_bh); if (ret < 0) { mlog_errno(ret); return ret; } xb = (struct ocfs2_xattr_block *)blk_bh->b_data; - if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { - ret = -EIO; - goto cleanup; - } - if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; ret = ocfs2_xattr_list_entries(inode, header, @@ -760,7 +815,7 @@ static int ocfs2_xattr_block_list(struct inode *inode, ret = ocfs2_xattr_tree_list_index_block(inode, xt, buffer, buffer_size); } -cleanup: + brelse(blk_bh); return ret; @@ -1693,24 +1748,19 @@ static int ocfs2_xattr_free_block(struct inode *inode, u64 blk, bg_blkno; u16 bit; - ret = ocfs2_read_block(inode, block, &blk_bh); + ret = ocfs2_read_xattr_block(inode, block, &blk_bh); if (ret < 0) { mlog_errno(ret); goto out; } - xb = (struct ocfs2_xattr_block *)blk_bh->b_data; - if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { - ret = -EIO; - goto out; - } - ret = ocfs2_xattr_block_remove(inode, blk_bh); if (ret < 0) { mlog_errno(ret); goto out; } + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; blk = le64_to_cpu(xb->xb_blkno); bit = le16_to_cpu(xb->xb_suballoc_bit); bg_blkno = ocfs2_which_suballoc_group(blk, bit); @@ -1950,19 +2000,15 @@ static int ocfs2_xattr_block_find(struct inode *inode, if (!di->i_xattr_loc) return ret; - ret = ocfs2_read_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); + ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), + &blk_bh); if (ret < 0) { mlog_errno(ret); return ret; } - xb = (struct ocfs2_xattr_block *)blk_bh->b_data; - if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { - ret = -EIO; - goto cleanup; - } - xs->xattr_bh = blk_bh; + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { xs->header = &xb->xb_attrs.xb_header; @@ -2259,9 +2305,9 @@ meta_guess: /* calculate metadata allocation. */ if (di->i_xattr_loc) { if (!xbs->xattr_bh) { - ret = ocfs2_read_block(inode, - le64_to_cpu(di->i_xattr_loc), - &bh); + ret = ocfs2_read_xattr_block(inode, + le64_to_cpu(di->i_xattr_loc), + &bh); if (ret) { mlog_errno(ret); goto out; -- cgit From 970e4936d7d15f35d00fd15a14f5343ba78b2fc8 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:19 -0800 Subject: ocfs2: Validate metadata only when it's read from disk. Add an optional validation hook to ocfs2_read_blocks(). Now the validation function is only called when a block was actually read off of disk. It is not called when the buffer was in cache. We add a buffer state bit BH_NeedsValidate to flag these buffers. It must always be one higher than the last JBD2 buffer state bit. The dinode, dirblock, extent_block, and xattr_block validators are lifted to this scheme directly. The group_descriptor validator needs to be split into two pieces. The first part only needs the gd buffer and is passed to ocfs2_read_block(). The second part requires the dinode as well, and is called every time. It's only 3 compares, so it's tiny. This also allows us to clean up the non-fatal gd check used by resize.c. It now has no magic argument. 
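The idea is compact enough to model outside the kernel. The stand-alone C sketch below uses toy flags and an invented read_buf() in place of buffer heads and ocfs2_read_blocks(), but it shows the two points of the patch: a cache hit never re-runs the validator, and a freshly submitted read is tagged as needing validation so the check runs once the I/O has completed, before the caller ever sees the buffer.

    #include <stdio.h>

    #define BUF_UPTODATE        0x1    /* contents already valid in cache */
    #define BUF_NEEDS_VALIDATE  0x2    /* freshly read, not yet checked */

    struct buf {
        unsigned int flags;
        int data_ok;    /* whatever a real validator would inspect */
    };

    typedef int (*validate_fn)(struct buf *b);

    /* Model of a cached read with an optional validation hook. */
    static int read_buf(struct buf *b, validate_fn validate)
    {
        if (!(b->flags & BUF_UPTODATE)) {
            /* "Submit" the read; in this toy model it finishes at once.
             * Tag the buffer so the check happens once the data is in. */
            b->flags |= BUF_UPTODATE;
            if (validate)
                b->flags |= BUF_NEEDS_VALIDATE;
        }

        /* On the wait-for-completion path: validate exactly once, then
         * clear the tag so later cache hits skip the check. */
        if (validate && (b->flags & BUF_NEEDS_VALIDATE)) {
            b->flags &= ~BUF_NEEDS_VALIDATE;
            if (validate(b))
                return -1;    /* caller never sees an unvalidated buffer */
        }
        return 0;
    }

    static int check(struct buf *b)
    {
        return b->data_ok ? 0 : -1;
    }

    int main(void)
    {
        struct buf fresh  = { 0, 1 };              /* read and checked */
        struct buf cached = { BUF_UPTODATE, 0 };   /* cache hit: unchecked */

        if (read_buf(&fresh, check))
            return 1;
        return read_buf(&cached, check);    /* 0 even though data_ok is 0 */
    }

In the real patch the tag is the BH_NeedsValidate buffer state bit, deliberately declared after the JBD2 bits, as the diff below shows.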
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 17 ++++----- fs/ocfs2/buffer_head_io.c | 33 ++++++++++++++++- fs/ocfs2/buffer_head_io.h | 27 ++++++++------ fs/ocfs2/dir.c | 13 +++---- fs/ocfs2/inode.c | 18 +++------- fs/ocfs2/resize.c | 2 +- fs/ocfs2/slot_map.c | 4 +-- fs/ocfs2/suballoc.c | 91 +++++++++++++++++++++++++++++++++-------------- fs/ocfs2/suballoc.h | 15 ++++---- fs/ocfs2/xattr.c | 26 +++++++------- 10 files changed, 149 insertions(+), 97 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index f430cc6e0f35..e823a27ba340 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -684,6 +684,9 @@ static int ocfs2_validate_extent_block(struct super_block *sb, struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data; + mlog(0, "Validating extent block %llu\n", + (unsigned long long)bh->b_blocknr); + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { ocfs2_error(sb, "Extent block #%llu has bad signature %.*s", @@ -719,21 +722,13 @@ int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, eb_blkno, &tmp); - if (rc) - goto out; - - rc = ocfs2_validate_extent_block(inode->i_sb, tmp); - if (rc) { - brelse(tmp); - goto out; - } + rc = ocfs2_read_block(inode, eb_blkno, &tmp, + ocfs2_validate_extent_block); /* If ocfs2_read_block() got us a new bh, pass it up. */ - if (!*bh) + if (!rc && !*bh) *bh = tmp; -out: return rc; } diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 3a178ec48d7c..0e9eed0c223f 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -39,6 +39,19 @@ #include "buffer_head_io.h" +/* + * Bits on bh->b_state used by ocfs2. + * + * These MUST be after the JBD2 bits. Currently BH_Unshadow is the last + * JBD2 bit. 
+ */ +enum ocfs2_state_bits { + BH_NeedsValidate = BH_Unshadow + 1, +}; + +/* Expand the magic b_state functions */ +BUFFER_FNS(NeedsValidate, needs_validate); + int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, struct inode *inode) { @@ -166,7 +179,9 @@ bail: } int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, - struct buffer_head *bhs[], int flags) + struct buffer_head *bhs[], int flags, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)) { int status = 0; int i, ignore_cache = 0; @@ -298,6 +313,8 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, clear_buffer_uptodate(bh); get_bh(bh); /* for end_buffer_read_sync() */ + if (validate) + set_buffer_needs_validate(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ, bh); continue; @@ -328,6 +345,20 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, bhs[i] = NULL; continue; } + + if (buffer_needs_validate(bh)) { + /* We never set NeedsValidate if the + * buffer was held by the journal, so + * that better not have changed */ + BUG_ON(buffer_jbd(bh)); + clear_buffer_needs_validate(bh); + status = validate(inode->i_sb, bh); + if (status) { + put_bh(bh); + bhs[i] = NULL; + continue; + } + } } /* Always set the buffer in the cache, even if it was diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h index 75e1dcb1ade7..c75d682dadd8 100644 --- a/fs/ocfs2/buffer_head_io.h +++ b/fs/ocfs2/buffer_head_io.h @@ -31,21 +31,24 @@ void ocfs2_end_buffer_io_sync(struct buffer_head *bh, int uptodate); -static inline int ocfs2_read_block(struct inode *inode, - u64 off, - struct buffer_head **bh); - int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, struct inode *inode); -int ocfs2_read_blocks(struct inode *inode, - u64 block, - int nr, - struct buffer_head *bhs[], - int flags); int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, unsigned int nr, struct buffer_head *bhs[]); +/* + * If not NULL, validate() will be called on a buffer that is freshly + * read from disk. It will not be called if the buffer was in cache. + * Note that if validate() is being used for this buffer, it needs to + * be set even for a READAHEAD call, as it marks the buffer for later + * validation. + */ +int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, + struct buffer_head *bhs[], int flags, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)); + int ocfs2_write_super_or_backup(struct ocfs2_super *osb, struct buffer_head *bh); @@ -53,7 +56,9 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, #define OCFS2_BH_READAHEAD 8 static inline int ocfs2_read_block(struct inode *inode, u64 off, - struct buffer_head **bh) + struct buffer_head **bh, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)) { int status = 0; @@ -63,7 +68,7 @@ static inline int ocfs2_read_block(struct inode *inode, u64 off, goto bail; } - status = ocfs2_read_blocks(inode, off, 1, bh, 0); + status = ocfs2_read_blocks(inode, off, 1, bh, 0, validate); bail: return status; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index c2f3fd93be5c..7e863d40380d 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -214,6 +214,8 @@ static int ocfs2_validate_dir_block(struct super_block *sb, * Nothing yet. We don't validate dirents here, that's handled * in-place when the code walks them. 
*/ + mlog(0, "Validating dirblock %llu\n", + (unsigned long long)bh->b_blocknr); return 0; } @@ -255,20 +257,13 @@ static int ocfs2_read_dir_block(struct inode *inode, u64 v_block, goto out; } - rc = ocfs2_read_blocks(inode, p_blkno, 1, &tmp, flags); + rc = ocfs2_read_blocks(inode, p_blkno, 1, &tmp, flags, + ocfs2_validate_dir_block); if (rc) { mlog_errno(rc); goto out; } - if (!(flags & OCFS2_BH_READAHEAD)) { - rc = ocfs2_validate_dir_block(inode->i_sb, tmp); - if (rc) { - brelse(tmp); - goto out; - } - } - /* If ocfs2_read_blocks() got us a new bh, pass it up. */ if (!*bh) *bh = tmp; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 9eb701b86466..ec3497bafda6 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -1255,6 +1255,9 @@ int ocfs2_validate_inode_block(struct super_block *sb, int rc = -EINVAL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + mlog(0, "Validating dinode %llu\n", + (unsigned long long)bh->b_blocknr); + BUG_ON(!buffer_uptodate(bh)); if (!OCFS2_IS_VALID_DINODE(di)) { @@ -1300,23 +1303,12 @@ int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, struct buffer_head *tmp = *bh; rc = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, &tmp, - flags); - if (rc) - goto out; - - if (!(flags & OCFS2_BH_READAHEAD)) { - rc = ocfs2_validate_inode_block(inode->i_sb, tmp); - if (rc) { - brelse(tmp); - goto out; - } - } + flags, ocfs2_validate_inode_block); /* If ocfs2_read_blocks() got us a new bh, pass it up. */ - if (!*bh) + if (!rc && !*bh) *bh = tmp; -out: return rc; } diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 252baff5eb84..867de3ebfcaf 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -394,7 +394,7 @@ static int ocfs2_check_new_group(struct inode *inode, (struct ocfs2_group_desc *)group_bh->b_data; u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc); - ret = ocfs2_validate_group_descriptor(inode->i_sb, di, group_bh, 1); + ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh); if (ret) goto out; diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index bdda2d8f8508..40661e7824e9 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -151,7 +151,7 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb) * this is not true, the read of -1 (UINT64_MAX) will fail. */ ret = ocfs2_read_blocks(si->si_inode, -1, si->si_blocks, si->si_bh, - OCFS2_BH_IGNORE_CACHE); + OCFS2_BH_IGNORE_CACHE, NULL); if (ret == 0) { spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); @@ -405,7 +405,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, bh = NULL; /* Acquire a fresh bh */ status = ocfs2_read_blocks(si->si_inode, blkno, 1, &bh, - OCFS2_BH_IGNORE_CACHE); + OCFS2_BH_IGNORE_CACHE, NULL); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 766a00b26441..226fe21f2608 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -145,14 +145,6 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); } -int ocfs2_validate_group_descriptor(struct super_block *sb, - struct ocfs2_dinode *di, - struct buffer_head *bh, - int clean_error) -{ - unsigned int max_bits; - struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; - #define do_error(fmt, ...) 
\ do{ \ if (clean_error) \ @@ -161,6 +153,12 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, ocfs2_error(sb, fmt, ##__VA_ARGS__); \ } while (0) +static int ocfs2_validate_gd_self(struct super_block *sb, + struct buffer_head *bh, + int clean_error) +{ + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { do_error("Group descriptor #%llu has bad signature %.*s", (unsigned long long)bh->b_blocknr, 7, @@ -184,6 +182,35 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, return -EINVAL; } + if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { + do_error("Group descriptor #%llu has bit count %u but " + "claims that %u are free", + (unsigned long long)bh->b_blocknr, + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EINVAL; + } + + if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { + do_error("Group descriptor #%llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)bh->b_blocknr, + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EINVAL; + } + + return 0; +} + +static int ocfs2_validate_gd_parent(struct super_block *sb, + struct ocfs2_dinode *di, + struct buffer_head *bh, + int clean_error) +{ + unsigned int max_bits; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; + if (di->i_blkno != gd->bg_parent_dinode) { do_error("Group descriptor #%llu has bad parent " "pointer (%llu, expected %llu)", @@ -209,26 +236,35 @@ int ocfs2_validate_group_descriptor(struct super_block *sb, return -EINVAL; } - if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { - do_error("Group descriptor #%llu has bit count %u but " - "claims that %u are free", - (unsigned long long)bh->b_blocknr, - le16_to_cpu(gd->bg_bits), - le16_to_cpu(gd->bg_free_bits_count)); - return -EINVAL; - } + return 0; +} - if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { - do_error("Group descriptor #%llu has bit count %u but " - "max bitmap bits of %u", - (unsigned long long)bh->b_blocknr, - le16_to_cpu(gd->bg_bits), - 8 * le16_to_cpu(gd->bg_size)); - return -EINVAL; - } #undef do_error - return 0; +/* + * This version only prints errors. It does not fail the filesystem, and + * exists only for resize. 
+ */ +int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct buffer_head *bh) +{ + int rc; + + rc = ocfs2_validate_gd_self(sb, bh, 1); + if (!rc) + rc = ocfs2_validate_gd_parent(sb, di, bh, 1); + + return rc; +} + +static int ocfs2_validate_group_descriptor(struct super_block *sb, + struct buffer_head *bh) +{ + mlog(0, "Validating group descriptor %llu\n", + (unsigned long long)bh->b_blocknr); + + return ocfs2_validate_gd_self(sb, bh, 0); } int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di, @@ -237,11 +273,12 @@ int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, gd_blkno, &tmp); + rc = ocfs2_read_block(inode, gd_blkno, &tmp, + ocfs2_validate_group_descriptor); if (rc) goto out; - rc = ocfs2_validate_group_descriptor(inode->i_sb, di, tmp, 0); + rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0); if (rc) { brelse(tmp); goto out; diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 43de4fd826d3..e3c13c77f9e8 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -165,16 +165,15 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac); u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster); /* - * By default, ocfs2_validate_group_descriptor() calls ocfs2_error() when it + * By default, ocfs2_read_group_descriptor() calls ocfs2_error() when it * finds a problem. A caller that wants to check a group descriptor - * without going readonly passes a nonzero clean_error. This is only - * resize, really. Everyone else should be using - * ocfs2_read_group_descriptor(). + * without going readonly should read the block with ocfs2_read_block[s]() + * and then checking it with this function. This is only resize, really. + * Everyone else should be using ocfs2_read_group_descriptor(). */ -int ocfs2_validate_group_descriptor(struct super_block *sb, - struct ocfs2_dinode *di, - struct buffer_head *bh, - int clean_error); +int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct buffer_head *bh); /* * Read a group descriptor block into *bh. If *bh is NULL, a bh will be * allocated. This is a cached read. The descriptor will be validated with diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index ef4aa5482d01..8af29b3bd6de 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -266,7 +266,8 @@ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, int rc; rc = ocfs2_read_blocks(bucket->bu_inode, xb_blkno, - bucket->bu_blocks, bucket->bu_bhs, 0); + bucket->bu_blocks, bucket->bu_bhs, 0, + NULL); if (rc) ocfs2_xattr_bucket_relse(bucket); return rc; @@ -359,12 +360,8 @@ static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, xb_blkno, &tmp); - if (!rc) { - rc = ocfs2_validate_xattr_block(inode->i_sb, tmp); - if (rc) - brelse(tmp); - } + rc = ocfs2_read_block(inode, xb_blkno, &tmp, + ocfs2_validate_xattr_block); /* If ocfs2_read_block() got us a new bh, pass it up. 
*/ if (!rc && !*bh) @@ -925,7 +922,7 @@ static int ocfs2_xattr_get_value_outside(struct inode *inode, blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); /* Copy ocfs2_xattr_value */ for (i = 0; i < num_clusters * bpc; i++, blkno++) { - ret = ocfs2_read_block(inode, blkno, &bh); + ret = ocfs2_read_block(inode, blkno, &bh, NULL); if (ret) { mlog_errno(ret); goto out; @@ -1174,7 +1171,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); for (i = 0; i < num_clusters * bpc; i++, blkno++) { - ret = ocfs2_read_block(inode, blkno, &bh); + ret = ocfs2_read_block(inode, blkno, &bh, NULL); if (ret) { mlog_errno(ret); goto out; @@ -2206,7 +2203,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, base = xis->base; credits += OCFS2_INODE_UPDATE_CREDITS; } else { - int i, block_off; + int i, block_off = 0; xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; xe = xbs->here; name_offset = le16_to_cpu(xe->xe_name_offset); @@ -2840,6 +2837,7 @@ static int ocfs2_find_xe_in_bucket(struct inode *inode, break; } + xe_name = bucket_block(bucket, block_off) + new_offset; if (!memcmp(name, xe_name, name_len)) { *xe_index = i; @@ -3598,7 +3596,7 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, goto out; } - ret = ocfs2_read_block(inode, prev_blkno, &old_bh); + ret = ocfs2_read_block(inode, prev_blkno, &old_bh, NULL); if (ret < 0) { mlog_errno(ret); brelse(new_bh); @@ -3990,7 +3988,7 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, ocfs2_journal_dirty(handle, first_bh); /* update the new bucket header. */ - ret = ocfs2_read_block(inode, to_blk_start, &bh); + ret = ocfs2_read_block(inode, to_blk_start, &bh, NULL); if (ret < 0) { mlog_errno(ret); goto out; @@ -4337,7 +4335,7 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_read_block(inode, p_blkno, &first_bh); + ret = ocfs2_read_block(inode, p_blkno, &first_bh, NULL); if (ret) { mlog_errno(ret); goto out; @@ -4635,7 +4633,7 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize); value_blk += header_bh->b_blocknr; - ret = ocfs2_read_block(inode, value_blk, &value_bh); + ret = ocfs2_read_block(inode, value_blk, &value_bh, NULL); if (ret) { mlog_errno(ret); goto out; -- cgit From a8549fb5abb2b372e46d5de0d23ff8b24f4a61af Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:20 -0800 Subject: ocfs2: Wrap virtual block reads in ocfs2_read_virt_blocks() The ocfs2_read_dir_block() function really maps an inode's virtual blocks to physical ones before calling ocfs2_read_blocks(). Let's extract that to common code, because other places might want to do that. Other than the block number being virtual, ocfs2_read_virt_blocks() takes the same arguments as ocfs2_read_blocks(). It converts those virtual block numbers to physical before calling ocfs2_read_blocks() directly. If the blocks asked for are discontiguous, this can mean multiple calls to ocfs2_read_blocks(), but this is mostly hidden from the caller. Like ocfs2_read_blocks(), the caller can pass in an existing buffer_head. This is usually done to pick up some readahead I/O. ocfs2_read_virt_blocks() checks the buffer_head's block number against the extent map - it must match. 
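A hypothetical caller of the new helpers might pair a readahead pass with a later blocking read. The example_* names are invented and the no-op validator stands in for a real one (such as the dirblock validator); note that the validator is passed on the readahead call as well, so the buffers get flagged for validation when they are finally waited on, per the ocfs2_read_blocks() rules introduced earlier in this series.

static int example_validate(struct super_block *sb, struct buffer_head *bh)
{
	/* A real caller would plug in its metadata check here. */
	return 0;
}

static int example_read_run(struct inode *inode, u64 v_start, int nr,
			    struct buffer_head *bhs[])
{
	int rc;

	/* Non-blocking readahead; a discontiguous range simply becomes
	 * several ocfs2_read_blocks() calls under the covers. */
	ocfs2_read_virt_blocks(inode, v_start, nr, bhs,
			       OCFS2_BH_READAHEAD, example_validate);

	/* Blocking read of the same range; buffer heads kept from the
	 * readahead pass are handed back in, and their b_blocknr must
	 * still match the extent map. */
	rc = ocfs2_read_virt_blocks(inode, v_start, nr, bhs, 0,
				    example_validate);
	if (rc)
		mlog_errno(rc);

	return rc;
}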
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/extent_map.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/extent_map.h | 24 +++++++++++++++++ 2 files changed, 95 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 0bd9d9698a24..f2bb1a04d253 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -806,3 +806,74 @@ out: return ret; } + +int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, + struct buffer_head *bhs[], int flags, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)) +{ + int rc = 0; + u64 p_block, p_count; + int i, count, done = 0; + + mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, " + "flags = %x, validate = %p)\n", + inode, (unsigned long long)v_block, nr, bhs, flags, + validate); + + if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= + i_size_read(inode)) { + BUG_ON(!(flags & OCFS2_BH_READAHEAD)); + goto out; + } + + while (done < nr) { + down_read(&OCFS2_I(inode)->ip_alloc_sem); + rc = ocfs2_extent_map_get_blocks(inode, v_block + done, + &p_block, &p_count, NULL); + up_read(&OCFS2_I(inode)->ip_alloc_sem); + if (rc) { + mlog_errno(rc); + break; + } + + if (!p_block) { + rc = -EIO; + mlog(ML_ERROR, + "Inode #%llu contains a hole at offset %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)(v_block + done) << + inode->i_sb->s_blocksize_bits); + break; + } + + count = nr - done; + if (p_count < count) + count = p_count; + + /* + * If the caller passed us bhs, they should have come + * from a previous readahead call to this function. Thus, + * they should have the right b_blocknr. + */ + for (i = 0; i < count; i++) { + if (!bhs[done + i]) + continue; + BUG_ON(bhs[done + i]->b_blocknr != (p_block + i)); + } + + rc = ocfs2_read_blocks(inode, p_block, count, bhs + done, + flags, validate); + if (rc) { + mlog_errno(rc); + break; + } + done += count; + } + +out: + mlog_exit(rc); + return rc; +} + + diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index 1c4aa8b06f34..b7dd9731b462 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h @@ -57,4 +57,28 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, struct ocfs2_extent_list *el); +int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, + struct buffer_head *bhs[], int flags, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)); +static inline int ocfs2_read_virt_block(struct inode *inode, u64 v_block, + struct buffer_head **bh, + int (*validate)(struct super_block *sb, + struct buffer_head *bh)) +{ + int status = 0; + + if (bh == NULL) { + printk("ocfs2: bh == NULL\n"); + status = -EINVAL; + goto bail; + } + + status = ocfs2_read_virt_blocks(inode, v_block, 1, bh, 0, validate); + +bail: + return status; +} + + #endif /* _EXTENT_MAP_H */ -- cgit From 511308d90b53479b194cd067715f44dc99d39b08 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 13 Nov 2008 14:49:21 -0800 Subject: ocfs2: Convert ocfs2_read_dir_block() to ocfs2_read_virt_blocks() Now that we've centralized the ocfs2_read_virt_blocks() code, let's use it in ocfs2_read_dir_block(). 
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/dir.c | 38 +++++--------------------------------- 1 file changed, 5 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 7e863d40380d..d83cff95759e 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -231,44 +231,16 @@ static int ocfs2_read_dir_block(struct inode *inode, u64 v_block, { int rc = 0; struct buffer_head *tmp = *bh; - u64 p_blkno; - if (((u64)v_block << inode->i_sb->s_blocksize_bits) >= - i_size_read(inode)) { - BUG_ON(!(flags & OCFS2_BH_READAHEAD)); - goto out; - } - - down_read(&OCFS2_I(inode)->ip_alloc_sem); - rc = ocfs2_extent_map_get_blocks(inode, v_block, &p_blkno, NULL, - NULL); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - if (rc) { + rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags, + ocfs2_validate_dir_block); + if (rc) mlog_errno(rc); - goto out; - } - if (!p_blkno) { - rc = -EIO; - mlog(ML_ERROR, - "Directory #%llu contains a hole at offset %llu\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, - (unsigned long long)v_block << inode->i_sb->s_blocksize_bits); - goto out; - } - - rc = ocfs2_read_blocks(inode, p_blkno, 1, &tmp, flags, - ocfs2_validate_dir_block); - if (rc) { - mlog_errno(rc); - goto out; - } - - /* If ocfs2_read_blocks() got us a new bh, pass it up. */ - if (!*bh) + /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */ + if (!rc && !*bh) *bh = tmp; -out: return rc ? -EIO : 0; } -- cgit From 53ef99cad9878f02f27bb30bc304fc42af8bdd6e Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 18 Nov 2008 16:53:43 -0800 Subject: ocfs2: Remove JBD compatibility layer JBD2 is fully backwards compatible with JBD and it's been tested enough with Ocfs2 that we can clean this code up now. Signed-off-by: Mark Fasheh --- fs/Kconfig | 10 ------ fs/ocfs2/alloc.c | 5 --- fs/ocfs2/aops.c | 24 ++----------- fs/ocfs2/journal.c | 14 -------- fs/ocfs2/journal.h | 11 +----- fs/ocfs2/ocfs2_jbd_compat.h | 82 --------------------------------------------- 6 files changed, 3 insertions(+), 143 deletions(-) delete mode 100644 fs/ocfs2/ocfs2_jbd_compat.h (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index e8a47f74a839..b93425ad15de 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -258,16 +258,6 @@ config OCFS2_DEBUG_FS this option for debugging only as it is likely to decrease performance of the filesystem. -config OCFS2_COMPAT_JBD - bool "Use JBD for compatibility" - depends on OCFS2_FS - default n - select JBD - help - The ocfs2 filesystem now uses JBD2 for its journalling. JBD2 - is backwards compatible with JBD. It is safe to say N here. - However, if you really want to use the original JBD, say Y here. 
- config OCFS2_FS_POSIX_ACL bool "OCFS2 POSIX Access Control Lists" depends on OCFS2_FS diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index e823a27ba340..69d67ab069bb 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6638,11 +6638,6 @@ static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, mlog_errno(ret); else if (ocfs2_should_order_data(inode)) { ret = ocfs2_jbd2_file_inode(handle, inode); -#ifdef CONFIG_OCFS2_COMPAT_JBD - ret = walk_page_buffers(handle, page_buffers(page), - from, to, &partial, - ocfs2_journal_dirty_data); -#endif if (ret < 0) mlog_errno(ret); } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index e219f8b546ac..6af79adb2eca 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -474,12 +474,6 @@ handle_t *ocfs2_start_walk_page_trans(struct inode *inode, if (ocfs2_should_order_data(inode)) { ret = ocfs2_jbd2_file_inode(handle, inode); -#ifdef CONFIG_OCFS2_COMPAT_JBD - ret = walk_page_buffers(handle, - page_buffers(page), - from, to, NULL, - ocfs2_journal_dirty_data); -#endif if (ret < 0) mlog_errno(ret); } @@ -1065,15 +1059,8 @@ static void ocfs2_write_failure(struct inode *inode, tmppage = wc->w_pages[i]; if (page_has_buffers(tmppage)) { - if (ocfs2_should_order_data(inode)) { + if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); -#ifdef CONFIG_OCFS2_COMPAT_JBD - walk_page_buffers(wc->w_handle, - page_buffers(tmppage), - from, to, NULL, - ocfs2_journal_dirty_data); -#endif - } block_commit_write(tmppage, from, to); } @@ -1912,15 +1899,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping, } if (page_has_buffers(tmppage)) { - if (ocfs2_should_order_data(inode)) { + if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); -#ifdef CONFIG_OCFS2_COMPAT_JBD - walk_page_buffers(wc->w_handle, - page_buffers(tmppage), - from, to, NULL, - ocfs2_journal_dirty_data); -#endif - } block_commit_write(tmppage, from, to); } } diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 9223bfcca3ba..12b62a3cbf69 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -434,20 +434,6 @@ int ocfs2_journal_dirty(handle_t *handle, return status; } -#ifdef CONFIG_OCFS2_COMPAT_JBD -int ocfs2_journal_dirty_data(handle_t *handle, - struct buffer_head *bh) -{ - int err = journal_dirty_data(handle, bh); - if (err) - mlog_errno(err); - /* TODO: When we can handle it, abort the handle and go RO on - * error here. 
*/ - - return err; -} -#endif - #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) void ocfs2_set_journal_params(struct ocfs2_super *osb) diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index d4d14e9a3cea..8203980fefed 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -27,12 +27,7 @@ #define OCFS2_JOURNAL_H #include -#ifndef CONFIG_OCFS2_COMPAT_JBD -# include -#else -# include -# include "ocfs2_jbd_compat.h" -#endif +#include enum ocfs2_journal_state { OCFS2_JOURNAL_FREE = 0, @@ -273,10 +268,6 @@ int ocfs2_journal_access(handle_t *handle, */ int ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh); -#ifdef CONFIG_OCFS2_COMPAT_JBD -int ocfs2_journal_dirty_data(handle_t *handle, - struct buffer_head *bh); -#endif /* * Credit Macros: diff --git a/fs/ocfs2/ocfs2_jbd_compat.h b/fs/ocfs2/ocfs2_jbd_compat.h deleted file mode 100644 index b91c78f8f558..000000000000 --- a/fs/ocfs2/ocfs2_jbd_compat.h +++ /dev/null @@ -1,82 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * - * ocfs2_jbd_compat.h - * - * Compatibility defines for JBD. - * - * Copyright (C) 2008 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef OCFS2_JBD_COMPAT_H -#define OCFS2_JBD_COMPAT_H - -#ifndef CONFIG_OCFS2_COMPAT_JBD -# error Should not have been included -#endif - -struct jbd2_inode { - unsigned int dummy; -}; - -#define JBD2_BARRIER JFS_BARRIER -#define JBD2_DEFAULT_MAX_COMMIT_AGE JBD_DEFAULT_MAX_COMMIT_AGE - -#define jbd2_journal_ack_err journal_ack_err -#define jbd2_journal_clear_err journal_clear_err -#define jbd2_journal_destroy journal_destroy -#define jbd2_journal_dirty_metadata journal_dirty_metadata -#define jbd2_journal_errno journal_errno -#define jbd2_journal_extend journal_extend -#define jbd2_journal_flush journal_flush -#define jbd2_journal_force_commit journal_force_commit -#define jbd2_journal_get_write_access journal_get_write_access -#define jbd2_journal_get_undo_access journal_get_undo_access -#define jbd2_journal_init_inode journal_init_inode -#define jbd2_journal_invalidatepage journal_invalidatepage -#define jbd2_journal_load journal_load -#define jbd2_journal_lock_updates journal_lock_updates -#define jbd2_journal_restart journal_restart -#define jbd2_journal_start journal_start -#define jbd2_journal_start_commit journal_start_commit -#define jbd2_journal_stop journal_stop -#define jbd2_journal_try_to_free_buffers journal_try_to_free_buffers -#define jbd2_journal_unlock_updates journal_unlock_updates -#define jbd2_journal_wipe journal_wipe -#define jbd2_log_wait_commit log_wait_commit - -static inline int jbd2_journal_file_inode(handle_t *handle, - struct jbd2_inode *inode) -{ - return 0; -} - -static inline int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, - loff_t new_size) -{ - return 0; -} - -static inline void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, - struct inode *inode) -{ - return; -} - -static inline void jbd2_journal_release_jbd_inode(journal_t *journal, - struct jbd2_inode *jinode) -{ - return; -} - - -#endif /* OCFS2_JBD_COMPAT_H */ -- cgit 
From 97aff52ae13d3c11a074bbbfc80ad0b59cb8cdeb Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 19 Nov 2008 16:48:41 +0800 Subject: ocfs2/xattr: Fix a bug in xattr allocation estimation When we extend one xattr's value to a large size, the old value size might be smaller than the size of a value root. In those cases, we still need to guess the metadata allocation. Reported-by: Tiger Yang Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 8af29b3bd6de..d0b94edb9662 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2270,6 +2270,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, value_size); xv = (struct ocfs2_xattr_value_root *) (base + name_offset + name_len); + value_size = OCFS2_XATTR_ROOT_SIZE; } else xv = &def_xv.xv; @@ -2283,7 +2284,8 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, &xv->xr_list, new_clusters - old_clusters); - goto out; + if (value_size >= OCFS2_XATTR_ROOT_SIZE) + goto out; } } else { /* -- cgit From 9f868f16e40e9ad8e39aebff94a4be0d96520734 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 19 Nov 2008 16:48:42 +0800 Subject: ocfs2/xattr: Restore not_found in xis During an xattr set, when we move a xattr which was stored in inode to the outside bucket, we have to delete it and it will use the old value of xis->not_found. xis->not_found is removed by ocfs2_calc_xattr_set_need though, so we must restore it. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d0b94edb9662..9cb71e1c7c60 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2414,7 +2414,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt) { - int ret = 0, credits; + int ret = 0, credits, old_found; if (!xi->value) { /* Remove existing extended attribute */ @@ -2433,6 +2433,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, xi->value = NULL; xi->value_len = 0; + old_found = xis->not_found; xis->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, @@ -2442,6 +2443,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, NULL, NULL, &credits); + xis->not_found = old_found; if (ret) { mlog_errno(ret); goto out; @@ -2462,6 +2464,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, if (ret) goto out; + old_found = xis->not_found; xis->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, @@ -2471,6 +2474,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, NULL, NULL, &credits); + xis->not_found = old_found; if (ret) { mlog_errno(ret); goto out; -- cgit From 74f783af95c982aef6d3a1415275650dcf511666 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 19 Aug 2008 14:51:22 +0200 Subject: quota: Add callbacks for allocating and destroying dquot structures Some filesystems would like to keep private information together with each dquot. Add callbacks alloc_dquot and destroy_dquot allowing filesystem to allocate larger dquots from their private slab in a similar fashion we currently allocate inodes. 
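A sketch of how a filesystem might use the new hooks; the myfs names and the private field are illustrative, not part of this patch. The dquot is embedded in a larger structure backed by the filesystem's own slab, mirroring the way inodes are handled.

struct myfs_dquot {
	struct dquot	dq_dquot;	/* must remain usable as a plain dquot */
	u64		dq_private;	/* filesystem-specific payload */
};

/* Created with kmem_cache_create() at module init time. */
static struct kmem_cache *myfs_dquot_cachep;

static struct dquot *myfs_alloc_dquot(struct super_block *sb, int type)
{
	struct myfs_dquot *mdq;

	mdq = kmem_cache_zalloc(myfs_dquot_cachep, GFP_NOFS);
	if (!mdq)
		return NULL;
	return &mdq->dq_dquot;
}

static void myfs_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(myfs_dquot_cachep,
			container_of(dquot, struct myfs_dquot, dq_dquot));
}

static struct dquot_operations myfs_quota_ops = {
	/* ... the usual callbacks ... */
	.alloc_dquot	= myfs_alloc_dquot,
	.destroy_dquot	= myfs_destroy_dquot,
};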
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index c237ccc8581c..1b5fc4b7fbeb 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -415,6 +415,16 @@ out_dqlock: return ret; } +static void dquot_destroy(struct dquot *dquot) +{ + kmem_cache_free(dquot_cachep, dquot); +} + +static inline void do_destroy_dquot(struct dquot *dquot) +{ + dquot->dq_sb->dq_op->destroy_dquot(dquot); +} + /* Invalidate all dquots on the list. Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new * quota users. There can still be some users of quotas due to inodes being @@ -463,7 +473,7 @@ restart: remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); - kmem_cache_free(dquot_cachep, dquot); + do_destroy_dquot(dquot); } spin_unlock(&dq_list_lock); } @@ -527,7 +537,7 @@ static void prune_dqcache(int count) remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); - kmem_cache_free(dquot_cachep, dquot); + do_destroy_dquot(dquot); count--; head = free_dquots.prev; } @@ -625,11 +635,16 @@ we_slept: spin_unlock(&dq_list_lock); } +static struct dquot *dquot_alloc(struct super_block *sb, int type) +{ + return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); +} + static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; - dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS); + dquot = sb->dq_op->alloc_dquot(sb, type); if(!dquot) return NODQUOT; @@ -682,7 +697,7 @@ we_slept: dqstats.lookups++; spin_unlock(&dq_list_lock); if (empty) - kmem_cache_free(dquot_cachep, empty); + do_destroy_dquot(empty); } /* Wait for dq_lock - after this we know that either dquot_release() is already * finished or it will be canceled due to dq_count > 1 test */ @@ -1533,7 +1548,9 @@ struct dquot_operations dquot_operations = { .acquire_dquot = dquot_acquire, .release_dquot = dquot_release, .mark_dirty = dquot_mark_dquot_dirty, - .write_info = dquot_commit_info + .write_info = dquot_commit_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, }; static inline void set_enable_flags(struct quota_info *dqopt, int type) -- cgit From 12095460f7f315f8ef67a55b2194195d325d48d7 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 14:45:12 +0200 Subject: quota: Increase size of variables for limits and inode usage So far quota was fine with quota block limits and inode limits/numbers in a 32-bit type. Now with rapid increase in storage sizes there are coming requests to be able to handle quota limits above 4TB / more that 2^32 inodes. So bump up sizes of types in mem_dqblk structure to 64-bits to be able to handle this. Also update inode allocation / checking functions to use qsize_t and make global structure keep quota limits in bytes so that things are consistent. 
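To spell out the arithmetic behind the 4TB figure (an illustrative aside, assuming the 1 KiB quota blocks the generic code uses, QIF_DQBLKSIZE_BITS == 10):

	old 32-bit block limit:  (2^32 - 1) blocks * 2^10 bytes/block ~= 2^42 bytes = 4 TiB
	old 32-bit inode limit:  2^32 - 1 ~= 4.3 * 10^9 inodes

Keeping the in-memory limits in bytes as 64-bit qsize_t values removes both ceilings; the qbtos()/stoqb() helpers added below convert between bytes and quota blocks only at the quotactl boundary.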
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 50 +++++++++++++++++++++++++++++--------------------- fs/quota_v1.c | 25 +++++++++++++++++++------ fs/quota_v2.c | 21 +++++++++++++++++---- 3 files changed, 65 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 1b5fc4b7fbeb..c02223b6aeb2 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -835,7 +835,7 @@ static void drop_dquot_ref(struct super_block *sb, int type) } } -static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number) +static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) { dquot->dq_dqb.dqb_curinodes += number; } @@ -845,7 +845,7 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_curspace += number; } -static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number) +static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (dquot->dq_dqb.dqb_curinodes > number) dquot->dq_dqb.dqb_curinodes -= number; @@ -862,7 +862,7 @@ static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_curspace -= number; else dquot->dq_dqb.dqb_curspace = 0; - if (toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit) + if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) dquot->dq_dqb.dqb_btime = (time_t) 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); } @@ -1038,7 +1038,7 @@ static inline char ignore_hardlimit(struct dquot *dquot) } /* needs dq_data_lock */ -static int check_idq(struct dquot *dquot, ulong inodes, char *warntype) +static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) { *warntype = QUOTA_NL_NOWARN; if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags)) @@ -1077,7 +1077,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war return QUOTA_OK; if (dquot->dq_dqb.dqb_bhardlimit && - toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit && + dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = QUOTA_NL_BHARDWARN; @@ -1085,7 +1085,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (!prealloc) @@ -1094,7 +1094,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = QUOTA_NL_BSOFTWARN; @@ -1111,7 +1111,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war return QUOTA_OK; } -static int info_idq_free(struct dquot *dquot, ulong inodes) +static int info_idq_free(struct dquot *dquot, qsize_t inodes) { if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) @@ -1128,15 +1128,13 @@ static int info_idq_free(struct dquot *dquot, ulong inodes) static int info_bdq_free(struct dquot *dquot, qsize_t space) { if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit) + 
dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) return QUOTA_NL_NOWARN; - if (toqb(dquot->dq_dqb.dqb_curspace - space) <= - dquot->dq_dqb.dqb_bsoftlimit) + if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) return QUOTA_NL_BSOFTBELOW; - if (toqb(dquot->dq_dqb.dqb_curspace) >= dquot->dq_dqb.dqb_bhardlimit && - toqb(dquot->dq_dqb.dqb_curspace - space) < - dquot->dq_dqb.dqb_bhardlimit) + if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && + dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) return QUOTA_NL_BHARDBELOW; return QUOTA_NL_NOWARN; } @@ -1279,7 +1277,7 @@ warn_put_all: /* * This operation can block, but only after everything is updated */ -int dquot_alloc_inode(const struct inode *inode, unsigned long number) +int dquot_alloc_inode(const struct inode *inode, qsize_t number) { int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; @@ -1364,7 +1362,7 @@ out_sub: /* * This operation can block, but only after everything is updated */ -int dquot_free_inode(const struct inode *inode, unsigned long number) +int dquot_free_inode(const struct inode *inode, qsize_t number) { unsigned int cnt; char warntype[MAXQUOTAS]; @@ -1883,14 +1881,24 @@ int vfs_dq_quota_on_remount(struct super_block *sb) return ret; } +static inline qsize_t qbtos(qsize_t blocks) +{ + return blocks << QIF_DQBLKSIZE_BITS; +} + +static inline qsize_t stoqb(qsize_t space) +{ + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; +} + /* Generic routine for getting common part of quota structure */ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) { struct mem_dqblk *dm = &dquot->dq_dqb; spin_lock(&dq_data_lock); - di->dqb_bhardlimit = dm->dqb_bhardlimit; - di->dqb_bsoftlimit = dm->dqb_bsoftlimit; + di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); + di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); di->dqb_curspace = dm->dqb_curspace; di->dqb_ihardlimit = dm->dqb_ihardlimit; di->dqb_isoftlimit = dm->dqb_isoftlimit; @@ -1937,8 +1945,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) check_blim = 1; } if (di->dqb_valid & QIF_BLIMITS) { - dm->dqb_bsoftlimit = di->dqb_bsoftlimit; - dm->dqb_bhardlimit = di->dqb_bhardlimit; + dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); + dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); check_blim = 1; } if (di->dqb_valid & QIF_INODES) { @@ -1956,7 +1964,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) dm->dqb_itime = di->dqb_itime; if (check_blim) { - if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) { + if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { dm->dqb_btime = 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); } diff --git a/fs/quota_v1.c b/fs/quota_v1.c index 5ae15b13eeb0..3e078eee5644 100644 --- a/fs/quota_v1.c +++ b/fs/quota_v1.c @@ -14,14 +14,27 @@ MODULE_AUTHOR("Jan Kara"); MODULE_DESCRIPTION("Old quota format support"); MODULE_LICENSE("GPL"); +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v1_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v1_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) { m->dqb_ihardlimit = d->dqb_ihardlimit; m->dqb_isoftlimit = d->dqb_isoftlimit; m->dqb_curinodes = d->dqb_curinodes; - m->dqb_bhardlimit = d->dqb_bhardlimit; - m->dqb_bsoftlimit = d->dqb_bsoftlimit; - m->dqb_curspace = 
((qsize_t)d->dqb_curblocks) << QUOTABLOCK_BITS; + m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); + m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); + m->dqb_curspace = v1_qbtos(d->dqb_curblocks); m->dqb_itime = d->dqb_itime; m->dqb_btime = d->dqb_btime; } @@ -31,9 +44,9 @@ static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) d->dqb_ihardlimit = m->dqb_ihardlimit; d->dqb_isoftlimit = m->dqb_isoftlimit; d->dqb_curinodes = m->dqb_curinodes; - d->dqb_bhardlimit = m->dqb_bhardlimit; - d->dqb_bsoftlimit = m->dqb_bsoftlimit; - d->dqb_curblocks = toqb(m->dqb_curspace); + d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); + d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); + d->dqb_curblocks = v1_stoqb(m->dqb_curspace); d->dqb_itime = m->dqb_itime; d->dqb_btime = m->dqb_btime; } diff --git a/fs/quota_v2.c b/fs/quota_v2.c index b53827dc02d9..51c4717f7c6a 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -26,6 +26,19 @@ typedef char *dqbuf_t; #define GETIDINDEX(id, depth) (((id) >> ((V2_DQTREEDEPTH-(depth)-1)*8)) & 0xff) #define GETENTRIES(buf) ((struct v2_disk_dqblk *)(((char *)buf)+sizeof(struct v2_disk_dqdbheader))) +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v2_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v2_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + /* Check whether given file is really vfsv0 quotafile */ static int v2_check_quota_file(struct super_block *sb, int type) { @@ -104,8 +117,8 @@ static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d) m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); m->dqb_itime = le64_to_cpu(d->dqb_itime); - m->dqb_bhardlimit = le32_to_cpu(d->dqb_bhardlimit); - m->dqb_bsoftlimit = le32_to_cpu(d->dqb_bsoftlimit); + m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); + m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); m->dqb_curspace = le64_to_cpu(d->dqb_curspace); m->dqb_btime = le64_to_cpu(d->dqb_btime); } @@ -116,8 +129,8 @@ static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); d->dqb_itime = cpu_to_le64(m->dqb_itime); - d->dqb_bhardlimit = cpu_to_le32(m->dqb_bhardlimit); - d->dqb_bsoftlimit = cpu_to_le32(m->dqb_bsoftlimit); + d->dqb_bhardlimit = cpu_to_le32(v2_qbtos(m->dqb_bhardlimit)); + d->dqb_bsoftlimit = cpu_to_le32(v2_qbtos(m->dqb_bsoftlimit)); d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); d->dqb_id = cpu_to_le32(id); -- cgit From 1497d3ad487b64eeea83ac203263802755438949 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 15:49:59 +0200 Subject: quota: Remove bogus 'optimization' in check_idq() and check_bdq() Checks like <= 0 for an unsigned type do not make much sence. The value could be only 0 and that does not happen often enough for the check to be worth it. 
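A two-line illustration, not from the patch, of why the removed test bought nothing for the unsigned qsize_t arguments:

static int old_test(qsize_t space)
{
	return space <= 0;	/* unsigned: can only be true when space == 0 */
}

static int equivalent(qsize_t space)
{
	return space == 0;	/* all the old check ever caught */
}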
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index c02223b6aeb2..c88330602ddd 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -1041,7 +1041,7 @@ static inline char ignore_hardlimit(struct dquot *dquot) static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) { *warntype = QUOTA_NL_NOWARN; - if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags)) + if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_ihardlimit && @@ -1073,7 +1073,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { *warntype = QUOTA_NL_NOWARN; - if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags)) + if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_bhardlimit && -- cgit From e4bc7b4b7ff783779b6928d55a9308910bf180a3 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 16:21:01 +0200 Subject: quota: Make _SUSPENDED just a flag Upto now, DQUOT_USR_SUSPENDED behaved like a state - i.e., either quota was enabled or suspended or none. Now allowed states are 0, ENABLED, ENABLED | SUSPENDED. This will be useful later when we implement separate enabling of quota usage tracking and limits enforcement because we need to keep track of a state which has been suspended. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index c88330602ddd..22340c610e1a 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -1570,18 +1570,20 @@ static inline void reset_enable_flags(struct quota_info *dqopt, int type, { switch (type) { case USRQUOTA: - dqopt->flags &= ~DQUOT_USR_ENABLED; if (remount) dqopt->flags |= DQUOT_USR_SUSPENDED; - else + else { + dqopt->flags &= ~DQUOT_USR_ENABLED; dqopt->flags &= ~DQUOT_USR_SUSPENDED; + } break; case GRPQUOTA: - dqopt->flags &= ~DQUOT_GRP_ENABLED; if (remount) dqopt->flags |= DQUOT_GRP_SUSPENDED; - else + else { + dqopt->flags &= ~DQUOT_GRP_ENABLED; dqopt->flags &= ~DQUOT_GRP_SUSPENDED; + } break; } } -- cgit From f55abc0fb9c3189de3da829adf3220322c0da43e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 17:50:32 +0200 Subject: quota: Allow to separately enable quota accounting and enforcing limits Split DQUOT_USR_ENABLED (and DQUOT_GRP_ENABLED) into DQUOT_USR_USAGE_ENABLED and DQUOT_USR_LIMITS_ENABLED. This way we are able to separately enable / disable whether we should: 1) ignore quotas completely 2) just keep uptodate information about usage 3) actually enforce quota limits This is going to be useful when quota is treated as filesystem metadata - we then want to keep quota information uptodate all the time and just enable / disable limits enforcement. 
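A sketch, with made-up myfs names, of how a filesystem that treats quota as internal metadata could drive the split using the interfaces reworked below: usage accounting is loaded once and stays on, while limit enforcement is flipped independently.

static int myfs_start_usage_tracking(struct inode *quota_inode)
{
	/* Account usage from mount onwards without enforcing limits. */
	return vfs_quota_enable(quota_inode, USRQUOTA, QFMT_VFS_V0,
				DQUOT_USAGE_ENABLED);
}

static int myfs_quotaon(struct inode *quota_inode)
{
	/* Usage is already loaded, so this only flips the limits bit. */
	return vfs_quota_enable(quota_inode, USRQUOTA, QFMT_VFS_V0,
				DQUOT_LIMITS_ENABLED);
}

static int myfs_quotaoff(struct super_block *sb)
{
	/* Stop enforcing limits but keep usage tracking alive. */
	return vfs_quota_disable(sb, USRQUOTA, DQUOT_LIMITS_ENABLED);
}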
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 222 ++++++++++++++++++++++++++++++++++++++----------------------- fs/quota.c | 8 +-- 2 files changed, 142 insertions(+), 88 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 22340c610e1a..7569633efe0e 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -489,7 +489,7 @@ int vfs_quota_sync(struct super_block *sb, int type) for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; - if (!sb_has_quota_enabled(sb, cnt)) + if (!sb_has_quota_active(sb, cnt)) continue; spin_lock(&dq_list_lock); dirty = &dqopt->info[cnt].dqi_dirty_list; @@ -514,8 +514,8 @@ int vfs_quota_sync(struct super_block *sb, int type) } for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt) - && info_dirty(&dqopt->info[cnt])) + if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) + && info_dirty(&dqopt->info[cnt])) sb->dq_op->write_info(sb, cnt); spin_lock(&dq_list_lock); dqstats.syncs++; @@ -594,7 +594,7 @@ we_slept: /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); /* Releasing dquot during quotaoff phase? */ - if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && + if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && atomic_read(&dquot->dq_count) == 1) wake_up(&dquot->dq_wait_unused); spin_unlock(&dq_list_lock); @@ -670,7 +670,7 @@ static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) unsigned int hashent = hashfn(sb, id, type); struct dquot *dquot, *empty = NODQUOT; - if (!sb_has_quota_enabled(sb, type)) + if (!sb_has_quota_active(sb, type)) return NODQUOT; we_slept: spin_lock(&dq_list_lock); @@ -1041,7 +1041,8 @@ static inline char ignore_hardlimit(struct dquot *dquot) static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) { *warntype = QUOTA_NL_NOWARN; - if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_ihardlimit && @@ -1073,7 +1074,8 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { *warntype = QUOTA_NL_NOWARN; - if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_bhardlimit && @@ -1114,7 +1116,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war static int info_idq_free(struct dquot *dquot, qsize_t inodes) { if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) + dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || + !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) return QUOTA_NL_NOWARN; if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) @@ -1508,7 +1511,7 @@ warn_put_all: /* Wrapper for transferring ownership of an inode */ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) { - if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) { + if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { vfs_dq_init(inode); if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) return 1; @@ -1551,53 +1554,22 @@ struct dquot_operations dquot_operations = { .destroy_dquot = dquot_destroy, }; -static inline void set_enable_flags(struct 
quota_info *dqopt, int type) -{ - switch (type) { - case USRQUOTA: - dqopt->flags |= DQUOT_USR_ENABLED; - dqopt->flags &= ~DQUOT_USR_SUSPENDED; - break; - case GRPQUOTA: - dqopt->flags |= DQUOT_GRP_ENABLED; - dqopt->flags &= ~DQUOT_GRP_SUSPENDED; - break; - } -} - -static inline void reset_enable_flags(struct quota_info *dqopt, int type, - int remount) -{ - switch (type) { - case USRQUOTA: - if (remount) - dqopt->flags |= DQUOT_USR_SUSPENDED; - else { - dqopt->flags &= ~DQUOT_USR_ENABLED; - dqopt->flags &= ~DQUOT_USR_SUSPENDED; - } - break; - case GRPQUOTA: - if (remount) - dqopt->flags |= DQUOT_GRP_SUSPENDED; - else { - dqopt->flags &= ~DQUOT_GRP_ENABLED; - dqopt->flags &= ~DQUOT_GRP_SUSPENDED; - } - break; - } -} - - /* * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) */ -int vfs_quota_off(struct super_block *sb, int type, int remount) +int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) { int cnt, ret = 0; struct quota_info *dqopt = sb_dqopt(sb); struct inode *toputinode[MAXQUOTAS]; + /* Cannot turn off usage accounting without turning off limits, or + * suspend quotas and simultaneously turn quotas off. */ + if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) + || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | + DQUOT_USAGE_ENABLED))) + return -EINVAL; + /* We need to serialize quota_off() for device */ mutex_lock(&dqopt->dqonoff_mutex); @@ -1606,7 +1578,7 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) * sometimes we are called when fill_super() failed and calling * sync_fs() in such cases does no good. */ - if (!sb_any_quota_enabled(sb) && !sb_any_quota_suspended(sb)) { + if (!sb_any_quota_loaded(sb)) { mutex_unlock(&dqopt->dqonoff_mutex); return 0; } @@ -1614,17 +1586,28 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) toputinode[cnt] = NULL; if (type != -1 && cnt != type) continue; - /* If we keep inodes of quota files after remount and quotaoff - * is called, drop kept inodes. */ - if (!remount && sb_has_quota_suspended(sb, cnt)) { - iput(dqopt->files[cnt]); - dqopt->files[cnt] = NULL; - reset_enable_flags(dqopt, cnt, 0); + if (!sb_has_quota_loaded(sb, cnt)) continue; + + if (flags & DQUOT_SUSPENDED) { + dqopt->flags |= + dquot_state_flag(DQUOT_SUSPENDED, cnt); + } else { + dqopt->flags &= ~dquot_state_flag(flags, cnt); + /* Turning off suspended quotas? */ + if (!sb_has_quota_loaded(sb, cnt) && + sb_has_quota_suspended(sb, cnt)) { + dqopt->flags &= ~dquot_state_flag( + DQUOT_SUSPENDED, cnt); + iput(dqopt->files[cnt]); + dqopt->files[cnt] = NULL; + continue; + } } - if (!sb_has_quota_enabled(sb, cnt)) + + /* We still have to keep quota loaded? 
*/ + if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) continue; - reset_enable_flags(dqopt, cnt, remount); /* Note: these are blocking operations */ drop_dquot_ref(sb, cnt); @@ -1640,7 +1623,7 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) put_quota_format(dqopt->info[cnt].dqi_format); toputinode[cnt] = dqopt->files[cnt]; - if (!remount) + if (!sb_has_quota_loaded(sb, cnt)) dqopt->files[cnt] = NULL; dqopt->info[cnt].dqi_flags = 0; dqopt->info[cnt].dqi_igrace = 0; @@ -1663,7 +1646,7 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) mutex_lock(&dqopt->dqonoff_mutex); /* If quota was reenabled in the meantime, we have * nothing to do */ - if (!sb_has_quota_enabled(sb, cnt)) { + if (!sb_has_quota_loaded(sb, cnt)) { mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | S_NOATIME | S_NOQUOTA); @@ -1673,10 +1656,13 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) } mutex_unlock(&dqopt->dqonoff_mutex); /* On remount RO, we keep the inode pointer so that we - * can reenable quota on the subsequent remount RW. - * But we have better not keep inode pointer when there - * is pending delete on the quota file... */ - if (!remount) + * can reenable quota on the subsequent remount RW. We + * have to check 'flags' variable and not use sb_has_ + * function because another quotaon / quotaoff could + * change global state before we got here. We refuse + * to suspend quotas when there is pending delete on + * the quota file... */ + if (!(flags & DQUOT_SUSPENDED)) iput(toputinode[cnt]); else if (!toputinode[cnt]->i_nlink) ret = -EBUSY; @@ -1686,12 +1672,22 @@ int vfs_quota_off(struct super_block *sb, int type, int remount) return ret; } +int vfs_quota_off(struct super_block *sb, int type, int remount) +{ + return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : + (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); +} + /* * Turn quotas on on a device */ -/* Helper function when we already have the inode */ -static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) +/* + * Helper function to turn quotas on when we already have the inode of + * quota file and no quota information is loaded. + */ +static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, + unsigned int flags) { struct quota_format_type *fmt = find_quota_format(format_id); struct super_block *sb = inode->i_sb; @@ -1713,6 +1709,11 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) error = -EINVAL; goto out_fmt; } + /* Usage always has to be set... */ + if (!(flags & DQUOT_USAGE_ENABLED)) { + error = -EINVAL; + goto out_fmt; + } /* As we bypass the pagecache we must now flush the inode so that * we see all the changes from userspace... 
*/ @@ -1721,8 +1722,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) invalidate_bdev(sb->s_bdev); mutex_lock(&inode->i_mutex); mutex_lock(&dqopt->dqonoff_mutex); - if (sb_has_quota_enabled(sb, type) || - sb_has_quota_suspended(sb, type)) { + if (sb_has_quota_loaded(sb, type)) { error = -EBUSY; goto out_lock; } @@ -1754,7 +1754,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) } mutex_unlock(&dqopt->dqio_mutex); mutex_unlock(&inode->i_mutex); - set_enable_flags(dqopt, type); + dqopt->flags |= dquot_state_flag(flags, type); add_dquot_ref(sb, type); mutex_unlock(&dqopt->dqonoff_mutex); @@ -1787,20 +1787,23 @@ static int vfs_quota_on_remount(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); struct inode *inode; int ret; + unsigned int flags; mutex_lock(&dqopt->dqonoff_mutex); if (!sb_has_quota_suspended(sb, type)) { mutex_unlock(&dqopt->dqonoff_mutex); return 0; } - BUG_ON(sb_has_quota_enabled(sb, type)); - inode = dqopt->files[type]; dqopt->files[type] = NULL; - reset_enable_flags(dqopt, type, 0); + flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED, type); + dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); mutex_unlock(&dqopt->dqonoff_mutex); - ret = vfs_quota_on_inode(inode, type, dqopt->info[type].dqi_fmt_id); + flags = dquot_generic_flag(flags, type); + ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, + flags); iput(inode); return ret; @@ -1816,12 +1819,12 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id, if (path->mnt->mnt_sb != sb) error = -EXDEV; else - error = vfs_quota_on_inode(path->dentry->d_inode, type, - format_id); + error = vfs_load_quota_inode(path->dentry->d_inode, type, + format_id, DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED); return error; } -/* Actual function called from quotactl() */ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, int remount) { @@ -1839,6 +1842,50 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, return error; } +/* + * More powerful function for turning on quotas allowing setting + * of individual quota flags + */ +int vfs_quota_enable(struct inode *inode, int type, int format_id, + unsigned int flags) +{ + int ret = 0; + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + + /* Just unsuspend quotas? */ + if (flags & DQUOT_SUSPENDED) + return vfs_quota_on_remount(sb, type); + if (!flags) + return 0; + /* Just updating flags needed? */ + if (sb_has_quota_loaded(sb, type)) { + mutex_lock(&dqopt->dqonoff_mutex); + /* Now do a reliable test... */ + if (!sb_has_quota_loaded(sb, type)) { + mutex_unlock(&dqopt->dqonoff_mutex); + goto load_quota; + } + if (flags & DQUOT_USAGE_ENABLED && + sb_has_quota_usage_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + if (flags & DQUOT_LIMITS_ENABLED && + sb_has_quota_limits_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); +out_lock: + mutex_unlock(&dqopt->dqonoff_mutex); + return ret; + } + +load_quota: + return vfs_load_quota_inode(inode, type, format_id, flags); +} + /* * This function is used when filesystem needs to initialize quotas * during mount time. 
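The hunks above split the old all-or-nothing quota state into DQUOT_USAGE_ENABLED, DQUOT_LIMITS_ENABLED and DQUOT_SUSPENDED bits, handled per quota type through vfs_quota_enable() and vfs_quota_disable(). As a rough illustration of the intended calling convention, here is a minimal, hypothetical caller that toggles only limit enforcement while keeping usage accounting loaded; the helper name, its error handling and the include are assumptions of this sketch, only the exported functions and DQUOT_* flags come from the patch itself.

#include <linux/quotaops.h>	/* assumed location of the declarations */

/*
 * Hypothetical helper (not part of the patch): switch limit enforcement
 * on or off for one quota type while usage accounting stays loaded.
 * @inode is the quota file inode the filesystem already tracks.
 */
static int example_set_enforcement(struct inode *inode, int type,
				   int format_id, int enforce)
{
	if (enforce)
		/*
		 * Assumes accounting is already enabled for @type; then
		 * vfs_quota_enable() only ORs in the limits bit.  If nothing
		 * is loaded yet, vfs_load_quota_inode() rejects a limits-only
		 * request with -EINVAL ("Usage always has to be set...").
		 */
		return vfs_quota_enable(inode, type, format_id,
					DQUOT_LIMITS_ENABLED);

	/*
	 * Clearing only DQUOT_LIMITS_ENABLED leaves the quota file loaded
	 * with DQUOT_USAGE_ENABLED still set, so block/inode usage keeps
	 * being tracked without limits being enforced.
	 */
	return vfs_quota_disable(inode->i_sb, type, DQUOT_LIMITS_ENABLED);
}
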
@@ -1860,7 +1907,8 @@ int vfs_quota_on_mount(struct super_block *sb, char *qf_name, error = security_quota_on(dentry); if (!error) - error = vfs_quota_on_inode(dentry->d_inode, type, format_id); + error = vfs_load_quota_inode(dentry->d_inode, type, format_id, + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); out: dput(dentry); @@ -1997,12 +2045,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d int rc; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!(dquot = dqget(sb, id, type))) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return -ESRCH; + dquot = dqget(sb, id, type); + if (!dquot) { + rc = -ESRCH; + goto out; } rc = do_set_dqblk(dquot, di); dqput(dquot); +out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return rc; } @@ -2013,7 +2063,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) struct mem_dqinfo *mi; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_enabled(sb, type)) { + if (!sb_has_quota_active(sb, type)) { mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } @@ -2032,11 +2082,12 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; + int err = 0; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_enabled(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return -ESRCH; + if (!sb_has_quota_active(sb, type)) { + err = -ESRCH; + goto out; } mi = sb_dqopt(sb)->info + type; spin_lock(&dq_data_lock); @@ -2050,8 +2101,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mark_info_dirty(sb, type); /* Force write to disk */ sb->dq_op->write_info(sb, type); +out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return 0; + return err; } struct quotactl_ops vfs_quotactl_ops = { @@ -2213,9 +2265,11 @@ EXPORT_SYMBOL(register_quota_format); EXPORT_SYMBOL(unregister_quota_format); EXPORT_SYMBOL(dqstats); EXPORT_SYMBOL(dq_data_lock); +EXPORT_SYMBOL(vfs_quota_enable); EXPORT_SYMBOL(vfs_quota_on); EXPORT_SYMBOL(vfs_quota_on_path); EXPORT_SYMBOL(vfs_quota_on_mount); +EXPORT_SYMBOL(vfs_quota_disable); EXPORT_SYMBOL(vfs_quota_off); EXPORT_SYMBOL(vfs_quota_sync); EXPORT_SYMBOL(vfs_get_dqinfo); diff --git a/fs/quota.c b/fs/quota.c index b7fe44e01618..8678d9f35ee9 100644 --- a/fs/quota.c +++ b/fs/quota.c @@ -73,7 +73,7 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid case Q_SETQUOTA: case Q_GETQUOTA: /* This is just informative test so we are satisfied without a lock */ - if (!sb_has_quota_enabled(sb, type)) + if (!sb_has_quota_active(sb, type)) return -ESRCH; } @@ -175,7 +175,7 @@ static void quota_sync_sb(struct super_block *sb, int type) for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; - if (!sb_has_quota_enabled(sb, cnt)) + if (!sb_has_quota_active(sb, cnt)) continue; mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); @@ -201,7 +201,7 @@ restart: for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && type != cnt) continue; - if (!sb_has_quota_enabled(sb, cnt)) + if (!sb_has_quota_active(sb, cnt)) continue; if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) @@ -245,7 +245,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __u32 fmt; down_read(&sb_dqopt(sb)->dqptr_sem); - if (!sb_has_quota_enabled(sb, type)) { + if 
(!sb_has_quota_active(sb, type)) { up_read(&sb_dqopt(sb)->dqptr_sem); return -ESRCH; } -- cgit From ee0d5ffe0da2aa992004447113e28622621a983f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 18:11:50 +0200 Subject: ext3: Use sb_any_quota_loaded() instead of sb_any_quota_enabled() Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ext3/super.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index f6c94f232ec1..250ec53195cb 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1035,8 +1035,7 @@ static int parse_options (char *options, struct super_block *sb, case Opt_grpjquota: qtype = GRPQUOTA; set_qf_name: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { printk(KERN_ERR "EXT3-fs: Cannot change journaled " @@ -1075,8 +1074,7 @@ set_qf_name: case Opt_offgrpjquota: qtype = GRPQUOTA; clear_qf_name: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { printk(KERN_ERR "EXT3-fs: Cannot change " "journaled quota options when " @@ -1095,8 +1093,7 @@ clear_qf_name: case Opt_jqfmt_vfsv0: qfmt = QFMT_VFS_V0; set_qf_format: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { printk(KERN_ERR "EXT3-fs: Cannot change " "journaled quota options when " @@ -1115,8 +1112,7 @@ set_qf_format: set_opt(sbi->s_mount_opt, GRPQUOTA); break; case Opt_noquota: - if (sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) { + if (sb_any_quota_loaded(sb)) { printk(KERN_ERR "EXT3-fs: Cannot change quota " "options when quota turned on.\n"); return 0; -- cgit From 17bd13b31ce4fe7f789d8848e8cbc8cb42b10544 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 18:14:35 +0200 Subject: ext4: Use sb_any_quota_loaded() instead of sb_any_quota_enabled() Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ext4/super.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 04158ad74dbb..49fcf8864e76 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1142,8 +1142,7 @@ static int parse_options(char *options, struct super_block *sb, case Opt_grpjquota: qtype = GRPQUOTA; set_qf_name: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { printk(KERN_ERR "EXT4-fs: Cannot change journaled " @@ -1182,8 +1181,7 @@ set_qf_name: case Opt_offgrpjquota: qtype = GRPQUOTA; clear_qf_name: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { printk(KERN_ERR "EXT4-fs: Cannot change " "journaled quota options when " @@ -1202,8 +1200,7 @@ clear_qf_name: case Opt_jqfmt_vfsv0: qfmt = QFMT_VFS_V0; set_qf_format: - if ((sb_any_quota_enabled(sb) || - sb_any_quota_suspended(sb)) && + if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { printk(KERN_ERR "EXT4-fs: Cannot change " "journaled quota options when " @@ -1222,7 +1219,7 @@ set_qf_format: set_opt(sbi->s_mount_opt, GRPQUOTA); break; case Opt_noquota: - if (sb_any_quota_enabled(sb)) { + if (sb_any_quota_loaded(sb)) { printk(KERN_ERR "EXT4-fs: Cannot change quota " "options when quota turned on.\n"); return 0; -- cgit From 6929f891241d3fe3af01d28503b645e63241e49a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 
2008 18:16:36 +0200 Subject: reiserfs: Use sb_any_quota_loaded() instead of sb_any_quota_enabled(). Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/reiserfs/super.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 663a91f5dce8..a9b393a5815d 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -994,8 +994,7 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin if (c == 'u' || c == 'g') { int qtype = c == 'u' ? USRQUOTA : GRPQUOTA; - if ((sb_any_quota_enabled(s) || - sb_any_quota_suspended(s)) && + if (sb_any_quota_loaded(s) && (!*arg != !REISERFS_SB(s)->s_qf_names[qtype])) { reiserfs_warning(s, "reiserfs_parse_options: cannot change journaled quota options when quota turned on."); @@ -1041,8 +1040,7 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin "reiserfs_parse_options: unknown quota format specified."); return 0; } - if ((sb_any_quota_enabled(s) || - sb_any_quota_suspended(s)) && + if (sb_any_quota_loaded(s) && *qfmt != REISERFS_SB(s)->s_jquota_fmt) { reiserfs_warning(s, "reiserfs_parse_options: cannot change journaled quota options when quota turned on."); @@ -1067,7 +1065,7 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin } /* This checking is not precise wrt the quota type but for our purposes it is sufficient */ if (!(*mount_options & (1 << REISERFS_QUOTA)) - && sb_any_quota_enabled(s)) { + && sb_any_quota_loaded(s)) { reiserfs_warning(s, "reiserfs_parse_options: quota options must be present when quota is turned on."); return 0; -- cgit From ca785ec66b991e9ca74dd9840fc014487ad095e1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 30 Sep 2008 17:53:37 +0200 Subject: quota: Introduce DQUOT_QUOTA_SYS_FILE flag If filesystem can handle quota files as system files hidden from users, we can skip a lot of cache invalidation, syncing, inode flags setting etc. when turning quotas on, off and quota_sync. Allow filesystem to indicate that it is hiding quota files from users by DQUOT_QUOTA_SYS_FILE flag. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 45 ++++++++++++++++++++++++++++++--------------- fs/quota.c | 3 +++ 2 files changed, 33 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 7569633efe0e..74185c34a4f0 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -1631,6 +1631,11 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) dqopt->ops[cnt] = NULL; } mutex_unlock(&dqopt->dqonoff_mutex); + + /* Skip syncing and setting flags if quota files are hidden */ + if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) + goto put_inodes; + /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). */ if (sb->s_op->sync_fs) @@ -1655,6 +1660,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) mark_inode_dirty(toputinode[cnt]); } mutex_unlock(&dqopt->dqonoff_mutex); + } + if (sb->s_bdev) + invalidate_bdev(sb->s_bdev); +put_inodes: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (toputinode[cnt]) { /* On remount RO, we keep the inode pointer so that we * can reenable quota on the subsequent remount RW. 
We * have to check 'flags' variable and not use sb_has_ @@ -1667,8 +1678,6 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) else if (!toputinode[cnt]->i_nlink) ret = -EBUSY; } - if (sb->s_bdev) - invalidate_bdev(sb->s_bdev); return ret; } @@ -1715,25 +1724,31 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, goto out_fmt; } - /* As we bypass the pagecache we must now flush the inode so that - * we see all the changes from userspace... */ - write_inode_now(inode, 1); - /* And now flush the block cache so that kernel sees the changes */ - invalidate_bdev(sb->s_bdev); + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* As we bypass the pagecache we must now flush the inode so + * that we see all the changes from userspace... */ + write_inode_now(inode, 1); + /* And now flush the block cache so that kernel sees the + * changes */ + invalidate_bdev(sb->s_bdev); + } mutex_lock(&inode->i_mutex); mutex_lock(&dqopt->dqonoff_mutex); if (sb_has_quota_loaded(sb, type)) { error = -EBUSY; goto out_lock; } - /* We don't want quota and atime on quota files (deadlocks possible) - * Also nobody should write to the file - we use special IO operations - * which ignore the immutable bit. */ - down_write(&dqopt->dqptr_sem); - oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); - inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; - up_write(&dqopt->dqptr_sem); - sb->dq_op->drop(inode); + + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* We don't want quota and atime on quota files (deadlocks + * possible) Also nobody should write to the file - we use + * special IO operations which ignore the immutable bit. */ + down_write(&dqopt->dqptr_sem); + oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); + inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; + up_write(&dqopt->dqptr_sem); + sb->dq_op->drop(inode); + } error = -EIO; dqopt->files[type] = igrab(inode); diff --git a/fs/quota.c b/fs/quota.c index 8678d9f35ee9..4a8c94f05f76 100644 --- a/fs/quota.c +++ b/fs/quota.c @@ -160,6 +160,9 @@ static void quota_sync_sb(struct super_block *sb, int type) int cnt; sb->s_qcop->quota_sync(sb, type); + + if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) + return; /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... */ -- cgit From cf770c137122b78470a67ebd5498947869a09197 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 21 Sep 2008 23:17:53 +0200 Subject: quota: Move quotaio_v[12].h from include/linux/ to fs/ Since these include files are used only by implementation of quota formats, there's no need to have them in include/linux/. 
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/quota_v1.c | 3 ++- fs/quota_v2.c | 7 ++--- fs/quotaio_v1.h | 33 ++++++++++++++++++++++++ fs/quotaio_v2.h | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 fs/quotaio_v1.h create mode 100644 fs/quotaio_v2.h (limited to 'fs') diff --git a/fs/quota_v1.c b/fs/quota_v1.c index 3e078eee5644..b4af1c69ad16 100644 --- a/fs/quota_v1.c +++ b/fs/quota_v1.c @@ -3,13 +3,14 @@ #include #include #include -#include #include #include #include #include +#include "quotaio_v1.h" + MODULE_AUTHOR("Jan Kara"); MODULE_DESCRIPTION("Old quota format support"); MODULE_LICENSE("GPL"); diff --git a/fs/quota_v2.c b/fs/quota_v2.c index 51c4717f7c6a..a21d1a7c356a 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -15,6 +14,8 @@ #include +#include "quotaio_v2.h" + MODULE_AUTHOR("Jan Kara"); MODULE_DESCRIPTION("Quota format v2 support"); MODULE_LICENSE("GPL"); @@ -129,8 +130,8 @@ static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); d->dqb_itime = cpu_to_le64(m->dqb_itime); - d->dqb_bhardlimit = cpu_to_le32(v2_qbtos(m->dqb_bhardlimit)); - d->dqb_bsoftlimit = cpu_to_le32(v2_qbtos(m->dqb_bsoftlimit)); + d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); + d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); d->dqb_id = cpu_to_le32(id); diff --git a/fs/quotaio_v1.h b/fs/quotaio_v1.h new file mode 100644 index 000000000000..746654b5de70 --- /dev/null +++ b/fs/quotaio_v1.h @@ -0,0 +1,33 @@ +#ifndef _LINUX_QUOTAIO_V1_H +#define _LINUX_QUOTAIO_V1_H + +#include + +/* + * The following constants define the amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). The timer is started when the user crosses + * their soft limit, it is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ +#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. 
+ */ +struct v1_disk_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) + +#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/fs/quotaio_v2.h b/fs/quotaio_v2.h new file mode 100644 index 000000000000..303d7cbe30d4 --- /dev/null +++ b/fs/quotaio_v2.h @@ -0,0 +1,79 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTAIO_V2_H +#define _LINUX_QUOTAIO_V2_H + +#include +#include + +/* + * Definitions of magics and versions of current quota files + */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +#define V2_INITQVERSIONS {\ + 0, /* USRQUOTA */\ + 0 /* GRPQUOTA */\ +} + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is a radix tree whose leaves point + * to blocks of these structures. + */ +struct v2_disk_dqblk { + __le32 dqb_id; /* id this quota applies to */ + __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __le32 dqb_isoftlimit; /* preferred inode limit */ + __le32 dqb_curinodes; /* current # allocated inodes */ + __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __le64 dqb_curspace; /* current space occupied (in bytes) */ + __le64 dqb_btime; /* time limit for excessive disk use */ + __le64 dqb_itime; /* time limit for excessive inode use */ +}; + +/* + * Here are header structures as written on disk and their in-memory copies + */ +/* First generic header */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + +/* Header with type and version specific information */ +struct v2_disk_dqinfo { + __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ + __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ + __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ + __le32 dqi_blocks; /* Number of blocks in file */ + __le32 dqi_free_blk; /* Number of first free block in the list */ + __le32 dqi_free_entry; /* Number of block with at least one free entry */ +}; + +/* + * Structure of header of block with quota structures. 
It is padded to 16 bytes so + * there will be space for exactly 21 quota-entries in a block + */ +struct v2_disk_dqdbheader { + __le32 dqdh_next_free; /* Number of next block with free entry */ + __le32 dqdh_prev_free; /* Number of previous block with free entry */ + __le16 dqdh_entries; /* Number of valid entries in block */ + __le16 dqdh_pad1; + __le32 dqdh_pad2; +}; + +#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ +#define V2_DQBLKSIZE_BITS 10 +#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ +#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ +#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ +#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one blocks */ + +#endif /* _LINUX_QUOTAIO_V2_H */ -- cgit From 1ccd14b9c271c1ac6eec5c5ec5def433100e7248 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 22 Sep 2008 05:54:49 +0200 Subject: quota: Split off quota tree handling into a separate file There is going to be a new version of quota format having 64-bit quota limits and a new quota format for OCFS2. They are both going to use the same tree structure as VFSv0 quota format. So split out tree handling into a separate file and make size of leaf blocks, amount of space usable in each block (needed for checksumming) and structures contained in them configurable so that the code can be shared. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/Kconfig | 5 + fs/Makefile | 1 + fs/quota_tree.c | 645 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/quota_tree.h | 25 +++ fs/quota_v2.c | 596 +++++---------------------------------------------- fs/quotaio_v2.h | 33 +-- 6 files changed, 735 insertions(+), 570 deletions(-) create mode 100644 fs/quota_tree.c create mode 100644 fs/quota_tree.h (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index b93425ad15de..c1ce3d8831d8 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -302,6 +302,10 @@ config PRINT_QUOTA_WARNING Note that this behavior is currently deprecated and may go away in future. Please use notification via netlink socket instead. +# Generic support for tree structured quota files. Seleted when needed. +config QUOTA_TREE + tristate + config QFMT_V1 tristate "Old quota format support" depends on QUOTA @@ -313,6 +317,7 @@ config QFMT_V1 config QFMT_V2 tristate "Quota format v2 support" depends on QUOTA + select QUOTA_TREE help This quota format allows using quotas with 32-bit UIDs/GIDs. If you need this functionality say Y here. 
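The generic tree code split out by this commit is meant to be driven through a struct qtree_mem_dqinfo describing the block geometry plus a struct qtree_fmt_operations converting between the in-memory dquot and the format's on-disk entry, which is exactly how fs/quota_v2.c is rewired further down in this series. The condensed sketch below shows how a hypothetical new format might hook in; all myfmt_* names and the example on-disk layout are invented for illustration, the includes are abbreviated, and only the qtree_* helpers and dqi_*/dqb_* fields that appear in quota_tree.c and quota_v2.c here are taken from the patches.

#include <linux/quota.h>
#include "quota_tree.h"	/* or wherever qtree_mem_dqinfo ends up declared */

/* Invented on-disk entry for the hypothetical format. */
struct myfmt_disk_dqblk {
	__le32 dqb_id;
	__le64 dqb_curspace;
	__le64 dqb_bhardlimit;
};

static void myfmt_mem2disk(void *dp, struct dquot *dquot)
{
	struct myfmt_disk_dqblk *d = dp;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_curspace = cpu_to_le64(dquot->dq_dqb.dqb_curspace);
	d->dqb_bhardlimit = cpu_to_le64(dquot->dq_dqb.dqb_bhardlimit);
}

static void myfmt_disk2mem(struct dquot *dquot, void *dp)
{
	struct myfmt_disk_dqblk *d = dp;

	dquot->dq_dqb.dqb_curspace = le64_to_cpu(d->dqb_curspace);
	dquot->dq_dqb.dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
}

static int myfmt_is_id(void *dp, struct dquot *dquot)
{
	struct myfmt_disk_dqblk *d = dp;

	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

static struct qtree_fmt_operations myfmt_qtree_ops = {
	.mem2disk_dqblk	= myfmt_mem2disk,
	.disk2mem_dqblk	= myfmt_disk2mem,
	.is_id		= myfmt_is_id,
};

/* Fill the geometry once, typically from the format's read_file_info(). */
static void myfmt_setup_qtree(struct qtree_mem_dqinfo *qinfo,
			      struct super_block *sb, int type)
{
	qinfo->dqi_sb = sb;
	qinfo->dqi_type = type;
	qinfo->dqi_blocksize_bits = 10;		/* 1KB leaf blocks, as in v2 */
	qinfo->dqi_usable_bs = 1 << qinfo->dqi_blocksize_bits;
	qinfo->dqi_entry_size = sizeof(struct myfmt_disk_dqblk);
	qinfo->dqi_qtree_depth = qtree_depth(qinfo);	/* same helper quota_v2.c uses */
	qinfo->dqi_ops = &myfmt_qtree_ops;
	/* dqi_blocks, dqi_free_blk, dqi_free_entry come from the file header. */
}

/* The format's per-dquot operations then delegate to the shared tree code: */
static int myfmt_read_dquot(struct qtree_mem_dqinfo *qinfo, struct dquot *dquot)
{
	return qtree_read_dquot(qinfo, dquot);
}
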
diff --git a/fs/Makefile b/fs/Makefile index e6f423d1d228..c830611550d3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_GENERIC_ACL) += generic_acl.o obj-$(CONFIG_QUOTA) += dquot.o obj-$(CONFIG_QFMT_V1) += quota_v1.o obj-$(CONFIG_QFMT_V2) += quota_v2.o +obj-$(CONFIG_QUOTA_TREE) += quota_tree.o obj-$(CONFIG_QUOTACTL) += quota.o obj-$(CONFIG_PROC_FS) += proc/ diff --git a/fs/quota_tree.c b/fs/quota_tree.c new file mode 100644 index 000000000000..953404c95b17 --- /dev/null +++ b/fs/quota_tree.c @@ -0,0 +1,645 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quota_tree.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Quota trie support"); +MODULE_LICENSE("GPL"); + +#define __QUOTA_QT_PARANOIA + +typedef char *dqbuf_t; + +static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + + depth = info->dqi_qtree_depth - depth - 1; + while (depth--) + id /= epb; + return id % epb; +} + +/* Number of entries in one blocks */ +static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) +{ + return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) + / info->dqi_entry_size; +} + +static dqbuf_t getdqbuf(size_t size) +{ + dqbuf_t buf = kmalloc(size, GFP_NOFS); + if (!buf) + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + return buf; +} + +static inline void freedqbuf(dqbuf_t buf) +{ + kfree(buf); +} + +static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + memset(buf, 0, info->dqi_usable_bs); + return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +/* Remove empty block from list and return it */ +static int get_free_dqblk(struct qtree_mem_dqinfo *info) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int ret, blk; + + if (!buf) + return -ENOMEM; + if (info->dqi_free_blk) { + blk = info->dqi_free_blk; + ret = read_blk(info, blk, buf); + if (ret < 0) + goto out_buf; + info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); + } + else { + memset(buf, 0, info->dqi_usable_bs); + /* Assure block allocation... 
*/ + ret = write_blk(info, info->dqi_blocks, buf); + if (ret < 0) + goto out_buf; + blk = info->dqi_blocks++; + } + mark_info_dirty(info->dqi_sb, info->dqi_type); + ret = blk; +out_buf: + freedqbuf(buf); + return ret; +} + +/* Insert empty block to the list */ +static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); + dh->dqdh_prev_free = cpu_to_le32(0); + dh->dqdh_entries = cpu_to_le16(0); + err = write_blk(info, blk, buf); + if (err < 0) + return err; + info->dqi_free_blk = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +} + +/* Remove given block from the list of blocks with free entries */ +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + uint nextblk = le32_to_cpu(dh->dqdh_next_free); + uint prevblk = le32_to_cpu(dh->dqdh_prev_free); + int err; + + if (!tmpbuf) + return -ENOMEM; + if (nextblk) { + err = read_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + dh->dqdh_prev_free; + err = write_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + } + if (prevblk) { + err = read_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = + dh->dqdh_next_free; + err = write_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + } else { + info->dqi_free_entry = nextblk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + } + freedqbuf(tmpbuf); + dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); + /* No matter whether write succeeds block is out of list */ + if (write_blk(info, blk, buf) < 0) + printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Insert given block to the beginning of list with free entries */ +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + if (!tmpbuf) + return -ENOMEM; + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); + dh->dqdh_prev_free = cpu_to_le32(0); + err = write_blk(info, blk, buf); + if (err < 0) + goto out_buf; + if (info->dqi_free_entry) { + err = read_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + cpu_to_le32(blk); + err = write_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + } + freedqbuf(tmpbuf); + info->dqi_free_entry = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Is the entry in the block free? 
*/ +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) +{ + int i; + + for (i = 0; i < info->dqi_entry_size; i++) + if (disk[i]) + return 0; + return 1; +} +EXPORT_SYMBOL(qtree_entry_unused); + +/* Find space for dquot */ +static uint find_free_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, int *err) +{ + uint blk, i; + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *ddquot; + + *err = 0; + if (!buf) { + *err = -ENOMEM; + return 0; + } + dh = (struct qt_disk_dqdbheader *)buf; + if (info->dqi_free_entry) { + blk = info->dqi_free_entry; + *err = read_blk(info, blk, buf); + if (*err < 0) + goto out_buf; + } else { + blk = get_free_dqblk(info); + if ((int)blk < 0) { + *err = blk; + freedqbuf(buf); + return 0; + } + memset(buf, 0, info->dqi_usable_bs); + /* This is enough as block is already zeroed and entry list is empty... */ + info->dqi_free_entry = blk; + mark_info_dirty(dquot->dq_sb, dquot->dq_type); + } + /* Block will be full? */ + if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { + *err = remove_free_dqentry(info, buf, blk); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't " + "remove block (%u) from entry free list.\n", + blk); + goto out_buf; + } + } + le16_add_cpu(&dh->dqdh_entries, 1); + /* Find free structure in block */ + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); + i++, ddquot += info->dqi_entry_size); +#ifdef __QUOTA_QT_PARANOIA + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " + "but it shouldn't.\n"); + *err = -EIO; + goto out_buf; + } +#endif + *err = write_blk(info, blk, buf); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " + "data block %u.\n", blk); + goto out_buf; + } + dquot->dq_off = (blk << info->dqi_blocksize_bits) + + sizeof(struct qt_disk_dqdbheader) + + i * info->dqi_entry_size; + freedqbuf(buf); + return blk; +out_buf: + freedqbuf(buf); + return 0; +} + +/* Insert reference to structure into the trie */ +static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *treeblk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0, newson = 0, newact = 0; + __le32 *ref; + uint newblk; + + if (!buf) + return -ENOMEM; + if (!*treeblk) { + ret = get_free_dqblk(info); + if (ret < 0) + goto out_buf; + *treeblk = ret; + memset(buf, 0, info->dqi_usable_bs); + newact = 1; + } else { + ret = read_blk(info, *treeblk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read tree quota block " + "%u.\n", *treeblk); + goto out_buf; + } + } + ref = (__le32 *)buf; + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!newblk) + newson = 1; + if (depth == info->dqi_qtree_depth - 1) { +#ifdef __QUOTA_QT_PARANOIA + if (newblk) { + printk(KERN_ERR "VFS: Inserting already present quota " + "entry (block %u).\n", + le32_to_cpu(ref[get_index(info, + dquot->dq_id, depth)])); + ret = -EIO; + goto out_buf; + } +#endif + newblk = find_free_dqentry(info, dquot, &ret); + } else { + ret = do_insert_tree(info, dquot, &newblk, depth+1); + } + if (newson && ret >= 0) { + ref[get_index(info, dquot->dq_id, depth)] = + cpu_to_le32(newblk); + ret = write_blk(info, *treeblk, buf); + } else if (newact && ret < 0) { + put_free_dqblk(info, buf, *treeblk); + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Wrapper for inserting quota structure into tree 
*/ +static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + int tmp = QT_TREEOFF; + return do_insert_tree(info, dquot, &tmp, 0); +} + +/* + * We don't have to be afraid of deadlocks as we never have quotas on quota files... + */ +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + ssize_t ret; + dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); + + if (!ddquot) + return -ENOMEM; + + /* dq_off is guarded by dqio_mutex */ + if (!dquot->dq_off) { + ret = dq_insert_tree(info, dquot); + if (ret < 0) { + printk(KERN_ERR "VFS: Error %zd occurred while " + "creating quota.\n", ret); + freedqbuf(ddquot); + return ret; + } + } + spin_lock(&dq_data_lock); + info->dqi_ops->mem2disk_dqblk(ddquot, dquot); + spin_unlock(&dq_data_lock); + ret = sb->s_op->quota_write(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + sb->s_id); + if (ret >= 0) + ret = -ENOSPC; + } else { + ret = 0; + } + dqstats.writes++; + freedqbuf(ddquot); + + return ret; +} +EXPORT_SYMBOL(qtree_write_dquot); + +/* Free dquot entry in data block */ +static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint blk) +{ + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + + if (!buf) + return -ENOMEM; + if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { + printk(KERN_ERR "VFS: Quota structure has offset to other " + "block (%u) than it should (%u).\n", blk, + (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); + goto out_buf; + } + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); + goto out_buf; + } + dh = (struct qt_disk_dqdbheader *)buf; + le16_add_cpu(&dh->dqdh_entries, -1); + if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? 
*/ + ret = remove_free_dqentry(info, buf, blk); + if (ret >= 0) + ret = put_free_dqblk(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't move quota data block (%u) " + "to free list.\n", blk); + goto out_buf; + } + } else { + memset(buf + + (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), + 0, info->dqi_entry_size); + if (le16_to_cpu(dh->dqdh_entries) == + qtree_dqstr_in_blk(info) - 1) { + /* Insert will write block itself */ + ret = insert_free_dqentry(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't insert quota data " + "block (%u) to free entry list.\n", blk); + goto out_buf; + } + } else { + ret = write_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't write quota data " + "block %u\n", blk); + goto out_buf; + } + } + } + dquot->dq_off = 0; /* Quota is now unattached */ +out_buf: + freedqbuf(buf); + return ret; +} + +/* Remove reference to dquot from tree */ +static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + uint newblk; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, *blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); + goto out_buf; + } + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (depth == info->dqi_qtree_depth - 1) { + ret = free_dqentry(info, dquot, newblk); + newblk = 0; + } else { + ret = remove_tree(info, dquot, &newblk, depth+1); + } + if (ret >= 0 && !newblk) { + int i; + ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); + /* Block got empty? */ + for (i = 0; + i < (info->dqi_usable_bs >> 2) && !ref[i]; + i++); + /* Don't put the root block into the free block list */ + if (i == (info->dqi_usable_bs >> 2) + && *blk != QT_TREEOFF) { + put_free_dqblk(info, buf, *blk); + *blk = 0; + } else { + ret = write_blk(info, *blk, buf); + if (ret < 0) + printk(KERN_ERR "VFS: Can't write quota tree " + "block %u.\n", *blk); + } + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Delete dquot from tree */ +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + uint tmp = QT_TREEOFF; + + if (!dquot->dq_off) /* Even not allocated? 
*/ + return 0; + return remove_tree(info, dquot, &tmp, 0); +} +EXPORT_SYMBOL(qtree_delete_dquot); + +/* Find entry in block */ +static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + int i; + char *ddquot; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); + i++, ddquot += info->dqi_entry_size); + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: Quota for id %u referenced " + "but not present.\n", dquot->dq_id); + ret = -EIO; + goto out_buf; + } else { + ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + qt_disk_dqdbheader) + i * info->dqi_entry_size; + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree */ +static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + ret = 0; + blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!blk) /* No reference? */ + goto out_buf; + if (depth < info->dqi_qtree_depth - 1) + ret = find_tree_dqentry(info, dquot, blk, depth+1); + else + ret = find_block_dqentry(info, dquot, blk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree - wrapper function */ +static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); +} + +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + loff_t offset; + dqbuf_t ddquot; + int ret = 0; + +#ifdef __QUOTA_QT_PARANOIA + /* Invalidated quota? */ + if (!sb_dqopt(dquot->dq_sb)->files[type]) { + printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); + return -EIO; + } +#endif + /* Do we know offset of the dquot entry in the quota file? */ + if (!dquot->dq_off) { + offset = find_dqentry(info, dquot); + if (offset <= 0) { /* Entry not present? 
*/ + if (offset < 0) + printk(KERN_ERR "VFS: Can't read quota " + "structure for id %u.\n", dquot->dq_id); + dquot->dq_off = 0; + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + ret = offset; + goto out; + } + dquot->dq_off = offset; + } + ddquot = getdqbuf(info->dqi_entry_size); + if (!ddquot) + return -ENOMEM; + ret = sb->s_op->quota_read(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + if (ret >= 0) + ret = -EIO; + printk(KERN_ERR "VFS: Error while reading quota " + "structure for id %u.\n", dquot->dq_id); + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + freedqbuf(ddquot); + goto out; + } + spin_lock(&dq_data_lock); + info->dqi_ops->disk2mem_dqblk(dquot, ddquot); + if (!dquot->dq_dqb.dqb_bhardlimit && + !dquot->dq_dqb.dqb_bsoftlimit && + !dquot->dq_dqb.dqb_ihardlimit && + !dquot->dq_dqb.dqb_isoftlimit) + set_bit(DQ_FAKE_B, &dquot->dq_flags); + spin_unlock(&dq_data_lock); + freedqbuf(ddquot); +out: + dqstats.reads++; + return ret; +} +EXPORT_SYMBOL(qtree_read_dquot); + +/* Check whether dquot should not be deleted. We know we are + * the only one operating on dquot (thanks to dq_lock) */ +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + return qtree_delete_dquot(info, dquot); + return 0; +} +EXPORT_SYMBOL(qtree_release_dquot); diff --git a/fs/quota_tree.h b/fs/quota_tree.h new file mode 100644 index 000000000000..a1ab8db81a51 --- /dev/null +++ b/fs/quota_tree.h @@ -0,0 +1,25 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTA_TREE_H +#define _LINUX_QUOTA_TREE_H + +#include +#include + +/* + * Structure of header of block with quota structures. 
It is padded to 16 bytes so + * there will be space for exactly 21 quota-entries in a block + */ +struct qt_disk_dqdbheader { + __le32 dqdh_next_free; /* Number of next block with free entry */ + __le32 dqdh_prev_free; /* Number of previous block with free entry */ + __le16 dqdh_entries; /* Number of valid entries in block */ + __le16 dqdh_pad1; + __le32 dqdh_pad2; +}; + +#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ + +#endif /* _LINUX_QUOTAIO_TREE_H */ diff --git a/fs/quota_v2.c b/fs/quota_v2.c index a21d1a7c356a..a87f1028a425 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -14,6 +14,7 @@ #include +#include "quota_tree.h" #include "quotaio_v2.h" MODULE_AUTHOR("Jan Kara"); @@ -22,10 +23,15 @@ MODULE_LICENSE("GPL"); #define __QUOTA_V2_PARANOIA -typedef char *dqbuf_t; +static void v2_mem2diskdqb(void *dp, struct dquot *dquot); +static void v2_disk2memdqb(struct dquot *dquot, void *dp); +static int v2_is_id(void *dp, struct dquot *dquot); -#define GETIDINDEX(id, depth) (((id) >> ((V2_DQTREEDEPTH-(depth)-1)*8)) & 0xff) -#define GETENTRIES(buf) ((struct v2_disk_dqblk *)(((char *)buf)+sizeof(struct v2_disk_dqdbheader))) +static struct qtree_fmt_operations v2_qtree_ops = { + .mem2disk_dqblk = v2_mem2diskdqb, + .disk2mem_dqblk = v2_disk2memdqb, + .is_id = v2_is_id, +}; #define QUOTABLOCK_BITS 10 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) @@ -64,7 +70,7 @@ static int v2_check_quota_file(struct super_block *sb, int type) static int v2_read_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct mem_dqinfo *info = sb_dqinfo(sb, type); ssize_t size; size = sb->s_op->quota_read(sb, type, (char *)&dinfo, @@ -80,9 +86,16 @@ static int v2_read_file_info(struct super_block *sb, int type) info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); - info->u.v2_i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); - info->u.v2_i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); - info->u.v2_i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + info->u.v2_i.i.dqi_sb = sb; + info->u.v2_i.i.dqi_type = type; + info->u.v2_i.i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + info->u.v2_i.i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + info->u.v2_i.i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + info->u.v2_i.i.dqi_blocksize_bits = V2_DQBLKSIZE_BITS; + info->u.v2_i.i.dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; + info->u.v2_i.i.dqi_qtree_depth = qtree_depth(&info->u.v2_i.i); + info->u.v2_i.i.dqi_entry_size = sizeof(struct v2_disk_dqblk); + info->u.v2_i.i.dqi_ops = &v2_qtree_ops; return 0; } @@ -90,7 +103,7 @@ static int v2_read_file_info(struct super_block *sb, int type) static int v2_write_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct mem_dqinfo *info = sb_dqinfo(sb, type); ssize_t size; spin_lock(&dq_data_lock); @@ -99,9 +112,9 @@ static int v2_write_file_info(struct super_block *sb, int type) dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); spin_unlock(&dq_data_lock); - dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.dqi_blocks); - dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.dqi_free_blk); - dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.dqi_free_entry); + dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.i.dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.i.dqi_free_blk); + 
dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.i.dqi_free_entry); size = sb->s_op->quota_write(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { @@ -112,8 +125,11 @@ static int v2_write_file_info(struct super_block *sb, int type) return 0; } -static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d) +static void v2_disk2memdqb(struct dquot *dquot, void *dp) { + struct v2_disk_dqblk *d = dp, empty; + struct mem_dqblk *m = &dquot->dq_dqb; + m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); @@ -122,10 +138,20 @@ static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d) m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); m->dqb_curspace = le64_to_cpu(d->dqb_curspace); m->dqb_btime = le64_to_cpu(d->dqb_btime); + /* We need to escape back all-zero structure */ + memset(&empty, 0, sizeof(struct v2_disk_dqblk)); + empty.dqb_itime = cpu_to_le64(1); + if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) + m->dqb_itime = 0; } -static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) +static void v2_mem2diskdqb(void *dp, struct dquot *dquot) { + struct v2_disk_dqblk *d = dp; + struct mem_dqblk *m = &dquot->dq_dqb; + struct qtree_mem_dqinfo *info = + &sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i; + d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); @@ -134,553 +160,35 @@ static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); - d->dqb_id = cpu_to_le32(id); -} - -static dqbuf_t getdqbuf(void) -{ - dqbuf_t buf = kmalloc(V2_DQBLKSIZE, GFP_NOFS); - if (!buf) - printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); - return buf; -} - -static inline void freedqbuf(dqbuf_t buf) -{ - kfree(buf); -} - -static inline ssize_t read_blk(struct super_block *sb, int type, uint blk, dqbuf_t buf) -{ - memset(buf, 0, V2_DQBLKSIZE); - return sb->s_op->quota_read(sb, type, (char *)buf, - V2_DQBLKSIZE, blk << V2_DQBLKSIZE_BITS); -} - -static inline ssize_t write_blk(struct super_block *sb, int type, uint blk, dqbuf_t buf) -{ - return sb->s_op->quota_write(sb, type, (char *)buf, - V2_DQBLKSIZE, blk << V2_DQBLKSIZE_BITS); -} - -/* Remove empty block from list and return it */ -static int get_free_dqblk(struct super_block *sb, int type) -{ - dqbuf_t buf = getdqbuf(); - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; - int ret, blk; - - if (!buf) - return -ENOMEM; - if (info->u.v2_i.dqi_free_blk) { - blk = info->u.v2_i.dqi_free_blk; - if ((ret = read_blk(sb, type, blk, buf)) < 0) - goto out_buf; - info->u.v2_i.dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); - } - else { - memset(buf, 0, V2_DQBLKSIZE); - /* Assure block allocation... 
*/ - if ((ret = write_blk(sb, type, info->u.v2_i.dqi_blocks, buf)) < 0) - goto out_buf; - blk = info->u.v2_i.dqi_blocks++; - } - mark_info_dirty(sb, type); - ret = blk; -out_buf: - freedqbuf(buf); - return ret; -} - -/* Insert empty block to the list */ -static int put_free_dqblk(struct super_block *sb, int type, dqbuf_t buf, uint blk) -{ - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; - int err; - - dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_blk); - dh->dqdh_prev_free = cpu_to_le32(0); - dh->dqdh_entries = cpu_to_le16(0); - info->u.v2_i.dqi_free_blk = blk; - mark_info_dirty(sb, type); - /* Some strange block. We had better leave it... */ - if ((err = write_blk(sb, type, blk, buf)) < 0) - return err; - return 0; + d->dqb_id = cpu_to_le32(dquot->dq_id); + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); } -/* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct super_block *sb, int type, dqbuf_t buf, uint blk) +static int v2_is_id(void *dp, struct dquot *dquot) { - dqbuf_t tmpbuf = getdqbuf(); - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; - uint nextblk = le32_to_cpu(dh->dqdh_next_free), prevblk = le32_to_cpu(dh->dqdh_prev_free); - int err; + struct v2_disk_dqblk *d = dp; + struct qtree_mem_dqinfo *info = + &sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i; - if (!tmpbuf) - return -ENOMEM; - if (nextblk) { - if ((err = read_blk(sb, type, nextblk, tmpbuf)) < 0) - goto out_buf; - ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = dh->dqdh_prev_free; - if ((err = write_blk(sb, type, nextblk, tmpbuf)) < 0) - goto out_buf; - } - if (prevblk) { - if ((err = read_blk(sb, type, prevblk, tmpbuf)) < 0) - goto out_buf; - ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_next_free = dh->dqdh_next_free; - if ((err = write_blk(sb, type, prevblk, tmpbuf)) < 0) - goto out_buf; - } - else { - info->u.v2_i.dqi_free_entry = nextblk; - mark_info_dirty(sb, type); - } - freedqbuf(tmpbuf); - dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); - /* No matter whether write succeeds block is out of list */ - if (write_blk(sb, type, blk, buf) < 0) - printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct super_block *sb, int type, dqbuf_t buf, uint blk) -{ - dqbuf_t tmpbuf = getdqbuf(); - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; - int err; - - if (!tmpbuf) - return -ENOMEM; - dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_entry); - dh->dqdh_prev_free = cpu_to_le32(0); - if ((err = write_blk(sb, type, blk, buf)) < 0) - goto out_buf; - if (info->u.v2_i.dqi_free_entry) { - if ((err = read_blk(sb, type, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) - goto out_buf; - ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = cpu_to_le32(blk); - if ((err = write_blk(sb, type, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) - goto out_buf; - } - freedqbuf(tmpbuf); - info->u.v2_i.dqi_free_entry = blk; - mark_info_dirty(sb, type); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Find space for dquot */ -static uint find_free_dqentry(struct dquot *dquot, int *err) -{ - struct super_block *sb = dquot->dq_sb; - struct mem_dqinfo *info = 
sb_dqopt(sb)->info+dquot->dq_type; - uint blk, i; - struct v2_disk_dqdbheader *dh; - struct v2_disk_dqblk *ddquot; - struct v2_disk_dqblk fakedquot; - dqbuf_t buf; - - *err = 0; - if (!(buf = getdqbuf())) { - *err = -ENOMEM; + if (qtree_entry_unused(info, dp)) return 0; - } - dh = (struct v2_disk_dqdbheader *)buf; - ddquot = GETENTRIES(buf); - if (info->u.v2_i.dqi_free_entry) { - blk = info->u.v2_i.dqi_free_entry; - if ((*err = read_blk(sb, dquot->dq_type, blk, buf)) < 0) - goto out_buf; - } - else { - blk = get_free_dqblk(sb, dquot->dq_type); - if ((int)blk < 0) { - *err = blk; - freedqbuf(buf); - return 0; - } - memset(buf, 0, V2_DQBLKSIZE); - /* This is enough as block is already zeroed and entry list is empty... */ - info->u.v2_i.dqi_free_entry = blk; - mark_info_dirty(sb, dquot->dq_type); - } - if (le16_to_cpu(dh->dqdh_entries)+1 >= V2_DQSTRINBLK) /* Block will be full? */ - if ((*err = remove_free_dqentry(sb, dquot->dq_type, buf, blk)) < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't remove block (%u) from entry free list.\n", blk); - goto out_buf; - } - le16_add_cpu(&dh->dqdh_entries, 1); - memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); - /* Find free structure in block */ - for (i = 0; i < V2_DQSTRINBLK && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)); i++); -#ifdef __QUOTA_V2_PARANOIA - if (i == V2_DQSTRINBLK) { - printk(KERN_ERR "VFS: find_free_dqentry(): Data block full but it shouldn't.\n"); - *err = -EIO; - goto out_buf; - } -#endif - if ((*err = write_blk(sb, dquot->dq_type, blk, buf)) < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota data block %u.\n", blk); - goto out_buf; - } - dquot->dq_off = (blk<dqb_id) == dquot->dq_id; } -/* Insert reference to structure into the trie */ -static int do_insert_tree(struct dquot *dquot, uint *treeblk, int depth) -{ - struct super_block *sb = dquot->dq_sb; - dqbuf_t buf; - int ret = 0, newson = 0, newact = 0; - __le32 *ref; - uint newblk; - - if (!(buf = getdqbuf())) - return -ENOMEM; - if (!*treeblk) { - ret = get_free_dqblk(sb, dquot->dq_type); - if (ret < 0) - goto out_buf; - *treeblk = ret; - memset(buf, 0, V2_DQBLKSIZE); - newact = 1; - } - else { - if ((ret = read_blk(sb, dquot->dq_type, *treeblk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't read tree quota block %u.\n", *treeblk); - goto out_buf; - } - } - ref = (__le32 *)buf; - newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); - if (!newblk) - newson = 1; - if (depth == V2_DQTREEDEPTH-1) { -#ifdef __QUOTA_V2_PARANOIA - if (newblk) { - printk(KERN_ERR "VFS: Inserting already present quota entry (block %u).\n", le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)])); - ret = -EIO; - goto out_buf; - } -#endif - newblk = find_free_dqentry(dquot, &ret); - } - else - ret = do_insert_tree(dquot, &newblk, depth+1); - if (newson && ret >= 0) { - ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(newblk); - ret = write_blk(sb, dquot->dq_type, *treeblk, buf); - } - else if (newact && ret < 0) - put_free_dqblk(sb, dquot->dq_type, buf, *treeblk); -out_buf: - freedqbuf(buf); - return ret; -} - -/* Wrapper for inserting quota structure into tree */ -static inline int dq_insert_tree(struct dquot *dquot) +static int v2_read_dquot(struct dquot *dquot) { - int tmp = V2_DQTREEOFF; - return do_insert_tree(dquot, &tmp, 0); + return qtree_read_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); } -/* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... 
- */ static int v2_write_dquot(struct dquot *dquot) { - int type = dquot->dq_type; - ssize_t ret; - struct v2_disk_dqblk ddquot, empty; - - /* dq_off is guarded by dqio_mutex */ - if (!dquot->dq_off) - if ((ret = dq_insert_tree(dquot)) < 0) { - printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); - return ret; - } - spin_lock(&dq_data_lock); - mem2diskdqb(&ddquot, &dquot->dq_dqb, dquot->dq_id); - /* Argh... We may need to write structure full of zeroes but that would be - * treated as an empty place by the rest of the code. Format change would - * be definitely cleaner but the problems probably are not worth it */ - memset(&empty, 0, sizeof(struct v2_disk_dqblk)); - if (!memcmp(&empty, &ddquot, sizeof(struct v2_disk_dqblk))) - ddquot.dqb_itime = cpu_to_le64(1); - spin_unlock(&dq_data_lock); - ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, - (char *)&ddquot, sizeof(struct v2_disk_dqblk), dquot->dq_off); - if (ret != sizeof(struct v2_disk_dqblk)) { - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", dquot->dq_sb->s_id); - if (ret >= 0) - ret = -ENOSPC; - } - else - ret = 0; - dqstats.writes++; - - return ret; + return qtree_write_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); } -/* Free dquot entry in data block */ -static int free_dqentry(struct dquot *dquot, uint blk) -{ - struct super_block *sb = dquot->dq_sb; - int type = dquot->dq_type; - struct v2_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(); - int ret = 0; - - if (!buf) - return -ENOMEM; - if (dquot->dq_off >> V2_DQBLKSIZE_BITS != blk) { - printk(KERN_ERR "VFS: Quota structure has offset to other " - "block (%u) than it should (%u).\n", blk, - (uint)(dquot->dq_off >> V2_DQBLKSIZE_BITS)); - goto out_buf; - } - if ((ret = read_blk(sb, type, blk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); - goto out_buf; - } - dh = (struct v2_disk_dqdbheader *)buf; - le16_add_cpu(&dh->dqdh_entries, -1); - if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? 
*/ - if ((ret = remove_free_dqentry(sb, type, buf, blk)) < 0 || - (ret = put_free_dqblk(sb, type, buf, blk)) < 0) { - printk(KERN_ERR "VFS: Can't move quota data block (%u) " - "to free list.\n", blk); - goto out_buf; - } - } - else { - memset(buf+(dquot->dq_off & ((1 << V2_DQBLKSIZE_BITS)-1)), 0, - sizeof(struct v2_disk_dqblk)); - if (le16_to_cpu(dh->dqdh_entries) == V2_DQSTRINBLK-1) { - /* Insert will write block itself */ - if ((ret = insert_free_dqentry(sb, type, buf, blk)) < 0) { - printk(KERN_ERR "VFS: Can't insert quota data block (%u) to free entry list.\n", blk); - goto out_buf; - } - } - else - if ((ret = write_blk(sb, type, blk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't write quota data " - "block %u\n", blk); - goto out_buf; - } - } - dquot->dq_off = 0; /* Quota is now unattached */ -out_buf: - freedqbuf(buf); - return ret; -} - -/* Remove reference to dquot from tree */ -static int remove_tree(struct dquot *dquot, uint *blk, int depth) -{ - struct super_block *sb = dquot->dq_sb; - int type = dquot->dq_type; - dqbuf_t buf = getdqbuf(); - int ret = 0; - uint newblk; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - if ((ret = read_blk(sb, type, *blk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); - goto out_buf; - } - newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); - if (depth == V2_DQTREEDEPTH-1) { - ret = free_dqentry(dquot, newblk); - newblk = 0; - } - else - ret = remove_tree(dquot, &newblk, depth+1); - if (ret >= 0 && !newblk) { - int i; - ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(0); - for (i = 0; i < V2_DQBLKSIZE && !buf[i]; i++); /* Block got empty? */ - /* Don't put the root block into the free block list */ - if (i == V2_DQBLKSIZE && *blk != V2_DQTREEOFF) { - put_free_dqblk(sb, type, buf, *blk); - *blk = 0; - } - else - if ((ret = write_blk(sb, type, *blk, buf)) < 0) - printk(KERN_ERR "VFS: Can't write quota tree " - "block %u.\n", *blk); - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Delete dquot from tree */ -static int v2_delete_dquot(struct dquot *dquot) -{ - uint tmp = V2_DQTREEOFF; - - if (!dquot->dq_off) /* Even not allocated? */ - return 0; - return remove_tree(dquot, &tmp, 0); -} - -/* Find entry in block */ -static loff_t find_block_dqentry(struct dquot *dquot, uint blk) -{ - dqbuf_t buf = getdqbuf(); - loff_t ret = 0; - int i; - struct v2_disk_dqblk *ddquot = GETENTRIES(buf); - - if (!buf) - return -ENOMEM; - if ((ret = read_blk(dquot->dq_sb, dquot->dq_type, blk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - if (dquot->dq_id) - for (i = 0; i < V2_DQSTRINBLK && - le32_to_cpu(ddquot[i].dqb_id) != dquot->dq_id; i++); - else { /* ID 0 as a bit more complicated searching... 
*/ - struct v2_disk_dqblk fakedquot; - - memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); - for (i = 0; i < V2_DQSTRINBLK; i++) - if (!le32_to_cpu(ddquot[i].dqb_id) && - memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk))) - break; - } - if (i == V2_DQSTRINBLK) { - printk(KERN_ERR "VFS: Quota for id %u referenced " - "but not present.\n", dquot->dq_id); - ret = -EIO; - goto out_buf; - } - else - ret = (blk << V2_DQBLKSIZE_BITS) + sizeof(struct - v2_disk_dqdbheader) + i * sizeof(struct v2_disk_dqblk); -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree */ -static loff_t find_tree_dqentry(struct dquot *dquot, uint blk, int depth) -{ - dqbuf_t buf = getdqbuf(); - loff_t ret = 0; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - if ((ret = read_blk(dquot->dq_sb, dquot->dq_type, blk, buf)) < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - ret = 0; - blk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); - if (!blk) /* No reference? */ - goto out_buf; - if (depth < V2_DQTREEDEPTH-1) - ret = find_tree_dqentry(dquot, blk, depth+1); - else - ret = find_block_dqentry(dquot, blk); -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree - wrapper function */ -static inline loff_t find_dqentry(struct dquot *dquot) -{ - return find_tree_dqentry(dquot, V2_DQTREEOFF, 0); -} - -static int v2_read_dquot(struct dquot *dquot) -{ - int type = dquot->dq_type; - loff_t offset; - struct v2_disk_dqblk ddquot, empty; - int ret = 0; - -#ifdef __QUOTA_V2_PARANOIA - /* Invalidated quota? */ - if (!dquot->dq_sb || !sb_dqopt(dquot->dq_sb)->files[type]) { - printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); - return -EIO; - } -#endif - offset = find_dqentry(dquot); - if (offset <= 0) { /* Entry not present? */ - if (offset < 0) - printk(KERN_ERR "VFS: Can't read quota " - "structure for id %u.\n", dquot->dq_id); - dquot->dq_off = 0; - set_bit(DQ_FAKE_B, &dquot->dq_flags); - memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - ret = offset; - } - else { - dquot->dq_off = offset; - if ((ret = dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, - (char *)&ddquot, sizeof(struct v2_disk_dqblk), offset)) - != sizeof(struct v2_disk_dqblk)) { - if (ret >= 0) - ret = -EIO; - printk(KERN_ERR "VFS: Error while reading quota " - "structure for id %u.\n", dquot->dq_id); - memset(&ddquot, 0, sizeof(struct v2_disk_dqblk)); - } - else { - ret = 0; - /* We need to escape back all-zero structure */ - memset(&empty, 0, sizeof(struct v2_disk_dqblk)); - empty.dqb_itime = cpu_to_le64(1); - if (!memcmp(&empty, &ddquot, sizeof(struct v2_disk_dqblk))) - ddquot.dqb_itime = 0; - } - disk2memdqb(&dquot->dq_dqb, &ddquot); - if (!dquot->dq_dqb.dqb_bhardlimit && - !dquot->dq_dqb.dqb_bsoftlimit && - !dquot->dq_dqb.dqb_ihardlimit && - !dquot->dq_dqb.dqb_isoftlimit) - set_bit(DQ_FAKE_B, &dquot->dq_flags); - } - dqstats.reads++; - - return ret; -} - -/* Check whether dquot should not be deleted. 
We know we are - * the only one operating on dquot (thanks to dq_lock) */ static int v2_release_dquot(struct dquot *dquot) { - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) - return v2_delete_dquot(dquot); - return 0; + return qtree_release_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); } static struct quota_format_ops v2_format_ops = { diff --git a/fs/quotaio_v2.h b/fs/quotaio_v2.h index 303d7cbe30d4..530fe580685c 100644 --- a/fs/quotaio_v2.h +++ b/fs/quotaio_v2.h @@ -21,6 +21,12 @@ 0 /* GRPQUOTA */\ } +/* First generic header */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + /* * The following structure defines the format of the disk quota file * (as it appears on disk) - the file is a radix tree whose leaves point @@ -38,15 +44,6 @@ struct v2_disk_dqblk { __le64 dqb_itime; /* time limit for excessive inode use */ }; -/* - * Here are header structures as written on disk and their in-memory copies - */ -/* First generic header */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - /* Header with type and version specific information */ struct v2_disk_dqinfo { __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ @@ -57,23 +54,7 @@ struct v2_disk_dqinfo { __le32 dqi_free_entry; /* Number of block with at least one free entry */ }; -/* - * Structure of header of block with quota structures. It is padded to 16 bytes so - * there will be space for exactly 21 quota-entries in a block - */ -struct v2_disk_dqdbheader { - __le32 dqdh_next_free; /* Number of next block with free entry */ - __le32 dqdh_prev_free; /* Number of previous block with free entry */ - __le16 dqdh_entries; /* Number of valid entries in block */ - __le16 dqdh_pad1; - __le32 dqdh_pad2; -}; - #define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ -#define V2_DQBLKSIZE_BITS 10 -#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ -#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ -#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ -#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one blocks */ +#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ #endif /* _LINUX_QUOTAIO_V2_H */ -- cgit From e3d4d56b9715e40ded2a84d0d4fa7f3b6c58983c Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 2 Oct 2008 18:44:14 +0200 Subject: quota: Convert union in mem_dqinfo to a pointer Coming quota support for OCFS2 is going to need quite a bit of additional per-sb quota information. Moreover having fs.h include all the types needed for this structure would be a pain in the a**. So remove the union from mem_dqinfo and add a private pointer for filesystem's use. 
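
As a rough illustration of the convention this change introduces (this sketch is not part of the patch, and "myfs" is a made-up format name; only mem_dqinfo, dqi_priv, sb_dqinfo() and qtree_mem_dqinfo come from this series, and the usual quota headers are assumed to be included): a quota format now allocates whatever private per-superblock state it needs in ->read_file_info(), hangs it off the dqi_priv pointer, and releases it from ->free_file_info().

struct myfs_mem_dqinfo {
	struct qtree_mem_dqinfo i;	/* generic quota-tree state */
	unsigned int syncms;		/* example of format-private data */
};

static int myfs_read_file_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct myfs_mem_dqinfo *priv;

	priv = kmalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;
	info->dqi_priv = priv;
	/* ... read the on-disk info header and fill priv->i here ... */
	return 0;
}

static int myfs_free_file_info(struct super_block *sb, int type)
{
	kfree(sb_dqinfo(sb, type)->dqi_priv);
	return 0;
}
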
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/quota_v2.c | 53 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/quota_v2.c b/fs/quota_v2.c index a87f1028a425..b618b563635c 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -71,6 +71,7 @@ static int v2_read_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo; ssize_t size; size = sb->s_op->quota_read(sb, type, (char *)&dinfo, @@ -80,22 +81,29 @@ static int v2_read_file_info(struct super_block *sb, int type) sb->s_id); return -1; } + info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); + if (!info->dqi_priv) { + printk(KERN_WARNING + "Not enough memory for quota information structure.\n"); + return -1; + } + qinfo = info->dqi_priv; /* limits are stored as unsigned 32-bit data */ info->dqi_maxblimit = 0xffffffff; info->dqi_maxilimit = 0xffffffff; info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); - info->u.v2_i.i.dqi_sb = sb; - info->u.v2_i.i.dqi_type = type; - info->u.v2_i.i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); - info->u.v2_i.i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); - info->u.v2_i.i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); - info->u.v2_i.i.dqi_blocksize_bits = V2_DQBLKSIZE_BITS; - info->u.v2_i.i.dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; - info->u.v2_i.i.dqi_qtree_depth = qtree_depth(&info->u.v2_i.i); - info->u.v2_i.i.dqi_entry_size = sizeof(struct v2_disk_dqblk); - info->u.v2_i.i.dqi_ops = &v2_qtree_ops; + qinfo->dqi_sb = sb; + qinfo->dqi_type = type; + qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; + qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; + qinfo->dqi_qtree_depth = qtree_depth(qinfo); + qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); + qinfo->dqi_ops = &v2_qtree_ops; return 0; } @@ -104,6 +112,7 @@ static int v2_write_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo = info->dqi_priv; ssize_t size; spin_lock(&dq_data_lock); @@ -112,9 +121,9 @@ static int v2_write_file_info(struct super_block *sb, int type) dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); spin_unlock(&dq_data_lock); - dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.i.dqi_blocks); - dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.i.dqi_free_blk); - dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.i.dqi_free_entry); + dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); size = sb->s_op->quota_write(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { @@ -150,7 +159,7 @@ static void v2_mem2diskdqb(void *dp, struct dquot *dquot) struct v2_disk_dqblk *d = dp; struct mem_dqblk *m = &dquot->dq_dqb; struct qtree_mem_dqinfo *info = - &sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i; + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); d->dqb_isoftlimit = 
cpu_to_le32(m->dqb_isoftlimit); @@ -169,7 +178,7 @@ static int v2_is_id(void *dp, struct dquot *dquot) { struct v2_disk_dqblk *d = dp; struct qtree_mem_dqinfo *info = - &sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i; + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; if (qtree_entry_unused(info, dp)) return 0; @@ -178,24 +187,30 @@ static int v2_is_id(void *dp, struct dquot *dquot) static int v2_read_dquot(struct dquot *dquot) { - return qtree_read_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); + return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); } static int v2_write_dquot(struct dquot *dquot) { - return qtree_write_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); + return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); } static int v2_release_dquot(struct dquot *dquot) { - return qtree_release_dquot(&sb_dqinfo(dquot->dq_sb, dquot->dq_type)->u.v2_i.i, dquot); + return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_free_file_info(struct super_block *sb, int type) +{ + kfree(sb_dqinfo(sb, type)->dqi_priv); + return 0; } static struct quota_format_ops v2_format_ops = { .check_quota_file = v2_check_quota_file, .read_file_info = v2_read_file_info, .write_file_info = v2_write_file_info, - .free_file_info = NULL, + .free_file_info = v2_free_file_info, .read_dqblk = v2_read_dquot, .commit_dqblk = v2_write_dquot, .release_dqblk = v2_release_dquot, -- cgit From db49d2df489f727096438706a5428115e84a3f0d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 1 Oct 2008 18:21:39 +0200 Subject: quota: Allow negative usage of space and inodes For clustered filesystems, it can happen that space / inode usage goes negative temporarily (because some node is allocating another node is freeing and they are not completely in sync). So let quota code allow this and change qsize_t so a signed type so that we don't underflow the variables. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 74185c34a4f0..9c78ffe1aad2 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -847,7 +847,8 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { - if (dquot->dq_dqb.dqb_curinodes > number) + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curinodes >= number) dquot->dq_dqb.dqb_curinodes -= number; else dquot->dq_dqb.dqb_curinodes = 0; @@ -858,7 +859,8 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) { - if (dquot->dq_dqb.dqb_curspace > number) + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curspace >= number) dquot->dq_dqb.dqb_curspace -= number; else dquot->dq_dqb.dqb_curspace = 0; -- cgit From 4d59bce4f9eaf26d6d9046b56a2f1c0c7f20981d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 2 Oct 2008 16:48:10 +0200 Subject: quota: Keep which entries were set by SETQUOTA quotactl Quota in a clustered environment needs to synchronize quota information among cluster nodes. This means we have to occasionally update some information in dquot from disk / network. On the other hand we have to be careful not to overwrite changes administrator did via SETQUOTA. 
So indicate in dquot->dq_flags which entries have been set by SETQUOTA and quota format can clear these flags when it properly propagated the changes. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 9c78ffe1aad2..89226726daa8 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -2010,25 +2010,33 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) if (di->dqb_valid & QIF_SPACE) { dm->dqb_curspace = di->dqb_curspace; check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_BLIMITS) { dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_INODES) { dm->dqb_curinodes = di->dqb_curinodes; check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_ILIMITS) { dm->dqb_isoftlimit = di->dqb_isoftlimit; dm->dqb_ihardlimit = di->dqb_ihardlimit; check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); } - if (di->dqb_valid & QIF_BTIME) + if (di->dqb_valid & QIF_BTIME) { dm->dqb_btime = di->dqb_btime; - if (di->dqb_valid & QIF_ITIME) + __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_ITIME) { dm->dqb_itime = di->dqb_itime; + __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + } if (check_blim) { if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { -- cgit From 3d9ea253a0e73dccaa869888ec2ceb17ea76c810 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 10 Oct 2008 16:12:23 +0200 Subject: quota: Add helpers to allow ocfs2 specific quota initialization, freeing and recovery OCFS2 needs to peek whether quota structure is already in memory so that it can avoid expensive cluster locking in that case. Similarly when freeing dquots, it checks whether it is the last quota structure user or not. Finally, it needs to get reference to dquot structure for specified id and quota type when recovering quota file after crash. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 89226726daa8..ae8fd9e645cc 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -211,8 +211,6 @@ static struct hlist_head *dquot_hash; struct dqstats dqstats; -static void dqput(struct dquot *dquot); - static inline unsigned int hashfn(const struct super_block *sb, unsigned int id, int type) { @@ -568,7 +566,7 @@ static struct shrinker dqcache_shrinker = { * NOTE: If you change this function please check whether dqput_blocks() works right... * MUST be called with either dqptr_sem or dqonoff_mutex held */ -static void dqput(struct dquot *dquot) +void dqput(struct dquot *dquot) { int ret; @@ -661,11 +659,29 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) return dquot; } +/* + * Check whether dquot is in memory. 
+ * MUST be called with either dqptr_sem or dqonoff_mutex held + */ +int dquot_is_cached(struct super_block *sb, unsigned int id, int type) +{ + unsigned int hashent = hashfn(sb, id, type); + int ret = 0; + + if (!sb_has_quota_active(sb, type)) + return 0; + spin_lock(&dq_list_lock); + if (find_dquot(hashent, sb, id, type) != NODQUOT) + ret = 1; + spin_unlock(&dq_list_lock); + return ret; +} + /* * Get reference to dquot * MUST be called with either dqptr_sem or dqonoff_mutex held */ -static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) +struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { unsigned int hashent = hashfn(sb, id, type); struct dquot *dquot, *empty = NODQUOT; @@ -1184,17 +1200,23 @@ out_err: * Release all quotas referenced by inode * Transaction must be started at an entry */ -int dquot_drop(struct inode *inode) +int dquot_drop_locked(struct inode *inode) { int cnt; - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (inode->i_dquot[cnt] != NODQUOT) { dqput(inode->i_dquot[cnt]); inode->i_dquot[cnt] = NODQUOT; } } + return 0; +} + +int dquot_drop(struct inode *inode) +{ + down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + dquot_drop_locked(inode); up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); return 0; } @@ -2308,7 +2330,11 @@ EXPORT_SYMBOL(dquot_release); EXPORT_SYMBOL(dquot_mark_dquot_dirty); EXPORT_SYMBOL(dquot_initialize); EXPORT_SYMBOL(dquot_drop); +EXPORT_SYMBOL(dquot_drop_locked); EXPORT_SYMBOL(vfs_dq_drop); +EXPORT_SYMBOL(dqget); +EXPORT_SYMBOL(dqput); +EXPORT_SYMBOL(dquot_is_cached); EXPORT_SYMBOL(dquot_alloc_space); EXPORT_SYMBOL(dquot_alloc_inode); EXPORT_SYMBOL(dquot_free_space); -- cgit From 12c77527e4138bc3b17d17b0e0c909e4fc84924f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 20 Oct 2008 17:05:00 +0200 Subject: quota: Implement function for scanning active dquots OCFS2 needs to scan all active dquots once in a while and sync quota information among cluster nodes. Provide a helper function for it so that it does not have to reimplement internally a list which VFS already has. Moreover this function is probably going to be useful for other clustered filesystems if they decide to use VFS quotas. 
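
A minimal usage sketch, not taken from any patch in this series ("myfs" names are made up): a clustered filesystem can drive dquot_scan_active() from its periodic sync work. The scan holds a reference on each active dquot while the callback runs, and a negative return value from the callback aborts the scan. Calling ->write_dquot() below merely stands in for whatever per-dquot sync routine the filesystem actually uses.

/* Callback: skip other quota types, then push this dquot back to disk. */
static int myfs_sync_one_dquot(struct dquot *dquot, unsigned long priv)
{
	int type = (int)priv;

	if (dquot->dq_type != type)
		return 0;
	/* A negative return value here aborts the whole scan. */
	return dquot->dq_sb->dq_op->write_dquot(dquot);
}

/* Walk every active dquot of the given type on this superblock. */
static int myfs_sync_all_dquots(struct super_block *sb, int type)
{
	return dquot_scan_active(sb, myfs_sync_one_dquot, type);
}
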
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index ae8fd9e645cc..075dc76904e7 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -476,6 +476,41 @@ restart: spin_unlock(&dq_list_lock); } +/* Call callback for every active dquot on given filesystem */ +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv) +{ + struct dquot *dquot, *old_dquot = NULL; + int ret = 0; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + spin_lock(&dq_list_lock); + list_for_each_entry(dquot, &inuse_list, dq_inuse) { + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) + continue; + if (dquot->dq_sb != sb) + continue; + /* Now we have active dquot so we can just increase use count */ + atomic_inc(&dquot->dq_count); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + dqput(old_dquot); + old_dquot = dquot; + ret = fn(dquot, priv); + if (ret < 0) + goto out; + spin_lock(&dq_list_lock); + /* We are safe to continue now because our dquot could not + * be moved out of the inuse list while we hold the reference */ + } + spin_unlock(&dq_list_lock); +out: + dqput(old_dquot); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return ret; +} + int vfs_quota_sync(struct super_block *sb, int type) { struct list_head *dirty; @@ -2318,6 +2353,7 @@ EXPORT_SYMBOL(vfs_quota_on_path); EXPORT_SYMBOL(vfs_quota_on_mount); EXPORT_SYMBOL(vfs_quota_disable); EXPORT_SYMBOL(vfs_quota_off); +EXPORT_SYMBOL(dquot_scan_active); EXPORT_SYMBOL(vfs_quota_sync); EXPORT_SYMBOL(vfs_get_dqinfo); EXPORT_SYMBOL(vfs_set_dqinfo); -- cgit From 90e86a63eadf1a3b2f19b68d82150dc63fe01443 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 27 Aug 2008 22:30:28 +0200 Subject: ocfs2: Support nested transactions OCFS2 can easily support nested transactions. We just have to take care and not spoil statistics acquire semaphore unnecessarily. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/journal.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 12b62a3cbf69..11a1178d5ee8 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -256,11 +256,9 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); BUG_ON(max_buffs <= 0); - /* JBD might support this, but our journalling code doesn't yet. */ - if (journal_current_handle()) { - mlog(ML_ERROR, "Recursive transaction attempted!\n"); - BUG(); - } + /* Nested transaction? Just return the handle... 
*/ + if (journal_current_handle()) + return jbd2_journal_start(journal, max_buffs); down_read(&osb->journal->j_trans_barrier); @@ -285,16 +283,18 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) int ocfs2_commit_trans(struct ocfs2_super *osb, handle_t *handle) { - int ret; + int ret, nested; struct ocfs2_journal *journal = osb->journal; BUG_ON(!handle); + nested = handle->h_ref > 1; ret = jbd2_journal_stop(handle); if (ret < 0) mlog_errno(ret); - up_read(&journal->j_trans_barrier); + if (!nested) + up_read(&journal->j_trans_barrier); return ret; } -- cgit From 1a224ad11eeb190da4a123e156601aad1bb67f24 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Aug 2008 15:43:36 +0200 Subject: ocfs2: Assign feature bits and system inodes to quota feature and quota files Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/Kconfig | 2 ++ fs/ocfs2/inode.c | 2 ++ fs/ocfs2/ocfs2_fs.h | 21 ++++++++++++++++++--- fs/ocfs2/super.c | 17 +++++++++++++++++ 4 files changed, 39 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index c1ce3d8831d8..f9b6e2979aaa 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -189,6 +189,8 @@ config OCFS2_FS select CONFIGFS_FS select JBD2 select CRC32 + select QUOTA + select QUOTA_TREE help OCFS2 is a general purpose extent based shared disk cluster file system with many similarities to ext3. It supports 64 bit inode diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index ec3497bafda6..ec25d9984192 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -283,6 +283,8 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino); } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; + } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) { + inode->i_flags |= S_NOQUOTA; } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino); /* we can't actually hit this as read_inode can't diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 5e0c0d0aef7d..06e3bd632ff3 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -94,7 +94,7 @@ | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \ | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \ | OCFS2_FEATURE_INCOMPAT_XATTR) -#define OCFS2_FEATURE_RO_COMPAT_SUPP OCFS2_FEATURE_RO_COMPAT_UNWRITTEN +#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN) /* * Heartbeat-only devices are missing journals and other files. The @@ -163,6 +163,12 @@ */ #define OCFS2_FEATURE_RO_COMPAT_UNWRITTEN 0x0001 +/* + * Maintain quota information for this filesystem + */ +#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 +#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 + /* The byte offset of the first backup block will be 1G. * The following will be 4G, 16G, 64G, 256G and 1T. 
*/ @@ -192,6 +198,7 @@ #define OCFS2_HEARTBEAT_FL (0x00000200) /* Heartbeat area */ #define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */ #define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */ +#define OCFS2_QUOTA_FL (0x00001000) /* Quota file */ /* * Flags on ocfs2_dinode.i_dyn_features @@ -329,13 +336,17 @@ enum { #define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, -#define OCFS2_LAST_GLOBAL_SYSTEM_INODE GLOBAL_BITMAP_SYSTEM_INODE + USER_QUOTA_SYSTEM_INODE, + GROUP_QUOTA_SYSTEM_INODE, +#define OCFS2_LAST_GLOBAL_SYSTEM_INODE GROUP_QUOTA_SYSTEM_INODE ORPHAN_DIR_SYSTEM_INODE, EXTENT_ALLOC_SYSTEM_INODE, INODE_ALLOC_SYSTEM_INODE, JOURNAL_SYSTEM_INODE, LOCAL_ALLOC_SYSTEM_INODE, TRUNCATE_LOG_SYSTEM_INODE, + LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE, NUM_SYSTEM_INODES }; @@ -349,6 +360,8 @@ static struct ocfs2_system_inode_info ocfs2_system_inodes[NUM_SYSTEM_INODES] = { [SLOT_MAP_SYSTEM_INODE] = { "slot_map", 0, S_IFREG | 0644 }, [HEARTBEAT_SYSTEM_INODE] = { "heartbeat", OCFS2_HEARTBEAT_FL, S_IFREG | 0644 }, [GLOBAL_BITMAP_SYSTEM_INODE] = { "global_bitmap", 0, S_IFREG | 0644 }, + [USER_QUOTA_SYSTEM_INODE] = { "aquota.user", OCFS2_QUOTA_FL, S_IFREG | 0644 }, + [GROUP_QUOTA_SYSTEM_INODE] = { "aquota.group", OCFS2_QUOTA_FL, S_IFREG | 0644 }, /* Slot-specific system inodes (one copy per slot) */ [ORPHAN_DIR_SYSTEM_INODE] = { "orphan_dir:%04d", 0, S_IFDIR | 0755 }, @@ -356,7 +369,9 @@ static struct ocfs2_system_inode_info ocfs2_system_inodes[NUM_SYSTEM_INODES] = { [INODE_ALLOC_SYSTEM_INODE] = { "inode_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 }, [JOURNAL_SYSTEM_INODE] = { "journal:%04d", OCFS2_JOURNAL_FL, S_IFREG | 0644 }, [LOCAL_ALLOC_SYSTEM_INODE] = { "local_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_LOCAL_ALLOC_FL, S_IFREG | 0644 }, - [TRUNCATE_LOG_SYSTEM_INODE] = { "truncate_log:%04d", OCFS2_DEALLOC_FL, S_IFREG | 0644 } + [TRUNCATE_LOG_SYSTEM_INODE] = { "truncate_log:%04d", OCFS2_DEALLOC_FL, S_IFREG | 0644 }, + [LOCAL_USER_QUOTA_SYSTEM_INODE] = { "aquota.user:%04d", OCFS2_QUOTA_FL, S_IFREG | 0644 }, + [LOCAL_GROUP_QUOTA_SYSTEM_INODE] = { "aquota.group:%04d", OCFS2_QUOTA_FL, S_IFREG | 0644 }, }; /* Parameter passed from mount.ocfs2 to module */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 9e7accc68b4b..41bb0197cf4c 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -225,6 +225,19 @@ static int ocfs2_sync_fs(struct super_block *sb, int wait) return 0; } +static int ocfs2_need_system_inode(struct ocfs2_super *osb, int ino) +{ + if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA) + && (ino == USER_QUOTA_SYSTEM_INODE + || ino == LOCAL_USER_QUOTA_SYSTEM_INODE)) + return 0; + if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) + && (ino == GROUP_QUOTA_SYSTEM_INODE + || ino == LOCAL_GROUP_QUOTA_SYSTEM_INODE)) + return 0; + return 1; +} + static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) { struct inode *new = NULL; @@ -251,6 +264,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) for (i = OCFS2_FIRST_ONLINE_SYSTEM_INODE; i <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; i++) { + if (!ocfs2_need_system_inode(osb, i)) + continue; new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); @@ -281,6 +296,8 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; i < NUM_SYSTEM_INODES; i++) { + if 
(!ocfs2_need_system_inode(osb, i)) + continue; new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); -- cgit From bbbd0eb34bf801dee01e345785959a75258f6567 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 21 Aug 2008 18:22:30 +0200 Subject: ocfs2: Mark system files as not subject to quota accounting Mark system files as not subject to quota accounting. This prevents possible recursions into quota code and thus deadlocks. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index ec25d9984192..50dbc486ef71 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -275,8 +275,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, inode->i_nlink = le16_to_cpu(fe->i_links_count); - if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) + if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) { OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; + inode->i_flags |= S_NOQUOTA; + } if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; -- cgit From 9e33d69f553aaf11377307e8d6f82deb3385e351 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 25 Aug 2008 19:56:50 +0200 Subject: ocfs2: Implementation of local and global quota file handling For each quota type each node has local quota file. In this file it stores changes users have made to disk usage via this node. Once in a while this information is synced to global file (and thus with other nodes) so that limits enforcement at least aproximately works. Global quota files contain all the information about usage and limits. It's mostly handled by the generic VFS code (which implements a trie of structures inside a quota file). We only have to provide functions to convert structures from on-disk format to in-memory one. We also have to provide wrappers for various quota functions starting transactions and acquiring necessary cluster locks before the actual IO is really started. 
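
To make the local/global split concrete, here is an illustrative sketch of the sync arithmetic only (apply_local_delta() is a made-up name; the real OCFS2 path added by this patch also takes the cluster lock, journals the write and maintains use counts). Each node records only how much its usage changed since the last sync, and that delta is folded into the global file so the same change is never applied twice:

static void apply_local_delta(struct ocfs2_global_disk_dqblk *gd,
			      struct dquot *dquot)
{
	struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
	s64 spacemod = dquot->dq_dqb.dqb_curspace - od->dq_origspace;
	s64 inodemod = dquot->dq_dqb.dqb_curinodes - od->dq_originodes;

	/* Add this node's change since the last sync to the global usage */
	le64_add_cpu(&gd->dqb_curspace, spacemod);
	le64_add_cpu(&gd->dqb_curinodes, inodemod);

	/* Remember what has been synced so the delta is not applied twice */
	od->dq_origspace = dquot->dq_dqb.dqb_curspace;
	od->dq_originodes = dquot->dq_dqb.dqb_curinodes;
}
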
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/Makefile | 2 + fs/ocfs2/cluster/masklog.h | 1 + fs/ocfs2/dlmglue.c | 146 +++++++ fs/ocfs2/dlmglue.h | 19 + fs/ocfs2/file.c | 6 +- fs/ocfs2/file.h | 3 + fs/ocfs2/inode.h | 2 + fs/ocfs2/ocfs2_fs.h | 103 +++++ fs/ocfs2/ocfs2_lockid.h | 5 + fs/ocfs2/quota.h | 93 +++++ fs/ocfs2/quota_global.c | 919 +++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/quota_local.c | 833 ++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/super.c | 38 +- 13 files changed, 2165 insertions(+), 5 deletions(-) create mode 100644 fs/ocfs2/quota.h create mode 100644 fs/ocfs2/quota_global.c create mode 100644 fs/ocfs2/quota_local.c (limited to 'fs') diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index e9ef5d162db1..7e4b361b755c 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -35,6 +35,8 @@ ocfs2-objs := \ sysfile.o \ uptodate.o \ ver.o \ + quota_local.o \ + quota_global.o \ xattr.o ifeq ($(CONFIG_OCFS2_FS_POSIX_ACL),y) diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 57670c680471..7e72a81bc2d4 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -113,6 +113,7 @@ #define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */ #define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */ #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ +#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ /* bits that are infrequently given and frequently matched in the high word */ #define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ #define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */ diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 9f2a7f75d1b3..058aa86490ae 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -32,6 +32,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_DLM_GLUE #include @@ -51,6 +52,7 @@ #include "slot_map.h" #include "super.h" #include "uptodate.h" +#include "quota.h" #include "buffer_head_io.h" @@ -68,6 +70,7 @@ struct ocfs2_mask_waiter { static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres); static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres); +static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres); /* * Return value from ->downconvert_worker functions. 
@@ -102,6 +105,7 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); +static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres); #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres) @@ -258,6 +262,12 @@ static struct ocfs2_lock_res_ops ocfs2_flock_lops = { .flags = 0, }; +static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = { + .set_lvb = ocfs2_set_qinfo_lvb, + .get_osb = ocfs2_get_qinfo_osb, + .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB, +}; + static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) { return lockres->l_type == OCFS2_LOCK_TYPE_META || @@ -279,6 +289,13 @@ static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res return (struct ocfs2_dentry_lock *)lockres->l_priv; } +static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres) +{ + BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO); + + return (struct ocfs2_mem_dqinfo *)lockres->l_priv; +} + static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres) { if (lockres->l_ops->get_osb) @@ -507,6 +524,13 @@ static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres) return OCFS2_SB(inode->i_sb); } +static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres) +{ + struct ocfs2_mem_dqinfo *info = lockres->l_priv; + + return OCFS2_SB(info->dqi_gi.dqi_sb); +} + static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres) { struct ocfs2_file_private *fp = lockres->l_priv; @@ -609,6 +633,17 @@ void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, lockres->l_flags |= OCFS2_LOCK_NOCACHE; } +void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_mem_dqinfo *info) +{ + ocfs2_lock_res_init_once(lockres); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type, + 0, lockres->l_name); + ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres, + OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops, + info); +} + void ocfs2_lock_res_free(struct ocfs2_lock_res *res) { mlog_entry_void(); @@ -3445,6 +3480,117 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, return UNBLOCK_CONTINUE_POST; } +static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) +{ + struct ocfs2_qinfo_lvb *lvb; + struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres); + struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, + oinfo->dqi_gi.dqi_type); + + mlog_entry_void(); + + lvb = (struct ocfs2_qinfo_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; + lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); + lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace); + lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms); + lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks); + lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); + lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); + + mlog_exit_void(); +} + +void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) +{ + struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; + struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); + int level = ex ? 
DLM_LOCK_EX : DLM_LOCK_PR; + + mlog_entry_void(); + if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(osb, lockres, level); + mlog_exit_void(); +} + +static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) +{ + struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, + oinfo->dqi_gi.dqi_type); + struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; + struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); + struct buffer_head *bh; + struct ocfs2_global_disk_dqinfo *gdinfo; + int status = 0; + + if (lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) { + info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace); + info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace); + oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms); + oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks); + oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk); + oinfo->dqi_gi.dqi_free_entry = + be32_to_cpu(lvb->lvb_free_entry); + } else { + bh = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &status); + if (!bh) { + mlog_errno(status); + goto bail; + } + gdinfo = (struct ocfs2_global_disk_dqinfo *) + (bh->b_data + OCFS2_GLOBAL_INFO_OFF); + info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace); + info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace); + oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms); + oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks); + oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk); + oinfo->dqi_gi.dqi_free_entry = + le32_to_cpu(gdinfo->dqi_free_entry); + brelse(bh); + ocfs2_track_lock_refresh(lockres); + } + +bail: + return status; +} + +/* Lock quota info, this function expects at least shared lock on the quota file + * so that we can safely refresh quota info from disk. */ +int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex) +{ + struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; + struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + int status = 0; + + mlog_entry_void(); + + /* On RO devices, locking really isn't needed... */ + if (ocfs2_is_hard_readonly(osb)) { + if (ex) + status = -EROFS; + goto bail; + } + if (ocfs2_mount_local(osb)) + goto bail; + + status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); + if (status < 0) { + mlog_errno(status); + goto bail; + } + if (!ocfs2_should_refresh_lock_res(lockres)) + goto bail; + /* OK, we have the lock but we need to refresh the quota info */ + status = ocfs2_refresh_qinfo(oinfo); + if (status) + ocfs2_qinfo_unlock(oinfo, ex); + ocfs2_complete_lock_res_refresh(lockres, status); +bail: + mlog_exit(status); + return status; +} + /* * This is the filesystem locking protocol. It provides the lock handling * hooks for the underlying DLM. It has a maximum version number. diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 2bb01f09c1b1..3f8d9986b8e0 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -49,6 +49,19 @@ struct ocfs2_meta_lvb { __be32 lvb_reserved2; }; +#define OCFS2_QINFO_LVB_VERSION 1 + +struct ocfs2_qinfo_lvb { + __u8 lvb_version; + __u8 lvb_reserved[3]; + __be32 lvb_bgrace; + __be32 lvb_igrace; + __be32 lvb_syncms; + __be32 lvb_blocks; + __be32 lvb_free_blk; + __be32 lvb_free_entry; +}; + /* ocfs2_inode_lock_full() 'arg_flags' flags */ /* don't wait on recovery. 
*/ #define OCFS2_META_LOCK_RECOVERY (0x01) @@ -69,6 +82,9 @@ void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, struct ocfs2_file_private; void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_file_private *fp); +struct ocfs2_mem_dqinfo; +void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_mem_dqinfo *info); void ocfs2_lock_res_free(struct ocfs2_lock_res *res); int ocfs2_create_new_inode_locks(struct inode *inode); int ocfs2_drop_inode_locks(struct inode *inode); @@ -103,6 +119,9 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex); void ocfs2_dentry_unlock(struct dentry *dentry, int ex); int ocfs2_file_lock(struct file *file, int ex, int trylock); void ocfs2_file_unlock(struct file *file); +int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex); +void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex); + void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres); void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41001d515fae..372d96505a79 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -304,9 +304,9 @@ bail: return status; } -static int ocfs2_simple_size_update(struct inode *inode, - struct buffer_head *di_bh, - u64 new_i_size) +int ocfs2_simple_size_update(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index e92382cbca5f..172f9fbc9fc7 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -51,6 +51,9 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret); +int ocfs2_simple_size_update(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size); int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to); int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index b79c371a9d27..eb3c302b38d3 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -142,6 +142,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle, struct buffer_head *bh); int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb); int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb); +struct buffer_head *ocfs2_bread(struct inode *inode, + int block, int *err, int reada); void ocfs2_set_inode_flags(struct inode *inode); void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 06e3bd632ff3..0a5ac790a628 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -883,6 +883,109 @@ static inline int ocfs2_xattr_get_type(struct ocfs2_xattr_entry *xe) return xe->xe_type & OCFS2_XATTR_TYPE_MASK; } +/* + * On disk structures for global quota file + */ + +/* Magic numbers and known versions for global quota files */ +#define OCFS2_GLOBAL_QMAGICS {\ + 0x0cf52470, /* USRQUOTA */ \ + 0x0cf52471 /* GRPQUOTA */ \ +} + +#define OCFS2_GLOBAL_QVERSIONS {\ + 0, \ + 0, \ +} + + +/* Each block of each quota file has a certain fixed number of bytes reserved + * for OCFS2 internal use at its end. OCFS2 can use it for things like + * checksums, etc. 
*/ +#define OCFS2_QBLK_RESERVED_SPACE 8 + +/* Generic header of all quota files */ +struct ocfs2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* Quota format version */ +}; + +#define OCFS2_GLOBAL_INFO_OFF (sizeof(struct ocfs2_disk_dqheader)) + +/* Information header of global quota file (immediately follows the generic + * header) */ +struct ocfs2_global_disk_dqinfo { +/*00*/ __le32 dqi_bgrace; /* Grace time for space softlimit excess */ + __le32 dqi_igrace; /* Grace time for inode softlimit excess */ + __le32 dqi_syncms; /* Time after which we sync local changes to + * global quota file */ + __le32 dqi_blocks; /* Number of blocks in quota file */ +/*10*/ __le32 dqi_free_blk; /* First free block in quota file */ + __le32 dqi_free_entry; /* First block with free dquot entry in quota + * file */ +}; + +/* Structure with global user / group information. We reserve some space + * for future use. */ +struct ocfs2_global_disk_dqblk { +/*00*/ __le32 dqb_id; /* ID the structure belongs to */ + __le32 dqb_use_count; /* Number of nodes having reference to this structure */ + __le64 dqb_ihardlimit; /* absolute limit on allocated inodes */ +/*10*/ __le64 dqb_isoftlimit; /* preferred inode limit */ + __le64 dqb_curinodes; /* current # allocated inodes */ +/*20*/ __le64 dqb_bhardlimit; /* absolute limit on disk space */ + __le64 dqb_bsoftlimit; /* preferred limit on disk space */ +/*30*/ __le64 dqb_curspace; /* current space occupied */ + __le64 dqb_btime; /* time limit for excessive disk use */ +/*40*/ __le64 dqb_itime; /* time limit for excessive inode use */ + __le64 dqb_pad1; +/*50*/ __le64 dqb_pad2; +}; + +/* + * On-disk structures for local quota file + */ + +/* Magic numbers and known versions for local quota files */ +#define OCFS2_LOCAL_QMAGICS {\ + 0x0cf524c0, /* USRQUOTA */ \ + 0x0cf524c1 /* GRPQUOTA */ \ +} + +#define OCFS2_LOCAL_QVERSIONS {\ + 0, \ + 0, \ +} + +/* Quota flags in dqinfo header */ +#define OLQF_CLEAN 0x0001 /* Quota file is empty (this should be after\ + * quota has been cleanly turned off) */ + +#define OCFS2_LOCAL_INFO_OFF (sizeof(struct ocfs2_disk_dqheader)) + +/* Information header of local quota file (immediately follows the generic + * header) */ +struct ocfs2_local_disk_dqinfo { + __le32 dqi_flags; /* Flags for quota file */ + __le32 dqi_chunks; /* Number of chunks of quota structures + * with a bitmap */ + __le32 dqi_blocks; /* Number of blocks allocated for quota file */ +}; + +/* Header of one chunk of a quota file */ +struct ocfs2_local_disk_chunk { + __le32 dqc_free; /* Number of free entries in the bitmap */ + u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding + * chunk of quota file */ +}; + +/* One entry in local quota file */ +struct ocfs2_local_disk_dqblk { +/*00*/ __le64 dqb_id; /* id this quota applies to */ + __le64 dqb_spacemod; /* Change in the amount of used space */ +/*10*/ __le64 dqb_inodemod; /* Change in the amount of used inodes */ +}; + #ifdef __KERNEL__ static inline int ocfs2_fast_symlink_chars(struct super_block *sb) { diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index 82c200f7a8f1..eb6f50c9ceca 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -46,6 +46,7 @@ enum ocfs2_lock_type { OCFS2_LOCK_TYPE_DENTRY, OCFS2_LOCK_TYPE_OPEN, OCFS2_LOCK_TYPE_FLOCK, + OCFS2_LOCK_TYPE_QINFO, OCFS2_NUM_LOCK_TYPES }; @@ -77,6 +78,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type) case OCFS2_LOCK_TYPE_FLOCK: c = 'F'; break; + case 
OCFS2_LOCK_TYPE_QINFO: + c = 'Q'; + break; default: c = '\0'; } @@ -95,6 +99,7 @@ static char *ocfs2_lock_type_strings[] = { [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", [OCFS2_LOCK_TYPE_OPEN] = "Open", [OCFS2_LOCK_TYPE_FLOCK] = "Flock", + [OCFS2_LOCK_TYPE_QINFO] = "Quota", }; static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type) diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h new file mode 100644 index 000000000000..1f1c86311b32 --- /dev/null +++ b/fs/ocfs2/quota.h @@ -0,0 +1,93 @@ +/* + * quota.h for OCFS2 + * + * On disk quota structures for local and global quota file, in-memory + * structures. + * + */ + +#ifndef _OCFS2_QUOTA_H +#define _OCFS2_QUOTA_H + +#include +#include +#include +#include +#include + +#include "ocfs2.h" + +/* Common stuff */ +/* id number of quota format */ +#define QFMT_OCFS2 3 + +/* + * In-memory structures + */ +struct ocfs2_dquot { + struct dquot dq_dquot; /* Generic VFS dquot */ + loff_t dq_local_off; /* Offset in the local quota file */ + struct ocfs2_quota_chunk *dq_chunk; /* Chunk dquot is in */ + unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */ + s64 dq_origspace; /* Last globally synced space usage */ + s64 dq_originodes; /* Last globally synced inode usage */ +}; + +/* In-memory structure with quota header information */ +struct ocfs2_mem_dqinfo { + unsigned int dqi_type; /* Quota type this structure describes */ + unsigned int dqi_chunks; /* Number of chunks in local quota file */ + unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ + unsigned int dqi_syncms; /* How often should we sync with other nodes */ + struct list_head dqi_chunk; /* List of chunks */ + struct inode *dqi_gqinode; /* Global quota file inode */ + struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ + struct buffer_head *dqi_gqi_bh; /* Buffer head with global quota file inode - set only if inode lock is obtained */ + int dqi_gqi_count; /* Number of holders of dqi_gqi_bh */ + struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */ + struct buffer_head *dqi_ibh; /* Buffer with information header */ + struct qtree_mem_dqinfo dqi_gi; /* Info about global file */ +}; + +static inline struct ocfs2_dquot *OCFS2_DQUOT(struct dquot *dquot) +{ + return container_of(dquot, struct ocfs2_dquot, dq_dquot); +} + +struct ocfs2_quota_chunk { + struct list_head qc_chunk; /* List of quotafile chunks */ + int qc_num; /* Number of quota chunk */ + struct buffer_head *qc_headerbh; /* Buffer head with chunk header */ +}; + +extern struct kmem_cache *ocfs2_dquot_cachep; +extern struct kmem_cache *ocfs2_qf_chunk_cachep; + +extern struct qtree_fmt_operations ocfs2_global_ops; + +ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data, + size_t len, loff_t off); +ssize_t ocfs2_quota_write(struct super_block *sb, int type, + const char *data, size_t len, loff_t off); +int ocfs2_global_read_info(struct super_block *sb, int type); +int ocfs2_global_write_info(struct super_block *sb, int type); +int ocfs2_global_read_dquot(struct dquot *dquot); +int __ocfs2_sync_dquot(struct dquot *dquot, int freeing); +static inline int ocfs2_sync_dquot(struct dquot *dquot) +{ + return __ocfs2_sync_dquot(dquot, 0); +} +static inline int ocfs2_global_release_dquot(struct dquot *dquot) +{ + return __ocfs2_sync_dquot(dquot, 1); +} + +int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); +void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); 
+struct buffer_head *ocfs2_read_quota_block(struct inode *inode, + int block, int *err); + +extern struct dquot_operations ocfs2_quota_operations; +extern struct quota_format_type ocfs2_quota_format; + +#endif /* _OCFS2_QUOTA_H */ diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c new file mode 100644 index 000000000000..af8340c45367 --- /dev/null +++ b/fs/ocfs2/quota_global.c @@ -0,0 +1,919 @@ +/* + * Implementation of operations over global quota file + */ +#include +#include +#include +#include + +#define MLOG_MASK_PREFIX ML_QUOTA +#include + +#include "ocfs2_fs.h" +#include "ocfs2.h" +#include "alloc.h" +#include "inode.h" +#include "journal.h" +#include "file.h" +#include "sysfile.h" +#include "dlmglue.h" +#include "uptodate.h" +#include "quota.h" + +static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp) +{ + struct ocfs2_global_disk_dqblk *d = dp; + struct mem_dqblk *m = &dquot->dq_dqb; + + /* Update from disk only entries not set by the admin */ + if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) { + m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit); + } + if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) + m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes); + if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) { + m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit); + m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit); + } + if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags)) + m->dqb_btime = le64_to_cpu(d->dqb_btime); + if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags)) + m->dqb_itime = le64_to_cpu(d->dqb_itime); + OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count); +} + +static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot) +{ + struct ocfs2_global_disk_dqblk *d = dp; + struct mem_dqblk *m = &dquot->dq_dqb; + + d->dqb_id = cpu_to_le32(dquot->dq_id); + d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count); + d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes); + d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit); + d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_itime = cpu_to_le64(m->dqb_itime); +} + +static int ocfs2_global_is_id(void *dp, struct dquot *dquot) +{ + struct ocfs2_global_disk_dqblk *d = dp; + struct ocfs2_mem_dqinfo *oinfo = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + if (qtree_entry_unused(&oinfo->dqi_gi, dp)) + return 0; + return le32_to_cpu(d->dqb_id) == dquot->dq_id; +} + +struct qtree_fmt_operations ocfs2_global_ops = { + .mem2disk_dqblk = ocfs2_global_mem2diskdqb, + .disk2mem_dqblk = ocfs2_global_disk2memdqb, + .is_id = ocfs2_global_is_id, +}; + + +struct buffer_head *ocfs2_read_quota_block(struct inode *inode, + int block, int *err) +{ + struct buffer_head *tmp = NULL; + + *err = ocfs2_read_virt_blocks(inode, block, 1, &tmp, 0, NULL); + if (*err) + mlog_errno(*err); + + return tmp; +} + +static struct buffer_head *ocfs2_get_quota_block(struct inode *inode, + int block, int *err) +{ + u64 pblock, pcount; + struct buffer_head *bh; + + down_read(&OCFS2_I(inode)->ip_alloc_sem); + *err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, + NULL); + 
up_read(&OCFS2_I(inode)->ip_alloc_sem); + if (*err) { + mlog_errno(*err); + return NULL; + } + bh = sb_getblk(inode->i_sb, pblock); + if (!bh) { + *err = -EIO; + mlog_errno(*err); + } + return bh; +} + +/* Read data from global quotafile - avoid pagecache and such because we cannot + * afford acquiring the locks... We use quota cluster lock to serialize + * operations. Caller is responsible for acquiring it. */ +ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data, + size_t len, loff_t off) +{ + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; + struct inode *gqinode = oinfo->dqi_gqinode; + loff_t i_size = i_size_read(gqinode); + int offset = off & (sb->s_blocksize - 1); + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0; + struct buffer_head *bh; + size_t toread, tocopy; + + if (off > i_size) + return 0; + if (off + len > i_size) + len = i_size - off; + toread = len; + while (toread > 0) { + tocopy = min((size_t)(sb->s_blocksize - offset), toread); + bh = ocfs2_read_quota_block(gqinode, blk, &err); + if (!bh) { + mlog_errno(err); + return err; + } + memcpy(data, bh->b_data + offset, tocopy); + brelse(bh); + offset = 0; + toread -= tocopy; + data += tocopy; + blk++; + } + return len; +} + +/* Write to quotafile (we know the transaction is already started and has + * enough credits) */ +ssize_t ocfs2_quota_write(struct super_block *sb, int type, + const char *data, size_t len, loff_t off) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct inode *gqinode = oinfo->dqi_gqinode; + int offset = off & (sb->s_blocksize - 1); + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0, new = 0; + struct buffer_head *bh; + handle_t *handle = journal_current_handle(); + + if (!handle) { + mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled " + "because transaction was not started.\n", + (unsigned long long)off, (unsigned long long)len); + return -EIO; + } + if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) { + WARN_ON(1); + len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset; + } + + mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA); + if (gqinode->i_size < off + len) { + down_write(&OCFS2_I(gqinode)->ip_alloc_sem); + err = ocfs2_extend_no_holes(gqinode, off + len, off); + up_write(&OCFS2_I(gqinode)->ip_alloc_sem); + if (err < 0) + goto out; + err = ocfs2_simple_size_update(gqinode, + oinfo->dqi_gqi_bh, + off + len); + if (err < 0) + goto out; + new = 1; + } + /* Not rewriting whole block? 
*/ + if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) && + !new) { + bh = ocfs2_read_quota_block(gqinode, blk, &err); + if (!bh) { + mlog_errno(err); + return err; + } + err = ocfs2_journal_access(handle, gqinode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + } else { + bh = ocfs2_get_quota_block(gqinode, blk, &err); + if (!bh) { + mlog_errno(err); + return err; + } + err = ocfs2_journal_access(handle, gqinode, bh, + OCFS2_JOURNAL_ACCESS_CREATE); + } + if (err < 0) { + brelse(bh); + goto out; + } + lock_buffer(bh); + if (new) + memset(bh->b_data, 0, sb->s_blocksize); + memcpy(bh->b_data + offset, data, len); + flush_dcache_page(bh->b_page); + unlock_buffer(bh); + ocfs2_set_buffer_uptodate(gqinode, bh); + err = ocfs2_journal_dirty(handle, bh); + brelse(bh); + if (err < 0) + goto out; +out: + if (err) { + mutex_unlock(&gqinode->i_mutex); + mlog_errno(err); + return err; + } + gqinode->i_version++; + ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh); + mutex_unlock(&gqinode->i_mutex); + return len; +} + +int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex) +{ + int status; + struct buffer_head *bh = NULL; + + status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex); + if (status < 0) + return status; + spin_lock(&dq_data_lock); + if (!oinfo->dqi_gqi_count++) + oinfo->dqi_gqi_bh = bh; + else + WARN_ON(bh != oinfo->dqi_gqi_bh); + spin_unlock(&dq_data_lock); + return 0; +} + +void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex) +{ + ocfs2_inode_unlock(oinfo->dqi_gqinode, ex); + brelse(oinfo->dqi_gqi_bh); + spin_lock(&dq_data_lock); + if (!--oinfo->dqi_gqi_count) + oinfo->dqi_gqi_bh = NULL; + spin_unlock(&dq_data_lock); +} + +/* Read information header from global quota file */ +int ocfs2_global_read_info(struct super_block *sb, int type) +{ + struct inode *gqinode = NULL; + unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE, + GROUP_QUOTA_SYSTEM_INODE }; + struct ocfs2_global_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + int status; + + mlog_entry_void(); + + /* Read global header */ + gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], + OCFS2_INVALID_SLOT); + if (!gqinode) { + mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n", + type); + status = -EINVAL; + goto out_err; + } + oinfo->dqi_gi.dqi_sb = sb; + oinfo->dqi_gi.dqi_type = type; + ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo); + oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk); + oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops; + oinfo->dqi_gqi_bh = NULL; + oinfo->dqi_gqi_count = 0; + oinfo->dqi_gqinode = gqinode; + status = ocfs2_lock_global_qf(oinfo, 0); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + status = sb->s_op->quota_read(sb, type, (char *)&dinfo, + sizeof(struct ocfs2_global_disk_dqinfo), + OCFS2_GLOBAL_INFO_OFF); + ocfs2_unlock_global_qf(oinfo, 0); + if (status != sizeof(struct ocfs2_global_disk_dqinfo)) { + mlog(ML_ERROR, "Cannot read global quota info (%d).\n", + status); + if (status >= 0) + status = -EIO; + mlog_errno(status); + goto out_err; + } + info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); + info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); + oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms); + oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + oinfo->dqi_gi.dqi_blocksize_bits = 
sb->s_blocksize_bits; + oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize - + OCFS2_QBLK_RESERVED_SPACE; + oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); +out_err: + mlog_exit(status); + return status; +} + +/* Write information to global quota file. Expects exlusive lock on quota + * file inode and quota info */ +static int __ocfs2_global_write_info(struct super_block *sb, int type) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct ocfs2_global_disk_dqinfo dinfo; + ssize_t size; + + spin_lock(&dq_data_lock); + info->dqi_flags &= ~DQF_INFO_DIRTY; + dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); + dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); + spin_unlock(&dq_data_lock); + dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms); + dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry); + size = sb->s_op->quota_write(sb, type, (char *)&dinfo, + sizeof(struct ocfs2_global_disk_dqinfo), + OCFS2_GLOBAL_INFO_OFF); + if (size != sizeof(struct ocfs2_global_disk_dqinfo)) { + mlog(ML_ERROR, "Cannot write global quota info structure\n"); + if (size >= 0) + size = -EIO; + return size; + } + return 0; +} + +int ocfs2_global_write_info(struct super_block *sb, int type) +{ + int err; + struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; + + err = ocfs2_qinfo_lock(info, 1); + if (err < 0) + return err; + err = __ocfs2_global_write_info(sb, type); + ocfs2_qinfo_unlock(info, 1); + return err; +} + +/* Read in information from global quota file and acquire a reference to it. + * dquot_acquire() has already started the transaction and locked quota file */ +int ocfs2_global_read_dquot(struct dquot *dquot) +{ + int err, err2, ex = 0; + struct ocfs2_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + err = ocfs2_qinfo_lock(info, 0); + if (err < 0) + goto out; + err = qtree_read_dquot(&info->dqi_gi, dquot); + if (err < 0) + goto out_qlock; + OCFS2_DQUOT(dquot)->dq_use_count++; + OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; + OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; + if (!dquot->dq_off) { /* No real quota entry? */ + /* Upgrade to exclusive lock for allocation */ + err = ocfs2_qinfo_lock(info, 1); + if (err < 0) + goto out_qlock; + ex = 1; + } + err = qtree_write_dquot(&info->dqi_gi, dquot); + if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) { + err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type); + if (!err) + err = err2; + } +out_qlock: + if (ex) + ocfs2_qinfo_unlock(info, 1); + ocfs2_qinfo_unlock(info, 0); +out: + if (err < 0) + mlog_errno(err); + return err; +} + +/* Sync local information about quota modifications with global quota file. 
+ * Caller must have started the transaction and obtained exclusive lock for + * global quota file inode */ +int __ocfs2_sync_dquot(struct dquot *dquot, int freeing) +{ + int err, err2; + struct super_block *sb = dquot->dq_sb; + int type = dquot->dq_type; + struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; + struct ocfs2_global_disk_dqblk dqblk; + s64 spacechange, inodechange; + time_t olditime, oldbtime; + + err = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct ocfs2_global_disk_dqblk), + dquot->dq_off); + if (err != sizeof(struct ocfs2_global_disk_dqblk)) { + if (err >= 0) { + mlog(ML_ERROR, "Short read from global quota file " + "(%u read)\n", err); + err = -EIO; + } + goto out; + } + + /* Update space and inode usage. Get also other information from + * global quota file so that we don't overwrite any changes there. + * We are */ + spin_lock(&dq_data_lock); + spacechange = dquot->dq_dqb.dqb_curspace - + OCFS2_DQUOT(dquot)->dq_origspace; + inodechange = dquot->dq_dqb.dqb_curinodes - + OCFS2_DQUOT(dquot)->dq_originodes; + olditime = dquot->dq_dqb.dqb_itime; + oldbtime = dquot->dq_dqb.dqb_btime; + ocfs2_global_disk2memdqb(dquot, &dqblk); + mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n", + dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange, + dquot->dq_dqb.dqb_curinodes, inodechange); + if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) + dquot->dq_dqb.dqb_curspace += spacechange; + if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) + dquot->dq_dqb.dqb_curinodes += inodechange; + /* Set properly space grace time... */ + if (dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) { + if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) && + oldbtime > 0) { + if (dquot->dq_dqb.dqb_btime > 0) + dquot->dq_dqb.dqb_btime = + min(dquot->dq_dqb.dqb_btime, oldbtime); + else + dquot->dq_dqb.dqb_btime = oldbtime; + } + } else { + dquot->dq_dqb.dqb_btime = 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); + } + /* Set properly inode grace time... 
*/ + if (dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) { + if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) && + olditime > 0) { + if (dquot->dq_dqb.dqb_itime > 0) + dquot->dq_dqb.dqb_itime = + min(dquot->dq_dqb.dqb_itime, olditime); + else + dquot->dq_dqb.dqb_itime = olditime; + } + } else { + dquot->dq_dqb.dqb_itime = 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); + } + /* All information is properly updated, clear the flags */ + __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); + __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); + __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); + __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); + __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; + OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; + spin_unlock(&dq_data_lock); + err = ocfs2_qinfo_lock(info, freeing); + if (err < 0) { + mlog(ML_ERROR, "Failed to lock quota info, loosing quota write" + " (type=%d, id=%u)\n", dquot->dq_type, + (unsigned)dquot->dq_id); + goto out; + } + if (freeing) + OCFS2_DQUOT(dquot)->dq_use_count--; + err = qtree_write_dquot(&info->dqi_gi, dquot); + if (err < 0) + goto out_qlock; + if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) { + err = qtree_release_dquot(&info->dqi_gi, dquot); + if (info_dirty(sb_dqinfo(sb, type))) { + err2 = __ocfs2_global_write_info(sb, type); + if (!err) + err = err2; + } + } +out_qlock: + ocfs2_qinfo_unlock(info, freeing); +out: + if (err < 0) + mlog_errno(err); + return err; +} + +/* + * Wrappers for generic quota functions + */ + +static int ocfs2_write_dquot(struct dquot *dquot) +{ + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); + int status = 0; + + mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); + + handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + status = dquot_commit(dquot); + ocfs2_commit_trans(osb, handle); +out: + mlog_exit(status); + return status; +} + +int ocfs2_calc_qdel_credits(struct super_block *sb, int type) +{ + struct ocfs2_mem_dqinfo *oinfo; + int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA }; + + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type])) + return 0; + + oinfo = sb_dqinfo(sb, type)->dqi_priv; + /* We modify tree, leaf block, global info, local chunk header, + * global and local inode */ + return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 + + 2 * OCFS2_INODE_UPDATE_CREDITS; +} + +static int ocfs2_release_dquot(struct dquot *dquot) +{ + handle_t *handle; + struct ocfs2_mem_dqinfo *oinfo = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); + int status = 0; + + mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); + + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + handle = ocfs2_start_trans(osb, + ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + status = dquot_release(dquot); + ocfs2_commit_trans(osb, handle); +out_ilock: + ocfs2_unlock_global_qf(oinfo, 1); +out: + mlog_exit(status); + return status; +} + +int ocfs2_calc_qinit_credits(struct super_block *sb, int type) +{ + struct ocfs2_mem_dqinfo *oinfo; 
+ int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA }; + struct ocfs2_dinode *lfe, *gfe; + + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type])) + return 0; + + oinfo = sb_dqinfo(sb, type)->dqi_priv; + gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data; + lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data; + /* We can extend local file + global file. In local file we + * can modify info, chunk header block and dquot block. In + * global file we can modify info, tree and leaf block */ + return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) + + ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) + + 3 + oinfo->dqi_gi.dqi_qtree_depth + 2; +} + +static int ocfs2_acquire_dquot(struct dquot *dquot) +{ + handle_t *handle; + struct ocfs2_mem_dqinfo *oinfo = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); + int status = 0; + + mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); + /* We need an exclusive lock, because we're going to update use count + * and instantiate possibly new dquot structure */ + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + handle = ocfs2_start_trans(osb, + ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + status = dquot_acquire(dquot); + ocfs2_commit_trans(osb, handle); +out_ilock: + ocfs2_unlock_global_qf(oinfo, 1); +out: + mlog_exit(status); + return status; +} + +static int ocfs2_mark_dquot_dirty(struct dquot *dquot) +{ + unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) | + (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) | + (1 << (DQ_LASTSET_B + QIF_INODES_B)) | + (1 << (DQ_LASTSET_B + QIF_SPACE_B)) | + (1 << (DQ_LASTSET_B + QIF_BTIME_B)) | + (1 << (DQ_LASTSET_B + QIF_ITIME_B)); + int sync = 0; + int status; + struct super_block *sb = dquot->dq_sb; + int type = dquot->dq_type; + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(sb); + + mlog_entry("id=%u, type=%d", dquot->dq_id, type); + dquot_mark_dquot_dirty(dquot); + + /* In case user set some limits, sync dquot immediately to global + * quota file so that information propagates quicker */ + spin_lock(&dq_data_lock); + if (dquot->dq_flags & mask) + sync = 1; + spin_unlock(&dq_data_lock); + if (!sync) { + status = ocfs2_write_dquot(dquot); + goto out; + } + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + status = ocfs2_sync_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + /* Now write updated local dquot structure */ + status = dquot_commit(dquot); +out_trans: + ocfs2_commit_trans(osb, handle); +out_ilock: + ocfs2_unlock_global_qf(oinfo, 1); +out: + mlog_exit(status); + return status; +} + +/* This should happen only after set_dqinfo(). 
*/ +static int ocfs2_write_info(struct super_block *sb, int type) +{ + handle_t *handle; + int status = 0; + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; + + mlog_entry_void(); + + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + status = dquot_commit_info(sb, type); + ocfs2_commit_trans(OCFS2_SB(sb), handle); +out_ilock: + ocfs2_unlock_global_qf(oinfo, 1); +out: + mlog_exit(status); + return status; +} + +/* This is difficult. We have to lock quota inode and start transaction + * in this function but we don't want to take the penalty of exlusive + * quota file lock when we are just going to use cached structures. So + * we just take read lock check whether we have dquot cached and if so, + * we don't have to take the write lock... */ +static int ocfs2_dquot_initialize(struct inode *inode, int type) +{ + handle_t *handle = NULL; + int status = 0; + struct super_block *sb = inode->i_sb; + struct ocfs2_mem_dqinfo *oinfo; + int exclusive = 0; + int cnt; + qid_t id; + + mlog_entry_void(); + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + oinfo = sb_dqinfo(sb, cnt)->dqi_priv; + status = ocfs2_lock_global_qf(oinfo, 0); + if (status < 0) + goto out; + /* This is just a performance optimization not a reliable test. + * Since we hold an inode lock, noone can actually release + * the structure until we are finished with initialization. */ + if (inode->i_dquot[cnt] != NODQUOT) { + ocfs2_unlock_global_qf(oinfo, 0); + continue; + } + /* When we have inode lock, we know that no dquot_release() can + * run and thus we can safely check whether we need to + * read+modify global file to get quota information or whether + * our node already has it. */ + if (cnt == USRQUOTA) + id = inode->i_uid; + else if (cnt == GRPQUOTA) + id = inode->i_gid; + else + BUG(); + /* Obtain exclusion from quota off... 
*/ + down_write(&sb_dqopt(sb)->dqptr_sem); + exclusive = !dquot_is_cached(sb, id, cnt); + up_write(&sb_dqopt(sb)->dqptr_sem); + if (exclusive) { + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) { + exclusive = 0; + mlog_errno(status); + goto out_ilock; + } + handle = ocfs2_start_trans(OCFS2_SB(sb), + ocfs2_calc_qinit_credits(sb, cnt)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + } + dquot_initialize(inode, cnt); + if (exclusive) { + ocfs2_commit_trans(OCFS2_SB(sb), handle); + ocfs2_unlock_global_qf(oinfo, 1); + } + ocfs2_unlock_global_qf(oinfo, 0); + } + mlog_exit(0); + return 0; +out_ilock: + if (exclusive) + ocfs2_unlock_global_qf(oinfo, 1); + ocfs2_unlock_global_qf(oinfo, 0); +out: + mlog_exit(status); + return status; +} + +static int ocfs2_dquot_drop_slow(struct inode *inode) +{ + int status; + int cnt; + int got_lock[MAXQUOTAS] = {0, 0}; + handle_t *handle; + struct super_block *sb = inode->i_sb; + struct ocfs2_mem_dqinfo *oinfo; + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (!sb_has_quota_active(sb, cnt)) + continue; + oinfo = sb_dqinfo(sb, cnt)->dqi_priv; + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + got_lock[cnt] = 1; + } + handle = ocfs2_start_trans(OCFS2_SB(sb), + ocfs2_calc_qinit_credits(sb, USRQUOTA) + + ocfs2_calc_qinit_credits(sb, GRPQUOTA)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + dquot_drop(inode); + ocfs2_commit_trans(OCFS2_SB(sb), handle); +out: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (got_lock[cnt]) { + oinfo = sb_dqinfo(sb, cnt)->dqi_priv; + ocfs2_unlock_global_qf(oinfo, 1); + } + return status; +} + +/* See the comment before ocfs2_dquot_initialize. */ +static int ocfs2_dquot_drop(struct inode *inode) +{ + int status = 0; + struct super_block *sb = inode->i_sb; + struct ocfs2_mem_dqinfo *oinfo; + int exclusive = 0; + int cnt; + int got_lock[MAXQUOTAS] = {0, 0}; + + mlog_entry_void(); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (!sb_has_quota_active(sb, cnt)) + continue; + oinfo = sb_dqinfo(sb, cnt)->dqi_priv; + status = ocfs2_lock_global_qf(oinfo, 0); + if (status < 0) + goto out; + got_lock[cnt] = 1; + } + /* Lock against anyone releasing references so that when when we check + * we know we are not going to be last ones to release dquot */ + down_write(&sb_dqopt(sb)->dqptr_sem); + /* Urgh, this is a terrible hack :( */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT && + atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) { + exclusive = 1; + break; + } + } + if (!exclusive) + dquot_drop_locked(inode); + up_write(&sb_dqopt(sb)->dqptr_sem); +out: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (got_lock[cnt]) { + oinfo = sb_dqinfo(sb, cnt)->dqi_priv; + ocfs2_unlock_global_qf(oinfo, 0); + } + /* In case we bailed out because we had to do expensive locking + * do it now... 
*/ + if (exclusive) + status = ocfs2_dquot_drop_slow(inode); + mlog_exit(status); + return status; +} + +static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type) +{ + struct ocfs2_dquot *dquot = + kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS); + + if (!dquot) + return NULL; + return &dquot->dq_dquot; +} + +static void ocfs2_destroy_dquot(struct dquot *dquot) +{ + kmem_cache_free(ocfs2_dquot_cachep, dquot); +} + +struct dquot_operations ocfs2_quota_operations = { + .initialize = ocfs2_dquot_initialize, + .drop = ocfs2_dquot_drop, + .alloc_space = dquot_alloc_space, + .alloc_inode = dquot_alloc_inode, + .free_space = dquot_free_space, + .free_inode = dquot_free_inode, + .transfer = dquot_transfer, + .write_dquot = ocfs2_write_dquot, + .acquire_dquot = ocfs2_acquire_dquot, + .release_dquot = ocfs2_release_dquot, + .mark_dirty = ocfs2_mark_dquot_dirty, + .write_info = ocfs2_write_info, + .alloc_dquot = ocfs2_alloc_dquot, + .destroy_dquot = ocfs2_destroy_dquot, +}; diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c new file mode 100644 index 000000000000..55c3f2f98dcd --- /dev/null +++ b/fs/ocfs2/quota_local.c @@ -0,0 +1,833 @@ +/* + * Implementation of operations over local quota file + */ + +#include +#include +#include +#include + +#define MLOG_MASK_PREFIX ML_QUOTA +#include + +#include "ocfs2_fs.h" +#include "ocfs2.h" +#include "inode.h" +#include "alloc.h" +#include "file.h" +#include "buffer_head_io.h" +#include "journal.h" +#include "sysfile.h" +#include "dlmglue.h" +#include "quota.h" + +/* Number of local quota structures per block */ +static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) +{ + return ((sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) / + sizeof(struct ocfs2_local_disk_dqblk)); +} + +/* Number of blocks with entries in one chunk */ +static inline unsigned int ol_chunk_blocks(struct super_block *sb) +{ + return ((sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - + OCFS2_QBLK_RESERVED_SPACE) << 3) / + ol_quota_entries_per_block(sb); +} + +/* Number of entries in a chunk bitmap */ +static unsigned int ol_chunk_entries(struct super_block *sb) +{ + return ol_chunk_blocks(sb) * ol_quota_entries_per_block(sb); +} + +/* Offset of the chunk in quota file */ +static unsigned int ol_quota_chunk_block(struct super_block *sb, int c) +{ + /* 1 block for local quota file info, 1 block per chunk for chunk info */ + return 1 + (ol_chunk_blocks(sb) + 1) * c; +} + +/* Offset of the dquot structure in the quota file */ +static loff_t ol_dqblk_off(struct super_block *sb, int c, int off) +{ + int epb = ol_quota_entries_per_block(sb); + + return ((ol_quota_chunk_block(sb, c) + 1 + off / epb) + << sb->s_blocksize_bits) + + (off % epb) * sizeof(struct ocfs2_local_disk_dqblk); +} + +/* Compute block number from given offset */ +static inline unsigned int ol_dqblk_file_block(struct super_block *sb, loff_t off) +{ + return off >> sb->s_blocksize_bits; +} + +static inline unsigned int ol_dqblk_block_offset(struct super_block *sb, loff_t off) +{ + return off & ((1 << sb->s_blocksize_bits) - 1); +} + +/* Compute offset in the chunk of a structure with the given offset */ +static int ol_dqblk_chunk_off(struct super_block *sb, int c, loff_t off) +{ + int epb = ol_quota_entries_per_block(sb); + + return ((off >> sb->s_blocksize_bits) - + ol_quota_chunk_block(sb, c) - 1) * epb + + ((unsigned int)(off & ((1 << sb->s_blocksize_bits) - 1))) / + sizeof(struct ocfs2_local_disk_dqblk); +} + +/* Write bufferhead into the fs */ +static int 
ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh, + void (*modify)(struct buffer_head *, void *), void *private) +{ + struct super_block *sb = inode->i_sb; + handle_t *handle; + int status; + + handle = ocfs2_start_trans(OCFS2_SB(sb), 1); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + return status; + } + status = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + ocfs2_commit_trans(OCFS2_SB(sb), handle); + return status; + } + lock_buffer(bh); + modify(bh, private); + unlock_buffer(bh); + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + ocfs2_commit_trans(OCFS2_SB(sb), handle); + return status; + } + status = ocfs2_commit_trans(OCFS2_SB(sb), handle); + if (status < 0) { + mlog_errno(status); + return status; + } + return 0; +} + +/* Check whether we understand format of quota files */ +static int ocfs2_local_check_quota_file(struct super_block *sb, int type) +{ + unsigned int lmagics[MAXQUOTAS] = OCFS2_LOCAL_QMAGICS; + unsigned int lversions[MAXQUOTAS] = OCFS2_LOCAL_QVERSIONS; + unsigned int gmagics[MAXQUOTAS] = OCFS2_GLOBAL_QMAGICS; + unsigned int gversions[MAXQUOTAS] = OCFS2_GLOBAL_QVERSIONS; + unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE, + GROUP_QUOTA_SYSTEM_INODE }; + struct buffer_head *bh; + struct inode *linode = sb_dqopt(sb)->files[type]; + struct inode *ginode = NULL; + struct ocfs2_disk_dqheader *dqhead; + int status, ret = 0; + + /* First check whether we understand local quota file */ + bh = ocfs2_read_quota_block(linode, 0, &status); + if (!bh) { + mlog_errno(status); + mlog(ML_ERROR, "failed to read quota file header (type=%d)\n", + type); + goto out_err; + } + dqhead = (struct ocfs2_disk_dqheader *)(bh->b_data); + if (le32_to_cpu(dqhead->dqh_magic) != lmagics[type]) { + mlog(ML_ERROR, "quota file magic does not match (%u != %u)," + " type=%d\n", le32_to_cpu(dqhead->dqh_magic), + lmagics[type], type); + goto out_err; + } + if (le32_to_cpu(dqhead->dqh_version) != lversions[type]) { + mlog(ML_ERROR, "quota file version does not match (%u != %u)," + " type=%d\n", le32_to_cpu(dqhead->dqh_version), + lversions[type], type); + goto out_err; + } + brelse(bh); + bh = NULL; + + /* Next check whether we understand global quota file */ + ginode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], + OCFS2_INVALID_SLOT); + if (!ginode) { + mlog(ML_ERROR, "cannot get global quota file inode " + "(type=%d)\n", type); + goto out_err; + } + /* Since the header is read only, we don't care about locking */ + bh = ocfs2_read_quota_block(ginode, 0, &status); + if (!bh) { + mlog_errno(status); + mlog(ML_ERROR, "failed to read global quota file header " + "(type=%d)\n", type); + goto out_err; + } + dqhead = (struct ocfs2_disk_dqheader *)(bh->b_data); + if (le32_to_cpu(dqhead->dqh_magic) != gmagics[type]) { + mlog(ML_ERROR, "global quota file magic does not match " + "(%u != %u), type=%d\n", + le32_to_cpu(dqhead->dqh_magic), gmagics[type], type); + goto out_err; + } + if (le32_to_cpu(dqhead->dqh_version) != gversions[type]) { + mlog(ML_ERROR, "global quota file version does not match " + "(%u != %u), type=%d\n", + le32_to_cpu(dqhead->dqh_version), gversions[type], + type); + goto out_err; + } + + ret = 1; +out_err: + brelse(bh); + iput(ginode); + return ret; +} + +/* Release given list of quota file chunks */ +static void ocfs2_release_local_quota_bitmaps(struct list_head *head) +{ + struct ocfs2_quota_chunk *pos, *next; + + 
list_for_each_entry_safe(pos, next, head, qc_chunk) { + list_del(&pos->qc_chunk); + brelse(pos->qc_headerbh); + kmem_cache_free(ocfs2_qf_chunk_cachep, pos); + } +} + +/* Load quota bitmaps into memory */ +static int ocfs2_load_local_quota_bitmaps(struct inode *inode, + struct ocfs2_local_disk_dqinfo *ldinfo, + struct list_head *head) +{ + struct ocfs2_quota_chunk *newchunk; + int i, status; + + INIT_LIST_HEAD(head); + for (i = 0; i < le32_to_cpu(ldinfo->dqi_chunks); i++) { + newchunk = kmem_cache_alloc(ocfs2_qf_chunk_cachep, GFP_NOFS); + if (!newchunk) { + ocfs2_release_local_quota_bitmaps(head); + return -ENOMEM; + } + newchunk->qc_num = i; + newchunk->qc_headerbh = ocfs2_read_quota_block(inode, + ol_quota_chunk_block(inode->i_sb, i), + &status); + if (!newchunk->qc_headerbh) { + mlog_errno(status); + kmem_cache_free(ocfs2_qf_chunk_cachep, newchunk); + ocfs2_release_local_quota_bitmaps(head); + return status; + } + list_add_tail(&newchunk->qc_chunk, head); + } + return 0; +} + +static void olq_update_info(struct buffer_head *bh, void *private) +{ + struct mem_dqinfo *info = private; + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct ocfs2_local_disk_dqinfo *ldinfo; + + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + + OCFS2_LOCAL_INFO_OFF); + spin_lock(&dq_data_lock); + ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); + ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks); + ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks); + spin_unlock(&dq_data_lock); +} + +/* Read information header from quota file */ +static int ocfs2_local_read_info(struct super_block *sb, int type) +{ + struct ocfs2_local_disk_dqinfo *ldinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo; + struct inode *lqinode = sb_dqopt(sb)->files[type]; + int status; + struct buffer_head *bh = NULL; + int locked = 0; + + info->dqi_maxblimit = 0x7fffffffffffffffLL; + info->dqi_maxilimit = 0x7fffffffffffffffLL; + oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS); + if (!oinfo) { + mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota" + " info."); + goto out_err; + } + info->dqi_priv = oinfo; + oinfo->dqi_type = type; + INIT_LIST_HEAD(&oinfo->dqi_chunk); + oinfo->dqi_lqi_bh = NULL; + oinfo->dqi_ibh = NULL; + + status = ocfs2_global_read_info(sb, type); + if (status < 0) + goto out_err; + + status = ocfs2_inode_lock(lqinode, &oinfo->dqi_lqi_bh, 1); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + locked = 1; + + /* Now read local header */ + bh = ocfs2_read_quota_block(lqinode, 0, &status); + if (!bh) { + mlog_errno(status); + mlog(ML_ERROR, "failed to read quota file info header " + "(type=%d)\n", type); + goto out_err; + } + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + + OCFS2_LOCAL_INFO_OFF); + info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags); + oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks); + oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks); + oinfo->dqi_ibh = bh; + + /* We crashed when using local quota file? */ + if (!(info->dqi_flags & OLQF_CLEAN)) + goto out_err; /* So far we just bail out. 
Later we should resync here */ + + status = ocfs2_load_local_quota_bitmaps(sb_dqopt(sb)->files[type], + ldinfo, + &oinfo->dqi_chunk); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + + /* Now mark quota file as used */ + info->dqi_flags &= ~OLQF_CLEAN; + status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + + return 0; +out_err: + if (oinfo) { + iput(oinfo->dqi_gqinode); + ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock); + ocfs2_lock_res_free(&oinfo->dqi_gqlock); + brelse(oinfo->dqi_lqi_bh); + if (locked) + ocfs2_inode_unlock(lqinode, 1); + ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); + kfree(oinfo); + } + brelse(bh); + return -1; +} + +/* Write local info to quota file */ +static int ocfs2_local_write_info(struct super_block *sb, int type) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv) + ->dqi_ibh; + int status; + + status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info, + info); + if (status < 0) { + mlog_errno(status); + return -1; + } + + return 0; +} + +/* Release info from memory */ +static int ocfs2_local_free_info(struct super_block *sb, int type) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct ocfs2_quota_chunk *chunk; + struct ocfs2_local_disk_chunk *dchunk; + int mark_clean = 1, len; + int status; + + iput(oinfo->dqi_gqinode); + ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock); + ocfs2_lock_res_free(&oinfo->dqi_gqlock); + list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) { + dchunk = (struct ocfs2_local_disk_chunk *) + (chunk->qc_headerbh->b_data); + if (chunk->qc_num < oinfo->dqi_chunks - 1) { + len = ol_chunk_entries(sb); + } else { + len = (oinfo->dqi_blocks - + ol_quota_chunk_block(sb, chunk->qc_num) - 1) + * ol_quota_entries_per_block(sb); + } + /* Not all entries free? Bug! 
*/ + if (le32_to_cpu(dchunk->dqc_free) != len) { + mlog(ML_ERROR, "releasing quota file with used " + "entries (type=%d)\n", type); + mark_clean = 0; + } + } + ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); + + if (!mark_clean) + goto out; + + /* Mark local file as clean */ + info->dqi_flags |= OLQF_CLEAN; + status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], + oinfo->dqi_ibh, + olq_update_info, + info); + if (status < 0) { + mlog_errno(status); + goto out; + } + +out: + ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1); + brelse(oinfo->dqi_ibh); + brelse(oinfo->dqi_lqi_bh); + kfree(oinfo); + return 0; +} + +static void olq_set_dquot(struct buffer_head *bh, void *private) +{ + struct ocfs2_dquot *od = private; + struct ocfs2_local_disk_dqblk *dqblk; + struct super_block *sb = od->dq_dquot.dq_sb; + + dqblk = (struct ocfs2_local_disk_dqblk *)(bh->b_data + + ol_dqblk_block_offset(sb, od->dq_local_off)); + + dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id); + spin_lock(&dq_data_lock); + dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace - + od->dq_origspace); + dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - + od->dq_originodes); + spin_unlock(&dq_data_lock); + mlog(0, "Writing local dquot %u space %lld inodes %lld\n", + od->dq_dquot.dq_id, dqblk->dqb_spacemod, dqblk->dqb_inodemod); +} + +/* Write dquot to local quota file */ +static int ocfs2_local_write_dquot(struct dquot *dquot) +{ + struct super_block *sb = dquot->dq_sb; + struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); + struct buffer_head *bh; + int status; + + bh = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type], + ol_dqblk_file_block(sb, od->dq_local_off), + &status); + if (!bh) { + mlog_errno(status); + goto out; + } + status = ocfs2_modify_bh(sb_dqopt(sb)->files[dquot->dq_type], bh, + olq_set_dquot, od); + if (status < 0) { + mlog_errno(status); + goto out; + } +out: + brelse(bh); + return status; +} + +/* Find free entry in local quota file */ +static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb, + int type, + int *offset) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct ocfs2_quota_chunk *chunk; + struct ocfs2_local_disk_chunk *dchunk; + int found = 0, len; + + list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) { + dchunk = (struct ocfs2_local_disk_chunk *) + chunk->qc_headerbh->b_data; + if (le32_to_cpu(dchunk->dqc_free) > 0) { + found = 1; + break; + } + } + if (!found) + return NULL; + + if (chunk->qc_num < oinfo->dqi_chunks - 1) { + len = ol_chunk_entries(sb); + } else { + len = (oinfo->dqi_blocks - + ol_quota_chunk_block(sb, chunk->qc_num) - 1) + * ol_quota_entries_per_block(sb); + } + + found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0); + /* We failed? 
*/ + if (found == len) { + mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" + " entries free (type=%d)\n", chunk->qc_num, + le32_to_cpu(dchunk->dqc_free), type); + return ERR_PTR(-EIO); + } + *offset = found; + return chunk; +} + +/* Add new chunk to the local quota file */ +static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( + struct super_block *sb, + int type, + int *offset) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct inode *lqinode = sb_dqopt(sb)->files[type]; + struct ocfs2_quota_chunk *chunk = NULL; + struct ocfs2_local_disk_chunk *dchunk; + int status; + handle_t *handle; + struct buffer_head *bh = NULL; + u64 p_blkno; + + /* We are protected by dqio_sem so no locking needed */ + status = ocfs2_extend_no_holes(lqinode, + lqinode->i_size + 2 * sb->s_blocksize, + lqinode->i_size); + if (status < 0) { + mlog_errno(status); + goto out; + } + status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, + lqinode->i_size + 2 * sb->s_blocksize); + if (status < 0) { + mlog_errno(status); + goto out; + } + + chunk = kmem_cache_alloc(ocfs2_qf_chunk_cachep, GFP_NOFS); + if (!chunk) { + status = -ENOMEM; + mlog_errno(status); + goto out; + } + + down_read(&OCFS2_I(lqinode)->ip_alloc_sem); + status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, + &p_blkno, NULL, NULL); + up_read(&OCFS2_I(lqinode)->ip_alloc_sem); + if (status < 0) { + mlog_errno(status); + goto out; + } + bh = sb_getblk(sb, p_blkno); + if (!bh) { + status = -ENOMEM; + mlog_errno(status); + goto out; + } + dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; + + handle = ocfs2_start_trans(OCFS2_SB(sb), 2); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + + status = ocfs2_journal_access(handle, lqinode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + lock_buffer(bh); + dchunk->dqc_free = ol_quota_entries_per_block(sb); + memset(dchunk->dqc_bitmap, 0, + sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - + OCFS2_QBLK_RESERVED_SPACE); + set_buffer_uptodate(bh); + unlock_buffer(bh); + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + + oinfo->dqi_blocks += 2; + oinfo->dqi_chunks++; + status = ocfs2_local_write_info(sb, type); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + status = ocfs2_commit_trans(OCFS2_SB(sb), handle); + if (status < 0) { + mlog_errno(status); + goto out; + } + + list_add_tail(&chunk->qc_chunk, &oinfo->dqi_chunk); + chunk->qc_num = list_entry(chunk->qc_chunk.prev, + struct ocfs2_quota_chunk, + qc_chunk)->qc_num + 1; + chunk->qc_headerbh = bh; + *offset = 0; + return chunk; +out_trans: + ocfs2_commit_trans(OCFS2_SB(sb), handle); +out: + brelse(bh); + kmem_cache_free(ocfs2_qf_chunk_cachep, chunk); + return ERR_PTR(status); +} + +/* Find free entry in local quota file */ +static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( + struct super_block *sb, + int type, + int *offset) +{ + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; + struct ocfs2_quota_chunk *chunk; + struct inode *lqinode = sb_dqopt(sb)->files[type]; + struct ocfs2_local_disk_chunk *dchunk; + int epb = ol_quota_entries_per_block(sb); + unsigned int chunk_blocks; + int status; + handle_t *handle; + + if (list_empty(&oinfo->dqi_chunk)) + return ocfs2_local_quota_add_chunk(sb, type, offset); + /* Is the last chunk full? 
*/ + chunk = list_entry(oinfo->dqi_chunk.prev, + struct ocfs2_quota_chunk, qc_chunk); + chunk_blocks = oinfo->dqi_blocks - + ol_quota_chunk_block(sb, chunk->qc_num) - 1; + if (ol_chunk_blocks(sb) == chunk_blocks) + return ocfs2_local_quota_add_chunk(sb, type, offset); + + /* We are protected by dqio_sem so no locking needed */ + status = ocfs2_extend_no_holes(lqinode, + lqinode->i_size + sb->s_blocksize, + lqinode->i_size); + if (status < 0) { + mlog_errno(status); + goto out; + } + status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, + lqinode->i_size + sb->s_blocksize); + if (status < 0) { + mlog_errno(status); + goto out; + } + handle = ocfs2_start_trans(OCFS2_SB(sb), 2); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + status = ocfs2_journal_access(handle, lqinode, chunk->qc_headerbh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + + dchunk = (struct ocfs2_local_disk_chunk *)chunk->qc_headerbh->b_data; + lock_buffer(chunk->qc_headerbh); + le32_add_cpu(&dchunk->dqc_free, ol_quota_entries_per_block(sb)); + unlock_buffer(chunk->qc_headerbh); + status = ocfs2_journal_dirty(handle, chunk->qc_headerbh); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + oinfo->dqi_blocks++; + status = ocfs2_local_write_info(sb, type); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + + status = ocfs2_commit_trans(OCFS2_SB(sb), handle); + if (status < 0) { + mlog_errno(status); + goto out; + } + *offset = chunk_blocks * epb; + return chunk; +out_trans: + ocfs2_commit_trans(OCFS2_SB(sb), handle); +out: + return ERR_PTR(status); +} + +void olq_alloc_dquot(struct buffer_head *bh, void *private) +{ + int *offset = private; + struct ocfs2_local_disk_chunk *dchunk; + + dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; + ocfs2_set_bit(*offset, dchunk->dqc_bitmap); + le32_add_cpu(&dchunk->dqc_free, -1); +} + +/* Create dquot in the local file for given id */ +static int ocfs2_create_local_dquot(struct dquot *dquot) +{ + struct super_block *sb = dquot->dq_sb; + int type = dquot->dq_type; + struct inode *lqinode = sb_dqopt(sb)->files[type]; + struct ocfs2_quota_chunk *chunk; + struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); + int offset; + int status; + + chunk = ocfs2_find_free_entry(sb, type, &offset); + if (!chunk) { + chunk = ocfs2_extend_local_quota_file(sb, type, &offset); + if (IS_ERR(chunk)) + return PTR_ERR(chunk); + } else if (IS_ERR(chunk)) { + return PTR_ERR(chunk); + } + od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset); + od->dq_chunk = chunk; + + /* Initialize dquot structure on disk */ + status = ocfs2_local_write_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out; + } + + /* Mark structure as allocated */ + status = ocfs2_modify_bh(lqinode, chunk->qc_headerbh, olq_alloc_dquot, + &offset); + if (status < 0) { + mlog_errno(status); + goto out; + } +out: + return status; +} + +/* Create entry in local file for dquot, load data from the global file */ +static int ocfs2_local_read_dquot(struct dquot *dquot) +{ + int status; + + mlog_entry("id=%u, type=%d\n", dquot->dq_id, dquot->dq_type); + + status = ocfs2_global_read_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + + /* Now create entry in the local quota file */ + status = ocfs2_create_local_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + mlog_exit(0); + return 0; +out_err: + mlog_exit(status); + return status; +} + +/* Release dquot 
structure from local quota file. ocfs2_release_dquot() has + * already started a transaction and obtained exclusive lock for global + * quota file. */ +static int ocfs2_local_release_dquot(struct dquot *dquot) +{ + int status; + int type = dquot->dq_type; + struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); + struct super_block *sb = dquot->dq_sb; + struct ocfs2_local_disk_chunk *dchunk; + int offset; + handle_t *handle = journal_current_handle(); + + BUG_ON(!handle); + /* First write all local changes to global file */ + status = ocfs2_global_release_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out; + } + + status = ocfs2_journal_access(handle, sb_dqopt(sb)->files[type], + od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out; + } + offset = ol_dqblk_chunk_off(sb, od->dq_chunk->qc_num, + od->dq_local_off); + dchunk = (struct ocfs2_local_disk_chunk *) + (od->dq_chunk->qc_headerbh->b_data); + /* Mark structure as freed */ + lock_buffer(od->dq_chunk->qc_headerbh); + ocfs2_clear_bit(offset, dchunk->dqc_bitmap); + le32_add_cpu(&dchunk->dqc_free, 1); + unlock_buffer(od->dq_chunk->qc_headerbh); + status = ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); + if (status < 0) { + mlog_errno(status); + goto out; + } + status = 0; +out: + /* Clear the read bit so that next time someone uses this + * dquot he reads fresh info from disk and allocates local + * dquot structure */ + clear_bit(DQ_READ_B, &dquot->dq_flags); + return status; +} + +static struct quota_format_ops ocfs2_format_ops = { + .check_quota_file = ocfs2_local_check_quota_file, + .read_file_info = ocfs2_local_read_info, + .write_file_info = ocfs2_global_write_info, + .free_file_info = ocfs2_local_free_info, + .read_dqblk = ocfs2_local_read_dquot, + .commit_dqblk = ocfs2_local_write_dquot, + .release_dqblk = ocfs2_local_release_dquot, +}; + +struct quota_format_type ocfs2_quota_format = { + .qf_fmt_id = QFMT_OCFS2, + .qf_ops = &ocfs2_format_ops, + .qf_owner = THIS_MODULE +}; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 41bb0197cf4c..7bb83e41581e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -65,10 +65,13 @@ #include "uptodate.h" #include "ver.h" #include "xattr.h" +#include "quota.h" #include "buffer_head_io.h" static struct kmem_cache *ocfs2_inode_cachep = NULL; +struct kmem_cache *ocfs2_dquot_cachep; +struct kmem_cache *ocfs2_qf_chunk_cachep; /* OCFS2 needs to schedule several differnt types of work which * require cluster locking, disk I/O, recovery waits, etc. 
Since these @@ -137,6 +140,8 @@ static const struct super_operations ocfs2_sops = { .put_super = ocfs2_put_super, .remount_fs = ocfs2_remount, .show_options = ocfs2_show_options, + .quota_read = ocfs2_quota_read, + .quota_write = ocfs2_quota_write, }; enum { @@ -1104,6 +1109,7 @@ static int __init ocfs2_init(void) ocfs2_set_locking_protocol(); + status = register_quota_format(&ocfs2_quota_format); leave: if (status < 0) { ocfs2_free_mem_caches(); @@ -1127,6 +1133,8 @@ static void __exit ocfs2_exit(void) destroy_workqueue(ocfs2_wq); } + unregister_quota_format(&ocfs2_quota_format); + debugfs_remove(ocfs2_debugfs_root); ocfs2_free_mem_caches(); @@ -1242,8 +1250,27 @@ static int ocfs2_initialize_mem_caches(void) (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), ocfs2_inode_init_once); - if (!ocfs2_inode_cachep) + ocfs2_dquot_cachep = kmem_cache_create("ocfs2_dquot_cache", + sizeof(struct ocfs2_dquot), + 0, + (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + NULL); + ocfs2_qf_chunk_cachep = kmem_cache_create("ocfs2_qf_chunk_cache", + sizeof(struct ocfs2_quota_chunk), + 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), + NULL); + if (!ocfs2_inode_cachep || !ocfs2_dquot_cachep || + !ocfs2_qf_chunk_cachep) { + if (ocfs2_inode_cachep) + kmem_cache_destroy(ocfs2_inode_cachep); + if (ocfs2_dquot_cachep) + kmem_cache_destroy(ocfs2_dquot_cachep); + if (ocfs2_qf_chunk_cachep) + kmem_cache_destroy(ocfs2_qf_chunk_cachep); return -ENOMEM; + } return 0; } @@ -1252,8 +1279,15 @@ static void ocfs2_free_mem_caches(void) { if (ocfs2_inode_cachep) kmem_cache_destroy(ocfs2_inode_cachep); - ocfs2_inode_cachep = NULL; + + if (ocfs2_dquot_cachep) + kmem_cache_destroy(ocfs2_dquot_cachep); + ocfs2_dquot_cachep = NULL; + + if (ocfs2_qf_chunk_cachep) + kmem_cache_destroy(ocfs2_qf_chunk_cachep); + ocfs2_qf_chunk_cachep = NULL; } static int ocfs2_get_sector(struct super_block *sb, -- cgit From a90714c150e3ce677c57a9dac3ab1ec342c75a95 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 9 Oct 2008 19:38:40 +0200 Subject: ocfs2: Add quota calls for allocation and freeing of inodes and space Add quota calls for allocation and freeing of inodes and space, also update estimates on number of needed credits for a transaction. Move out inode allocation from ocfs2_mknod_locked() because vfs_dq_init() must be called outside of a transaction. 
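
For readers unfamiliar with the generic quota hooks, the pattern applied throughout the hunks below is: reserve quota space with vfs_dq_alloc_space_nodirty() before claiming clusters inside the running transaction, and hand the reservation back with vfs_dq_free_space_nodirty() if the claim (or anything after it) fails. The following is only a minimal sketch of that pattern using the ocfs2 helpers already visible in this diff; example_alloc_one_cluster() is a made-up name for illustration and is not a function added by the patch, and the surrounding error handling is reduced to the bare minimum.

	/*
	 * Sketch of the reserve-then-claim pattern (compare the
	 * ocfs2_convert_inline_data_to_extents() and
	 * ocfs2_expand_inline_dir() hunks below).
	 */
	static int example_alloc_one_cluster(struct inode *inode,
					     handle_t *handle,
					     struct ocfs2_alloc_context *data_ac)
	{
		struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
		u32 bit_off, num_clusters;
		int ret;

		/* Charge the owner's quota before touching the bitmap. */
		if (vfs_dq_alloc_space_nodirty(inode,
					       ocfs2_clusters_to_bytes(osb->sb, 1)))
			return -EDQUOT;

		ret = ocfs2_claim_clusters(osb, handle, data_ac, 1,
					   &bit_off, &num_clusters);
		if (ret < 0)
			/* Claim failed: return the reserved bytes. */
			vfs_dq_free_space_nodirty(inode,
					ocfs2_clusters_to_bytes(osb->sb, 1));
		return ret;
	}

Because the quota write happens inside the same handle, the real call sites below also fold ocfs2_quota_trans_credits() into their credit estimates so the quota file updates fit in the transaction.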
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 20 +++++++++++-- fs/ocfs2/aops.c | 16 +++++++++-- fs/ocfs2/dir.c | 24 ++++++++++++++-- fs/ocfs2/file.c | 72 ++++++++++++++++++++++++++++++++++++++++++---- fs/ocfs2/inode.c | 10 +++++-- fs/ocfs2/journal.h | 84 ++++++++++++++++++++++++++++++++++++++++++++---------- fs/ocfs2/namei.c | 44 +++++++++++++++++++++++++--- fs/ocfs2/xattr.c | 14 +++++---- 8 files changed, 245 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 69d67ab069bb..84a7bd4db5da 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -28,6 +28,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_DISK_ALLOC #include @@ -5322,7 +5323,7 @@ int ocfs2_remove_btree_range(struct inode *inode, } } - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); @@ -6552,6 +6553,8 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, goto bail; } + vfs_dq_free_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, clusters_to_del)); spin_lock(&OCFS2_I(inode)->ip_lock); OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - clusters_to_del; @@ -6860,6 +6863,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct page **pages = NULL; loff_t end = osb->s_clustersize; struct ocfs2_extent_tree et; + int did_quota = 0; has_data = i_size_read(inode) ? 1 : 0; @@ -6879,7 +6883,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } } - handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS); + handle = ocfs2_start_trans(osb, + ocfs2_inline_to_extents_credits(osb->sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); @@ -6898,6 +6903,13 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, unsigned int page_end; u64 phys; + if (vfs_dq_alloc_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, 1))) { + ret = -EDQUOT; + goto out_commit; + } + did_quota = 1; + ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &num); if (ret) { @@ -6971,6 +6983,10 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } out_commit: + if (ret < 0 && did_quota) + vfs_dq_free_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, 1)); + ocfs2_commit_trans(osb, handle); out_unlock: diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 6af79adb2eca..6b647ec87bb3 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -27,6 +27,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_FILE_IO #include @@ -1730,6 +1731,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, wc->w_handle = handle; + if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) { + ret = -EDQUOT; + goto out_commit; + } /* * We don't want this to fail in ocfs2_write_end(), so do it * here. 
@@ -1738,7 +1744,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); - goto out_commit; + goto out_quota; } /* @@ -1751,14 +1757,14 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, mmap_page); if (ret) { mlog_errno(ret); - goto out_commit; + goto out_quota; } ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, len); if (ret) { mlog_errno(ret); - goto out_commit; + goto out_quota; } if (data_ac) @@ -1770,6 +1776,10 @@ success: *pagep = wc->w_target_page; *fsdata = wc; return 0; +out_quota: + if (clusters_to_alloc) + vfs_dq_free_space(inode, + ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); out_commit: ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index d83cff95759e..3708fe482e3e 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -40,6 +40,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_NAMEI #include @@ -1210,9 +1211,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, unsigned int blocks_wanted, struct buffer_head **first_block_bh) { - int ret, credits = OCFS2_INLINE_TO_EXTENTS_CREDITS; u32 alloc, bit_off, len; struct super_block *sb = dir->i_sb; + int ret, credits = ocfs2_inline_to_extents_credits(sb); u64 blkno, bytes = blocks_wanted << sb->s_blocksize_bits; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(dir); @@ -1221,6 +1222,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; handle_t *handle; struct ocfs2_extent_tree et; + int did_quota = 0; ocfs2_init_dinode_extent_tree(&et, dir, di_bh); @@ -1258,6 +1260,12 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, goto out_sem; } + if (vfs_dq_alloc_space_nodirty(dir, + ocfs2_clusters_to_bytes(osb->sb, alloc))) { + ret = -EDQUOT; + goto out_commit; + } + did_quota = 1; /* * Try to claim as many clusters as the bitmap can give though * if we only get one now, that's enough to continue. 
The rest @@ -1380,6 +1388,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, dirdata_bh = NULL; out_commit: + if (ret < 0 && did_quota) + vfs_dq_free_space_nodirty(dir, + ocfs2_clusters_to_bytes(osb->sb, 2)); ocfs2_commit_trans(osb, handle); out_sem: @@ -1404,7 +1415,7 @@ static int ocfs2_do_extend_dir(struct super_block *sb, struct buffer_head **new_bh) { int status; - int extend; + int extend, did_quota = 0; u64 p_blkno, v_blkno; spin_lock(&OCFS2_I(dir)->ip_lock); @@ -1414,6 +1425,13 @@ static int ocfs2_do_extend_dir(struct super_block *sb, if (extend) { u32 offset = OCFS2_I(dir)->ip_clusters; + if (vfs_dq_alloc_space_nodirty(dir, + ocfs2_clusters_to_bytes(sb, 1))) { + status = -EDQUOT; + goto bail; + } + did_quota = 1; + status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, 1, 0, parent_fe_bh, handle, data_ac, meta_ac, NULL); @@ -1439,6 +1457,8 @@ static int ocfs2_do_extend_dir(struct super_block *sb, } status = 0; bail: + if (did_quota && status < 0) + vfs_dq_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); mlog_exit(status); return status; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 372d96505a79..9374d374a264 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -35,6 +35,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_INODE #include @@ -57,6 +58,7 @@ #include "super.h" #include "xattr.h" #include "acl.h" +#include "quota.h" #include "buffer_head_io.h" @@ -534,6 +536,7 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start, enum ocfs2_alloc_restarted why; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; + int did_quota = 0; mlog_entry("(clusters_to_add = %u)\n", clusters_to_add); @@ -577,6 +580,13 @@ restart_all: } restarted_transaction: + if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, + clusters_to_add))) { + status = -EDQUOT; + goto leave; + } + did_quota = 1; + /* reserve a write to the file entry early on - that we if we * run out of credits in the allocation path, we can still * update i_size. 
*/ @@ -614,6 +624,10 @@ restarted_transaction: spin_lock(&OCFS2_I(inode)->ip_lock); clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); spin_unlock(&OCFS2_I(inode)->ip_lock); + /* Release unused quota reservation */ + vfs_dq_free_space(inode, + ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); + did_quota = 0; if (why != RESTART_NONE && clusters_to_add) { if (why == RESTART_META) { @@ -646,6 +660,9 @@ restarted_transaction: OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode)); leave: + if (status < 0 && did_quota) + vfs_dq_free_space(inode, + ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); if (handle) { ocfs2_commit_trans(osb, handle); handle = NULL; @@ -877,6 +894,9 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; handle_t *handle = NULL; + int locked[MAXQUOTAS] = {0, 0}; + int credits, qtype; + struct ocfs2_mem_dqinfo *oinfo; mlog_entry("(0x%p, '%.*s')\n", dentry, dentry->d_name.len, dentry->d_name.name); @@ -947,11 +967,47 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } } - handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); - if (IS_ERR(handle)) { - status = PTR_ERR(handle); - mlog_errno(status); - goto bail_unlock; + if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || + (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { + credits = OCFS2_INODE_UPDATE_CREDITS; + if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid + && OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { + oinfo = sb_dqinfo(sb, USRQUOTA)->dqi_priv; + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto bail_unlock; + credits += ocfs2_calc_qinit_credits(sb, USRQUOTA) + + ocfs2_calc_qdel_credits(sb, USRQUOTA); + locked[USRQUOTA] = 1; + } + if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid + && OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { + oinfo = sb_dqinfo(sb, GRPQUOTA)->dqi_priv; + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto bail_unlock; + credits += ocfs2_calc_qinit_credits(sb, GRPQUOTA) + + ocfs2_calc_qdel_credits(sb, GRPQUOTA); + locked[GRPQUOTA] = 1; + } + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + status = vfs_dq_transfer(inode, attr) ? 
-EDQUOT : 0; + if (status < 0) + goto bail_commit; + } else { + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } } /* @@ -974,6 +1030,12 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock: + for (qtype = 0; qtype < MAXQUOTAS; qtype++) { + if (!locked[qtype]) + continue; + oinfo = sb_dqinfo(sb, qtype)->dqi_priv; + ocfs2_unlock_global_qf(oinfo, 1); + } ocfs2_inode_unlock(inode, 1); bail_unlock_rw: if (size_change) diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 50dbc486ef71..288512c9dbc2 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -603,7 +604,8 @@ static int ocfs2_remove_inode(struct inode *inode, goto bail; } - handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS + + ocfs2_quota_trans_credits(inode->i_sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -635,6 +637,7 @@ static int ocfs2_remove_inode(struct inode *inode, } ocfs2_remove_from_cache(inode, di_bh); + vfs_dq_free_inode(inode); status = ocfs2_free_dinode(handle, inode_alloc_inode, inode_alloc_bh, di); @@ -917,7 +920,10 @@ void ocfs2_delete_inode(struct inode *inode) mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); - if (is_bad_inode(inode)) { + /* When we fail in read_inode() we mark inode as bad. The second test + * catches the case when inode allocation fails before allocating + * a block for inode. */ + if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) { mlog(0, "Skipping delete of bad inode\n"); goto bail; } diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 8203980fefed..ee08e9c1fc12 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -284,6 +284,37 @@ int ocfs2_journal_dirty(handle_t *handle, /* extended attribute block update */ #define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1 +/* global quotafile inode update, data block */ +#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) + +/* + * The two writes below can accidentally see global info dirty due + * to set_info() quotactl so make them prepared for the writes. + */ +/* quota data block, global info */ +/* Write to local quota file */ +#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + 1) + +/* global quota data block, local quota data block, global quota inode, + * global quota info */ +#define OCFS2_QSYNC_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 3) + +static inline int ocfs2_quota_trans_credits(struct super_block *sb) +{ + int credits = 0; + + if (OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) + credits += OCFS2_QWRITE_CREDITS; + if (OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) + credits += OCFS2_QWRITE_CREDITS; + return credits; +} + +/* Number of credits needed for removing quota structure from file */ +int ocfs2_calc_qdel_credits(struct super_block *sb, int type); +/* Number of credits needed for initialization of new quota structure */ +int ocfs2_calc_qinit_credits(struct super_block *sb, int type); + /* group extend. inode update and last group update. */ #define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) @@ -294,8 +325,11 @@ int ocfs2_journal_dirty(handle_t *handle, * prev. group desc. if we relink. 
*/ #define OCFS2_SUBALLOC_ALLOC (3) -#define OCFS2_INLINE_TO_EXTENTS_CREDITS (OCFS2_SUBALLOC_ALLOC \ - + OCFS2_INODE_UPDATE_CREDITS) +static inline int ocfs2_inline_to_extents_credits(struct super_block *sb) +{ + return OCFS2_SUBALLOC_ALLOC + OCFS2_INODE_UPDATE_CREDITS + + ocfs2_quota_trans_credits(sb); +} /* dinode + group descriptor update. We don't relink on free yet. */ #define OCFS2_SUBALLOC_FREE (2) @@ -304,16 +338,23 @@ int ocfs2_journal_dirty(handle_t *handle, #define OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC (OCFS2_SUBALLOC_FREE \ + OCFS2_TRUNCATE_LOG_UPDATE) -#define OCFS2_REMOVE_EXTENT_CREDITS (OCFS2_TRUNCATE_LOG_UPDATE + OCFS2_INODE_UPDATE_CREDITS) +static inline int ocfs2_remove_extent_credits(struct super_block *sb) +{ + return OCFS2_TRUNCATE_LOG_UPDATE + OCFS2_INODE_UPDATE_CREDITS + + ocfs2_quota_trans_credits(sb); +} /* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + * bitmap block for the new bit) */ #define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2) /* parent fe, parent block, new file entry, inode alloc fe, inode alloc - * group descriptor + mkdir/symlink blocks */ -#define OCFS2_MKNOD_CREDITS (3 + OCFS2_SUBALLOC_ALLOC \ - + OCFS2_DIR_LINK_ADDITIONAL_CREDITS) + * group descriptor + mkdir/symlink blocks + quota update */ +static inline int ocfs2_mknod_credits(struct super_block *sb) +{ + return 3 + OCFS2_SUBALLOC_ALLOC + OCFS2_DIR_LINK_ADDITIONAL_CREDITS + + ocfs2_quota_trans_credits(sb); +} /* local alloc metadata change + main bitmap updates */ #define OCFS2_WINDOW_MOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS \ @@ -323,13 +364,21 @@ int ocfs2_journal_dirty(handle_t *handle, * for the dinode, one for the new block. */ #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) -/* file update (nlink, etc) + directory mtime/ctime + dir entry block */ -#define OCFS2_LINK_CREDITS (2*OCFS2_INODE_UPDATE_CREDITS + 1) +/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota + * update on dir */ +static inline int ocfs2_link_credits(struct super_block *sb) +{ + return 2*OCFS2_INODE_UPDATE_CREDITS + 1 + + ocfs2_quota_trans_credits(sb); +} /* inode + dir inode (if we unlink a dir), + dir entry block + orphan * dir inode link */ -#define OCFS2_UNLINK_CREDITS (2 * OCFS2_INODE_UPDATE_CREDITS + 1 \ - + OCFS2_LINK_CREDITS) +static inline int ocfs2_unlink_credits(struct super_block *sb) +{ + /* The quota update from ocfs2_link_credits is unused here... */ + return 2 * OCFS2_INODE_UPDATE_CREDITS + 1 + ocfs2_link_credits(sb); +} /* dinode + orphan dir dinode + inode alloc dinode + orphan dir entry + * inode alloc group descriptor */ @@ -338,8 +387,10 @@ int ocfs2_journal_dirty(handle_t *handle, /* dinode update, old dir dinode update, new dir dinode update, old * dir dir entry, new dir dir entry, dir entry update for renaming * directory + target unlink */ -#define OCFS2_RENAME_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 3 \ - + OCFS2_UNLINK_CREDITS) +static inline int ocfs2_rename_credits(struct super_block *sb) +{ + return 3 * OCFS2_INODE_UPDATE_CREDITS + 3 + ocfs2_unlink_credits(sb); +} /* global bitmap dinode, group desc., relinked group, * suballocator dinode, group desc., relinked group, @@ -377,18 +428,19 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, * credit for the dinode there. 
*/ extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); - return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks; + return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + + ocfs2_quota_trans_credits(sb); } static inline int ocfs2_calc_symlink_credits(struct super_block *sb) { - int blocks = OCFS2_MKNOD_CREDITS; + int blocks = ocfs2_mknod_credits(sb); /* links can be longer than one block so we may update many * within our single allocated extent. */ blocks += ocfs2_clusters_to_blocks(sb, 1); - return blocks; + return blocks + ocfs2_quota_trans_credits(sb); } static inline int ocfs2_calc_group_alloc_credits(struct super_block *sb, @@ -425,6 +477,8 @@ static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb, /* update to the truncate log. */ credits += OCFS2_TRUNCATE_LOG_UPDATE; + credits += ocfs2_quota_trans_credits(sb); + return credits; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 0134bafdab9e..6173807ba23b 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -40,6 +40,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_NAMEI #include @@ -212,6 +213,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode) } else inode->i_gid = current_fsgid(); inode->i_mode = mode; + vfs_dq_init(inode); return inode; } @@ -236,6 +238,7 @@ static int ocfs2_mknod(struct inode *dir, struct ocfs2_security_xattr_info si = { .enable = 1, }; + int did_quota_inode = 0; mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, (unsigned long)dev, dentry->d_name.len, @@ -323,7 +326,8 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } - handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS + xattr_credits); + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb) + + xattr_credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -331,6 +335,15 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + /* We don't use standard VFS wrapper because we don't want vfs_dq_init + * to be called. */ + if (sb_any_quota_active(osb->sb) && + osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { + status = -EDQUOT; + goto leave; + } + did_quota_inode = 1; + /* do the real work now. 
*/ status = ocfs2_mknod_locked(osb, dir, inode, dentry, dev, &new_fe_bh, parent_fe_bh, handle, @@ -399,6 +412,8 @@ static int ocfs2_mknod(struct inode *dir, d_instantiate(dentry, inode); status = 0; leave: + if (status < 0 && did_quota_inode) + vfs_dq_free_inode(inode); if (handle) ocfs2_commit_trans(osb, handle); @@ -641,7 +656,7 @@ static int ocfs2_link(struct dentry *old_dentry, goto out_unlock_inode; } - handle = ocfs2_start_trans(osb, OCFS2_LINK_CREDITS); + handle = ocfs2_start_trans(osb, ocfs2_link_credits(osb->sb)); if (IS_ERR(handle)) { err = PTR_ERR(handle); handle = NULL; @@ -828,7 +843,7 @@ static int ocfs2_unlink(struct inode *dir, } } - handle = ocfs2_start_trans(osb, OCFS2_UNLINK_CREDITS); + handle = ocfs2_start_trans(osb, ocfs2_unlink_credits(osb->sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1234,7 +1249,7 @@ static int ocfs2_rename(struct inode *old_dir, } } - handle = ocfs2_start_trans(osb, OCFS2_RENAME_CREDITS); + handle = ocfs2_start_trans(osb, ocfs2_rename_credits(osb->sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1555,6 +1570,7 @@ static int ocfs2_symlink(struct inode *dir, struct ocfs2_security_xattr_info si = { .enable = 1, }; + int did_quota = 0, did_quota_inode = 0; mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, dentry, symname, dentry->d_name.len, dentry->d_name.name); @@ -1648,6 +1664,15 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } + /* We don't use standard VFS wrapper because we don't want vfs_dq_init + * to be called. */ + if (sb_any_quota_active(osb->sb) && + osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { + status = -EDQUOT; + goto bail; + } + did_quota_inode = 1; + status = ocfs2_mknod_locked(osb, dir, inode, dentry, 0, &new_fe_bh, parent_fe_bh, handle, inode_ac); @@ -1663,6 +1688,12 @@ static int ocfs2_symlink(struct inode *dir, u32 offset = 0; inode->i_op = &ocfs2_symlink_inode_operations; + if (vfs_dq_alloc_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, 1))) { + status = -EDQUOT; + goto bail; + } + did_quota = 1; status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, new_fe_bh, handle, data_ac, NULL, @@ -1728,6 +1759,11 @@ static int ocfs2_symlink(struct inode *dir, dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); bail: + if (status < 0 && did_quota) + vfs_dq_free_space_nodirty(inode, + ocfs2_clusters_to_bytes(osb->sb, 1)); + if (status < 0 && did_quota_inode) + vfs_dq_free_inode(inode); if (handle) ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 9cb71e1c7c60..3b9634c7d296 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1665,7 +1665,8 @@ static int ocfs2_remove_value_outside(struct inode*inode, ocfs2_init_dealloc_ctxt(&ctxt.dealloc); - ctxt.handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + ctxt.handle = ocfs2_start_trans(osb, + ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); @@ -2233,7 +2234,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, */ if (!xi->value) { if (!ocfs2_xattr_is_local(xe)) - credits += OCFS2_REMOVE_EXTENT_CREDITS; + credits += ocfs2_remove_extent_credits(inode->i_sb); goto out; } @@ -2250,7 +2251,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, */ if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) { clusters_add += new_clusters; - credits += OCFS2_REMOVE_EXTENT_CREDITS + + credits += ocfs2_remove_extent_credits(inode->i_sb) + OCFS2_INODE_UPDATE_CREDITS; if 
(!ocfs2_xattr_is_local(xe)) credits += ocfs2_calc_extend_credits( @@ -2275,7 +2276,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, xv = &def_xv.xv; if (old_clusters >= new_clusters) { - credits += OCFS2_REMOVE_EXTENT_CREDITS; + credits += ocfs2_remove_extent_credits(inode->i_sb); goto out; } else { meta_add += ocfs2_extend_meta_needed(&xv->xr_list); @@ -4750,7 +4751,7 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, } } - handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(handle)) { ret = -ENOMEM; mlog_errno(ret); @@ -5109,7 +5110,8 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, ocfs2_init_dealloc_ctxt(&ctxt.dealloc); - ctxt.handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + ctxt.handle = ocfs2_start_trans(osb, + ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); -- cgit From 171bf93ce11f4c9929fdce6ce63df8da2f3c4475 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Mon, 20 Oct 2008 15:36:47 +0200 Subject: ocfs2: Periodic quota syncing This patch creates a work queue for periodic syncing of locally cached quota information to the global quota files. We constantly queue a delayed work item, to get the periodic behavior. Signed-off-by: Mark Fasheh Acked-by: Jan Kara --- fs/ocfs2/quota.h | 5 +++ fs/ocfs2/quota_global.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/quota_local.c | 4 +++ fs/ocfs2/super.c | 7 ++++ 4 files changed, 101 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 1f1c86311b32..e2233d51507f 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -39,6 +39,7 @@ struct ocfs2_mem_dqinfo { unsigned int dqi_chunks; /* Number of chunks in local quota file */ unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ unsigned int dqi_syncms; /* How often should we sync with other nodes */ + unsigned int dqi_syncjiff; /* Precomputed dqi_syncms in jiffies */ struct list_head dqi_chunk; /* List of chunks */ struct inode *dqi_gqinode; /* Global quota file inode */ struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ @@ -47,6 +48,7 @@ struct ocfs2_mem_dqinfo { struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */ struct buffer_head *dqi_ibh; /* Buffer with information header */ struct qtree_mem_dqinfo dqi_gi; /* Info about global file */ + struct delayed_work dqi_sync_work; /* Work for syncing dquots */ }; static inline struct ocfs2_dquot *OCFS2_DQUOT(struct dquot *dquot) @@ -90,4 +92,7 @@ struct buffer_head *ocfs2_read_quota_block(struct inode *inode, extern struct dquot_operations ocfs2_quota_operations; extern struct quota_format_type ocfs2_quota_format; +int ocfs2_quota_setup(void); +void ocfs2_quota_shutdown(void); + #endif /* _OCFS2_QUOTA_H */ diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index af8340c45367..adf53508bdb8 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -1,10 +1,14 @@ /* * Implementation of operations over global quota file */ +#include #include #include #include #include +#include +#include +#include #define MLOG_MASK_PREFIX ML_QUOTA #include @@ -20,6 +24,10 @@ #include "uptodate.h" #include "quota.h" +static struct workqueue_struct *ocfs2_quota_wq = NULL; + +static void qsync_work_fn(struct work_struct *work); + static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp) { struct 
ocfs2_global_disk_dqblk *d = dp; @@ -313,6 +321,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type) info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms); + oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms); oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); @@ -320,6 +329,10 @@ int ocfs2_global_read_info(struct super_block *sb, int type) oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE; oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); + INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); + queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, + oinfo->dqi_syncjiff); + out_err: mlog_exit(status); return status; @@ -519,6 +532,61 @@ out: return err; } +/* + * Functions for periodic syncing of dquots with global file + */ +static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type) +{ + handle_t *handle; + struct super_block *sb = dquot->dq_sb; + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; + struct ocfs2_super *osb = OCFS2_SB(sb); + int status = 0; + + mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id, + dquot->dq_type, type, sb->s_id); + if (type != dquot->dq_type) + goto out; + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + + handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_ilock; + } + mutex_lock(&sb_dqopt(sb)->dqio_mutex); + status = ocfs2_sync_dquot(dquot); + mutex_unlock(&sb_dqopt(sb)->dqio_mutex); + if (status < 0) + mlog_errno(status); + /* We have to write local structure as well... 
*/ + dquot_mark_dquot_dirty(dquot); + status = dquot_commit(dquot); + if (status < 0) + mlog_errno(status); + ocfs2_commit_trans(osb, handle); +out_ilock: + ocfs2_unlock_global_qf(oinfo, 1); +out: + mlog_exit(status); + return status; +} + +static void qsync_work_fn(struct work_struct *work) +{ + struct ocfs2_mem_dqinfo *oinfo = container_of(work, + struct ocfs2_mem_dqinfo, + dqi_sync_work.work); + struct super_block *sb = oinfo->dqi_gqinode->i_sb; + + dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); + queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, + oinfo->dqi_syncjiff); +} + /* * Wrappers for generic quota functions */ @@ -917,3 +985,20 @@ struct dquot_operations ocfs2_quota_operations = { .alloc_dquot = ocfs2_alloc_dquot, .destroy_dquot = ocfs2_destroy_dquot, }; + +int ocfs2_quota_setup(void) +{ + ocfs2_quota_wq = create_workqueue("o2quot"); + if (!ocfs2_quota_wq) + return -ENOMEM; + return 0; +} + +void ocfs2_quota_shutdown(void) +{ + if (ocfs2_quota_wq) { + flush_workqueue(ocfs2_quota_wq); + destroy_workqueue(ocfs2_quota_wq); + ocfs2_quota_wq = NULL; + } +} diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 55c3f2f98dcd..40e82b483136 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -368,6 +368,10 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) int mark_clean = 1, len; int status; + /* At this point we know there are no more dquots and thus + * even if there's some sync in the pdflush queue, it won't + * find any dquots and return without doing anything */ + cancel_delayed_work_sync(&oinfo->dqi_sync_work); iput(oinfo->dqi_gqinode); ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock); ocfs2_lock_res_free(&oinfo->dqi_gqlock); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 7bb83e41581e..60f1d29421ad 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1107,11 +1107,16 @@ static int __init ocfs2_init(void) mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); } + status = ocfs2_quota_setup(); + if (status) + goto leave; + ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); leave: if (status < 0) { + ocfs2_quota_shutdown(); ocfs2_free_mem_caches(); exit_ocfs2_uptodate_cache(); } @@ -1128,6 +1133,8 @@ static void __exit ocfs2_exit(void) { mlog_entry_void(); + ocfs2_quota_shutdown(); + if (ocfs2_wq) { flush_workqueue(ocfs2_wq); destroy_workqueue(ocfs2_wq); -- cgit From 2205363dce7447b8e85f1ead14387664c1a98753 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 20 Oct 2008 23:50:38 +0200 Subject: ocfs2: Implement quota recovery Implement functions for recovery after a crash. Functions just read local quota file and sync info to global quota file. 
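The replay itself is conceptually a bitmap walk: every set bit in a chunk marks a local entry the crashed node still owned, and its recorded space/inode deltas are folded into the global usage before the bit is cleared. A compressed userspace model of that loop (toy types and direct id indexing; none of the kernel locking, journaling, or dquot lookup):

	/* cc -std=c99 -Wall quota_replay.c — illustrative sketch only */
	#include <stdint.h>
	#include <stdio.h>

	#define ENTRIES_PER_CHUNK 64
	#define MAX_ID            16	/* toy: ids index a small table directly */

	struct local_entry {		/* stands in for ocfs2_local_disk_dqblk */
		uint64_t id;
		int64_t  spacemod;	/* space delta the dead node had pending */
		int64_t  inodemod;	/* inode delta the dead node had pending */
	};

	struct global_usage {		/* stands in for the global quota entry */
		int64_t curspace;
		int64_t curinodes;
	};

	/*
	 * Replay one chunk: fold the deltas of every still-allocated entry
	 * into the global usage, then clear the bit so the slot is clean.
	 */
	static void recover_chunk(uint64_t *bitmap, const struct local_entry *e,
				  struct global_usage *global)
	{
		for (int bit = 0; bit < ENTRIES_PER_CHUNK; bit++) {
			if (!(*bitmap & (1ULL << bit)))
				continue;
			global[e[bit].id].curspace  += e[bit].spacemod;
			global[e[bit].id].curinodes += e[bit].inodemod;
			*bitmap &= ~(1ULL << bit);
		}
	}

	int main(void)
	{
		struct local_entry entries[ENTRIES_PER_CHUNK] = {
			[0] = { .id = 3, .spacemod = 4096, .inodemod = 1 },
			[5] = { .id = 7, .spacemod = -512, .inodemod = 0 },
		};
		struct global_usage global[MAX_ID] = { {0} };
		uint64_t bitmap = (1ULL << 0) | (1ULL << 5);

		recover_chunk(&bitmap, entries, global);
		printf("id 3: space=%lld inodes=%lld\n",
		       (long long)global[3].curspace,
		       (long long)global[3].curinodes);
		return 0;
	}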
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/journal.c | 108 +++++++++--- fs/ocfs2/journal.h | 1 + fs/ocfs2/ocfs2.h | 4 +- fs/ocfs2/quota.h | 21 +++ fs/ocfs2/quota_global.c | 1 - fs/ocfs2/quota_local.c | 425 +++++++++++++++++++++++++++++++++++++++++++++++- 6 files changed, 528 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 11a1178d5ee8..c60242018d9a 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -45,6 +45,7 @@ #include "slot_map.h" #include "super.h" #include "sysfile.h" +#include "quota.h" #include "buffer_head_io.h" @@ -52,7 +53,7 @@ DEFINE_SPINLOCK(trans_inc_lock); static int ocfs2_force_read_journal(struct inode *inode); static int ocfs2_recover_node(struct ocfs2_super *osb, - int node_num); + int node_num, int slot_num); static int __ocfs2_recovery_thread(void *arg); static int ocfs2_commit_cache(struct ocfs2_super *osb); static int ocfs2_wait_on_mount(struct ocfs2_super *osb); @@ -857,6 +858,7 @@ struct ocfs2_la_recovery_item { int lri_slot; struct ocfs2_dinode *lri_la_dinode; struct ocfs2_dinode *lri_tl_dinode; + struct ocfs2_quota_recovery *lri_qrec; }; /* Does the second half of the recovery process. By this point, the @@ -877,6 +879,7 @@ void ocfs2_complete_recovery(struct work_struct *work) struct ocfs2_super *osb = journal->j_osb; struct ocfs2_dinode *la_dinode, *tl_dinode; struct ocfs2_la_recovery_item *item, *n; + struct ocfs2_quota_recovery *qrec; LIST_HEAD(tmp_la_list); mlog_entry_void(); @@ -922,6 +925,16 @@ void ocfs2_complete_recovery(struct work_struct *work) if (ret < 0) mlog_errno(ret); + qrec = item->lri_qrec; + if (qrec) { + mlog(0, "Recovering quota files"); + ret = ocfs2_finish_quota_recovery(osb, qrec, + item->lri_slot); + if (ret < 0) + mlog_errno(ret); + /* Recovery info is already freed now */ + } + kfree(item); } @@ -935,7 +948,8 @@ void ocfs2_complete_recovery(struct work_struct *work) static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, int slot_num, struct ocfs2_dinode *la_dinode, - struct ocfs2_dinode *tl_dinode) + struct ocfs2_dinode *tl_dinode, + struct ocfs2_quota_recovery *qrec) { struct ocfs2_la_recovery_item *item; @@ -950,6 +964,9 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, if (tl_dinode) kfree(tl_dinode); + if (qrec) + ocfs2_free_quota_recovery(qrec); + mlog_errno(-ENOMEM); return; } @@ -958,6 +975,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, item->lri_la_dinode = la_dinode; item->lri_slot = slot_num; item->lri_tl_dinode = tl_dinode; + item->lri_qrec = qrec; spin_lock(&journal->j_lock); list_add_tail(&item->lri_list, &journal->j_la_cleanups); @@ -977,6 +995,7 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) ocfs2_queue_recovery_completion(journal, osb->slot_num, osb->local_alloc_copy, + NULL, NULL); ocfs2_schedule_truncate_log_flush(osb, 0); @@ -985,11 +1004,26 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) } } +void ocfs2_complete_quota_recovery(struct ocfs2_super *osb) +{ + if (osb->quota_rec) { + ocfs2_queue_recovery_completion(osb->journal, + osb->slot_num, + NULL, + NULL, + osb->quota_rec); + osb->quota_rec = NULL; + } +} + static int __ocfs2_recovery_thread(void *arg) { - int status, node_num; + int status, node_num, slot_num; struct ocfs2_super *osb = arg; struct ocfs2_recovery_map *rm = osb->recovery_map; + int *rm_quota = NULL; + int rm_quota_used = 0, i; + struct ocfs2_quota_recovery *qrec; mlog_entry_void(); @@ -998,6 +1032,11 
@@ static int __ocfs2_recovery_thread(void *arg) goto bail; } + rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS); + if (!rm_quota) { + status = -ENOMEM; + goto bail; + } restart: status = ocfs2_super_lock(osb, 1); if (status < 0) { @@ -1011,8 +1050,28 @@ restart: * clear it until ocfs2_recover_node() has succeeded. */ node_num = rm->rm_entries[0]; spin_unlock(&osb->osb_lock); - - status = ocfs2_recover_node(osb, node_num); + mlog(0, "checking node %d\n", node_num); + slot_num = ocfs2_node_num_to_slot(osb, node_num); + if (slot_num == -ENOENT) { + status = 0; + mlog(0, "no slot for this node, so no recovery" + "required.\n"); + goto skip_recovery; + } + mlog(0, "node %d was using slot %d\n", node_num, slot_num); + + /* It is a bit subtle with quota recovery. We cannot do it + * immediately because we have to obtain cluster locks from + * quota files and we also don't want to just skip it because + * then quota usage would be out of sync until some node takes + * the slot. So we remember which nodes need quota recovery + * and when everything else is done, we recover quotas. */ + for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++); + if (i == rm_quota_used) + rm_quota[rm_quota_used++] = slot_num; + + status = ocfs2_recover_node(osb, node_num, slot_num); +skip_recovery: if (!status) { ocfs2_recovery_map_clear(osb, node_num); } else { @@ -1034,13 +1093,27 @@ restart: if (status < 0) mlog_errno(status); + /* Now it is right time to recover quotas... We have to do this under + * superblock lock so that noone can start using the slot (and crash) + * before we recover it */ + for (i = 0; i < rm_quota_used; i++) { + qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); + if (IS_ERR(qrec)) { + status = PTR_ERR(qrec); + mlog_errno(status); + continue; + } + ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], + NULL, NULL, qrec); + } + ocfs2_super_unlock(osb, 1); /* We always run recovery on our own orphan dir - the dead * node(s) may have disallowd a previos inode delete. Re-processing * is therefore required. */ ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, - NULL); + NULL, NULL); bail: mutex_lock(&osb->recovery_lock); @@ -1055,6 +1128,9 @@ bail: mutex_unlock(&osb->recovery_lock); + if (rm_quota) + kfree(rm_quota); + mlog_exit(status); /* no one is callint kthread_stop() for us so the kthread() api * requires that we call do_exit(). And it isn't exported, but @@ -1282,31 +1358,19 @@ done: * far less concerning. */ static int ocfs2_recover_node(struct ocfs2_super *osb, - int node_num) + int node_num, int slot_num) { int status = 0; - int slot_num; struct ocfs2_dinode *la_copy = NULL; struct ocfs2_dinode *tl_copy = NULL; - mlog_entry("(node_num=%d, osb->node_num = %d)\n", - node_num, osb->node_num); - - mlog(0, "checking node %d\n", node_num); + mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", + node_num, slot_num, osb->node_num); /* Should not ever be called to recover ourselves -- in that * case we should've called ocfs2_journal_load instead. 
*/ BUG_ON(osb->node_num == node_num); - slot_num = ocfs2_node_num_to_slot(osb, node_num); - if (slot_num == -ENOENT) { - status = 0; - mlog(0, "no slot for this node, so no recovery required.\n"); - goto done; - } - - mlog(0, "node %d was using slot %d\n", node_num, slot_num); - status = ocfs2_replay_journal(osb, node_num, slot_num); if (status < 0) { if (status == -EBUSY) { @@ -1342,7 +1406,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, /* This will kfree the memory pointed to by la_copy and tl_copy */ ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy, - tl_copy); + tl_copy, NULL); status = 0; done: diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index ee08e9c1fc12..37013bf9ce28 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -168,6 +168,7 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num); int ocfs2_mark_dead_nodes(struct ocfs2_super *osb); void ocfs2_complete_mount_recovery(struct ocfs2_super *osb); +void ocfs2_complete_quota_recovery(struct ocfs2_super *osb); static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb) { diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index f04b229fc757..6b25b4aa7205 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -206,6 +206,7 @@ enum ocfs2_mount_options struct ocfs2_journal; struct ocfs2_slot_info; struct ocfs2_recovery_map; +struct ocfs2_quota_recovery; struct ocfs2_super { struct task_struct *commit_task; @@ -287,10 +288,11 @@ struct ocfs2_super char *local_alloc_debug_buf; #endif - /* Next two fields are for local node slot recovery during + /* Next three fields are for local node slot recovery during * mount. */ int dirty; struct ocfs2_dinode *local_alloc_copy; + struct ocfs2_quota_recovery *quota_rec; struct ocfs2_alloc_stats alloc_stats; char dev_str[20]; /* "major,minor" of the device */ diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index e2233d51507f..04872b45b990 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -33,6 +33,17 @@ struct ocfs2_dquot { s64 dq_originodes; /* Last globally synced inode usage */ }; +/* Description of one chunk to recover in memory */ +struct ocfs2_recovery_chunk { + struct list_head rc_list; /* List of chunks */ + int rc_chunk; /* Chunk number */ + unsigned long *rc_bitmap; /* Bitmap of entries to recover */ +}; + +struct ocfs2_quota_recovery { + struct list_head r_list[MAXQUOTAS]; /* List of chunks to recover */ +}; + /* In-memory structure with quota header information */ struct ocfs2_mem_dqinfo { unsigned int dqi_type; /* Quota type this structure describes */ @@ -49,6 +60,10 @@ struct ocfs2_mem_dqinfo { struct buffer_head *dqi_ibh; /* Buffer with information header */ struct qtree_mem_dqinfo dqi_gi; /* Info about global file */ struct delayed_work dqi_sync_work; /* Work for syncing dquots */ + struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery + * information, in case we + * enable quotas on file + * needing it */ }; static inline struct ocfs2_dquot *OCFS2_DQUOT(struct dquot *dquot) @@ -67,6 +82,12 @@ extern struct kmem_cache *ocfs2_qf_chunk_cachep; extern struct qtree_fmt_operations ocfs2_global_ops; +struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( + struct ocfs2_super *osb, int slot_num); +int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + struct ocfs2_quota_recovery *rec, + int slot_num); +void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec); ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); ssize_t ocfs2_quota_write(struct super_block *sb, 
int type, diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index adf53508bdb8..49b536a2190d 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -87,7 +87,6 @@ struct qtree_fmt_operations ocfs2_global_ops = { .is_id = ocfs2_global_is_id, }; - struct buffer_head *ocfs2_read_quota_block(struct inode *inode, int block, int *err) { diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 40e82b483136..b98562174cd0 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -49,14 +49,25 @@ static unsigned int ol_quota_chunk_block(struct super_block *sb, int c) return 1 + (ol_chunk_blocks(sb) + 1) * c; } -/* Offset of the dquot structure in the quota file */ -static loff_t ol_dqblk_off(struct super_block *sb, int c, int off) +static unsigned int ol_dqblk_block(struct super_block *sb, int c, int off) +{ + int epb = ol_quota_entries_per_block(sb); + + return ol_quota_chunk_block(sb, c) + 1 + off / epb; +} + +static unsigned int ol_dqblk_block_off(struct super_block *sb, int c, int off) { int epb = ol_quota_entries_per_block(sb); - return ((ol_quota_chunk_block(sb, c) + 1 + off / epb) - << sb->s_blocksize_bits) + - (off % epb) * sizeof(struct ocfs2_local_disk_dqblk); + return (off % epb) * sizeof(struct ocfs2_local_disk_dqblk); +} + +/* Offset of the dquot structure in the quota file */ +static loff_t ol_dqblk_off(struct super_block *sb, int c, int off) +{ + return (ol_dqblk_block(sb, c, off) << sb->s_blocksize_bits) + + ol_dqblk_block_off(sb, c, off); } /* Compute block number from given offset */ @@ -253,6 +264,379 @@ static void olq_update_info(struct buffer_head *bh, void *private) spin_unlock(&dq_data_lock); } +static int ocfs2_add_recovery_chunk(struct super_block *sb, + struct ocfs2_local_disk_chunk *dchunk, + int chunk, + struct list_head *head) +{ + struct ocfs2_recovery_chunk *rc; + + rc = kmalloc(sizeof(struct ocfs2_recovery_chunk), GFP_NOFS); + if (!rc) + return -ENOMEM; + rc->rc_chunk = chunk; + rc->rc_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); + if (!rc->rc_bitmap) { + kfree(rc); + return -ENOMEM; + } + memcpy(rc->rc_bitmap, dchunk->dqc_bitmap, + (ol_chunk_entries(sb) + 7) >> 3); + list_add_tail(&rc->rc_list, head); + return 0; +} + +static void free_recovery_list(struct list_head *head) +{ + struct ocfs2_recovery_chunk *next; + struct ocfs2_recovery_chunk *rchunk; + + list_for_each_entry_safe(rchunk, next, head, rc_list) { + list_del(&rchunk->rc_list); + kfree(rchunk->rc_bitmap); + kfree(rchunk); + } +} + +void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec) +{ + int type; + + for (type = 0; type < MAXQUOTAS; type++) + free_recovery_list(&(rec->r_list[type])); + kfree(rec); +} + +/* Load entries in our quota file we have to recover*/ +static int ocfs2_recovery_load_quota(struct inode *lqinode, + struct ocfs2_local_disk_dqinfo *ldinfo, + int type, + struct list_head *head) +{ + struct super_block *sb = lqinode->i_sb; + struct buffer_head *hbh; + struct ocfs2_local_disk_chunk *dchunk; + int i, chunks = le32_to_cpu(ldinfo->dqi_chunks); + int status = 0; + + for (i = 0; i < chunks; i++) { + hbh = ocfs2_read_quota_block(lqinode, + ol_quota_chunk_block(sb, i), + &status); + if (!hbh) { + mlog_errno(status); + break; + } + dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data; + if (le32_to_cpu(dchunk->dqc_free) < ol_chunk_entries(sb)) + status = ocfs2_add_recovery_chunk(sb, dchunk, i, head); + brelse(hbh); + if (status < 0) + break; + } + if (status < 0) + free_recovery_list(head); + return status; +} + +static struct 
ocfs2_quota_recovery *ocfs2_alloc_quota_recovery(void) +{ + int type; + struct ocfs2_quota_recovery *rec; + + rec = kmalloc(sizeof(struct ocfs2_quota_recovery), GFP_NOFS); + if (!rec) + return NULL; + for (type = 0; type < MAXQUOTAS; type++) + INIT_LIST_HEAD(&(rec->r_list[type])); + return rec; +} + +/* Load information we need for quota recovery into memory */ +struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( + struct ocfs2_super *osb, + int slot_num) +{ + unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; + unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE }; + struct super_block *sb = osb->sb; + struct ocfs2_local_disk_dqinfo *ldinfo; + struct inode *lqinode; + struct buffer_head *bh; + int type; + int status = 0; + struct ocfs2_quota_recovery *rec; + + mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); + rec = ocfs2_alloc_quota_recovery(); + if (!rec) + return ERR_PTR(-ENOMEM); + /* First init... */ + + for (type = 0; type < MAXQUOTAS; type++) { + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) + continue; + /* At this point, journal of the slot is already replayed so + * we can trust metadata and data of the quota file */ + lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); + if (!lqinode) { + status = -ENOENT; + goto out; + } + status = ocfs2_inode_lock_full(lqinode, NULL, 1, + OCFS2_META_LOCK_RECOVERY); + if (status < 0) { + mlog_errno(status); + goto out_put; + } + /* Now read local header */ + bh = ocfs2_read_quota_block(lqinode, 0, &status); + if (!bh) { + mlog_errno(status); + mlog(ML_ERROR, "failed to read quota file info header " + "(slot=%d type=%d)\n", slot_num, type); + goto out_lock; + } + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + + OCFS2_LOCAL_INFO_OFF); + status = ocfs2_recovery_load_quota(lqinode, ldinfo, type, + &rec->r_list[type]); + brelse(bh); +out_lock: + ocfs2_inode_unlock(lqinode, 1); +out_put: + iput(lqinode); + if (status < 0) + break; + } +out: + if (status < 0) { + ocfs2_free_quota_recovery(rec); + rec = ERR_PTR(status); + } + return rec; +} + +/* Sync changes in local quota file into global quota file and + * reinitialize local quota file. + * The function expects local quota file to be already locked and + * dqonoff_mutex locked. 
*/ +static int ocfs2_recover_local_quota_file(struct inode *lqinode, + int type, + struct ocfs2_quota_recovery *rec) +{ + struct super_block *sb = lqinode->i_sb; + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; + struct ocfs2_local_disk_chunk *dchunk; + struct ocfs2_local_disk_dqblk *dqblk; + struct dquot *dquot; + handle_t *handle; + struct buffer_head *hbh = NULL, *qbh = NULL; + int status = 0; + int bit, chunk; + struct ocfs2_recovery_chunk *rchunk, *next; + qsize_t spacechange, inodechange; + + mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type); + + status = ocfs2_lock_global_qf(oinfo, 1); + if (status < 0) + goto out; + + list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { + chunk = rchunk->rc_chunk; + hbh = ocfs2_read_quota_block(lqinode, + ol_quota_chunk_block(sb, chunk), + &status); + if (!hbh) { + mlog_errno(status); + break; + } + dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data; + for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) { + qbh = ocfs2_read_quota_block(lqinode, + ol_dqblk_block(sb, chunk, bit), + &status); + if (!qbh) { + mlog_errno(status); + break; + } + dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data + + ol_dqblk_block_off(sb, chunk, bit)); + dquot = dqget(sb, le64_to_cpu(dqblk->dqb_id), type); + if (!dquot) { + status = -EIO; + mlog(ML_ERROR, "Failed to get quota structure " + "for id %u, type %d. Cannot finish quota " + "file recovery.\n", + (unsigned)le64_to_cpu(dqblk->dqb_id), + type); + goto out_put_bh; + } + handle = ocfs2_start_trans(OCFS2_SB(sb), + OCFS2_QSYNC_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_put_dquot; + } + mutex_lock(&sb_dqopt(sb)->dqio_mutex); + spin_lock(&dq_data_lock); + /* Add usage from quota entry into quota changes + * of our node. Auxiliary variables are important + * due to signedness */ + spacechange = le64_to_cpu(dqblk->dqb_spacemod); + inodechange = le64_to_cpu(dqblk->dqb_inodemod); + dquot->dq_dqb.dqb_curspace += spacechange; + dquot->dq_dqb.dqb_curinodes += inodechange; + spin_unlock(&dq_data_lock); + /* We want to drop reference held by the crashed + * node. Since we have our own reference we know + * global structure actually won't be freed. 
*/ + status = ocfs2_global_release_dquot(dquot); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + /* Release local quota file entry */ + status = ocfs2_journal_access(handle, lqinode, + qbh, OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + lock_buffer(qbh); + WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap)); + ocfs2_clear_bit(bit, dchunk->dqc_bitmap); + le32_add_cpu(&dchunk->dqc_free, 1); + unlock_buffer(qbh); + status = ocfs2_journal_dirty(handle, qbh); + if (status < 0) + mlog_errno(status); +out_commit: + mutex_unlock(&sb_dqopt(sb)->dqio_mutex); + ocfs2_commit_trans(OCFS2_SB(sb), handle); +out_put_dquot: + dqput(dquot); +out_put_bh: + brelse(qbh); + if (status < 0) + break; + } + brelse(hbh); + list_del(&rchunk->rc_list); + kfree(rchunk->rc_bitmap); + kfree(rchunk); + if (status < 0) + break; + } + ocfs2_unlock_global_qf(oinfo, 1); +out: + if (status < 0) + free_recovery_list(&(rec->r_list[type])); + mlog_exit(status); + return status; +} + +/* Recover local quota files for given node different from us */ +int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + struct ocfs2_quota_recovery *rec, + int slot_num) +{ + unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE }; + struct super_block *sb = osb->sb; + struct ocfs2_local_disk_dqinfo *ldinfo; + struct buffer_head *bh; + handle_t *handle; + int type; + int status = 0; + struct inode *lqinode; + unsigned int flags; + + mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + for (type = 0; type < MAXQUOTAS; type++) { + if (list_empty(&(rec->r_list[type]))) + continue; + mlog(0, "Recovering quota in slot %d\n", slot_num); + lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); + if (!lqinode) { + status = -ENOENT; + goto out; + } + status = ocfs2_inode_lock_full(lqinode, NULL, 1, + OCFS2_META_LOCK_NOQUEUE); + /* Someone else is holding the lock? Then he must be + * doing the recovery. Just skip the file... */ + if (status == -EAGAIN) { + mlog(ML_NOTICE, "skipping quota recovery for slot %d " + "because quota file is locked.\n", slot_num); + status = 0; + goto out_put; + } else if (status < 0) { + mlog_errno(status); + goto out_put; + } + /* Now read local header */ + bh = ocfs2_read_quota_block(lqinode, 0, &status); + if (!bh) { + mlog_errno(status); + mlog(ML_ERROR, "failed to read quota file info header " + "(slot=%d type=%d)\n", slot_num, type); + goto out_lock; + } + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + + OCFS2_LOCAL_INFO_OFF); + /* Is recovery still needed? */ + flags = le32_to_cpu(ldinfo->dqi_flags); + if (!(flags & OLQF_CLEAN)) + status = ocfs2_recover_local_quota_file(lqinode, + type, + rec); + /* We don't want to mark file as clean when it is actually + * active */ + if (slot_num == osb->slot_num) + goto out_bh; + /* Mark quota file as clean if we are recovering quota file of + * some other node. 
*/ + handle = ocfs2_start_trans(osb, 1); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_bh; + } + status = ocfs2_journal_access(handle, lqinode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_trans; + } + lock_buffer(bh); + ldinfo->dqi_flags = cpu_to_le32(flags | OLQF_CLEAN); + unlock_buffer(bh); + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) + mlog_errno(status); +out_trans: + ocfs2_commit_trans(osb, handle); +out_bh: + brelse(bh); +out_lock: + ocfs2_inode_unlock(lqinode, 1); +out_put: + iput(lqinode); + if (status < 0) + break; + } +out: + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + kfree(rec); + return status; +} + /* Read information header from quota file */ static int ocfs2_local_read_info(struct super_block *sb, int type) { @@ -262,6 +646,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) struct inode *lqinode = sb_dqopt(sb)->files[type]; int status; struct buffer_head *bh = NULL; + struct ocfs2_quota_recovery *rec; int locked = 0; info->dqi_maxblimit = 0x7fffffffffffffffLL; @@ -275,6 +660,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) info->dqi_priv = oinfo; oinfo->dqi_type = type; INIT_LIST_HEAD(&oinfo->dqi_chunk); + oinfo->dqi_rec = NULL; oinfo->dqi_lqi_bh = NULL; oinfo->dqi_ibh = NULL; @@ -305,10 +691,27 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) oinfo->dqi_ibh = bh; /* We crashed when using local quota file? */ - if (!(info->dqi_flags & OLQF_CLEAN)) - goto out_err; /* So far we just bail out. Later we should resync here */ + if (!(info->dqi_flags & OLQF_CLEAN)) { + rec = OCFS2_SB(sb)->quota_rec; + if (!rec) { + rec = ocfs2_alloc_quota_recovery(); + if (!rec) { + status = -ENOMEM; + mlog_errno(status); + goto out_err; + } + OCFS2_SB(sb)->quota_rec = rec; + } - status = ocfs2_load_local_quota_bitmaps(sb_dqopt(sb)->files[type], + status = ocfs2_recovery_load_quota(lqinode, ldinfo, type, + &rec->r_list[type]); + if (status < 0) { + mlog_errno(status); + goto out_err; + } + } + + status = ocfs2_load_local_quota_bitmaps(lqinode, ldinfo, &oinfo->dqi_chunk); if (status < 0) { @@ -394,6 +797,12 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) } ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); + /* dqonoff_mutex protects us against racing with recovery thread... */ + if (oinfo->dqi_rec) { + ocfs2_free_quota_recovery(oinfo->dqi_rec); + mark_clean = 0; + } + if (!mark_clean) goto out; -- cgit From 19ece546a418997226bd91552fbc41abcb05cea6 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 21 Aug 2008 20:13:17 +0200 Subject: ocfs2: Enable quota accounting on mount, disable on umount Enable quota usage tracking on mount and disable it on umount. Also add support for quota on and quota off quotactls and usrquota and grpquota mount options. Add quota features among supported ones. 
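The usrquota/grpquota options are only honoured when the matching RO-compat feature bit is present on disk; otherwise the mount (or remount) is refused. A minimal standalone sketch of that pairing (invented flag values, not the on-disk constants):

	/* cc -std=c99 -Wall quota_opts.c — illustrative sketch only */
	#include <stdio.h>

	enum { USRQUOTA, GRPQUOTA, MAXQUOTAS };

	#define FEAT_USRQUOTA	0x01	/* on-disk RO-compat feature bits (toy) */
	#define FEAT_GRPQUOTA	0x02
	#define OPT_USRQUOTA	0x01	/* mount options requested by the user */
	#define OPT_GRPQUOTA	0x02

	/*
	 * A quota type may only be enabled if the filesystem was formatted
	 * with the corresponding feature; asking for it otherwise is an error.
	 */
	static int check_quota_options(unsigned features, unsigned opts)
	{
		static const unsigned feat[MAXQUOTAS] = { FEAT_USRQUOTA, FEAT_GRPQUOTA };
		static const unsigned opt[MAXQUOTAS]  = { OPT_USRQUOTA,  OPT_GRPQUOTA };

		for (int type = 0; type < MAXQUOTAS; type++)
			if ((opts & opt[type]) && !(features & feat[type]))
				return -1;	/* requested, but not supported */
		return 0;
	}

	int main(void)
	{
		unsigned features = FEAT_USRQUOTA;		/* only usrquota on disk */
		unsigned opts = OPT_USRQUOTA | OPT_GRPQUOTA;	/* user asked for both */

		printf("mount %s\n",
		       check_quota_options(features, opts) ? "refused" : "ok");
		return 0;
	}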
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/journal.c | 20 ++++- fs/ocfs2/ocfs2.h | 3 + fs/ocfs2/ocfs2_fs.h | 4 +- fs/ocfs2/super.c | 222 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 245 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index c60242018d9a..302f1144a708 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -56,7 +56,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, int node_num, int slot_num); static int __ocfs2_recovery_thread(void *arg); static int ocfs2_commit_cache(struct ocfs2_super *osb); -static int ocfs2_wait_on_mount(struct ocfs2_super *osb); +static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota); static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, int dirty, int replayed); static int ocfs2_trylock_journal(struct ocfs2_super *osb, @@ -65,6 +65,17 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, int slot); static int ocfs2_commit_thread(void *arg); +static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb) +{ + return __ocfs2_wait_on_mount(osb, 0); +} + +static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb) +{ + return __ocfs2_wait_on_mount(osb, 1); +} + + /* * The recovery_list is a simple linked list of node numbers to recover. @@ -895,6 +906,8 @@ void ocfs2_complete_recovery(struct work_struct *work) mlog(0, "Complete recovery for slot %d\n", item->lri_slot); + ocfs2_wait_on_quotas(osb); + la_dinode = item->lri_la_dinode; if (la_dinode) { mlog(0, "Clean up local alloc %llu\n", @@ -1701,13 +1714,14 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, return ret; } -static int ocfs2_wait_on_mount(struct ocfs2_super *osb) +static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota) { /* This check is good because ocfs2 will wait on our recovery * thread before changing it to something other than MOUNTED * or DISABLED. */ wait_event(osb->osb_mount_event, - atomic_read(&osb->vol_state) == VOLUME_MOUNTED || + (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) || + atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS || atomic_read(&osb->vol_state) == VOLUME_DISABLED); /* If there's an error on mount, then we may never get to the diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 6b25b4aa7205..5c777988042f 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -161,6 +161,7 @@ enum ocfs2_vol_state { VOLUME_INIT = 0, VOLUME_MOUNTED, + VOLUME_MOUNTED_QUOTAS, VOLUME_DISMOUNTED, VOLUME_DISABLED }; @@ -196,6 +197,8 @@ enum ocfs2_mount_options OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */ OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */ OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* POSIX access control lists */ + OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */ + OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ }; #define OCFS2_OSB_SOFT_RO 0x0001 diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 0a5ac790a628..359732e18e82 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -94,7 +94,9 @@ | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \ | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \ | OCFS2_FEATURE_INCOMPAT_XATTR) -#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN) +#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ + | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ + | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) /* * Heartbeat-only devices are missing journals and other files. 
The diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 60f1d29421ad..2eb657c3e7a8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -41,6 +41,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_SUPER #include @@ -127,6 +128,9 @@ static int ocfs2_get_sector(struct super_block *sb, static void ocfs2_write_super(struct super_block *sb); static struct inode *ocfs2_alloc_inode(struct super_block *sb); static void ocfs2_destroy_inode(struct inode *inode); +static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend); +static int ocfs2_enable_quotas(struct ocfs2_super *osb); +static void ocfs2_disable_quotas(struct ocfs2_super *osb); static const struct super_operations ocfs2_sops = { .statfs = ocfs2_statfs, @@ -165,6 +169,8 @@ enum { Opt_inode64, Opt_acl, Opt_noacl, + Opt_usrquota, + Opt_grpquota, Opt_err, }; @@ -189,6 +195,8 @@ static const match_table_t tokens = { {Opt_inode64, "inode64"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, + {Opt_usrquota, "usrquota"}, + {Opt_grpquota, "grpquota"}, {Opt_err, NULL} }; @@ -452,6 +460,12 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) /* We're going to/from readonly mode. */ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { + /* Disable quota accounting before remounting RO */ + if (*flags & MS_RDONLY) { + ret = ocfs2_susp_quotas(osb, 0); + if (ret < 0) + goto out; + } /* Lock here so the check of HARD_RO and the potential * setting of SOFT_RO is atomic. */ spin_lock(&osb->osb_lock); @@ -487,6 +501,21 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) } unlock_osb: spin_unlock(&osb->osb_lock); + /* Enable quota accounting after remounting RW */ + if (!ret && !(*flags & MS_RDONLY)) { + if (sb_any_quota_suspended(sb)) + ret = ocfs2_susp_quotas(osb, 1); + else + ret = ocfs2_enable_quotas(osb); + if (ret < 0) { + /* Return back changes... 
*/ + spin_lock(&osb->osb_lock); + sb->s_flags |= MS_RDONLY; + osb->osb_flags |= OCFS2_OSB_SOFT_RO; + spin_unlock(&osb->osb_lock); + goto out; + } + } } if (!ret) { @@ -647,6 +676,131 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb, return 0; } +static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend) +{ + int type; + struct super_block *sb = osb->sb; + unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; + int status = 0; + + for (type = 0; type < MAXQUOTAS; type++) { + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) + continue; + if (unsuspend) + status = vfs_quota_enable( + sb_dqopt(sb)->files[type], + type, QFMT_OCFS2, + DQUOT_SUSPENDED); + else + status = vfs_quota_disable(sb, type, + DQUOT_SUSPENDED); + if (status < 0) + break; + } + if (status < 0) + mlog(ML_ERROR, "Failed to suspend/unsuspend quotas on " + "remount (error = %d).\n", status); + return status; +} + +static int ocfs2_enable_quotas(struct ocfs2_super *osb) +{ + struct inode *inode[MAXQUOTAS] = { NULL, NULL }; + struct super_block *sb = osb->sb; + unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; + unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE }; + int status; + int type; + + sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE; + for (type = 0; type < MAXQUOTAS; type++) { + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) + continue; + inode[type] = ocfs2_get_system_file_inode(osb, ino[type], + osb->slot_num); + if (!inode[type]) { + status = -ENOENT; + goto out_quota_off; + } + status = vfs_quota_enable(inode[type], type, QFMT_OCFS2, + DQUOT_USAGE_ENABLED); + if (status < 0) + goto out_quota_off; + } + + for (type = 0; type < MAXQUOTAS; type++) + iput(inode[type]); + return 0; +out_quota_off: + ocfs2_disable_quotas(osb); + for (type = 0; type < MAXQUOTAS; type++) + iput(inode[type]); + mlog_errno(status); + return status; +} + +static void ocfs2_disable_quotas(struct ocfs2_super *osb) +{ + int type; + struct inode *inode; + struct super_block *sb = osb->sb; + + /* We mostly ignore errors in this function because there's not much + * we can do when we see them */ + for (type = 0; type < MAXQUOTAS; type++) { + if (!sb_has_quota_loaded(sb, type)) + continue; + inode = igrab(sb->s_dquot.files[type]); + /* Turn off quotas. 
This will remove all dquot structures from + * memory and so they will be automatically synced to global + * quota files */ + vfs_quota_disable(sb, type, DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED); + if (!inode) + continue; + iput(inode); + } +} + +/* Handle quota on quotactl */ +static int ocfs2_quota_on(struct super_block *sb, int type, int format_id, + char *path, int remount) +{ + unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; + + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) + return -EINVAL; + + if (remount) + return 0; /* Just ignore it has been handled in + * ocfs2_remount() */ + return vfs_quota_enable(sb_dqopt(sb)->files[type], type, + format_id, DQUOT_LIMITS_ENABLED); +} + +/* Handle quota off quotactl */ +static int ocfs2_quota_off(struct super_block *sb, int type, int remount) +{ + if (remount) + return 0; /* Ignore now and handle later in + * ocfs2_remount() */ + return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED); +} + +static struct quotactl_ops ocfs2_quotactl_ops = { + .quota_on = ocfs2_quota_on, + .quota_off = ocfs2_quota_off, + .quota_sync = vfs_quota_sync, + .get_info = vfs_get_dqinfo, + .set_info = vfs_set_dqinfo, + .get_dqblk = vfs_get_dqblk, + .set_dqblk = vfs_set_dqblk, +}; + static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) { struct dentry *root; @@ -689,6 +843,22 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb->osb_commit_interval = parsed_options.commit_interval; osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt); osb->local_alloc_bits = osb->local_alloc_default_bits; + if (osb->s_mount_opt & OCFS2_MOUNT_USRQUOTA && + !OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { + status = -EINVAL; + mlog(ML_ERROR, "User quotas were requested, but this " + "filesystem does not have the feature enabled.\n"); + goto read_super_error; + } + if (osb->s_mount_opt & OCFS2_MOUNT_GRPQUOTA && + !OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { + status = -EINVAL; + mlog(ML_ERROR, "Group quotas were requested, but this " + "filesystem does not have the feature enabled.\n"); + goto read_super_error; + } status = ocfs2_verify_userspace_stack(osb, &parsed_options); if (status) @@ -793,6 +963,28 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) atomic_set(&osb->vol_state, VOLUME_MOUNTED); wake_up(&osb->osb_mount_event); + /* Now we can initialize quotas because we can afford to wait + * for cluster locks recovery now. That also means that truncation + * log recovery can happen but that waits for proper quota setup */ + if (!(sb->s_flags & MS_RDONLY)) { + status = ocfs2_enable_quotas(osb); + if (status < 0) { + /* We have to err-out specially here because + * s_root is already set */ + mlog_errno(status); + atomic_set(&osb->vol_state, VOLUME_DISABLED); + wake_up(&osb->osb_mount_event); + mlog_exit(status); + return status; + } + } + + ocfs2_complete_quota_recovery(osb); + + /* Now we wake up again for processes waiting for quotas */ + atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS); + wake_up(&osb->osb_mount_event); + mlog_exit(status); return status; @@ -980,6 +1172,28 @@ static int ocfs2_parse_options(struct super_block *sb, case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; break; + case Opt_usrquota: + /* We check only on remount, otherwise features + * aren't yet initialized. 
*/ + if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { + mlog(ML_ERROR, "User quota requested but " + "filesystem feature is not set\n"); + status = 0; + goto bail; + } + mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; + break; + case Opt_grpquota: + if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { + mlog(ML_ERROR, "Group quota requested but " + "filesystem feature is not set\n"); + status = 0; + goto bail; + } + mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; + break; #ifdef CONFIG_OCFS2_FS_POSIX_ACL case Opt_acl: mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; @@ -1056,6 +1270,10 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) if (osb->osb_cluster_stack[0]) seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN, osb->osb_cluster_stack); + if (opts & OCFS2_MOUNT_USRQUOTA) + seq_printf(s, ",usrquota"); + if (opts & OCFS2_MOUNT_GRPQUOTA) + seq_printf(s, ",grpquota"); if (opts & OCFS2_MOUNT_NOUSERXATTR) seq_printf(s, ",nouser_xattr"); @@ -1394,6 +1612,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) osb = OCFS2_SB(sb); BUG_ON(!osb); + ocfs2_disable_quotas(osb); + ocfs2_shutdown_local_alloc(osb); ocfs2_truncate_log_shutdown(osb); @@ -1504,6 +1724,8 @@ static int ocfs2_initialize_super(struct super_block *sb, sb->s_fs_info = osb; sb->s_op = &ocfs2_sops; sb->s_export_op = &ocfs2_export_ops; + sb->s_qcop = &ocfs2_quotactl_ops; + sb->dq_op = &ocfs2_quota_operations; sb->s_xattr = ocfs2_xattr_handlers; sb->s_time_gran = 1; sb->s_flags |= MS_NOATIME; -- cgit From b86c86fa1feb50221dc16071ae5b8a4acf3bd32c Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 18 Nov 2008 17:16:47 -0800 Subject: ocfs2: Use BH_JBDPrivateStart instead of BH_Unshadow This is safer. We no longer have to worry about tracking changes to jbd_state_bits. Signed-off-by: Mark Fasheh --- fs/ocfs2/buffer_head_io.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 0e9eed0c223f..15c8e6deee2e 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -42,11 +42,10 @@ /* * Bits on bh->b_state used by ocfs2. * - * These MUST be after the JBD2 bits. Currently BH_Unshadow is the last - * JBD2 bit. + * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart. */ enum ocfs2_state_bits { - BH_NeedsValidate = BH_Unshadow + 1, + BH_NeedsValidate = BH_JBDPrivateStart, }; /* Expand the magic b_state functions */ -- cgit From 57a09a7b3d9445a17c78d544f1e49d4d7d61705a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:26 +0100 Subject: ocfs2: Add missing initialization Add missing variable initialization to ocfs2_dquot_drop_slow(). Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 49b536a2190d..10ecb33298d8 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -870,7 +870,7 @@ out: static int ocfs2_dquot_drop_slow(struct inode *inode) { - int status; + int status = 0; int cnt; int got_lock[MAXQUOTAS] = {0, 0}; handle_t *handle; -- cgit From 85eb8b73d66530bb7b931789ae7a5ec9744eed34 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 25 Nov 2008 15:31:27 +0100 Subject: ocfs2: Fix ocfs2_read_quota_block() error handling. 
ocfs2_bread() has become ocfs2_read_virt_blocks(), with a prototype to match ocfs2_read_blocks(). The quota code, converting from ocfs2_bread(), wraps the call to ocfs2_read_virt_blocks() in ocfs2_read_quota_block(). Unfortunately, the prototype of ocfs2_read_quota_block() matches the old prototype of ocfs2_bread(). The problem is that ocfs2_bread() returned the buffer head, and callers assumed that a NULL pointer was indicative of error. It wasn't. This is why ocfs2_bread() took an int*err argument as well. The new prototype of ocfs2_read_virt_blocks() avoids this error handling confusion. Let's change ocfs2_read_quota_block() to match. Signed-off-by: Joel Becker Acked-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/dlmglue.c | 6 ++--- fs/ocfs2/quota.h | 4 ++-- fs/ocfs2/quota_global.c | 34 +++++++++++++++----------- fs/ocfs2/quota_local.c | 64 +++++++++++++++++++++++++++---------------------- 4 files changed, 60 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 058aa86490ae..b1c75911d8ad 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3519,7 +3519,7 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) oinfo->dqi_gi.dqi_type); struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); - struct buffer_head *bh; + struct buffer_head *bh = NULL; struct ocfs2_global_disk_dqinfo *gdinfo; int status = 0; @@ -3532,8 +3532,8 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) oinfo->dqi_gi.dqi_free_entry = be32_to_cpu(lvb->lvb_free_entry); } else { - bh = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &status); - if (!bh) { + status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh); + if (status) { mlog_errno(status); goto bail; } diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 04872b45b990..7365e2e08706 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -107,8 +107,8 @@ static inline int ocfs2_global_release_dquot(struct dquot *dquot) int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); -struct buffer_head *ocfs2_read_quota_block(struct inode *inode, - int block, int *err); +int ocfs2_read_quota_block(struct inode *inode, u64 v_block, + struct buffer_head **bh); extern struct dquot_operations ocfs2_quota_operations; extern struct quota_format_type ocfs2_quota_format; diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 10ecb33298d8..2bdcddd3f1c4 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -87,16 +87,21 @@ struct qtree_fmt_operations ocfs2_global_ops = { .is_id = ocfs2_global_is_id, }; -struct buffer_head *ocfs2_read_quota_block(struct inode *inode, - int block, int *err) +int ocfs2_read_quota_block(struct inode *inode, u64 v_block, + struct buffer_head **bh) { - struct buffer_head *tmp = NULL; + int rc = 0; + struct buffer_head *tmp = *bh; - *err = ocfs2_read_virt_blocks(inode, block, 1, &tmp, 0, NULL); - if (*err) - mlog_errno(*err); + rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, NULL); + if (rc) + mlog_errno(rc); + + /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. 
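In standalone terms, the new convention looks roughly like this (toy types and a stub reader, not the real kernel structures):

	/* cc -std=c99 -Wall readblk.c — illustrative sketch only */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct buffer_head { char data[512]; };	/* toy stand-in */

	/*
	 * The int return value is the only error channel; the buffer comes
	 * back through **bh (reusing one the caller passed in, if any).  With
	 * the old ocfs2_bread()-style prototype the buffer was the return
	 * value and the error went through an int *err argument, so callers
	 * checking only for a NULL buffer could miss real errors.
	 */
	static int read_block(unsigned long long blk, struct buffer_head **bh)
	{
		struct buffer_head *tmp = *bh;

		(void)blk;			/* a real reader would use this */
		if (!tmp) {
			tmp = calloc(1, sizeof(*tmp));
			if (!tmp)
				return -ENOMEM;
		}
		*bh = tmp;
		return 0;
	}

	int main(void)
	{
		struct buffer_head *bh = NULL;
		int status = read_block(0, &bh);

		if (status) {			/* the one and only error check */
			fprintf(stderr, "read failed: %d\n", status);
			return 1;
		}
		free(bh);
		return 0;
	}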
*/ + if (!rc && !*bh) + *bh = tmp; - return tmp; + return rc; } static struct buffer_head *ocfs2_get_quota_block(struct inode *inode, @@ -143,8 +148,9 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data, toread = len; while (toread > 0) { tocopy = min((size_t)(sb->s_blocksize - offset), toread); - bh = ocfs2_read_quota_block(gqinode, blk, &err); - if (!bh) { + bh = NULL; + err = ocfs2_read_quota_block(gqinode, blk, &bh); + if (err) { mlog_errno(err); return err; } @@ -169,7 +175,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, int offset = off & (sb->s_blocksize - 1); sector_t blk = off >> sb->s_blocksize_bits; int err = 0, new = 0; - struct buffer_head *bh; + struct buffer_head *bh = NULL; handle_t *handle = journal_current_handle(); if (!handle) { @@ -200,13 +206,13 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, /* Not rewriting whole block? */ if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) && !new) { - bh = ocfs2_read_quota_block(gqinode, blk, &err); - if (!bh) { + err = ocfs2_read_quota_block(gqinode, blk, &bh); + if (err) { mlog_errno(err); return err; } err = ocfs2_journal_access(handle, gqinode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + OCFS2_JOURNAL_ACCESS_WRITE); } else { bh = ocfs2_get_quota_block(gqinode, blk, &err); if (!bh) { @@ -214,7 +220,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, return err; } err = ocfs2_journal_access(handle, gqinode, bh, - OCFS2_JOURNAL_ACCESS_CREATE); + OCFS2_JOURNAL_ACCESS_CREATE); } if (err < 0) { brelse(bh); diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index b98562174cd0..7053664f66a6 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -139,15 +139,15 @@ static int ocfs2_local_check_quota_file(struct super_block *sb, int type) unsigned int gversions[MAXQUOTAS] = OCFS2_GLOBAL_QVERSIONS; unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE, GROUP_QUOTA_SYSTEM_INODE }; - struct buffer_head *bh; + struct buffer_head *bh = NULL; struct inode *linode = sb_dqopt(sb)->files[type]; struct inode *ginode = NULL; struct ocfs2_disk_dqheader *dqhead; int status, ret = 0; /* First check whether we understand local quota file */ - bh = ocfs2_read_quota_block(linode, 0, &status); - if (!bh) { + status = ocfs2_read_quota_block(linode, 0, &bh); + if (status) { mlog_errno(status); mlog(ML_ERROR, "failed to read quota file header (type=%d)\n", type); @@ -178,8 +178,8 @@ static int ocfs2_local_check_quota_file(struct super_block *sb, int type) goto out_err; } /* Since the header is read only, we don't care about locking */ - bh = ocfs2_read_quota_block(ginode, 0, &status); - if (!bh) { + status = ocfs2_read_quota_block(ginode, 0, &bh); + if (status) { mlog_errno(status); mlog(ML_ERROR, "failed to read global quota file header " "(type=%d)\n", type); @@ -235,10 +235,11 @@ static int ocfs2_load_local_quota_bitmaps(struct inode *inode, return -ENOMEM; } newchunk->qc_num = i; - newchunk->qc_headerbh = ocfs2_read_quota_block(inode, + newchunk->qc_headerbh = NULL; + status = ocfs2_read_quota_block(inode, ol_quota_chunk_block(inode->i_sb, i), - &status); - if (!newchunk->qc_headerbh) { + &newchunk->qc_headerbh); + if (status) { mlog_errno(status); kmem_cache_free(ocfs2_qf_chunk_cachep, newchunk); ocfs2_release_local_quota_bitmaps(head); @@ -320,10 +321,11 @@ static int ocfs2_recovery_load_quota(struct inode *lqinode, int status = 0; for (i = 0; i < chunks; i++) { - hbh = ocfs2_read_quota_block(lqinode, - ol_quota_chunk_block(sb, i), - &status); - if (!hbh) 
{ + hbh = NULL; + status = ocfs2_read_quota_block(lqinode, + ol_quota_chunk_block(sb, i), + &hbh); + if (status) { mlog_errno(status); break; } @@ -392,8 +394,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( goto out_put; } /* Now read local header */ - bh = ocfs2_read_quota_block(lqinode, 0, &status); - if (!bh) { + bh = NULL; + status = ocfs2_read_quota_block(lqinode, 0, &bh); + if (status) { mlog_errno(status); mlog(ML_ERROR, "failed to read quota file info header " "(slot=%d type=%d)\n", slot_num, type); @@ -447,19 +450,21 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { chunk = rchunk->rc_chunk; - hbh = ocfs2_read_quota_block(lqinode, - ol_quota_chunk_block(sb, chunk), - &status); - if (!hbh) { + hbh = NULL; + status = ocfs2_read_quota_block(lqinode, + ol_quota_chunk_block(sb, chunk), + &hbh); + if (status) { mlog_errno(status); break; } dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data; for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) { - qbh = ocfs2_read_quota_block(lqinode, + qbh = NULL; + status = ocfs2_read_quota_block(lqinode, ol_dqblk_block(sb, chunk, bit), - &status); - if (!qbh) { + &qbh); + if (status) { mlog_errno(status); break; } @@ -581,8 +586,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, goto out_put; } /* Now read local header */ - bh = ocfs2_read_quota_block(lqinode, 0, &status); - if (!bh) { + bh = NULL; + status = ocfs2_read_quota_block(lqinode, 0, &bh); + if (status) { mlog_errno(status); mlog(ML_ERROR, "failed to read quota file info header " "(slot=%d type=%d)\n", slot_num, type); @@ -676,8 +682,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) locked = 1; /* Now read local header */ - bh = ocfs2_read_quota_block(lqinode, 0, &status); - if (!bh) { + status = ocfs2_read_quota_block(lqinode, 0, &bh); + if (status) { mlog_errno(status); mlog(ML_ERROR, "failed to read quota file info header " "(type=%d)\n", type); @@ -850,13 +856,13 @@ static int ocfs2_local_write_dquot(struct dquot *dquot) { struct super_block *sb = dquot->dq_sb; struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); - struct buffer_head *bh; + struct buffer_head *bh = NULL; int status; - bh = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type], + status = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type], ol_dqblk_file_block(sb, od->dq_local_off), - &status); - if (!bh) { + &bh); + if (status) { mlog_errno(status); goto out; } -- cgit From af09e51b6810d3408db1c0e956b3b0687b0e3723 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:28 +0100 Subject: ocfs2: Fix oops when extending quota files We have to mark buffer as uptodate before calling ocfs2_journal_access() and ocfs2_set_buffer_uptodate() does not do this for us. 
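The ordering that avoids the oops is: fill the buffer, mark it uptodate while it is still locked, and only then hand it to the journalling layer. A minimal sketch of that write path, using the same helpers the diff below touches (variable names as in ocfs2_quota_write(); this is an illustration, not the full function):

	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);		/* generic uptodate bit, set under the buffer lock */
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(gqinode, bh);	/* ocfs2's own uptodate tracking */

	/* Only now is the buffer safe to hand to the journal. */
	err = ocfs2_journal_access(handle, gqinode, bh, ja_type);
	if (!err)
		err = ocfs2_journal_dirty(handle, bh);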
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 2bdcddd3f1c4..8fceb0c49b3e 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -174,7 +174,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, struct inode *gqinode = oinfo->dqi_gqinode; int offset = off & (sb->s_blocksize - 1); sector_t blk = off >> sb->s_blocksize_bits; - int err = 0, new = 0; + int err = 0, new = 0, ja_type; struct buffer_head *bh = NULL; handle_t *handle = journal_current_handle(); @@ -207,32 +207,28 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) && !new) { err = ocfs2_read_quota_block(gqinode, blk, &bh); - if (err) { - mlog_errno(err); - return err; - } - err = ocfs2_journal_access(handle, gqinode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ja_type = OCFS2_JOURNAL_ACCESS_WRITE; } else { bh = ocfs2_get_quota_block(gqinode, blk, &err); - if (!bh) { - mlog_errno(err); - return err; - } - err = ocfs2_journal_access(handle, gqinode, bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ja_type = OCFS2_JOURNAL_ACCESS_CREATE; } - if (err < 0) { - brelse(bh); - goto out; + if (err) { + mlog_errno(err); + return err; } lock_buffer(bh); if (new) memset(bh->b_data, 0, sb->s_blocksize); memcpy(bh->b_data + offset, data, len); flush_dcache_page(bh->b_page); + set_buffer_uptodate(bh); unlock_buffer(bh); ocfs2_set_buffer_uptodate(gqinode, bh); + err = ocfs2_journal_access(handle, gqinode, bh, ja_type); + if (err < 0) { + brelse(bh); + goto out; + } err = ocfs2_journal_dirty(handle, bh); brelse(bh); if (err < 0) -- cgit From 53a3604610e92a5344cf8003c19975583e71a598 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:29 +0100 Subject: ocfs2: Make ocfs2_get_quota_block() consistent with ocfs2_read_quota_block() Make function return error status and not buffer pointer so that it's consistent with ocfs2_read_quota_block(). 
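With both helpers returning an error code and filling a buffer_head pointer, every call site can follow one pattern. A sketch of a typical caller (names are illustrative, matching the style used elsewhere in this series):

	struct buffer_head *bh = NULL;
	int status;

	status = ocfs2_read_quota_block(gqinode, blk, &bh);
	if (status) {
		mlog_errno(status);
		return status;
	}
	/* ... use bh->b_data ... */
	brelse(bh);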
Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 8fceb0c49b3e..e527ec6e0133 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -104,26 +104,25 @@ int ocfs2_read_quota_block(struct inode *inode, u64 v_block, return rc; } -static struct buffer_head *ocfs2_get_quota_block(struct inode *inode, - int block, int *err) +static int ocfs2_get_quota_block(struct inode *inode, int block, + struct buffer_head **bh) { u64 pblock, pcount; - struct buffer_head *bh; + int err; down_read(&OCFS2_I(inode)->ip_alloc_sem); - *err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, - NULL); + err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL); up_read(&OCFS2_I(inode)->ip_alloc_sem); - if (*err) { - mlog_errno(*err); - return NULL; + if (err) { + mlog_errno(err); + return err; } - bh = sb_getblk(inode->i_sb, pblock); - if (!bh) { - *err = -EIO; - mlog_errno(*err); + *bh = sb_getblk(inode->i_sb, pblock); + if (!*bh) { + err = -EIO; + mlog_errno(err); } - return bh; + return err;; } /* Read data from global quotafile - avoid pagecache and such because we cannot @@ -209,7 +208,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, err = ocfs2_read_quota_block(gqinode, blk, &bh); ja_type = OCFS2_JOURNAL_ACCESS_WRITE; } else { - bh = ocfs2_get_quota_block(gqinode, blk, &err); + err = ocfs2_get_quota_block(gqinode, blk, &bh); ja_type = OCFS2_JOURNAL_ACCESS_CREATE; } if (err) { -- cgit From 9a2f3866c825c67c3a5806799cdc93fb7517f0c4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:30 +0100 Subject: ocfs2: Fix build warnings (64-bit types vs long long) fs/ocfs2/quota_local.c: In function 'olq_set_dquot': fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 7 has type '__le64' fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 8 has type '__le64' fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 7 has type '__le64' fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 8 has type '__le64' fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 7 has type '__le64' fs/ocfs2/quota_local.c:844: warning: format '%lld' expects type 'long long int', but argument 8 has type '__le64' fs/ocfs2/quota_global.c: In function '__ocfs2_sync_dquot': fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 8 has type 's64' fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 10 has type 's64' fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 8 has type 's64' fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 10 has type 's64' fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 8 has type 's64' fs/ocfs2/quota_global.c:457: warning: format '%lld' expects type 'long long int', but argument 10 has type 's64' Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 6 +++--- fs/ocfs2/quota_local.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 
e527ec6e0133..054d52bd8258 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -457,9 +457,9 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing) olditime = dquot->dq_dqb.dqb_itime; oldbtime = dquot->dq_dqb.dqb_btime; ocfs2_global_disk2memdqb(dquot, &dqblk); - mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n", - dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange, - dquot->dq_dqb.dqb_curinodes, inodechange); + mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n", + dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange, + dquot->dq_dqb.dqb_curinodes, (long long)inodechange); if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) dquot->dq_dqb.dqb_curspace += spacechange; if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 7053664f66a6..b5ddb22e6278 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -848,7 +848,8 @@ static void olq_set_dquot(struct buffer_head *bh, void *private) od->dq_originodes); spin_unlock(&dq_data_lock); mlog(0, "Writing local dquot %u space %lld inodes %lld\n", - od->dq_dquot.dq_id, dqblk->dqb_spacemod, dqblk->dqb_inodemod); + od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod), + (long long)le64_to_cpu(dqblk->dqb_inodemod)); } /* Write dquot to local quota file */ -- cgit From 7d9056ba20ebed6e3937a2e23183f6117919cb00 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:32 +0100 Subject: quota: Export dquot_alloc() and dquot_destroy() functions These are default functions for creating and destroying quota structures and they should be used from filesystems. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/dquot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 075dc76904e7..61bfff64e5af 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -413,10 +413,11 @@ out_dqlock: return ret; } -static void dquot_destroy(struct dquot *dquot) +void dquot_destroy(struct dquot *dquot) { kmem_cache_free(dquot_cachep, dquot); } +EXPORT_SYMBOL(dquot_destroy); static inline void do_destroy_dquot(struct dquot *dquot) { @@ -668,10 +669,11 @@ we_slept: spin_unlock(&dq_list_lock); } -static struct dquot *dquot_alloc(struct super_block *sb, int type) +struct dquot *dquot_alloc(struct super_block *sb, int type) { return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); } +EXPORT_SYMBOL(dquot_alloc); static struct dquot *get_empty_dquot(struct super_block *sb, int type) { -- cgit From 4103003b3abb85af9dec9e60616ae086c2bcb4c9 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:33 +0100 Subject: reiserfs: Add default allocation routines for quota structures Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/reiserfs/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index a9b393a5815d..c55651f1407c 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -649,6 +649,8 @@ static struct dquot_operations reiserfs_quota_operations = { .release_dquot = reiserfs_release_dquot, .mark_dirty = reiserfs_mark_dquot_dirty, .write_info = reiserfs_write_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, }; static struct quotactl_ops reiserfs_qctl_operations = { -- cgit From 157091a2c3cdc71422cbc71eace205cf1b9f2200 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:34 +0100 Subject: ext3: Add default allocation 
routines for quota structures Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ext3/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 250ec53195cb..c22d01467bd1 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -713,7 +713,9 @@ static struct dquot_operations ext3_quota_operations = { .acquire_dquot = ext3_acquire_dquot, .release_dquot = ext3_release_dquot, .mark_dirty = ext3_mark_dquot_dirty, - .write_info = ext3_write_info + .write_info = ext3_write_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, }; static struct quotactl_ops ext3_qctl_operations = { -- cgit From a5b5ee320185adc091a3a31630d278806b19d8f0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2008 15:31:35 +0100 Subject: ext4: Add default allocation routines for quota structures Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh --- fs/ext4/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 49fcf8864e76..9494bb249390 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -803,7 +803,9 @@ static struct dquot_operations ext4_quota_operations = { .acquire_dquot = ext4_acquire_dquot, .release_dquot = ext4_release_dquot, .mark_dirty = ext4_mark_dquot_dirty, - .write_info = ext4_write_info + .write_info = ext4_write_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, }; static struct quotactl_ops ext4_qctl_operations = { -- cgit From e35ff98f7c37b7bc901b4b90a66a0287565e456c Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Wed, 26 Nov 2008 16:20:19 -0800 Subject: ocfs2: fix indendation in ocfs2_dquot_drop_slow Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 054d52bd8258..a10faebe88a1 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -893,7 +893,7 @@ static int ocfs2_dquot_drop_slow(struct inode *inode) if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); - goto out; + goto out; } dquot_drop(inode); ocfs2_commit_trans(OCFS2_SB(sb), handle); -- cgit From df32b3343aa11e0c7f54783594b24321d17d376f Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Tue, 25 Nov 2008 07:21:36 +0800 Subject: ocfs2/quota: sparse fixes for quota Fix 2 minor things in quota. They are both found by sparse check. 1. an endian bug in ocfs2_local_quota_add_chunk. 2. change olq_alloc_dquot to static. 
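The endian fix follows the usual ocfs2 on-disk convention: fields of disk structures are stored little-endian (annotated __le32/__le64 for sparse), so every store converts from CPU order and every load converts back. A small illustration using the field fixed below (the reader side is shown only as the counterpart conversion, not as code from the patch):

	/* storing a CPU value into the on-disk chunk header */
	dchunk->dqc_free = cpu_to_le32(ol_quota_entries_per_block(sb));

	/* reading it back for in-memory arithmetic */
	u32 free_entries = le32_to_cpu(dchunk->dqc_free);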
Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_local.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index b5ddb22e6278..d451b715aefe 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -988,7 +988,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( goto out_trans; } lock_buffer(bh); - dchunk->dqc_free = ol_quota_entries_per_block(sb); + dchunk->dqc_free = cpu_to_le32(ol_quota_entries_per_block(sb)); memset(dchunk->dqc_bitmap, 0, sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - OCFS2_QBLK_RESERVED_SPACE); @@ -1110,7 +1110,7 @@ out: return ERR_PTR(status); } -void olq_alloc_dquot(struct buffer_head *bh, void *private) +static void olq_alloc_dquot(struct buffer_head *bh, void *private) { int *offset = private; struct ocfs2_local_disk_chunk *dchunk; -- cgit From 548b0f22bb7497ba76f91627b99f9fed53a91704 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 24 Nov 2008 19:32:13 -0800 Subject: ocfs2: Dirty the entire bucket in ocfs2_bucket_value_truncate() ocfs2_bucket_value_truncate() currently takes the first bh of the bucket, and magically plays around with the value bh - even though the bucket structure in the calling function already has it. In addition, future code wants to always dirty the entire bucket when it is changed. So let's pass the entire bucket into this function, skip any block reads (we have them), and add the access/dirty logic. ocfs2_xattr_update_value_size() is no longer necessary, as it only did one thing other than journal access/dirty. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 74 +++++++++++++++++++++----------------------------------- 1 file changed, 28 insertions(+), 46 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3b9634c7d296..6db68a23a296 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4580,31 +4580,6 @@ out: return ret; } -static int ocfs2_xattr_value_update_size(struct inode *inode, - handle_t *handle, - struct buffer_head *xe_bh, - struct ocfs2_xattr_entry *xe, - u64 new_size) -{ - int ret; - - ret = ocfs2_journal_access(handle, inode, xe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - xe->xe_value_size = cpu_to_le64(new_size); - - ret = ocfs2_journal_dirty(handle, xe_bh); - if (ret < 0) - mlog_errno(ret); - -out: - return ret; -} - /* * Truncate the specified xe_off entry in xattr bucket. * bucket is indicated by header_bh and len is the new length. @@ -4613,7 +4588,7 @@ out: * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed. */ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, - struct buffer_head *header_bh, + struct ocfs2_xattr_bucket *bucket, int xe_off, int len, struct ocfs2_xattr_set_ctxt *ctxt) @@ -4623,8 +4598,7 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, struct buffer_head *value_bh = NULL; struct ocfs2_xattr_value_root *xv; struct ocfs2_xattr_entry *xe; - struct ocfs2_xattr_header *xh = - (struct ocfs2_xattr_header *)header_bh->b_data; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); size_t blocksize = inode->i_sb->s_blocksize; xe = &xh->xh_entries[xe_off]; @@ -4638,34 +4612,41 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, /* We don't allow ocfs2_xattr_value to be stored in different block. 
*/ BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize); - value_blk += header_bh->b_blocknr; - ret = ocfs2_read_block(inode, value_blk, &value_bh, NULL); - if (ret) { - mlog_errno(ret); - goto out; - } + value_bh = bucket->bu_bhs[value_blk]; + BUG_ON(!value_bh); xv = (struct ocfs2_xattr_value_root *) (value_bh->b_data + offset % blocksize); - mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", - xe_off, (unsigned long long)header_bh->b_blocknr, len); - ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len, ctxt); + ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_xattr_value_update_size(inode, ctxt->handle, - header_bh, xe, len); + /* + * From here on out we have to dirty the bucket. The generic + * value calls only modify one of the bucket's bhs, but we need + * to send the bucket at once. So if they error, they *could* have + * modified something. We have to assume they did, and dirty + * the whole bucket. This leaves us in a consistent state. + */ + mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", + xe_off, (unsigned long long)bucket_blkno(bucket), len); + ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len, ctxt); if (ret) { mlog_errno(ret); - goto out; + goto out_dirty; } + xe->xe_value_size = cpu_to_le64(len); + +out_dirty: + ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); + out: - brelse(value_bh); return ret; } @@ -4681,7 +4662,7 @@ static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); offset = xe - xh->xh_entries; - ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket->bu_bhs[0], + ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket, offset, len, ctxt); if (ret) mlog_errno(ret); @@ -5107,11 +5088,13 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_entry *xe; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,}; + int credits = ocfs2_remove_extent_credits(osb->sb) + + ocfs2_blocks_per_xattr_bucket(inode->i_sb); + ocfs2_init_dealloc_ctxt(&ctxt.dealloc); - ctxt.handle = ocfs2_start_trans(osb, - ocfs2_remove_extent_credits(osb->sb)); + ctxt.handle = ocfs2_start_trans(osb, credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); @@ -5123,8 +5106,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, if (ocfs2_xattr_is_local(xe)) continue; - ret = ocfs2_xattr_bucket_value_truncate(inode, - bucket->bu_bhs[0], + ret = ocfs2_xattr_bucket_value_truncate(inode, bucket, i, 0, &ctxt); if (ret) { mlog_errno(ret); -- cgit From 88c3b0622acf82c7c86fbc066e81e15edc7c1685 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Thu, 11 Dec 2008 08:54:11 +0800 Subject: ocfs2: Narrow the transaction for deleting xattrs from a bucket. We move the transaction into the loop because in ocfs2_remove_extent, we will double the credits in function ocfs2_extend_rotate_transaction. So if we have a large loop number, we will soon waste much the journal space. 
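The resulting loop starts and commits a short transaction per extended attribute instead of one transaction stretched over every iteration. A condensed sketch of the new shape of ocfs2_delete_xattr_in_bucket() (error handling trimmed; see the diff below for the full version):

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];
		if (ocfs2_xattr_is_local(xe))
			continue;

		/* fresh handle, so the credits never pile up across entries */
		ctxt.handle = ocfs2_start_trans(osb, credits);
		if (IS_ERR(ctxt.handle))
			break;

		ret = ocfs2_xattr_bucket_value_truncate(inode, bucket, i, 0, &ctxt);

		ocfs2_commit_trans(osb, ctxt.handle);
		if (ret)
			break;
	}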
Signed-off-by: Tao Ma Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 6db68a23a296..df53a2ce2de5 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -5094,30 +5094,30 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, ocfs2_init_dealloc_ctxt(&ctxt.dealloc); - ctxt.handle = ocfs2_start_trans(osb, credits); - if (IS_ERR(ctxt.handle)) { - ret = PTR_ERR(ctxt.handle); - mlog_errno(ret); - goto out; - } - for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) continue; + ctxt.handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); + mlog_errno(ret); + break; + } + ret = ocfs2_xattr_bucket_value_truncate(inode, bucket, i, 0, &ctxt); + + ocfs2_commit_trans(osb, ctxt.handle); if (ret) { mlog_errno(ret); break; } } - ret = ocfs2_commit_trans(osb, ctxt.handle); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); -out: return ret; } -- cgit From 92de109ade7999084fb0bfcc65d603252504e0d0 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 25 Nov 2008 17:06:40 -0800 Subject: ocfs2: Dirty the entire first bucket in ocfs2_extend_xattr_bucket() ocfs2_extend_xattr_bucket() takes an extent of buckets and shifts some of them down to make room for a new xattr. It is passed the first bh of the first bucket, because that is where we store the number of buckets in the extent. However, future code wants to always dirty the entire bucket when it is changed. So let's pass the entire bucket into this function, skip any block reads (we have them), and add the access/dirty logic. We also can skip passing in the target bucket bh - we only need its block number. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 85 ++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index df53a2ce2de5..ed1e95967565 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3905,7 +3905,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, mlog_errno(ret); goto out; } - + ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno); if (ret) goto out; @@ -4232,37 +4232,45 @@ leave: } /* - * Extend a new xattr bucket and move xattrs to the end one by one until - * We meet with start_bh. Only move half of the xattrs to the bucket after it. + * We are given an extent. 'first' is the bucket at the very front of + * the extent. The extent has space for an additional bucket past + * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number + * of the target bucket. We wish to shift every bucket past the target + * down one, filling in that additional space. When we get back to the + * target, we split the target between itself and the now-empty bucket + * at target+1 (aka, target_blkno + blks_per_bucket). 
*/ static int ocfs2_extend_xattr_bucket(struct inode *inode, handle_t *handle, - struct buffer_head *first_bh, - struct buffer_head *start_bh, + struct ocfs2_xattr_bucket *first, + u64 target_blk, u32 num_clusters) { int ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - u64 start_blk = start_bh->b_blocknr, end_blk; - u32 num_buckets = num_clusters * ocfs2_xattr_buckets_per_cluster(osb); - struct ocfs2_xattr_header *first_xh = - (struct ocfs2_xattr_header *)first_bh->b_data; - u16 bucket = le16_to_cpu(first_xh->xh_num_buckets); + u64 end_blk; + u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); mlog(0, "extend xattr bucket in %llu, xattr extend rec starting " - "from %llu, len = %u\n", (unsigned long long)start_blk, - (unsigned long long)first_bh->b_blocknr, num_clusters); + "from %llu, len = %u\n", (unsigned long long)target_blk, + (unsigned long long)bucket_blkno(first), num_clusters); - BUG_ON(bucket >= num_buckets); + /* The extent must have room for an additional bucket */ + BUG_ON(new_bucket >= + (num_clusters * ocfs2_xattr_buckets_per_cluster(osb))); - end_blk = first_bh->b_blocknr + (bucket - 1) * blk_per_bucket; + /* end_blk points to the last existing bucket */ + end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket); /* - * We will touch all the buckets after the start_bh(include it). - * Then we add one more bucket. + * end_blk is the start of the last existing bucket. + * Thus, (end_blk - target_blk) covers the target bucket and + * every bucket after it up to, but not including, the last + * existing bucket. Then we add the last existing bucket, the + * new bucket, and the first bucket (3 * blk_per_bucket). */ - credits = end_blk - start_blk + 3 * blk_per_bucket + 1 + + credits = (end_blk - target_blk) + (3 * blk_per_bucket) + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { @@ -4270,14 +4278,14 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, first_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, first, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - while (end_blk != start_blk) { + while (end_blk != target_blk) { ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk, end_blk + blk_per_bucket, 0); if (ret) @@ -4285,12 +4293,12 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, end_blk -= blk_per_bucket; } - /* Move half of the xattr in start_blk to the next bucket. */ - ret = ocfs2_divide_xattr_bucket(inode, handle, start_blk, - start_blk + blk_per_bucket, NULL, 0); + /* Move half of the xattr in target_blkno to the next bucket. 
*/ + ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk, + target_blk + blk_per_bucket, NULL, 0); - le16_add_cpu(&first_xh->xh_num_buckets, 1); - ocfs2_journal_dirty(handle, first_bh); + le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1); + ocfs2_xattr_bucket_journal_dirty(handle, first); out: return ret; @@ -4324,10 +4332,19 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, int ret, num_buckets, extend = 1; u64 p_blkno; u32 e_cpos, num_clusters; + /* The bucket at the front of the extent */ + struct ocfs2_xattr_bucket *first; mlog(0, "Add new xattr bucket starting form %llu\n", (unsigned long long)header_bh->b_blocknr); + first = ocfs2_xattr_bucket_new(inode); + if (!first) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + /* * Add refrence for header_bh here because it may be * changed in ocfs2_add_new_xattr_cluster and we need @@ -4367,17 +4384,25 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, } } - if (extend) + if (extend) { + /* These bucket reads should be cached */ + ret = ocfs2_read_xattr_bucket(first, first_bh->b_blocknr); + if (ret) { + mlog_errno(ret); + goto out; + } ret = ocfs2_extend_xattr_bucket(inode, ctxt->handle, - first_bh, - header_bh, + first, header_bh->b_blocknr, num_clusters); - if (ret) - mlog_errno(ret); + if (ret) + mlog_errno(ret); + } + out: brelse(first_bh); brelse(header_bh); + ocfs2_xattr_bucket_free(first); return ret; } -- cgit From 15d609293d1954465a4788b9b182214323c6a2a1 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 25 Nov 2008 18:36:42 -0800 Subject: ocfs2: Dirty the entire first bucket in ocfs2_cp_xattr_cluster(). ocfs2_cp_xattr_cluster() takes the last bucket of a full extent and copies it over to a new extent. It then updates the headers of both extents to reflect the new state. It is passed the first bh of the first bucket in order to update that first extent's bucket count. It reads and dirties the first bh of the new extent for the same reason. However, future code wants to always dirty the entire bucket when it is changed. So it is changed to read the entire bucket it is updating for both extents. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 80 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index ed1e95967565..4dba34758827 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3936,9 +3936,10 @@ out: } /* - * Copy one xattr cluster from src_blk to to_blk. - * The to_blk will become the first bucket header of the cluster, so its - * xh_num_buckets will be initialized as the bucket num in the cluster. + * src_blk points to the last cluster of an existing extent. to_blk + * points to a newly allocated extent. We copy the cluster over to the + * new extent, initializing its xh_num_buckets. The old extent's + * xh_num_buckets shrinks by the same amount. 
*/ static int ocfs2_cp_xattr_cluster(struct inode *inode, handle_t *handle, @@ -3950,27 +3951,42 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, int i, ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); - struct buffer_head *bh = NULL; - struct ocfs2_xattr_header *xh; - u64 to_blk_start = to_blk; + struct ocfs2_xattr_bucket *old_first, *new_first; mlog(0, "cp xattrs from cluster %llu to %llu\n", (unsigned long long)src_blk, (unsigned long long)to_blk); + /* The first bucket of the original extent */ + old_first = ocfs2_xattr_bucket_new(inode); + /* The first bucket of the new extent */ + new_first = ocfs2_xattr_bucket_new(inode); + if (!old_first || !new_first) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_xattr_bucket(old_first, first_bh->b_blocknr); + if (ret) { + mlog_errno(ret); + goto out; + } + /* - * We need to update the new cluster and 1 more for the update of - * the 1st bucket of the previous extent rec. + * We need to update the first bucket of the old extent and the + * entire first cluster of the new extent. */ - credits = bpc + 1 + handle->h_buffer_credits; + credits = blks_per_bucket + bpc + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, first_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, old_first, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -3978,45 +3994,45 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, for (i = 0; i < num_buckets; i++) { ret = ocfs2_cp_xattr_bucket(inode, handle, - src_blk, to_blk, 1); + src_blk + (i * blks_per_bucket), + to_blk + (i * blks_per_bucket), + 1); if (ret) { mlog_errno(ret); goto out; } - - src_blk += ocfs2_blocks_per_xattr_bucket(inode->i_sb); - to_blk += ocfs2_blocks_per_xattr_bucket(inode->i_sb); } - /* update the old bucket header. */ - xh = (struct ocfs2_xattr_header *)first_bh->b_data; - le16_add_cpu(&xh->xh_num_buckets, -num_buckets); - - ocfs2_journal_dirty(handle, first_bh); - - /* update the new bucket header. */ - ret = ocfs2_read_block(inode, to_blk_start, &bh, NULL); - if (ret < 0) { + /* + * Get the new bucket ready before we dirty anything + * (This actually shouldn't fail, because we already dirtied + * it once in ocfs2_cp_xattr_bucket()). 
+ */ + ret = ocfs2_read_xattr_bucket(new_first, to_blk); + if (ret) { mlog_errno(ret); goto out; } - - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, new_first, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - xh = (struct ocfs2_xattr_header *)bh->b_data; - xh->xh_num_buckets = cpu_to_le16(num_buckets); + /* Now update the headers */ + le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets); + ocfs2_xattr_bucket_journal_dirty(handle, old_first); - ocfs2_journal_dirty(handle, bh); + bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets); + ocfs2_xattr_bucket_journal_dirty(handle, new_first); if (first_hash) - *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); + *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash); + out: - brelse(bh); + ocfs2_xattr_bucket_free(new_first); + ocfs2_xattr_bucket_free(old_first); return ret; } -- cgit From 2b656c1d6fc5ba7791a360766780a212faed5705 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 25 Nov 2008 19:00:15 -0800 Subject: ocfs2: Explain t_is_new in ocfs2_cp_xattr_cluster(). I was unsure of the JOURNAL_ACCESS parameters in ocfs2_cp_xattr_cluster(). They're based on the function argument 't_is_new', but I couldn't quite figure out how t_is_new mapped to allocation. ocfs2_cp_xattr_cluster() actually overwrites the target, regardless of t_is_new. Well, I just figured it out. So I'm adding a big fat comment for those who come after me. ocfs2_divide_xattr_cluster() has the same behavior. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 4dba34758827..5efcf4e85d7c 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3747,6 +3747,11 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, goto out; } + /* + * Hey, if we're overwriting t_bucket, what difference does + * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the + * same part of ocfs2_cp_xattr_bucket(). + */ ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, new_bucket_head ? OCFS2_JOURNAL_ACCESS_CREATE : @@ -3918,6 +3923,18 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, if (ret) goto out; + /* + * Hey, if we're overwriting t_bucket, what difference does + * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new + * cluster to fill, we came here from ocfs2_cp_xattr_cluster(), and + * it is really new - ACCESS_CREATE is required. But we also + * might have moved data out of t_bucket before extending back + * into it. ocfs2_add_new_xattr_bucket() can do this - its call + * to ocfs2_add_new_xattr_cluster() may have created a new extent + * and copied out the end of the old extent. Then it re-extends + * the old extent back to create space for new xattrs. That's + * how we get here, and the bucket isn't really new. + */ ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, t_is_new ? OCFS2_JOURNAL_ACCESS_CREATE : -- cgit From b5c03e746959bb005b987e9d8511df46680c3daa Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 25 Nov 2008 19:58:16 -0800 Subject: ocfs2: Use ocfs2_cp_xattr_bucket() in ocfs2_mv_xattr_bucket_cross_cluster(). The buffer copy loop of ocfs2_mv_xattr_bucket_cross_cluster() actually looks a lot like ocfs2_cp_xattr_bucket(). Let's just use that instead. We also use bucket operations to update the buckets at the start of each extent. 
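The bucket operations mentioned here wrap journal access and dirty for every block backing a bucket, so a header update on an extent's first bucket reads as one unit. A sketch of the pattern, in the style of the surrounding patches rather than a complete excerpt:

	ret = ocfs2_xattr_bucket_journal_access(handle, first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		goto out;

	/* adjust the bucket count stored in the extent's first bucket */
	le16_add_cpu(&bucket_xh(first)->xh_num_buckets, -to_move);
	ocfs2_xattr_bucket_journal_dirty(handle, first);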
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 169 ++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 104 insertions(+), 65 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 5efcf4e85d7c..5be99666f02c 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -170,6 +170,11 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, static int ocfs2_delete_xattr_index_block(struct inode *inode, struct buffer_head *xb_bh); +static int ocfs2_cp_xattr_bucket(struct inode *inode, + handle_t *handle, + u64 s_blkno, + u64 t_blkno, + int t_is_new); static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb) { @@ -3526,13 +3531,21 @@ out: } /* - * Move half nums of the xattr bucket in the previous cluster to this new - * cluster. We only touch the last cluster of the previous extend record. + * prev_blkno points to the start of an existing extent. new_blkno + * points to a newly allocated extent. Because we know each of our + * clusters contains more than bucket, we can easily split one cluster + * at a bucket boundary. So we take the last cluster of the existing + * extent and split it down the middle. We move the last half of the + * buckets in the last cluster of the existing extent over to the new + * extent. + * + * first_bh is the buffer at prev_blkno so we can update the existing + * extent's bucket count. header_bh is the bucket were we were hoping + * to insert our xattr. If the bucket move places the target in the new + * extent, we'll update first_bh and header_bh after modifying the old + * extent. * - * first_bh is the first buffer_head of a series of bucket in the same - * extent rec and header_bh is the header of one bucket in this cluster. - * They will be updated if we move the data header_bh contains to the new - * cluster. first_hash will be set as the 1st xe's name_hash of the new cluster. + * first_hash will be set as the 1st xe's name_hash in the new extent. 
*/ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, handle_t *handle, @@ -3545,105 +3558,131 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, { int i, ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); - int blocksize = inode->i_sb->s_blocksize; - struct buffer_head *old_bh, *new_bh, *prev_bh, *new_first_bh = NULL; - struct ocfs2_xattr_header *new_xh; + int to_move = num_buckets / 2; + u64 last_cluster_blkno, src_blkno; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)((*first_bh)->b_data); + struct ocfs2_xattr_bucket *old_first, *new_first; BUG_ON(le16_to_cpu(xh->xh_num_buckets) < num_buckets); BUG_ON(OCFS2_XATTR_BUCKET_SIZE == osb->s_clustersize); - prev_bh = *first_bh; - get_bh(prev_bh); - xh = (struct ocfs2_xattr_header *)prev_bh->b_data; - - prev_blkno += (num_clusters - 1) * bpc + bpc / 2; + last_cluster_blkno = prev_blkno + ((num_clusters - 1) * bpc); + src_blkno = last_cluster_blkno + (to_move * blks_per_bucket); mlog(0, "move half of xattrs in cluster %llu to %llu\n", (unsigned long long)prev_blkno, (unsigned long long)new_blkno); + /* The first bucket of the original extent */ + old_first = ocfs2_xattr_bucket_new(inode); + /* The first bucket of the new extent */ + new_first = ocfs2_xattr_bucket_new(inode); + if (!old_first || !new_first) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_xattr_bucket(old_first, prev_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + /* - * We need to update the 1st half of the new cluster and - * 1 more for the update of the 1st bucket of the previous - * extent record. + * We need to update the 1st half of the new extent, and we + * need to update the first bucket of the old extent. */ - credits = bpc / 2 + 1 + handle->h_buffer_credits; + credits = ((to_move + 1) * blks_per_bucket) + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, prev_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_xattr_bucket_journal_access(handle, old_first, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - for (i = 0; i < bpc / 2; i++, prev_blkno++, new_blkno++) { - old_bh = new_bh = NULL; - new_bh = sb_getblk(inode->i_sb, new_blkno); - if (!new_bh) { - ret = -EIO; + for (i = 0; i < to_move; i++) { + ret = ocfs2_cp_xattr_bucket(inode, handle, + src_blkno + (i * blks_per_bucket), + new_blkno + (i * blks_per_bucket), + 1); + if (ret) { mlog_errno(ret); goto out; } + } - ocfs2_set_new_buffer_uptodate(inode, new_bh); + /* + * Get the new bucket ready before we dirty anything + * (This actually shouldn't fail, because we already dirtied + * it once in ocfs2_cp_xattr_bucket()). 
+ */ + ret = ocfs2_read_xattr_bucket(new_first, new_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_xattr_bucket_journal_access(handle, new_first, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } - ret = ocfs2_journal_access(handle, inode, new_bh, - OCFS2_JOURNAL_ACCESS_CREATE); - if (ret < 0) { - mlog_errno(ret); - brelse(new_bh); - goto out; - } + /* Now update the headers */ + le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -to_move); + ocfs2_xattr_bucket_journal_dirty(handle, old_first); - ret = ocfs2_read_block(inode, prev_blkno, &old_bh, NULL); - if (ret < 0) { - mlog_errno(ret); - brelse(new_bh); - goto out; - } + bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(to_move); + ocfs2_xattr_bucket_journal_dirty(handle, new_first); - memcpy(new_bh->b_data, old_bh->b_data, blocksize); + if (first_hash) + *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash); - if (i == 0) { - new_xh = (struct ocfs2_xattr_header *)new_bh->b_data; - new_xh->xh_num_buckets = cpu_to_le16(num_buckets / 2); + /* + * If the target bucket is anywhere past src_blkno, we moved + * it to the new extent. We need to update first_bh and header_bh. + */ + if ((*header_bh)->b_blocknr >= src_blkno) { + /* We're done with old_first, so we can re-use it. */ + ocfs2_xattr_bucket_relse(old_first); - if (first_hash) - *first_hash = le32_to_cpu( - new_xh->xh_entries[0].xe_name_hash); - new_first_bh = new_bh; - get_bh(new_first_bh); - } + /* Find the block for the new target bucket */ + src_blkno = new_blkno + + ((*header_bh)->b_blocknr - src_blkno); - ocfs2_journal_dirty(handle, new_bh); + /* + * This shouldn't fail - the buffers are in the + * journal from ocfs2_cp_xattr_bucket(). + */ + ret = ocfs2_read_xattr_bucket(old_first, src_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } - if (*header_bh == old_bh) { - brelse(*header_bh); - *header_bh = new_bh; - get_bh(*header_bh); + brelse(*first_bh); + *first_bh = new_first->bu_bhs[0]; + get_bh(*first_bh); - brelse(*first_bh); - *first_bh = new_first_bh; - get_bh(*first_bh); - } - brelse(new_bh); - brelse(old_bh); + brelse(*header_bh); + *header_bh = old_first->bu_bhs[0]; + get_bh(*header_bh); } - le16_add_cpu(&xh->xh_num_buckets, -(num_buckets / 2)); - - ocfs2_journal_dirty(handle, prev_bh); out: - brelse(prev_bh); - brelse(new_first_bh); + ocfs2_xattr_bucket_free(new_first); + ocfs2_xattr_bucket_free(old_first); + return ret; } -- cgit From 874d65af1c8b8f6456a934701e6828d3017be029 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 13:02:18 -0800 Subject: ocfs2: Rename ocfs2_cp_xattr_cluster() to ocfs2_mv_xattr_buckets(). ocfs2_cp_xattr_cluster() takes the last cluster of an xattr extent, copies its buckets to the front of a new extent, and then shrinks the bucket count of the original extent. So it's really moving the data, not copying it. While we're here, the function doesn't need a buffer_head for the old extent, just the block number. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 5be99666f02c..c1f2e0690747 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3965,11 +3965,12 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, /* * Hey, if we're overwriting t_bucket, what difference does * ACCESS_CREATE vs ACCESS_WRITE make? 
Well, if we allocated a new - * cluster to fill, we came here from ocfs2_cp_xattr_cluster(), and - * it is really new - ACCESS_CREATE is required. But we also - * might have moved data out of t_bucket before extending back - * into it. ocfs2_add_new_xattr_bucket() can do this - its call - * to ocfs2_add_new_xattr_cluster() may have created a new extent + * cluster to fill, we came here from + * ocfs2_mv_xattr_buckets(), and it is really new - + * ACCESS_CREATE is required. But we also might have moved data + * out of t_bucket before extending back into it. + * ocfs2_add_new_xattr_bucket() can do this - its call to + * ocfs2_add_new_xattr_cluster() may have created a new extent * and copied out the end of the old extent. Then it re-extends * the old extent back to create space for new xattrs. That's * how we get here, and the bucket isn't really new. @@ -3992,17 +3993,16 @@ out: } /* - * src_blk points to the last cluster of an existing extent. to_blk - * points to a newly allocated extent. We copy the cluster over to the - * new extent, initializing its xh_num_buckets. The old extent's - * xh_num_buckets shrinks by the same amount. + * src_blk points to the start of an existing extent. last_blk points to + * last cluster in that extent. to_blk points to a newly allocated + * extent. We copy the buckets from cluster at last_blk to the new extent, + * initializing its xh_num_buckets. The old extent's xh_num_buckets + * shrinks by the same amount. */ -static int ocfs2_cp_xattr_cluster(struct inode *inode, +static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, - struct buffer_head *first_bh, - u64 src_blk, - u64 to_blk, - u32 *first_hash) + u64 src_blk, u64 last_blk, + u64 to_blk, u32 *first_hash) { int i, ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -4011,8 +4011,8 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); struct ocfs2_xattr_bucket *old_first, *new_first; - mlog(0, "cp xattrs from cluster %llu to %llu\n", - (unsigned long long)src_blk, (unsigned long long)to_blk); + mlog(0, "mv xattrs from cluster %llu to %llu\n", + (unsigned long long)last_blk, (unsigned long long)to_blk); /* The first bucket of the original extent */ old_first = ocfs2_xattr_bucket_new(inode); @@ -4024,7 +4024,7 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, goto out; } - ret = ocfs2_read_xattr_bucket(old_first, first_bh->b_blocknr); + ret = ocfs2_read_xattr_bucket(old_first, src_blk); if (ret) { mlog_errno(ret); goto out; @@ -4050,7 +4050,7 @@ static int ocfs2_cp_xattr_cluster(struct inode *inode, for (i = 0; i < num_buckets; i++) { ret = ocfs2_cp_xattr_bucket(inode, handle, - src_blk + (i * blks_per_bucket), + last_blk + (i * blks_per_bucket), to_blk + (i * blks_per_bucket), 1); if (ret) { @@ -4175,8 +4175,10 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, u64 last_blk = prev_blk + bpc * (prev_clusters - 1); if (prev_clusters > 1 && (*header_bh)->b_blocknr != last_blk) - ret = ocfs2_cp_xattr_cluster(inode, handle, *first_bh, - last_blk, new_blk, + ret = ocfs2_mv_xattr_buckets(inode, handle, + (*first_bh)->b_blocknr, + last_blk, + new_blk, v_start); else { ret = ocfs2_divide_xattr_cluster(inode, handle, -- cgit From 54ecb6b6df54bf72befb359b21f3759b2952f9d9 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 13:18:31 -0800 Subject: ocfs2: ocfs2_mv_xattr_buckets() can handle a partial cluster now. 
If you look at ocfs2_mv_xattr_bucket_cross_cluster(), you'll notice that two-thirds of the code is almost identical to ocfs2_mv_xattr_buckets(). The only difference is that ocfs2_mv_xattr_buckets() moves a whole cluster's worth, while ocfs2_mv_xattr_bucket_cross_cluster() moves half the cluster. We change ocfs2_mv_xattr_buckets() to allow moving partial clusters. The original caller of ocfs2_mv_xattr_buckets() still moves the whole cluster's worth - it just passes a start_bucket of 0. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index c1f2e0690747..97340940cee2 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3995,18 +3995,19 @@ out: /* * src_blk points to the start of an existing extent. last_blk points to * last cluster in that extent. to_blk points to a newly allocated - * extent. We copy the buckets from cluster at last_blk to the new extent, - * initializing its xh_num_buckets. The old extent's xh_num_buckets - * shrinks by the same amount. + * extent. We copy the buckets from the cluster at last_blk to the new + * extent. If start_bucket is non-zero, we skip that many buckets before + * we start copying. The new extent's xh_num_buckets gets set to the + * number of buckets we copied. The old extent's xh_num_buckets shrinks + * by the same amount. */ -static int ocfs2_mv_xattr_buckets(struct inode *inode, - handle_t *handle, - u64 src_blk, u64 last_blk, - u64 to_blk, u32 *first_hash) +static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, + u64 src_blk, u64 last_blk, u64 to_blk, + unsigned int start_bucket, + u32 *first_hash) { int i, ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); struct ocfs2_xattr_bucket *old_first, *new_first; @@ -4014,6 +4015,12 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, mlog(0, "mv xattrs from cluster %llu to %llu\n", (unsigned long long)last_blk, (unsigned long long)to_blk); + BUG_ON(start_bucket >= num_buckets); + if (start_bucket) { + num_buckets -= start_bucket; + last_blk += (start_bucket * blks_per_bucket); + } + /* The first bucket of the original extent */ old_first = ocfs2_xattr_bucket_new(inode); /* The first bucket of the new extent */ @@ -4031,10 +4038,11 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, } /* - * We need to update the first bucket of the old extent and the - * entire first cluster of the new extent. + * We need to update the first bucket of the old extent and all + * the buckets going to the new extent. 
*/ - credits = blks_per_bucket + bpc + handle->h_buffer_credits; + credits = ((num_buckets + 1) * blks_per_bucket) + + handle->h_buffer_credits; ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); @@ -4177,8 +4185,7 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, if (prev_clusters > 1 && (*header_bh)->b_blocknr != last_blk) ret = ocfs2_mv_xattr_buckets(inode, handle, (*first_bh)->b_blocknr, - last_blk, - new_blk, + last_blk, new_blk, 0, v_start); else { ret = ocfs2_divide_xattr_cluster(inode, handle, -- cgit From c58b6032f93358871361a92d7743dbc85d27084e Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 13:36:24 -0800 Subject: ocfs2: Use ocfs2_mv_xattr_buckets() in ocfs2_mv_xattr_bucket_cross_cluster(). Now that ocfs2_mv_xattr_buckets() can move a partial cluster's worth of buckets, ocfs2_mv_xattr_bucket_cross_cluster() can use it. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 110 +++++++++++++++---------------------------------------- 1 file changed, 29 insertions(+), 81 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 97340940cee2..c3189286679a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -170,11 +170,10 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, static int ocfs2_delete_xattr_index_block(struct inode *inode, struct buffer_head *xb_bh); -static int ocfs2_cp_xattr_bucket(struct inode *inode, - handle_t *handle, - u64 s_blkno, - u64 t_blkno, - int t_is_new); +static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, + u64 src_blk, u64 last_blk, u64 to_blk, + unsigned int start_bucket, + u32 *first_hash); static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb) { @@ -3556,115 +3555,64 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, u32 num_clusters, u32 *first_hash) { - int i, ret, credits; + int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); int to_move = num_buckets / 2; - u64 last_cluster_blkno, src_blkno; + u64 src_blkno; + u64 last_cluster_blkno = prev_blkno + + ((num_clusters - 1) * ocfs2_clusters_to_blocks(inode->i_sb, 1)); struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)((*first_bh)->b_data); - struct ocfs2_xattr_bucket *old_first, *new_first; + struct ocfs2_xattr_bucket *new_target, *new_first; BUG_ON(le16_to_cpu(xh->xh_num_buckets) < num_buckets); BUG_ON(OCFS2_XATTR_BUCKET_SIZE == osb->s_clustersize); - last_cluster_blkno = prev_blkno + ((num_clusters - 1) * bpc); - src_blkno = last_cluster_blkno + (to_move * blks_per_bucket); - mlog(0, "move half of xattrs in cluster %llu to %llu\n", - (unsigned long long)prev_blkno, (unsigned long long)new_blkno); + (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); - /* The first bucket of the original extent */ - old_first = ocfs2_xattr_bucket_new(inode); /* The first bucket of the new extent */ new_first = ocfs2_xattr_bucket_new(inode); - if (!old_first || !new_first) { + /* The target bucket if it was moved to the new extent */ + new_target = ocfs2_xattr_bucket_new(inode); + if (!new_target || !new_first) { ret = -ENOMEM; mlog_errno(ret); goto out; } - ret = ocfs2_read_xattr_bucket(old_first, prev_blkno); + ret = ocfs2_mv_xattr_buckets(inode, handle, prev_blkno, + last_cluster_blkno, new_blkno, + to_move, first_hash); 
if (ret) { mlog_errno(ret); goto out; } - /* - * We need to update the 1st half of the new extent, and we - * need to update the first bucket of the old extent. - */ - credits = ((to_move + 1) * blks_per_bucket) + handle->h_buffer_credits; - ret = ocfs2_extend_trans(handle, credits); - if (ret) { - mlog_errno(ret); - goto out; - } - - ret = ocfs2_xattr_bucket_journal_access(handle, old_first, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - - for (i = 0; i < to_move; i++) { - ret = ocfs2_cp_xattr_bucket(inode, handle, - src_blkno + (i * blks_per_bucket), - new_blkno + (i * blks_per_bucket), - 1); - if (ret) { - mlog_errno(ret); - goto out; - } - } - - /* - * Get the new bucket ready before we dirty anything - * (This actually shouldn't fail, because we already dirtied - * it once in ocfs2_cp_xattr_bucket()). - */ - ret = ocfs2_read_xattr_bucket(new_first, new_blkno); - if (ret) { - mlog_errno(ret); - goto out; - } - ret = ocfs2_xattr_bucket_journal_access(handle, new_first, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - - /* Now update the headers */ - le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -to_move); - ocfs2_xattr_bucket_journal_dirty(handle, old_first); - - bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(to_move); - ocfs2_xattr_bucket_journal_dirty(handle, new_first); - - if (first_hash) - *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash); + /* This is the first bucket that got moved */ + src_blkno = last_cluster_blkno + (to_move * blks_per_bucket); /* - * If the target bucket is anywhere past src_blkno, we moved - * it to the new extent. We need to update first_bh and header_bh. + * If the target bucket was part of the moved buckets, we need to + * update first_bh and header_bh. */ if ((*header_bh)->b_blocknr >= src_blkno) { - /* We're done with old_first, so we can re-use it. */ - ocfs2_xattr_bucket_relse(old_first); - /* Find the block for the new target bucket */ src_blkno = new_blkno + ((*header_bh)->b_blocknr - src_blkno); /* - * This shouldn't fail - the buffers are in the + * These shouldn't fail - the buffers are in the * journal from ocfs2_cp_xattr_bucket(). */ - ret = ocfs2_read_xattr_bucket(old_first, src_blkno); + ret = ocfs2_read_xattr_bucket(new_first, new_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_read_xattr_bucket(new_target, src_blkno); if (ret) { mlog_errno(ret); goto out; @@ -3675,13 +3623,13 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, get_bh(*first_bh); brelse(*header_bh); - *header_bh = old_first->bu_bhs[0]; + *header_bh = new_target->bu_bhs[0]; get_bh(*header_bh); } out: ocfs2_xattr_bucket_free(new_first); - ocfs2_xattr_bucket_free(old_first); + ocfs2_xattr_bucket_free(new_target); return ret; } -- cgit From 92cf3adf48097b7561a3c83f800ed3b2b25b18d4 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 14:12:09 -0800 Subject: ocfs2: Start using buckets in ocfs2_adjust_xattr_cross_cluster(). We want to be passing around buckets instead of buffer_heads. Let's get them into ocfs2_adjust_xattr_cross_cluster. 
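The series converges on a simple lifecycle for these bucket objects. A minimal
sketch of that pattern (an illustrative fragment, not part of this patch;
'inode' and 'blkno' are assumed to be in scope) using only helpers that appear
in these diffs:

        struct ocfs2_xattr_bucket *bucket;
        int ret;

        bucket = ocfs2_xattr_bucket_new(inode);        /* allocate the in-memory bucket */
        if (!bucket)
                return -ENOMEM;

        ret = ocfs2_read_xattr_bucket(bucket, blkno);  /* read all blocks of the bucket */
        if (!ret)
                mlog(0, "bucket %llu, first name hash 0x%x\n",
                     (unsigned long long)bucket_blkno(bucket),
                     le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));

        ocfs2_xattr_bucket_free(bucket);               /* drops its buffer references too */

Compared with carrying raw buffer_head pointers around, bucket_blkno() and
bucket_xh() hide which of the bucket's blocks is actually being touched.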
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 44 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index c3189286679a..975ba3653feb 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4111,28 +4111,54 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, u32 *v_start, int *extend) { - int ret = 0; - int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + int ret; + struct ocfs2_xattr_bucket *first, *target; mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", (unsigned long long)prev_blk, prev_clusters, (unsigned long long)new_blk); + /* The first bucket of the original extent */ + first = ocfs2_xattr_bucket_new(inode); + /* The target bucket for insert */ + target = ocfs2_xattr_bucket_new(inode); + if (!first || !target) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + BUG_ON(prev_blk != (*first_bh)->b_blocknr); + ret = ocfs2_read_xattr_bucket(first, prev_blk); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_xattr_bucket(target, (*header_bh)->b_blocknr); + if (ret) { + mlog_errno(ret); + goto out; + } + if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, handle, first_bh, header_bh, new_blk, - prev_blk, + bucket_blkno(first), prev_clusters, v_start); else { - u64 last_blk = prev_blk + bpc * (prev_clusters - 1); + /* The start of the last cluster in the first extent */ + u64 last_blk = bucket_blkno(first) + + ((prev_clusters - 1) * + ocfs2_clusters_to_blocks(inode->i_sb, 1)); - if (prev_clusters > 1 && (*header_bh)->b_blocknr != last_blk) + if (prev_clusters > 1 && bucket_blkno(target) != last_blk) ret = ocfs2_mv_xattr_buckets(inode, handle, - (*first_bh)->b_blocknr, + bucket_blkno(first), last_blk, new_blk, 0, v_start); else { @@ -4140,11 +4166,15 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, last_blk, new_blk, v_start); - if ((*header_bh)->b_blocknr == last_blk && extend) + if ((bucket_blkno(target) == last_blk) && extend) *extend = 0; } } +out: + ocfs2_xattr_bucket_free(first); + ocfs2_xattr_bucket_free(target); + return ret; } -- cgit From 41cb814866110b6e35dad7569ecf96163c3bb824 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 14:25:21 -0800 Subject: ocfs2: Pass buckets into ocfs2_mv_xattr_bucket_cross_cluster(). Now that ocfs2_adjust_xattr_cross_cluster() has buckets, it can pass them into ocfs2_mv_xattr_bucket_cross_cluster(). It no longer has to care about buffer_heads. The manipulation of first_bh and header_bh moves up to ocfs2_adjust_xattr_cross_cluster(). 
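To make the block arithmetic in ocfs2_mv_xattr_bucket_cross_cluster() concrete,
take an illustrative geometry that is not taken from the patch: 4K blocks, 4K
xattr buckets (blks_per_bucket = 1) and 16K clusters, so
ocfs2_xattr_buckets_per_cluster() is 4 and to_move = 4 / 2 = 2. With
bucket_blkno(first) = 1000, num_clusters = 2 and new_blkno = 2000:

        last_cluster_blkno = 1000 + (2 - 1) * 4 = 1004   (buckets 1004..1007)
        src_blkno          = 1004 + 2 * 1       = 1006   (first bucket moved)
        buckets 1006 and 1007 are copied to 2000 and 2001

If the target bucket sat at 1007, its new location is 2000 + (1007 - 1006) =
2001, which is why the function re-reads 'first' at new_blkno and 'target' at
the recomputed block whenever bucket_blkno(target) >= src_blkno.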
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 84 +++++++++++++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 975ba3653feb..2f16f50ebcba 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3548,42 +3548,28 @@ out: */ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, handle_t *handle, - struct buffer_head **first_bh, - struct buffer_head **header_bh, + struct ocfs2_xattr_bucket *first, + struct ocfs2_xattr_bucket *target, u64 new_blkno, - u64 prev_blkno, u32 num_clusters, u32 *first_hash) { int ret; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); - int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); + struct super_block *sb = inode->i_sb; + int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb); + int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb)); int to_move = num_buckets / 2; u64 src_blkno; - u64 last_cluster_blkno = prev_blkno + - ((num_clusters - 1) * ocfs2_clusters_to_blocks(inode->i_sb, 1)); - struct ocfs2_xattr_header *xh = - (struct ocfs2_xattr_header *)((*first_bh)->b_data); - struct ocfs2_xattr_bucket *new_target, *new_first; + u64 last_cluster_blkno = bucket_blkno(first) + + ((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1)); - BUG_ON(le16_to_cpu(xh->xh_num_buckets) < num_buckets); - BUG_ON(OCFS2_XATTR_BUCKET_SIZE == osb->s_clustersize); + BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); + BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); mlog(0, "move half of xattrs in cluster %llu to %llu\n", (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); - /* The first bucket of the new extent */ - new_first = ocfs2_xattr_bucket_new(inode); - /* The target bucket if it was moved to the new extent */ - new_target = ocfs2_xattr_bucket_new(inode); - if (!new_target || !new_first) { - ret = -ENOMEM; - mlog_errno(ret); - goto out; - } - - ret = ocfs2_mv_xattr_buckets(inode, handle, prev_blkno, + ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), last_cluster_blkno, new_blkno, to_move, first_hash); if (ret) { @@ -3596,41 +3582,32 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, /* * If the target bucket was part of the moved buckets, we need to - * update first_bh and header_bh. + * update first and target. */ - if ((*header_bh)->b_blocknr >= src_blkno) { + if (bucket_blkno(target) >= src_blkno) { /* Find the block for the new target bucket */ src_blkno = new_blkno + - ((*header_bh)->b_blocknr - src_blkno); + (bucket_blkno(target) - src_blkno); + + ocfs2_xattr_bucket_relse(first); + ocfs2_xattr_bucket_relse(target); /* * These shouldn't fail - the buffers are in the * journal from ocfs2_cp_xattr_bucket(). 
*/ - ret = ocfs2_read_xattr_bucket(new_first, new_blkno); + ret = ocfs2_read_xattr_bucket(first, new_blkno); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_read_xattr_bucket(new_target, src_blkno); - if (ret) { + ret = ocfs2_read_xattr_bucket(target, src_blkno); + if (ret) mlog_errno(ret); - goto out; - } - brelse(*first_bh); - *first_bh = new_first->bu_bhs[0]; - get_bh(*first_bh); - - brelse(*header_bh); - *header_bh = new_target->bu_bhs[0]; - get_bh(*header_bh); } out: - ocfs2_xattr_bucket_free(new_first); - ocfs2_xattr_bucket_free(new_target); - return ret; } @@ -4141,16 +4118,29 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, goto out; } - if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) + if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, handle, - first_bh, - header_bh, + first, target, new_blk, - bucket_blkno(first), prev_clusters, v_start); - else { + if (ret) { + mlog_errno(ret); + goto out; + } + + /* Did first+target get moved? */ + if (prev_blk != bucket_blkno(first)) { + brelse(*first_bh); + *first_bh = first->bu_bhs[0]; + get_bh(*first_bh); + + brelse(*header_bh); + *header_bh = target->bu_bhs[0]; + get_bh(*header_bh); + } + } else { /* The start of the last cluster in the first extent */ u64 last_blk = bucket_blkno(first) + ((prev_clusters - 1) * -- cgit From 012ee910876e251621705e8dea7c353fd4914e19 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 14:43:31 -0800 Subject: ocfs2: Move buckets up into ocfs2_add_new_xattr_cluster(). Lift the buckets from ocfs2_adjust_xattr_cross_cluster() up into ocfs2_add_new_xattr_cluster(). Now ocfs2_adjust_xattr_cross_cluster() doesn't deal with buffer_heads. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 100 +++++++++++++++++++++++++++---------------------------- 1 file changed, 49 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2f16f50ebcba..4b247047b7aa 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4080,44 +4080,19 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode, */ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, handle_t *handle, - struct buffer_head **first_bh, - struct buffer_head **header_bh, + struct ocfs2_xattr_bucket *first, + struct ocfs2_xattr_bucket *target, u64 new_blk, - u64 prev_blk, u32 prev_clusters, u32 *v_start, int *extend) { int ret; - struct ocfs2_xattr_bucket *first, *target; mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", - (unsigned long long)prev_blk, prev_clusters, + (unsigned long long)bucket_blkno(first), prev_clusters, (unsigned long long)new_blk); - /* The first bucket of the original extent */ - first = ocfs2_xattr_bucket_new(inode); - /* The target bucket for insert */ - target = ocfs2_xattr_bucket_new(inode); - if (!first || !target) { - ret = -ENOMEM; - mlog_errno(ret); - goto out; - } - - BUG_ON(prev_blk != (*first_bh)->b_blocknr); - ret = ocfs2_read_xattr_bucket(first, prev_blk); - if (ret) { - mlog_errno(ret); - goto out; - } - - ret = ocfs2_read_xattr_bucket(target, (*header_bh)->b_blocknr); - if (ret) { - mlog_errno(ret); - goto out; - } - if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, handle, @@ -4125,46 +4100,33 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, new_blk, prev_clusters, v_start); - if (ret) { + if (ret) mlog_errno(ret); - goto out; - } - - /* 
Did first+target get moved? */ - if (prev_blk != bucket_blkno(first)) { - brelse(*first_bh); - *first_bh = first->bu_bhs[0]; - get_bh(*first_bh); - - brelse(*header_bh); - *header_bh = target->bu_bhs[0]; - get_bh(*header_bh); - } } else { /* The start of the last cluster in the first extent */ u64 last_blk = bucket_blkno(first) + ((prev_clusters - 1) * ocfs2_clusters_to_blocks(inode->i_sb, 1)); - if (prev_clusters > 1 && bucket_blkno(target) != last_blk) + if (prev_clusters > 1 && bucket_blkno(target) != last_blk) { ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), last_blk, new_blk, 0, v_start); - else { + if (ret) + mlog_errno(ret); + } else { ret = ocfs2_divide_xattr_cluster(inode, handle, last_blk, new_blk, v_start); + if (ret) + mlog_errno(ret); if ((bucket_blkno(target) == last_blk) && extend) *extend = 0; } } -out: - ocfs2_xattr_bucket_free(first); - ocfs2_xattr_bucket_free(target); - return ret; } @@ -4202,6 +4164,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, handle_t *handle = ctxt->handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; + struct ocfs2_xattr_bucket *first, *target; mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " "previous xattr blkno = %llu\n", @@ -4210,6 +4173,29 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); + /* The first bucket of the original extent */ + first = ocfs2_xattr_bucket_new(inode); + /* The target bucket for insert */ + target = ocfs2_xattr_bucket_new(inode); + if (!first || !target) { + ret = -ENOMEM; + mlog_errno(ret); + goto leave; + } + + BUG_ON(prev_blkno != (*first_bh)->b_blocknr); + ret = ocfs2_read_xattr_bucket(first, prev_blkno); + if (ret) { + mlog_errno(ret); + goto leave; + } + + ret = ocfs2_read_xattr_bucket(target, (*header_bh)->b_blocknr); + if (ret) { + mlog_errno(ret); + goto leave; + } + ret = ocfs2_journal_access(handle, inode, root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { @@ -4250,10 +4236,9 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, } else { ret = ocfs2_adjust_xattr_cross_cluster(inode, handle, - first_bh, - header_bh, + first, + target, block, - prev_blkno, prev_clusters, &v_start, extend); @@ -4261,6 +4246,17 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog_errno(ret); goto leave; } + + /* Did first+target get moved? */ + if (prev_blkno != bucket_blkno(first)) { + brelse(*first_bh); + *first_bh = first->bu_bhs[0]; + get_bh(*first_bh); + + brelse(*header_bh); + *header_bh = target->bu_bhs[0]; + get_bh(*header_bh); + } } mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", @@ -4277,6 +4273,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog_errno(ret); leave: + ocfs2_xattr_bucket_free(first); + ocfs2_xattr_bucket_free(target); return ret; } -- cgit From ed29c0ca14871021fc8aced74650648dcb2c6e81 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 15:08:44 -0800 Subject: ocfs2: Move buckets up into ocfs2_add_new_xattr_bucket(). Lift the buckets from ocfs2_add_new_xattr_cluster() up into ocfs2_add_new_xattr_bucket(). Now ocfs2_add_new_xattr_cluster() doesn't deal with buffer_heads. In fact, we no longer have to play get_bh() tricks at all. 
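The "get_bh() tricks" are the buffer_head re-pointing dance that the diff below
deletes from ocfs2_add_new_xattr_cluster(); whenever the target bucket moved
into the new extent, the caller had to swap its bh pointers by hand:

        brelse(*first_bh);
        *first_bh = first->bu_bhs[0];
        get_bh(*first_bh);

        brelse(*header_bh);
        *header_bh = target->bu_bhs[0];
        get_bh(*header_bh);

With callers holding struct ocfs2_xattr_bucket objects instead, the bucket
already owns references to its bu_bhs[], so the reference juggling disappears.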
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 105 +++++++++++++++++-------------------------------------- 1 file changed, 32 insertions(+), 73 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 4b247047b7aa..5a5a1bd7eede 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4148,11 +4148,10 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, */ static int ocfs2_add_new_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, - struct buffer_head **first_bh, - struct buffer_head **header_bh, + struct ocfs2_xattr_bucket *first, + struct ocfs2_xattr_bucket *target, u32 *num_clusters, u32 prev_cpos, - u64 prev_blkno, int *extend, struct ocfs2_xattr_set_ctxt *ctxt) { @@ -4164,38 +4163,14 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, handle_t *handle = ctxt->handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; - struct ocfs2_xattr_bucket *first, *target; mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " "previous xattr blkno = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, - prev_cpos, (unsigned long long)prev_blkno); + prev_cpos, (unsigned long long)bucket_blkno(first)); ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); - /* The first bucket of the original extent */ - first = ocfs2_xattr_bucket_new(inode); - /* The target bucket for insert */ - target = ocfs2_xattr_bucket_new(inode); - if (!first || !target) { - ret = -ENOMEM; - mlog_errno(ret); - goto leave; - } - - BUG_ON(prev_blkno != (*first_bh)->b_blocknr); - ret = ocfs2_read_xattr_bucket(first, prev_blkno); - if (ret) { - mlog_errno(ret); - goto leave; - } - - ret = ocfs2_read_xattr_bucket(target, (*header_bh)->b_blocknr); - if (ret) { - mlog_errno(ret); - goto leave; - } - ret = ocfs2_journal_access(handle, inode, root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { @@ -4217,7 +4192,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n", num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); - if (prev_blkno + prev_clusters * bpc == block && + if (bucket_blkno(first) + (prev_clusters * bpc) == block && (prev_clusters + num_bits) << osb->s_clustersize_bits <= OCFS2_MAX_XATTR_TREE_LEAF_SIZE) { /* @@ -4246,17 +4221,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog_errno(ret); goto leave; } - - /* Did first+target get moved? */ - if (prev_blkno != bucket_blkno(first)) { - brelse(*first_bh); - *first_bh = first->bu_bhs[0]; - get_bh(*first_bh); - - brelse(*header_bh); - *header_bh = target->bu_bhs[0]; - get_bh(*header_bh); - } } mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", @@ -4273,8 +4237,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog_errno(ret); leave: - ocfs2_xattr_bucket_free(first); - ocfs2_xattr_bucket_free(target); return ret; } @@ -4357,16 +4319,16 @@ out: * We will move all the buckets starting from header_bh to the next place. As * for this one, half num of its xattrs will be moved to the next one. * - * We will allocate a new cluster if current cluster is full and adjust - * header_bh and first_bh if the insert place is moved to the new cluster. + * We will allocate a new cluster if current cluster is full. The + * underlying calls will make sure that there is space at the target + * bucket, shifting buckets around if necessary. 'target' may be updated + * by those calls. 
*/ static int ocfs2_add_new_xattr_bucket(struct inode *inode, struct buffer_head *xb_bh, struct buffer_head *header_bh, struct ocfs2_xattr_set_ctxt *ctxt) { - struct ocfs2_xattr_header *first_xh = NULL; - struct buffer_head *first_bh = NULL; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; @@ -4374,31 +4336,26 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)header_bh->b_data; u32 name_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); - struct super_block *sb = inode->i_sb; - struct ocfs2_super *osb = OCFS2_SB(sb); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int ret, num_buckets, extend = 1; u64 p_blkno; u32 e_cpos, num_clusters; /* The bucket at the front of the extent */ - struct ocfs2_xattr_bucket *first; + struct ocfs2_xattr_bucket *first, *target; mlog(0, "Add new xattr bucket starting form %llu\n", (unsigned long long)header_bh->b_blocknr); + /* The first bucket of the original extent */ first = ocfs2_xattr_bucket_new(inode); - if (!first) { + /* The target bucket for insert */ + target = ocfs2_xattr_bucket_new(inode); + if (!first || !target) { ret = -ENOMEM; mlog_errno(ret); goto out; } - /* - * Add refrence for header_bh here because it may be - * changed in ocfs2_add_new_xattr_cluster and we need - * to free it in the end. - */ - get_bh(header_bh); - ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos, &num_clusters, el); if (ret) { @@ -4406,23 +4363,30 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_read_block(inode, p_blkno, &first_bh, NULL); + ret = ocfs2_read_xattr_bucket(first, p_blkno); if (ret) { mlog_errno(ret); goto out; } - num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters; - first_xh = (struct ocfs2_xattr_header *)first_bh->b_data; + ret = ocfs2_read_xattr_bucket(target, header_bh->b_blocknr); + if (ret) { + mlog_errno(ret); + goto out; + } - if (num_buckets == le16_to_cpu(first_xh->xh_num_buckets)) { + num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters; + if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) { + /* + * This can move first+target if the target bucket moves + * to the new extent. + */ ret = ocfs2_add_new_xattr_cluster(inode, xb_bh, - &first_bh, - &header_bh, + first, + target, &num_clusters, e_cpos, - p_blkno, &extend, ctxt); if (ret) { @@ -4432,24 +4396,19 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, } if (extend) { - /* These bucket reads should be cached */ - ret = ocfs2_read_xattr_bucket(first, first_bh->b_blocknr); - if (ret) { - mlog_errno(ret); - goto out; - } ret = ocfs2_extend_xattr_bucket(inode, ctxt->handle, - first, header_bh->b_blocknr, + first, + bucket_blkno(target), num_clusters); if (ret) mlog_errno(ret); } out: - brelse(first_bh); - brelse(header_bh); ocfs2_xattr_bucket_free(first); + ocfs2_xattr_bucket_free(target); + return ret; } -- cgit From 91f2033fa997aa92607470ed1ef90685b9d77a8c Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 26 Nov 2008 15:25:41 -0800 Subject: ocfs2: Pass xs->bucket into ocfs2_add_new_xattr_bucket(). Pass the actual target bucket for insert through to ocfs2_add_new_xattr_bucket(). Now growing a bucket has no buffer_head knowledge. ocfs2_add_new_xattr_bucket() leavs xs->bucket in the proper state for insert. However, it doesn't update the rest of the search fields in xs, so we still have to relse() and re-find. 
That's OK, because everything is cached. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 5a5a1bd7eede..dfc51c305bb9 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -4314,43 +4314,42 @@ out: } /* - * Add new xattr bucket in an extent record and adjust the buckets accordingly. - * xb_bh is the ocfs2_xattr_block. - * We will move all the buckets starting from header_bh to the next place. As - * for this one, half num of its xattrs will be moved to the next one. + * Add new xattr bucket in an extent record and adjust the buckets + * accordingly. xb_bh is the ocfs2_xattr_block, and target is the + * bucket we want to insert into. * - * We will allocate a new cluster if current cluster is full. The - * underlying calls will make sure that there is space at the target - * bucket, shifting buckets around if necessary. 'target' may be updated - * by those calls. + * In the easy case, we will move all the buckets after target down by + * one. Half of target's xattrs will be moved to the next bucket. + * + * If current cluster is full, we'll allocate a new one. This may not + * be contiguous. The underlying calls will make sure that there is + * space for the insert, shifting buckets around if necessary. + * 'target' may be moved by those calls. */ static int ocfs2_add_new_xattr_bucket(struct inode *inode, struct buffer_head *xb_bh, - struct buffer_head *header_bh, + struct ocfs2_xattr_bucket *target, struct ocfs2_xattr_set_ctxt *ctxt) { struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; struct ocfs2_extent_list *el = &xb_root->xt_list; - struct ocfs2_xattr_header *xh = - (struct ocfs2_xattr_header *)header_bh->b_data; - u32 name_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); + u32 name_hash = + le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int ret, num_buckets, extend = 1; u64 p_blkno; u32 e_cpos, num_clusters; /* The bucket at the front of the extent */ - struct ocfs2_xattr_bucket *first, *target; + struct ocfs2_xattr_bucket *first; - mlog(0, "Add new xattr bucket starting form %llu\n", - (unsigned long long)header_bh->b_blocknr); + mlog(0, "Add new xattr bucket starting from %llu\n", + (unsigned long long)bucket_blkno(target)); /* The first bucket of the original extent */ first = ocfs2_xattr_bucket_new(inode); - /* The target bucket for insert */ - target = ocfs2_xattr_bucket_new(inode); - if (!first || !target) { + if (!first) { ret = -ENOMEM; mlog_errno(ret); goto out; @@ -4369,12 +4368,6 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, goto out; } - ret = ocfs2_read_xattr_bucket(target, header_bh->b_blocknr); - if (ret) { - mlog_errno(ret); - goto out; - } - num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters; if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) { /* @@ -4407,7 +4400,6 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, out: ocfs2_xattr_bucket_free(first); - ocfs2_xattr_bucket_free(target); return ret; } @@ -5083,15 +5075,21 @@ try_again: ret = ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, - xs->bucket->bu_bhs[0], + xs->bucket, ctxt); if (ret) { mlog_errno(ret); goto out; } + /* + * ocfs2_add_new_xattr_bucket() will have updated + * xs->bucket if it moved, but it 
will not have updated + * any of the other search fields. Thus, we drop it and + * re-search. Everything should be cached, so it'll be + * quick. + */ ocfs2_xattr_bucket_relse(xs->bucket); - ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, xi->name_index, xi->name, xs); -- cgit From 754938c142ae0c28360426c43f965ddc5164b21e Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Mon, 15 Dec 2008 06:03:41 +0800 Subject: ocfs2/quota: Add QUOTA in mlog_attribute. A new mlog mask has to be added into mlog_attribute before it can be really used in mlog. ML_QUOTA is only added in masklog.h, so add it to the array to enable it. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/cluster/masklog.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c index d8a0cb92cef6..96df5416993e 100644 --- a/fs/ocfs2/cluster/masklog.c +++ b/fs/ocfs2/cluster/masklog.c @@ -110,6 +110,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(QUORUM), define_mask(EXPORT), define_mask(XATTR), + define_mask(QUOTA), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), -- cgit From e06c8227fd94ec181849ba206bf032be31c4295c Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 11 Sep 2008 15:35:47 -0700 Subject: jbd2: Add buffer triggers Filesystems often to do compute intensive operation on some metadata. If this operation is repeated many times, it can be very expensive. It would be much nicer if the operation could be performed once before a buffer goes to disk. This adds triggers to jbd2 buffer heads. Just before writing a metadata buffer to the journal, jbd2 will optionally call a commit trigger associated with the buffer. If the journal is aborted, an abort trigger will be called on any dirty buffers as they are dropped from pending transactions. ocfs2 will use this feature. Initially I tried to come up with a more generic trigger that could be used for non-buffer-related events like transaction completion. It doesn't tie nicely, because the information a buffer trigger needs (specific to a journal_head) isn't the same as what a transaction trigger needs (specific to a tranaction_t or perhaps journal_t). So I implemented a buffer set, with the understanding that journal/transaction wide triggers should be implemented separately. There is only one trigger set allowed per buffer. I can't think of any reason to attach more than one set. Contrast this with a journal or transaction in which multiple places may want to watch the entire transaction separately. The trigger sets are considered static allocation from the jbd2 perspective. ocfs2 will just have one trigger set per block type, setting the same set on every bh of the same type. Signed-off-by: Joel Becker Cc: "Theodore Ts'o" Cc: Signed-off-by: Mark Fasheh --- fs/jbd2/commit.c | 9 +++++++++ fs/jbd2/journal.c | 19 +++++++++++++++++++ fs/jbd2/transaction.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+) (limited to 'fs') diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index ebc667bc54a8..c8a1bace685a 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -509,6 +509,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) if (is_journal_aborted(journal)) { clear_buffer_jbddirty(jh2bh(jh)); JBUFFER_TRACE(jh, "journal is aborting: refile"); + jbd2_buffer_abort_trigger(jh, + jh->b_frozen_data ? 
+ jh->b_frozen_triggers : + jh->b_triggers); jbd2_journal_refile_buffer(journal, jh); /* If that was the last one, we need to clean up * any descriptor buffers which may have been @@ -844,6 +848,9 @@ restart_loop: * data. * * Otherwise, we can just throw away the frozen data now. + * + * We also know that the frozen data has already fired + * its triggers if they exist, so we can clear that too. */ if (jh->b_committed_data) { jbd2_free(jh->b_committed_data, bh->b_size); @@ -851,10 +858,12 @@ restart_loop: if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; + jh->b_frozen_triggers = NULL; } } else if (jh->b_frozen_data) { jbd2_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; + jh->b_frozen_triggers = NULL; } spin_lock(&journal->j_list_lock); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e70d657a19f8..f6bff9d6f8df 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -50,6 +50,7 @@ EXPORT_SYMBOL(jbd2_journal_unlock_updates); EXPORT_SYMBOL(jbd2_journal_get_write_access); EXPORT_SYMBOL(jbd2_journal_get_create_access); EXPORT_SYMBOL(jbd2_journal_get_undo_access); +EXPORT_SYMBOL(jbd2_journal_set_triggers); EXPORT_SYMBOL(jbd2_journal_dirty_metadata); EXPORT_SYMBOL(jbd2_journal_release_buffer); EXPORT_SYMBOL(jbd2_journal_forget); @@ -290,6 +291,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct page *new_page; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); + struct jbd2_buffer_trigger_type *triggers; /* * The buffer really shouldn't be locked: only the current committing @@ -314,12 +316,22 @@ repeat: done_copy_out = 1; new_page = virt_to_page(jh_in->b_frozen_data); new_offset = offset_in_page(jh_in->b_frozen_data); + triggers = jh_in->b_frozen_triggers; } else { new_page = jh2bh(jh_in)->b_page; new_offset = offset_in_page(jh2bh(jh_in)->b_data); + triggers = jh_in->b_triggers; } mapped_data = kmap_atomic(new_page, KM_USER0); + /* + * Fire any commit trigger. Do this before checking for escaping, + * as the trigger may modify the magic offset. If a copy-out + * happens afterwards, it will have the correct data in the buffer. + */ + jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset, + triggers); + /* * Check for escaping */ @@ -352,6 +364,13 @@ repeat: new_page = virt_to_page(tmp); new_offset = offset_in_page(tmp); done_copy_out = 1; + + /* + * This isn't strictly necessary, as we're using frozen + * data for the escaping, but it keeps consistency with + * b_frozen_data usage. + */ + jh_in->b_frozen_triggers = jh_in->b_triggers; } /* diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 39b7805a599a..4f925a4f3d05 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -741,6 +741,12 @@ done: source = kmap_atomic(page, KM_USER0); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); kunmap_atomic(source, KM_USER0); + + /* + * Now that the frozen data is saved off, we need to store + * any matching triggers. + */ + jh->b_frozen_triggers = jh->b_triggers; } jbd_unlock_bh_state(bh); @@ -943,6 +949,47 @@ out: return err; } +/** + * void jbd2_journal_set_triggers() - Add triggers for commit writeout + * @bh: buffer to trigger on + * @type: struct jbd2_buffer_trigger_type containing the trigger(s). + * + * Set any triggers on this journal_head. This is always safe, because + * triggers for a committing buffer will be saved off, and triggers for + * a running transaction will match the buffer in that transaction. 
+ * + * Call with NULL to clear the triggers. + */ +void jbd2_journal_set_triggers(struct buffer_head *bh, + struct jbd2_buffer_trigger_type *type) +{ + struct journal_head *jh = bh2jh(bh); + + jh->b_triggers = type; +} + +void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data, + struct jbd2_buffer_trigger_type *triggers) +{ + struct buffer_head *bh = jh2bh(jh); + + if (!triggers || !triggers->t_commit) + return; + + triggers->t_commit(triggers, bh, mapped_data, bh->b_size); +} + +void jbd2_buffer_abort_trigger(struct journal_head *jh, + struct jbd2_buffer_trigger_type *triggers) +{ + if (!triggers || !triggers->t_abort) + return; + + triggers->t_abort(triggers, jh2bh(jh)); +} + + + /** * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata * @handle: transaction to add buffer to. -- cgit From ab552d54673f262d7f70014003d3928d29270f22 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 16 Oct 2008 17:50:30 -0700 Subject: ocfs2: Add the on-disk structures for metadata checksums. Define struct ocfs2_block_check, an 8-byte structure containing a 32bit crc32_le and a 16bit hamming code ecc. This will be used for metadata checksums. Add the structure to free spaces in the various metadata structures. Add the OCFS2_FEATURE_INCOMPAT_META_ECC bit. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/ocfs2_fs.h | 55 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 359732e18e82..290fa26fba6e 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -149,6 +149,9 @@ /* Support for extended attributes */ #define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200 +/* Metadata checksum and error correction */ +#define OCFS2_FEATURE_INCOMPAT_META_ECC 0x0800 + /* * backup superblock flag is used to indicate that this volume * has backup superblocks. @@ -426,6 +429,22 @@ static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = { */ #define OCFS2_RAW_SB(dinode) (&((dinode)->id2.i_super)) +/* + * Block checking structure. This is used in metadata to validate the + * contents. If OCFS2_FEATURE_INCOMPAT_META_ECC is not set, it is all + * zeros. + */ +struct ocfs2_block_check { +/*00*/ __le32 bc_crc32e; /* 802.3 Ethernet II CRC32 */ + __le16 bc_ecc; /* Single-error-correction parity vector. + This is a simple Hamming code dependant + on the blocksize. OCFS2's maximum + blocksize, 4K, requires 16 parity bits, + so we fit in __le16. */ + __le16 bc_reserved1; +/*08*/ +}; + /* * On disk extent record for OCFS2 * It describes a range of clusters on disk. 
@@ -513,7 +532,7 @@ struct ocfs2_truncate_log { struct ocfs2_extent_block { /*00*/ __u8 h_signature[8]; /* Signature for verification */ - __le64 h_reserved1; + struct ocfs2_block_check h_check; /* Error checking */ /*10*/ __le16 h_suballoc_slot; /* Slot suballocator this extent_header belongs to */ __le16 h_suballoc_bit; /* Bit offset in suballocator @@ -683,7 +702,8 @@ struct ocfs2_dinode { was set in i_flags */ __le16 i_dyn_features; __le64 i_xattr_loc; -/*80*/ __le64 i_reserved2[7]; +/*80*/ struct ocfs2_block_check i_check; /* Error checking */ +/*88*/ __le64 i_reserved2[6]; /*B8*/ union { __le64 i_pad1; /* Generic way to refer to this 64bit union */ @@ -750,7 +770,8 @@ struct ocfs2_group_desc /*20*/ __le64 bg_parent_dinode; /* dinode which owns me, in blocks */ __le64 bg_blkno; /* Offset on disk, in blocks */ -/*30*/ __le64 bg_reserved2[2]; +/*30*/ struct ocfs2_block_check bg_check; /* Error checking */ + __le64 bg_reserved2; /*40*/ __u8 bg_bitmap[0]; }; @@ -793,7 +814,12 @@ struct ocfs2_xattr_header { in this extent record, only valid in the first bucket. */ - __le64 xh_csum; + struct ocfs2_block_check xh_check; /* Error checking + (Note, this is only + used for xattr + buckets. A block uses + xb_check and sets + this field to zero.) */ struct ocfs2_xattr_entry xh_entries[0]; /* xattr entry list. */ }; @@ -844,7 +870,7 @@ struct ocfs2_xattr_block { block group */ __le32 xb_fs_generation; /* Must match super block */ /*10*/ __le64 xb_blkno; /* Offset on disk, in blocks */ - __le64 xb_csum; + struct ocfs2_block_check xb_check; /* Error checking */ /*20*/ __le16 xb_flags; /* Indicates whether this block contains real xattr or a xattr tree. */ __le16 xb_reserved0; @@ -988,6 +1014,25 @@ struct ocfs2_local_disk_dqblk { /*10*/ __le64 dqb_inodemod; /* Change in the amount of used inodes */ }; + +/* + * The quota trailer lives at the end of each quota block. + */ + +struct ocfs2_disk_dqtrailer { +/*00*/ struct ocfs2_block_check dq_check; /* Error checking */ +/*08*/ /* Cannot be larger than OCFS2_QBLK_RESERVED_SPACE */ +}; + +static inline struct ocfs2_disk_dqtrailer *ocfs2_block_dqtrailer(int blocksize, + void *buf) +{ + char *ptr = buf; + ptr += blocksize - OCFS2_QBLK_RESERVED_SPACE; + + return (struct ocfs2_disk_dqtrailer *)ptr; +} + #ifdef __KERNEL__ static inline int ocfs2_fast_symlink_chars(struct super_block *sb) { -- cgit From 70ad1ba7b48364d758a112df0823edc5ca6632aa Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 16 Oct 2008 17:54:25 -0700 Subject: ocfs2: Add the underlying blockcheck code. This is the code that computes crc32 and ecc for ocfs2 metadata blocks. There are high-level functions that check whether the filesystem has the ecc feature, mid-level functions that work on a single block or array of buffer_heads, and the low-level ecc hamming code that can handle multiple buffers like crc32_le(). It's not hooked up to the filesystem yet. 
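Two notes to make the new code concrete. The encoder sizes the parity vector as
the smallest p with d + p + 1 <= 2^p. For the largest ocfs2 blocksize of 4K,
d = 4096 * 8 = 32768 data bits:

        p = 15:  32768 + 15 + 1 = 32784  >  2^15 = 32768   (too small)
        p = 16:  32768 + 16 + 1 = 32785  <= 2^16 = 65536   (fits)

so 16 parity bits suffice and bc_ecc fits in a __le16. Using the mid-level,
single-block API looks roughly like this (illustrative fragment; 'bh' is
assumed to hold a dinode already in disk endian, 'rc' is in scope):

        struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

        /* before the block goes to disk */
        ocfs2_block_check_compute(bh->b_data, bh->b_size, &di->i_check);

        /* after reading it back */
        rc = ocfs2_block_check_validate(bh->b_data, bh->b_size, &di->i_check);
        if (rc)
                return rc;      /* -EIO: crc32 still bad after the one-bit ECC fix */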
Signed-off-by: Joel Becker Cc: Christoph Hellwig Signed-off-by: Mark Fasheh --- fs/ocfs2/Makefile | 1 + fs/ocfs2/blockcheck.c | 480 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/blockcheck.h | 82 +++++++++ fs/ocfs2/ocfs2.h | 8 + 4 files changed, 571 insertions(+) create mode 100644 fs/ocfs2/blockcheck.c create mode 100644 fs/ocfs2/blockcheck.h (limited to 'fs') diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 7e4b361b755c..01596079dd63 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_OCFS2_FS_USERSPACE_CLUSTER) += ocfs2_stack_user.o ocfs2-objs := \ alloc.o \ aops.o \ + blockcheck.o \ buffer_head_io.o \ dcache.o \ dir.o \ diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c new file mode 100644 index 000000000000..2bf3d7f61aec --- /dev/null +++ b/fs/ocfs2/blockcheck.c @@ -0,0 +1,480 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * blockcheck.c + * + * Checksum and ECC codes for the OCFS2 userspace library. + * + * Copyright (C) 2006, 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include "ocfs2.h" + +#include "blockcheck.h" + + + +/* + * We use the following conventions: + * + * d = # data bits + * p = # parity bits + * c = # total code bits (d + p) + */ +static int calc_parity_bits(unsigned int d) +{ + unsigned int p; + + /* + * Bits required for Single Error Correction is as follows: + * + * d + p + 1 <= 2^p + * + * We're restricting ourselves to 31 bits of parity, that should be + * sufficient. + */ + for (p = 1; p < 32; p++) + { + if ((d + p + 1) <= (1 << p)) + return p; + } + + return 0; +} + +/* + * Calculate the bit offset in the hamming code buffer based on the bit's + * offset in the data buffer. Since the hamming code reserves all + * power-of-two bits for parity, the data bit number and the code bit + * number are offest by all the parity bits beforehand. + * + * Recall that bit numbers in hamming code are 1-based. This function + * takes the 0-based data bit from the caller. + * + * An example. Take bit 1 of the data buffer. 1 is a power of two (2^0), + * so it's a parity bit. 2 is a power of two (2^1), so it's a parity bit. + * 3 is not a power of two. So bit 1 of the data buffer ends up as bit 3 + * in the code buffer. + */ +static unsigned int calc_code_bit(unsigned int i) +{ + unsigned int b, p; + + /* + * Data bits are 0-based, but we're talking code bits, which + * are 1-based. + */ + b = i + 1; + + /* + * For every power of two below our bit number, bump our bit. + * + * We compare with (b + 1) becuase we have to compare with what b + * would be _if_ it were bumped up by the parity bit. Capice? + */ + for (p = 0; (1 << p) < (b + 1); p++) + b++; + + return b; +} + +/* + * This is the low level encoder function. It can be called across + * multiple hunks just like the crc32 code. 'd' is the number of bits + * _in_this_hunk_. nr is the bit offset of this hunk. 
So, if you had + * two 512B buffers, you would do it like so: + * + * parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0); + * parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8); + * + * If you just have one buffer, use ocfs2_hamming_encode_block(). + */ +u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr) +{ + unsigned int p = calc_parity_bits(nr + d); + unsigned int i, j, b; + + BUG_ON(!p); + + /* + * b is the hamming code bit number. Hamming code specifies a + * 1-based array, but C uses 0-based. So 'i' is for C, and 'b' is + * for the algorithm. + * + * The i++ in the for loop is so that the start offset passed + * to ocfs2_find_next_bit_set() is one greater than the previously + * found bit. + */ + for (i = 0; (i = ocfs2_find_next_bit(data, d, i)) < d; i++) + { + /* + * i is the offset in this hunk, nr + i is the total bit + * offset. + */ + b = calc_code_bit(nr + i); + + for (j = 0; j < p; j++) + { + /* + * Data bits in the resultant code are checked by + * parity bits that are part of the bit number + * representation. Huh? + * + * + * In other words, the parity bit at position 2^k + * checks bits in positions having bit k set in + * their binary representation. Conversely, for + * instance, bit 13, i.e. 1101(2), is checked by + * bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1. + * + * + * Note that 'k' is the _code_ bit number. 'b' in + * our loop. + */ + if (b & (1 << j)) + parity ^= (1 << j); + } + } + + /* While the data buffer was treated as little endian, the + * return value is in host endian. */ + return parity; +} + +u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize) +{ + return ocfs2_hamming_encode(0, data, blocksize * 8, 0); +} + +/* + * Like ocfs2_hamming_encode(), this can handle hunks. nr is the bit + * offset of the current hunk. If bit to be fixed is not part of the + * current hunk, this does nothing. + * + * If you only have one hunk, use ocfs2_hamming_fix_block(). + */ +void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, + unsigned int fix) +{ + unsigned int p = calc_parity_bits(nr + d); + unsigned int i, b; + + BUG_ON(!p); + + /* + * If the bit to fix has an hweight of 1, it's a parity bit. One + * busted parity bit is its own error. Nothing to do here. + */ + if (hweight32(fix) == 1) + return; + + /* + * nr + d is the bit right past the data hunk we're looking at. + * If fix after that, nothing to do + */ + if (fix >= calc_code_bit(nr + d)) + return; + + /* + * nr is the offset in the data hunk we're starting at. Let's + * start b at the offset in the code buffer. See hamming_encode() + * for a more detailed description of 'b'. + */ + b = calc_code_bit(nr); + /* If the fix is before this hunk, nothing to do */ + if (fix < b) + return; + + for (i = 0; i < d; i++, b++) + { + /* Skip past parity bits */ + while (hweight32(b) == 1) + b++; + + /* + * i is the offset in this data hunk. + * nr + i is the offset in the total data buffer. + * b is the offset in the total code buffer. + * + * Thus, when b == fix, bit i in the current hunk needs + * fixing. + */ + if (b == fix) + { + if (ocfs2_test_bit(i, data)) + ocfs2_clear_bit(i, data); + else + ocfs2_set_bit(i, data); + break; + } + } +} + +void ocfs2_hamming_fix_block(void *data, unsigned int blocksize, + unsigned int fix) +{ + ocfs2_hamming_fix(data, blocksize * 8, 0, fix); +} + +/* + * This function generates check information for a block. + * data is the block to be checked. 
bc is a pointer to the + * ocfs2_block_check structure describing the crc32 and the ecc. + * + * bc should be a pointer inside data, as the function will + * take care of zeroing it before calculating the check information. If + * bc does not point inside data, the caller must make sure any inline + * ocfs2_block_check structures are zeroed. + * + * The data buffer must be in on-disk endian (little endian for ocfs2). + * bc will be filled with little-endian values and will be ready to go to + * disk. + */ +void ocfs2_block_check_compute(void *data, size_t blocksize, + struct ocfs2_block_check *bc) +{ + u32 crc; + u32 ecc; + + memset(bc, 0, sizeof(struct ocfs2_block_check)); + + crc = crc32_le(~0, data, blocksize); + ecc = ocfs2_hamming_encode_block(data, blocksize); + + /* + * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no + * larger than 16 bits. + */ + BUG_ON(ecc > USHORT_MAX); + + bc->bc_crc32e = cpu_to_le32(crc); + bc->bc_ecc = cpu_to_le16((u16)ecc); +} + +/* + * This function validates existing check information. Like _compute, + * the function will take care of zeroing bc before calculating check codes. + * If bc is not a pointer inside data, the caller must have zeroed any + * inline ocfs2_block_check structures. + * + * Again, the data passed in should be the on-disk endian. + */ +int ocfs2_block_check_validate(void *data, size_t blocksize, + struct ocfs2_block_check *bc) +{ + int rc = 0; + struct ocfs2_block_check check; + u32 crc, ecc; + + check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); + check.bc_ecc = le16_to_cpu(bc->bc_ecc); + + memset(bc, 0, sizeof(struct ocfs2_block_check)); + + /* Fast path - if the crc32 validates, we're good to go */ + crc = crc32_le(~0, data, blocksize); + if (crc == check.bc_crc32e) + goto out; + + /* Ok, try ECC fixups */ + ecc = ocfs2_hamming_encode_block(data, blocksize); + ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc); + + /* And check the crc32 again */ + crc = crc32_le(~0, data, blocksize); + if (crc == check.bc_crc32e) + goto out; + + rc = -EIO; + +out: + bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); + bc->bc_ecc = cpu_to_le16(check.bc_ecc); + + return rc; +} + +/* + * This function generates check information for a list of buffer_heads. + * bhs is the blocks to be checked. bc is a pointer to the + * ocfs2_block_check structure describing the crc32 and the ecc. + * + * bc should be a pointer inside data, as the function will + * take care of zeroing it before calculating the check information. If + * bc does not point inside data, the caller must make sure any inline + * ocfs2_block_check structures are zeroed. + * + * The data buffer must be in on-disk endian (little endian for ocfs2). + * bc will be filled with little-endian values and will be ready to go to + * disk. + */ +void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc) +{ + int i; + u32 crc, ecc; + + BUG_ON(nr < 0); + + if (!nr) + return; + + memset(bc, 0, sizeof(struct ocfs2_block_check)); + + for (i = 0, crc = ~0, ecc = 0; i < nr; i++) { + crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); + /* + * The number of bits in a buffer is obviously b_size*8. + * The offset of this buffer is b_size*i, so the bit offset + * of this buffer is b_size*8*i. + */ + ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, + bhs[i]->b_size * 8, + bhs[i]->b_size * 8 * i); + } + + /* + * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no + * larger than 16 bits. 
+ */ + BUG_ON(ecc > USHORT_MAX); + + bc->bc_crc32e = cpu_to_le32(crc); + bc->bc_ecc = cpu_to_le16((u16)ecc); +} + +/* + * This function validates existing check information on a list of + * buffer_heads. Like _compute_bhs, the function will take care of + * zeroing bc before calculating check codes. If bc is not a pointer + * inside data, the caller must have zeroed any inline + * ocfs2_block_check structures. + * + * Again, the data passed in should be the on-disk endian. + */ +int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc) +{ + int i, rc = 0; + struct ocfs2_block_check check; + u32 crc, ecc, fix; + + BUG_ON(nr < 0); + + if (!nr) + return 0; + + check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); + check.bc_ecc = le16_to_cpu(bc->bc_ecc); + + memset(bc, 0, sizeof(struct ocfs2_block_check)); + + /* Fast path - if the crc32 validates, we're good to go */ + for (i = 0, crc = ~0; i < nr; i++) + crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); + if (crc == check.bc_crc32e) + goto out; + + mlog(ML_ERROR, + "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", + (unsigned int)check.bc_crc32e, (unsigned int)crc); + + /* Ok, try ECC fixups */ + for (i = 0, ecc = 0; i < nr; i++) { + /* + * The number of bits in a buffer is obviously b_size*8. + * The offset of this buffer is b_size*i, so the bit offset + * of this buffer is b_size*8*i. + */ + ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, + bhs[i]->b_size * 8, + bhs[i]->b_size * 8 * i); + } + fix = ecc ^ check.bc_ecc; + for (i = 0; i < nr; i++) { + /* + * Try the fix against each buffer. It will only affect + * one of them. + */ + ocfs2_hamming_fix(bhs[i]->b_data, bhs[i]->b_size * 8, + bhs[i]->b_size * 8 * i, fix); + } + + /* And check the crc32 again */ + for (i = 0, crc = ~0; i < nr; i++) + crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); + if (crc == check.bc_crc32e) + goto out; + + mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", + (unsigned int)check.bc_crc32e, (unsigned int)crc); + + rc = -EIO; + +out: + bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); + bc->bc_ecc = cpu_to_le16(check.bc_ecc); + + return rc; +} + +/* + * These are the main API. They check the superblock flag before + * calling the underlying operations. + * + * They expect the buffer(s) to be in disk format. 
+ */ +void ocfs2_compute_meta_ecc(struct super_block *sb, void *data, + struct ocfs2_block_check *bc) +{ + if (ocfs2_meta_ecc(OCFS2_SB(sb))) + ocfs2_block_check_compute(data, sb->s_blocksize, bc); +} + +int ocfs2_validate_meta_ecc(struct super_block *sb, void *data, + struct ocfs2_block_check *bc) +{ + int rc = 0; + + if (ocfs2_meta_ecc(OCFS2_SB(sb))) + rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc); + + return rc; +} + +void ocfs2_compute_meta_ecc_bhs(struct super_block *sb, + struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc) +{ + if (ocfs2_meta_ecc(OCFS2_SB(sb))) + ocfs2_block_check_compute_bhs(bhs, nr, bc); +} + +int ocfs2_validate_meta_ecc_bhs(struct super_block *sb, + struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc) +{ + int rc = 0; + + if (ocfs2_meta_ecc(OCFS2_SB(sb))) + rc = ocfs2_block_check_validate_bhs(bhs, nr, bc); + + return rc; +} + diff --git a/fs/ocfs2/blockcheck.h b/fs/ocfs2/blockcheck.h new file mode 100644 index 000000000000..70ec3feda32f --- /dev/null +++ b/fs/ocfs2/blockcheck.h @@ -0,0 +1,82 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * blockcheck.h + * + * Checksum and ECC codes for the OCFS2 userspace library. + * + * Copyright (C) 2004, 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef OCFS2_BLOCKCHECK_H +#define OCFS2_BLOCKCHECK_H + + +/* High level block API */ +void ocfs2_compute_meta_ecc(struct super_block *sb, void *data, + struct ocfs2_block_check *bc); +int ocfs2_validate_meta_ecc(struct super_block *sb, void *data, + struct ocfs2_block_check *bc); +void ocfs2_compute_meta_ecc_bhs(struct super_block *sb, + struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc); +int ocfs2_validate_meta_ecc_bhs(struct super_block *sb, + struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc); + +/* Lower level API */ +void ocfs2_block_check_compute(void *data, size_t blocksize, + struct ocfs2_block_check *bc); +int ocfs2_block_check_validate(void *data, size_t blocksize, + struct ocfs2_block_check *bc); +void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc); +int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, + struct ocfs2_block_check *bc); + +/* + * Hamming code functions + */ + +/* + * Encoding hamming code parity bits for a buffer. + * + * This is the low level encoder function. It can be called across + * multiple hunks just like the crc32 code. 'd' is the number of bits + * _in_this_hunk_. nr is the bit offset of this hunk. So, if you had + * two 512B buffers, you would do it like so: + * + * parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0); + * parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8); + * + * If you just have one buffer, use ocfs2_hamming_encode_block(). + */ +u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, + unsigned int nr); +/* + * Fix a buffer with a bit error. The 'fix' is the original parity + * xor'd with the parity calculated now. + * + * Like ocfs2_hamming_encode(), this can handle hunks. 
nr is the bit + * offset of the current hunk. If bit to be fixed is not part of the + * current hunk, this does nothing. + * + * If you only have one buffer, use ocfs2_hamming_fix_block(). + */ +void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, + unsigned int fix); + +/* Convenience wrappers for a single buffer of data */ +extern u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize); +extern void ocfs2_hamming_fix_block(void *data, unsigned int blocksize, + unsigned int fix); +#endif diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 5c777988042f..2bb389fe7397 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -382,6 +382,13 @@ static inline int ocfs2_supports_xattr(struct ocfs2_super *osb) return 0; } +static inline int ocfs2_meta_ecc(struct ocfs2_super *osb) +{ + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_META_ECC) + return 1; + return 0; +} + /* set / clear functions because cluster events can make these happen * in parallel so we want the transitions to be atomic. this also * means that any future flags osb_flags must be protected by spinlock @@ -615,5 +622,6 @@ static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb) #define ocfs2_clear_bit ext2_clear_bit #define ocfs2_test_bit ext2_test_bit #define ocfs2_find_next_zero_bit ext2_find_next_zero_bit +#define ocfs2_find_next_bit ext2_find_next_bit #endif /* OCFS2_H */ -- cgit From 684ef278377725d505aa23259ee673dab9b11851 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 2 Dec 2008 17:44:05 -0800 Subject: ocfs2: Add a validation hook for quota block reads. Add a currently-returns-success hook for quota block reads. We'll be adding checks to this. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index a10faebe88a1..7dbcfd7f65e6 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -87,13 +87,25 @@ struct qtree_fmt_operations ocfs2_global_ops = { .is_id = ocfs2_global_is_id, }; +static int ocfs2_validate_quota_block(struct super_block *sb, + struct buffer_head *bh) +{ + struct ocfs2_disk_dqtrailer *dqt = ocfs2_dq_trailer(sb, bh->b_data); + + mlog(0, "Validating quota block %llu\n", + (unsigned long long)bh->b_blocknr); + + return 0; +} + int ocfs2_read_quota_block(struct inode *inode, u64 v_block, struct buffer_head **bh) { int rc = 0; struct buffer_head *tmp = *bh; - rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, NULL); + rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, + ocfs2_validate_quota_block); if (rc) mlog_errno(rc); -- cgit From d6b32bbb3eae3fb787f1c33bf9f767ca1ddeb208 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 17 Oct 2008 14:55:01 -0700 Subject: ocfs2: block read meta ecc. Add block check calls to the read_block validate functions. This is the almost all of the read-side checking of metaecc. xattr buckets are not checked yet. Writes are also unchecked, and so a read-write mount will quickly fail. 
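The write side is expected to hook the jbd2 buffer triggers added earlier in
this series, one trigger set per metadata block type, so the check is computed
on the journal's copy just before writeout. A hedged sketch of what such a
hookup could look like (the wrapper struct and the names ocfs2_triggers,
ot_offset, di_triggers and ocfs2_commit_trigger are illustrative, not taken
from these patches):

        /* Illustrative only: wrap the jbd2 trigger with the check's offset */
        struct ocfs2_triggers {
                struct jbd2_buffer_trigger_type ot_triggers;
                int                             ot_offset;  /* where ocfs2_block_check lives */
        };

        static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
                                         struct buffer_head *bh,
                                         void *data, size_t size)
        {
                struct ocfs2_triggers *ot =
                        container_of(triggers, struct ocfs2_triggers, ot_triggers);

                /* compute crc32/ecc on the mapped journal copy before writeout */
                ocfs2_block_check_compute(data, size,
                        (struct ocfs2_block_check *)((char *)data + ot->ot_offset));
        }

        static struct ocfs2_triggers di_triggers = {
                .ot_triggers    = { .t_commit = ocfs2_commit_trigger },
                .ot_offset      = offsetof(struct ocfs2_dinode, i_check),
        };

        /* when taking journal write access to an inode block */
        jbd2_journal_set_triggers(bh, &di_triggers.ot_triggers);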
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 17 +++++++++++++++++ fs/ocfs2/blockcheck.c | 9 +++++++++ fs/ocfs2/inode.c | 18 +++++++++++++++++- fs/ocfs2/quota_global.c | 13 +++++++++++-- fs/ocfs2/suballoc.c | 31 ++++++++++++++++++++++++++++++- fs/ocfs2/xattr.c | 17 +++++++++++++++++ 6 files changed, 101 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 84a7bd4db5da..6b27f74bb346 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -37,6 +37,7 @@ #include "alloc.h" #include "aops.h" +#include "blockcheck.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" @@ -682,12 +683,28 @@ struct ocfs2_merge_ctxt { static int ocfs2_validate_extent_block(struct super_block *sb, struct buffer_head *bh) { + int rc; struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data; mlog(0, "Validating extent block %llu\n", (unsigned long long)bh->b_blocknr); + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); + if (rc) + return rc; + + /* + * Errors after here are fatal. + */ + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { ocfs2_error(sb, "Extent block #%llu has bad signature %.*s", diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index 2bf3d7f61aec..2ce6ae5e4b8c 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -24,6 +24,8 @@ #include #include +#include + #include "ocfs2.h" #include "blockcheck.h" @@ -292,6 +294,10 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, if (crc == check.bc_crc32e) goto out; + mlog(ML_ERROR, + "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", + (unsigned int)check.bc_crc32e, (unsigned int)crc); + /* Ok, try ECC fixups */ ecc = ocfs2_hamming_encode_block(data, blocksize); ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc); @@ -301,6 +307,9 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, if (crc == check.bc_crc32e) goto out; + mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", + (unsigned int)check.bc_crc32e, (unsigned int)crc); + rc = -EIO; out: diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 288512c9dbc2..9370b652ab94 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -38,6 +38,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" @@ -1262,7 +1263,7 @@ void ocfs2_refresh_inode(struct inode *inode, int ocfs2_validate_inode_block(struct super_block *sb, struct buffer_head *bh) { - int rc = -EINVAL; + int rc; struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; mlog(0, "Validating dinode %llu\n", @@ -1270,6 +1271,21 @@ int ocfs2_validate_inode_block(struct super_block *sb, BUG_ON(!buffer_uptodate(bh)); + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); + if (rc) + goto bail; + + /* + * Errors after here are fatal. 
+ */ + + rc = -EINVAL; + if (!OCFS2_IS_VALID_DINODE(di)) { ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n", (unsigned long long)bh->b_blocknr, 7, diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 7dbcfd7f65e6..a0b8b14cca8f 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -16,6 +16,7 @@ #include "ocfs2_fs.h" #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "inode.h" #include "journal.h" #include "file.h" @@ -90,12 +91,20 @@ struct qtree_fmt_operations ocfs2_global_ops = { static int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh) { - struct ocfs2_disk_dqtrailer *dqt = ocfs2_dq_trailer(sb, bh->b_data); + struct ocfs2_disk_dqtrailer *dqt = + ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); mlog(0, "Validating quota block %llu\n", (unsigned long long)bh->b_blocknr); - return 0; + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + */ + return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check); } int ocfs2_read_quota_block(struct inode *inode, u64 v_block, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 226fe21f2608..78755766c329 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -35,6 +35,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dlmglue.h" #include "inode.h" #include "journal.h" @@ -250,8 +251,18 @@ int ocfs2_check_group_descriptor(struct super_block *sb, struct buffer_head *bh) { int rc; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; + + BUG_ON(!buffer_uptodate(bh)); - rc = ocfs2_validate_gd_self(sb, bh, 1); + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check); + if (!rc) + rc = ocfs2_validate_gd_self(sb, bh, 1); if (!rc) rc = ocfs2_validate_gd_parent(sb, di, bh, 1); @@ -261,9 +272,27 @@ int ocfs2_check_group_descriptor(struct super_block *sb, static int ocfs2_validate_group_descriptor(struct super_block *sb, struct buffer_head *bh) { + int rc; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; + mlog(0, "Validating group descriptor %llu\n", (unsigned long long)bh->b_blocknr); + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check); + if (rc) + return rc; + + /* + * Errors after here are fatal. + */ + return ocfs2_validate_gd_self(sb, bh, 0); } diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index dfc51c305bb9..bc822d6ba542 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -42,6 +42,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dlmglue.h" #include "file.h" #include "symlink.h" @@ -322,12 +323,28 @@ static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest, static int ocfs2_validate_xattr_block(struct super_block *sb, struct buffer_head *bh) { + int rc; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)bh->b_data; mlog(0, "Validating xattr block %llu\n", (unsigned long long)bh->b_blocknr); + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. 
+ */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check); + if (rc) + return rc; + + /* + * Errors after here are fatal + */ + if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { ocfs2_error(sb, "Extended attribute block #%llu has bad " -- cgit From 50655ae9e91d272d48997bada59efe166aa5e343 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 11 Sep 2008 15:53:07 -0700 Subject: ocfs2: Add journal_access functions with jbd2 triggers. We create wrappers for ocfs2_journal_access() that are specific to the type of metadata block. This allows us to associate jbd2 commit triggers with the block. The triggers will compute metadata ecc in a future commit. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/journal.c | 159 +++++++++++++++++++++++++++++++++++++++++++++++++++-- fs/ocfs2/journal.h | 31 +++++++++-- 2 files changed, 181 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 302f1144a708..2daa5848faf2 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -35,6 +35,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dir.h" #include "dlmglue.h" #include "extent_map.h" @@ -369,10 +370,110 @@ bail: return status; } -int ocfs2_journal_access(handle_t *handle, - struct inode *inode, - struct buffer_head *bh, - int type) +struct ocfs2_triggers { + struct jbd2_buffer_trigger_type ot_triggers; + int ot_offset; +}; + +static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers) +{ + return container_of(triggers, struct ocfs2_triggers, ot_triggers); +} + +static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, + struct buffer_head *bh, + void *data, size_t size) +{ + struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers); + + /* + * We aren't guaranteed to have the superblock here, so we + * must unconditionally compute the ecc data. + * __ocfs2_journal_access() will only set the triggers if + * metaecc is enabled. + */ + ocfs2_block_check_compute(data, size, data + ot->ot_offset); +} + +/* + * Quota blocks have their own trigger because the struct ocfs2_block_check + * offset depends on the blocksize. + */ +static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, + struct buffer_head *bh, + void *data, size_t size) +{ + struct ocfs2_disk_dqtrailer *dqt = + ocfs2_block_dqtrailer(size, data); + + /* + * We aren't guaranteed to have the superblock here, so we + * must unconditionally compute the ecc data. + * __ocfs2_journal_access() will only set the triggers if + * metaecc is enabled. + */ + ocfs2_block_check_compute(data, size, &dqt->dq_check); +} + +static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, + struct buffer_head *bh) +{ + mlog(ML_ERROR, + "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, " + "bh->b_blocknr = %llu\n", + (unsigned long)bh, + (unsigned long long)bh->b_blocknr); + + /* We aren't guaranteed to have the superblock here - but if we + * don't, it'll just crash. 
*/ + ocfs2_error(bh->b_assoc_map->host->i_sb, + "JBD2 has aborted our journal, ocfs2 cannot continue\n"); +} + +static struct ocfs2_triggers di_triggers = { + .ot_triggers = { + .t_commit = ocfs2_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, + .ot_offset = offsetof(struct ocfs2_dinode, i_check), +}; + +static struct ocfs2_triggers eb_triggers = { + .ot_triggers = { + .t_commit = ocfs2_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, + .ot_offset = offsetof(struct ocfs2_extent_block, h_check), +}; + +static struct ocfs2_triggers gd_triggers = { + .ot_triggers = { + .t_commit = ocfs2_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, + .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), +}; + +static struct ocfs2_triggers xb_triggers = { + .ot_triggers = { + .t_commit = ocfs2_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, + .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), +}; + +static struct ocfs2_triggers dq_triggers = { + .ot_triggers = { + .t_commit = ocfs2_dq_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, +}; + +static int __ocfs2_journal_access(handle_t *handle, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_triggers *triggers, + int type) { int status; @@ -418,6 +519,8 @@ int ocfs2_journal_access(handle_t *handle, status = -EINVAL; mlog(ML_ERROR, "Uknown access type!\n"); } + if (!status && ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)) && triggers) + jbd2_journal_set_triggers(bh, &triggers->ot_triggers); mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); if (status < 0) @@ -428,6 +531,54 @@ int ocfs2_journal_access(handle_t *handle, return status; } +int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, &di_triggers, + type); +} + +int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, &eb_triggers, + type); +} + +int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, &gd_triggers, + type); +} + +int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + /* Right now, nothing for dirblocks */ + return __ocfs2_journal_access(handle, inode, bh, NULL, type); +} + +int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, &xb_triggers, + type); +} + +int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, &dq_triggers, + type); +} + +int ocfs2_journal_access(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, inode, bh, NULL, type); +} + int ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh) { diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 37013bf9ce28..bca370dab021 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -212,9 +212,12 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode) * ocfs2_extend_trans - Extend a handle by nblocks credits. This may * commit the handle to disk in the process, but will * not release any locks taken during the transaction. 
- * ocfs2_journal_access - Notify the handle that we want to journal this + * ocfs2_journal_access* - Notify the handle that we want to journal this * buffer. Will have to call ocfs2_journal_dirty once * we've actually dirtied it. Type is one of . or . + * Always call the specific flavor of + * ocfs2_journal_access_*() unless you intend to + * manage the checksum by hand. * ocfs2_journal_dirty - Mark a journalled buffer as having dirty data. * ocfs2_jbd2_file_inode - Mark an inode so that its data goes out before * the current handle commits. @@ -244,10 +247,28 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks); #define OCFS2_JOURNAL_ACCESS_WRITE 1 #define OCFS2_JOURNAL_ACCESS_UNDO 2 -int ocfs2_journal_access(handle_t *handle, - struct inode *inode, - struct buffer_head *bh, - int type); +/* ocfs2_inode */ +int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* ocfs2_extent_block */ +int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* ocfs2_group_desc */ +int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* ocfs2_xattr_block */ +int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* quota blocks */ +int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* dirblock */ +int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); +/* Anything that has no ecc */ +int ocfs2_journal_access(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); + /* * A word about the journal_access/journal_dirty "dance". It is * entirely legal to journal_access a buffer more than once (as long -- cgit From ffdd7a54631f07918b75e324d86713a08c11ec06 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 17 Oct 2008 22:32:01 -0700 Subject: ocfs2: Wrap up the common use cases of ocfs2_new_path(). The majority of ocfs2_new_path() calls are: ocfs2_new_path(path_root_bh(otherpath), path_root_el(otherpath)); Let's call that ocfs2_new_path_from_path(). The rest do similar things from struct ocfs2_extent_tree. Let's call those ocfs2_new_path_from_et(). This will make the next change easier. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 6b27f74bb346..c22ff49b5e33 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -532,6 +532,16 @@ static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, return path; } +static struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path) +{ + return ocfs2_new_path(path_root_bh(path), path_root_el(path)); +} + +static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et) +{ + return ocfs2_new_path(et->et_root_bh, et->et_root_el); +} + /* * Convenience function to journal all components in a path. 
*/ @@ -2150,8 +2160,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, *ret_left_path = NULL; - left_path = ocfs2_new_path(path_root_bh(right_path), - path_root_el(right_path)); + left_path = ocfs2_new_path_from_path(right_path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -2692,8 +2701,7 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, goto out; } - left_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + left_path = ocfs2_new_path_from_path(path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -2702,8 +2710,7 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, ocfs2_cp_path(left_path, path); - right_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + right_path = ocfs2_new_path_from_path(path); if (!right_path) { ret = -ENOMEM; mlog_errno(ret); @@ -2833,8 +2840,7 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, * We have a path to the left of this one - it needs * an update too. */ - left_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + left_path = ocfs2_new_path_from_path(path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -3075,8 +3081,7 @@ static int ocfs2_get_right_path(struct inode *inode, /* This function shouldn't be called for the rightmost leaf. */ BUG_ON(right_cpos == 0); - right_path = ocfs2_new_path(path_root_bh(left_path), - path_root_el(left_path)); + right_path = ocfs2_new_path_from_path(left_path); if (!right_path) { ret = -ENOMEM; mlog_errno(ret); @@ -3247,8 +3252,7 @@ static int ocfs2_get_left_path(struct inode *inode, /* This function shouldn't be called for the leftmost leaf. */ BUG_ON(left_cpos == 0); - left_path = ocfs2_new_path(path_root_bh(right_path), - path_root_el(right_path)); + left_path = ocfs2_new_path_from_path(right_path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -3780,8 +3784,7 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, * leftmost leaf. 
*/ if (left_cpos) { - left_path = ocfs2_new_path(path_root_bh(right_path), - path_root_el(right_path)); + left_path = ocfs2_new_path_from_path(right_path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -4018,7 +4021,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, goto out_update_clusters; } - right_path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + right_path = ocfs2_new_path_from_et(et); if (!right_path) { ret = -ENOMEM; mlog_errno(ret); @@ -4130,8 +4133,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, goto out; if (left_cpos != 0) { - left_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + left_path = ocfs2_new_path_from_path(path); if (!left_path) goto out; @@ -4187,8 +4189,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (right_cpos == 0) goto out; - right_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + right_path = ocfs2_new_path_from_path(path); if (!right_path) goto out; @@ -4381,7 +4382,7 @@ static int ocfs2_figure_insert_type(struct inode *inode, return 0; } - path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + path = ocfs2_new_path_from_et(et); if (!path) { ret = -ENOMEM; mlog_errno(ret); @@ -4910,7 +4911,7 @@ int ocfs2_mark_extent_written(struct inode *inode, if (et->et_ops == &ocfs2_dinode_et_ops) ocfs2_extent_map_trunc(inode, 0); - left_path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + left_path = ocfs2_new_path_from_et(et); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -5082,8 +5083,7 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, } if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) { - left_path = ocfs2_new_path(path_root_bh(path), - path_root_el(path)); + left_path = ocfs2_new_path_from_path(path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); @@ -5192,7 +5192,7 @@ int ocfs2_remove_extent(struct inode *inode, ocfs2_extent_map_trunc(inode, 0); - path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + path = ocfs2_new_path_from_et(et); if (!path) { ret = -ENOMEM; mlog_errno(ret); -- cgit From 13723d00e374c2a6d6ccb5af6de965e89c3e1b01 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 17 Oct 2008 19:25:01 -0700 Subject: ocfs2: Use metadata-specific ocfs2_journal_access_*() functions. The per-metadata-type ocfs2_journal_access_*() functions hook up jbd2 commit triggers and allow us to compute metadata ecc right before the buffers are written out. This commit provides ecc for inodes, extent blocks, group descriptors, and quota blocks. It is not safe to use extened attributes and metaecc at the same time yet. The ocfs2_extent_tree and ocfs2_path abstractions in alloc.c both hide the type of block at their root. Before, it didn't matter, but now the root block must use the appropriate ocfs2_journal_access_*() function. To keep this abstract, the structures now have a pointer to the matching journal_access function and a wrapper call to call it. A few places use naked ocfs2_write_block() calls instead of adding the blocks to the journal. We make sure to calculate their checksum and ecc before the write. Since we pass around the journal_access functions. Let's typedef them in ocfs2.h. 
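The key moving part is the stored journal_access function: an ocfs2_extent_tree or ocfs2_path no longer needs to know what kind of block its root is, it just calls back through the pointer it was created with, and everything deeper in the tree is by definition an extent block. A condensed illustration, with the typedef and helpers taken from the diff below (surrounding context trimmed, so treat it as a sketch rather than the complete change):

typedef int (*ocfs2_journal_access_func)(handle_t *handle,
					 struct inode *inode,
					 struct buffer_head *bh, int type);

/* The tree root is journalled with whatever function it was set up with. */
static inline int ocfs2_et_root_journal_access(handle_t *handle,
					       struct inode *inode,
					       struct ocfs2_extent_tree *et,
					       int type)
{
	return et->et_root_journal_access(handle, inode, et->et_root_bh,
					  type);
}

/*
 * A path journals its root the same way; every node below the root is
 * an extent block, so those always use ocfs2_journal_access_eb().
 */
static int ocfs2_path_bh_journal_access(handle_t *handle,
					struct inode *inode,
					struct ocfs2_path *path,
					int idx)
{
	ocfs2_journal_access_func access = path_root_access(path);

	if (!access)
		access = ocfs2_journal_access;	/* root with no ecc coverage */

	if (idx)
		access = ocfs2_journal_access_eb;

	return access(handle, inode, path->p_node[idx].bh,
		      OCFS2_JOURNAL_ACCESS_WRITE);
}

Callers that know their block type simply switch from ocfs2_journal_access() to the matching wrapper, e.g. ocfs2_journal_access_di() for a dinode buffer.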
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 233 ++++++++++++++++++++++++++++-------------------- fs/ocfs2/alloc.h | 5 +- fs/ocfs2/aops.c | 8 +- fs/ocfs2/dir.c | 48 ++++++---- fs/ocfs2/file.c | 16 ++-- fs/ocfs2/inode.c | 17 ++-- fs/ocfs2/journal.c | 2 + fs/ocfs2/journal.h | 3 +- fs/ocfs2/localalloc.c | 18 ++-- fs/ocfs2/namei.c | 38 ++++---- fs/ocfs2/ocfs2.h | 4 + fs/ocfs2/quota_global.c | 2 +- fs/ocfs2/quota_local.c | 18 ++-- fs/ocfs2/resize.c | 16 ++-- fs/ocfs2/suballoc.c | 58 ++++++------ 15 files changed, 280 insertions(+), 206 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index c22ff49b5e33..6e58fd557e5b 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -298,11 +298,13 @@ static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = { static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, struct buffer_head *bh, + ocfs2_journal_access_func access, void *obj, struct ocfs2_extent_tree_operations *ops) { et->et_ops = ops; et->et_root_bh = bh; + et->et_root_journal_access = access; if (!obj) obj = (void *)bh->b_data; et->et_object = obj; @@ -318,15 +320,16 @@ void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, struct buffer_head *bh) { - __ocfs2_init_extent_tree(et, inode, bh, NULL, &ocfs2_dinode_et_ops); + __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_di, + NULL, &ocfs2_dinode_et_ops); } void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, struct buffer_head *bh) { - __ocfs2_init_extent_tree(et, inode, bh, NULL, - &ocfs2_xattr_tree_et_ops); + __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_xb, + NULL, &ocfs2_xattr_tree_et_ops); } void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, @@ -334,7 +337,7 @@ void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, struct buffer_head *bh, struct ocfs2_xattr_value_root *xv) { - __ocfs2_init_extent_tree(et, inode, bh, xv, + __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access, xv, &ocfs2_xattr_value_et_ops); } @@ -356,6 +359,15 @@ static inline void ocfs2_et_update_clusters(struct inode *inode, et->et_ops->eo_update_clusters(inode, et, clusters); } +static inline int ocfs2_et_root_journal_access(handle_t *handle, + struct inode *inode, + struct ocfs2_extent_tree *et, + int type) +{ + return et->et_root_journal_access(handle, inode, et->et_root_bh, + type); +} + static inline int ocfs2_et_insert_check(struct inode *inode, struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) @@ -396,12 +408,14 @@ struct ocfs2_path_item { #define OCFS2_MAX_PATH_DEPTH 5 struct ocfs2_path { - int p_tree_depth; - struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH]; + int p_tree_depth; + ocfs2_journal_access_func p_root_access; + struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH]; }; #define path_root_bh(_path) ((_path)->p_node[0].bh) #define path_root_el(_path) ((_path)->p_node[0].el) +#define path_root_access(_path)((_path)->p_root_access) #define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh) #define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el) #define path_num_items(_path) ((_path)->p_tree_depth + 1) @@ -434,6 +448,8 @@ static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) */ if (keep_root) depth = le16_to_cpu(path_root_el(path)->l_tree_depth); + else + path_root_access(path) = NULL; path->p_tree_depth = depth; } @@ -459,6 +475,7 @@ static void ocfs2_cp_path(struct 
ocfs2_path *dest, struct ocfs2_path *src) BUG_ON(path_root_bh(dest) != path_root_bh(src)); BUG_ON(path_root_el(dest) != path_root_el(src)); + BUG_ON(path_root_access(dest) != path_root_access(src)); ocfs2_reinit_path(dest, 1); @@ -480,6 +497,7 @@ static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src) int i; BUG_ON(path_root_bh(dest) != path_root_bh(src)); + BUG_ON(path_root_access(dest) != path_root_access(src)); for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) { brelse(dest->p_node[i].bh); @@ -515,7 +533,8 @@ static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index, } static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, - struct ocfs2_extent_list *root_el) + struct ocfs2_extent_list *root_el, + ocfs2_journal_access_func access) { struct ocfs2_path *path; @@ -527,6 +546,7 @@ static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, get_bh(root_bh); path_root_bh(path) = root_bh; path_root_el(path) = root_el; + path_root_access(path) = access; } return path; @@ -534,12 +554,38 @@ static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, static struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path) { - return ocfs2_new_path(path_root_bh(path), path_root_el(path)); + return ocfs2_new_path(path_root_bh(path), path_root_el(path), + path_root_access(path)); } static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et) { - return ocfs2_new_path(et->et_root_bh, et->et_root_el); + return ocfs2_new_path(et->et_root_bh, et->et_root_el, + et->et_root_journal_access); +} + +/* + * Journal the buffer at depth idx. All idx>0 are extent_blocks, + * otherwise it's the root_access function. + * + * I don't like the way this function's name looks next to + * ocfs2_journal_access_path(), but I don't have a better one. + */ +static int ocfs2_path_bh_journal_access(handle_t *handle, + struct inode *inode, + struct ocfs2_path *path, + int idx) +{ + ocfs2_journal_access_func access = path_root_access(path); + + if (!access) + access = ocfs2_journal_access; + + if (idx) + access = ocfs2_journal_access_eb; + + return access(handle, inode, path->p_node[idx].bh, + OCFS2_JOURNAL_ACCESS_WRITE); } /* @@ -554,8 +600,7 @@ static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle, goto out; for(i = 0; i < path_num_items(path); i++) { - ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, path, i); if (ret < 0) { mlog_errno(ret); goto out; @@ -708,8 +753,11 @@ static int ocfs2_validate_extent_block(struct super_block *sb, * local to this block. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); - if (rc) + if (rc) { + mlog(ML_ERROR, "Checksum failed for extent block %llu\n", + (unsigned long long)bh->b_blocknr); return rc; + } /* * Errors after here are fatal. 
@@ -842,8 +890,8 @@ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, } ocfs2_set_new_buffer_uptodate(inode, bhs[i]); - status = ocfs2_journal_access(handle, inode, bhs[i], - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_eb(handle, inode, bhs[i], + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; @@ -986,8 +1034,8 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; - status = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_eb(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; @@ -1026,21 +1074,21 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, * journal_dirty erroring as it won't unless we've aborted the * handle (in which case we would never be here) so reserving * the write with journal_access is all we need to do. */ - status = ocfs2_journal_access(handle, inode, *last_eb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_eb(handle, inode, *last_eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } - status = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } if (eb_bh) { - status = ocfs2_journal_access(handle, inode, eb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_eb(handle, inode, eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -1129,8 +1177,8 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, eb_el = &eb->h_list; root_el = et->et_root_el; - status = ocfs2_journal_access(handle, inode, new_eb_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_eb(handle, inode, new_eb_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; @@ -1148,8 +1196,8 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, goto bail; } - status = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -1918,25 +1966,23 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + subtree_index); if (ret) { mlog_errno(ret); goto out; } for(i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_journal_access(handle, inode, - right_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, - left_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + left_path, i); if (ret) { mlog_errno(ret); goto out; @@ -2455,9 +2501,9 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, return -EAGAIN; if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) { - ret = ocfs2_journal_access(handle, inode, - path_leaf_bh(right_path), - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_eb(handle, 
inode, + path_leaf_bh(right_path), + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -2474,8 +2520,8 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, * We have to update i_last_eb_blk during the meta * data delete. */ - ret = ocfs2_journal_access(handle, inode, et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -2490,25 +2536,23 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, */ BUG_ON(right_has_empty && !del_right_subtree); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + subtree_index); if (ret) { mlog_errno(ret); goto out; } for(i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_journal_access(handle, inode, - right_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, - left_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + left_path, i); if (ret) { mlog_errno(ret); goto out; @@ -2653,16 +2697,17 @@ out: static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, handle_t *handle, - struct buffer_head *bh, - struct ocfs2_extent_list *el) + struct ocfs2_path *path) { int ret; + struct buffer_head *bh = path_leaf_bh(path); + struct ocfs2_extent_list *el = path_leaf_el(path); if (!ocfs2_is_empty_extent(&el->l_recs[0])) return 0; - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, path, + path_num_items(path) - 1); if (ret) { mlog_errno(ret); goto out; @@ -2744,9 +2789,8 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, * Caller might still want to make changes to the * tree root, so re-add it to the journal here. */ - ret = ocfs2_journal_access(handle, inode, - path_root_bh(left_path), - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + left_path, 0); if (ret) { mlog_errno(ret); goto out; @@ -2929,8 +2973,7 @@ rightmost_no_delete: * it up front. 
*/ ret = ocfs2_rotate_rightmost_leaf_left(inode, handle, - path_leaf_bh(path), - path_leaf_el(path)); + path); if (ret) mlog_errno(ret); goto out; @@ -3164,8 +3207,8 @@ static int ocfs2_merge_rec_right(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + subtree_index); if (ret) { mlog_errno(ret); goto out; @@ -3173,17 +3216,15 @@ static int ocfs2_merge_rec_right(struct inode *inode, for (i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_journal_access(handle, inode, - right_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, - left_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + left_path, i); if (ret) { mlog_errno(ret); goto out; @@ -3195,8 +3236,8 @@ static int ocfs2_merge_rec_right(struct inode *inode, right_rec = &el->l_recs[index + 1]; } - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, left_path, + path_num_items(left_path) - 1); if (ret) { mlog_errno(ret); goto out; @@ -3335,8 +3376,8 @@ static int ocfs2_merge_rec_left(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + subtree_index); if (ret) { mlog_errno(ret); goto out; @@ -3344,17 +3385,15 @@ static int ocfs2_merge_rec_left(struct inode *inode, for (i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_journal_access(handle, inode, - right_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, - left_path->p_node[i].bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, + left_path, i); if (ret) { mlog_errno(ret); goto out; @@ -3366,8 +3405,8 @@ static int ocfs2_merge_rec_left(struct inode *inode, has_empty_extent = 1; } - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_path_bh_journal_access(handle, inode, left_path, + path_num_items(left_path) - 1); if (ret) { mlog_errno(ret); goto out; @@ -4009,8 +4048,8 @@ static int ocfs2_do_insert_extent(struct inode *inode, el = et->et_root_el; - ret = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -4071,8 +4110,8 @@ static int ocfs2_do_insert_extent(struct inode *inode, * ocfs2_rotate_tree_right() might have extended the * transaction without re-journaling our tree root. 
*/ - ret = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -4593,9 +4632,9 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, BUG_ON(num_bits > clusters_to_add); - /* reserve our write early -- insert_extent may update the inode */ - status = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + /* reserve our write early -- insert_extent may update the tree root */ + status = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -5347,8 +5386,8 @@ int ocfs2_remove_btree_range(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, et->et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_et_root_journal_access(handle, inode, et, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -5461,8 +5500,8 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, goto bail; } - status = ocfs2_journal_access(handle, tl_inode, tl_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -5523,8 +5562,8 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, while (i >= 0) { /* Caller has given us at least enough credits to * update the truncate log dinode */ - status = ocfs2_journal_access(handle, tl_inode, tl_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -5780,6 +5819,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, * tl_used. 
*/ tl->tl_used = 0; + ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check); status = ocfs2_write_block(osb, tl_bh, tl_inode); if (status < 0) { mlog_errno(status); @@ -6546,8 +6586,8 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, } if (last_eb_bh) { - status = ocfs2_journal_access(handle, inode, last_eb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_eb(handle, inode, last_eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -6908,8 +6948,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, goto out_unlock; } - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; @@ -7043,7 +7083,8 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb, new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, i_size_read(inode)); - path = ocfs2_new_path(fe_bh, &di->id2.i_list); + path = ocfs2_new_path(fe_bh, &di->id2.i_list, + ocfs2_journal_access_di); if (!path) { status = -ENOMEM; mlog_errno(status); @@ -7276,8 +7317,8 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, goto out; } - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 59d37d1b7d4c..4b6fea22748a 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -45,7 +45,9 @@ * * ocfs2_extent_tree contains info for the root of the b-tree, it must have a * root ocfs2_extent_list and a root_bh so that they can be used in the b-tree - * functions. + * functions. With metadata ecc, we now call different journal_access + * functions for each type of metadata, so it must have the + * root_journal_access function. * ocfs2_extent_tree_operations abstract the normal operations we do for * the root of extent b-tree. */ @@ -54,6 +56,7 @@ struct ocfs2_extent_tree { struct ocfs2_extent_tree_operations *et_ops; struct buffer_head *et_root_bh; struct ocfs2_extent_list *et_root_el; + ocfs2_journal_access_func et_root_journal_access; void *et_object; unsigned int et_max_leaf_clusters; }; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 6b647ec87bb3..a067a6cffb01 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1512,8 +1512,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } - ret = ocfs2_journal_access(handle, inode, wc->w_di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { ocfs2_commit_trans(osb, handle); @@ -1740,8 +1740,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, * We don't want this to fail in ocfs2_write_end(), so do it * here. 
*/ - ret = ocfs2_journal_access(handle, inode, wc->w_di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_quota; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 3708fe482e3e..45e4e03d8f71 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -378,14 +378,18 @@ int ocfs2_update_entry(struct inode *dir, handle_t *handle, struct inode *new_entry_inode) { int ret; + ocfs2_journal_access_func access = ocfs2_journal_access_db; /* * The same code works fine for both inline-data and extent - * based directories, so no need to split this up. + * based directories, so no need to split this up. The only + * difference is the journal_access function. */ - ret = ocfs2_journal_access(handle, dir, de_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + access = ocfs2_journal_access_di; + + ret = access(handle, dir, de_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -407,9 +411,13 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, { struct ocfs2_dir_entry *de, *pde; int i, status = -ENOENT; + ocfs2_journal_access_func access = ocfs2_journal_access_db; mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh); + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + access = ocfs2_journal_access_di; + i = 0; pde = NULL; de = (struct ocfs2_dir_entry *) first_de; @@ -420,8 +428,8 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, goto bail; } if (de == de_del) { - status = ocfs2_journal_access(handle, dir, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = access(handle, dir, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { status = -EIO; mlog_errno(status); @@ -581,8 +589,14 @@ int __ocfs2_add_entry(handle_t *handle, goto bail; } - status = ocfs2_journal_access(handle, dir, insert_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + if (insert_bh == parent_fe_bh) + status = ocfs2_journal_access_di(handle, dir, + insert_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + else + status = ocfs2_journal_access_db(handle, dir, + insert_bh, + OCFS2_JOURNAL_ACCESS_WRITE); /* By now the buffer is marked for journaling */ offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { @@ -1081,8 +1095,8 @@ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, struct ocfs2_inline_data *data = &di->id2.i_data; unsigned int size = le16_to_cpu(data->id_count); - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -1129,8 +1143,8 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, ocfs2_set_new_buffer_uptodate(inode, new_bh); - status = ocfs2_journal_access(handle, inode, new_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_db(handle, inode, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; @@ -1292,8 +1306,8 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, ocfs2_set_new_buffer_uptodate(dir, dirdata_bh); - ret = ocfs2_journal_access(handle, dir, dirdata_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_db(handle, dir, dirdata_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; @@ -1319,8 +1333,8 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, * We let the later dirent insert modify 
c/mtime - to the user * the data hasn't changed. */ - ret = ocfs2_journal_access(handle, dir, di_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_di(handle, dir, di_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; @@ -1583,8 +1597,8 @@ do_extend: ocfs2_set_new_buffer_uptodate(dir, new_bh); - status = ocfs2_journal_access(handle, dir, new_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_db(handle, dir, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 9374d374a264..e8f795f978aa 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -256,8 +256,8 @@ int ocfs2_update_inode_atime(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; @@ -353,8 +353,8 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, goto out; } - status = ocfs2_journal_access(handle, inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_commit; @@ -590,8 +590,8 @@ restarted_transaction: /* reserve a write to the file entry early on - that we if we * run out of credits in the allocation path, we can still * update i_size. */ - status = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -1121,8 +1121,8 @@ static int __ocfs2_write_remove_suid(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_trans; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 9370b652ab94..229e707bc050 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -537,8 +537,8 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, goto out; } - status = ocfs2_journal_access(handle, inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out; @@ -621,8 +621,8 @@ static int ocfs2_remove_inode(struct inode *inode, } /* set the inodes dtime */ - status = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail_commit; @@ -1190,8 +1190,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle, mlog_entry("(inode %llu)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); - status = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -1277,8 +1277,11 @@ int ocfs2_validate_inode_block(struct super_block *sb, * local to this block. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); - if (rc) + if (rc) { + mlog(ML_ERROR, "Checksum failed for dinode %llu\n", + (unsigned long long)bh->b_blocknr); goto bail; + } /* * Errors after here are fatal. 
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 2daa5848faf2..3b54dba0f74b 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -752,6 +752,7 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, if (replayed) ocfs2_bump_recovery_generation(fe); + ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); status = ocfs2_write_block(osb, bh, journal->j_inode); if (status < 0) mlog_errno(status); @@ -1486,6 +1487,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, osb->slot_recovery_generations[slot_num] = ocfs2_get_recovery_generation(fe); + ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); status = ocfs2_write_block(osb, bh, inode); if (status < 0) mlog_errno(status); diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index bca370dab021..3c3532e1307c 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -247,9 +247,10 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks); #define OCFS2_JOURNAL_ACCESS_WRITE 1 #define OCFS2_JOURNAL_ACCESS_UNDO 2 + /* ocfs2_inode */ int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, - struct buffer_head *bh, int type); + struct buffer_head *bh, int type); /* ocfs2_extent_block */ int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, struct buffer_head *bh, int type); diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 19cfb1b9ce09..ec70cdbe77fc 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -36,6 +36,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dlmglue.h" #include "inode.h" #include "journal.h" @@ -382,8 +383,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) } memcpy(alloc_copy, alloc, bh->b_size); - status = ocfs2_journal_access(handle, local_alloc_inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, local_alloc_inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_commit; @@ -476,6 +477,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, alloc = (struct ocfs2_dinode *) alloc_bh->b_data; ocfs2_clear_local_alloc(alloc); + ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check); status = ocfs2_write_block(osb, alloc_bh, inode); if (status < 0) mlog_errno(status); @@ -762,9 +764,9 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, * delete bits from it! 
*/ *num_bits = bits_wanted; - status = ocfs2_journal_access(handle, local_alloc_inode, - osb->local_alloc_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, local_alloc_inode, + osb->local_alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -1240,9 +1242,9 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, } memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size); - status = ocfs2_journal_access(handle, local_alloc_inode, - osb->local_alloc_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, local_alloc_inode, + osb->local_alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 6173807ba23b..084aba86c3b2 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -361,8 +361,8 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } - status = ocfs2_journal_access(handle, dir, parent_fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, dir, parent_fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -493,8 +493,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, } ocfs2_set_new_buffer_uptodate(inode, *new_fe_bh); - status = ocfs2_journal_access(handle, inode, *new_fe_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_di(handle, inode, *new_fe_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto leave; @@ -664,8 +664,8 @@ static int ocfs2_link(struct dentry *old_dentry, goto out_unlock_inode; } - err = ocfs2_journal_access(handle, inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + err = ocfs2_journal_access_di(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (err < 0) { mlog_errno(err); goto out_commit; @@ -851,8 +851,8 @@ static int ocfs2_unlink(struct inode *dir, goto leave; } - status = ocfs2_journal_access(handle, inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -1265,8 +1265,8 @@ static int ocfs2_rename(struct inode *old_dir, goto bail; } } - status = ocfs2_journal_access(handle, new_inode, newfe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, new_inode, newfe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -1312,8 +1312,8 @@ static int ocfs2_rename(struct inode *old_dir, old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); - status = ocfs2_journal_access(handle, old_inode, old_inode_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, old_inode, old_inode_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status >= 0) { old_di = (struct ocfs2_dinode *) old_inode_bh->b_data; @@ -1389,9 +1389,9 @@ static int ocfs2_rename(struct inode *old_dir, (int)old_dir_nlink, old_dir->i_nlink); } else { struct ocfs2_dinode *fe; - status = ocfs2_journal_access(handle, old_dir, - old_dir_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, old_dir, + old_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); fe = (struct ocfs2_dinode *) old_dir_bh->b_data; fe->i_links_count = cpu_to_le16(old_dir->i_nlink); status = ocfs2_journal_dirty(handle, old_dir_bh); @@ -1898,8 +1898,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, goto leave; } - status = ocfs2_journal_access(handle, orphan_dir_inode, orphan_dir_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status 
= ocfs2_journal_access_di(handle, orphan_dir_inode, orphan_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; @@ -1986,8 +1986,8 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, goto leave; } - status = ocfs2_journal_access(handle,orphan_dir_inode, orphan_dir_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle,orphan_dir_inode, orphan_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 2bb389fe7397..bad87d0a03c9 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -339,6 +339,10 @@ struct ocfs2_super #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) +/* Useful typedef for passing around journal access functions */ +typedef int (*ocfs2_journal_access_func)(handle_t *handle, struct inode *inode, + struct buffer_head *bh, int type); + static inline int ocfs2_should_order_data(struct inode *inode) { if (!S_ISREG(inode->i_mode)) diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index a0b8b14cca8f..444aa5a467fb 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -244,7 +244,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, set_buffer_uptodate(bh); unlock_buffer(bh); ocfs2_set_buffer_uptodate(gqinode, bh); - err = ocfs2_journal_access(handle, gqinode, bh, ja_type); + err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type); if (err < 0) { brelse(bh); goto out; diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index d451b715aefe..07deec5e9721 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -106,8 +106,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh, mlog_errno(status); return status; } - status = ocfs2_journal_access(handle, inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_dq(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); ocfs2_commit_trans(OCFS2_SB(sb), handle); @@ -506,7 +506,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, goto out_commit; } /* Release local quota file entry */ - status = ocfs2_journal_access(handle, lqinode, + status = ocfs2_journal_access_dq(handle, lqinode, qbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -614,8 +614,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, mlog_errno(status); goto out_bh; } - status = ocfs2_journal_access(handle, lqinode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_dq(handle, lqinode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_trans; @@ -981,8 +981,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( goto out; } - status = ocfs2_journal_access(handle, lqinode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_dq(handle, lqinode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_trans; @@ -1074,7 +1074,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( mlog_errno(status); goto out; } - status = ocfs2_journal_access(handle, lqinode, chunk->qc_headerbh, + status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1207,7 +1207,7 @@ static int ocfs2_local_release_dquot(struct dquot *dquot) goto out; } - status = ocfs2_journal_access(handle, sb_dqopt(sb)->files[type], + status = ocfs2_journal_access_dq(handle, sb_dqopt(sb)->files[type], 
od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 867de3ebfcaf..424adaa5f900 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -106,8 +106,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle, mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n", new_clusters, first_new_cluster); - ret = ocfs2_journal_access(handle, bm_inode, group_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_gd(handle, bm_inode, group_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; @@ -141,8 +141,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle, } /* update the inode accordingly. */ - ret = ocfs2_journal_access(handle, bm_inode, bm_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, bm_inode, bm_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_rollback; @@ -536,8 +536,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) cl = &fe->id2.i_chain; cr = &cl->cl_recs[input->chain]; - ret = ocfs2_journal_access(handle, main_bm_inode, group_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_gd(handle, main_bm_inode, group_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_commit; @@ -552,8 +552,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) goto out_commit; } - ret = ocfs2_journal_access(handle, main_bm_inode, main_bm_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, main_bm_inode, main_bm_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_commit; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 78755766c329..a69628603e18 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -261,7 +261,11 @@ int ocfs2_check_group_descriptor(struct super_block *sb, * local to this block. 
*/ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check); - if (!rc) + if (rc) { + mlog(ML_ERROR, + "Checksum failed for group descriptor %llu\n", + (unsigned long long)bh->b_blocknr); + } else rc = ocfs2_validate_gd_self(sb, bh, 1); if (!rc) rc = ocfs2_validate_gd_parent(sb, di, bh, 1); @@ -343,10 +347,10 @@ static int ocfs2_block_group_fill(handle_t *handle, goto bail; } - status = ocfs2_journal_access(handle, - alloc_inode, - bg_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + status = ocfs2_journal_access_gd(handle, + alloc_inode, + bg_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; @@ -476,8 +480,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, bg = (struct ocfs2_group_desc *) bg_bh->b_data; - status = ocfs2_journal_access(handle, alloc_inode, - bh, OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, alloc_inode, + bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -986,10 +990,10 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, if (ocfs2_is_cluster_bitmap(alloc_inode)) journal_type = OCFS2_JOURNAL_ACCESS_UNDO; - status = ocfs2_journal_access(handle, - alloc_inode, - group_bh, - journal_type); + status = ocfs2_journal_access_gd(handle, + alloc_inode, + group_bh, + journal_type); if (status < 0) { mlog_errno(status); goto bail; @@ -1060,8 +1064,8 @@ static int ocfs2_relink_block_group(handle_t *handle, bg_ptr = le64_to_cpu(bg->bg_next_group); prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group); - status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_gd(handle, alloc_inode, prev_bg_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_rollback; @@ -1075,8 +1079,8 @@ static int ocfs2_relink_block_group(handle_t *handle, goto out_rollback; } - status = ocfs2_journal_access(handle, alloc_inode, bg_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_gd(handle, alloc_inode, bg_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_rollback; @@ -1090,8 +1094,8 @@ static int ocfs2_relink_block_group(handle_t *handle, goto out_rollback; } - status = ocfs2_journal_access(handle, alloc_inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, alloc_inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_rollback; @@ -1242,8 +1246,8 @@ static int ocfs2_alloc_dinode_update_counts(struct inode *inode, struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; @@ -1414,10 +1418,10 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, /* Ok, claim our bits now: set the info on dinode, chainlist * and then the group */ - status = ocfs2_journal_access(handle, - alloc_inode, - ac->ac_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, + alloc_inode, + ac->ac_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -1824,8 +1828,8 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle, if (ocfs2_is_cluster_bitmap(alloc_inode)) journal_type = OCFS2_JOURNAL_ACCESS_UNDO; - status = ocfs2_journal_access(handle, alloc_inode, 
group_bh, - journal_type); + status = ocfs2_journal_access_gd(handle, alloc_inode, group_bh, + journal_type); if (status < 0) { mlog_errno(status); goto bail; @@ -1900,8 +1904,8 @@ int ocfs2_free_suballoc_bits(handle_t *handle, goto bail; } - status = ocfs2_journal_access(handle, alloc_inode, alloc_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, alloc_inode, alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; -- cgit From 4d0e214ee83185fcaa2cb97cd026d32bdc5c994a Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Fri, 5 Dec 2008 11:19:37 -0800 Subject: ocfs2: Add ecc and checksums to ocfs2 xattr buckets. The xattr bucket can span multiple blocks on disk. We have wrappers for this structure in the code. We use the new multi-block ecc calls to calculate and validate the bucket. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index bc822d6ba542..7c2f4c9d1bd9 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -273,6 +273,15 @@ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, rc = ocfs2_read_blocks(bucket->bu_inode, xb_blkno, bucket->bu_blocks, bucket->bu_bhs, 0, NULL); + if (!rc) { + rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb, + bucket->bu_bhs, + bucket->bu_blocks, + &bucket_xh(bucket)->xh_check); + if (rc) + mlog_errno(rc); + } + if (rc) ocfs2_xattr_bucket_relse(bucket); return rc; @@ -301,6 +310,10 @@ static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle, { int i; + ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb, + bucket->bu_bhs, bucket->bu_blocks, + &bucket_xh(bucket)->xh_check); + for (i = 0; i < bucket->bu_blocks; i++) ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); } -- cgit From 2a50a743bdaab104155bd9e988d2ba3bb4177263 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 14:24:33 -0800 Subject: ocfs2: Create ocfs2_xattr_value_buf. When an ocfs2 extended attribute is large enough to require its own allocation tree, we root it with an ocfs2_xattr_value_root. However, these roots can be a part of inodes, xattr blocks, or xattr buckets. Thus, they need a different journal access function for each container. We wrap the bh, its journal access function, and the value root (xv) in a structure called ocfs2_xattr_value_buf. This is a package that can be passed around. In this first pass, we simply pass it to the extent tree code.
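For illustration only (this sketch is not part of the patch): a caller that has already located the value root inside its container bundles the pieces and hands them to the extent-tree code roughly like this. Here xattr_bh and xv stand for whatever buffer_head and value root the caller holds, and at this point in the series the access function is still the generic ocfs2_journal_access.

	struct ocfs2_xattr_value_buf vb = {
		.vb_bh     = xattr_bh,			/* container's buffer_head */
		.vb_xv     = xv,			/* value root inside that buffer */
		.vb_access = ocfs2_journal_access,	/* typed variants come later */
	};
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, inode, &vb);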
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 25 +++++++++++-------------- fs/ocfs2/alloc.h | 4 ++-- fs/ocfs2/xattr.c | 34 ++++++++++++++++++++++------------ fs/ocfs2/xattr.h | 14 ++++++++++++++ 4 files changed, 49 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 6e58fd557e5b..874c0bd9e1cc 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -48,6 +48,7 @@ #include "file.h" #include "super.h" #include "uptodate.h" +#include "xattr.h" #include "buffer_head_io.h" @@ -207,36 +208,33 @@ static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et) static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et) { - struct ocfs2_xattr_value_root *xv = et->et_object; + struct ocfs2_xattr_value_buf *vb = et->et_object; - et->et_root_el = &xv->xr_list; + et->et_root_el = &vb->vb_xv->xr_list; } static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { - struct ocfs2_xattr_value_root *xv = - (struct ocfs2_xattr_value_root *)et->et_object; + struct ocfs2_xattr_value_buf *vb = et->et_object; - xv->xr_last_eb_blk = cpu_to_le64(blkno); + vb->vb_xv->xr_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et) { - struct ocfs2_xattr_value_root *xv = - (struct ocfs2_xattr_value_root *) et->et_object; + struct ocfs2_xattr_value_buf *vb = et->et_object; - return le64_to_cpu(xv->xr_last_eb_blk); + return le64_to_cpu(vb->vb_xv->xr_last_eb_blk); } static void ocfs2_xattr_value_update_clusters(struct inode *inode, struct ocfs2_extent_tree *et, u32 clusters) { - struct ocfs2_xattr_value_root *xv = - (struct ocfs2_xattr_value_root *)et->et_object; + struct ocfs2_xattr_value_buf *vb = et->et_object; - le32_add_cpu(&xv->xr_clusters, clusters); + le32_add_cpu(&vb->vb_xv->xr_clusters, clusters); } static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = { @@ -334,10 +332,9 @@ void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, - struct buffer_head *bh, - struct ocfs2_xattr_value_root *xv) + struct ocfs2_xattr_value_buf *vb) { - __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access, xv, + __ocfs2_init_extent_tree(et, inode, vb->vb_bh, vb->vb_access, vb, &ocfs2_xattr_value_et_ops); } diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 4b6fea22748a..cceff5c37f47 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -71,10 +71,10 @@ void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, struct buffer_head *bh); +struct ocfs2_xattr_value_buf; void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, struct inode *inode, - struct buffer_head *bh, - struct ocfs2_xattr_value_root *xv); + struct ocfs2_xattr_value_buf *vb); /* * Read an extent block into *bh. 
If *bh is NULL, a bh will be diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 7c2f4c9d1bd9..123d378aba9e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -581,21 +581,26 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, handle_t *handle = ctxt->handle; enum ocfs2_alloc_restarted why; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - u32 prev_clusters, logical_start = le32_to_cpu(xv->xr_clusters); + struct ocfs2_xattr_value_buf vb = { + .vb_bh = xattr_bh, + .vb_xv = xv, + .vb_access = ocfs2_journal_access, + }; + u32 prev_clusters, logical_start = le32_to_cpu(vb.vb_xv->xr_clusters); struct ocfs2_extent_tree et; mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); - ocfs2_init_xattr_value_extent_tree(&et, inode, xattr_bh, xv); + ocfs2_init_xattr_value_extent_tree(&et, inode, &vb); - status = ocfs2_journal_access(handle, inode, xattr_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = vb.vb_access(handle, inode, vb.vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; } - prev_clusters = le32_to_cpu(xv->xr_clusters); + prev_clusters = le32_to_cpu(vb.vb_xv->xr_clusters); status = ocfs2_add_clusters_in_btree(osb, inode, &logical_start, @@ -611,13 +616,13 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, goto leave; } - status = ocfs2_journal_dirty(handle, xattr_bh); + status = ocfs2_journal_dirty(handle, vb.vb_bh); if (status < 0) { mlog_errno(status); goto leave; } - clusters_to_add -= le32_to_cpu(xv->xr_clusters) - prev_clusters; + clusters_to_add -= le32_to_cpu(vb.vb_xv->xr_clusters) - prev_clusters; /* * We should have already allocated enough space before the transaction, @@ -640,11 +645,16 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); handle_t *handle = ctxt->handle; struct ocfs2_extent_tree et; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = root_bh, + .vb_xv = xv, + .vb_access = ocfs2_journal_access, + }; - ocfs2_init_xattr_value_extent_tree(&et, inode, root_bh, xv); + ocfs2_init_xattr_value_extent_tree(&et, inode, &vb); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = vb.vb_access(handle, inode, vb.vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -657,9 +667,9 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out; } - le32_add_cpu(&xv->xr_clusters, -len); + le32_add_cpu(&vb.vb_xv->xr_clusters, -len); - ret = ocfs2_journal_dirty(handle, root_bh); + ret = ocfs2_journal_dirty(handle, vb.vb_bh); if (ret) { mlog_errno(ret); goto out; diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 9a67e7d8f812..5a1ebc789f7e 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -70,4 +70,18 @@ int ocfs2_calc_xattr_init(struct inode *, struct buffer_head *, int, struct ocfs2_security_xattr_info *, int *, int *, struct ocfs2_alloc_context **); +/* + * xattrs can live inside an inode, as part of an external xattr block, + * or inside an xattr bucket, which is the leaf of a tree rooted in an + * xattr block. Some of the xattr calls, especially the value setting + * functions, want to treat each of these locations as equal. Let's wrap + * them in a structure that we can pass around instead of raw buffer_heads. 
+ */ +struct ocfs2_xattr_value_buf { + struct buffer_head *vb_bh; + ocfs2_journal_access_func vb_access; + struct ocfs2_xattr_value_root *vb_xv; +}; + + #endif /* OCFS2_XATTR_H */ -- cgit From d72cc72d57ecaf9047da51269dabd6880c1399ac Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 14:30:41 -0800 Subject: ocfs2: Pull ocfs2_xattr_value_buf up from __ocfs2_remove_xattr_range(). Place an ocfs2_xattr_value_buf in __ocfs2_xattr_shrink_size() and pass it down to __ocfs2_remove_xattr_range(). Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 123d378aba9e..3b059cf2eb45 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -636,8 +636,7 @@ leave: } static int __ocfs2_remove_xattr_range(struct inode *inode, - struct buffer_head *root_bh, - struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_value_buf *vb, u32 cpos, u32 phys_cpos, u32 len, struct ocfs2_xattr_set_ctxt *ctxt) { @@ -645,16 +644,11 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); handle_t *handle = ctxt->handle; struct ocfs2_extent_tree et; - struct ocfs2_xattr_value_buf vb = { - .vb_bh = root_bh, - .vb_xv = xv, - .vb_access = ocfs2_journal_access, - }; - ocfs2_init_xattr_value_extent_tree(&et, inode, &vb); + ocfs2_init_xattr_value_extent_tree(&et, inode, vb); - ret = vb.vb_access(handle, inode, vb.vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = vb->vb_access(handle, inode, vb->vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -667,9 +661,9 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out; } - le32_add_cpu(&vb.vb_xv->xr_clusters, -len); + le32_add_cpu(&vb->vb_xv->xr_clusters, -len); - ret = ocfs2_journal_dirty(handle, vb.vb_bh); + ret = ocfs2_journal_dirty(handle, vb->vb_bh); if (ret) { mlog_errno(ret); goto out; @@ -693,6 +687,11 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, int ret = 0; u32 trunc_len, cpos, phys_cpos, alloc_size; u64 block; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = root_bh, + .vb_xv = xv, + .vb_access = ocfs2_journal_access, + }; if (old_clusters <= new_clusters) return 0; @@ -701,7 +700,8 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, trunc_len = old_clusters - new_clusters; while (trunc_len) { ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos, - &alloc_size, &xv->xr_list); + &alloc_size, + &vb.vb_xv->xr_list); if (ret) { mlog_errno(ret); goto out; @@ -710,7 +710,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, if (alloc_size > trunc_len) alloc_size = trunc_len; - ret = __ocfs2_remove_xattr_range(inode, root_bh, xv, cpos, + ret = __ocfs2_remove_xattr_range(inode, &vb, cpos, phys_cpos, alloc_size, ctxt); if (ret) { -- cgit From 19b801f45fa5e4840b9be3dcf1e73b08f35b04d9 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 14:36:50 -0800 Subject: ocfs2: Pull ocfs2_xattr_value_buf up into ocfs2_xattr_value_truncate(). Place an ocfs2_xattr_value_buf in ocfs2_xattr_value_truncate() and pass it down to ocfs2_xattr_shrink_size(). We can also pass it into ocfs2_xattr_extend_allocation(), replacing its ocfs2_xattr_value_buf. 
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 41 +++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3b059cf2eb45..4ce8019f0ef1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -573,34 +573,28 @@ int ocfs2_calc_xattr_init(struct inode *dir, static int ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, - struct buffer_head *xattr_bh, - struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) { int status = 0; handle_t *handle = ctxt->handle; enum ocfs2_alloc_restarted why; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_xattr_value_buf vb = { - .vb_bh = xattr_bh, - .vb_xv = xv, - .vb_access = ocfs2_journal_access, - }; - u32 prev_clusters, logical_start = le32_to_cpu(vb.vb_xv->xr_clusters); + u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); struct ocfs2_extent_tree et; mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); - ocfs2_init_xattr_value_extent_tree(&et, inode, &vb); + ocfs2_init_xattr_value_extent_tree(&et, inode, vb); - status = vb.vb_access(handle, inode, vb.vb_bh, + status = vb->vb_access(handle, inode, vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; } - prev_clusters = le32_to_cpu(vb.vb_xv->xr_clusters); + prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); status = ocfs2_add_clusters_in_btree(osb, inode, &logical_start, @@ -616,13 +610,13 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, goto leave; } - status = ocfs2_journal_dirty(handle, vb.vb_bh); + status = ocfs2_journal_dirty(handle, vb->vb_bh); if (status < 0) { mlog_errno(status); goto leave; } - clusters_to_add -= le32_to_cpu(vb.vb_xv->xr_clusters) - prev_clusters; + clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters; /* * We should have already allocated enough space before the transaction, @@ -680,18 +674,12 @@ out: static int ocfs2_xattr_shrink_size(struct inode *inode, u32 old_clusters, u32 new_clusters, - struct buffer_head *root_bh, - struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) { int ret = 0; u32 trunc_len, cpos, phys_cpos, alloc_size; u64 block; - struct ocfs2_xattr_value_buf vb = { - .vb_bh = root_bh, - .vb_xv = xv, - .vb_access = ocfs2_journal_access, - }; if (old_clusters <= new_clusters) return 0; @@ -701,7 +689,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, while (trunc_len) { ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos, &alloc_size, - &vb.vb_xv->xr_list); + &vb->vb_xv->xr_list); if (ret) { mlog_errno(ret); goto out; @@ -710,7 +698,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, if (alloc_size > trunc_len) alloc_size = trunc_len; - ret = __ocfs2_remove_xattr_range(inode, &vb, cpos, + ret = __ocfs2_remove_xattr_range(inode, vb, cpos, phys_cpos, alloc_size, ctxt); if (ret) { @@ -738,6 +726,11 @@ static int ocfs2_xattr_value_truncate(struct inode *inode, int ret; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len); u32 old_clusters = le32_to_cpu(xv->xr_clusters); + struct ocfs2_xattr_value_buf vb = { + .vb_bh = root_bh, + .vb_xv = xv, + .vb_access = ocfs2_journal_access, + }; if (new_clusters == old_clusters) return 0; @@ -745,11 +738,11 @@ static int ocfs2_xattr_value_truncate(struct inode *inode, if (new_clusters > old_clusters) ret = 
ocfs2_xattr_extend_allocation(inode, new_clusters - old_clusters, - root_bh, xv, ctxt); + &vb, ctxt); else ret = ocfs2_xattr_shrink_size(inode, old_clusters, new_clusters, - root_bh, xv, ctxt); + &vb, ctxt); return ret; } -- cgit From b3e5d37905730dc5ddff717f55ed830caa80ea0e Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 15:01:04 -0800 Subject: ocfs2: Pass ocfs2_xattr_value_buf into ocfs2_xattr_value_truncate(). The callers of ocfs2_xattr_value_truncate() now pass in ocfs2_xattr_value_bufs. These callers are the ones that calculated the xv location, so they are the right starting point. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 66 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 4ce8019f0ef1..409f9eeec703 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -718,19 +718,13 @@ out: } static int ocfs2_xattr_value_truncate(struct inode *inode, - struct buffer_head *root_bh, - struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_value_buf *vb, int len, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len); - u32 old_clusters = le32_to_cpu(xv->xr_clusters); - struct ocfs2_xattr_value_buf vb = { - .vb_bh = root_bh, - .vb_xv = xv, - .vb_access = ocfs2_journal_access, - }; + u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); if (new_clusters == old_clusters) return 0; @@ -738,11 +732,11 @@ static int ocfs2_xattr_value_truncate(struct inode *inode, if (new_clusters > old_clusters) ret = ocfs2_xattr_extend_allocation(inode, new_clusters - old_clusters, - &vb, ctxt); + vb, ctxt); else ret = ocfs2_xattr_shrink_size(inode, old_clusters, new_clusters, - &vb, ctxt); + vb, ctxt); return ret; } @@ -1330,6 +1324,10 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, struct ocfs2_xattr_value_root *xv = NULL; size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; int ret = 0; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = xs->xattr_bh, + .vb_access = ocfs2_journal_access + }; memset(val, 0, size); memcpy(val, xi->name, name_len); @@ -1340,9 +1338,9 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, xv->xr_list.l_tree_depth = 0; xv->xr_list.l_count = cpu_to_le16(1); xv->xr_list.l_next_free_rec = 0; + vb.vb_xv = xv; - ret = ocfs2_xattr_value_truncate(inode, xs->xattr_bh, xv, - xi->value_len, ctxt); + ret = ocfs2_xattr_value_truncate(inode, &vb, xi->value_len, ctxt); if (ret < 0) { mlog_errno(ret); return ret; @@ -1352,7 +1350,7 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, mlog_errno(ret); return ret; } - ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, xv, + ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb.vb_xv, xi->value, xi->value_len); if (ret < 0) mlog_errno(ret); @@ -1550,9 +1548,12 @@ static int ocfs2_xattr_set_entry(struct inode *inode, goto out; } else if (!ocfs2_xattr_is_local(xs->here)) { /* For existing xattr which has value outside */ - struct ocfs2_xattr_value_root *xv = NULL; - xv = (struct ocfs2_xattr_value_root *)(val + - OCFS2_XATTR_SIZE(name_len)); + struct ocfs2_xattr_value_buf vb = { + .vb_bh = xs->xattr_bh, + .vb_xv = (struct ocfs2_xattr_value_root *) + (val + OCFS2_XATTR_SIZE(name_len)), + .vb_access = ocfs2_journal_access, + }; if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { /* @@ -1561,8 +1562,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, * then set 
new value with set_value_outside(). */ ret = ocfs2_xattr_value_truncate(inode, - xs->xattr_bh, - xv, + &vb, xi->value_len, ctxt); if (ret < 0) { @@ -1582,7 +1582,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, ret = __ocfs2_xattr_set_value_outside(inode, handle, - xv, + vb.vb_xv, xi->value, xi->value_len); if (ret < 0) @@ -1594,8 +1594,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, * just trucate old value to zero. */ ret = ocfs2_xattr_value_truncate(inode, - xs->xattr_bh, - xv, + &vb, 0, ctxt); if (ret < 0) @@ -1714,15 +1713,17 @@ static int ocfs2_remove_value_outside(struct inode*inode, struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; if (!ocfs2_xattr_is_local(entry)) { - struct ocfs2_xattr_value_root *xv; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = bh, + .vb_access = ocfs2_journal_access, + }; void *val; val = (void *)header + le16_to_cpu(entry->xe_name_offset); - xv = (struct ocfs2_xattr_value_root *) + vb.vb_xv = (struct ocfs2_xattr_value_root *) (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); - ret = ocfs2_xattr_value_truncate(inode, bh, xv, - 0, &ctxt); + ret = ocfs2_xattr_value_truncate(inode, &vb, 0, &ctxt); if (ret < 0) { mlog_errno(ret); break; @@ -4651,11 +4652,12 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, { int ret, offset; u64 value_blk; - struct buffer_head *value_bh = NULL; - struct ocfs2_xattr_value_root *xv; struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_header *xh = bucket_xh(bucket); size_t blocksize = inode->i_sb->s_blocksize; + struct ocfs2_xattr_value_buf vb = { + .vb_access = ocfs2_journal_access, + }; xe = &xh->xh_entries[xe_off]; @@ -4669,11 +4671,11 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, /* We don't allow ocfs2_xattr_value to be stored in different block. */ BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize); - value_bh = bucket->bu_bhs[value_blk]; - BUG_ON(!value_bh); + vb.vb_bh = bucket->bu_bhs[value_blk]; + BUG_ON(!vb.vb_bh); - xv = (struct ocfs2_xattr_value_root *) - (value_bh->b_data + offset % blocksize); + vb.vb_xv = (struct ocfs2_xattr_value_root *) + (vb.vb_bh->b_data + offset % blocksize); ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); @@ -4691,7 +4693,7 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, */ mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", xe_off, (unsigned long long)bucket_blkno(bucket), len); - ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len, ctxt); + ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); if (ret) { mlog_errno(ret); goto out_dirty; -- cgit From 0c748e95327d00e9eb19d0f34b32147ecbc02137 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 15:46:15 -0800 Subject: ocfs2: Pass value buf to ocfs2_xattr_update_entry(). ocfs2_xattr_update_entry() updates the entry portion of an xattr buffer. This can be part of multiple metadata block types, so pass the buffer in via an ocfs2_xattr_value_buf. 
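The payoff, sketched loosely here with error paths trimmed (not part of the patch), is that the helper's journal round-trip no longer hard-codes an access function; it calls whatever the caller stored in the value buf and dirties that same buffer_head afterwards.

	ret = vb->vb_access(handle, inode, vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* ... update the entry through xs->here ... */

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret < 0)
		mlog_errno(ret);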
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 409f9eeec703..6a056122771d 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1282,12 +1282,13 @@ static int ocfs2_xattr_update_entry(struct inode *inode, handle_t *handle, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_value_buf *vb, size_t offs) { int ret; - ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = vb->vb_access(handle, inode, vb->vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -1301,7 +1302,7 @@ static int ocfs2_xattr_update_entry(struct inode *inode, ocfs2_xattr_set_local(xs->here, 0); ocfs2_xattr_hash_entry(inode, xs->header, xs->here); - ret = ocfs2_journal_dirty(handle, xs->xattr_bh); + ret = ocfs2_journal_dirty(handle, vb->vb_bh); if (ret < 0) mlog_errno(ret); out: @@ -1345,7 +1346,7 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, mlog_errno(ret); return ret; } - ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, offs); + ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, &vb, offs); if (ret < 0) { mlog_errno(ret); return ret; @@ -1574,6 +1575,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, handle, xi, xs, + &vb, offs); if (ret < 0) { mlog_errno(ret); -- cgit From 512620f44df85df87348fc9a6fc54fcaa254b8d3 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 15:58:35 -0800 Subject: ocfs2: Use ocfs2_xattr_value_buf in ocfs2_xattr_set_entry(). ocfs2_xattr_set_entry is the function that knows what type of block it is setting into. This is what we wanted from ocfs2_xattr_value_buf. Plus, moving the value buf up into ocfs2_xattr_set_entry() allows us to pass it into ocfs2_xattr_set_value_outside() and ocfs2_xattr_cleanup(). 
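The selection itself is compact. Reproduced here without diff markers for readability, the new code in ocfs2_xattr_set_entry() defaults to the dinode access function for inline xattrs and switches to the xattr-block one when the search landed in a separate block.

	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = xs->xattr_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		BUG_ON(xs->xattr_bh == xs->inode_bh);
		vb.vb_access = ocfs2_journal_access_xb;
	} else
		BUG_ON(xs->xattr_bh != xs->inode_bh);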
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 53 +++++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 6a056122771d..c08b5e8746c3 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1252,6 +1252,7 @@ static int ocfs2_xattr_cleanup(struct inode *inode, handle_t *handle, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_value_buf *vb, size_t offs) { int ret = 0; @@ -1259,8 +1260,8 @@ static int ocfs2_xattr_cleanup(struct inode *inode, void *val = xs->base + offs; size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = vb->vb_access(handle, inode, vb->vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -1271,7 +1272,7 @@ static int ocfs2_xattr_cleanup(struct inode *inode, memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry)); memset(val, 0, size); - ret = ocfs2_journal_dirty(handle, xs->xattr_bh); + ret = ocfs2_journal_dirty(handle, vb->vb_bh); if (ret < 0) mlog_errno(ret); out: @@ -1318,6 +1319,7 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt, + struct ocfs2_xattr_value_buf *vb, size_t offs) { size_t name_len = strlen(xi->name); @@ -1325,10 +1327,6 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, struct ocfs2_xattr_value_root *xv = NULL; size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; int ret = 0; - struct ocfs2_xattr_value_buf vb = { - .vb_bh = xs->xattr_bh, - .vb_access = ocfs2_journal_access - }; memset(val, 0, size); memcpy(val, xi->name, name_len); @@ -1339,19 +1337,19 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode, xv->xr_list.l_tree_depth = 0; xv->xr_list.l_count = cpu_to_le16(1); xv->xr_list.l_next_free_rec = 0; - vb.vb_xv = xv; + vb->vb_xv = xv; - ret = ocfs2_xattr_value_truncate(inode, &vb, xi->value_len, ctxt); + ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt); if (ret < 0) { mlog_errno(ret); return ret; } - ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, &vb, offs); + ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs); if (ret < 0) { mlog_errno(ret); return ret; } - ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb.vb_xv, + ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb->vb_xv, xi->value, xi->value_len); if (ret < 0) mlog_errno(ret); @@ -1488,6 +1486,16 @@ static int ocfs2_xattr_set_entry(struct inode *inode, .value = xi->value, .value_len = xi->value_len, }; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = xs->xattr_bh, + .vb_access = ocfs2_journal_access_di, + }; + + if (!(flag & OCFS2_INLINE_XATTR_FL)) { + BUG_ON(xs->xattr_bh == xs->inode_bh); + vb.vb_access = ocfs2_journal_access_xb; + } else + BUG_ON(xs->xattr_bh != xs->inode_bh); /* Compute min_offs, last and free space. 
*/ last = xs->header->xh_entries; @@ -1543,18 +1551,14 @@ static int ocfs2_xattr_set_entry(struct inode *inode, if (ocfs2_xattr_is_local(xs->here) && size == size_l) { /* Replace existing local xattr with tree root */ ret = ocfs2_xattr_set_value_outside(inode, xi, xs, - ctxt, offs); + ctxt, &vb, offs); if (ret < 0) mlog_errno(ret); goto out; } else if (!ocfs2_xattr_is_local(xs->here)) { /* For existing xattr which has value outside */ - struct ocfs2_xattr_value_buf vb = { - .vb_bh = xs->xattr_bh, - .vb_xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(name_len)), - .vb_access = ocfs2_journal_access, - }; + vb.vb_xv = (struct ocfs2_xattr_value_root *) + (val + OCFS2_XATTR_SIZE(name_len)); if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { /* @@ -1605,16 +1609,16 @@ static int ocfs2_xattr_set_entry(struct inode *inode, } } - ret = ocfs2_journal_access(handle, inode, xs->inode_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = vb.vb_access(handle, inode, vb.vb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -1674,7 +1678,8 @@ static int ocfs2_xattr_set_entry(struct inode *inode, * This is the second step for value size > INLINE_SIZE. */ size_t offs = le16_to_cpu(xs->here->xe_name_offset); - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt, offs); + ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt, + &vb, offs); if (ret < 0) { int ret2; @@ -1684,7 +1689,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, * the junk tree root we have already set in local. */ ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle, - xi, xs, offs); + xi, xs, &vb, offs); if (ret2 < 0) mlog_errno(ret2); } -- cgit From 4311901daabe1d0f22cfcf86c57ad450f14b4e9f Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 16:24:43 -0800 Subject: ocfs2: Pass value buf to ocfs2_remove_value_outside(). ocfs2_remove_value_outside() needs to know the type of buffer it is looking at. Pass in an ocfs2_xattr_value_buf. 
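Concretely (a trimmed rendering of the hunk below, error handling elided), the loop re-points the caller's value buf at each out-of-line value root before truncating it away.

	void *val = (void *)header + le16_to_cpu(entry->xe_name_offset);

	vb->vb_xv = (struct ocfs2_xattr_value_root *)
		(val + OCFS2_XATTR_SIZE(entry->xe_name_len));
	ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);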
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index c08b5e8746c3..d2760e644751 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1699,7 +1699,7 @@ out: } static int ocfs2_remove_value_outside(struct inode*inode, - struct buffer_head *bh, + struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_header *header) { int ret = 0, i; @@ -1720,17 +1720,13 @@ static int ocfs2_remove_value_outside(struct inode*inode, struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; if (!ocfs2_xattr_is_local(entry)) { - struct ocfs2_xattr_value_buf vb = { - .vb_bh = bh, - .vb_access = ocfs2_journal_access, - }; void *val; val = (void *)header + le16_to_cpu(entry->xe_name_offset); - vb.vb_xv = (struct ocfs2_xattr_value_root *) + vb->vb_xv = (struct ocfs2_xattr_value_root *) (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); - ret = ocfs2_xattr_value_truncate(inode, &vb, 0, &ctxt); + ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); if (ret < 0) { mlog_errno(ret); break; @@ -1752,12 +1748,16 @@ static int ocfs2_xattr_ibody_remove(struct inode *inode, struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_xattr_header *header; int ret; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = di_bh, + .vb_access = ocfs2_journal_access_di, + }; header = (struct ocfs2_xattr_header *) ((void *)di + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); - ret = ocfs2_remove_value_outside(inode, di_bh, header); + ret = ocfs2_remove_value_outside(inode, &vb, header); return ret; } @@ -1767,11 +1767,15 @@ static int ocfs2_xattr_block_remove(struct inode *inode, { struct ocfs2_xattr_block *xb; int ret = 0; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = blk_bh, + .vb_access = ocfs2_journal_access_xb, + }; xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header); - ret = ocfs2_remove_value_outside(inode, blk_bh, header); + ret = ocfs2_remove_value_outside(inode, &vb, header); } else ret = ocfs2_delete_xattr_index_block(inode, blk_bh); -- cgit From 84008972491ca91b240f106191519781dabb8016 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 9 Dec 2008 16:11:49 -0800 Subject: ocfs2: Use proper journal_access function in xattr.c Change the rest of the naked ocfs2_journal_access() calls in fs/ocfs2/xattr.c to use the appropriate ocfs2_journal_access_*() call for their metadata type. 
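All of these wrappers share the ocfs2_journal_access_func signature added to ocfs2.h earlier in the series, so each conversion is a mechanical rename whose practical effect is that the journal can attach the checksum trigger that matches the buffer's on-disk type (_di for dinode-backed buffers, _xb for xattr blocks, and similarly _gd, _dq and friends elsewhere in this series). Schematically, and only as an illustration:

	/* before: untyped access, no metadata-specific commit trigger */
	ret = ocfs2_journal_access(handle, inode, di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);

	/* after: typed access for a buffer that holds an ocfs2_dinode */
	ret = ocfs2_journal_access_di(handle, inode, di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);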
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d2760e644751..17028aa7bc26 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1894,8 +1894,8 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) mlog_errno(ret); goto out; } - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; @@ -2103,8 +2103,8 @@ static int ocfs2_xattr_block_set(struct inode *inode, int ret; if (!xs->xattr_bh) { - ret = ocfs2_journal_access(handle, inode, xs->inode_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; @@ -2121,8 +2121,8 @@ static int ocfs2_xattr_block_set(struct inode *inode, new_bh = sb_getblk(inode->i_sb, first_blkno); ocfs2_set_new_buffer_uptodate(inode, new_bh); - ret = ocfs2_journal_access(handle, inode, new_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_xb(handle, inode, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; @@ -3377,8 +3377,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, */ down_write(&oi->ip_alloc_sem); - ret = ocfs2_journal_access(handle, inode, xb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_xb(handle, inode, xb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -4216,8 +4216,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_xb(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto leave; @@ -4808,8 +4808,8 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, goto out; } - ret = ocfs2_journal_access(handle, inode, root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_xb(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; -- cgit From 87d35a74b15ec703910a63e0667692fb5e267be0 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Wed, 10 Dec 2008 17:36:25 -0800 Subject: ocfs2: Add directory block trailers. Future ocfs2 features metaecc and indexed directories need to store a little bit of data in each dirblock. For compatibility, we place this in a trailer at the end of the dirblock. The trailer plays itself as an empty dirent, so that if the features are turned off, it can be reused without requiring a tunefs scan. This code adds the trailer and validates it when the block is read in. [ Mark is the original author, but I reinserted this code before his dir index work. 
-- Joel ] Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/dir.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++++---- fs/ocfs2/ocfs2.h | 3 + fs/ocfs2/ocfs2_fs.h | 29 ++++++++ 3 files changed, 215 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 45e4e03d8f71..1efd0ab680cf 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -83,6 +83,63 @@ static int ocfs2_do_extend_dir(struct super_block *sb, struct ocfs2_alloc_context *meta_ac, struct buffer_head **new_bh); +/* + * These are distinct checks because future versions of the file system will + * want to have a trailing dirent structure independent of indexing. + */ +static int ocfs2_dir_has_trailer(struct inode *dir) +{ + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + return ocfs2_meta_ecc(OCFS2_SB(dir->i_sb)); +} + +static int ocfs2_supports_dir_trailer(struct ocfs2_super *osb) +{ + return ocfs2_meta_ecc(osb); +} + +static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb) +{ + return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer); +} + +#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb)))) + +/* + * XXX: This is executed once on every dirent. We should consider optimizing + * it. + */ +static int ocfs2_skip_dir_trailer(struct inode *dir, + struct ocfs2_dir_entry *de, + unsigned long offset, + unsigned long blklen) +{ + unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer); + + if (!ocfs2_dir_has_trailer(dir)) + return 0; + + if (offset != toff) + return 0; + + return 1; +} + +static void ocfs2_init_dir_trailer(struct inode *inode, + struct buffer_head *bh) +{ + struct ocfs2_dir_block_trailer *trailer; + + trailer = ocfs2_trailer_from_bh(bh, inode->i_sb); + strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE); + trailer->db_compat_rec_len = + cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer)); + trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); + trailer->db_blkno = cpu_to_le64(bh->b_blocknr); +} + /* * bh passed here can be an inode block or a dir data block, depending * on the inode inline data flag. @@ -232,16 +289,60 @@ static int ocfs2_read_dir_block(struct inode *inode, u64 v_block, { int rc = 0; struct buffer_head *tmp = *bh; + struct ocfs2_dir_block_trailer *trailer; rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags, ocfs2_validate_dir_block); - if (rc) + if (rc) { mlog_errno(rc); + goto out; + } + + /* + * We check the trailer here rather than in + * ocfs2_validate_dir_block() because that function doesn't have + * the inode to test. 
+ */ + if (!(flags & OCFS2_BH_READAHEAD) && + ocfs2_dir_has_trailer(inode)) { + trailer = ocfs2_trailer_from_bh(tmp, inode->i_sb); + if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) { + rc = -EINVAL; + ocfs2_error(inode->i_sb, + "Invalid dirblock #%llu: " + "signature = %.*s\n", + (unsigned long long)tmp->b_blocknr, 7, + trailer->db_signature); + goto out; + } + if (le64_to_cpu(trailer->db_blkno) != tmp->b_blocknr) { + rc = -EINVAL; + ocfs2_error(inode->i_sb, + "Directory block #%llu has an invalid " + "db_blkno of %llu", + (unsigned long long)tmp->b_blocknr, + (unsigned long long)le64_to_cpu(trailer->db_blkno)); + goto out; + } + if (le64_to_cpu(trailer->db_parent_dinode) != + OCFS2_I(inode)->ip_blkno) { + rc = -EINVAL; + ocfs2_error(inode->i_sb, + "Directory block #%llu on dinode " + "#%llu has an invalid parent_dinode " + "of %llu", + (unsigned long long)tmp->b_blocknr, + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)le64_to_cpu(trailer->db_blkno)); + goto out; + } + } /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */ - if (!rc && !*bh) + if (!*bh) *bh = tmp; +out: return rc ? -EIO : 0; } @@ -581,6 +682,16 @@ int __ocfs2_add_entry(handle_t *handle, goto bail; } + /* We're guaranteed that we should have space, so we + * can't possibly have hit the trailer...right? */ + mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size), + "Hit dir trailer trying to insert %.*s " + "(namelen %d) into directory %llu. " + "offset is %lu, trailer offset is %d\n", + namelen, name, namelen, + (unsigned long long)parent_fe_bh->b_blocknr, + offset, ocfs2_dir_trailer_blk_off(dir->i_sb)); + if (ocfs2_dirent_would_fit(de, rec_len)) { dir->i_mtime = dir->i_ctime = CURRENT_TIME; retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); @@ -622,6 +733,7 @@ int __ocfs2_add_entry(handle_t *handle, retval = 0; goto bail; } + offset += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len)); } @@ -1059,9 +1171,15 @@ int ocfs2_empty_dir(struct inode *inode) return !priv.seen_other; } -static void ocfs2_fill_initial_dirents(struct inode *inode, - struct inode *parent, - char *start, unsigned int size) +/* + * Fills "." and ".." dirents in a new directory block. Returns dirent for + * "..", which might be used during creation of a directory with a trailing + * header. It is otherwise safe to ignore the return code. 
+ */ +static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode, + struct inode *parent, + char *start, + unsigned int size) { struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; @@ -1078,6 +1196,8 @@ static void ocfs2_fill_initial_dirents(struct inode *inode, de->name_len = 2; strcpy(de->name, ".."); ocfs2_set_de_type(de, S_IFDIR); + + return de; } /* @@ -1130,10 +1250,15 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, struct ocfs2_alloc_context *data_ac) { int status; + unsigned int size = osb->sb->s_blocksize; struct buffer_head *new_bh = NULL; + struct ocfs2_dir_entry *de; mlog_entry_void(); + if (ocfs2_supports_dir_trailer(osb)) + size = ocfs2_dir_trailer_blk_off(parent->i_sb); + status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh, data_ac, NULL, &new_bh); if (status < 0) { @@ -1151,8 +1276,9 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, } memset(new_bh->b_data, 0, osb->sb->s_blocksize); - ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, - osb->sb->s_blocksize); + de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size); + if (ocfs2_supports_dir_trailer(osb)) + ocfs2_init_dir_trailer(inode, new_bh); status = ocfs2_journal_dirty(handle, new_bh); if (status < 0) { @@ -1193,13 +1319,27 @@ int ocfs2_fill_new_dir(struct ocfs2_super *osb, data_ac); } +/* + * Expand rec_len of the rightmost dirent in a directory block so that it + * contains the end of our valid space for dirents. We do this during + * expansion from an inline directory to one with extents. The first dir block + * in that case is taken from the inline data portion of the inode block. + * + * We add the dir trailer if this filesystem wants it. + */ static void ocfs2_expand_last_dirent(char *start, unsigned int old_size, - unsigned int new_size) + struct super_block *sb) { struct ocfs2_dir_entry *de; struct ocfs2_dir_entry *prev_de; char *de_buf, *limit; - unsigned int bytes = new_size - old_size; + unsigned int new_size = sb->s_blocksize; + unsigned int bytes; + + if (ocfs2_supports_dir_trailer(OCFS2_SB(sb))) + new_size = ocfs2_dir_trailer_blk_off(sb); + + bytes = new_size - old_size; limit = start + old_size; de_buf = start; @@ -1316,8 +1456,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir)); memset(dirdata_bh->b_data + i_size_read(dir), 0, sb->s_blocksize - i_size_read(dir)); - ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), - sb->s_blocksize); + ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), sb); + if (ocfs2_supports_dir_trailer(osb)) + ocfs2_init_dir_trailer(dir, dirdata_bh); ret = ocfs2_journal_dirty(handle, dirdata_bh); if (ret) { @@ -1604,9 +1745,15 @@ do_extend: goto bail; } memset(new_bh->b_data, 0, sb->s_blocksize); + de = (struct ocfs2_dir_entry *) new_bh->b_data; de->inode = 0; - de->rec_len = cpu_to_le16(sb->s_blocksize); + if (ocfs2_dir_has_trailer(dir)) { + de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb)); + ocfs2_init_dir_trailer(dir, new_bh); + } else { + de->rec_len = cpu_to_le16(sb->s_blocksize); + } status = ocfs2_journal_dirty(handle, new_bh); if (status < 0) { mlog_errno(status); @@ -1648,11 +1795,21 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh, unsigned int *blocks_wanted) { int ret; + struct super_block *sb = dir->i_sb; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_dir_entry *de, *last_de = NULL; char *de_buf, 
*limit; unsigned long offset = 0; - unsigned int rec_len, new_rec_len; + unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize; + + /* + * This calculates how many free bytes we'd have in block zero, should + * this function force expansion to an extent tree. + */ + if (ocfs2_supports_dir_trailer(OCFS2_SB(sb))) + free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir); + else + free_space = dir->i_sb->s_blocksize - i_size_read(dir); de_buf = di->id2.i_data.id_data; limit = de_buf + i_size_read(dir); @@ -1669,6 +1826,11 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh, ret = -EEXIST; goto out; } + /* + * No need to check for a trailing dirent record here as + * they're not used for inline dirs. + */ + if (ocfs2_dirent_would_fit(de, rec_len)) { /* Ok, we found a spot. Return this bh and let * the caller actually fill it in. */ @@ -1689,7 +1851,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh, * dirent can be found. */ *blocks_wanted = 1; - new_rec_len = le16_to_cpu(last_de->rec_len) + (dir->i_sb->s_blocksize - i_size_read(dir)); + new_rec_len = le16_to_cpu(last_de->rec_len) + free_space; if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len))) *blocks_wanted = 2; @@ -1707,6 +1869,7 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, struct ocfs2_dir_entry *de; struct super_block *sb = dir->i_sb; int status; + int blocksize = dir->i_sb->s_blocksize; status = ocfs2_read_dir_block(dir, 0, &bh, 0); if (status) { @@ -1748,6 +1911,11 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, status = -EEXIST; goto bail; } + + if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize, + blocksize)) + goto next; + if (ocfs2_dirent_would_fit(de, rec_len)) { /* Ok, we found a spot. Return this bh and let * the caller actually fill it in. */ @@ -1756,6 +1924,7 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, status = 0; goto bail; } +next: offset += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len)); } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index bad87d0a03c9..ad5c24a29edd 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -470,6 +470,9 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) #define OCFS2_IS_VALID_XATTR_BLOCK(ptr) \ (!strcmp((ptr)->xb_signature, OCFS2_XATTR_BLOCK_SIGNATURE)) +#define OCFS2_IS_VALID_DIR_TRAILER(ptr) \ + (!strcmp((ptr)->db_signature, OCFS2_DIR_TRAILER_SIGNATURE)) + static inline unsigned long ino_from_blkno(struct super_block *sb, u64 blkno) { diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 290fa26fba6e..af0013b9c17f 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -65,6 +65,7 @@ #define OCFS2_EXTENT_BLOCK_SIGNATURE "EXBLK01" #define OCFS2_GROUP_DESC_SIGNATURE "GROUP01" #define OCFS2_XATTR_BLOCK_SIGNATURE "XATTR01" +#define OCFS2_DIR_TRAILER_SIGNATURE "DIRTRL1" /* Compatibility flags */ #define OCFS2_HAS_COMPAT_FEATURE(sb,mask) \ @@ -751,6 +752,34 @@ struct ocfs2_dir_entry { /* Actual on-disk length specified by rec_len */ } __attribute__ ((packed)); +/* + * Per-block record for the unindexed directory btree. This is carefully + * crafted so that the rec_len and name_len records of an ocfs2_dir_entry are + * mirrored. That way, the directory manipulation code needs a minimal amount + * of update. + * + * NOTE: Keep this structure aligned to a multiple of 4 bytes. 
+ */ +struct ocfs2_dir_block_trailer { +/*00*/ __le64 db_compat_inode; /* Always zero. Was inode */ + + __le16 db_compat_rec_len; /* Backwards compatible with + * ocfs2_dir_entry. */ + __u8 db_compat_name_len; /* Always zero. Was name_len */ + __u8 db_reserved0; + __le16 db_reserved1; + __le16 db_free_rec_len; /* Size of largest empty hole + * in this block. (unused) */ +/*10*/ __u8 db_signature[8]; /* Signature for verification */ + __le64 db_reserved2; + __le64 db_free_next; /* Next block in list (unused) */ +/*20*/ __le64 db_blkno; /* Offset on disk, in blocks */ + __le64 db_parent_dinode; /* dinode which owns me, in + blocks */ +/*30*/ __le64 db_check; /* Error checking */ +/*40*/ +}; + /* * On disk allocator group structure for OCFS2 */ -- cgit From c175a518b4a1d514483abf61813ce5d855917164 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Wed, 10 Dec 2008 17:58:22 -0800 Subject: ocfs2: Checksum and ECC for directory blocks. Use the db_check field of ocfs2_dir_block_trailer to crc/ecc the dirblocks. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/dir.c | 37 +++++++++++++++++++++++++++++++++++-- fs/ocfs2/dir.h | 2 ++ fs/ocfs2/journal.c | 31 +++++++++++++++++++++++++++++-- fs/ocfs2/ocfs2_fs.h | 2 +- 4 files changed, 67 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 1efd0ab680cf..f2c4098cf337 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -48,6 +48,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "blockcheck.h" #include "dir.h" #include "dlmglue.h" #include "extent_map.h" @@ -107,6 +108,17 @@ static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb) #define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb)))) +/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make + * them more consistent? */ +struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize, + void *data) +{ + char *p = data; + + p += blocksize - sizeof(struct ocfs2_dir_block_trailer); + return (struct ocfs2_dir_block_trailer *)p; +} + /* * XXX: This is executed once on every dirent. We should consider optimizing * it. @@ -268,14 +280,35 @@ out: static int ocfs2_validate_dir_block(struct super_block *sb, struct buffer_head *bh) { + int rc; + struct ocfs2_dir_block_trailer *trailer = + ocfs2_trailer_from_bh(bh, sb); + + /* - * Nothing yet. We don't validate dirents here, that's handled + * We don't validate dirents here, that's handled * in-place when the code walks them. */ mlog(0, "Validating dirblock %llu\n", (unsigned long long)bh->b_blocknr); - return 0; + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. + * + * Note that we are safe to call this even if the directory + * doesn't have a trailer. Filesystems without metaecc will do + * nothing, and filesystems with it will have one. 
+ */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check); + if (rc) + mlog(ML_ERROR, "Checksum failed for dinode %llu\n", + (unsigned long long)bh->b_blocknr); + + return rc; } /* diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h index ce48b9080d87..c511e2e18e9f 100644 --- a/fs/ocfs2/dir.h +++ b/fs/ocfs2/dir.h @@ -83,4 +83,6 @@ int ocfs2_fill_new_dir(struct ocfs2_super *osb, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac); +struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize, + void *data); #endif /* OCFS2_DIR_H */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 3b54dba0f74b..57d7d25a2b9a 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -415,6 +415,26 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, ocfs2_block_check_compute(data, size, &dqt->dq_check); } +/* + * Directory blocks also have their own trigger because the + * struct ocfs2_block_check offset depends on the blocksize. + */ +static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers, + struct buffer_head *bh, + void *data, size_t size) +{ + struct ocfs2_dir_block_trailer *trailer = + ocfs2_dir_trailer_from_size(size, data); + + /* + * We aren't guaranteed to have the superblock here, so we + * must unconditionally compute the ecc data. + * __ocfs2_journal_access() will only set the triggers if + * metaecc is enabled. + */ + ocfs2_block_check_compute(data, size, &trailer->db_check); +} + static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, struct buffer_head *bh) { @@ -454,6 +474,13 @@ static struct ocfs2_triggers gd_triggers = { .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), }; +static struct ocfs2_triggers db_triggers = { + .ot_triggers = { + .t_commit = ocfs2_db_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, +}; + static struct ocfs2_triggers xb_triggers = { .ot_triggers = { .t_commit = ocfs2_commit_trigger, @@ -555,8 +582,8 @@ int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, struct buffer_head *bh, int type) { - /* Right now, nothing for dirblocks */ - return __ocfs2_journal_access(handle, inode, bh, NULL, type); + return __ocfs2_journal_access(handle, inode, bh, &db_triggers, + type); } int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index af0013b9c17f..698ef3d27121 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -776,7 +776,7 @@ struct ocfs2_dir_block_trailer { /*20*/ __le64 db_blkno; /* Offset on disk, in blocks */ __le64 db_parent_dinode; /* dinode which owns me, in blocks */ -/*30*/ __le64 db_check; /* Error checking */ +/*30*/ struct ocfs2_block_check db_check; /* Error checking */ /*40*/ }; -- cgit From d030cc978e9e636dc39ce9a9e8282d48698a3b30 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 11 Dec 2008 15:04:14 -0800 Subject: ocfs2: Validate superblock with checksum and ecc. The superblock is read via a raw call. Validate it after we find it from its signature. 
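In outline (condensed from the hunk that follows, not new code), the check reads the incompat feature word straight off the raw superblock, presumably because the normal feature helpers need state that is not set up this early in mount, and only then runs the block check over the buffer.

	if (le32_to_cpu(di->id2.i_super.s_feature_incompat) &
	    OCFS2_FEATURE_INCOMPAT_META_ECC) {
		status = ocfs2_block_check_validate(bh->b_data, bh->b_size,
						    &di->i_check);
		if (status)
			goto out;
	}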
Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/super.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2eb657c3e7a8..43ed11345b59 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -52,6 +52,7 @@ #include "ocfs1_fs_compat.h" #include "alloc.h" +#include "blockcheck.h" #include "dlmglue.h" #include "export.h" #include "extent_map.h" @@ -1989,6 +1990,15 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { + /* We have to do a raw check of the feature here */ + if (le32_to_cpu(di->id2.i_super.s_feature_incompat) & + OCFS2_FEATURE_INCOMPAT_META_ECC) { + status = ocfs2_block_check_validate(bh->b_data, + bh->b_size, + &di->i_check); + if (status) + goto out; + } status = -EINVAL; if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) { mlog(ML_ERROR, "found superblock with incorrect block " @@ -2030,6 +2040,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, } } +out: mlog_exit(status); return status; } -- cgit From 9d28cfb73f3abccce001daf2d247b16bf20e2248 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Thu, 16 Oct 2008 17:53:29 -0700 Subject: ocfs2: Enable metadata checksums. Add OCFS2_FEATURE_INCOMPAT_META_ECC to the list of supported features. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/ocfs2_fs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 698ef3d27121..c7ae45aaa36c 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -94,7 +94,8 @@ | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \ | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \ | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \ - | OCFS2_FEATURE_INCOMPAT_XATTR) + | OCFS2_FEATURE_INCOMPAT_XATTR \ + | OCFS2_FEATURE_INCOMPAT_META_ECC) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) -- cgit From e798b3f8a920c82a8e556dd54df97f0d3d0f9144 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 15 Dec 2008 17:13:48 -0800 Subject: ocfs2: Don't hand-code xor in ocfs2_hamming_encode(). When I wrote ocfs2_hamming_encode(), I was following documentation of the algorithm and didn't have quite the (possibly still imperfect) grasp of it I do now. As part of this, I literally hand-coded xor. I would test a bit, and then add that bit via xor to the parity word. I can, of course, just do a single xor of the parity word and the source word (the code buffer bit offset). This cuts CPU usage by 53% on a mostly populated buffer (an inode containing utmp.h inline). Joel Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/blockcheck.c | 67 +++++++++++++++------------------------------------ 1 file changed, 20 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index 2ce6ae5e4b8c..1d5083cef3a2 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -31,7 +31,6 @@ #include "blockcheck.h" - /* * We use the following conventions: * @@ -39,26 +38,6 @@ * p = # parity bits * c = # total code bits (d + p) */ -static int calc_parity_bits(unsigned int d) -{ - unsigned int p; - - /* - * Bits required for Single Error Correction is as follows: - * - * d + p + 1 <= 2^p - * - * We're restricting ourselves to 31 bits of parity, that should be - * sufficient. 
- */ - for (p = 1; p < 32; p++) - { - if ((d + p + 1) <= (1 << p)) - return p; - } - - return 0; -} /* * Calculate the bit offset in the hamming code buffer based on the bit's @@ -109,10 +88,9 @@ static unsigned int calc_code_bit(unsigned int i) */ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr) { - unsigned int p = calc_parity_bits(nr + d); - unsigned int i, j, b; + unsigned int i, b; - BUG_ON(!p); + BUG_ON(!d); /* * b is the hamming code bit number. Hamming code specifies a @@ -131,27 +109,23 @@ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr */ b = calc_code_bit(nr + i); - for (j = 0; j < p; j++) - { - /* - * Data bits in the resultant code are checked by - * parity bits that are part of the bit number - * representation. Huh? - * - * - * In other words, the parity bit at position 2^k - * checks bits in positions having bit k set in - * their binary representation. Conversely, for - * instance, bit 13, i.e. 1101(2), is checked by - * bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1. - * - * - * Note that 'k' is the _code_ bit number. 'b' in - * our loop. - */ - if (b & (1 << j)) - parity ^= (1 << j); - } + /* + * Data bits in the resultant code are checked by + * parity bits that are part of the bit number + * representation. Huh? + * + * + * In other words, the parity bit at position 2^k + * checks bits in positions having bit k set in + * their binary representation. Conversely, for + * instance, bit 13, i.e. 1101(2), is checked by + * bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1. + * + * + * Note that 'k' is the _code_ bit number. 'b' in + * our loop. + */ + parity ^= b; } /* While the data buffer was treated as little endian, the @@ -174,10 +148,9 @@ u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize) void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, unsigned int fix) { - unsigned int p = calc_parity_bits(nr + d); unsigned int i, b; - BUG_ON(!p); + BUG_ON(!d); /* * If the bit to fix has an hweight of 1, it's a parity bit. One -- cgit From 7bb458a58588f397068e4166c615e9fcc7480c16 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Mon, 15 Dec 2008 18:24:33 -0800 Subject: ocfs2: Another hamming code optimization. In the calc_code_bit() function, we must find all powers of two beneath the code bit number, *after* it's shifted by those powers of two. This requires a loop to see where it ends up. We can optimize it by starting at its most significant bit. This shaves 32% off the time, for a total of 67.6% shaved off of the original, naive implementation. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/blockcheck.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index 1d5083cef3a2..f102ec939c90 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -39,6 +39,35 @@ * c = # total code bits (d + p) */ + +/* + * Find the log base 2 of 32-bit v. + * + * Algorithm found on http://graphics.stanford.edu/~seander/bithacks.html, + * by Sean Eron Anderson. Code on the page is in the public domain unless + * otherwise noted. + * + * This particular algorithm is credited to Eric Cole. 
+ */ +static int find_highest_bit_set(unsigned int v) +{ + + static const int MultiplyDeBruijnBitPosition[32] = + { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + + v |= v >> 1; /* first round down to power of 2 */ + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = (v >> 1) + 1; + + return MultiplyDeBruijnBitPosition[(u32)(v * 0x077CB531UL) >> 27]; +} + /* * Calculate the bit offset in the hamming code buffer based on the bit's * offset in the data buffer. Since the hamming code reserves all @@ -63,13 +92,22 @@ static unsigned int calc_code_bit(unsigned int i) */ b = i + 1; + /* + * As a cheat, we know that all bits below b's highest bit must be + * parity bits, so we can start there. + */ + p = find_highest_bit_set(b); + b += p; + /* * For every power of two below our bit number, bump our bit. * * We compare with (b + 1) becuase we have to compare with what b * would be _if_ it were bumped up by the parity bit. Capice? + * + * We start p at 2^p because of the cheat above. */ - for (p = 0; (1 << p) < (b + 1); p++) + for (p = (1 << p); p < (b + 1); p <<= 1) b++; return b; -- cgit From 58896c4d0e5868360ea0693c607d5bf74f79da6b Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 16 Dec 2008 13:54:40 -0800 Subject: ocfs2: One more hamming code optimization. The previous optimization used a fast find-highest-bit-set operation to give us a good starting point in calc_code_bit(). This version lets the caller cache the previous code buffer bit offset. Thus, the next call always starts where the last one left off. This reduces the calculation another 39%, for a total 80% reduction from the original, naive implementation. At least, on my machine. This also brings the parity calculation to within an order of magnitude of the crc32 calculation. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/blockcheck.c | 61 ++++++++++++++++----------------------------------- 1 file changed, 19 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index f102ec939c90..2a947c44e594 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -40,34 +40,6 @@ */ -/* - * Find the log base 2 of 32-bit v. - * - * Algorithm found on http://graphics.stanford.edu/~seander/bithacks.html, - * by Sean Eron Anderson. Code on the page is in the public domain unless - * otherwise noted. - * - * This particular algorithm is credited to Eric Cole. - */ -static int find_highest_bit_set(unsigned int v) -{ - - static const int MultiplyDeBruijnBitPosition[32] = - { - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 - }; - - v |= v >> 1; /* first round down to power of 2 */ - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v = (v >> 1) + 1; - - return MultiplyDeBruijnBitPosition[(u32)(v * 0x077CB531UL) >> 27]; -} - /* * Calculate the bit offset in the hamming code buffer based on the bit's * offset in the data buffer. Since the hamming code reserves all @@ -81,10 +53,14 @@ static int find_highest_bit_set(unsigned int v) * so it's a parity bit. 2 is a power of two (2^1), so it's a parity bit. * 3 is not a power of two. So bit 1 of the data buffer ends up as bit 3 * in the code buffer. + * + * The caller can pass in *p if it wants to keep track of the most recent + * number of parity bits added. This allows the function to start the + * calculation at the last place. 
*/ -static unsigned int calc_code_bit(unsigned int i) +static unsigned int calc_code_bit(unsigned int i, unsigned int *p_cache) { - unsigned int b, p; + unsigned int b, p = 0; /* * Data bits are 0-based, but we're talking code bits, which @@ -92,24 +68,25 @@ static unsigned int calc_code_bit(unsigned int i) */ b = i + 1; - /* - * As a cheat, we know that all bits below b's highest bit must be - * parity bits, so we can start there. - */ - p = find_highest_bit_set(b); + /* Use the cache if it is there */ + if (p_cache) + p = *p_cache; b += p; /* * For every power of two below our bit number, bump our bit. * - * We compare with (b + 1) becuase we have to compare with what b + * We compare with (b + 1) because we have to compare with what b * would be _if_ it were bumped up by the parity bit. Capice? * - * We start p at 2^p because of the cheat above. + * p is set above. */ - for (p = (1 << p); p < (b + 1); p <<= 1) + for (; (1 << p) < (b + 1); p++) b++; + if (p_cache) + *p_cache = p; + return b; } @@ -126,7 +103,7 @@ static unsigned int calc_code_bit(unsigned int i) */ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr) { - unsigned int i, b; + unsigned int i, b, p = 0; BUG_ON(!d); @@ -145,7 +122,7 @@ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr * i is the offset in this hunk, nr + i is the total bit * offset. */ - b = calc_code_bit(nr + i); + b = calc_code_bit(nr + i, &p); /* * Data bits in the resultant code are checked by @@ -201,7 +178,7 @@ void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, * nr + d is the bit right past the data hunk we're looking at. * If fix after that, nothing to do */ - if (fix >= calc_code_bit(nr + d)) + if (fix >= calc_code_bit(nr + d, NULL)) return; /* @@ -209,7 +186,7 @@ void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, * start b at the offset in the code buffer. See hamming_encode() * for a more detailed description of 'b'. */ - b = calc_code_bit(nr); + b = calc_code_bit(nr, NULL); /* If the fix is before this hunk, nothing to do */ if (fix < b) return; -- cgit From 2b83256407687613e906bee93d98a25339128a4d Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 16 Dec 2008 15:49:19 -0800 Subject: ocfs2/dlm: Fix a race between migrate request and exit domain Patch address a racing migrate request message and an exit domain message. Instead of blocking exit domains for the duration of the migrate, we ignore failure to deliver that message. This is because an exiting domain should not have any active locks and thus has no role to play in the migration. Signed-off-by: Sunil Mushran Signed-off-by: Mark Fasheh --- fs/ocfs2/dlm/dlmmaster.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 44f87caf3683..92fd1d7d6126 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2949,7 +2949,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, struct dlm_node_iter *iter) { struct dlm_migrate_request migrate; - int ret, status = 0; + int ret, skip, status = 0; int nodenum; memset(&migrate, 0, sizeof(migrate)); @@ -2966,12 +2966,27 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, nodenum == new_master) continue; + /* We could race exit domain. If exited, skip. 
*/ + spin_lock(&dlm->spinlock); + skip = (!test_bit(nodenum, dlm->domain_map)); + spin_unlock(&dlm->spinlock); + if (skip) { + clear_bit(nodenum, iter->node_map); + continue; + } + ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, &migrate, sizeof(migrate), nodenum, &status); - if (ret < 0) - mlog_errno(ret); - else if (status < 0) { + if (ret < 0) { + mlog(0, "migrate_request returned %d!\n", ret); + if (!dlm_is_host_down(ret)) { + mlog(ML_ERROR, "unhandled error=%d!\n", ret); + BUG(); + } + clear_bit(nodenum, iter->node_map); + ret = 0; + } else if (status < 0) { mlog(0, "migrate request (node %u) returned %d!\n", nodenum, status); ret = status; -- cgit From 57dff2676eb68d805883a2204faaa5339ac44e03 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 16 Dec 2008 15:49:20 -0800 Subject: ocfs2/dlm: Clean up errors in dlm_proxy_ast_handler() Patch cleans printed errors in dlm_proxy_ast_handler(). The errors now includes the node number that sent the (b)ast. Also it reduces the number of endian swaps of the cookie. Signed-off-by: Sunil Mushran Signed-off-by: Mark Fasheh --- fs/ocfs2/dlm/dlmast.c | 52 ++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 644bee55d8ba..d07ddbe4b283 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -275,6 +275,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, struct list_head *iter, *head=NULL; u64 cookie; u32 flags; + u8 node; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); @@ -286,18 +287,21 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, name = past->name; locklen = past->namelen; - cookie = be64_to_cpu(past->cookie); + cookie = past->cookie; flags = be32_to_cpu(past->flags); + node = past->node_idx; if (locklen > DLM_LOCKID_NAME_MAX) { ret = DLM_IVBUFLEN; - mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n"); + mlog(ML_ERROR, "Invalid name length (%d) in proxy ast " + "handler!\n", locklen); goto leave; } if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == (LKM_PUT_LVB|LKM_GET_LVB)) { - mlog(ML_ERROR, "both PUT and GET lvb specified\n"); + mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n", + flags); ret = DLM_BADARGS; goto leave; } @@ -310,22 +314,21 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, if (past->type != DLM_AST && past->type != DLM_BAST) { mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" - "name=%.*s\n", past->type, - dlm_get_lock_cookie_node(cookie), - dlm_get_lock_cookie_seq(cookie), - locklen, name); + "name=%.*s, node=%u\n", past->type, + dlm_get_lock_cookie_node(be64_to_cpu(cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), + locklen, name, node); ret = DLM_IVLOCKID; goto leave; } res = dlm_lookup_lockres(dlm, name, locklen); if (!res) { - mlog(0, "got %sast for unknown lockres! " - "cookie=%u:%llu, name=%.*s, namelen=%u\n", - past->type == DLM_AST ? "" : "b", - dlm_get_lock_cookie_node(cookie), - dlm_get_lock_cookie_seq(cookie), - locklen, name, locklen); + mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, " + "name=%.*s, node=%u\n", (past->type == DLM_AST ? 
"" : "b"), + dlm_get_lock_cookie_node(be64_to_cpu(cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), + locklen, name, node); ret = DLM_IVLOCKID; goto leave; } @@ -337,12 +340,12 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { - mlog(0, "responding with DLM_RECOVERING!\n"); + mlog(0, "Responding with DLM_RECOVERING!\n"); ret = DLM_RECOVERING; goto unlock_out; } if (res->state & DLM_LOCK_RES_MIGRATING) { - mlog(0, "responding with DLM_MIGRATING!\n"); + mlog(0, "Responding with DLM_MIGRATING!\n"); ret = DLM_MIGRATING; goto unlock_out; } @@ -351,7 +354,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, lock = NULL; list_for_each(iter, head) { lock = list_entry (iter, struct dlm_lock, list); - if (be64_to_cpu(lock->ml.cookie) == cookie) + if (lock->ml.cookie == cookie) goto do_ast; } @@ -363,15 +366,15 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, list_for_each(iter, head) { lock = list_entry (iter, struct dlm_lock, list); - if (be64_to_cpu(lock->ml.cookie) == cookie) + if (lock->ml.cookie == cookie) goto do_ast; } - mlog(0, "got %sast for unknown lock! cookie=%u:%llu, " - "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", - dlm_get_lock_cookie_node(cookie), - dlm_get_lock_cookie_seq(cookie), - locklen, name, locklen); + mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, " + "node=%u\n", past->type == DLM_AST ? "" : "b", + dlm_get_lock_cookie_node(be64_to_cpu(cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), + locklen, name, node); ret = DLM_NORMAL; unlock_out: @@ -383,8 +386,8 @@ do_ast: if (past->type == DLM_AST) { /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->granted); - mlog(0, "ast: adding to granted list... type=%d, " - "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); + mlog(0, "ast: Adding to granted list... type=%d, " + "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); if (lock->ml.convert_type != LKM_IVMODE) { lock->ml.type = lock->ml.convert_type; lock->ml.convert_type = LKM_IVMODE; @@ -408,7 +411,6 @@ do_ast: dlm_do_local_bast(dlm, res, lock, past->blocked_type); leave: - if (res) dlm_lockres_put(res); -- cgit From d4f7e650e55af6b235871126f747da88600e8040 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 16 Dec 2008 15:49:21 -0800 Subject: ocfs2/dlm: Hold off sending lockres drop ref message while lockres is migrating During lockres purge, o2dlm sends a drop reference message to the lockres master. This patch delays the message if the lockres is being migrated. 
Fixes oss bugzilla#1012 http://oss.oracle.com/bugzilla/show_bug.cgi?id=1012 Signed-off-by: Sunil Mushran Signed-off-by: Mark Fasheh --- fs/ocfs2/dlm/dlmthread.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 4060bb328bc8..d1295203029f 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -181,7 +181,8 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); /* This ensures that clear refmap is sent after the set */ - __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); + __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_SETREF_INPROG | + DLM_LOCK_RES_MIGRATING)); spin_unlock(&res->spinlock); /* clear our bit from the master's refmap, ignore errors */ -- cgit From b0d4f817ba5de8adb875ace594554a96d7737710 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 16 Dec 2008 15:49:22 -0800 Subject: ocfs2/dlm: Fix race in adding/removing lockres' to/from the tracking list This patch adds a new lock, dlm->tracking_lock, to protect adding/removing lockres' to/from the dlm->tracking_list. We were previously using dlm->spinlock for the same, but that proved inadequate as we could be freeing a lockres from a context that did not hold that lock. As the new lock only protects this list, we can explicitly take it when removing the lockres from the tracking list. This bug was exposed when testing multiple processes concurrently flock() the same file. Signed-off-by: Sunil Mushran Signed-off-by: Mark Fasheh --- fs/ocfs2/dlm/dlmcommon.h | 3 +++ fs/ocfs2/dlm/dlmdebug.c | 53 ++++++++++++++++++++++-------------------------- fs/ocfs2/dlm/dlmdomain.c | 1 + fs/ocfs2/dlm/dlmmaster.c | 10 +++++++++ 4 files changed, 38 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index d5a86fb81a49..bb53714813ab 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -140,6 +140,7 @@ struct dlm_ctxt unsigned int purge_count; spinlock_t spinlock; spinlock_t ast_lock; + spinlock_t track_lock; char *name; u8 node_num; u32 key; @@ -316,6 +317,8 @@ struct dlm_lock_resource * put on a list for the dlm thread to run. 
*/ unsigned long last_used; + struct dlm_ctxt *dlm; + unsigned migration_pending:1; atomic_t asts_reserved; spinlock_t spinlock; diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 1b81dcba175d..b32f60a5acfb 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -630,43 +630,38 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos) { struct debug_lockres *dl = m->private; struct dlm_ctxt *dlm = dl->dl_ctxt; + struct dlm_lock_resource *oldres = dl->dl_res; struct dlm_lock_resource *res = NULL; + struct list_head *track_list; - spin_lock(&dlm->spinlock); + spin_lock(&dlm->track_lock); + if (oldres) + track_list = &oldres->tracking; + else + track_list = &dlm->tracking_list; - if (dl->dl_res) { - list_for_each_entry(res, &dl->dl_res->tracking, tracking) { - if (dl->dl_res) { - dlm_lockres_put(dl->dl_res); - dl->dl_res = NULL; - } - if (&res->tracking == &dlm->tracking_list) { - mlog(0, "End of list found, %p\n", res); - dl = NULL; - break; - } + list_for_each_entry(res, track_list, tracking) { + if (&res->tracking == &dlm->tracking_list) + res = NULL; + else dlm_lockres_get(res); - dl->dl_res = res; - break; - } - } else { - if (!list_empty(&dlm->tracking_list)) { - list_for_each_entry(res, &dlm->tracking_list, tracking) - break; - dlm_lockres_get(res); - dl->dl_res = res; - } else - dl = NULL; + break; } + spin_unlock(&dlm->track_lock); - if (dl) { - spin_lock(&dl->dl_res->spinlock); - dump_lockres(dl->dl_res, dl->dl_buf, dl->dl_len - 1); - spin_unlock(&dl->dl_res->spinlock); - } + if (oldres) + dlm_lockres_put(oldres); - spin_unlock(&dlm->spinlock); + dl->dl_res = res; + + if (res) { + spin_lock(&res->spinlock); + dump_lockres(res, dl->dl_buf, dl->dl_len - 1); + spin_unlock(&res->spinlock); + } else + dl = NULL; + /* passed to seq_show */ return dl; } diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 63f8125824e8..d8d578f45613 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -1550,6 +1550,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, spin_lock_init(&dlm->spinlock); spin_lock_init(&dlm->master_lock); spin_lock_init(&dlm->ast_lock); + spin_lock_init(&dlm->track_lock); INIT_LIST_HEAD(&dlm->list); INIT_LIST_HEAD(&dlm->dirty_list); INIT_LIST_HEAD(&dlm->reco.resources); diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 92fd1d7d6126..cbf3abe24cdb 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -505,8 +505,10 @@ void dlm_change_lockres_owner(struct dlm_ctxt *dlm, static void dlm_lockres_release(struct kref *kref) { struct dlm_lock_resource *res; + struct dlm_ctxt *dlm; res = container_of(kref, struct dlm_lock_resource, refs); + dlm = res->dlm; /* This should not happen -- all lockres' have a name * associated with them at init time. 
*/ @@ -515,6 +517,7 @@ static void dlm_lockres_release(struct kref *kref) mlog(0, "destroying lockres %.*s\n", res->lockname.len, res->lockname.name); + spin_lock(&dlm->track_lock); if (!list_empty(&res->tracking)) list_del_init(&res->tracking); else { @@ -522,6 +525,9 @@ static void dlm_lockres_release(struct kref *kref) res->lockname.len, res->lockname.name); dlm_print_one_lock_resource(res); } + spin_unlock(&dlm->track_lock); + + dlm_put(dlm); if (!hlist_unhashed(&res->hash_node) || !list_empty(&res->granted) || @@ -595,6 +601,10 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm, res->migration_pending = 0; res->inflight_locks = 0; + /* put in dlm_lockres_release */ + dlm_grab(dlm); + res->dlm = dlm; + kref_init(&res->refs); /* just for consistency */ -- cgit From 7b791d68562e4ce5ab57cbacb10a1ad4ee33956e Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 16 Dec 2008 15:49:23 -0800 Subject: ocfs2/dlm: Fix race during lockres mastery dlm_get_lock_resource() is supposed to return a lock resource with a proper master. If multiple concurrent threads attempt to lookup the lockres for the same lockid while the lock mastery in underway, one or more threads are likely to return a lockres without a proper master. This patch makes the threads wait in dlm_get_lock_resource() while the mastery is underway, ensuring all threads return the lockres with a proper master. This issue is known to be limited to users using the flock() syscall. For all other fs operations, the ocfs2 dlmglue layer serializes the dlm op for each lockid. Users encountering this bug will see flock() return EINVAL and dmesg have the following error: ERROR: Dlm error "DLM_BADARGS" while calling dlmlock on resource : bad api args Reported-by: Coly Li Signed-off-by: Sunil Mushran Signed-off-by: Mark Fasheh --- fs/ocfs2/dlm/dlmmaster.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index cbf3abe24cdb..54e182a27caf 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -732,14 +732,21 @@ lookup: if (tmpres) { int dropping_ref = 0; + spin_unlock(&dlm->spinlock); + spin_lock(&tmpres->spinlock); + /* We wait for the other thread that is mastering the resource */ + if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { + __dlm_wait_on_lockres(tmpres); + BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); + } + if (tmpres->owner == dlm->node_num) { BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); dlm_lockres_grab_inflight_ref(dlm, tmpres); } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) dropping_ref = 1; spin_unlock(&tmpres->spinlock); - spin_unlock(&dlm->spinlock); /* wait until done messaging the master, drop our ref to allow * the lockres to be purged, start over. */ -- cgit From 71d548a6af36fe98c95fbd0522147f842bd5f054 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Fri, 5 Dec 2008 06:20:54 +0800 Subject: ocfs2/xattr: Remove extend_trans call and add its credits from the beginning Actually, when setting a new xattr value, we know it from the very beginning, and it isn't like the extension of bucket in which case we can't figure it out. So remove ocfs2_extend_trans in that function and calculate it before the transaction. It also relieve acl operation from the worry about the side effect of ocfs2_extend_trans. 
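As a rough sketch (reusing the names already visible in ocfs2_calc_xattr_set_need() in the hunk below, not a verbatim copy of it), the credits for an outside-stored value are now folded into the up-front reservation:

	/* value too large for inline storage: it will be written outside */
	if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
		credits += new_clusters *
			   ocfs2_clusters_to_blocks(inode->i_sb, 1);

so __ocfs2_xattr_set_value_outside() no longer has to grow the handle with ocfs2_extend_trans() while the transaction is already running.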
Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 17028aa7bc26..93a1ab4fe1da 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1169,7 +1169,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, const void *value, int value_len) { - int ret = 0, i, cp_len, credits; + int ret = 0, i, cp_len; u16 blocksize = inode->i_sb->s_blocksize; u32 p_cluster, num_clusters; u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); @@ -1179,18 +1179,6 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); - /* - * In __ocfs2_xattr_set_value_outside has already been dirtied, - * so we don't need to worry about whether ocfs2_extend_trans - * will create a new transactio for us or not. - */ - credits = clusters * bpc; - ret = ocfs2_extend_trans(handle, credits); - if (ret) { - mlog_errno(ret); - goto out; - } - while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, &num_clusters, &xv->xr_list); @@ -2233,6 +2221,15 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, xi->value_len); u64 value_size; + /* + * Calculate the clusters we need to write. + * No matter whether we replace an old one or add a new one, + * we need this for writing. + */ + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + credits += new_clusters * + ocfs2_clusters_to_blocks(inode->i_sb, 1); + if (xis->not_found && xbs->not_found) { credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); -- cgit From 4b3f6209bf9eec46fe5ebb168718fef5c443c157 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Fri, 5 Dec 2008 06:20:55 +0800 Subject: ocfs2/xattr: Always updating ctime during xattr set. In xattr set, we should always update ctime if the operation goes sucessfully. The old one mistakenly put it in ocfs2_xattr_set_entry which is only called when we set xattr in inode or xattr block. The side benefit is that it resolve the bug 1052 since in that scenario, ocfs2_calc_xattr_set_need only calc out the xattr set credits while ocfs2_xattr_set_entry update the inode also which isn't concerned with the process of xattr set. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 93a1ab4fe1da..3e2e92d70594 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1651,10 +1651,6 @@ static int ocfs2_xattr_set_entry(struct inode *inode, oi->ip_dyn_features |= flag; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); - /* Update inode ctime */ - inode->i_ctime = CURRENT_TIME; - di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); - di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); ret = ocfs2_journal_dirty(handle, xs->inode_bh); if (ret < 0) @@ -2574,6 +2570,20 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, } } + if (!ret) { + /* Update inode ctime. 
*/ + ret = ocfs2_journal_access(ctxt->handle, inode, xis->inode_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + inode->i_ctime = CURRENT_TIME; + di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + ocfs2_journal_dirty(ctxt->handle, xis->inode_bh); + } out: return ret; } @@ -2750,6 +2760,8 @@ int ocfs2_xattr_set(struct inode *inode, goto cleanup; } + /* we need to update inode's ctime field, so add credit for it. */ + credits += OCFS2_INODE_UPDATE_CREDITS; ctxt.handle = ocfs2_start_trans(osb, credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); -- cgit From 90cb546cada68bb8c2278afdb4b65c2ac11f2877 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Fri, 5 Dec 2008 06:20:56 +0800 Subject: ocfs2/xattr: fix credits calculation during index create When creating a xattr index block, the old calculation forget to add credits for the meta change of the alloc file. So add more credits and more comments to explain it. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3e2e92d70594..73fb9f762512 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2359,13 +2359,21 @@ meta_guess: } else xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; + /* + * If there is already an xattr tree, good, we can calculate + * like other b-trees. Otherwise we may have the chance of + * create a tree, the credit calculation is borrowed from + * ocfs2_calc_extend_credits with root_el = NULL. And the + * new tree will be cluster based, so no meta is needed. + */ if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; meta_add += ocfs2_extend_meta_needed(el); credits += ocfs2_calc_extend_credits(inode->i_sb, el, 1); - } + } else + credits += OCFS2_SUBALLOC_ALLOC + 1; /* * This cluster will be used either for new bucket or for -- cgit From 0e445b6fe93c723fe8093fd04ddfeb11ae2de082 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Tue, 9 Dec 2008 16:42:51 +0800 Subject: ocfs2: calculate and reserve credits for xattr value in mknod We extend the credits for xattr's large value in set_value_outside before, this can give rise to a credits issue when we set one security entry and two acl entries duing mknod. As we remove extend_trans form set_value_outside, we must calculate and reserve the credits for xattr's large value in mknod. 
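Roughly, this is the shape of the reservation the hunks below add (shown here for the security xattr; the acl path is analogous, with a factor of two for directories, which carry both DEFAULT and ACCESS entries):

	new_clusters = ocfs2_clusters_for_bytes(dir->i_sb, si->value_len);
	*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, new_clusters);
	*want_clusters += new_clusters;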
Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 73fb9f762512..e5be470e7504 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -490,9 +490,14 @@ int ocfs2_calc_security_init(struct inode *dir, } /* reserve clusters for xattr value which will be set in B tree*/ - if (si->value_len > OCFS2_XATTR_INLINE_SIZE) - *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, - si->value_len); + if (si->value_len > OCFS2_XATTR_INLINE_SIZE) { + int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb, + si->value_len); + + *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, + new_clusters); + *want_clusters += new_clusters; + } return ret; } @@ -506,9 +511,7 @@ int ocfs2_calc_xattr_init(struct inode *dir, { int ret = 0; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); - int s_size = 0; - int a_size = 0; - int acl_len = 0; + int s_size = 0, a_size = 0, acl_len = 0, new_clusters; if (si->enable) s_size = ocfs2_xattr_entry_real_size(strlen(si->name), @@ -556,16 +559,25 @@ int ocfs2_calc_xattr_init(struct inode *dir, *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb); } - /* reserve clusters for xattr value which will be set in B tree*/ - if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) - *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, - si->value_len); + /* + * reserve credits and clusters for xattrs which has large value + * and have to be set outside + */ + if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) { + new_clusters = ocfs2_clusters_for_bytes(dir->i_sb, + si->value_len); + *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, + new_clusters); + *want_clusters += new_clusters; + } if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL && acl_len > OCFS2_XATTR_INLINE_SIZE) { - *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, acl_len); - if (S_ISDIR(mode)) - *want_clusters += ocfs2_clusters_for_bytes(dir->i_sb, - acl_len); + /* for directory, it has DEFAULT and ACCESS two types of acls */ + new_clusters = (S_ISDIR(mode) ? 2 : 1) * + ocfs2_clusters_for_bytes(dir->i_sb, acl_len); + *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, + new_clusters); + *want_clusters += new_clusters; } return ret; -- cgit From 008aafaf0b4aa0476da483e3c6e3edbe951811ff Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Tue, 9 Dec 2008 16:43:08 +0800 Subject: ocfs2: alloc xattr bucket in ocfs2_xattr_set_handle In extreme situation, may need xattr bucket for setting security entry and acl entries during mknod. This only happens when block size is too small. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index e5be470e7504..095b0bb6e590 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2611,9 +2611,7 @@ out: /* * This function only called duing creating inode * for init security/acl xattrs of the new inode. - * The xattrs could be put into ibody or extent block, - * xattr bucket would not be use in this case. - * transanction credits also be reserved in here. + * All transanction credits have been reserved in mknod. 
*/ int ocfs2_xattr_set_handle(handle_t *handle, struct inode *inode, @@ -2653,6 +2651,19 @@ int ocfs2_xattr_set_handle(handle_t *handle, if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; + /* + * In extreme situation, may need xattr bucket when + * block size is too small. And we have already reserved + * the credits for bucket in mknod. + */ + if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) { + xbs.bucket = ocfs2_xattr_bucket_new(inode); + if (!xbs.bucket) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } + } + xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; @@ -2672,6 +2683,7 @@ int ocfs2_xattr_set_handle(handle_t *handle, cleanup: up_write(&OCFS2_I(inode)->ip_xattr_sem); brelse(xbs.xattr_bh); + ocfs2_xattr_bucket_free(xbs.bucket); return ret; } -- cgit From 38d59ef61c11cafc50a66787bdbbe80d58bbd9c0 Mon Sep 17 00:00:00 2001 From: Tiger Yang Date: Wed, 17 Dec 2008 10:22:56 +0800 Subject: ocfs2: Add xattr support checking in init_security We must check whether ocfs2 volume support xattr in init_security, if not support xattr and security is enable, would cause failure of mknod. Signed-off-by: Tiger Yang Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 095b0bb6e590..e1d638af6ac3 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -5324,6 +5324,9 @@ int ocfs2_init_security_get(struct inode *inode, struct inode *dir, struct ocfs2_security_xattr_info *si) { + /* check whether ocfs2 support feature xattr */ + if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb))) + return -EOPNOTSUPP; return security_inode_init_security(inode, dir, &si->name, &si->value, &si->value_len); } -- cgit From a641dc2a5a1445eb4cb491080dfc41c42a9eb37d Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Wed, 24 Dec 2008 16:03:48 -0800 Subject: ocfs2: remove unneeded lvb casts dlmglue.c has lots of code which casts the return value of ocfs2_dlm_lvb(). This is pointless however, as ocfs2_dlm_lvb() returns void *. Signed-off-by: Mark Fasheh --- fs/ocfs2/dlmglue.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index b1c75911d8ad..f731ab491795 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -115,8 +115,7 @@ static void ocfs2_dump_meta_lvb_info(u64 level, unsigned int line, struct ocfs2_lock_res *lockres) { - struct ocfs2_meta_lvb *lvb = - (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); mlog(level, "LVB information for %s (called from %s:%u):\n", lockres->l_name, function, line); @@ -1864,7 +1863,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) mlog_entry_void(); - lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); /* * Invalidate the LVB of a deleted inode - this way other @@ -1916,7 +1915,7 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) mlog_meta_lvb(0, lockres); - lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); /* We're safe here without the lockres lock... 
*/ spin_lock(&oi->ip_lock); @@ -1951,8 +1950,7 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, struct ocfs2_lock_res *lockres) { - struct ocfs2_meta_lvb *lvb = - (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); if (lvb->lvb_version == OCFS2_LVB_VERSION && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation) @@ -3489,7 +3487,7 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) mlog_entry_void(); - lvb = (struct ocfs2_qinfo_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace); -- cgit From dad7d975e4bd893c79fd122105b37b9a1776816a Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Wed, 24 Dec 2008 16:33:08 -0800 Subject: ocfs2: use min_t in ocfs2_quota_read() This is preferred to min(). Signed-off-by: Mark Fasheh --- fs/ocfs2/quota_global.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 444aa5a467fb..6aff8f2d3e49 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -167,7 +167,7 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data, len = i_size - off; toread = len; while (toread > 0) { - tocopy = min((size_t)(sb->s_blocksize - offset), toread); + tocopy = min_t(size_t, (sb->s_blocksize - offset), toread); bh = NULL; err = ocfs2_read_quota_block(gqinode, blk, &bh); if (err) { -- cgit From 9047beabb8a396f0b18de1e4a9ab920cf92054af Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Mon, 5 Jan 2009 14:45:24 +0800 Subject: ocfs2: Access the right buffer_head in ocfs2_merge_rec_left. In commit "ocfs2: Use metadata-specific ocfs2_journal_access_*() functions", the wrong buffer_head is accessed. So change it to the right buffer_head. Signed-off-by: Tao Ma Acked-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 874c0bd9e1cc..54ff4c77aaa3 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -3402,8 +3402,8 @@ static int ocfs2_merge_rec_left(struct inode *inode, has_empty_extent = 1; } - ret = ocfs2_path_bh_journal_access(handle, inode, left_path, - path_num_items(left_path) - 1); + ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + path_num_items(right_path) - 1); if (ret) { mlog_errno(ret); goto out; -- cgit From 5b6f1eb97d462a45be3b30759758b5fdbb562c8c Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Mon, 10 Nov 2008 17:08:08 -0800 Subject: vfs: lseek(fd, 0, SEEK_CUR) race condition This patch fixes a race condition in lseek. While it is expected that unpredictable behaviour may result while repositioning the offset of a file descriptor concurrently with reading/writing to the same file descriptor, this should not happen when merely *reading* the file descriptor's offset. Unfortunately, the only portable way in Unix to read a file descriptor's offset is lseek(fd, 0, SEEK_CUR); however executing this concurrently with read/write may mess up the position. 
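For illustration only (a userspace sketch, not part of the patch), the pattern being protected is the pure position query:

	/* thread A: query the offset without meaning to move it */
	off_t pos = lseek(fd, 0, SEEK_CUR);

	/* thread B, concurrently, advances the offset */
	read(fd, buf, sizeof(buf));

Previously thread A would read f_pos and store the same value back, potentially clobbering the offset thread B had just advanced; with this change the SEEK_CUR/offset==0 case returns f_pos without ever writing it.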
[with fixes from akpm] Signed-off-by: Alain Knaff Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/read_write.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'fs') diff --git a/fs/read_write.c b/fs/read_write.c index 969a6d9c020b..5cc6924eb158 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -50,6 +50,14 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin) offset += inode->i_size; break; case SEEK_CUR: + /* + * Here we special-case the lseek(fd, 0, SEEK_CUR) + * position-querying operation. Avoid rewriting the "same" + * f_pos value back to the file because a concurrent read(), + * write() or lseek() might have altered it + */ + if (offset == 0) + return file->f_pos; offset += file->f_pos; break; } @@ -105,6 +113,10 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) offset += i_size_read(file->f_path.dentry->d_inode); break; case SEEK_CUR: + if (offset == 0) { + retval = file->f_pos; + goto out; + } offset += file->f_pos; } retval = -EINVAL; @@ -115,6 +127,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) } retval = offset; } +out: unlock_kernel(); return retval; } -- cgit From c765d479037808532310212e9b3fa95760e975f2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Dec 2008 09:50:55 -0500 Subject: affs: do not zero ->i_op it is already set to empty table and should never be NULL Signed-off-by: Al Viro --- fs/affs/inode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/affs/inode.c b/fs/affs/inode.c index 415d9c67ac16..3c4ec7d864c4 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -119,8 +119,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) goto bad_inode; #else inode->i_mode |= S_IFDIR; - inode->i_op = NULL; - inode->i_fop = NULL; + /* ... and leave ->i_op and ->i_fop pointing to empty */ break; #endif case ST_LINKFILE: -- cgit From 261964c60ff6524076d439da9386d4782729c4d9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Dec 2008 09:57:25 -0500 Subject: isofs check for NULL ->i_op in root directory is dead code for one thing it never happens, for another we check that inode is a directory right after that place anyway (and we'd already checked that reading it from disk has not failed). Signed-off-by: Al Viro --- fs/isofs/inode.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 3f8af0f1505b..6147ec3643a0 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -855,10 +855,6 @@ root_found: } sbi->s_joliet_level = joliet_level; - /* check the root inode */ - if (!inode->i_op) - goto out_bad_root; - /* Make sure the root inode is a directory */ if (!S_ISDIR(inode->i_mode)) { printk(KERN_WARNING @@ -886,8 +882,6 @@ root_found: /* * Display error messages and free resources. 
*/ -out_bad_root: - printk(KERN_WARNING "%s: root inode not initialized\n", __func__); out_iput: iput(inode); goto out_no_inode; -- cgit From 9742df331deb3fce95b321f38d4ea0c4e75edb63 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Dec 2008 09:59:23 -0500 Subject: ntfs: don't NULL i_op it's already set to empty table (and no, ntfs doesn't have any explicit checks for NULL ->i_op or NULL ->i_fop) Signed-off-by: Al Viro --- fs/ntfs/inode.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index e9da092e2772..86bef156cf0a 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1406,9 +1406,6 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) ni->allocated_size = sle64_to_cpu( a->data.non_resident.allocated_size); } - /* Setup the operations for this attribute inode. */ - vi->i_op = NULL; - vi->i_fop = NULL; if (NInoMstProtected(ni)) vi->i_mapping->a_ops = &ntfs_mst_aops; else -- cgit From acfa4380efe77e290d3a96b11cd4c9f24f4fbb18 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Dec 2008 10:06:33 -0500 Subject: inode->i_op is never NULL We used to have rather schizophrenic set of checks for NULL ->i_op even though it had been eliminated years ago. You'd need to go out of your way to set it to NULL explicitly _and_ a bunch of code would die on such inodes anyway. After killing two remaining places that still did that bogosity, all that crap can go away. Signed-off-by: Al Viro --- fs/cifs/inode.c | 2 +- fs/ecryptfs/inode.c | 3 +-- fs/namei.c | 32 +++++++++++++------------------- fs/nfsd/vfs.c | 8 ++++---- fs/open.c | 2 +- fs/stat.c | 2 +- fs/xattr.c | 2 +- 7 files changed, 22 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f247da9f4edc..5ab9896fdcb2 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1641,7 +1641,7 @@ do_expand: i_size_write(inode, offset); spin_unlock(&inode->i_lock); out_truncate: - if (inode->i_op && inode->i_op->truncate) + if (inode->i_op->truncate) inode->i_op->truncate(inode); return 0; out_sig: diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 5e78fc179886..0111906a8877 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -612,8 +612,7 @@ ecryptfs_readlink(struct dentry *dentry, char __user * buf, int bufsiz) struct ecryptfs_crypt_stat *crypt_stat; lower_dentry = ecryptfs_dentry_to_lower(dentry); - if (!lower_dentry->d_inode->i_op || - !lower_dentry->d_inode->i_op->readlink) { + if (!lower_dentry->d_inode->i_op->readlink) { rc = -EINVAL; goto out; } diff --git a/fs/namei.c b/fs/namei.c index dd5c9f0bf829..1f6656c3d1b9 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -257,7 +257,7 @@ int inode_permission(struct inode *inode, int mask) return -EACCES; } - if (inode->i_op && inode->i_op->permission) + if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask); else retval = generic_permission(inode, mask, NULL); @@ -432,7 +432,7 @@ static int exec_permission_lite(struct inode *inode) { umode_t mode = inode->i_mode; - if (inode->i_op && inode->i_op->permission) + if (inode->i_op->permission) return -EAGAIN; if (current_fsuid() == inode->i_uid) @@ -908,9 +908,6 @@ static int __link_path_walk(const char *name, struct nameidata *nd) inode = next.dentry->d_inode; if (!inode) goto out_dput; - err = -ENOTDIR; - if (!inode->i_op) - goto out_dput; if (inode->i_op->follow_link) { err = do_follow_link(&next, nd); @@ -920,9 +917,6 @@ static int __link_path_walk(const char *name, struct nameidata *nd) inode = 
nd->path.dentry->d_inode; if (!inode) break; - err = -ENOTDIR; - if (!inode->i_op) - break; } else path_to_nameidata(&next, nd); err = -ENOTDIR; @@ -961,7 +955,7 @@ last_component: break; inode = next.dentry->d_inode; if ((lookup_flags & LOOKUP_FOLLOW) - && inode && inode->i_op && inode->i_op->follow_link) { + && inode && inode->i_op->follow_link) { err = do_follow_link(&next, nd); if (err) goto return_err; @@ -973,7 +967,7 @@ last_component: break; if (lookup_flags & LOOKUP_DIRECTORY) { err = -ENOTDIR; - if (!inode->i_op || !inode->i_op->lookup) + if (!inode->i_op->lookup) break; } goto return_base; @@ -1469,7 +1463,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, if (error) return error; - if (!dir->i_op || !dir->i_op->create) + if (!dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; @@ -1752,7 +1746,7 @@ do_last: error = -ENOENT; if (!path.dentry->d_inode) goto exit_dput; - if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link) + if (path.dentry->d_inode->i_op->follow_link) goto do_link; path_to_nameidata(&path, &nd); @@ -1933,7 +1927,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) return -EPERM; - if (!dir->i_op || !dir->i_op->mknod) + if (!dir->i_op->mknod) return -EPERM; error = devcgroup_inode_mknod(mode, dev); @@ -2035,7 +2029,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (error) return error; - if (!dir->i_op || !dir->i_op->mkdir) + if (!dir->i_op->mkdir) return -EPERM; mode &= (S_IRWXUGO|S_ISVTX); @@ -2126,7 +2120,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (error) return error; - if (!dir->i_op || !dir->i_op->rmdir) + if (!dir->i_op->rmdir) return -EPERM; DQUOT_INIT(dir); @@ -2213,7 +2207,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) if (error) return error; - if (!dir->i_op || !dir->i_op->unlink) + if (!dir->i_op->unlink) return -EPERM; DQUOT_INIT(dir); @@ -2320,7 +2314,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) if (error) return error; - if (!dir->i_op || !dir->i_op->symlink) + if (!dir->i_op->symlink) return -EPERM; error = security_inode_symlink(dir, dentry, oldname); @@ -2401,7 +2395,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; - if (!dir->i_op || !dir->i_op->link) + if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; @@ -2608,7 +2602,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; - if (!old_dir->i_op || !old_dir->i_op->rename) + if (!old_dir->i_op->rename) return -EPERM; DQUOT_INIT(old_dir); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d1c5f787b365..5245a3965004 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1211,7 +1211,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, dirp = dentry->d_inode; err = nfserr_notdir; - if(!dirp->i_op || !dirp->i_op->lookup) + if (!dirp->i_op->lookup) goto out; /* * Check whether the response file handle has been verified yet. @@ -1347,7 +1347,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, /* Get all the sanity checks out of the way before * we lock the parent. 
*/ err = nfserr_notdir; - if(!dirp->i_op || !dirp->i_op->lookup) + if (!dirp->i_op->lookup) goto out; fh_lock_nested(fhp, I_MUTEX_PARENT); @@ -1482,7 +1482,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) inode = dentry->d_inode; err = nfserr_inval; - if (!inode->i_op || !inode->i_op->readlink) + if (!inode->i_op->readlink) goto out; touch_atime(fhp->fh_export->ex_path.mnt, dentry); @@ -2162,7 +2162,7 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl) size_t size; int error; - if (!IS_POSIXACL(inode) || !inode->i_op || + if (!IS_POSIXACL(inode) || !inode->i_op->setxattr || !inode->i_op->removexattr) return -EOPNOTSUPP; switch(type) { diff --git a/fs/open.c b/fs/open.c index 1cd7d40e9991..d882fd2351d6 100644 --- a/fs/open.c +++ b/fs/open.c @@ -412,7 +412,7 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len) if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) goto out_fput; - if (inode->i_op && inode->i_op->fallocate) + if (inode->i_op->fallocate) ret = inode->i_op->fallocate(inode, mode, offset, len); else ret = -EOPNOTSUPP; diff --git a/fs/stat.c b/fs/stat.c index 7c46fbeb8b76..7e12a6f82795 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -305,7 +305,7 @@ asmlinkage long sys_readlinkat(int dfd, const char __user *pathname, struct inode *inode = path.dentry->d_inode; error = -EINVAL; - if (inode->i_op && inode->i_op->readlink) { + if (inode->i_op->readlink) { error = security_inode_readlink(path.dentry); if (!error) { touch_atime(path.mnt, path.dentry); diff --git a/fs/xattr.c b/fs/xattr.c index 468377e66531..237804cd6b56 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -175,7 +175,7 @@ vfs_listxattr(struct dentry *d, char *list, size_t size) if (error) return error; error = -EOPNOTSUPP; - if (d->d_inode->i_op && d->d_inode->i_op->listxattr) { + if (d->d_inode->i_op->listxattr) { error = d->d_inode->i_op->listxattr(d, list, size); } else { error = security_inode_listsecurity(d->d_inode, list, size); -- cgit From 56ff5efad96182f4d3cb3dc6b07396762c658f16 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 9 Dec 2008 09:34:39 -0500 Subject: zero i_uid/i_gid on inode allocation ... and don't bother in callers. Don't bother with zeroing i_blocks, while we are at it - it's already been zeroed. i_mode is not worth the effort; it has no common default value. 
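For orientation, the one addition in the series of hunks below is in inode_init_always() in fs/inode.c, roughly:

	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;

Everything else is removal of the now-redundant per-filesystem assignments.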
Signed-off-by: Al Viro --- fs/autofs/inode.c | 2 -- fs/autofs4/inode.c | 4 ---- fs/binfmt_misc.c | 3 --- fs/configfs/inode.c | 3 --- fs/cramfs/inode.c | 2 -- fs/debugfs/inode.c | 3 --- fs/devpts/inode.c | 4 ---- fs/hugetlbfs/inode.c | 1 - fs/inode.c | 2 ++ fs/libfs.c | 5 ----- fs/ocfs2/dlm/dlmfs.c | 2 -- fs/omfs/inode.c | 1 - fs/openpromfs/inode.c | 3 --- fs/proc/base.c | 4 ---- fs/proc/proc_sysctl.c | 1 - fs/ramfs/inode.c | 1 - fs/romfs/inode.c | 1 - fs/sysfs/inode.c | 3 --- 18 files changed, 2 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index c773680d5c60..e1734f2d6e26 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -251,13 +251,11 @@ struct inode *autofs_iget(struct super_block *sb, unsigned long ino) inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_nlink = 2; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - inode->i_blocks = 0; if (ino == AUTOFS_ROOT_INO) { inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &autofs_root_inode_operations; inode->i_fop = &autofs_root_operations; - inode->i_uid = inode->i_gid = 0; /* Changed in read_super */ goto done; } diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 7b19802cfef4..cfc23e53b6f4 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -455,11 +455,7 @@ struct inode *autofs4_get_inode(struct super_block *sb, if (sb->s_root) { inode->i_uid = sb->s_root->d_inode->i_uid; inode->i_gid = sb->s_root->d_inode->i_gid; - } else { - inode->i_uid = 0; - inode->i_gid = 0; } - inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; if (S_ISDIR(inf->mode)) { diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index f2744ab4e5b3..e1158cb4fbd6 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -496,9 +496,6 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode) if (inode) { inode->i_mode = mode; - inode->i_uid = 0; - inode->i_gid = 0; - inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); } diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 4803ccc94480..5d349d38e056 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -117,8 +117,6 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr) static inline void set_default_inode_attr(struct inode * inode, mode_t mode) { inode->i_mode = mode; - inode->i_uid = 0; - inode->i_gid = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; } @@ -136,7 +134,6 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd) { struct inode * inode = new_inode(configfs_sb); if (inode) { - inode->i_blocks = 0; inode->i_mapping->a_ops = &configfs_aops; inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; inode->i_op = &configfs_inode_operations; diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index f40423eb1a14..a07338d2d140 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -83,8 +83,6 @@ static struct inode *get_cramfs_inode(struct super_block *sb, inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &cramfs_aops; } else { - inode->i_size = 0; - inode->i_blocks = 0; init_special_inode(inode, inode->i_mode, old_decode_dev(cramfs_inode->size)); } diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 3dbe2169cf36..81ae9ea3c6e1 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -37,9 +37,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d if (inode) { inode->i_mode = 
mode; - inode->i_uid = 0; - inode->i_gid = 0; - inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { default: diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index fff96e152c0c..5f3231b9633f 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -189,8 +189,6 @@ static int mknod_ptmx(struct super_block *sb) } inode->i_ino = 2; - inode->i_uid = inode->i_gid = 0; - inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; mode = S_IFCHR|opts->ptmxmode; @@ -300,8 +298,6 @@ devpts_fill_super(struct super_block *s, void *data, int silent) goto free_fsi; inode->i_ino = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - inode->i_blocks = 0; - inode->i_uid = inode->i_gid = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 7d479ce3aceb..0ab0c6f5f438 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -506,7 +506,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, inode->i_mode = mode; inode->i_uid = uid; inode->i_gid = gid; - inode->i_blocks = 0; inode->i_mapping->a_ops = &hugetlbfs_aops; inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/inode.c b/fs/inode.c index 7de1cda92489..bd48e5e6d3e8 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -131,6 +131,8 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) inode->i_op = &empty_iops; inode->i_fop = &empty_fops; inode->i_nlink = 1; + inode->i_uid = 0; + inode->i_gid = 0; atomic_set(&inode->i_writecount, 0); inode->i_size = 0; inode->i_blocks = 0; diff --git a/fs/libfs.c b/fs/libfs.c index e960a8321902..7de05f7ce746 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -231,7 +231,6 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, */ root->i_ino = 1; root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR; - root->i_uid = root->i_gid = 0; root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME; dentry = d_alloc(NULL, &d_name); if (!dentry) { @@ -436,8 +435,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files */ inode->i_ino = 1; inode->i_mode = S_IFDIR | 0755; - inode->i_uid = inode->i_gid = 0; - inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; @@ -464,8 +461,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files if (!inode) goto out; inode->i_mode = S_IFREG | files->mode; - inode->i_uid = inode->i_gid = 0; - inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_fop = files->ops; inode->i_ino = i; diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 6f7a77d54020..1c9efb406a96 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -341,7 +341,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inc_nlink(inode); @@ -367,7 +366,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent, inode->i_mode = mode; inode->i_uid = current_fsuid(); 
inode->i_gid = current_fsgid(); - inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 6afe57c84f84..633e9dc972bb 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -39,7 +39,6 @@ struct inode *omfs_new_inode(struct inode *dir, int mode) inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_blocks = 0; inode->i_mapping->a_ops = &omfs_aops; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index d41bdc784de4..ffcd04f0012c 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -256,9 +256,6 @@ found: break; } - inode->i_gid = 0; - inode->i_uid = 0; - d_add(dentry, inode); return NULL; } diff --git a/fs/proc/base.c b/fs/proc/base.c index cad92c1ac2b3..10fd5223d600 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1426,8 +1426,6 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st if (!ei->pid) goto out_unlock; - inode->i_uid = 0; - inode->i_gid = 0; if (task_dumpable(task)) { rcu_read_lock(); cred = __task_cred(task); @@ -2349,8 +2347,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir, if (!ei->pid) goto out_iput; - inode->i_uid = 0; - inode->i_gid = 0; inode->i_mode = p->mode; if (S_ISDIR(inode->i_mode)) inode->i_nlink = 2; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 06ed10b7da9e..94fcfff6863a 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -31,7 +31,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_flags |= S_PRIVATE; /* tell selinux to ignore this inode */ inode->i_mode = table->mode; - inode->i_uid = inode->i_gid = 0; if (!table->child) { inode->i_mode |= S_IFREG; inode->i_op = &proc_sys_inode_operations; diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index a83a3518ae33..b7e6ac706b87 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -57,7 +57,6 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev) inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_blocks = 0; inode->i_mapping->a_ops = &ramfs_aops; inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index 60d2f822e87b..c97d4c931715 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -524,7 +524,6 @@ romfs_iget(struct super_block *sb, unsigned long ino) i->i_size = be32_to_cpu(ri.size); i->i_mtime.tv_sec = i->i_atime.tv_sec = i->i_ctime.tv_sec = 0; i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0; - i->i_uid = i->i_gid = 0; /* Precalculate the data offset */ ino = romfs_strnlen(i, ino+ROMFH_SIZE, ROMFS_MAXFN); diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index eb53c632f856..dfa3d94cfc74 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -107,8 +107,6 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) static inline void set_default_inode_attr(struct inode * inode, mode_t mode) { inode->i_mode = mode; - inode->i_uid = 0; - inode->i_gid = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; } @@ -149,7 +147,6 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode) { struct bin_attribute *bin_attr; - inode->i_blocks = 0; 
inode->i_mapping->a_ops = &sysfs_aops; inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info; inode->i_op = &sysfs_inode_operations; -- cgit From 6110e3abbff8b785907d4db50240e63c1be726e3 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 17 Dec 2008 13:53:20 -0500 Subject: sys_execve and sys_uselib do not call into fsnotify sys_execve and sys_uselib do not call into fsnotify so inotify does not get open events for these types of syscalls. This patch simply makes the requisite fsnotify calls. Signed-off-by: Eric Paris Signed-off-by: Al Viro --- fs/exec.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 3ef9cf9b1871..9c33f542dc77 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -132,6 +133,8 @@ asmlinkage long sys_uselib(const char __user * library) if (IS_ERR(file)) goto out; + fsnotify_open(file->f_path.dentry); + error = -ENOEXEC; if(file->f_op) { struct linux_binfmt * fmt; @@ -684,6 +687,8 @@ struct file *open_exec(const char *name) if (IS_ERR(file)) return file; + fsnotify_open(file->f_path.dentry); + err = deny_write_access(file); if (err) { fput(file); -- cgit From 4c728ef583b3d82266584da5cb068294c09df31e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 Dec 2008 21:11:15 +0100 Subject: add a vfs_fsync helper Fsync currently has a fdatawrite/fdatawait pair around the method call, and a mutex_lock/unlock of the inode mutex. All callers of fsync have to duplicate this, but we have a few and most of them don't quite get it right. This patch adds a new vfs_fsync that takes care of this. It's a little more complicated than usual, as ->fsync might get a NULL file pointer and just a dentry from nfsd, but otherwise gets a file and we want to take the mapping and file operations from it when it is there. Notes on the fsync callers: - ecryptfs wasn't calling filemap_fdatawrite / filemap_fdatawait on the lower file - coda wasn't calling filemap_fdatawrite / filemap_fdatawait on the host file, and returning 0 when ->fsync was missing - shm wasn't calling either filemap_fdatawrite / filemap_fdatawait nor taking i_mutex. Now given that shared memory doesn't have disk backing, not doing anything in fsync seems fine and I left it out of the vfs_fsync conversion for now, but in that case we might just not pass it through to the lower file at all but just call the no-op simple_sync_file directly.
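A usage sketch of the new helper (illustrative only; the function names example_flush_file and example_flush_dentry are hypothetical, <linux/fs.h> assumed): vfs_fsync() can be called with an ordinary struct file, or nfsd-style with a NULL file and just a dentry, in which case the mapping and file operations are taken from the dentry's inode, as the nfsd conversion below does.

static int example_flush_file(struct file *filp)
{
	/* ordinary caller: sync data and metadata (datasync == 0) */
	return vfs_fsync(filp, filp->f_path.dentry, 0);
}

static int example_flush_dentry(struct dentry *dentry)
{
	/* nfsd-style caller that has no struct file */
	return vfs_fsync(NULL, dentry, 0);
}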
[and now actually export vfs_fsync] Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/coda/file.c | 12 ++---------- fs/ecryptfs/file.c | 15 +++------------ fs/nfsd/vfs.c | 35 +++-------------------------------- fs/sync.c | 48 +++++++++++++++++++++++++++++++++++++----------- 4 files changed, 45 insertions(+), 65 deletions(-) (limited to 'fs') diff --git a/fs/coda/file.c b/fs/coda/file.c index 466303db2df6..6a347fbc998a 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -201,8 +201,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) { struct file *host_file; - struct dentry *host_dentry; - struct inode *host_inode, *coda_inode = coda_dentry->d_inode; + struct inode *coda_inode = coda_dentry->d_inode; struct coda_file_info *cfi; int err = 0; @@ -214,14 +213,7 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); host_file = cfi->cfi_container; - if (host_file->f_op && host_file->f_op->fsync) { - host_dentry = host_file->f_path.dentry; - host_inode = host_dentry->d_inode; - mutex_lock(&host_inode->i_mutex); - err = host_file->f_op->fsync(host_file, host_dentry, datasync); - mutex_unlock(&host_inode->i_mutex); - } - + err = vfs_fsync(host_file, host_file->f_path.dentry, datasync); if ( !err && !datasync ) { lock_kernel(); err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode)); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index eb3dc4c7ac06..713834371229 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -275,18 +275,9 @@ static int ecryptfs_release(struct inode *inode, struct file *file) static int ecryptfs_fsync(struct file *file, struct dentry *dentry, int datasync) { - struct file *lower_file = ecryptfs_file_to_lower(file); - struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - struct inode *lower_inode = lower_dentry->d_inode; - int rc = -EINVAL; - - if (lower_inode->i_fop->fsync) { - mutex_lock(&lower_inode->i_mutex); - rc = lower_inode->i_fop->fsync(lower_file, lower_dentry, - datasync); - mutex_unlock(&lower_inode->i_mutex); - } - return rc; + return vfs_fsync(ecryptfs_file_to_lower(file), + ecryptfs_dentry_to_lower(dentry), + datasync); } static int ecryptfs_fasync(int fd, struct file *file, int flag) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 5245a3965004..44aa92aba891 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -744,45 +744,16 @@ nfsd_close(struct file *filp) fput(filp); } -/* - * Sync a file - * As this calls fsync (not fdatasync) there is no need for a write_inode - * after it. 
- */ -static inline int nfsd_dosync(struct file *filp, struct dentry *dp, - const struct file_operations *fop) -{ - struct inode *inode = dp->d_inode; - int (*fsync) (struct file *, struct dentry *, int); - int err; - - err = filemap_fdatawrite(inode->i_mapping); - if (err == 0 && fop && (fsync = fop->fsync)) - err = fsync(filp, dp, 0); - if (err == 0) - err = filemap_fdatawait(inode->i_mapping); - - return err; -} - - static int nfsd_sync(struct file *filp) { - int err; - struct inode *inode = filp->f_path.dentry->d_inode; - dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name); - mutex_lock(&inode->i_mutex); - err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op); - mutex_unlock(&inode->i_mutex); - - return err; + return vfs_fsync(filp, filp->f_path.dentry, 0); } int -nfsd_sync_dir(struct dentry *dp) +nfsd_sync_dir(struct dentry *dentry) { - return nfsd_dosync(NULL, dp, dp->d_inode->i_fop); + return vfs_fsync(NULL, dentry, 0); } /* diff --git a/fs/sync.c b/fs/sync.c index 2967562d416f..0921d6d4b5e6 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -75,14 +75,39 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync) return ret; } -long do_fsync(struct file *file, int datasync) +/** + * vfs_fsync - perform a fsync or fdatasync on a file + * @file: file to sync + * @dentry: dentry of @file + * @data: only perform a fdatasync operation + * + * Write back data and metadata for @file to disk. If @datasync is + * set only metadata needed to access modified file data is written. + * + * In case this function is called from nfsd @file may be %NULL and + * only @dentry is set. This can only happen when the filesystem + * implements the export_operations API. + */ +int vfs_fsync(struct file *file, struct dentry *dentry, int datasync) { - int ret; - int err; - struct address_space *mapping = file->f_mapping; + const struct file_operations *fop; + struct address_space *mapping; + int err, ret; + + /* + * Get mapping and operations from the file in case we have + * as file, or get the default values for them in case we + * don't have a struct file available. Damn nfsd.. + */ + if (file) { + mapping = file->f_mapping; + fop = file->f_op; + } else { + mapping = dentry->d_inode->i_mapping; + fop = dentry->d_inode->i_fop; + } - if (!file->f_op || !file->f_op->fsync) { - /* Why? We can still call filemap_fdatawrite */ + if (!fop || !fop->fsync) { ret = -EINVAL; goto out; } @@ -94,7 +119,7 @@ long do_fsync(struct file *file, int datasync) * livelocks in fsync_buffers_list(). 
*/ mutex_lock(&mapping->host->i_mutex); - err = file->f_op->fsync(file, file->f_path.dentry, datasync); + err = fop->fsync(file, dentry, datasync); if (!ret) ret = err; mutex_unlock(&mapping->host->i_mutex); @@ -104,15 +129,16 @@ long do_fsync(struct file *file, int datasync) out: return ret; } +EXPORT_SYMBOL(vfs_fsync); -static long __do_fsync(unsigned int fd, int datasync) +static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; file = fget(fd); if (file) { - ret = do_fsync(file, datasync); + ret = vfs_fsync(file, file->f_path.dentry, datasync); fput(file); } return ret; @@ -120,12 +146,12 @@ static long __do_fsync(unsigned int fd, int datasync) asmlinkage long sys_fsync(unsigned int fd) { - return __do_fsync(fd, 0); + return do_fsync(fd, 0); } asmlinkage long sys_fdatasync(unsigned int fd) { - return __do_fsync(fd, 1); + return do_fsync(fd, 1); } /* -- cgit From d8e9650dff48055057253ca30933605bd7d0733b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 25 Dec 2008 13:32:15 +0800 Subject: vfs: remove duplicate code in get_fs_type() save 14 bytes: text data bss dec hex filename 1354 32 4 1390 56e fs/filesystems.o.before text data bss dec hex filename 1340 32 4 1376 560 fs/filesystems.o Signed-off-by: Li Zefan Signed-off-by: Al Viro --- fs/filesystems.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/filesystems.c b/fs/filesystems.c index d0e20ced62dd..d488dcd7f2bb 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -253,24 +253,27 @@ static int __init proc_filesystems_init(void) module_init(proc_filesystems_init); #endif -struct file_system_type *get_fs_type(const char *name) +static struct file_system_type *__get_fs_type(const char *name, int len) { struct file_system_type *fs; - const char *dot = strchr(name, '.'); - unsigned len = dot ? dot - name : strlen(name); read_lock(&file_systems_lock); fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); - if (!fs && (request_module("%.*s", len, name) == 0)) { - read_lock(&file_systems_lock); - fs = *(find_filesystem(name, len)); - if (fs && !try_module_get(fs->owner)) - fs = NULL; - read_unlock(&file_systems_lock); - } + return fs; +} + +struct file_system_type *get_fs_type(const char *name) +{ + struct file_system_type *fs; + const char *dot = strchr(name, '.'); + int len = dot ? dot - name : strlen(name); + + fs = __get_fs_type(name, len); + if (!fs && (request_module("%.*s", len, name) == 0)) + fs = __get_fs_type(name, len); if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); -- cgit From 5b45d96bf963afeb931a75faf02fb424e446e5a9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 29 Dec 2008 07:40:31 -0500 Subject: fix the treatment of jfs special inodes We used to put them on a single list, without any locking. Racy. Signed-off-by: Al Viro --- fs/jfs/jfs_imap.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index d6363d8309d0..0f94381ca6d0 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -58,9 +58,9 @@ /* * __mark_inode_dirty expects inodes to be hashed. Since we don't want - * special inodes in the fileset inode space, we hash them to a dummy head + * special inodes in the fileset inode space, we make them appear hashed, + * but do not put on any lists. 
*/ -static HLIST_HEAD(aggregate_hash); /* * imap locks @@ -496,7 +496,11 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary) /* release the page */ release_metapage(mp); - hlist_add_head(&ip->i_hash, &aggregate_hash); + /* + * that will look hashed, but won't be on any list; hlist_del() + * will work fine and require no locking. + */ + ip->i_hash.pprev = &ip->i_hash.next; return (ip); } -- cgit From 2f1169e2dc0c70e213f79ada88a10912cc2fbe94 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 2 Jan 2009 08:16:51 -0500 Subject: fix breakage in reiserfs_new_inode() now that we use ih.key earlier, we need to do all its setup early enough Signed-off-by: Al Viro --- fs/reiserfs/inode.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 145c2d3e5e01..1306d4f0f447 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1782,6 +1782,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, goto out_bad_inode; } args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); + if (old_format_only(sb)) + make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET, + TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT); + else + make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET, + TYPE_STAT_DATA, SD_SIZE, MAX_US_INT); memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); if (insert_inode_locked4(inode, args.objectid, @@ -1834,13 +1840,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, reiserfs_init_acl_default(inode); reiserfs_init_xattr_rwsem(inode); - if (old_format_only(sb)) - make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET, - TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT); - else - make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET, - TYPE_STAT_DATA, SD_SIZE, MAX_US_INT); - /* key to search for correct place for new stat data */ _make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id), le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET, -- cgit From 4ae8978cf92a96257cd8998a49e781be83571d64 Mon Sep 17 00:00:00 2001 From: Michael Kerrisk Date: Mon, 5 Jan 2009 07:19:16 -0500 Subject: inotify: fix type errors in interfaces The problems lie in the types used for some inotify interfaces, both at the kernel level and at the glibc level. This mail addresses the kernel problem. I will follow up with some suggestions for glibc changes. For the sys_inotify_rm_watch() interface, the type of the 'wd' argument is currently 'u32', it should be '__s32' . That is Robert's suggestion, and is consistent with the other declarations of watch descriptors in the kernel source, in particular, the inotify_event structure in include/linux/inotify.h: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; The patch makes the changes needed for inotify_rm_watch(). 
Signed-off-by: Michael Kerrisk Cc: Robert Love Cc: Vegard Nossum Cc: Ulrich Drepper Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/notify/inotify/inotify_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 400f8064a548..81b8644b0136 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -704,7 +704,7 @@ fput_and_out: return ret; } -asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd) { struct file *filp; struct inotify_device *dev; -- cgit
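As background for the preceding inotify change, a minimal userspace sketch (not part of the patch) of why watch descriptors are naturally signed: inotify_add_watch() returns -1 on error, and that same signed value is what later gets passed to inotify_rm_watch().

#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
	int fd = inotify_init();
	int wd;

	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE);	/* signed: -1 on error */
	if (wd < 0)
		perror("inotify_add_watch");
	else
		inotify_rm_watch(fd, wd);	/* wd handed back as a signed value */
	return 0;
}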