Diffstat (limited to 'fs/quota')
-rw-r--r-- | fs/quota/Kconfig      |  15
-rw-r--r-- | fs/quota/dquot.c      | 230
-rw-r--r-- | fs/quota/quota.c      |  18
-rw-r--r-- | fs/quota/quota_tree.c | 152
-rw-r--r-- | fs/quota/quota_v1.c   |   9
-rw-r--r-- | fs/quota/quota_v2.c   |  44
6 files changed, 287 insertions, 181 deletions
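Editor's note: most of the dquot.c hunks that follow annotate the per-inode dquot pointer array as __rcu and make readers fetch entries with srcu_dereference() inside srcu_read_lock(&dquot_srcu), while writers publish or clear entries with rcu_assign_pointer() under dq_data_lock. As a rough illustration of that publish/read discipline — a userspace analogy built on C11 atomics, with hypothetical names such as "slot" and "publish", not the kernel API — something along these lines:

/*
 * Userspace analogy (not kernel code) of the pattern adopted below:
 * writers publish a pointer with a release store (rcu_assign_pointer),
 * readers load it with an acquire load inside a read-side section
 * (srcu_read_lock + srcu_dereference).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct quota { long usage; };

static _Atomic(struct quota *) slot;   /* stands in for dquots[cnt] */

static void publish(struct quota *q)
{
	/* rcu_assign_pointer(): fields become visible before the pointer does */
	atomic_store_explicit(&slot, q, memory_order_release);
}

static long read_usage(void)
{
	/* srcu_dereference(): load paired with the release store above */
	struct quota *q = atomic_load_explicit(&slot, memory_order_acquire);

	return q ? q->usage : 0;
}

int main(void)
{
	struct quota *q = malloc(sizeof(*q));

	q->usage = 42;
	publish(q);
	printf("usage=%ld\n", read_usage());
	free(q);   /* the kernel instead waits for an SRCU grace period */
	return 0;
}

The release/acquire pairing gives the same guarantee the quota code relies on: an object's fields are initialized before the pointer to it becomes reachable to readers.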
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index 4c925e55dbcd..818083a36bef 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -9,14 +9,13 @@ config QUOTA help If you say Y here, you will be able to set per user limits for disk usage (also called disk quotas). Currently, it works for the - ext2, ext3, ext4, f2fs, jfs, ocfs2 and reiserfs file systems. - Note that gfs2 and xfs use their own quota system. - Ext3, ext4 and reiserfs also support journaled quotas for which - you don't need to run quotacheck(8) after an unclean shutdown. - For further details, read the Quota mini-HOWTO, available from - <https://www.tldp.org/docs.html#howto>, or the documentation provided - with the quota tools. Probably the quota support is only useful for - multi user systems. If unsure, say N. + ext2, ext3, ext4, f2fs, jfs and ocfs2 file systems. Note that gfs2 + and xfs use their own quota system. Ext3 and ext4 also support + journaled quotas for which you don't need to run quotacheck(8) after + an unclean shutdown. For further details, read the Quota mini-HOWTO, + available from <https://www.tldp.org/docs.html#howto>, or the + documentation provided with the quota tools. Probably the quota + support is only useful for multi user systems. If unsure, say N. config QUOTA_NETLINK_INTERFACE bool "Report quota messages through netlink interface" diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 1f0c754416b6..825c5c2e0962 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -80,7 +80,6 @@ #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/sched/mm.h> -#include "../internal.h" /* ugh */ #include <linux/uaccess.h> @@ -163,13 +162,12 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; /* SLAB cache for dquot structures */ static struct kmem_cache *dquot_cachep; -int register_quota_format(struct quota_format_type *fmt) +void register_quota_format(struct quota_format_type *fmt) { spin_lock(&dq_list_lock); fmt->qf_next = quota_formats; quota_formats = fmt; spin_unlock(&dq_list_lock); - return 0; } EXPORT_SYMBOL(register_quota_format); @@ -399,16 +397,18 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) EXPORT_SYMBOL(dquot_mark_dquot_dirty); /* Dirtify all the dquots - this can block when journalling */ -static inline int mark_all_dquot_dirty(struct dquot * const *dquot) +static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots) { int ret, err, cnt; + struct dquot *dquot; ret = err = 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquot[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) /* Even in case of error we have to continue */ - ret = mark_dquot_dirty(dquot[cnt]); - if (!err) + ret = mark_dquot_dirty(dquot); + if (!err && ret < 0) err = ret; } return err; @@ -688,6 +688,8 @@ int dquot_writeback_dquots(struct super_block *sb, int type) WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount)); + flush_delayed_work("a_release_work); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; @@ -875,10 +877,7 @@ void dqput(struct dquot *dquot) } /* Need to release dquot? */ -#ifdef CONFIG_QUOTA_DEBUG - /* sanity check */ - BUG_ON(!list_empty(&dquot->dq_free)); -#endif + WARN_ON_ONCE(!list_empty(&dquot->dq_free)); put_releasing_dquots(dquot); atomic_dec(&dquot->dq_count); spin_unlock(&dq_list_lock); @@ -987,9 +986,8 @@ we_slept: * smp_mb__before_atomic() in dquot_acquire(). */ smp_rmb(); -#ifdef CONFIG_QUOTA_DEBUG - BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 
*/ -#endif + /* Has somebody invalidated entry under us? */ + WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash)); out: if (empty) do_destroy_dquot(empty); @@ -998,14 +996,14 @@ out: } EXPORT_SYMBOL(dqget); -static inline struct dquot **i_dquot(struct inode *inode) +static inline struct dquot __rcu **i_dquot(struct inode *inode) { return inode->i_sb->s_op->get_dquots(inode); } static int dqinit_needed(struct inode *inode, int type) { - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) @@ -1095,14 +1093,16 @@ static void remove_dquot_ref(struct super_block *sb, int type) */ spin_lock(&dq_data_lock); if (!IS_NOQUOTA(inode)) { - struct dquot **dquots = i_dquot(inode); - struct dquot *dquot = dquots[type]; + struct dquot __rcu **dquots = i_dquot(inode); + struct dquot *dquot = srcu_dereference_check( + dquots[type], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); #ifdef CONFIG_QUOTA_DEBUG if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; #endif - dquots[type] = NULL; + rcu_assign_pointer(dquots[type], NULL); if (dquot) dqput(dquot); } @@ -1455,7 +1455,8 @@ static int inode_quota_active(const struct inode *inode) static int __dquot_initialize(struct inode *inode, int type) { int cnt, init_needed = 0; - struct dquot **dquots, *got[MAXQUOTAS] = {}; + struct dquot __rcu **dquots; + struct dquot *got[MAXQUOTAS] = {}; struct super_block *sb = inode->i_sb; qsize_t rsv; int ret = 0; @@ -1530,7 +1531,7 @@ static int __dquot_initialize(struct inode *inode, int type) if (!got[cnt]) continue; if (!dquots[cnt]) { - dquots[cnt] = got[cnt]; + rcu_assign_pointer(dquots[cnt], got[cnt]); got[cnt] = NULL; /* * Make quota reservation system happy if someone @@ -1538,12 +1539,16 @@ static int __dquot_initialize(struct inode *inode, int type) */ rsv = inode_get_rsv_space(inode); if (unlikely(rsv)) { + struct dquot *dquot = srcu_dereference_check( + dquots[cnt], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); + spin_lock(&inode->i_lock); /* Get reservation again under proper lock */ rsv = __inode_get_rsv_space(inode); - spin_lock(&dquots[cnt]->dq_dqb_lock); - dquots[cnt]->dq_dqb.dqb_rsvspace += rsv; - spin_unlock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); + dquot->dq_dqb.dqb_rsvspace += rsv; + spin_unlock(&dquot->dq_dqb_lock); spin_unlock(&inode->i_lock); } } @@ -1565,7 +1570,7 @@ EXPORT_SYMBOL(dquot_initialize); bool dquot_initialize_needed(struct inode *inode) { - struct dquot **dquots; + struct dquot __rcu **dquots; int i; if (!inode_quota_active(inode)) @@ -1590,13 +1595,14 @@ EXPORT_SYMBOL(dquot_initialize_needed); static void __dquot_drop(struct inode *inode) { int cnt; - struct dquot **dquots = i_dquot(inode); + struct dquot __rcu **dquots = i_dquot(inode); struct dquot *put[MAXQUOTAS]; spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - put[cnt] = dquots[cnt]; - dquots[cnt] = NULL; + put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); + rcu_assign_pointer(dquots[cnt], NULL); } spin_unlock(&dq_data_lock); dqput_all(put); @@ -1604,7 +1610,7 @@ static void __dquot_drop(struct inode *inode) void dquot_drop(struct inode *inode) { - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) @@ -1677,7 +1683,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; int reserve = flags & DQUOT_SPACE_RESERVE; - struct dquot **dquots; + struct dquot __rcu **dquots; + 
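Aside on the __dquot_drop() hunk just above: the dquot pointers are copied into a local put[] array and cleared under dq_data_lock, and the actual dqput() calls happen only after the lock is released. A minimal sketch of that detach-then-release shape, assuming a plain mutex and hypothetical names (table, drop_all) rather than the kernel primitives:

/* Illustrative only: detach shared pointers under a lock, release the
 * detached objects after unlocking, mirroring __dquot_drop(). */
#include <pthread.h>
#include <stdlib.h>

#define MAXSLOTS 3

static void *table[MAXSLOTS];                 /* stands in for i_dquot(inode) */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void drop_all(void)
{
	void *put[MAXSLOTS];
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MAXSLOTS; i++) {
		put[i] = table[i];   /* remember what has to be released */
		table[i] = NULL;     /* detach while the lock is held */
	}
	pthread_mutex_unlock(&table_lock);

	/* release outside the lock; dqput() may sleep, so the kernel does the same */
	for (i = 0; i < MAXSLOTS; i++)
		free(put[i]);
}

int main(void)
{
	for (int i = 0; i < MAXSLOTS; i++)
		table[i] = malloc(16);
	drop_all();
	return 0;
}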
struct dquot *dquot; if (!inode_quota_active(inode)) { if (reserve) { @@ -1697,27 +1704,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; if (reserve) { - ret = dquot_add_space(dquots[cnt], 0, number, flags, - &warn[cnt]); + ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]); } else { - ret = dquot_add_space(dquots[cnt], number, 0, flags, - &warn[cnt]); + ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]); } if (ret) { /* Back out changes we already did */ for (cnt--; cnt >= 0; cnt--) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); if (reserve) - dquot_free_reserved_space(dquots[cnt], - number); + dquot_free_reserved_space(dquot, number); else - dquot_decr_space(dquots[cnt], number); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + dquot_decr_space(dquot, number); + spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); goto out_flush_warn; @@ -1731,7 +1737,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) if (reserve) goto out_flush_warn; - mark_all_dquot_dirty(dquots); + ret = mark_all_dquot_dirty(dquots); out_flush_warn: srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); @@ -1747,7 +1753,8 @@ int dquot_alloc_inode(struct inode *inode) { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; + struct dquot *dquot; if (!inode_quota_active(inode)) return 0; @@ -1758,17 +1765,19 @@ int dquot_alloc_inode(struct inode *inode) index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]); + ret = dquot_add_inodes(dquot, 1, &warn[cnt]); if (ret) { for (cnt--; cnt >= 0; cnt--) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; /* Back out changes we already did */ - spin_lock(&dquots[cnt]->dq_dqb_lock); - dquot_decr_inodes(dquots[cnt], 1); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); + dquot_decr_inodes(dquot, 1); + spin_unlock(&dquot->dq_dqb_lock); } goto warn_put_all; } @@ -1777,7 +1786,7 @@ int dquot_alloc_inode(struct inode *inode) warn_put_all: spin_unlock(&inode->i_lock); if (ret == 0) - mark_all_dquot_dirty(dquots); + ret = mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); return ret; @@ -1789,7 +1798,8 @@ EXPORT_SYMBOL(dquot_alloc_inode); */ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { @@ -1805,9 +1815,8 @@ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) { - struct dquot *dquot = dquots[cnt]; - + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number)) number = dquot->dq_dqb.dqb_rsvspace; @@ -1822,7 
+1831,6 @@ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); - return; } EXPORT_SYMBOL(dquot_claim_space_nodirty); @@ -1831,7 +1839,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty); */ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) { - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { @@ -1847,9 +1856,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) { - struct dquot *dquot = dquots[cnt]; - + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) number = dquot->dq_dqb.dqb_curspace; @@ -1864,7 +1872,6 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); - return; } EXPORT_SYMBOL(dquot_reclaim_space_nodirty); @@ -1875,7 +1882,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int reserve = flags & DQUOT_SPACE_RESERVE, index; if (!inode_quota_active(inode)) { @@ -1896,17 +1904,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); - wtype = info_bdq_free(dquots[cnt], number); + spin_lock(&dquot->dq_dqb_lock); + wtype = info_bdq_free(dquot, number); if (wtype != QUOTA_NL_NOWARN) - prepare_warning(&warn[cnt], dquots[cnt], wtype); + prepare_warning(&warn[cnt], dquot, wtype); if (reserve) - dquot_free_reserved_space(dquots[cnt], number); + dquot_free_reserved_space(dquot, number); else - dquot_decr_space(dquots[cnt], number); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + dquot_decr_space(dquot, number); + spin_unlock(&dquot->dq_dqb_lock); } if (reserve) *inode_reserved_space(inode) -= number; @@ -1930,7 +1939,8 @@ void dquot_free_inode(struct inode *inode) { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; + struct dquot *dquot; int index; if (!inode_quota_active(inode)) @@ -1941,16 +1951,16 @@ void dquot_free_inode(struct inode *inode) spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; - warn[cnt].w_type = QUOTA_NL_NOWARN; - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); - wtype = info_idq_free(dquots[cnt], 1); + spin_lock(&dquot->dq_dqb_lock); + wtype = info_idq_free(dquot, 1); if (wtype != QUOTA_NL_NOWARN) - prepare_warning(&warn[cnt], dquots[cnt], wtype); - dquot_decr_inodes(dquots[cnt], 1); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + prepare_warning(&warn[cnt], dquot, wtype); + dquot_decr_inodes(dquot, 1); + spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); @@ -1976,8 +1986,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) qsize_t cur_space; qsize_t rsv_space = 0; qsize_t inode_usage = 1; + struct 
dquot __rcu **dquots; struct dquot *transfer_from[MAXQUOTAS] = {}; - int cnt, ret = 0; + int cnt, index, ret = 0, err; char is_valid[MAXQUOTAS] = {}; struct dquot_warn warn_to[MAXQUOTAS]; struct dquot_warn warn_from_inodes[MAXQUOTAS]; @@ -2008,6 +2019,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) } cur_space = __inode_get_bytes(inode); rsv_space = __inode_get_rsv_space(inode); + dquots = i_dquot(inode); /* * Build the transfer_from list, check limits, and update usage in * the target structures. @@ -2022,7 +2034,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) if (!sb_has_quota_active(inode->i_sb, cnt)) continue; is_valid[cnt] = 1; - transfer_from[cnt] = i_dquot(inode)[cnt]; + transfer_from[cnt] = srcu_dereference_check(dquots[cnt], + &dquot_srcu, lockdep_is_held(&dq_data_lock)); ret = dquot_add_inodes(transfer_to[cnt], inode_usage, &warn_to[cnt]); if (ret) @@ -2061,13 +2074,25 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) rsv_space); spin_unlock(&transfer_from[cnt]->dq_dqb_lock); } - i_dquot(inode)[cnt] = transfer_to[cnt]; + rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); } spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); - mark_all_dquot_dirty(transfer_from); - mark_all_dquot_dirty(transfer_to); + /* + * These arrays are local and we hold dquot references so we don't need + * the srcu protection but still take dquot_srcu to avoid warning in + * mark_all_dquot_dirty(). + */ + index = srcu_read_lock(&dquot_srcu); + err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); + if (err < 0) + ret = err; + err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); + if (err < 0) + ret = err; + srcu_read_unlock(&dquot_srcu, index); + flush_warnings(warn_to); flush_warnings(warn_from_inodes); flush_warnings(warn_from_space); @@ -2075,7 +2100,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (is_valid[cnt]) transfer_to[cnt] = transfer_from[cnt]; - return 0; + return ret; over_quota: /* Back out changes we already did */ for (cnt--; cnt >= 0; cnt--) { @@ -2219,9 +2244,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags) int cnt; struct quota_info *dqopt = sb_dqopt(sb); - /* s_umount should be held in exclusive mode */ - if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount))) - up_read(&sb->s_umount); + rwsem_assert_held_write(&sb->s_umount); /* Cannot turn off usage accounting without turning off limits, or * suspend quotas and simultaneously turn quotas off. */ @@ -2381,15 +2404,17 @@ static int vfs_setup_quota_inode(struct inode *inode, int type) int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, unsigned int flags) { - struct quota_format_type *fmt = find_quota_format(format_id); + struct quota_format_type *fmt; struct quota_info *dqopt = sb_dqopt(sb); int error; lockdep_assert_held_write(&sb->s_umount); /* Just unsuspend quotas? 
*/ - BUG_ON(flags & DQUOT_SUSPENDED); + if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) + return -EINVAL; + fmt = find_quota_format(format_id); if (!fmt) return -ESRCH; if (!sb->dq_op || !sb->s_qcop || @@ -2482,9 +2507,7 @@ int dquot_resume(struct super_block *sb, int type) int ret = 0, cnt; unsigned int flags; - /* s_umount should be held in exclusive mode */ - if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount))) - up_read(&sb->s_umount); + rwsem_assert_held_write(&sb->s_umount); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) @@ -2572,7 +2595,8 @@ static int dquot_quota_enable(struct super_block *sb, unsigned int flags) goto out_err; } if (sb_has_quota_limits_enabled(sb, type)) { - ret = -EBUSY; + /* compatible with XFS */ + ret = -EEXIST; goto out_err; } spin_lock(&dq_state_lock); @@ -2586,9 +2610,6 @@ out_err: if (flags & qtype_enforce_flag(type)) dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); } - /* Error code translation for better compatibility with XFS */ - if (ret == -EBUSY) - ret = -EEXIST; return ret; } @@ -2702,6 +2723,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) struct mem_dqblk *dm = &dquot->dq_dqb; int check_blim = 0, check_ilim = 0; struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; + int ret; if (di->d_fieldmask & ~VFS_QC_MASK) return -EINVAL; @@ -2783,8 +2805,9 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) else set_bit(DQ_FAKE_B, &dquot->dq_flags); spin_unlock(&dquot->dq_dqb_lock); - mark_dquot_dirty(dquot); - + ret = mark_dquot_dirty(dquot); + if (ret < 0) + return ret; return 0; } @@ -2887,7 +2910,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = { }; EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); -static int do_proc_dqstats(struct ctl_table *table, int write, +static int do_proc_dqstats(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int type = (unsigned long *)table->data - dqstats.stat; @@ -2903,7 +2926,7 @@ static int do_proc_dqstats(struct ctl_table *table, int write, return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_dqstats_table[] = { +static const struct ctl_table fs_dqstats_table[] = { { .procname = "lookups", .data = &dqstats.stat[DQST_LOOKUPS], @@ -2984,7 +3007,7 @@ static int __init dquot_init(void) dquot_cachep = kmem_cache_create("dquot", sizeof(struct dquot), sizeof(unsigned long) * 4, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), + SLAB_PANIC), NULL); order = 0; @@ -2992,11 +3015,10 @@ static int __init dquot_init(void) if (!dquot_hash) panic("Cannot create dquot hash table"); - for (i = 0; i < _DQST_DQSTAT_LAST; i++) { - ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL); - if (ret) - panic("Cannot create dquot stat counters"); - } + ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL, + _DQST_DQSTAT_LAST); + if (ret) + panic("Cannot create dquot stat counters"); /* Find power-of-two hlist_heads which can fit into allocation */ nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 0e41fb84060f..7c2b75a44485 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -976,24 +976,22 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd, struct super_block *sb; unsigned int cmds = cmd >> SUBCMDSHIFT; unsigned int type = cmd & SUBCMDMASK; - struct fd f; + CLASS(fd_raw, f)(fd); int ret; - f = fdget_raw(fd); - if (!f.file) + if (fd_empty(f)) return 
-EBADF; - ret = -EINVAL; if (type >= MAXQUOTAS) - goto out; + return -EINVAL; if (quotactl_cmd_write(cmds)) { - ret = mnt_want_write(f.file->f_path.mnt); + ret = mnt_want_write(fd_file(f)->f_path.mnt); if (ret) - goto out; + return ret; } - sb = f.file->f_path.mnt->mnt_sb; + sb = fd_file(f)->f_path.mnt->mnt_sb; if (quotactl_cmd_onoff(cmds)) down_write(&sb->s_umount); else @@ -1007,8 +1005,6 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd, up_read(&sb->s_umount); if (quotactl_cmd_write(cmds)) - mnt_drop_write(f.file->f_path.mnt); -out: - fdput(f); + mnt_drop_write(fd_file(f)->f_path.mnt); return ret; } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 0f1493e0f6d0..afceef3ddfaa 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -21,6 +21,12 @@ MODULE_AUTHOR("Jan Kara"); MODULE_DESCRIPTION("Quota trie support"); MODULE_LICENSE("GPL"); +/* + * Maximum quota tree depth we support. Only to limit recursion when working + * with the tree. + */ +#define MAX_QTREE_DEPTH 6 + #define __QUOTA_QT_PARANOIA static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) @@ -108,7 +114,7 @@ static int check_dquot_block_header(struct qtree_mem_dqinfo *info, /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int ret, blk; @@ -160,7 +166,7 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; uint nextblk = le32_to_cpu(dh->dqdh_next_free); uint prevblk = le32_to_cpu(dh->dqdh_prev_free); @@ -207,7 +213,7 @@ out_buf: static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int err; @@ -255,7 +261,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, { uint blk, i; struct qt_disk_dqdbheader *dh; - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); char *ddquot; *err = 0; @@ -327,27 +333,36 @@ out_buf: /* Insert reference to structure into the trie */ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *treeblk, int depth) + uint *blks, int depth) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); int ret = 0, newson = 0, newact = 0; __le32 *ref; uint newblk; + int i; if (!buf) return -ENOMEM; - if (!*treeblk) { + if (!blks[depth]) { ret = get_free_dqblk(info); if (ret < 0) goto out_buf; - *treeblk = ret; + for (i = 0; i < depth; i++) + if (ret == blks[i]) { + quota_error(dquot->dq_sb, + "Free block already used in tree: block %u", + ret); + ret = -EIO; + goto out_buf; + } + blks[depth] = ret; memset(buf, 0, info->dqi_usable_bs); newact = 1; } else { - ret = read_blk(info, *treeblk, buf); + ret = read_blk(info, blks[depth], buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read tree quota " - "block %u", *treeblk); + "block %u", blks[depth]); goto out_buf; } } @@ 
-357,8 +372,20 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, info->dqi_blocks - 1); if (ret) goto out_buf; - if (!newblk) + if (!newblk) { newson = 1; + } else { + for (i = 0; i <= depth; i++) + if (newblk == blks[i]) { + quota_error(dquot->dq_sb, + "Cycle in quota tree detected: block %u index %u", + blks[depth], + get_index(info, dquot->dq_id, depth)); + ret = -EIO; + goto out_buf; + } + } + blks[depth + 1] = newblk; if (depth == info->dqi_qtree_depth - 1) { #ifdef __QUOTA_QT_PARANOIA if (newblk) { @@ -370,16 +397,16 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, goto out_buf; } #endif - newblk = find_free_dqentry(info, dquot, &ret); + blks[depth + 1] = find_free_dqentry(info, dquot, &ret); } else { - ret = do_insert_tree(info, dquot, &newblk, depth+1); + ret = do_insert_tree(info, dquot, blks, depth + 1); } if (newson && ret >= 0) { ref[get_index(info, dquot->dq_id, depth)] = - cpu_to_le32(newblk); - ret = write_blk(info, *treeblk, buf); + cpu_to_le32(blks[depth + 1]); + ret = write_blk(info, blks[depth], buf); } else if (newact && ret < 0) { - put_free_dqblk(info, buf, *treeblk); + put_free_dqblk(info, buf, blks[depth]); } out_buf: kfree(buf); @@ -390,7 +417,7 @@ out_buf: static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot) { - int tmp = QT_TREEOFF; + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; #ifdef __QUOTA_QT_PARANOIA if (info->dqi_blocks <= QT_TREEOFF) { @@ -398,7 +425,11 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, return -EIO; } #endif - return do_insert_tree(info, dquot, &tmp, 0); + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { + quota_error(dquot->dq_sb, "Quota tree depth too big!"); + return -EIO; + } + return do_insert_tree(info, dquot, blks, 0); } /* @@ -410,7 +441,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_id.type; struct super_block *sb = dquot->dq_sb; ssize_t ret; - char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); + char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL); if (!ddquot) return -ENOMEM; @@ -449,7 +480,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { struct qt_disk_dqdbheader *dh; - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); int ret = 0; if (!buf) @@ -511,19 +542,20 @@ out_buf: /* Remove reference to dquot from tree */ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *blk, int depth) + uint *blks, int depth) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); int ret = 0; uint newblk; __le32 *ref = (__le32 *)buf; + int i; if (!buf) return -ENOMEM; - ret = read_blk(info, *blk, buf); + ret = read_blk(info, blks[depth], buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read quota data block %u", - *blk); + blks[depth]); goto out_buf; } newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); @@ -532,29 +564,38 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, if (ret) goto out_buf; + for (i = 0; i <= depth; i++) + if (newblk == blks[i]) { + quota_error(dquot->dq_sb, + "Cycle in quota tree detected: block %u index %u", + blks[depth], + get_index(info, dquot->dq_id, depth)); + ret = -EIO; + goto out_buf; + } if (depth == info->dqi_qtree_depth - 1) { ret = free_dqentry(info, dquot, newblk); - newblk = 0; + blks[depth + 1] = 0; } else { - 
ret = remove_tree(info, dquot, &newblk, depth+1); + blks[depth + 1] = newblk; + ret = remove_tree(info, dquot, blks, depth + 1); } - if (ret >= 0 && !newblk) { - int i; + if (ret >= 0 && !blks[depth + 1]) { ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); /* Block got empty? */ for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) ; /* Don't put the root block into the free block list */ if (i == (info->dqi_usable_bs >> 2) - && *blk != QT_TREEOFF) { - put_free_dqblk(info, buf, *blk); - *blk = 0; + && blks[depth] != QT_TREEOFF) { + put_free_dqblk(info, buf, blks[depth]); + blks[depth] = 0; } else { - ret = write_blk(info, *blk, buf); + ret = write_blk(info, blks[depth], buf); if (ret < 0) quota_error(dquot->dq_sb, "Can't write quota tree block %u", - *blk); + blks[depth]); } } out_buf: @@ -565,11 +606,15 @@ out_buf: /* Delete dquot from tree */ int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) { - uint tmp = QT_TREEOFF; + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; if (!dquot->dq_off) /* Even not allocated? */ return 0; - return remove_tree(info, dquot, &tmp, 0); + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { + quota_error(dquot->dq_sb, "Quota tree depth too big!"); + return -EIO; + } + return remove_tree(info, dquot, blks, 0); } EXPORT_SYMBOL(qtree_delete_dquot); @@ -577,7 +622,7 @@ EXPORT_SYMBOL(qtree_delete_dquot); static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); loff_t ret = 0; int i; char *ddquot; @@ -613,18 +658,20 @@ out_buf: /* Find entry for given id in the tree */ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, uint blk, int depth) + struct dquot *dquot, uint *blks, int depth) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); loff_t ret = 0; __le32 *ref = (__le32 *)buf; + uint blk; + int i; if (!buf) return -ENOMEM; - ret = read_blk(info, blk, buf); + ret = read_blk(info, blks[depth], buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read quota tree block %u", - blk); + blks[depth]); goto out_buf; } ret = 0; @@ -636,8 +683,19 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, if (ret) goto out_buf; + /* Check for cycles in the tree */ + for (i = 0; i <= depth; i++) + if (blk == blks[i]) { + quota_error(dquot->dq_sb, + "Cycle in quota tree detected: block %u index %u", + blks[depth], + get_index(info, dquot->dq_id, depth)); + ret = -EIO; + goto out_buf; + } + blks[depth + 1] = blk; if (depth < info->dqi_qtree_depth - 1) - ret = find_tree_dqentry(info, dquot, blk, depth+1); + ret = find_tree_dqentry(info, dquot, blks, depth + 1); else ret = find_block_dqentry(info, dquot, blk); out_buf: @@ -649,7 +707,13 @@ out_buf: static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot) { - return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; + + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { + quota_error(dquot->dq_sb, "Quota tree depth too big!"); + return -EIO; + } + return find_tree_dqentry(info, dquot, blks, 0); } int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) @@ -684,7 +748,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) } dquot->dq_off = offset; } - ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); + ddquot = kmalloc(info->dqi_entry_size, 
GFP_KERNEL); if (!ddquot) return -ENOMEM; ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, @@ -728,7 +792,7 @@ EXPORT_SYMBOL(qtree_release_dquot); static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id, unsigned int blk, int depth) { - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); __le32 *ref = (__le32 *)buf; ssize_t ret; unsigned int epb = info->dqi_usable_bs >> 2; diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c index a0db3f195e95..6f7f0b4afba9 100644 --- a/fs/quota/quota_v1.c +++ b/fs/quota/quota_v1.c @@ -160,9 +160,11 @@ static int v1_read_file_info(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); struct v1_disk_dqblk dqblk; + unsigned int memalloc; int ret; down_read(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0)); if (ret != sizeof(struct v1_disk_dqblk)) { @@ -179,6 +181,7 @@ static int v1_read_file_info(struct super_block *sb, int type) dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; out: + memalloc_nofs_restore(memalloc); up_read(&dqopt->dqio_sem); return ret; } @@ -187,9 +190,11 @@ static int v1_write_file_info(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); struct v1_disk_dqblk dqblk; + unsigned int memalloc; int ret; down_write(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0)); if (ret != sizeof(struct v1_disk_dqblk)) { @@ -209,6 +214,7 @@ static int v1_write_file_info(struct super_block *sb, int type) else if (ret >= 0) ret = -EIO; out: + memalloc_nofs_restore(memalloc); up_write(&dqopt->dqio_sem); return ret; } @@ -229,7 +235,8 @@ static struct quota_format_type v1_quota_format = { static int __init init_v1_quota_format(void) { - return register_quota_format(&v1_quota_format); + register_quota_format(&v1_quota_format); + return 0; } static void __exit exit_v1_quota_format(void) diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index ae99e7b88205..1fda93dcbc1b 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -96,9 +96,11 @@ static int v2_read_file_info(struct super_block *sb, int type) struct qtree_mem_dqinfo *qinfo; ssize_t size; unsigned int version; + unsigned int memalloc; int ret; down_read(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = v2_read_header(sb, type, &dqhead); if (ret < 0) goto out; @@ -119,7 +121,7 @@ static int v2_read_file_info(struct super_block *sb, int type) ret = -EIO; goto out; } - info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); + info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_KERNEL); if (!info->dqi_priv) { ret = -ENOMEM; goto out; @@ -166,14 +168,17 @@ static int v2_read_file_info(struct super_block *sb, int type) i_size_read(sb_dqopt(sb)->files[type])); goto out_free; } - if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { - quota_error(sb, "Free block number too big (%u >= %u).", - qinfo->dqi_free_blk, qinfo->dqi_blocks); + if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF || + qinfo->dqi_free_blk >= qinfo->dqi_blocks)) { + quota_error(sb, "Free block number %u out of range (%u, %u).", + qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks); goto out_free; } - if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { - quota_error(sb, "Block with free entry too big (%u >= %u).", - qinfo->dqi_free_entry, 
qinfo->dqi_blocks); + if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF || + qinfo->dqi_free_entry >= qinfo->dqi_blocks)) { + quota_error(sb, "Block with free entry %u out of range (%u, %u).", + qinfo->dqi_free_entry, QT_TREEOFF, + qinfo->dqi_blocks); goto out_free; } ret = 0; @@ -183,6 +188,7 @@ out_free: info->dqi_priv = NULL; } out: + memalloc_nofs_restore(memalloc); up_read(&dqopt->dqio_sem); return ret; } @@ -195,8 +201,10 @@ static int v2_write_file_info(struct super_block *sb, int type) struct mem_dqinfo *info = &dqopt->info[type]; struct qtree_mem_dqinfo *qinfo = info->dqi_priv; ssize_t size; + unsigned int memalloc; down_write(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); spin_lock(&dq_data_lock); info->dqi_flags &= ~DQF_INFO_DIRTY; dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); @@ -209,6 +217,7 @@ static int v2_write_file_info(struct super_block *sb, int type) dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); size = sb->s_op->quota_write(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + memalloc_nofs_restore(memalloc); up_write(&dqopt->dqio_sem); if (size != sizeof(struct v2_disk_dqinfo)) { quota_error(sb, "Can't write info structure"); @@ -328,11 +337,14 @@ static int v2_read_dquot(struct dquot *dquot) { struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); int ret; + unsigned int memalloc; down_read(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = qtree_read_dquot( sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + memalloc_nofs_restore(memalloc); up_read(&dqopt->dqio_sem); return ret; } @@ -342,6 +354,7 @@ static int v2_write_dquot(struct dquot *dquot) struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); int ret; bool alloc = false; + unsigned int memalloc; /* * If space for dquot is already allocated, we don't need any @@ -355,9 +368,11 @@ static int v2_write_dquot(struct dquot *dquot) } else { down_read(&dqopt->dqio_sem); } + memalloc = memalloc_nofs_save(); ret = qtree_write_dquot( sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + memalloc_nofs_restore(memalloc); if (alloc) up_write(&dqopt->dqio_sem); else @@ -368,10 +383,13 @@ static int v2_write_dquot(struct dquot *dquot) static int v2_release_dquot(struct dquot *dquot) { struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + unsigned int memalloc; int ret; down_write(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + memalloc_nofs_restore(memalloc); up_write(&dqopt->dqio_sem); return ret; @@ -386,10 +404,13 @@ static int v2_free_file_info(struct super_block *sb, int type) static int v2_get_next_id(struct super_block *sb, struct kqid *qid) { struct quota_info *dqopt = sb_dqopt(sb); + unsigned int memalloc; int ret; down_read(&dqopt->dqio_sem); + memalloc = memalloc_nofs_save(); ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid); + memalloc_nofs_restore(memalloc); up_read(&dqopt->dqio_sem); return ret; } @@ -419,12 +440,9 @@ static struct quota_format_type v2r1_quota_format = { static int __init init_v2_quota_format(void) { - int ret; - - ret = register_quota_format(&v2r0_quota_format); - if (ret) - return ret; - return register_quota_format(&v2r1_quota_format); + register_quota_format(&v2r0_quota_format); + register_quota_format(&v2r1_quota_format); + return 0; } static void __exit exit_v2_quota_format(void) |
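One last illustration: the quota_tree.c changes above cap the supported tree depth (MAX_QTREE_DEPTH) and carry the list of blocks visited so far in blks[], refusing to follow a reference back to a block already on that path, so a corrupted quota file cannot send the tree walk in circles. A standalone sketch of that cycle check, with hypothetical names (MAX_DEPTH, on_path) in place of the kernel helpers:

/* Standalone illustration of the cycle check added to quota_tree.c above:
 * the walk remembers every block on the current path and treats a repeated
 * block number as corruption. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH 6

/* Return 1 if blk already appears in blks[0..depth], i.e. following it
 * again would create a cycle. */
static int on_path(const uint32_t *blks, int depth, uint32_t blk)
{
	for (int i = 0; i <= depth; i++)
		if (blks[i] == blk)
			return 1;
	return 0;
}

int main(void)
{
	uint32_t blks[MAX_DEPTH] = { 1 };    /* 1 ~ QT_TREEOFF, the root block */

	blks[1] = 7;                         /* pretend the root pointed to block 7 */
	printf("%d\n", on_path(blks, 1, 9)); /* 0: block 9 is new, safe to descend  */
	printf("%d\n", on_path(blks, 1, 1)); /* 1: back to the root -> treat as EIO */
	return 0;
}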