Diffstat (limited to 'fs/xfs/xfs_qm.c')
-rw-r--r--  fs/xfs/xfs_qm.c  | 1324
1 files changed, 750 insertions, 574 deletions
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 52ed7904df10..95be67ac6eb4 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -13,19 +13,23 @@
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
+#include "xfs_iwalk.h"
#include "xfs_quota.h"
-#include "xfs_error.h"
#include "xfs_bmap.h"
-#include "xfs_bmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_cksum.h"
+#include "xfs_error.h"
+#include "xfs_ag.h"
+#include "xfs_ialloc.h"
+#include "xfs_log_priv.h"
+#include "xfs_health.h"
+#include "xfs_da_format.h"
+#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -33,10 +37,9 @@
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
-STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
+STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
+STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
-STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -50,7 +53,7 @@ STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
STATIC int
xfs_qm_dquot_walk(
struct xfs_mount *mp,
- int type,
+ xfs_dqtype_t type,
int (*execute)(struct xfs_dquot *dqp, void *data),
void *data)
{
@@ -68,7 +71,7 @@ restart:
while (1) {
struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
- int error = 0;
+ int error;
int i;
mutex_lock(&qi->qi_tree_lock);
@@ -82,7 +85,7 @@ restart:
for (i = 0; i < nr_found; i++) {
struct xfs_dquot *dqp = batch[i];
- next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
+ next_index = dqp->q_id + 1;
error = execute(batch[i], data);
if (error == -EAGAIN) {
@@ -122,17 +125,18 @@ xfs_qm_dqpurge(
struct xfs_dquot *dqp,
void *data)
{
- struct xfs_mount *mp = dqp->q_mount;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
+ struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
- xfs_dqlock(dqp);
- if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
- xfs_dqunlock(dqp);
+ spin_lock(&dqp->q_lockref.lock);
+ if (dqp->q_lockref.count > 0 || __lockref_is_dead(&dqp->q_lockref)) {
+ spin_unlock(&dqp->q_lockref.lock);
return -EAGAIN;
}
+ lockref_mark_dead(&dqp->q_lockref);
+ spin_unlock(&dqp->q_lockref.lock);
- dqp->dq_flags |= XFS_DQ_FREEING;
-
+ mutex_lock(&dqp->q_qlock);
+ xfs_qm_dqunpin_wait(dqp);
xfs_dqflock(dqp);
/*
@@ -148,23 +152,37 @@ xfs_qm_dqpurge(
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
- error = xfs_qm_dqflush(dqp, &bp);
+ error = xfs_dquot_use_attached_buf(dqp, &bp);
+ if (error == -EAGAIN) {
+ /* resurrect the refcount from the dead. */
+ dqp->q_lockref.count = 0;
+ goto out_funlock;
+ }
+ if (!bp)
+ goto out_funlock;
+
+ /*
+ * dqflush completes dqflock on error, and the bwrite ioend
+ * does it on success.
+ */
+ error = xfs_qm_dqflush(dqp, bp);
if (!error) {
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
}
xfs_dqflock(dqp);
}
+ xfs_dquot_detach_buf(dqp);
+out_funlock:
ASSERT(atomic_read(&dqp->q_pincount) == 0);
- ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
+ ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
- radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
- be32_to_cpu(dqp->q_core.d_id));
+ radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
qi->qi_dquots--;
/*
@@ -172,8 +190,8 @@ xfs_qm_dqpurge(
* hits zero, so it really should be on the freelist here.
*/
ASSERT(!list_empty(&dqp->q_lru));
- list_lru_del(&qi->qi_lru, &dqp->q_lru);
- XFS_STATS_DEC(mp, xs_qm_dquot_unused);
+ list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
+ XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
xfs_qm_dqdestroy(dqp);
return 0;
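
The purge path above replaces the old XFS_DQ_FREEING flag with the lockref
pattern: take the reference-count spinlock, back off if the dquot is still
referenced or already marked dead, and otherwise mark it dead so that no new
lookups can succeed. Below is a minimal userspace sketch of that lifecycle,
assuming a plain pthread mutex in place of the kernel's struct lockref; the
names dq_ref, dq_ref_get_not_dead and dq_ref_purge are illustrative only and
are not kernel APIs.

/*
 * Simplified model of the "mark dead under the lock" lifecycle used by
 * xfs_qm_dqpurge() above.  Not kernel code; plain C plus pthreads.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dq_ref {
	pthread_mutex_t	lock;
	int		count;	/* active references */
	bool		dead;	/* set once freeing has begun */
};

/* Lookup side: take a reference only while the object is still alive. */
static bool dq_ref_get_not_dead(struct dq_ref *r)
{
	bool ok = false;

	pthread_mutex_lock(&r->lock);
	if (!r->dead) {
		r->count++;
		ok = true;
	}
	pthread_mutex_unlock(&r->lock);
	return ok;
}

static void dq_ref_put(struct dq_ref *r)
{
	pthread_mutex_lock(&r->lock);
	r->count--;
	pthread_mutex_unlock(&r->lock);
}

/* Purge side: succeed only if unused, then fence off all future lookups. */
static bool dq_ref_purge(struct dq_ref *r)
{
	bool ok = false;

	pthread_mutex_lock(&r->lock);
	if (r->count == 0 && !r->dead) {
		r->dead = true;		/* the lockref_mark_dead() step */
		ok = true;
	}
	pthread_mutex_unlock(&r->lock);
	return ok;
}

int main(void)
{
	struct dq_ref r = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	printf("get: %d\n", dq_ref_get_not_dead(&r));		/* 1 */
	printf("purge while held: %d\n", dq_ref_purge(&r));	/* 0, the -EAGAIN case */
	dq_ref_put(&r);
	printf("purge when idle: %d\n", dq_ref_purge(&r));	/* 1 */
	printf("get after death: %d\n", dq_ref_get_not_dead(&r)); /* 0 */
	return 0;
}
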
@@ -182,17 +200,13 @@ xfs_qm_dqpurge(
/*
* Purge the dquot cache.
*/
-void
+static void
xfs_qm_dqpurge_all(
- struct xfs_mount *mp,
- uint flags)
+ struct xfs_mount *mp)
{
- if (flags & XFS_QMOPT_UQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
- if (flags & XFS_QMOPT_GQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
- if (flags & XFS_QMOPT_PQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
+ xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
+ xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
+ xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}
/*
@@ -203,11 +217,48 @@ xfs_qm_unmount(
struct xfs_mount *mp)
{
if (mp->m_quotainfo) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
+ xfs_qm_dqpurge_all(mp);
xfs_qm_destroy_quotainfo(mp);
}
}
+static void
+xfs_qm_unmount_rt(
+ struct xfs_mount *mp)
+{
+ struct xfs_rtgroup *rtg = xfs_rtgroup_grab(mp, 0);
+
+ if (!rtg)
+ return;
+ if (rtg_bitmap(rtg))
+ xfs_qm_dqdetach(rtg_bitmap(rtg));
+ if (rtg_summary(rtg))
+ xfs_qm_dqdetach(rtg_summary(rtg));
+ xfs_rtgroup_rele(rtg);
+}
+
+STATIC void
+xfs_qm_destroy_quotainos(
+ struct xfs_quotainfo *qi)
+{
+ if (qi->qi_uquotaip) {
+ xfs_irele(qi->qi_uquotaip);
+ qi->qi_uquotaip = NULL; /* paranoia */
+ }
+ if (qi->qi_gquotaip) {
+ xfs_irele(qi->qi_gquotaip);
+ qi->qi_gquotaip = NULL;
+ }
+ if (qi->qi_pquotaip) {
+ xfs_irele(qi->qi_pquotaip);
+ qi->qi_pquotaip = NULL;
+ }
+ if (qi->qi_dirip) {
+ xfs_irele(qi->qi_dirip);
+ qi->qi_dirip = NULL;
+ }
+}
+
/*
* Called from the vfsops layer.
*/
@@ -221,74 +272,19 @@ xfs_qm_unmount_quotas(
*/
ASSERT(mp->m_rootip);
xfs_qm_dqdetach(mp->m_rootip);
- if (mp->m_rbmip)
- xfs_qm_dqdetach(mp->m_rbmip);
- if (mp->m_rsumip)
- xfs_qm_dqdetach(mp->m_rsumip);
-
- /*
- * Release the quota inodes.
- */
- if (mp->m_quotainfo) {
- if (mp->m_quotainfo->qi_uquotaip) {
- xfs_irele(mp->m_quotainfo->qi_uquotaip);
- mp->m_quotainfo->qi_uquotaip = NULL;
- }
- if (mp->m_quotainfo->qi_gquotaip) {
- xfs_irele(mp->m_quotainfo->qi_gquotaip);
- mp->m_quotainfo->qi_gquotaip = NULL;
- }
- if (mp->m_quotainfo->qi_pquotaip) {
- xfs_irele(mp->m_quotainfo->qi_pquotaip);
- mp->m_quotainfo->qi_pquotaip = NULL;
- }
- }
-}
-
-STATIC int
-xfs_qm_dqattach_one(
- xfs_inode_t *ip,
- xfs_dqid_t id,
- uint type,
- bool doalloc,
- xfs_dquot_t **IO_idqpp)
-{
- xfs_dquot_t *dqp;
- int error;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- error = 0;
/*
- * See if we already have it in the inode itself. IO_idqpp is &i_udquot
- * or &i_gdquot. This made the code look weird, but made the logic a lot
- * simpler.
+ * For pre-RTG file systems, the RT inodes have quotas attached,
+ * detach them now.
*/
- dqp = *IO_idqpp;
- if (dqp) {
- trace_xfs_dqattach_found(dqp);
- return 0;
- }
+ if (!xfs_has_rtgroups(mp))
+ xfs_qm_unmount_rt(mp);
/*
- * Find the dquot from somewhere. This bumps the reference count of
- * dquot and returns it locked. This can return ENOENT if dquot didn't
- * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
- * turned off suddenly.
- */
- error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
- if (error)
- return error;
-
- trace_xfs_dqattach_get(dqp);
-
- /*
- * dqget may have dropped and re-acquired the ilock, but it guarantees
- * that the dquot returned is the one that should go in the inode.
+ * Release the quota inodes.
*/
- *IO_idqpp = dqp;
- xfs_dqunlock(dqp);
- return 0;
+ if (mp->m_quotainfo)
+ xfs_qm_destroy_quotainos(mp->m_quotainfo);
}
static bool
@@ -297,14 +293,14 @@ xfs_qm_need_dqattach(
{
struct xfs_mount *mp = ip->i_mount;
- if (!XFS_IS_QUOTA_RUNNING(mp))
- return false;
if (!XFS_IS_QUOTA_ON(mp))
return false;
if (!XFS_NOT_DQATTACHED(mp, ip))
return false;
if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
return false;
+ if (xfs_is_metadir_inode(ip))
+ return false;
return true;
}
@@ -326,10 +322,11 @@ xfs_qm_dqattach_locked(
if (!xfs_qm_need_dqattach(ip))
return 0;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ ASSERT(!xfs_is_metadir_inode(ip));
if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
+ error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_USER,
doalloc, &ip->i_udquot);
if (error)
goto done;
@@ -337,7 +334,7 @@ xfs_qm_dqattach_locked(
}
if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
+ error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_GROUP,
doalloc, &ip->i_gdquot);
if (error)
goto done;
@@ -345,7 +342,7 @@ xfs_qm_dqattach_locked(
}
if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
- error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_PROJ,
doalloc, &ip->i_pdquot);
if (error)
goto done;
@@ -357,7 +354,7 @@ done:
* Don't worry about the dquots that we may have attached before any
* error - they'll get detached later if it has not already been done.
*/
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -386,6 +383,8 @@ void
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
+ if (xfs_is_metadir_inode(ip))
+ return;
if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
return;
@@ -415,23 +414,40 @@ static enum lru_status
xfs_qm_dquot_isolate(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
- __releases(lru_lock) __acquires(lru_lock)
+ __releases(&lru->lock) __acquires(&lru->lock)
{
struct xfs_dquot *dqp = container_of(item,
struct xfs_dquot, q_lru);
struct xfs_qm_isolate *isol = arg;
+ enum lru_status ret = LRU_SKIP;
- if (!xfs_dqlock_nowait(dqp))
+ if (!spin_trylock(&dqp->q_lockref.lock))
goto out_miss_busy;
/*
+ * If something else is freeing this dquot and hasn't yet removed it
+ * from the LRU, leave it for the freeing task to complete the freeing
+ * process rather than risk it being free from under us here.
+ */
+ if (__lockref_is_dead(&dqp->q_lockref))
+ goto out_miss_unlock;
+
+ /*
+ * If the dquot is pinned or dirty, rotate it to the end of the LRU to
+ * give some time for it to be cleaned before we try to isolate it
+ * again.
+ */
+ ret = LRU_ROTATE;
+ if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0)
+ goto out_miss_unlock;
+
+ /*
* This dquot has acquired a reference in the meantime; remove it from
* the freelist and try again.
*/
- if (dqp->q_nrefs) {
- xfs_dqunlock(dqp);
+ if (dqp->q_lockref.count) {
+ spin_unlock(&dqp->q_lockref.lock);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
trace_xfs_dqreclaim_want(dqp);
@@ -441,58 +457,35 @@ xfs_qm_dquot_isolate(
}
/*
- * If the dquot is dirty, flush it. If it's already being flushed, just
- * skip it so there is time for the IO to complete before we try to
- * reclaim it again on the next LRU pass.
+ * The dquot may still be under IO, in which case the flush lock will be
+ * held. If we can't get the flush lock now, just skip over the dquot as
+ * if it was dirty.
*/
- if (!xfs_dqflock_nowait(dqp)) {
- xfs_dqunlock(dqp);
- goto out_miss_busy;
- }
-
- if (XFS_DQ_IS_DIRTY(dqp)) {
- struct xfs_buf *bp = NULL;
- int error;
-
- trace_xfs_dqreclaim_dirty(dqp);
-
- /* we have to drop the LRU lock to flush the dquot */
- spin_unlock(lru_lock);
-
- error = xfs_qm_dqflush(dqp, &bp);
- if (error)
- goto out_unlock_dirty;
+ if (!xfs_dqflock_nowait(dqp))
+ goto out_miss_unlock;
- xfs_buf_delwri_queue(bp, &isol->buffers);
- xfs_buf_relse(bp);
- goto out_unlock_dirty;
- }
+ ASSERT(!XFS_DQ_IS_DIRTY(dqp));
+ xfs_dquot_detach_buf(dqp);
xfs_dqfunlock(dqp);
/*
* Prevent lookups now that we are past the point of no return.
*/
- dqp->dq_flags |= XFS_DQ_FREEING;
- xfs_dqunlock(dqp);
+ lockref_mark_dead(&dqp->q_lockref);
+ spin_unlock(&dqp->q_lockref.lock);
- ASSERT(dqp->q_nrefs == 0);
list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
trace_xfs_dqreclaim_done(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
return LRU_REMOVED;
+out_miss_unlock:
+ spin_unlock(&dqp->q_lockref.lock);
out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
- return LRU_SKIP;
-
-out_unlock_dirty:
- trace_xfs_dqreclaim_busy(dqp);
- XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
- xfs_dqunlock(dqp);
- spin_lock(lru_lock);
- return LRU_RETRY;
+ return ret;
}
static unsigned long
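
The isolate callback above now returns an LRU verdict rather than flushing
dquots inline: skip entries whose lock cannot be taken, rotate dirty or pinned
entries so writeback gets a chance to finish, and hand idle clean entries to
the dispose list. A simplified userspace sketch of that classification follows;
it deliberately omits the case where a dquot has regained a reference (the real
callback drops such entries from the LRU without freeing them), and none of the
names below are kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct mock_dquot {
	const char	*name;
	bool		busy;	/* trylock on its lock would fail */
	bool		dirty;	/* pinned or still awaiting writeback */
};

enum verdict { SKIP, ROTATE, DISPOSE };

static enum verdict classify(const struct mock_dquot *dq)
{
	if (dq->busy)
		return SKIP;	/* leave it in place, retry on a later scan */
	if (dq->dirty)
		return ROTATE;	/* move it to the tail so I/O can finish first */
	return DISPOSE;		/* idle and clean: reclaim it */
}

int main(void)
{
	struct mock_dquot scan[] = {
		{ "uid 0",    .busy = true },
		{ "uid 1000", .dirty = true },
		{ "prj 7",    false, false },
	};
	static const char *verdicts[] = { "skip", "rotate", "dispose" };
	unsigned int i;

	for (i = 0; i < sizeof(scan) / sizeof(scan[0]); i++)
		printf("%-8s -> %s\n", scan[i].name, verdicts[classify(&scan[i])]);
	return 0;
}
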
@@ -500,8 +493,7 @@ xfs_qm_shrink_scan(
struct shrinker *shrink,
struct shrink_control *sc)
{
- struct xfs_quotainfo *qi = container_of(shrink,
- struct xfs_quotainfo, qi_shrinker);
+ struct xfs_quotainfo *qi = shrink->private_data;
struct xfs_qm_isolate isol;
unsigned long freed;
int error;
@@ -535,40 +527,37 @@ xfs_qm_shrink_count(
struct shrinker *shrink,
struct shrink_control *sc)
{
- struct xfs_quotainfo *qi = container_of(shrink,
- struct xfs_quotainfo, qi_shrinker);
+ struct xfs_quotainfo *qi = shrink->private_data;
return list_lru_shrink_count(&qi->qi_lru, sc);
}
STATIC void
xfs_qm_set_defquota(
- xfs_mount_t *mp,
- uint type,
- xfs_quotainfo_t *qinf)
+ struct xfs_mount *mp,
+ xfs_dqtype_t type,
+ struct xfs_quotainfo *qinf)
{
- xfs_dquot_t *dqp;
- struct xfs_def_quota *defq;
- struct xfs_disk_dquot *ddqp;
+ struct xfs_dquot *dqp;
+ struct xfs_def_quota *defq;
int error;
error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
if (error)
return;
- ddqp = &dqp->q_core;
- defq = xfs_get_defquota(dqp, qinf);
+ defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
/*
* Timers and warnings have been already set, let's just set the
* default limits for this quota type
*/
- defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
- defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
- defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
- defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
- defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
- defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+ defq->blk.hard = dqp->q_blk.hardlimit;
+ defq->blk.soft = dqp->q_blk.softlimit;
+ defq->ino.hard = dqp->q_ino.hardlimit;
+ defq->ino.soft = dqp->q_ino.softlimit;
+ defq->rtb.hard = dqp->q_rtb.hardlimit;
+ defq->rtb.soft = dqp->q_rtb.softlimit;
xfs_qm_dqdestroy(dqp);
}
@@ -576,19 +565,18 @@ xfs_qm_set_defquota(
static void
xfs_qm_init_timelimits(
struct xfs_mount *mp,
- struct xfs_quotainfo *qinf)
+ xfs_dqtype_t type)
{
- struct xfs_disk_dquot *ddqp;
+ struct xfs_quotainfo *qinf = mp->m_quotainfo;
+ struct xfs_def_quota *defq;
struct xfs_dquot *dqp;
- uint type;
int error;
- qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
- qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
- qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
- qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
- qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
- qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
+ defq = xfs_get_defquota(qinf, type);
+
+ defq->blk.time = XFS_QM_BTIMELIMIT;
+ defq->ino.time = XFS_QM_ITIMELIMIT;
+ defq->rtb.time = XFS_QM_RTBTIMELIMIT;
/*
* We try to get the limits from the superuser's limits fields.
@@ -596,43 +584,180 @@ xfs_qm_init_timelimits(
*
* Since we may not have done a quotacheck by this point, just read
* the dquot without attaching it to any hashtables or lists.
- *
- * Timers and warnings are globally set by the first timer found in
- * user/group/proj quota types, otherwise a default value is used.
- * This should be split into different fields per quota type.
*/
- if (XFS_IS_UQUOTA_RUNNING(mp))
- type = XFS_DQ_USER;
- else if (XFS_IS_GQUOTA_RUNNING(mp))
- type = XFS_DQ_GROUP;
- else
- type = XFS_DQ_PROJ;
error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
if (error)
return;
- ddqp = &dqp->q_core;
/*
* The warnings and timers set the grace period given to
* a user or group before he or she can not perform any
* more writing. If it is zero, a default is used.
*/
- if (ddqp->d_btimer)
- qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
- if (ddqp->d_itimer)
- qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
- if (ddqp->d_rtbtimer)
- qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
- if (ddqp->d_bwarns)
- qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
- if (ddqp->d_iwarns)
- qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
- if (ddqp->d_rtbwarns)
- qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
+ if (dqp->q_blk.timer)
+ defq->blk.time = dqp->q_blk.timer;
+ if (dqp->q_ino.timer)
+ defq->ino.time = dqp->q_ino.timer;
+ if (dqp->q_rtb.timer)
+ defq->rtb.time = dqp->q_rtb.timer;
xfs_qm_dqdestroy(dqp);
}
+static int
+xfs_qm_load_metadir_qinos(
+ struct xfs_mount *mp,
+ struct xfs_quotainfo *qi)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc_empty(mp);
+ error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
+ if (error == -ENOENT) {
+ /* no quota dir directory, but we'll create one later */
+ error = 0;
+ goto out_trans;
+ }
+ if (error)
+ goto out_trans;
+
+ if (XFS_IS_UQUOTA_ON(mp)) {
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
+ &qi->qi_uquotaip);
+ if (error && error != -ENOENT)
+ goto out_trans;
+ }
+
+ if (XFS_IS_GQUOTA_ON(mp)) {
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
+ &qi->qi_gquotaip);
+ if (error && error != -ENOENT)
+ goto out_trans;
+ }
+
+ if (XFS_IS_PQUOTA_ON(mp)) {
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
+ &qi->qi_pquotaip);
+ if (error && error != -ENOENT)
+ goto out_trans;
+ }
+
+ error = 0;
+out_trans:
+ xfs_trans_cancel(tp);
+ return error;
+}
+
+/* Create quota inodes in the metadata directory tree. */
+STATIC int
+xfs_qm_create_metadir_qinos(
+ struct xfs_mount *mp,
+ struct xfs_quotainfo *qi)
+{
+ int error;
+
+ if (!qi->qi_dirip) {
+ error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
+ if (error && error != -EEXIST)
+ return error;
+ /*
+ * If the /quotas dirent points to an inode that isn't
+ * loadable, qi_dirip will be NULL but mkdir_parent will return
+ * -EEXIST. In this case the metadir is corrupt, so bail out.
+ */
+ if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_USER, &qi->qi_uquotaip);
+ if (error)
+ return error;
+ }
+
+ if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
+ if (error)
+ return error;
+ }
+
+ if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
+ * creating quota files on a metadir filesystem.
+ */
+STATIC int
+xfs_qm_prep_metadir_sb(
+ struct xfs_mount *mp)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ spin_lock(&mp->m_sb_lock);
+
+ xfs_add_quota(mp);
+
+ /* qflags will get updated fully _after_ quotacheck */
+ mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
+
+ spin_unlock(&mp->m_sb_lock);
+ xfs_log_sb(tp);
+
+ return xfs_trans_commit(tp);
+}
+
+/*
+ * Load existing quota inodes or create them. Since this is a V5 filesystem,
+ * we don't have to deal with the grp/prjquota switcheroo thing from V4.
+ */
+STATIC int
+xfs_qm_init_metadir_qinos(
+ struct xfs_mount *mp)
+{
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ int error;
+
+ if (!xfs_has_quota(mp)) {
+ error = xfs_qm_prep_metadir_sb(mp);
+ if (error)
+ return error;
+ }
+
+ error = xfs_qm_load_metadir_qinos(mp, qi);
+ if (error)
+ goto out_err;
+
+ error = xfs_qm_create_metadir_qinos(mp, qi);
+ if (error)
+ goto out_err;
+
+ /* The only user of the quota dir inode is online fsck */
+#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+ xfs_irele(qi->qi_dirip);
+ qi->qi_dirip = NULL;
+#endif
+ return 0;
+out_err:
+ xfs_qm_destroy_quotainos(mp->m_quotainfo);
+ return error;
+}
+
/*
* This initializes all the quota information that's kept in the
* mount structure
@@ -644,9 +769,10 @@ xfs_qm_init_quotainfo(
struct xfs_quotainfo *qinf;
int error;
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+ ASSERT(XFS_IS_QUOTA_ON(mp));
- qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+ qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
+ GFP_KERNEL | __GFP_NOFAIL);
error = list_lru_init(&qinf->qi_lru);
if (error)
@@ -656,13 +782,16 @@ xfs_qm_init_quotainfo(
* See if quotainodes are setup, and if not, allocate them,
* and change the superblock accordingly.
*/
- error = xfs_qm_init_quotainos(mp);
+ if (xfs_has_metadir(mp))
+ error = xfs_qm_init_metadir_qinos(mp);
+ else
+ error = xfs_qm_init_quotainos(mp);
if (error)
goto out_free_lru;
- INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
- INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
- INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
+ INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
+ INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
+ INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
mutex_init(&qinf->qi_tree_lock);
/* mutex used to serialize quotaoffs */
@@ -671,26 +800,46 @@ xfs_qm_init_quotainfo(
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
+ if (xfs_has_bigtime(mp)) {
+ qinf->qi_expiry_min =
+ xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
+ qinf->qi_expiry_max =
+ xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
+ } else {
+ qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
+ qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
+ }
+ trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
+ qinf->qi_expiry_max);
mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
- xfs_qm_init_timelimits(mp, qinf);
+ xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
+ xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
+ xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
+
+ if (XFS_IS_UQUOTA_ON(mp))
+ xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
+ if (XFS_IS_GQUOTA_ON(mp))
+ xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
+ if (XFS_IS_PQUOTA_ON(mp))
+ xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
+
+ qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
+ mp->m_super->s_id);
+ if (!qinf->qi_shrinker) {
+ error = -ENOMEM;
+ goto out_free_inos;
+ }
- if (XFS_IS_UQUOTA_RUNNING(mp))
- xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
- if (XFS_IS_GQUOTA_RUNNING(mp))
- xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
- if (XFS_IS_PQUOTA_RUNNING(mp))
- xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
+ qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
+ qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
+ qinf->qi_shrinker->private_data = qinf;
- qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
- qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
- qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
- qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
+ shrinker_register(qinf->qi_shrinker);
- error = register_shrinker(&qinf->qi_shrinker);
- if (error)
- goto out_free_inos;
+ xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
+ xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
return 0;
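
The quotainfo setup above also moves from an embedded struct shrinker plus
register_shrinker() to the allocated-shrinker API, stashing the quotainfo
pointer in private_data so the count/scan callbacks no longer need
container_of(). A bare-bones sketch of that registration sequence follows,
using the same shrinker_alloc()/shrinker_register()/shrinker_free() calls as
the hunk; the my_cache structure and its callbacks are made-up placeholders,
not XFS code.

#include <linux/errno.h>
#include <linux/shrinker.h>

struct my_cache {
	struct shrinker	*shrinker;
	unsigned long	nr_cached;	/* toy stand-in for a real object count */
};

static unsigned long my_cache_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct my_cache *cache = shrink->private_data;

	return cache->nr_cached;
}

static unsigned long my_cache_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct my_cache *cache = shrink->private_data;
	unsigned long freed = cache->nr_cached;

	if (freed > sc->nr_to_scan)
		freed = sc->nr_to_scan;
	/* A real implementation would free objects here. */
	cache->nr_cached -= freed;
	return freed;
}

static int my_cache_register_shrinker(struct my_cache *cache, const char *id)
{
	cache->shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "my-cache:%s", id);
	if (!cache->shrinker)
		return -ENOMEM;

	cache->shrinker->count_objects = my_cache_count;
	cache->shrinker->scan_objects = my_cache_scan;
	cache->shrinker->private_data = cache;
	shrinker_register(cache->shrinker);
	return 0;
}

static void my_cache_unregister_shrinker(struct my_cache *cache)
{
	shrinker_free(cache->shrinker);
}
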
@@ -701,7 +850,7 @@ out_free_inos:
out_free_lru:
list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
- kmem_free(qinf);
+ kfree(qinf);
mp->m_quotainfo = NULL;
return error;
}
@@ -713,37 +862,49 @@ out_free_qinf:
*/
void
xfs_qm_destroy_quotainfo(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_quotainfo_t *qi;
+ struct xfs_quotainfo *qi;
qi = mp->m_quotainfo;
ASSERT(qi != NULL);
- unregister_shrinker(&qi->qi_shrinker);
+ shrinker_free(qi->qi_shrinker);
list_lru_destroy(&qi->qi_lru);
xfs_qm_destroy_quotainos(qi);
mutex_destroy(&qi->qi_tree_lock);
mutex_destroy(&qi->qi_quotaofflock);
- kmem_free(qi);
+ kfree(qi);
mp->m_quotainfo = NULL;
}
+static inline enum xfs_metafile_type
+xfs_qm_metafile_type(
+ unsigned int flags)
+{
+ if (flags & XFS_QMOPT_UQUOTA)
+ return XFS_METAFILE_USRQUOTA;
+ else if (flags & XFS_QMOPT_GQUOTA)
+ return XFS_METAFILE_GRPQUOTA;
+ return XFS_METAFILE_PRJQUOTA;
+}
+
/*
* Create an inode and return with a reference already taken, but unlocked
* This is how we create quota inodes
*/
STATIC int
xfs_qm_qino_alloc(
- xfs_mount_t *mp,
- xfs_inode_t **ip,
- uint flags)
+ struct xfs_mount *mp,
+ struct xfs_inode **ipp,
+ unsigned int flags)
{
- xfs_trans_t *tp;
- int error;
- bool need_alloc = true;
+ struct xfs_trans *tp;
+ enum xfs_metafile_type metafile_type = xfs_qm_metafile_type(flags);
+ int error;
+ bool need_alloc = true;
- *ip = NULL;
+ *ipp = NULL;
/*
* With superblock that doesn't have separate pquotino, we
* share an inode between gquota and pquota. If the on-disk
@@ -751,23 +912,32 @@ xfs_qm_qino_alloc(
* with PQUOTA, just use sb_gquotino for sb_pquotino and
* vice-versa.
*/
- if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
+ if (!xfs_has_pquotino(mp) &&
(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
xfs_ino_t ino = NULLFSINO;
if ((flags & XFS_QMOPT_PQUOTA) &&
(mp->m_sb.sb_gquotino != NULLFSINO)) {
ino = mp->m_sb.sb_gquotino;
- ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_pquotino != NULLFSINO)) {
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
+ return -EFSCORRUPTED;
+ }
} else if ((flags & XFS_QMOPT_GQUOTA) &&
(mp->m_sb.sb_pquotino != NULLFSINO)) {
ino = mp->m_sb.sb_pquotino;
- ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_gquotino != NULLFSINO)) {
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
+ return -EFSCORRUPTED;
+ }
}
if (ino != NULLFSINO) {
- error = xfs_iget(mp, NULL, ino, 0, 0, ip);
+ error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
if (error)
return error;
+
mp->m_sb.sb_gquotino = NULLFSINO;
mp->m_sb.sb_pquotino = NULLFSINO;
need_alloc = false;
@@ -775,16 +945,27 @@ xfs_qm_qino_alloc(
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
- XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
+ need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
+ 0, 0, &tp);
if (error)
return error;
if (need_alloc) {
- error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
+ struct xfs_icreate_args args = {
+ .mode = S_IFREG,
+ .flags = XFS_ICREATE_UNLINKABLE,
+ };
+ xfs_ino_t ino;
+
+ error = xfs_dialloc(&tp, &args, &ino);
+ if (!error)
+ error = xfs_icreate(tp, ino, &args, ipp);
if (error) {
xfs_trans_cancel(tp);
return error;
}
+ if (xfs_has_metadir(mp))
+ xfs_metafile_set_iflag(tp, *ipp, metafile_type);
}
/*
@@ -794,9 +975,9 @@ xfs_qm_qino_alloc(
*/
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
- ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
+ ASSERT(!xfs_has_quota(mp));
- xfs_sb_version_addquota(&mp->m_sb);
+ xfs_add_quota(mp);
mp->m_sb.sb_uquotino = NULLFSINO;
mp->m_sb.sb_gquotino = NULLFSINO;
mp->m_sb.sb_pquotino = NULLFSINO;
@@ -805,35 +986,36 @@ xfs_qm_qino_alloc(
mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
}
if (flags & XFS_QMOPT_UQUOTA)
- mp->m_sb.sb_uquotino = (*ip)->i_ino;
+ mp->m_sb.sb_uquotino = (*ipp)->i_ino;
else if (flags & XFS_QMOPT_GQUOTA)
- mp->m_sb.sb_gquotino = (*ip)->i_ino;
+ mp->m_sb.sb_gquotino = (*ipp)->i_ino;
else
- mp->m_sb.sb_pquotino = (*ip)->i_ino;
+ mp->m_sb.sb_pquotino = (*ipp)->i_ino;
spin_unlock(&mp->m_sb_lock);
xfs_log_sb(tp);
error = xfs_trans_commit(tp);
if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ ASSERT(xfs_is_shutdown(mp));
xfs_alert(mp, "%s failed (error %d)!", __func__, error);
}
- if (need_alloc)
- xfs_finish_inode_setup(*ip);
+ if (need_alloc) {
+ xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
+ xfs_finish_inode_setup(*ipp);
+ }
return error;
}
STATIC void
xfs_qm_reset_dqcounts(
- xfs_mount_t *mp,
- xfs_buf_t *bp,
- xfs_dqid_t id,
- uint type)
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ xfs_dqid_t id,
+ xfs_dqtype_t type)
{
struct xfs_dqblk *dqb;
int j;
- xfs_failaddr_t fa;
trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -843,7 +1025,7 @@ xfs_qm_reset_dqcounts(
*/
#ifdef DEBUG
j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
- sizeof(xfs_dqblk_t);
+ sizeof(struct xfs_dqblk);
ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
dqb = bp->b_addr;
@@ -858,26 +1040,36 @@ xfs_qm_reset_dqcounts(
* find uninitialised dquot blks. See comment in
* xfs_dquot_verify.
*/
- fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
- if (fa)
+ if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
+ (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
xfs_dqblk_repair(mp, &dqb[j], id + j, type);
/*
* Reset type in case we are reusing group quota file for
* project quotas or vice versa
*/
- ddq->d_flags = type;
+ ddq->d_type = type;
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
- ddq->d_btimer = 0;
- ddq->d_itimer = 0;
- ddq->d_rtbtimer = 0;
- ddq->d_bwarns = 0;
- ddq->d_iwarns = 0;
- ddq->d_rtbwarns = 0;
-
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+
+ /*
+ * dquot id 0 stores the default grace period and the maximum
+ * warning limit that were set by the administrator, so we
+ * should not reset them.
+ */
+ if (ddq->d_id != 0) {
+ ddq->d_btimer = 0;
+ ddq->d_itimer = 0;
+ ddq->d_rtbtimer = 0;
+ ddq->d_bwarns = 0;
+ ddq->d_iwarns = 0;
+ ddq->d_rtbwarns = 0;
+ if (xfs_has_bigtime(mp))
+ ddq->d_type |= XFS_DQTYPE_BIGTIME;
+ }
+
+ if (xfs_has_crc(mp)) {
xfs_update_cksum((char *)&dqb[j],
sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
@@ -891,17 +1083,13 @@ xfs_qm_reset_dqcounts_all(
xfs_dqid_t firstid,
xfs_fsblock_t bno,
xfs_filblks_t blkcnt,
- uint flags,
+ xfs_dqtype_t type,
struct list_head *buffer_list)
{
struct xfs_buf *bp;
- int error;
- int type;
+ int error = 0;
ASSERT(blkcnt > 0);
- type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
- (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
- error = 0;
/*
* Blkcnt arg can be a very big number, and might even be
@@ -961,7 +1149,7 @@ STATIC int
xfs_qm_reset_dqcounts_buf(
struct xfs_mount *mp,
struct xfs_inode *qip,
- uint flags,
+ xfs_dqtype_t type,
struct list_head *buffer_list)
{
struct xfs_bmbt_irec *map;
@@ -979,10 +1167,11 @@ xfs_qm_reset_dqcounts_buf(
* trans_reserve. But, this gets called during quotacheck, and that
* happens only at mount time which is single threaded.
*/
- if (qip->i_d.di_nblocks == 0)
+ if (qip->i_nblocks == 0)
return 0;
- map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+ map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
+ GFP_KERNEL | __GFP_NOFAIL);
lblkno = 0;
maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
@@ -1037,14 +1226,14 @@ xfs_qm_reset_dqcounts_buf(
error = xfs_qm_reset_dqcounts_all(mp, firstid,
map[i].br_startblock,
map[i].br_blockcount,
- flags, buffer_list);
+ type, buffer_list);
if (error)
goto out;
}
} while (nmaps > 0);
out:
- kmem_free(map);
+ kfree(map);
return error;
}
@@ -1059,7 +1248,7 @@ out:
STATIC int
xfs_qm_quotacheck_dqadjust(
struct xfs_inode *ip,
- uint type,
+ xfs_dqtype_t type,
xfs_qcnt_t nblks,
xfs_qcnt_t rtblks)
{
@@ -1079,21 +1268,26 @@ xfs_qm_quotacheck_dqadjust(
return error;
}
+ mutex_lock(&dqp->q_qlock);
+ error = xfs_dquot_attach_buf(NULL, dqp);
+ if (error)
+ goto out_unlock;
+
trace_xfs_dqadjust(dqp);
/*
* Adjust the inode count and the block count to reflect this inode's
* resource usage.
*/
- be64_add_cpu(&dqp->q_core.d_icount, 1);
- dqp->q_res_icount++;
+ dqp->q_ino.count++;
+ dqp->q_ino.reserved++;
if (nblks) {
- be64_add_cpu(&dqp->q_core.d_bcount, nblks);
- dqp->q_res_bcount += nblks;
+ dqp->q_blk.count += nblks;
+ dqp->q_blk.reserved += nblks;
}
if (rtblks) {
- be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
- dqp->q_res_rtbcount += rtblks;
+ dqp->q_rtb.count += rtblks;
+ dqp->q_rtb.reserved += rtblks;
}
/*
@@ -1101,14 +1295,16 @@ xfs_qm_quotacheck_dqadjust(
*
* There are no timers for the default values set in the root dquot.
*/
- if (dqp->q_core.d_id) {
- xfs_qm_adjust_dqlimits(mp, dqp);
- xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
+ if (dqp->q_id) {
+ xfs_qm_adjust_dqlimits(dqp);
+ xfs_qm_adjust_dqtimers(dqp);
}
- dqp->dq_flags |= XFS_DQ_DIRTY;
- xfs_qm_dqput(dqp);
- return 0;
+ dqp->q_flags |= XFS_DQFLAG_DIRTY;
+out_unlock:
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
+ return error;
}
/*
@@ -1118,54 +1314,65 @@ xfs_qm_quotacheck_dqadjust(
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* not used */
- int ubsize, /* not used */
- int *ubused, /* not used */
- int *res) /* result code value */
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_ino_t ino,
+ void *data)
{
- xfs_inode_t *ip;
- xfs_qcnt_t nblks;
- xfs_filblks_t rtblks = 0; /* total rt blks */
- int error;
+ struct xfs_inode *ip;
+ xfs_filblks_t nblks, rtblks;
+ unsigned int lock_mode;
+ int error;
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+ ASSERT(XFS_IS_QUOTA_ON(mp));
/*
* rootino must have its resources accounted for, not so with the quota
* inodes.
*/
- if (xfs_is_quota_inode(&mp->m_sb, ino)) {
- *res = BULKSTAT_RV_NOTHING;
- return -EINVAL;
- }
+ if (xfs_is_quota_inode(&mp->m_sb, ino))
+ return 0;
/*
* We don't _need_ to take the ilock EXCL here because quotacheck runs
* at mount time and therefore nobody will be racing chown/chproj.
*/
- error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, 0, &ip);
- if (error) {
- *res = BULKSTAT_RV_NOTHING;
+ error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
+ if (error == -EINVAL || error == -ENOENT)
+ return 0;
+ if (error)
return error;
+
+ /*
+ * Reload the incore unlinked list to avoid failure in inodegc.
+ * Use an unlocked check here because unrecovered unlinked inodes
+ * should be somewhat rare.
+ */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked(ip);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ goto error0;
+ }
}
+ /* Metadata directory files are not accounted to user-visible quotas. */
+ if (xfs_is_metadir_inode(ip))
+ goto error0;
+
ASSERT(ip->i_delayed_blks == 0);
+ lock_mode = xfs_ilock_data_map_shared(ip);
if (XFS_IS_REALTIME_INODE(ip)) {
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- goto error0;
+ error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
+ if (error) {
+ xfs_iunlock(ip, lock_mode);
+ goto error0;
}
-
- xfs_bmap_count_leaves(ifp, &rtblks);
}
-
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
+ xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
+ xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
+ xfs_iunlock(ip, lock_mode);
/*
* Add the (disk blocks and inode) resources occupied by this
@@ -1180,33 +1387,28 @@ xfs_qm_dqusage_adjust(
* and quotaoffs don't race. (Quotachecks happen at mount time only).
*/
if (XFS_IS_UQUOTA_ON(mp)) {
- error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
+ error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
rtblks);
if (error)
goto error0;
}
if (XFS_IS_GQUOTA_ON(mp)) {
- error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
+ error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
rtblks);
if (error)
goto error0;
}
if (XFS_IS_PQUOTA_ON(mp)) {
- error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
+ error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
rtblks);
if (error)
goto error0;
}
- xfs_irele(ip);
- *res = BULKSTAT_RV_DIDONE;
- return 0;
-
error0:
xfs_irele(ip);
- *res = BULKSTAT_RV_GIVEUP;
return error;
}
@@ -1215,50 +1417,35 @@ xfs_qm_flush_one(
struct xfs_dquot *dqp,
void *data)
{
- struct xfs_mount *mp = dqp->q_mount;
struct list_head *buffer_list = data;
struct xfs_buf *bp = NULL;
int error = 0;
- xfs_dqlock(dqp);
- if (dqp->dq_flags & XFS_DQ_FREEING)
- goto out_unlock;
+ if (!lockref_get_not_dead(&dqp->q_lockref))
+ return 0;
+
+ mutex_lock(&dqp->q_qlock);
if (!XFS_DQ_IS_DIRTY(dqp))
goto out_unlock;
- /*
- * The only way the dquot is already flush locked by the time quotacheck
- * gets here is if reclaim flushed it before the dqadjust walk dirtied
- * it for the final time. Quotacheck collects all dquot bufs in the
- * local delwri queue before dquots are dirtied, so reclaim can't have
- * possibly queued it for I/O. The only way out is to push the buffer to
- * cycle the flush lock.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- /* buf is pinned in-core by delwri list */
- bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, 0);
- if (!bp) {
- error = -EINVAL;
- goto out_unlock;
- }
- xfs_buf_unlock(bp);
-
- xfs_buf_delwri_pushbuf(bp, buffer_list);
- xfs_buf_rele(bp);
-
- error = -EAGAIN;
- goto out_unlock;
- }
+ xfs_qm_dqunpin_wait(dqp);
+ xfs_dqflock(dqp);
- error = xfs_qm_dqflush(dqp, &bp);
+ error = xfs_dquot_use_attached_buf(dqp, &bp);
if (error)
goto out_unlock;
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto out_unlock;
+ }
- xfs_buf_delwri_queue(bp, buffer_list);
+ error = xfs_qm_dqflush(dqp, bp);
+ if (!error)
+ xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
return error;
}
@@ -1270,22 +1457,17 @@ STATIC int
xfs_qm_quotacheck(
xfs_mount_t *mp)
{
- int done, count, error, error2;
- xfs_ino_t lastino;
- size_t structsz;
+ int error, error2;
uint flags;
LIST_HEAD (buffer_list);
struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
- count = INT_MAX;
- structsz = 1;
- lastino = 0;
flags = 0;
ASSERT(uip || gip || pip);
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+ ASSERT(XFS_IS_QUOTA_ON(mp));
xfs_notice(mp, "Quotacheck needed: Please wait.");
@@ -1295,7 +1477,7 @@ xfs_qm_quotacheck(
* We don't log our changes till later.
*/
if (uip) {
- error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
+ error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
&buffer_list);
if (error)
goto error_return;
@@ -1303,7 +1485,7 @@ xfs_qm_quotacheck(
}
if (gip) {
- error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
+ error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
&buffer_list);
if (error)
goto error_return;
@@ -1311,42 +1493,42 @@ xfs_qm_quotacheck(
}
if (pip) {
- error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
+ error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
&buffer_list);
if (error)
goto error_return;
flags |= XFS_PQUOTA_CHKD;
}
- do {
- /*
- * Iterate thru all the inodes in the file system,
- * adjusting the corresponding dquot counters in core.
- */
- error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_dqusage_adjust,
- structsz, NULL, &done);
- if (error)
- break;
+ xfs_set_quotacheck_running(mp);
+ error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
+ NULL);
+ xfs_clear_quotacheck_running(mp);
- } while (!done);
+ /*
+ * On error, the inode walk may have partially populated the dquot
+ * caches. We must purge them before disabling quota and tearing down
+ * the quotainfo, or else the dquots will leak.
+ */
+ if (error)
+ goto error_purge;
/*
* We've made all the changes that we need to make incore. Flush them
* down to disk buffers if everything was updated successfully.
*/
if (XFS_IS_UQUOTA_ON(mp)) {
- error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
+ error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
&buffer_list);
}
if (XFS_IS_GQUOTA_ON(mp)) {
- error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
+ error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
&buffer_list);
if (!error)
error = error2;
}
if (XFS_IS_PQUOTA_ON(mp)) {
- error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
+ error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
&buffer_list);
if (!error)
error = error2;
@@ -1363,10 +1545,8 @@ xfs_qm_quotacheck(
* and turn quotaoff. The dquots won't be attached to any of the inodes
* at this point (because we intentionally didn't in dqget_noattach).
*/
- if (error) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
- goto error_return;
- }
+ if (error)
+ goto error_purge;
/*
* If one type of quotas is off, then it will lose its
@@ -1376,7 +1556,7 @@ xfs_qm_quotacheck(
mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
mp->m_qflags |= flags;
- error_return:
+error_return:
xfs_buf_delwri_cancel(&buffer_list);
if (error) {
@@ -1392,9 +1572,28 @@ xfs_qm_quotacheck(
xfs_warn(mp,
"Quotacheck: Failed to reset quota flags.");
}
- } else
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
+ } else {
xfs_notice(mp, "Quotacheck: Done.");
+ xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
+ }
+
return error;
+
+error_purge:
+ /*
+ * On error, we may have inodes queued for inactivation. This may try
+ * to attach dquots to the inode before running cleanup operations on
+ * the inode and this can race with the xfs_qm_destroy_quotainfo() call
+ * below that frees mp->m_quotainfo. To avoid this race, flush all the
+ * pending inodegc operations before we purge the dquots from memory,
+ * ensuring that background inactivation is idle whilst we turn off
+ * quotas.
+ */
+ xfs_inodegc_flush(mp);
+ xfs_qm_dqpurge_all(mp);
+ goto error_return;
+
}
/*
@@ -1414,16 +1613,18 @@ xfs_qm_mount_quotas(
uint sbf;
/*
- * If quotas on realtime volumes is not supported, we disable
- * quotas immediately.
+ * If quotas on realtime volumes is not supported, disable quotas
+ * immediately. We only support rtquota if rtgroups are enabled to
+ * avoid problems with older kernels.
*/
- if (mp->m_sb.sb_rextents) {
+ if (mp->m_sb.sb_rextents &&
+ (!xfs_has_rtgroups(mp) || xfs_has_zoned(mp))) {
xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
mp->m_qflags = 0;
goto write_changes;
}
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+ ASSERT(XFS_IS_QUOTA_ON(mp));
/*
* Allocate the quotainfo structure inside the mount struct, and
@@ -1478,19 +1679,52 @@ xfs_qm_mount_quotas(
* the incore structures are convinced that quotas are
* off, but the on disk superblock doesn't know that !
*/
- ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
+ ASSERT(!(XFS_IS_QUOTA_ON(mp)));
xfs_alert(mp, "%s: Superblock update failed!",
__func__);
}
}
if (error) {
- xfs_warn(mp, "Failed to initialize disk quotas.");
+ xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
return;
}
}
/*
+ * Load the inode for a given type of quota, assuming that the sb fields have
+ * been sorted out. This is not true when switching quota types on a V4
+ * filesystem, so do not use this function for that.
+ *
+ * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
+ * success; or a negative errno.
+ */
+int
+xfs_qm_qino_load(
+ struct xfs_mount *mp,
+ xfs_dqtype_t type,
+ struct xfs_inode **ipp)
+{
+ struct xfs_trans *tp;
+ struct xfs_inode *dp = NULL;
+ int error;
+
+ tp = xfs_trans_alloc_empty(mp);
+ if (xfs_has_metadir(mp)) {
+ error = xfs_dqinode_load_parent(tp, &dp);
+ if (error)
+ goto out_cancel;
+ }
+
+ error = xfs_dqinode_load(tp, dp, type, ipp);
+ if (dp)
+ xfs_irele(dp);
+out_cancel:
+ xfs_trans_cancel(tp);
+ return error;
+}
+
+/*
* This is called after the superblock has been read in and we're ready to
* iget the quota inodes.
*/
@@ -1509,28 +1743,25 @@ xfs_qm_init_quotainos(
/*
* Get the uquota and gquota inodes
*/
- if (xfs_sb_version_hasquota(&mp->m_sb)) {
+ if (xfs_has_quota(mp)) {
if (XFS_IS_UQUOTA_ON(mp) &&
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
if (error)
return error;
}
if (XFS_IS_GQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
if (error)
goto error_rele;
}
if (XFS_IS_PQUOTA_ON(mp) &&
mp->m_sb.sb_pquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_pquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
- 0, 0, &pip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
if (error)
goto error_rele;
}
@@ -1584,24 +1815,6 @@ error_rele:
}
STATIC void
-xfs_qm_destroy_quotainos(
- xfs_quotainfo_t *qi)
-{
- if (qi->qi_uquotaip) {
- xfs_irele(qi->qi_uquotaip);
- qi->qi_uquotaip = NULL; /* paranoia */
- }
- if (qi->qi_gquotaip) {
- xfs_irele(qi->qi_gquotaip);
- qi->qi_gquotaip = NULL;
- }
- if (qi->qi_pquotaip) {
- xfs_irele(qi->qi_pquotaip);
- qi->qi_pquotaip = NULL;
- }
-}
-
-STATIC void
xfs_qm_dqfree_one(
struct xfs_dquot *dqp)
{
@@ -1609,8 +1822,7 @@ xfs_qm_dqfree_one(
struct xfs_quotainfo *qi = mp->m_quotainfo;
mutex_lock(&qi->qi_tree_lock);
- radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
- be32_to_cpu(dqp->q_core.d_id));
+ radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
qi->qi_dquots--;
mutex_unlock(&qi->qi_tree_lock);
@@ -1634,8 +1846,8 @@ xfs_qm_dqfree_one(
int
xfs_qm_vop_dqalloc(
struct xfs_inode *ip,
- xfs_dqid_t uid,
- xfs_dqid_t gid,
+ kuid_t uid,
+ kgid_t gid,
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
@@ -1643,56 +1855,42 @@ xfs_qm_vop_dqalloc(
struct xfs_dquot **O_pdqpp)
{
struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ struct user_namespace *user_ns = inode->i_sb->s_user_ns;
struct xfs_dquot *uq = NULL;
struct xfs_dquot *gq = NULL;
struct xfs_dquot *pq = NULL;
int error;
- uint lockflags;
- if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ if (!XFS_IS_QUOTA_ON(mp))
return 0;
- lockflags = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lockflags);
+ ASSERT(!xfs_is_metadir_inode(ip));
if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
- gid = ip->i_d.di_gid;
+ gid = inode->i_gid;
/*
* Attach the dquot(s) to this inode, doing a dquot allocation
* if necessary. The dquot(s) will not be locked.
*/
if (XFS_NOT_DQATTACHED(mp, ip)) {
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_qm_dqattach_locked(ip, true);
- if (error) {
- xfs_iunlock(ip, lockflags);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
return error;
- }
}
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
- if (ip->i_d.di_uid != uid) {
- /*
- * What we need is the dquot that has this uid, and
- * if we send the inode to dqget, the uid of the inode
- * takes priority over what's sent in the uid argument.
- * We must unlock inode here before calling dqget if
- * we're not sending the inode, because otherwise
- * we'll deadlock by doing trans_reserve while
- * holding ilock.
- */
- xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
+ ASSERT(O_udqpp);
+ if (!uid_eq(inode->i_uid, uid)) {
+ error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
+ XFS_DQTYPE_USER, true, &uq);
if (error) {
ASSERT(error != -ENOENT);
return error;
}
- /*
- * Get the ilock in the right order.
- */
- xfs_dqunlock(uq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
} else {
/*
* Take an extra reference, because we'll return
@@ -1703,42 +1901,35 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
- if (ip->i_d.di_gid != gid) {
- xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
+ ASSERT(O_gdqpp);
+ if (!gid_eq(inode->i_gid, gid)) {
+ error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
+ XFS_DQTYPE_GROUP, true, &gq);
if (error) {
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(gq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
gq = xfs_qm_dqhold(ip->i_gdquot);
}
}
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (xfs_get_projid(ip) != prid) {
- xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
- true, &pq);
+ ASSERT(O_pdqpp);
+ if (ip->i_projid != prid) {
+ error = xfs_qm_dqget(mp, prid,
+ XFS_DQTYPE_PROJ, true, &pq);
if (error) {
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(pq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_pdquot);
pq = xfs_qm_dqhold(ip->i_pdquot);
}
}
- if (uq)
- trace_xfs_dquot_dqalloc(ip);
+ trace_xfs_dquot_dqalloc(ip);
- xfs_iunlock(ip, lockflags);
if (O_udqpp)
*O_udqpp = uq;
else
@@ -1763,135 +1954,75 @@ error_rele:
* Actually transfer ownership, and do dquot modifications.
* These were already reserved.
*/
-xfs_dquot_t *
+struct xfs_dquot *
xfs_qm_vop_chown(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t **IO_olddq,
- xfs_dquot_t *newdq)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_dquot **IO_olddq,
+ struct xfs_dquot *newdq)
{
- xfs_dquot_t *prevdq;
- uint bfield = XFS_IS_REALTIME_INODE(ip) ?
- XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
-
+ struct xfs_dquot *prevdq;
+ xfs_filblks_t dblocks, rblocks;
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
+ ASSERT(!xfs_is_metadir_inode(ip));
/* old dquot */
prevdq = *IO_olddq;
ASSERT(prevdq);
ASSERT(prevdq != newdq);
- xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
- xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
+ xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
+
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
+ -(xfs_qcnt_t)dblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
+ -(xfs_qcnt_t)rblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
/* the sparkling new dquot */
- xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
- xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
- * Take an extra reference, because the inode is going to keep
- * this dquot pointer even after the trans_commit.
+ * Back when we made quota reservations for the chown, we reserved the
+ * ondisk blocks + delalloc blocks with the new dquot. Now that we've
+ * switched the dquots, decrease the new dquot's block reservation
+ * (having already bumped up the real counter) so that we don't have
+ * any reservation to give back when we commit.
*/
- *IO_olddq = xfs_qm_dqhold(newdq);
-
- return prevdq;
-}
-
-/*
- * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
- */
-int
-xfs_qm_vop_chown_reserve(
- struct xfs_trans *tp,
- struct xfs_inode *ip,
- struct xfs_dquot *udqp,
- struct xfs_dquot *gdqp,
- struct xfs_dquot *pdqp,
- uint flags)
-{
- struct xfs_mount *mp = ip->i_mount;
- uint delblks, blkflags, prjflags = 0;
- struct xfs_dquot *udq_unres = NULL;
- struct xfs_dquot *gdq_unres = NULL;
- struct xfs_dquot *pdq_unres = NULL;
- struct xfs_dquot *udq_delblks = NULL;
- struct xfs_dquot *gdq_delblks = NULL;
- struct xfs_dquot *pdq_delblks = NULL;
- int error;
-
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- delblks = ip->i_delayed_blks;
- blkflags = XFS_IS_REALTIME_INODE(ip) ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
+ xfs_trans_mod_dquot(tp, newdq,
+ isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
+ -ip->i_delayed_blks);
- if (XFS_IS_UQUOTA_ON(mp) && udqp &&
- ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
- udq_delblks = udqp;
- /*
- * If there are delayed allocation blocks, then we have to
- * unreserve those from the old dquot, and add them to the
- * new dquot.
- */
- if (delblks) {
- ASSERT(ip->i_udquot);
- udq_unres = ip->i_udquot;
- }
- }
- if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
- gdq_delblks = gdqp;
- if (delblks) {
- ASSERT(ip->i_gdquot);
- gdq_unres = ip->i_gdquot;
- }
- }
-
- if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
- xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
- prjflags = XFS_QMOPT_ENOSPC;
- pdq_delblks = pdqp;
- if (delblks) {
- ASSERT(ip->i_pdquot);
- pdq_unres = ip->i_pdquot;
- }
+ /*
+ * Give the incore reservation for delalloc blocks back to the old
+ * dquot. We don't normally handle delalloc quota reservations
+ * transactionally, so just lock the dquot and subtract from the
+ * reservation. Dirty the transaction because it's too late to turn
+ * back now.
+ */
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ mutex_lock(&prevdq->q_qlock);
+ if (isrt) {
+ ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
+ prevdq->q_rtb.reserved -= ip->i_delayed_blks;
+ } else {
+ ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
+ prevdq->q_blk.reserved -= ip->i_delayed_blks;
}
-
- error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
- udq_delblks, gdq_delblks, pdq_delblks,
- ip->i_d.di_nblocks, 1,
- flags | blkflags | prjflags);
- if (error)
- return error;
+ mutex_unlock(&prevdq->q_qlock);
/*
- * Do the delayed blks reservations/unreservations now. Since, these
- * are done without the help of a transaction, if a reservation fails
- * its previous reservations won't be automatically undone by trans
- * code. So, we have to do it manually here.
+ * Take an extra reference, because the inode is going to keep
+ * this dquot pointer even after the trans_commit.
*/
- if (delblks) {
- /*
- * Do the reservations first. Unreservation can't fail.
- */
- ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
- ASSERT(udq_unres || gdq_unres || pdq_unres);
- error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_delblks, gdq_delblks, pdq_delblks,
- (xfs_qcnt_t)delblks, 0,
- flags | blkflags | prjflags);
- if (error)
- return error;
- xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_unres, gdq_unres, pdq_unres,
- -((xfs_qcnt_t)delblks), 0, blkflags);
- }
+ *IO_olddq = xfs_qm_dqhold(newdq);
- return 0;
+ return prevdq;
}
int
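
A worked example for the reservation handling in xfs_qm_vop_chown() above, with
invented numbers: say the inode being chowned has 100 on-disk data blocks and
20 delalloc blocks. The earlier chown reservation charged all 120 blocks to the
new dquot. Here the 100 real blocks (and the inode count) move from the old
dquot to the new one through transaction deltas; the transaction's block
reservation against the new dquot is then reduced by the 20 delalloc blocks so
that nothing is handed back at commit and those blocks stay reserved with the
new owner; finally, the old dquot's incore reservation is dropped by the same
20 blocks, since it no longer backs that delalloc space.
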
@@ -1901,7 +2032,7 @@ xfs_qm_vop_rename_dqattach(
struct xfs_mount *mp = i_tab[0]->i_mount;
int i;
- if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ if (!XFS_IS_QUOTA_ON(mp))
return 0;
for (i = 0; (i < 4 && i_tab[i]); i++) {
@@ -1932,31 +2063,76 @@ xfs_qm_vop_create_dqattach(
{
struct xfs_mount *mp = tp->t_mountp;
- if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ if (!XFS_IS_QUOTA_ON(mp))
return;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ ASSERT(!xfs_is_metadir_inode(ip));
if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
+ ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
ip->i_udquot = xfs_qm_dqhold(udqp);
- xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+ ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
+
ip->i_gdquot = xfs_qm_dqhold(gdqp);
- xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
+ ASSERT(ip->i_projid == pdqp->q_id);
ip->i_pdquot = xfs_qm_dqhold(pdqp);
- xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
+
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}
+/* Decide if this inode's dquot is near an enforcement boundary. */
+bool
+xfs_inode_near_dquot_enforcement(
+ struct xfs_inode *ip,
+ xfs_dqtype_t type)
+{
+ struct xfs_dquot *dqp;
+ struct xfs_dquot_res *res;
+ struct xfs_dquot_pre *pre;
+ int64_t freesp;
+
+ /* We only care for quotas that are enabled and enforced. */
+ dqp = xfs_inode_dquot(ip, type);
+ if (!dqp || !xfs_dquot_is_enforced(dqp))
+ return false;
+
+ if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
+ xfs_dquot_res_over_limits(&dqp->q_blk) ||
+ xfs_dquot_res_over_limits(&dqp->q_rtb))
+ return true;
+
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ res = &dqp->q_rtb;
+ pre = &dqp->q_rtb_prealloc;
+ } else {
+ res = &dqp->q_blk;
+ pre = &dqp->q_blk_prealloc;
+ }
+
+ /* For space on the data device, check the various thresholds. */
+ if (!pre->q_prealloc_hi_wmark)
+ return false;
+
+ if (res->reserved < pre->q_prealloc_lo_wmark)
+ return false;
+
+ if (res->reserved >= pre->q_prealloc_hi_wmark)
+ return true;
+
+ freesp = pre->q_prealloc_hi_wmark - res->reserved;
+ if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
+ return true;
+
+ return false;
+}
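
A worked example for xfs_inode_near_dquot_enforcement(), with invented
thresholds: take q_prealloc_lo_wmark = 900 blocks, q_prealloc_hi_wmark = 1000
blocks and q_low_space[XFS_QLOWSP_5_PCNT] = 50 blocks. With 850 blocks reserved
the function returns false (still below the low watermark); with 1000 or more
reserved it returns true (at or past the high watermark); with 920 reserved the
remaining headroom is 80 blocks, not below the 5% band, so it returns false;
with 960 reserved only 40 blocks remain, which is below the band, so it returns
true.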