Diffstat (limited to 'fs/xfs/scrub/quota.c')
-rw-r--r--  fs/xfs/scrub/quota.c  251
1 file changed, 161 insertions(+), 90 deletions(-)
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 782d582d3edd..5c5374c44c5a 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -1,45 +1,37 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (C) 2017 Oracle. All Rights Reserved.
- * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
+#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_defer.h"
-#include "xfs_btree.h"
-#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_sb.h"
#include "xfs_inode.h"
-#include "xfs_inode_fork.h"
-#include "xfs_alloc.h"
-#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
-#include "xfs_dquot.h"
-#include "xfs_dquot_item.h"
-#include "scrub/xfs_scrub.h"
+#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
-#include "scrub/trace.h"
+#include "scrub/quota.h"
/* Convert a scrub type code to a DQ flag, or return 0 if error. */
-static inline uint
+xfs_dqtype_t
xchk_quota_to_dqtype(
struct xfs_scrub *sc)
{
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_UQUOTA:
- return XFS_DQ_USER;
+ return XFS_DQTYPE_USER;
case XFS_SCRUB_TYPE_GQUOTA:
- return XFS_DQ_GROUP;
+ return XFS_DQTYPE_GROUP;
case XFS_SCRUB_TYPE_PQUOTA:
- return XFS_DQ_PROJ;
+ return XFS_DQTYPE_PROJ;
default:
return 0;
}
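
For context, the sm_type codes translated above arrive from userspace through the XFS_IOC_SCRUB_METADATA ioctl. Below is a minimal caller-side sketch, not part of this patch; it assumes the xfsprogs development headers are installed, and the header path and the scrub_uquota() helper name are illustrative only.

#include <string.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* struct xfs_scrub_metadata, XFS_IOC_SCRUB_METADATA */

/* Ask the kernel to scrub the user quota file on the filesystem behind fd. */
static int scrub_uquota(int fd)
{
	struct xfs_scrub_metadata sm;

	memset(&sm, 0, sizeof(sm));
	sm.sm_type = XFS_SCRUB_TYPE_UQUOTA;	/* mapped to XFS_DQTYPE_USER above */

	return ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
}
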
@@ -48,28 +40,33 @@ xchk_quota_to_dqtype(
/* Set us up to scrub a quota. */
int
xchk_setup_quota(
- struct xfs_scrub *sc,
- struct xfs_inode *ip)
+ struct xfs_scrub *sc)
{
- uint dqtype;
+ xfs_dqtype_t dqtype;
int error;
- if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
+ if (!XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT;
dqtype = xchk_quota_to_dqtype(sc);
if (dqtype == 0)
return -EINVAL;
- sc->has_quotaofflock = true;
- mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
+
if (!xfs_this_quota_on(sc->mp, dqtype))
return -ENOENT;
- error = xchk_setup_fs(sc, ip);
+
+ if (xchk_need_intent_drain(sc))
+ xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
+ error = xchk_setup_fs(sc);
if (error)
return error;
- sc->ip = xfs_quota_inode(sc->mp, dqtype);
- xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
- sc->ilock_flags = XFS_ILOCK_EXCL;
+
+ error = xchk_install_live_inode(sc, xfs_quota_inode(sc->mp, dqtype));
+ if (error)
+ return error;
+
+ xchk_ilock(sc, XFS_ILOCK_EXCL);
return 0;
}
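
The checks in the hunks that follow blame problems on the quota-file block that holds the affected dquot, i.e. offset = id / qi_dqperchunk. A minimal standalone sketch of that arithmetic, not part of the patch: DQPERCHUNK stands in for mp->m_quotainfo->qi_dqperchunk (roughly blocksize / sizeof(struct xfs_dqblk), e.g. about 30 dquots per 4k block).

/* Illustration only, not part of the patch. */
#define DQPERCHUNK	30ULL	/* stand-in for qi->qi_dqperchunk */

/* Quota-file block offset that scrub blames for a problem with dquot "id". */
static unsigned long long dqid_to_fileoff(unsigned int id)
{
	return id / DQPERCHUNK;
}
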
@@ -80,56 +77,102 @@ struct xchk_quota_info {
xfs_dqid_t last_id;
};
+/* There's a written block backing this dquot, right? */
+STATIC int
+xchk_quota_item_bmap(
+ struct xfs_scrub *sc,
+ struct xfs_dquot *dq,
+ xfs_fileoff_t offset)
+{
+ struct xfs_bmbt_irec irec;
+ struct xfs_mount *mp = sc->mp;
+ int nmaps = 1;
+ int error;
+
+ if (!xfs_verify_fileoff(mp, offset)) {
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ return 0;
+ }
+
+ if (dq->q_fileoffset != offset) {
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ return 0;
+ }
+
+ error = xfs_bmapi_read(sc->ip, offset, 1, &irec, &nmaps, 0);
+ if (error)
+ return error;
+
+ if (nmaps != 1) {
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ return 0;
+ }
+
+ if (!xfs_verify_fsbno(mp, irec.br_startblock))
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ if (XFS_FSB_TO_DADDR(mp, irec.br_startblock) != dq->q_blkno)
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ if (!xfs_bmap_is_written_extent(&irec))
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ return 0;
+}
+
+/* Complain if a quota timer is incorrectly set. */
+static inline void
+xchk_quota_item_timer(
+ struct xfs_scrub *sc,
+ xfs_fileoff_t offset,
+ const struct xfs_dquot_res *res)
+{
+ if ((res->softlimit && res->count > res->softlimit) ||
+ (res->hardlimit && res->count > res->hardlimit)) {
+ if (!res->timer)
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ } else {
+ if (res->timer)
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ }
+}
+
/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
- struct xfs_dquot *dq,
- uint dqtype,
- void *priv)
+ struct xchk_quota_info *sqi,
+ struct xfs_dquot *dq)
{
- struct xchk_quota_info *sqi = priv;
struct xfs_scrub *sc = sqi->sc;
struct xfs_mount *mp = sc->mp;
- struct xfs_disk_dquot *d = &dq->q_core;
struct xfs_quotainfo *qi = mp->m_quotainfo;
xfs_fileoff_t offset;
- unsigned long long bsoft;
- unsigned long long isoft;
- unsigned long long rsoft;
- unsigned long long bhard;
- unsigned long long ihard;
- unsigned long long rhard;
- unsigned long long bcount;
- unsigned long long icount;
- unsigned long long rcount;
xfs_ino_t fs_icount;
- xfs_dqid_t id = be32_to_cpu(d->d_id);
+ int error = 0;
+
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ /*
+ * We want to validate the bmap record for the storage backing this
+ * dquot, so we need to lock the dquot and the quota file. For quota
+ * operations, the locking order is first the ILOCK and then the dquot.
+ */
+ xchk_ilock(sc, XFS_ILOCK_SHARED);
+ mutex_lock(&dq->q_qlock);
/*
* Except for the root dquot, the actual dquot we got must either have
* the same or higher id as we saw before.
*/
- offset = id / qi->qi_dqperchunk;
- if (id && id <= sqi->last_id)
- xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
-
- sqi->last_id = id;
-
- /* Did we get the dquot type we wanted? */
- if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
+ offset = dq->q_id / qi->qi_dqperchunk;
+ if (dq->q_id && dq->q_id <= sqi->last_id)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
- xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
-
- /* Check the limits. */
- bhard = be64_to_cpu(d->d_blk_hardlimit);
- ihard = be64_to_cpu(d->d_ino_hardlimit);
- rhard = be64_to_cpu(d->d_rtb_hardlimit);
+ sqi->last_id = dq->q_id;
- bsoft = be64_to_cpu(d->d_blk_softlimit);
- isoft = be64_to_cpu(d->d_ino_softlimit);
- rsoft = be64_to_cpu(d->d_rtb_softlimit);
+ error = xchk_quota_item_bmap(sc, dq, offset);
+ xchk_iunlock(sc, XFS_ILOCK_SHARED);
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, offset, &error))
+ return error;
/*
* Warn if the hard limits are larger than the fs.
@@ -139,25 +182,22 @@ xchk_quota_item(
* Complain about corruption if the soft limit is greater than
* the hard limit.
*/
- if (bhard > mp->m_sb.sb_dblocks)
+ if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (bsoft > bhard)
+ if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- if (ihard > mp->m_maxicount)
+ if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (isoft > ihard)
+ if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- if (rhard > mp->m_sb.sb_rblocks)
+ if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (rsoft > rhard)
+ if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */
- bcount = be64_to_cpu(d->d_bcount);
- icount = be64_to_cpu(d->d_icount);
- rcount = be64_to_cpu(d->d_rtbcount);
fs_icount = percpu_counter_sum(&mp->m_icount);
/*
@@ -165,16 +205,22 @@ xchk_quota_item(
* a reflink filesystem we're allowed to exceed physical space
* if there are no quota limits.
*/
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
- if (mp->m_sb.sb_dblocks < bcount)
+ if (xfs_has_reflink(mp)) {
+ if (mp->m_sb.sb_dblocks < dq->q_blk.count)
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK,
+ offset);
+ if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset);
} else {
- if (mp->m_sb.sb_dblocks < bcount)
+ if (mp->m_sb.sb_dblocks < dq->q_blk.count)
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ offset);
+ if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset);
}
- if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
+ if (dq->q_ino.count > fs_icount)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/*
@@ -182,13 +228,30 @@ xchk_quota_item(
* lower limit than the actual usage. However, we flag it for
* admin review.
*/
- if (id != 0 && bhard != 0 && bcount > bhard)
+ if (dq->q_id == 0)
+ goto out;
+
+ if (dq->q_blk.hardlimit != 0 &&
+ dq->q_blk.count > dq->q_blk.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (id != 0 && ihard != 0 && icount > ihard)
+
+ if (dq->q_ino.hardlimit != 0 &&
+ dq->q_ino.count > dq->q_ino.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (id != 0 && rhard != 0 && rcount > rhard)
+
+ if (dq->q_rtb.hardlimit != 0 &&
+ dq->q_rtb.count > dq->q_rtb.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_quota_item_timer(sc, offset, &dq->q_blk);
+ xchk_quota_item_timer(sc, offset, &dq->q_ino);
+ xchk_quota_item_timer(sc, offset, &dq->q_rtb);
+
+out:
+ mutex_unlock(&dq->q_qlock);
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return -ECANCELED;
+
return 0;
}
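
The grace-timer rule enforced by xchk_quota_item_timer() above reduces to: the timer must be armed if and only if usage exceeds an enabled soft or hard limit. A minimal restatement of that predicate, for illustration only; the struct and function names here are stand-ins, not the kernel's.

/* Illustration only: mirrors the logic of xchk_quota_item_timer(). */
struct res_limits {
	unsigned long long softlimit;	/* 0 means no soft limit */
	unsigned long long hardlimit;	/* 0 means no hard limit */
	unsigned long long count;	/* current usage */
	unsigned int timer;		/* nonzero when the grace timer is armed */
};

static int timer_state_is_sane(const struct res_limits *res)
{
	int over = (res->softlimit && res->count > res->softlimit) ||
		   (res->hardlimit && res->count > res->hardlimit);

	/* over a limit -> timer armed; within limits -> timer clear */
	return over ? res->timer != 0 : res->timer == 0;
}
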
@@ -210,16 +273,17 @@ xchk_quota_data_fork(
return error;
/* Check for data fork problems that apply only to quota files. */
- max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
- ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
+ max_dqid_off = XFS_DQ_ID_MAX / qi->qi_dqperchunk;
+ ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) {
if (xchk_should_terminate(sc, &error))
break;
+
/*
- * delalloc extents or blocks mapped above the highest
+ * delalloc/unwritten extents or blocks mapped above the highest
* quota id shouldn't happen.
*/
- if (isnullstartblock(irec.br_startblock) ||
+ if (!xfs_bmap_is_written_extent(&irec) ||
irec.br_startoff > max_dqid_off ||
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
@@ -236,10 +300,12 @@ int
xchk_quota(
struct xfs_scrub *sc)
{
- struct xchk_quota_info sqi;
+ struct xchk_dqiter cursor = { };
+ struct xchk_quota_info sqi = { .sc = sc };
struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo;
- uint dqtype;
+ struct xfs_dquot *dq;
+ xfs_dqtype_t dqtype;
int error = 0;
dqtype = xchk_quota_to_dqtype(sc);
@@ -256,13 +322,18 @@ xchk_quota(
* data fork we have to drop ILOCK_EXCL to use the regular dquot
* functions.
*/
- xfs_iunlock(sc->ip, sc->ilock_flags);
- sc->ilock_flags = 0;
- sqi.sc = sc;
- sqi.last_id = 0;
- error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
- sc->ilock_flags = XFS_ILOCK_EXCL;
- xfs_ilock(sc->ip, sc->ilock_flags);
+ xchk_iunlock(sc, sc->ilock_flags);
+
+ /* Now look for things that the quota verifiers won't complain about. */
+ xchk_dqiter_init(&cursor, sc, dqtype);
+ while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
+ error = xchk_quota_item(&sqi, dq);
+ xfs_qm_dqrele(dq);
+ if (error)
+ break;
+ }
+ if (error == -ECANCELED)
+ error = 0;
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
sqi.last_id * qi->qi_dqperchunk, &error))
goto out;
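
As a closing illustration of the data-fork bound checked in xchk_quota_data_fork() above: quota ids are 32 bits wide, so no written block past XFS_DQ_ID_MAX / qi_dqperchunk can back a valid dquot. A minimal sketch of that range test, not part of the patch; DQPERCHUNK again stands in for qi_dqperchunk and the helper name is illustrative.

#include <limits.h>

#define DQPERCHUNK	30ULL	/* stand-in for qi->qi_dqperchunk */

/* Does an extent of the quota file's data fork stay within the valid id range? */
static int quota_extent_in_range(unsigned long long startoff,
				 unsigned long long blockcount)
{
	unsigned long long max_dqid_off = UINT_MAX / DQPERCHUNK;	/* XFS_DQ_ID_MAX / dqperchunk */

	return startoff <= max_dqid_off &&
	       startoff + blockcount - 1 <= max_dqid_off;
}
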