path: root/fs/xfs/xfs_dquot.c
Diffstat (limited to 'fs/xfs/xfs_dquot.c')
-rw-r--r--	fs/xfs/xfs_dquot.c	1092
1 file changed, 671 insertions(+), 421 deletions(-)
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index d5b7f03e93c8..612ca682a513 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -23,13 +23,15 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
+#include "xfs_error.h"
+#include "xfs_health.h"
/*
* Lock order:
*
* ip->i_lock
* qi->qi_tree_lock
- * dquot->q_qlock (xfs_dqlock() and friends)
+ * dquot->q_qlock
* dquot->q_flush (xfs_dqflock() and friends)
* qi->qi_lru_lock
*
@@ -37,12 +39,60 @@
* otherwise by the lowest id first, see xfs_dqlock2.
*/
-struct kmem_zone *xfs_qm_dqtrxzone;
-static struct kmem_zone *xfs_qm_dqzone;
+struct kmem_cache *xfs_dqtrx_cache;
+static struct kmem_cache *xfs_dquot_cache;
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
+/* Record observations of quota corruption with the health tracking system. */
+static void
+xfs_dquot_mark_sick(
+ struct xfs_dquot *dqp)
+{
+ struct xfs_mount *mp = dqp->q_mount;
+
+ switch (dqp->q_type) {
+ case XFS_DQTYPE_USER:
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_UQUOTA);
+ break;
+ case XFS_DQTYPE_GROUP:
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
+ break;
+ case XFS_DQTYPE_PROJ:
+ xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+/*
+ * Detach the dquot buffer if it's still attached, because we can get called
+ * through dqpurge after a log shutdown. Caller must hold the dqflock or have
+ * otherwise isolated the dquot.
+ */
+void
+xfs_dquot_detach_buf(
+ struct xfs_dquot *dqp)
+{
+ struct xfs_dq_logitem *qlip = &dqp->q_logitem;
+ struct xfs_buf *bp = NULL;
+
+ spin_lock(&qlip->qli_lock);
+ if (qlip->qli_item.li_buf) {
+ bp = qlip->qli_item.li_buf;
+ qlip->qli_item.li_buf = NULL;
+ }
+ spin_unlock(&qlip->qli_lock);
+ if (bp) {
+ xfs_buf_lock(bp);
+ list_del_init(&qlip->qli_item.li_bio_list);
+ xfs_buf_relse(bp);
+ }
+}
+
/*
* This is called to free all the memory associated with a dquot
*/
@@ -51,12 +101,13 @@ xfs_qm_dqdestroy(
struct xfs_dquot *dqp)
{
ASSERT(list_empty(&dqp->q_lru));
+ ASSERT(dqp->q_logitem.qli_item.li_buf == NULL);
- kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
+ kvfree(dqp->q_logitem.qli_item.li_lv_shadow);
mutex_destroy(&dqp->q_qlock);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
- kmem_cache_free(xfs_qm_dqzone, dqp);
+ kmem_cache_free(xfs_dquot_cache, dqp);
}
/*
@@ -66,38 +117,79 @@ xfs_qm_dqdestroy(
*/
void
xfs_qm_adjust_dqlimits(
- struct xfs_mount *mp,
struct xfs_dquot *dq)
{
+ struct xfs_mount *mp = dq->q_mount;
struct xfs_quotainfo *q = mp->m_quotainfo;
- struct xfs_disk_dquot *d = &dq->q_core;
struct xfs_def_quota *defq;
int prealloc = 0;
- ASSERT(d->d_id);
+ ASSERT(dq->q_id);
defq = xfs_get_defquota(q, xfs_dquot_type(dq));
- if (defq->bsoftlimit && !d->d_blk_softlimit) {
- d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
+ if (!dq->q_blk.softlimit) {
+ dq->q_blk.softlimit = defq->blk.soft;
prealloc = 1;
}
- if (defq->bhardlimit && !d->d_blk_hardlimit) {
- d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
+ if (!dq->q_blk.hardlimit) {
+ dq->q_blk.hardlimit = defq->blk.hard;
prealloc = 1;
}
- if (defq->isoftlimit && !d->d_ino_softlimit)
- d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
- if (defq->ihardlimit && !d->d_ino_hardlimit)
- d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
- if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
- d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
- if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
- d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
+ if (!dq->q_ino.softlimit)
+ dq->q_ino.softlimit = defq->ino.soft;
+ if (!dq->q_ino.hardlimit)
+ dq->q_ino.hardlimit = defq->ino.hard;
+ if (!dq->q_rtb.softlimit)
+ dq->q_rtb.softlimit = defq->rtb.soft;
+ if (!dq->q_rtb.hardlimit)
+ dq->q_rtb.hardlimit = defq->rtb.hard;
if (prealloc)
xfs_dquot_set_prealloc_limits(dq);
}
+/* Set the expiration time of a quota's grace period. */
+time64_t
+xfs_dquot_set_timeout(
+ struct xfs_mount *mp,
+ time64_t timeout)
+{
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+
+ return clamp_t(time64_t, timeout, qi->qi_expiry_min,
+ qi->qi_expiry_max);
+}
+
+/* Set the length of the default grace period. */
+time64_t
+xfs_dquot_set_grace_period(
+ time64_t grace)
+{
+ return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
+}
+
+/*
+ * Determine if this quota counter is over either limit and set the quota
+ * timers as appropriate.
+ */
+static inline void
+xfs_qm_adjust_res_timer(
+ struct xfs_mount *mp,
+ struct xfs_dquot_res *res,
+ struct xfs_quota_limits *qlim)
+{
+ ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);
+
+ if ((res->softlimit && res->count > res->softlimit) ||
+ (res->hardlimit && res->count > res->hardlimit)) {
+ if (res->timer == 0)
+ res->timer = xfs_dquot_set_timeout(mp,
+ ktime_get_real_seconds() + qlim->time);
+ } else {
+ res->timer = 0;
+ }
+}
+
/*
* Check the limits and timers of a dquot and start or reset timers
* if necessary.
@@ -113,109 +205,31 @@ xfs_qm_adjust_dqlimits(
*/
void
xfs_qm_adjust_dqtimers(
- struct xfs_mount *mp,
struct xfs_dquot *dq)
{
+ struct xfs_mount *mp = dq->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct xfs_disk_dquot *d = &dq->q_core;
struct xfs_def_quota *defq;
- ASSERT(d->d_id);
+ ASSERT(dq->q_id);
defq = xfs_get_defquota(qi, xfs_dquot_type(dq));
-#ifdef DEBUG
- if (d->d_blk_hardlimit)
- ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
- be64_to_cpu(d->d_blk_hardlimit));
- if (d->d_ino_hardlimit)
- ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
- be64_to_cpu(d->d_ino_hardlimit));
- if (d->d_rtb_hardlimit)
- ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
- be64_to_cpu(d->d_rtb_hardlimit));
-#endif
-
- if (!d->d_btimer) {
- if ((d->d_blk_softlimit &&
- (be64_to_cpu(d->d_bcount) >
- be64_to_cpu(d->d_blk_softlimit))) ||
- (d->d_blk_hardlimit &&
- (be64_to_cpu(d->d_bcount) >
- be64_to_cpu(d->d_blk_hardlimit)))) {
- d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
- defq->btimelimit);
- } else {
- d->d_bwarns = 0;
- }
- } else {
- if ((!d->d_blk_softlimit ||
- (be64_to_cpu(d->d_bcount) <=
- be64_to_cpu(d->d_blk_softlimit))) &&
- (!d->d_blk_hardlimit ||
- (be64_to_cpu(d->d_bcount) <=
- be64_to_cpu(d->d_blk_hardlimit)))) {
- d->d_btimer = 0;
- }
- }
-
- if (!d->d_itimer) {
- if ((d->d_ino_softlimit &&
- (be64_to_cpu(d->d_icount) >
- be64_to_cpu(d->d_ino_softlimit))) ||
- (d->d_ino_hardlimit &&
- (be64_to_cpu(d->d_icount) >
- be64_to_cpu(d->d_ino_hardlimit)))) {
- d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
- defq->itimelimit);
- } else {
- d->d_iwarns = 0;
- }
- } else {
- if ((!d->d_ino_softlimit ||
- (be64_to_cpu(d->d_icount) <=
- be64_to_cpu(d->d_ino_softlimit))) &&
- (!d->d_ino_hardlimit ||
- (be64_to_cpu(d->d_icount) <=
- be64_to_cpu(d->d_ino_hardlimit)))) {
- d->d_itimer = 0;
- }
- }
-
- if (!d->d_rtbtimer) {
- if ((d->d_rtb_softlimit &&
- (be64_to_cpu(d->d_rtbcount) >
- be64_to_cpu(d->d_rtb_softlimit))) ||
- (d->d_rtb_hardlimit &&
- (be64_to_cpu(d->d_rtbcount) >
- be64_to_cpu(d->d_rtb_hardlimit)))) {
- d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
- defq->rtbtimelimit);
- } else {
- d->d_rtbwarns = 0;
- }
- } else {
- if ((!d->d_rtb_softlimit ||
- (be64_to_cpu(d->d_rtbcount) <=
- be64_to_cpu(d->d_rtb_softlimit))) &&
- (!d->d_rtb_hardlimit ||
- (be64_to_cpu(d->d_rtbcount) <=
- be64_to_cpu(d->d_rtb_hardlimit)))) {
- d->d_rtbtimer = 0;
- }
- }
+ xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
+ xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
+ xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}
/*
* initialize a buffer full of dquots and log the whole thing
*/
-STATIC void
+void
xfs_qm_init_dquot_blk(
struct xfs_trans *tp,
- struct xfs_mount *mp,
xfs_dqid_t id,
- uint type,
+ xfs_dqtype_t type,
struct xfs_buf *bp)
{
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_dqblk *d;
xfs_dqid_t curid;
@@ -226,6 +240,24 @@ xfs_qm_init_dquot_blk(
ASSERT(tp);
ASSERT(xfs_buf_islocked(bp));
+ switch (type) {
+ case XFS_DQTYPE_USER:
+ qflag = XFS_UQUOTA_CHKD;
+ blftype = XFS_BLF_UDQUOT_BUF;
+ break;
+ case XFS_DQTYPE_PROJ:
+ qflag = XFS_PQUOTA_CHKD;
+ blftype = XFS_BLF_PDQUOT_BUF;
+ break;
+ case XFS_DQTYPE_GROUP:
+ qflag = XFS_GQUOTA_CHKD;
+ blftype = XFS_BLF_GDQUOT_BUF;
+ break;
+ default:
+ ASSERT(0);
+ return;
+ }
+
d = bp->b_addr;
/*
@@ -237,25 +269,16 @@ xfs_qm_init_dquot_blk(
d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
d->dd_diskdq.d_id = cpu_to_be32(curid);
- d->dd_diskdq.d_flags = type;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ d->dd_diskdq.d_type = type;
+ if (curid > 0 && xfs_has_bigtime(mp))
+ d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
+ if (xfs_has_crc(mp)) {
uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
}
}
- if (type & XFS_DQ_USER) {
- qflag = XFS_UQUOTA_CHKD;
- blftype = XFS_BLF_UDQUOT_BUF;
- } else if (type & XFS_DQ_PROJ) {
- qflag = XFS_PQUOTA_CHKD;
- blftype = XFS_BLF_PDQUOT_BUF;
- } else {
- qflag = XFS_GQUOTA_CHKD;
- blftype = XFS_BLF_GDQUOT_BUF;
- }
-
xfs_trans_dquot_buf(tp, bp, blftype);
/*
@@ -280,6 +303,25 @@ xfs_qm_init_dquot_blk(
xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
+static void
+xfs_dquot_set_prealloc(
+ struct xfs_dquot_pre *pre,
+ const struct xfs_dquot_res *res)
+{
+ xfs_qcnt_t space;
+
+ pre->q_prealloc_hi_wmark = res->hardlimit;
+ pre->q_prealloc_lo_wmark = res->softlimit;
+
+ space = div_u64(pre->q_prealloc_hi_wmark, 100);
+ if (!pre->q_prealloc_lo_wmark)
+ pre->q_prealloc_lo_wmark = space * 95;
+
+ pre->q_low_space[XFS_QLOWSP_1_PCNT] = space;
+ pre->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
+ pre->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
+}
+
/*
* Initialize the dynamic speculative preallocation thresholds. The lo/hi
* watermarks correspond to the soft and hard limits by default. If a soft limit
@@ -288,22 +330,8 @@ xfs_qm_init_dquot_blk(
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
- uint64_t space;
-
- dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
- dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
- if (!dqp->q_prealloc_lo_wmark) {
- dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
- do_div(dqp->q_prealloc_lo_wmark, 100);
- dqp->q_prealloc_lo_wmark *= 95;
- }
-
- space = dqp->q_prealloc_hi_wmark;
-
- do_div(space, 100);
- dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
- dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
- dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
+ xfs_dquot_set_prealloc(&dqp->q_blk_prealloc, &dqp->q_blk);
+ xfs_dquot_set_prealloc(&dqp->q_rtb_prealloc, &dqp->q_rtb);
}
/*
@@ -313,39 +341,50 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
*/
STATIC int
xfs_dquot_disk_alloc(
- struct xfs_trans **tpp,
struct xfs_dquot *dqp,
struct xfs_buf **bpp)
{
struct xfs_bmbt_irec map;
- struct xfs_trans *tp = *tpp;
- struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = dqp->q_mount;
struct xfs_buf *bp;
- struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags);
+ xfs_dqtype_t qtype = xfs_dquot_type(dqp);
+ struct xfs_inode *quotip = xfs_quota_inode(mp, qtype);
int nmaps = 1;
int error;
trace_xfs_dqalloc(dqp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
+ XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
+ if (error)
+ return error;
+
xfs_ilock(quotip, XFS_ILOCK_EXCL);
- if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
+ xfs_trans_ijoin(tp, quotip, 0);
+
+ if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
/*
* Return if this type of quotas is turned off while we didn't
* have an inode lock
*/
- xfs_iunlock(quotip, XFS_ILOCK_EXCL);
- return -ESRCH;
+ error = -ESRCH;
+ goto err_cancel;
}
+ error = xfs_iext_count_extend(tp, quotip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ goto err_cancel;
+
/* Create the block mapping. */
- xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
&nmaps);
if (error)
- return error;
+ goto err_cancel;
+
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
- ASSERT(nmaps == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
(map.br_startblock != HOLESTARTBLOCK));
@@ -358,15 +397,14 @@ xfs_dquot_disk_alloc(
error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error)
- return error;
+ goto err_cancel;
bp->b_ops = &xfs_dquot_buf_ops;
/*
* Make a chunk of dquots out of this buffer and log
* the entire thing.
*/
- xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
- dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
+ xfs_qm_init_dquot_blk(tp, dqp->q_id, qtype, bp);
xfs_buf_set_ref(bp, XFS_DQUOT_REF);
/*
@@ -389,16 +427,25 @@ xfs_dquot_disk_alloc(
* is responsible for unlocking any buffer passed back, either
* manually or by committing the transaction. On error, the buffer is
* released and not passed back.
+ *
+ * Keep the quota inode ILOCKed until after the transaction commit to
+ * maintain the atomicity of bmap/rmap updates.
*/
xfs_trans_bhold(tp, bp);
- error = xfs_defer_finish(tpp);
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(quotip, XFS_ILOCK_EXCL);
if (error) {
- xfs_trans_bhold_release(*tpp, bp);
- xfs_trans_brelse(*tpp, bp);
+ xfs_buf_relse(bp);
return error;
}
+
*bpp = bp;
return 0;
+
+err_cancel:
+ xfs_trans_cancel(tp);
+ xfs_iunlock(quotip, XFS_ILOCK_EXCL);
+ return error;
}
/*
@@ -413,13 +460,14 @@ xfs_dquot_disk_read(
{
struct xfs_bmbt_irec map;
struct xfs_buf *bp;
- struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags);
+ xfs_dqtype_t qtype = xfs_dquot_type(dqp);
+ struct xfs_inode *quotip = xfs_quota_inode(mp, qtype);
uint lock_mode;
int nmaps = 1;
int error;
lock_mode = xfs_ilock_data_map_shared(quotip);
- if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
+ if (!xfs_this_quota_on(mp, qtype)) {
/*
* Return if this type of quotas is turned off while we
* didn't have the quota inode lock.
@@ -454,6 +502,8 @@ xfs_dquot_disk_read(
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0, &bp,
&xfs_dquot_buf_ops);
+ if (xfs_metadata_is_sick(error))
+ xfs_dquot_mark_sick(dqp);
if (error) {
ASSERT(bp == NULL);
return error;
@@ -471,14 +521,14 @@ STATIC struct xfs_dquot *
xfs_dquot_alloc(
struct xfs_mount *mp,
xfs_dqid_t id,
- uint type)
+ xfs_dqtype_t type)
{
struct xfs_dquot *dqp;
- dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);
+ dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);
- dqp->dq_flags = type;
- dqp->q_core.d_id = cpu_to_be32(id);
+ dqp->q_type = type;
+ dqp->q_id = id;
dqp->q_mount = mp;
INIT_LIST_HEAD(&dqp->q_lru);
mutex_init(&dqp->q_qlock);
@@ -488,7 +538,7 @@ xfs_dquot_alloc(
* Offset of dquot in the (fixed sized) dquot chunk.
*/
dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
- sizeof(xfs_dqblk_t);
+ sizeof(struct xfs_dqblk);
/*
* Because we want to use a counting completion, complete
@@ -503,13 +553,13 @@ xfs_dquot_alloc(
* quotas.
*/
switch (type) {
- case XFS_DQ_USER:
+ case XFS_DQTYPE_USER:
/* uses the default lock class */
break;
- case XFS_DQ_GROUP:
+ case XFS_DQTYPE_GROUP:
lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
break;
- case XFS_DQ_PROJ:
+ case XFS_DQTYPE_PROJ:
lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
break;
default:
@@ -523,64 +573,125 @@ xfs_dquot_alloc(
return dqp;
}
+/* Check the ondisk dquot's id and type match what the incore dquot expects. */
+static bool
+xfs_dquot_check_type(
+ struct xfs_dquot *dqp,
+ struct xfs_disk_dquot *ddqp)
+{
+ uint8_t ddqp_type;
+ uint8_t dqp_type;
+
+ ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
+ dqp_type = xfs_dquot_type(dqp);
+
+ if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
+ return false;
+
+ /*
+ * V5 filesystems always expect an exact type match. V4 filesystems
+ * expect an exact match for user dquots and for non-root group and
+ * project dquots.
+ */
+ if (xfs_has_crc(dqp->q_mount) ||
+ dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
+ return ddqp_type == dqp_type;
+
+ /*
+ * V4 filesystems support either group or project quotas, but not both
+ * at the same time. The non-user quota file can be switched between
+ * group and project quota uses depending on the mount options, which
+ * means that we can encounter the other type when we try to load quota
+ * defaults. Quotacheck will soon reset the entire quota file
+ * (including the root dquot) anyway, but don't log scary corruption
+ * reports to dmesg.
+ */
+ return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
+}
+
/* Copy the in-core quota fields in from the on-disk buffer. */
-STATIC void
+STATIC int
xfs_dquot_from_disk(
struct xfs_dquot *dqp,
struct xfs_buf *bp)
{
- struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
+ struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset);
+ struct xfs_disk_dquot *ddqp = &dqb->dd_diskdq;
+
+ /*
+ * Ensure that we got the type and ID we were looking for.
+ * Everything else was checked by the dquot buffer verifier.
+ */
+ if (!xfs_dquot_check_type(dqp, ddqp)) {
+ xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
+ "Metadata corruption detected at %pS, quota %u",
+ __this_address, dqp->q_id);
+ xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
+ xfs_dquot_mark_sick(dqp);
+ return -EFSCORRUPTED;
+ }
/* copy everything from disk dquot to the incore dquot */
- memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));
+ dqp->q_type = ddqp->d_type;
+ dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
+ dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
+ dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
+ dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
+ dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
+ dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+
+ dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
+ dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
+ dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);
+
+ dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
+ dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
+ dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);
/*
* Reservation counters are defined as reservation plus current usage
* to avoid having to add every time.
*/
- dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
- dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
- dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
+ dqp->q_blk.reserved = dqp->q_blk.count;
+ dqp->q_ino.reserved = dqp->q_ino.count;
+ dqp->q_rtb.reserved = dqp->q_rtb.count;
/* initialize the dquot speculative prealloc thresholds */
xfs_dquot_set_prealloc_limits(dqp);
+ return 0;
}
-/* Allocate and initialize the dquot buffer for this in-core dquot. */
-static int
-xfs_qm_dqread_alloc(
- struct xfs_mount *mp,
- struct xfs_dquot *dqp,
- struct xfs_buf **bpp)
+/* Copy the in-core quota fields into the on-disk buffer. */
+void
+xfs_dquot_to_disk(
+ struct xfs_disk_dquot *ddqp,
+ struct xfs_dquot *dqp)
{
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
- XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
- if (error)
- goto err;
-
- error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
- if (error)
- goto err_cancel;
-
- error = xfs_trans_commit(tp);
- if (error) {
- /*
- * Buffer was held to the transaction, so we have to unlock it
- * manually here because we're not passing it back.
- */
- xfs_buf_relse(*bpp);
- *bpp = NULL;
- goto err;
- }
- return 0;
-
-err_cancel:
- xfs_trans_cancel(tp);
-err:
- return error;
+ ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+ ddqp->d_version = XFS_DQUOT_VERSION;
+ ddqp->d_type = dqp->q_type;
+ ddqp->d_id = cpu_to_be32(dqp->q_id);
+ ddqp->d_pad0 = 0;
+ ddqp->d_pad = 0;
+
+ ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
+ ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
+ ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
+ ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
+ ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
+ ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);
+
+ ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
+ ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
+ ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);
+
+ ddqp->d_bwarns = 0;
+ ddqp->d_iwarns = 0;
+ ddqp->d_rtbwarns = 0;
+
+ ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
+ ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
+ ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}
/*
@@ -592,7 +703,7 @@ static int
xfs_qm_dqread(
struct xfs_mount *mp,
xfs_dqid_t id,
- uint type,
+ xfs_dqtype_t type,
bool can_alloc,
struct xfs_dquot **dqpp)
{
@@ -606,7 +717,7 @@ xfs_qm_dqread(
/* Try to read the buffer, allocating if necessary. */
error = xfs_dquot_disk_read(mp, dqp, &bp);
if (error == -ENOENT && can_alloc)
- error = xfs_qm_dqread_alloc(mp, dqp, &bp);
+ error = xfs_dquot_disk_alloc(dqp, &bp);
if (error)
goto err;
@@ -617,9 +728,11 @@ xfs_qm_dqread(
* further.
*/
ASSERT(xfs_buf_islocked(bp));
- xfs_dquot_from_disk(dqp, bp);
-
+ error = xfs_dquot_from_disk(dqp, bp);
xfs_buf_relse(bp);
+ if (error)
+ goto err;
+
*dqpp = dqp;
return error;
@@ -638,7 +751,7 @@ err:
static int
xfs_dq_get_next_id(
struct xfs_mount *mp,
- uint type,
+ xfs_dqtype_t type,
xfs_dqid_t *id)
{
struct xfs_inode *quotip = xfs_quota_inode(mp, type);
@@ -663,11 +776,9 @@ xfs_dq_get_next_id(
start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
lock_flags = xfs_ilock_data_map_shared(quotip);
- if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
- if (error)
- return error;
- }
+ error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
+ if (error)
+ return error;
if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
/* contiguous chunk, bump startoff for the id calculation */
@@ -690,10 +801,11 @@ xfs_dq_get_next_id(
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
struct xfs_mount *mp,
- struct xfs_quotainfo *qi,
- struct radix_tree_root *tree,
- xfs_dqid_t id)
+ xfs_dqid_t id,
+ xfs_dqtype_t type)
{
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
struct xfs_dquot *dqp;
restart:
@@ -705,16 +817,12 @@ restart:
return NULL;
}
- xfs_dqlock(dqp);
- if (dqp->dq_flags & XFS_DQ_FREEING) {
- xfs_dqunlock(dqp);
+ if (!lockref_get_not_dead(&dqp->q_lockref)) {
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_freeing(dqp);
delay(1);
goto restart;
}
-
- dqp->q_nrefs++;
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_hit(dqp);
@@ -725,58 +833,59 @@ restart:
/*
* Try to insert a new dquot into the in-core cache. If an error occurs the
* caller should throw away the dquot and start over. Otherwise, the dquot
- * is returned locked (and held by the cache) as if there had been a cache
- * hit.
+ * is returned (and held by the cache) as if there had been a cache hit.
+ *
+ * The insert needs to be done under memalloc_nofs context because the radix
+ * tree can do memory allocation during insert. The qi->qi_tree_lock is taken in
+ * memory reclaim when freeing unused dquots, so we cannot have the radix tree
+ * node allocation recursing into filesystem reclaim whilst we hold the
+ * qi_tree_lock.
*/
static int
xfs_qm_dqget_cache_insert(
struct xfs_mount *mp,
- struct xfs_quotainfo *qi,
- struct radix_tree_root *tree,
xfs_dqid_t id,
+ xfs_dqtype_t type,
struct xfs_dquot *dqp)
{
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
+ unsigned int nofs_flags;
int error;
+ nofs_flags = memalloc_nofs_save();
mutex_lock(&qi->qi_tree_lock);
error = radix_tree_insert(tree, id, dqp);
if (unlikely(error)) {
- /* Duplicate found! Caller must try again. */
- WARN_ON(error != -EEXIST);
- mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_dup(dqp);
- return error;
+ goto out_unlock;
}
- /* Return a locked dquot to the caller, with a reference taken. */
- xfs_dqlock(dqp);
- dqp->q_nrefs = 1;
-
+ lockref_init(&dqp->q_lockref);
qi->qi_dquots++;
- mutex_unlock(&qi->qi_tree_lock);
- return 0;
+out_unlock:
+ mutex_unlock(&qi->qi_tree_lock);
+ memalloc_nofs_restore(nofs_flags);
+ return error;
}
/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
struct xfs_mount *mp,
- uint type)
+ xfs_dqtype_t type)
{
- if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
- return -ESRCH;
-
switch (type) {
- case XFS_DQ_USER:
+ case XFS_DQTYPE_USER:
if (!XFS_IS_UQUOTA_ON(mp))
return -ESRCH;
return 0;
- case XFS_DQ_GROUP:
+ case XFS_DQTYPE_GROUP:
if (!XFS_IS_GQUOTA_ON(mp))
return -ESRCH;
return 0;
- case XFS_DQ_PROJ:
+ case XFS_DQTYPE_PROJ:
if (!XFS_IS_PQUOTA_ON(mp))
return -ESRCH;
return 0;
@@ -787,19 +896,17 @@ xfs_qm_dqget_checks(
}
/*
- * Given the file system, id, and type (UDQUOT/GDQUOT), return a a locked
+ * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
* dquot, doing an allocation (if requested) as needed.
*/
int
xfs_qm_dqget(
struct xfs_mount *mp,
xfs_dqid_t id,
- uint type,
+ xfs_dqtype_t type,
bool can_alloc,
struct xfs_dquot **O_dqpp)
{
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
struct xfs_dquot *dqp;
int error;
@@ -808,28 +915,30 @@ xfs_qm_dqget(
return error;
restart:
- dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
- if (dqp) {
- *O_dqpp = dqp;
- return 0;
- }
+ dqp = xfs_qm_dqget_cache_lookup(mp, id, type);
+ if (dqp)
+ goto found;
error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
if (error)
return error;
- error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
+ error = xfs_qm_dqget_cache_insert(mp, id, type, dqp);
if (error) {
- /*
- * Duplicate found. Just throw away the new dquot and start
- * over.
- */
xfs_qm_dqdestroy(dqp);
- XFS_STATS_INC(mp, xs_qm_dquot_dups);
- goto restart;
+ if (error == -EEXIST) {
+ /*
+ * Duplicate found. Just throw away the new dquot and
+ * start over.
+ */
+ XFS_STATS_INC(mp, xs_qm_dquot_dups);
+ goto restart;
+ }
+ return error;
}
trace_xfs_dqget_miss(dqp);
+found:
*O_dqpp = dqp;
return 0;
}
@@ -844,7 +953,7 @@ int
xfs_qm_dqget_uncached(
struct xfs_mount *mp,
xfs_dqid_t id,
- uint type,
+ xfs_dqtype_t type,
struct xfs_dquot **dqpp)
{
int error;
@@ -860,15 +969,15 @@ xfs_qm_dqget_uncached(
xfs_dqid_t
xfs_qm_id_for_quotatype(
struct xfs_inode *ip,
- uint type)
+ xfs_dqtype_t type)
{
switch (type) {
- case XFS_DQ_USER:
+ case XFS_DQTYPE_USER:
return i_uid_read(VFS_I(ip));
- case XFS_DQ_GROUP:
+ case XFS_DQTYPE_GROUP:
return i_gid_read(VFS_I(ip));
- case XFS_DQ_PROJ:
- return ip->i_d.di_projid;
+ case XFS_DQTYPE_PROJ:
+ return ip->i_projid;
}
ASSERT(0);
return 0;
@@ -882,32 +991,32 @@ xfs_qm_id_for_quotatype(
int
xfs_qm_dqget_inode(
struct xfs_inode *ip,
- uint type,
+ xfs_dqtype_t type,
bool can_alloc,
- struct xfs_dquot **O_dqpp)
+ struct xfs_dquot **dqpp)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
struct xfs_dquot *dqp;
xfs_dqid_t id;
int error;
+ ASSERT(!*dqpp);
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+
error = xfs_qm_dqget_checks(mp, type);
if (error)
return error;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
ASSERT(xfs_inode_dquot(ip, type) == NULL);
+ ASSERT(!xfs_is_metadir_inode(ip));
id = xfs_qm_id_for_quotatype(ip, type);
restart:
- dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
- if (dqp) {
- *O_dqpp = dqp;
- return 0;
- }
+ dqp = xfs_qm_dqget_cache_lookup(mp, id, type);
+ if (dqp)
+ goto found;
/*
* Dquot cache miss. We don't want to keep the inode lock across
@@ -933,7 +1042,6 @@ restart:
if (dqp1) {
xfs_qm_dqdestroy(dqp);
dqp = dqp1;
- xfs_dqlock(dqp);
goto dqret;
}
} else {
@@ -942,21 +1050,26 @@ restart:
return -ESRCH;
}
- error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
+ error = xfs_qm_dqget_cache_insert(mp, id, type, dqp);
if (error) {
- /*
- * Duplicate found. Just throw away the new dquot and start
- * over.
- */
xfs_qm_dqdestroy(dqp);
- XFS_STATS_INC(mp, xs_qm_dquot_dups);
- goto restart;
+ if (error == -EEXIST) {
+ /*
+ * Duplicate found. Just throw away the new dquot and
+ * start over.
+ */
+ XFS_STATS_INC(mp, xs_qm_dquot_dups);
+ goto restart;
+ }
+ return error;
}
dqret:
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
trace_xfs_dqget_miss(dqp);
- *O_dqpp = dqp;
+found:
+ trace_xfs_dqattach_get(dqp);
+ *dqpp = dqp;
return 0;
}
@@ -968,7 +1081,7 @@ int
xfs_qm_dqget_next(
struct xfs_mount *mp,
xfs_dqid_t id,
- uint type,
+ xfs_dqtype_t type,
struct xfs_dquot **dqpp)
{
struct xfs_dquot *dqp;
@@ -982,45 +1095,21 @@ xfs_qm_dqget_next(
else if (error != 0)
break;
+ mutex_lock(&dqp->q_qlock);
if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
*dqpp = dqp;
return 0;
}
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
}
return error;
}
/*
- * Release a reference to the dquot (decrement ref-count) and unlock it.
- *
- * If there is a group quota attached to this dquot, carefully release that
- * too without tripping over deadlocks'n'stuff.
- */
-void
-xfs_qm_dqput(
- struct xfs_dquot *dqp)
-{
- ASSERT(dqp->q_nrefs > 0);
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
- trace_xfs_dqput(dqp);
-
- if (--dqp->q_nrefs == 0) {
- struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
- trace_xfs_dqput_free(dqp);
-
- if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
- XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
- }
- xfs_dqunlock(dqp);
-}
-
-/*
- * Release a dquot. Flush it if dirty, then dqput() it.
- * dquot must not be locked.
+ * Release a reference to the dquot.
*/
void
xfs_qm_dqrele(
@@ -1031,14 +1120,16 @@ xfs_qm_dqrele(
trace_xfs_dqrele(dqp);
- xfs_dqlock(dqp);
- /*
- * We don't care to flush it if the dquot is dirty here.
- * That will create stutters that we want to avoid.
- * Instead we do a delayed write when we try to reclaim
- * a dirty dquot. Also xfs_sync will take part of the burden...
- */
- xfs_qm_dqput(dqp);
+ if (lockref_put_or_lock(&dqp->q_lockref))
+ return;
+ if (!--dqp->q_lockref.count) {
+ struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
+
+ trace_xfs_dqrele_free(dqp);
+ if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
+ XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
+ }
+ spin_unlock(&dqp->q_lockref.lock);
}
/*
@@ -1048,14 +1139,15 @@ xfs_qm_dqrele(
* from the AIL if it has not been re-logged, and unlocking the dquot's
* flush lock. This behavior is very similar to that of inodes..
*/
-STATIC void
+static void
xfs_qm_dqflush_done(
- struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
- struct xfs_dquot *dqp = qip->qli_dquot;
+ struct xfs_dq_logitem *qlip =
+ container_of(lip, struct xfs_dq_logitem, qli_item);
+ struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
+ struct xfs_buf *bp = NULL;
xfs_lsn_t tail_lsn;
/*
@@ -1067,30 +1159,198 @@ xfs_qm_dqflush_done(
* holding the lock before removing the dquot from the AIL.
*/
if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
- ((lip->li_lsn == qip->qli_flush_lsn) ||
+ (lip->li_lsn == qlip->qli_flush_lsn ||
test_bit(XFS_LI_FAILED, &lip->li_flags))) {
-
spin_lock(&ailp->ail_lock);
- if (lip->li_lsn == qip->qli_flush_lsn) {
+ clear_bit(XFS_LI_FAILED, &lip->li_flags);
+ if (lip->li_lsn == qlip->qli_flush_lsn) {
/* xfs_ail_update_finish() drops the AIL lock */
tail_lsn = xfs_ail_delete_one(ailp, lip);
xfs_ail_update_finish(ailp, tail_lsn);
} else {
- /*
- * Clear the failed state since we are about to drop the
- * flush lock
- */
- xfs_clear_li_failed(lip);
spin_unlock(&ailp->ail_lock);
}
}
/*
+ * If this dquot hasn't been dirtied since initiating the last dqflush,
+ * release the buffer reference. We already unlinked this dquot item
+ * from the buffer.
+ */
+ spin_lock(&qlip->qli_lock);
+ if (!qlip->qli_dirty) {
+ bp = lip->li_buf;
+ lip->li_buf = NULL;
+ }
+ spin_unlock(&qlip->qli_lock);
+ if (bp)
+ xfs_buf_rele(bp);
+
+ /*
* Release the dq's flush lock since we're done with it.
*/
xfs_dqfunlock(dqp);
}
+void
+xfs_buf_dquot_iodone(
+ struct xfs_buf *bp)
+{
+ struct xfs_log_item *lip, *n;
+
+ list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
+ list_del_init(&lip->li_bio_list);
+ xfs_qm_dqflush_done(lip);
+ }
+}
+
+/* Check incore dquot for errors before we flush. */
+static xfs_failaddr_t
+xfs_qm_dqflush_check(
+ struct xfs_dquot *dqp)
+{
+ xfs_dqtype_t type = xfs_dquot_type(dqp);
+
+ if (type != XFS_DQTYPE_USER &&
+ type != XFS_DQTYPE_GROUP &&
+ type != XFS_DQTYPE_PROJ)
+ return __this_address;
+
+ if (dqp->q_id == 0)
+ return NULL;
+
+ if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
+ !dqp->q_blk.timer)
+ return __this_address;
+
+ if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
+ !dqp->q_ino.timer)
+ return __this_address;
+
+ if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
+ !dqp->q_rtb.timer)
+ return __this_address;
+
+ /* bigtime flag should never be set on root dquots */
+ if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
+ if (!xfs_has_bigtime(dqp->q_mount))
+ return __this_address;
+ if (dqp->q_id == 0)
+ return __this_address;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get the buffer containing the on-disk dquot.
+ *
+ * Requires dquot flush lock, will clear the dirty flag, delete the quota log
+ * item from the AIL, and shut down the system if something goes wrong.
+ */
+static int
+xfs_dquot_read_buf(
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp,
+ struct xfs_buf **bpp)
+{
+ struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_buf *bp = NULL;
+ int error;
+
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, 0,
+ &bp, &xfs_dquot_buf_ops);
+ if (xfs_metadata_is_sick(error))
+ xfs_dquot_mark_sick(dqp);
+ if (error)
+ goto out_abort;
+
+ *bpp = bp;
+ return 0;
+
+out_abort:
+ dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+ xfs_trans_ail_delete(&dqp->q_logitem.qli_item, 0);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+}
+
+/*
+ * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
+ * dqflush, since dqflush can be called from reclaim context. Caller must hold
+ * the dqlock.
+ */
+int
+xfs_dquot_attach_buf(
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
+{
+ struct xfs_dq_logitem *qlip = &dqp->q_logitem;
+ struct xfs_log_item *lip = &qlip->qli_item;
+ int error;
+
+ spin_lock(&qlip->qli_lock);
+ if (!lip->li_buf) {
+ struct xfs_buf *bp = NULL;
+
+ spin_unlock(&qlip->qli_lock);
+ error = xfs_dquot_read_buf(tp, dqp, &bp);
+ if (error)
+ return error;
+
+ /*
+ * Hold the dquot buffer so that we retain our ref to it after
+ * detaching it from the transaction, then give that ref to the
+ * dquot log item so that the AIL does not have to read the
+ * dquot buffer to push this item.
+ */
+ xfs_buf_hold(bp);
+ xfs_trans_brelse(tp, bp);
+
+ spin_lock(&qlip->qli_lock);
+ lip->li_buf = bp;
+ }
+ qlip->qli_dirty = true;
+ spin_unlock(&qlip->qli_lock);
+
+ return 0;
+}
+
+/*
+ * Get a new reference the dquot buffer attached to this dquot for a dqflush
+ * operation.
+ *
+ * Returns 0 and a NULL bp if none was attached to the dquot; 0 and a locked
+ * bp; or -EAGAIN if the buffer could not be locked.
+ */
+int
+xfs_dquot_use_attached_buf(
+ struct xfs_dquot *dqp,
+ struct xfs_buf **bpp)
+{
+ struct xfs_buf *bp = dqp->q_logitem.qli_item.li_buf;
+
+ /*
+ * A NULL buffer can happen if the dquot dirty flag was set but the
+ * filesystem shut down before transaction commit happened. In that
+ * case we're not going to flush anyway.
+ */
+ if (!bp) {
+ ASSERT(xfs_is_shutdown(dqp->q_mount));
+
+ *bpp = NULL;
+ return 0;
+ }
+
+ if (!xfs_buf_trylock(bp))
+ return -EAGAIN;
+
+ xfs_buf_hold(bp);
+ *bpp = bp;
+ return 0;
+}
+
/*
* Write a modified dquot to disk.
* The dquot must be locked and the flush lock too taken by caller.
@@ -1102,63 +1362,47 @@ xfs_qm_dqflush_done(
int
xfs_qm_dqflush(
struct xfs_dquot *dqp,
- struct xfs_buf **bpp)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = dqp->q_mount;
- struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
- struct xfs_buf *bp;
- struct xfs_dqblk *dqb;
- struct xfs_disk_dquot *ddqp;
+ struct xfs_dq_logitem *qlip = &dqp->q_logitem;
+ struct xfs_log_item *lip = &qlip->qli_item;
+ struct xfs_dqblk *dqblk;
xfs_failaddr_t fa;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
+ ASSERT(atomic_read(&dqp->q_pincount) == 0);
trace_xfs_dqflush(dqp);
-
- *bpp = NULL;
-
- xfs_qm_dqunpin_wait(dqp);
-
- /*
- * Get the buffer containing the on-disk dquot
- */
- error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
- &bp, &xfs_dquot_buf_ops);
- if (error == -EAGAIN)
- goto out_unlock;
- if (error)
- goto out_abort;
-
- /*
- * Calculate the location of the dquot inside the buffer.
- */
- dqb = bp->b_addr + dqp->q_bufoffset;
- ddqp = &dqb->dd_diskdq;
-
- /* sanity check the in-core structure before we flush */
- fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id),
- 0);
+ fa = xfs_qm_dqflush_check(dqp);
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
- be32_to_cpu(dqp->q_core.d_id), fa);
- xfs_buf_relse(bp);
+ dqp->q_id, fa);
+ xfs_dquot_mark_sick(dqp);
error = -EFSCORRUPTED;
goto out_abort;
}
- /* This is the only portion of data that needs to persist */
- memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot));
+ /* Flush the incore dquot to the ondisk buffer. */
+ dqblk = xfs_buf_offset(bp, dqp->q_bufoffset);
+ xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);
/*
* Clear the dirty field and remember the flush lsn for later use.
*/
- dqp->dq_flags &= ~XFS_DQ_DIRTY;
+ dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
- xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
- &dqp->q_logitem.qli_item.li_lsn);
+ /*
+ * We hold the dquot lock, so nobody can dirty it while we're
+ * scheduling the write out. Clear the dirty-since-flush flag.
+ */
+ spin_lock(&qlip->qli_lock);
+ qlip->qli_dirty = false;
+ spin_unlock(&qlip->qli_lock);
+
+ xfs_trans_ail_copy_lsn(mp->m_ail, &qlip->qli_flush_lsn, &lip->li_lsn);
/*
* copy the lsn into the on-disk dquot now while we have the in memory
@@ -1169,18 +1413,18 @@ xfs_qm_dqflush(
* buffer always has a valid CRC. This ensures there is no possibility
* of a dquot without an up-to-date CRC getting to disk.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
- xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+ if (xfs_has_crc(mp)) {
+ dqblk->dd_lsn = cpu_to_be64(lip->li_lsn);
+ xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
}
/*
- * Attach an iodone routine so that we can remove this dquot from the
- * AIL and release the flush lock once the dquot is synced to disk.
+ * Attach the dquot to the buffer so that we can remove this dquot from
+ * the AIL and release the flush lock once the dquot is synced to disk.
*/
- xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
- &dqp->q_logitem.qli_item);
+ bp->b_iodone = xfs_buf_dquot_iodone;
+ list_add_tail(&lip->li_bio_list, &bp->b_li_list);
/*
* If the buffer is pinned then push on the log so we won't
@@ -1192,14 +1436,12 @@ xfs_qm_dqflush(
}
trace_xfs_dqflush_done(dqp);
- *bpp = bp;
return 0;
out_abort:
- dqp->dq_flags &= ~XFS_DQ_DIRTY;
+ dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
xfs_trans_ail_delete(lip, 0);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-out_unlock:
xfs_dqfunlock(dqp);
return error;
}
@@ -1217,8 +1459,7 @@ xfs_dqlock2(
{
if (d1 && d2) {
ASSERT(d1 != d2);
- if (be32_to_cpu(d1->q_core.d_id) >
- be32_to_cpu(d2->q_core.d_id)) {
+ if (d1->q_id > d2->q_id) {
mutex_lock(&d2->q_qlock);
mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
} else {
@@ -1232,25 +1473,66 @@ xfs_dqlock2(
}
}
+static int
+xfs_dqtrx_cmp(
+ const void *a,
+ const void *b)
+{
+ const struct xfs_dqtrx *qa = a;
+ const struct xfs_dqtrx *qb = b;
+
+ if (qa->qt_dquot->q_id > qb->qt_dquot->q_id)
+ return 1;
+ if (qa->qt_dquot->q_id < qb->qt_dquot->q_id)
+ return -1;
+ return 0;
+}
+
+void
+xfs_dqlockn(
+ struct xfs_dqtrx *q)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(XFS_QM_TRANS_MAXDQS > MAX_LOCKDEP_SUBCLASSES);
+
+ /* Sort in order of dquot id, do not allow duplicates */
+ for (i = 0; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++) {
+ unsigned int j;
+
+ for (j = 0; j < i; j++)
+ ASSERT(q[i].qt_dquot != q[j].qt_dquot);
+ }
+ if (i == 0)
+ return;
+
+ sort(q, i, sizeof(struct xfs_dqtrx), xfs_dqtrx_cmp, NULL);
+
+ mutex_lock(&q[0].qt_dquot->q_qlock);
+ for (i = 1; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++)
+ mutex_lock_nested(&q[i].qt_dquot->q_qlock,
+ XFS_QLOCK_NESTED + i - 1);
+}
+
int __init
xfs_qm_init(void)
{
- xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
+ xfs_dquot_cache = kmem_cache_create("xfs_dquot",
sizeof(struct xfs_dquot),
0, 0, NULL);
- if (!xfs_qm_dqzone)
+ if (!xfs_dquot_cache)
goto out;
- xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
+ xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
sizeof(struct xfs_dquot_acct),
0, 0, NULL);
- if (!xfs_qm_dqtrxzone)
- goto out_free_dqzone;
+ if (!xfs_dqtrx_cache)
+ goto out_free_dquot_cache;
return 0;
-out_free_dqzone:
- kmem_cache_destroy(xfs_qm_dqzone);
+out_free_dquot_cache:
+ kmem_cache_destroy(xfs_dquot_cache);
out:
return -ENOMEM;
}
@@ -1258,38 +1540,6 @@ out:
void
xfs_qm_exit(void)
{
- kmem_cache_destroy(xfs_qm_dqtrxzone);
- kmem_cache_destroy(xfs_qm_dqzone);
-}
-
-/*
- * Iterate every dquot of a particular type. The caller must ensure that the
- * particular quota type is active. iter_fn can return negative error codes,
- * or -ECANCELED to indicate that it wants to stop iterating.
- */
-int
-xfs_qm_dqiterate(
- struct xfs_mount *mp,
- uint dqtype,
- xfs_qm_dqiterate_fn iter_fn,
- void *priv)
-{
- struct xfs_dquot *dq;
- xfs_dqid_t id = 0;
- int error;
-
- do {
- error = xfs_qm_dqget_next(mp, id, dqtype, &dq);
- if (error == -ENOENT)
- return 0;
- if (error)
- return error;
-
- error = iter_fn(dq, dqtype, priv);
- id = be32_to_cpu(dq->q_core.d_id);
- xfs_qm_dqput(dq);
- id++;
- } while (error == 0 && id != 0);
-
- return error;
+ kmem_cache_destroy(xfs_dqtrx_cache);
+ kmem_cache_destroy(xfs_dquot_cache);
}