Diffstat (limited to 'fs/xfs/xfs_qm.c')
-rw-r--r--	fs/xfs/xfs_qm.c	| 753
1 file changed, 475 insertions, 278 deletions
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6abcc34fafd8..95be67ac6eb4 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -26,6 +26,10 @@
 #include "xfs_ag.h"
 #include "xfs_ialloc.h"
 #include "xfs_log_priv.h"
+#include "xfs_health.h"
+#include "xfs_da_format.h"
+#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -36,7 +40,6 @@
 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
-STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
 
 /*
  * We use the batch lookup interface to iterate over the dquots as it
@@ -123,14 +126,17 @@ xfs_qm_dqpurge(
 	void			*data)
 {
 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
-	int			error = -EAGAIN;
 
-	xfs_dqlock(dqp);
-	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
-		goto out_unlock;
-
-	dqp->q_flags |= XFS_DQFLAG_FREEING;
+	spin_lock(&dqp->q_lockref.lock);
+	if (dqp->q_lockref.count > 0 || __lockref_is_dead(&dqp->q_lockref)) {
+		spin_unlock(&dqp->q_lockref.lock);
+		return -EAGAIN;
+	}
+	lockref_mark_dead(&dqp->q_lockref);
+	spin_unlock(&dqp->q_lockref.lock);
 
+	mutex_lock(&dqp->q_qlock);
+	xfs_qm_dqunpin_wait(dqp);
 	xfs_dqflock(dqp);
 
 	/*
@@ -140,28 +146,41 @@ xfs_qm_dqpurge(
 	 */
 	if (XFS_DQ_IS_DIRTY(dqp)) {
 		struct xfs_buf	*bp = NULL;
+		int		error;
 
 		/*
 		 * We don't care about getting disk errors here. We need
 		 * to purge this dquot anyway, so we go ahead regardless.
 		 */
-		error = xfs_qm_dqflush(dqp, &bp);
+		error = xfs_dquot_use_attached_buf(dqp, &bp);
+		if (error == -EAGAIN) {
+			/* resurrect the refcount from the dead. */
+			dqp->q_lockref.count = 0;
+			goto out_funlock;
+		}
+		if (!bp)
+			goto out_funlock;
+
+		/*
+		 * dqflush completes dqflock on error, and the bwrite ioend
+		 * does it on success.
+		 */
+		error = xfs_qm_dqflush(dqp, bp);
 		if (!error) {
 			error = xfs_bwrite(bp);
 			xfs_buf_relse(bp);
-		} else if (error == -EAGAIN) {
-			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
-			goto out_unlock;
 		}
 		xfs_dqflock(dqp);
 	}
+	xfs_dquot_detach_buf(dqp);
 
+out_funlock:
 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 
 	xfs_dqfunlock(dqp);
-	xfs_dqunlock(dqp);
+	mutex_unlock(&dqp->q_qlock);
 
 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 	qi->qi_dquots--;
@@ -171,15 +190,11 @@ xfs_qm_dqpurge(
 	 * hits zero, so it really should be on the freelist here.
 	 */
 	ASSERT(!list_empty(&dqp->q_lru));
-	list_lru_del(&qi->qi_lru, &dqp->q_lru);
+	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 
 	xfs_qm_dqdestroy(dqp);
 	return 0;
-
-out_unlock:
-	xfs_dqunlock(dqp);
-	return error;
 }
 
 /*
@@ -207,6 +222,43 @@ xfs_qm_unmount(
 	}
 }
 
+static void
+xfs_qm_unmount_rt(
+	struct xfs_mount	*mp)
+{
+	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);
+
+	if (!rtg)
+		return;
+	if (rtg_bitmap(rtg))
+		xfs_qm_dqdetach(rtg_bitmap(rtg));
+	if (rtg_summary(rtg))
+		xfs_qm_dqdetach(rtg_summary(rtg));
+	xfs_rtgroup_rele(rtg);
+}
+
+STATIC void
+xfs_qm_destroy_quotainos(
+	struct xfs_quotainfo	*qi)
+{
+	if (qi->qi_uquotaip) {
+		xfs_irele(qi->qi_uquotaip);
+		qi->qi_uquotaip = NULL; /* paranoia */
+	}
+	if (qi->qi_gquotaip) {
+		xfs_irele(qi->qi_gquotaip);
+		qi->qi_gquotaip = NULL;
+	}
+	if (qi->qi_pquotaip) {
+		xfs_irele(qi->qi_pquotaip);
+		qi->qi_pquotaip = NULL;
+	}
+	if (qi->qi_dirip) {
+		xfs_irele(qi->qi_dirip);
+		qi->qi_dirip = NULL;
+	}
+}
+
 /*
  * Called from the vfsops layer.
  */
@@ -220,73 +272,19 @@ xfs_qm_unmount_quotas(
 	 */
 	ASSERT(mp->m_rootip);
 	xfs_qm_dqdetach(mp->m_rootip);
-	if (mp->m_rbmip)
-		xfs_qm_dqdetach(mp->m_rbmip);
-	if (mp->m_rsumip)
-		xfs_qm_dqdetach(mp->m_rsumip);
 
 	/*
-	 * Release the quota inodes.
+	 * For pre-RTG file systems, the RT inodes have quotas attached,
+	 * detach them now.
 	 */
-	if (mp->m_quotainfo) {
-		if (mp->m_quotainfo->qi_uquotaip) {
-			xfs_irele(mp->m_quotainfo->qi_uquotaip);
-			mp->m_quotainfo->qi_uquotaip = NULL;
-		}
-		if (mp->m_quotainfo->qi_gquotaip) {
-			xfs_irele(mp->m_quotainfo->qi_gquotaip);
-			mp->m_quotainfo->qi_gquotaip = NULL;
-		}
-		if (mp->m_quotainfo->qi_pquotaip) {
-			xfs_irele(mp->m_quotainfo->qi_pquotaip);
-			mp->m_quotainfo->qi_pquotaip = NULL;
-		}
-	}
-}
-
-STATIC int
-xfs_qm_dqattach_one(
-	struct xfs_inode	*ip,
-	xfs_dqtype_t		type,
-	bool			doalloc,
-	struct xfs_dquot	**IO_idqpp)
-{
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-	error = 0;
+	if (!xfs_has_rtgroups(mp))
+		xfs_qm_unmount_rt(mp);
 
 	/*
-	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
-	 * or &i_gdquot. This made the code look weird, but made the logic a lot
-	 * simpler.
-	 */
-	dqp = *IO_idqpp;
-	if (dqp) {
-		trace_xfs_dqattach_found(dqp);
-		return 0;
-	}
-
-	/*
-	 * Find the dquot from somewhere. This bumps the reference count of
-	 * dquot and returns it locked. This can return ENOENT if dquot didn't
-	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
-	 * turned off suddenly.
-	 */
-	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
-	if (error)
-		return error;
-
-	trace_xfs_dqattach_get(dqp);
-
-	/*
-	 * dqget may have dropped and re-acquired the ilock, but it guarantees
-	 * that the dquot returned is the one that should go in the inode.
+	 * Release the quota inodes.
 	 */
-	*IO_idqpp = dqp;
-	xfs_dqunlock(dqp);
-	return 0;
+	if (mp->m_quotainfo)
+		xfs_qm_destroy_quotainos(mp->m_quotainfo);
 }
 
 static bool
@@ -301,6 +299,8 @@ xfs_qm_need_dqattach(
 		return false;
 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 		return false;
+	if (xfs_is_metadir_inode(ip))
+		return false;
 	return true;
 }
 
@@ -322,10 +322,11 @@ xfs_qm_dqattach_locked(
 	if (!xfs_qm_need_dqattach(ip))
 		return 0;
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+	ASSERT(!xfs_is_metadir_inode(ip));
 
 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
-		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
+		error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_USER,
 				doalloc, &ip->i_udquot);
 		if (error)
 			goto done;
@@ -333,7 +334,7 @@ xfs_qm_dqattach_locked(
 	}
 
 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
-		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
+		error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_GROUP,
 				doalloc, &ip->i_gdquot);
 		if (error)
 			goto done;
@@ -341,7 +342,7 @@ xfs_qm_dqattach_locked(
 	}
 
 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
-		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
+		error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_PROJ,
 				doalloc, &ip->i_pdquot);
 		if (error)
 			goto done;
@@ -353,7 +354,7 @@ done:
 	 * Don't worry about the dquots that we may have attached before any
 	 * error - they'll get detached later if it has not already been done.
 	 */
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -382,6 +383,8 @@ void
 xfs_qm_dqdetach(
 	xfs_inode_t	*ip)
 {
+	if (xfs_is_metadir_inode(ip))
+		return;
 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 		return;
 
@@ -411,15 +414,15 @@ static enum lru_status
 xfs_qm_dquot_isolate(
 	struct list_head	*item,
 	struct list_lru_one	*lru,
-	spinlock_t		*lru_lock,
 	void			*arg)
-		__releases(lru_lock) __acquires(lru_lock)
+		__releases(&lru->lock) __acquires(&lru->lock)
 {
 	struct xfs_dquot	*dqp = container_of(item,
 						struct xfs_dquot, q_lru);
 	struct xfs_qm_isolate	*isol = arg;
+	enum lru_status		ret = LRU_SKIP;
 
-	if (!xfs_dqlock_nowait(dqp))
+	if (!spin_trylock(&dqp->q_lockref.lock))
 		goto out_miss_busy;
 
 	/*
@@ -427,15 +430,24 @@ xfs_qm_dquot_isolate(
 	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being free from under us here.
 	 */
-	if (dqp->q_flags & XFS_DQFLAG_FREEING)
+	if (__lockref_is_dead(&dqp->q_lockref))
+		goto out_miss_unlock;
+
+	/*
+	 * If the dquot is pinned or dirty, rotate it to the end of the LRU to
+	 * give some time for it to be cleaned before we try to isolate it
+	 * again.
+	 */
+	ret = LRU_ROTATE;
+	if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0)
 		goto out_miss_unlock;
 
 	/*
 	 * This dquot has acquired a reference in the meantime remove it from
 	 * the freelist and try again.
 	 */
-	if (dqp->q_nrefs) {
-		xfs_dqunlock(dqp);
+	if (dqp->q_lockref.count) {
+		spin_unlock(&dqp->q_lockref.lock);
 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 
 		trace_xfs_dqreclaim_want(dqp);
@@ -445,39 +457,23 @@ xfs_qm_dquot_isolate(
 	}
 
 	/*
-	 * If the dquot is dirty, flush it. If it's already being flushed, just
-	 * skip it so there is time for the IO to complete before we try to
-	 * reclaim it again on the next LRU pass.
+	 * The dquot may still be under IO, in which case the flush lock will be
+	 * held. If we can't get the flush lock now, just skip over the dquot as
+	 * if it was dirty.
 	 */
 	if (!xfs_dqflock_nowait(dqp))
 		goto out_miss_unlock;
 
-	if (XFS_DQ_IS_DIRTY(dqp)) {
-		struct xfs_buf	*bp = NULL;
-		int		error;
-
-		trace_xfs_dqreclaim_dirty(dqp);
-
-		/* we have to drop the LRU lock to flush the dquot */
-		spin_unlock(lru_lock);
-
-		error = xfs_qm_dqflush(dqp, &bp);
-		if (error)
-			goto out_unlock_dirty;
-
-		xfs_buf_delwri_queue(bp, &isol->buffers);
-		xfs_buf_relse(bp);
-		goto out_unlock_dirty;
-	}
+	ASSERT(!XFS_DQ_IS_DIRTY(dqp));
 
+	xfs_dquot_detach_buf(dqp);
 	xfs_dqfunlock(dqp);
 
 	/*
 	 * Prevent lookups now that we are past the point of no return.
 	 */
-	dqp->q_flags |= XFS_DQFLAG_FREEING;
-	xfs_dqunlock(dqp);
+	lockref_mark_dead(&dqp->q_lockref);
+	spin_unlock(&dqp->q_lockref.lock);
 
-	ASSERT(dqp->q_nrefs == 0);
 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 	trace_xfs_dqreclaim_done(dqp);
@@ -485,18 +481,11 @@ xfs_qm_dquot_isolate(
 	return LRU_REMOVED;
 
 out_miss_unlock:
-	xfs_dqunlock(dqp);
+	spin_unlock(&dqp->q_lockref.lock);
 out_miss_busy:
 	trace_xfs_dqreclaim_busy(dqp);
 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
-	return LRU_SKIP;
-
-out_unlock_dirty:
-	trace_xfs_dqreclaim_busy(dqp);
-	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
-	xfs_dqunlock(dqp);
-	spin_lock(lru_lock);
-	return LRU_RETRY;
+	return ret;
 }
 
 static unsigned long
@@ -504,8 +493,7 @@ xfs_qm_shrink_scan(
 	struct shrinker		*shrink,
 	struct shrink_control	*sc)
 {
-	struct xfs_quotainfo	*qi = container_of(shrink,
-					struct xfs_quotainfo, qi_shrinker);
+	struct xfs_quotainfo	*qi = shrink->private_data;
 	struct xfs_qm_isolate	isol;
 	unsigned long		freed;
 	int			error;
@@ -539,8 +527,7 @@ xfs_qm_shrink_count(
 	struct shrinker		*shrink,
 	struct shrink_control	*sc)
 {
-	struct xfs_quotainfo	*qi = container_of(shrink,
-					struct xfs_quotainfo, qi_shrinker);
+	struct xfs_quotainfo	*qi = shrink->private_data;
 
 	return list_lru_shrink_count(&qi->qi_lru, sc);
 }
@@ -617,6 +604,160 @@ xfs_qm_init_timelimits(
 	xfs_qm_dqdestroy(dqp);
 }
 
+static int
+xfs_qm_load_metadir_qinos(
+	struct xfs_mount	*mp,
+	struct xfs_quotainfo	*qi)
+{
+	struct xfs_trans	*tp;
+	int			error;
+
+	tp = xfs_trans_alloc_empty(mp);
+	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
+	if (error == -ENOENT) {
+		/* no quota dir directory, but we'll create one later */
+		error = 0;
+		goto out_trans;
+	}
+	if (error)
+		goto out_trans;
+
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
+				&qi->qi_uquotaip);
+		if (error && error != -ENOENT)
+			goto out_trans;
+	}
+
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
+				&qi->qi_gquotaip);
+		if (error && error != -ENOENT)
+			goto out_trans;
+	}
+
+	if (XFS_IS_PQUOTA_ON(mp)) {
+		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
+				&qi->qi_pquotaip);
+		if (error && error != -ENOENT)
+			goto out_trans;
+	}
+
+	error = 0;
+out_trans:
+	xfs_trans_cancel(tp);
+	return error;
+}
+
+/* Create quota inodes in the metadata directory tree. */
+STATIC int
+xfs_qm_create_metadir_qinos(
+	struct xfs_mount	*mp,
+	struct xfs_quotainfo	*qi)
+{
+	int			error;
+
+	if (!qi->qi_dirip) {
+		error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
+		if (error && error != -EEXIST)
+			return error;
+		/*
+		 * If the /quotas dirent points to an inode that isn't
+		 * loadable, qi_dirip will be NULL but mkdir_parent will return
+		 * -EEXIST.  In this case the metadir is corrupt, so bail out.
+		 */
+		if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
+			return -EFSCORRUPTED;
+	}
+
+	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
+		error = xfs_dqinode_metadir_create(qi->qi_dirip,
+				XFS_DQTYPE_USER, &qi->qi_uquotaip);
+		if (error)
+			return error;
+	}
+
+	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
+		error = xfs_dqinode_metadir_create(qi->qi_dirip,
+				XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
+		if (error)
+			return error;
+	}
+
+	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
+		error = xfs_dqinode_metadir_create(qi->qi_dirip,
+				XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
+/*
+ * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
+ * creating quota files on a metadir filesystem.
+ */
+STATIC int
+xfs_qm_prep_metadir_sb(
+	struct xfs_mount	*mp)
+{
+	struct xfs_trans	*tp;
+	int			error;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
+	if (error)
+		return error;
+
+	spin_lock(&mp->m_sb_lock);
+
+	xfs_add_quota(mp);
+
+	/* qflags will get updated fully _after_ quotacheck */
+	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
+
+	spin_unlock(&mp->m_sb_lock);
+	xfs_log_sb(tp);
+
+	return xfs_trans_commit(tp);
+}
+
+/*
+ * Load existing quota inodes or create them.  Since this is a V5 filesystem,
+ * we don't have to deal with the grp/prjquota switcheroo thing from V4.
+ */
+STATIC int
+xfs_qm_init_metadir_qinos(
+	struct xfs_mount	*mp)
+{
+	struct xfs_quotainfo	*qi = mp->m_quotainfo;
+	int			error;
+
+	if (!xfs_has_quota(mp)) {
+		error = xfs_qm_prep_metadir_sb(mp);
+		if (error)
+			return error;
+	}
+
+	error = xfs_qm_load_metadir_qinos(mp, qi);
+	if (error)
+		goto out_err;
+
+	error = xfs_qm_create_metadir_qinos(mp, qi);
+	if (error)
+		goto out_err;
+
+	/* The only user of the quota dir inode is online fsck */
+#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+	xfs_irele(qi->qi_dirip);
+	qi->qi_dirip = NULL;
+#endif
+	return 0;
+out_err:
+	xfs_qm_destroy_quotainos(mp->m_quotainfo);
+	return error;
+}
+
 /*
  * This initializes all the quota information that's kept in the
  * mount structure
@@ -630,7 +771,8 @@ xfs_qm_init_quotainfo(
 
 	ASSERT(XFS_IS_QUOTA_ON(mp));
 
-	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
+	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
+			GFP_KERNEL | __GFP_NOFAIL);
 
 	error = list_lru_init(&qinf->qi_lru);
 	if (error)
@@ -640,13 +782,16 @@ xfs_qm_init_quotainfo(
 	 * See if quotainodes are setup, and if not, allocate them,
 	 * and change the superblock accordingly.
 	 */
-	error = xfs_qm_init_quotainos(mp);
+	if (xfs_has_metadir(mp))
+		error = xfs_qm_init_metadir_qinos(mp);
+	else
+		error = xfs_qm_init_quotainos(mp);
 	if (error)
 		goto out_free_lru;
 
-	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
-	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
-	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
+	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
+	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
+	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
 	mutex_init(&qinf->qi_tree_lock);
 
 	/* mutex used to serialize quotaoffs */
@@ -680,15 +825,21 @@ xfs_qm_init_quotainfo(
 	if (XFS_IS_PQUOTA_ON(mp))
 		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 
-	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
-	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
-	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
-	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
-
-	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
-			mp->m_super->s_id);
-	if (error)
+	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
+			mp->m_super->s_id);
+	if (!qinf->qi_shrinker) {
+		error = -ENOMEM;
 		goto out_free_inos;
+	}
+
+	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
+	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
+	qinf->qi_shrinker->private_data = qinf;
+
+	shrinker_register(qinf->qi_shrinker);
+
+	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
+	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
 
 	return 0;
 
@@ -699,7 +850,7 @@ out_free_inos:
 out_free_lru:
 	list_lru_destroy(&qinf->qi_lru);
 out_free_qinf:
-	kmem_free(qinf);
+	kfree(qinf);
 	mp->m_quotainfo = NULL;
 	return error;
 }
@@ -718,15 +869,26 @@ xfs_qm_destroy_quotainfo(
 	qi = mp->m_quotainfo;
 	ASSERT(qi != NULL);
 
-	unregister_shrinker(&qi->qi_shrinker);
+	shrinker_free(qi->qi_shrinker);
 	list_lru_destroy(&qi->qi_lru);
 	xfs_qm_destroy_quotainos(qi);
 	mutex_destroy(&qi->qi_tree_lock);
 	mutex_destroy(&qi->qi_quotaofflock);
-	kmem_free(qi);
+	kfree(qi);
 	mp->m_quotainfo = NULL;
 }
 
+static inline enum xfs_metafile_type
+xfs_qm_metafile_type(
+	unsigned int		flags)
+{
+	if (flags & XFS_QMOPT_UQUOTA)
+		return XFS_METAFILE_USRQUOTA;
+	else if (flags & XFS_QMOPT_GQUOTA)
+		return XFS_METAFILE_GRPQUOTA;
+	return XFS_METAFILE_PRJQUOTA;
+}
+
 /*
  * Create an inode and return with a reference already taken, but unlocked
  * This is how we create quota inodes
@@ -738,6 +900,7 @@ xfs_qm_qino_alloc(
 	unsigned int	flags)
 {
 	struct xfs_trans	*tp;
+	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
 	int			error;
 	bool			need_alloc = true;
 
@@ -757,19 +920,24 @@ xfs_qm_qino_alloc(
 		    (mp->m_sb.sb_gquotino != NULLFSINO)) {
 			ino = mp->m_sb.sb_gquotino;
 			if (XFS_IS_CORRUPT(mp,
-					   mp->m_sb.sb_pquotino != NULLFSINO))
+					   mp->m_sb.sb_pquotino != NULLFSINO)) {
+				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
 				return -EFSCORRUPTED;
+			}
 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 			   (mp->m_sb.sb_pquotino != NULLFSINO)) {
 			ino = mp->m_sb.sb_pquotino;
 			if (XFS_IS_CORRUPT(mp,
-					   mp->m_sb.sb_gquotino != NULLFSINO))
+					   mp->m_sb.sb_gquotino != NULLFSINO)) {
+				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
 				return -EFSCORRUPTED;
+			}
 		}
 		if (ino != NULLFSINO) {
-			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
+			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
 			if (error)
 				return error;
+
 			mp->m_sb.sb_gquotino = NULLFSINO;
 			mp->m_sb.sb_pquotino = NULLFSINO;
 			need_alloc = false;
@@ -783,16 +951,21 @@ xfs_qm_qino_alloc(
 		return error;
 
 	if (need_alloc) {
+		struct xfs_icreate_args	args = {
+			.mode = S_IFREG,
+			.flags = XFS_ICREATE_UNLINKABLE,
+		};
 		xfs_ino_t	ino;
 
-		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
+		error = xfs_dialloc(&tp, &args, &ino);
 		if (!error)
-			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
-					S_IFREG, 1, 0, 0, false, ipp);
+			error = xfs_icreate(tp, ino, &args, ipp);
 		if (error) {
 			xfs_trans_cancel(tp);
 			return error;
 		}
+		if (xfs_has_metadir(mp))
+			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
 	}
 
 	/*
@@ -826,8 +999,10 @@ xfs_qm_qino_alloc(
 		ASSERT(xfs_is_shutdown(mp));
 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 	}
-	if (need_alloc)
+	if (need_alloc) {
+		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
 		xfs_finish_inode_setup(*ipp);
+	}
 
 	return error;
 }
@@ -995,7 +1170,8 @@ xfs_qm_reset_dqcounts_buf(
 	if (qip->i_nblocks == 0)
 		return 0;
 
-	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
+	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
+			GFP_KERNEL | __GFP_NOFAIL);
 
 	lblkno = 0;
 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
@@ -1057,7 +1233,7 @@ xfs_qm_reset_dqcounts_buf(
 	} while (nmaps > 0);
 
 out:
-	kmem_free(map);
+	kfree(map);
 	return error;
 }
 
@@ -1092,6 +1268,11 @@ xfs_qm_quotacheck_dqadjust(
 		return error;
 	}
 
+	mutex_lock(&dqp->q_qlock);
+	error = xfs_dquot_attach_buf(NULL, dqp);
+	if (error)
+		goto out_unlock;
+
 	trace_xfs_dqadjust(dqp);
 
 	/*
@@ -1120,8 +1301,10 @@ xfs_qm_quotacheck_dqadjust(
 	}
 
 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
-	xfs_qm_dqput(dqp);
-	return 0;
+out_unlock:
+	mutex_unlock(&dqp->q_qlock);
+	xfs_qm_dqrele(dqp);
+	return error;
 }
 
 /*
@@ -1137,8 +1320,8 @@ xfs_qm_dqusage_adjust(
 	void			*data)
 {
 	struct xfs_inode	*ip;
-	xfs_qcnt_t		nblks;
-	xfs_filblks_t		rtblks = 0;	/* total rt blks */
+	xfs_filblks_t		nblks, rtblks;
+	unsigned int		lock_mode;
 	int			error;
 
 	ASSERT(XFS_IS_QUOTA_ON(mp));
@@ -1160,19 +1343,36 @@ xfs_qm_dqusage_adjust(
 	if (error)
 		return error;
 
+	/*
+	 * Reload the incore unlinked list to avoid failure in inodegc.
+	 * Use an unlocked check here because unrecovered unlinked inodes
+	 * should be somewhat rare.
+	 */
+	if (xfs_inode_unlinked_incomplete(ip)) {
+		error = xfs_inode_reload_unlinked(ip);
+		if (error) {
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			goto error0;
+		}
+	}
+
+	/* Metadata directory files are not accounted to user-visible quotas. */
+	if (xfs_is_metadir_inode(ip))
+		goto error0;
+
 	ASSERT(ip->i_delayed_blks == 0);
 
+	lock_mode = xfs_ilock_data_map_shared(ip);
 	if (XFS_IS_REALTIME_INODE(ip)) {
-		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
-
 		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
-		if (error)
+		if (error) {
+			xfs_iunlock(ip, lock_mode);
 			goto error0;
-
-		xfs_bmap_count_leaves(ifp, &rtblks);
+		}
 	}
-
-	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
+	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
+	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
+	xfs_iunlock(ip, lock_mode);
 
 	/*
 	 * Add the (disk blocks and inode) resources occupied by this
@@ -1217,54 +1417,35 @@ xfs_qm_flush_one(
 	struct xfs_dquot	*dqp,
 	void			*data)
 {
-	struct xfs_mount	*mp = dqp->q_mount;
 	struct list_head	*buffer_list = data;
 	struct xfs_buf		*bp = NULL;
 	int			error = 0;
 
-	xfs_dqlock(dqp);
-	if (dqp->q_flags & XFS_DQFLAG_FREEING)
-		goto out_unlock;
+	if (!lockref_get_not_dead(&dqp->q_lockref))
+		return 0;
+
+	mutex_lock(&dqp->q_qlock);
 	if (!XFS_DQ_IS_DIRTY(dqp))
 		goto out_unlock;
 
-	/*
-	 * The only way the dquot is already flush locked by the time quotacheck
-	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
-	 * it for the final time. Quotacheck collects all dquot bufs in the
-	 * local delwri queue before dquots are dirtied, so reclaim can't have
-	 * possibly queued it for I/O. The only way out is to push the buffer to
-	 * cycle the flush lock.
-	 */
-	if (!xfs_dqflock_nowait(dqp)) {
-		/* buf is pinned in-core by delwri list */
-		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
-				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
-		if (error)
-			goto out_unlock;
-
-		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
-			error = -EAGAIN;
-			xfs_buf_relse(bp);
-			goto out_unlock;
-		}
-		xfs_buf_unlock(bp);
-
-		xfs_buf_delwri_pushbuf(bp, buffer_list);
-		xfs_buf_rele(bp);
-
-		error = -EAGAIN;
-		goto out_unlock;
-	}
+	xfs_qm_dqunpin_wait(dqp);
+	xfs_dqflock(dqp);
 
-	error = xfs_qm_dqflush(dqp, &bp);
+	error = xfs_dquot_use_attached_buf(dqp, &bp);
 	if (error)
 		goto out_unlock;
+	if (!bp) {
+		error = -EFSCORRUPTED;
+		goto out_unlock;
+	}
 
-	xfs_buf_delwri_queue(bp, buffer_list);
+	error = xfs_qm_dqflush(dqp, bp);
+	if (!error)
+		xfs_buf_delwri_queue(bp, buffer_list);
 	xfs_buf_relse(bp);
 
 out_unlock:
-	xfs_dqunlock(dqp);
+	mutex_unlock(&dqp->q_qlock);
+	xfs_qm_dqrele(dqp);
 	return error;
 }
 
@@ -1319,8 +1500,10 @@ xfs_qm_quotacheck(
 		flags |= XFS_PQUOTA_CHKD;
 	}
 
+	xfs_set_quotacheck_running(mp);
 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
 			NULL);
+	xfs_clear_quotacheck_running(mp);
 
 	/*
 	 * On error, the inode walk may have partially populated the dquot
@@ -1389,8 +1572,12 @@ error_return:
 			xfs_warn(mp,
 				"Quotacheck: Failed to reset quota flags.");
 		}
-	} else
+		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
+	} else {
 		xfs_notice(mp, "Quotacheck: Done.");
+		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
+	}
+
 	return error;
 
 error_purge:
@@ -1426,10 +1613,12 @@ xfs_qm_mount_quotas(
 	uint			sbf;
 
 	/*
-	 * If quotas on realtime volumes is not supported, we disable
-	 * quotas immediately.
+	 * If quotas on realtime volumes is not supported, disable quotas
+	 * immediately.  We only support rtquota if rtgroups are enabled to
+	 * avoid problems with older kernels.
 	 */
-	if (mp->m_sb.sb_rextents) {
+	if (mp->m_sb.sb_rextents &&
+	    (!xfs_has_rtgroups(mp) || xfs_has_zoned(mp))) {
 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
 		mp->m_qflags = 0;
 		goto write_changes;
@@ -1497,12 +1686,45 @@ xfs_qm_mount_quotas(
 	}
 
 	if (error) {
-		xfs_warn(mp, "Failed to initialize disk quotas.");
+		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
 		return;
 	}
 }
 
 /*
+ * Load the inode for a given type of quota, assuming that the sb fields have
+ * been sorted out.  This is not true when switching quota types on a V4
+ * filesystem, so do not use this function for that.
+ *
+ * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
+ * success; or a negative errno.
+ */
+int
+xfs_qm_qino_load(
+	struct xfs_mount	*mp,
+	xfs_dqtype_t		type,
+	struct xfs_inode	**ipp)
+{
+	struct xfs_trans	*tp;
+	struct xfs_inode	*dp = NULL;
+	int			error;
+
+	tp = xfs_trans_alloc_empty(mp);
+	if (xfs_has_metadir(mp)) {
+		error = xfs_dqinode_load_parent(tp, &dp);
+		if (error)
+			goto out_cancel;
+	}
+
+	error = xfs_dqinode_load(tp, dp, type, ipp);
+	if (dp)
+		xfs_irele(dp);
+out_cancel:
+	xfs_trans_cancel(tp);
+	return error;
+}
+
+/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
@@ -1525,24 +1747,21 @@ xfs_qm_init_quotainos(
 
 	if (XFS_IS_UQUOTA_ON(mp) &&
 	    mp->m_sb.sb_uquotino != NULLFSINO) {
 		ASSERT(mp->m_sb.sb_uquotino > 0);
-		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
-				     0, 0, &uip);
+		error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
 		if (error)
 			return error;
 	}
 	if (XFS_IS_GQUOTA_ON(mp) &&
 	    mp->m_sb.sb_gquotino != NULLFSINO) {
 		ASSERT(mp->m_sb.sb_gquotino > 0);
-		error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
-				     0, 0, &gip);
+		error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
 		if (error)
 			goto error_rele;
 	}
 	if (XFS_IS_PQUOTA_ON(mp) &&
 	    mp->m_sb.sb_pquotino != NULLFSINO) {
 		ASSERT(mp->m_sb.sb_pquotino > 0);
-		error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
-				     0, 0, &pip);
+		error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
 		if (error)
 			goto error_rele;
 	}
@@ -1596,24 +1815,6 @@ error_rele:
 }
 
 STATIC void
-xfs_qm_destroy_quotainos(
-	struct xfs_quotainfo	*qi)
-{
-	if (qi->qi_uquotaip) {
-		xfs_irele(qi->qi_uquotaip);
-		qi->qi_uquotaip = NULL; /* paranoia */
-	}
-	if (qi->qi_gquotaip) {
-		xfs_irele(qi->qi_gquotaip);
-		qi->qi_gquotaip = NULL;
-	}
-	if (qi->qi_pquotaip) {
-		xfs_irele(qi->qi_pquotaip);
-		qi->qi_pquotaip = NULL;
-	}
-}
-
-STATIC void
 xfs_qm_dqfree_one(
 	struct xfs_dquot	*dqp)
 {
@@ -1660,13 +1861,11 @@ xfs_qm_vop_dqalloc(
 	struct xfs_dquot	*gq = NULL;
 	struct xfs_dquot	*pq = NULL;
 	int			error;
-	uint			lockflags;
 
 	if (!XFS_IS_QUOTA_ON(mp))
 		return 0;
 
-	lockflags = XFS_ILOCK_EXCL;
-	xfs_ilock(ip, lockflags);
+	ASSERT(!xfs_is_metadir_inode(ip));
 
 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
 		gid = inode->i_gid;
@@ -1676,38 +1875,22 @@ xfs_qm_vop_dqalloc(
 	 * if necessary. The dquot(s) will not be locked.
 	 */
 	if (XFS_NOT_DQATTACHED(mp, ip)) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		error = xfs_qm_dqattach_locked(ip, true);
-		if (error) {
-			xfs_iunlock(ip, lockflags);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		if (error)
 			return error;
-		}
 	}
 
 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
 		ASSERT(O_udqpp);
 		if (!uid_eq(inode->i_uid, uid)) {
-			/*
-			 * What we need is the dquot that has this uid, and
-			 * if we send the inode to dqget, the uid of the inode
-			 * takes priority over what's sent in the uid argument.
-			 * We must unlock inode here before calling dqget if
-			 * we're not sending the inode, because otherwise
-			 * we'll deadlock by doing trans_reserve while
-			 * holding ilock.
-			 */
-			xfs_iunlock(ip, lockflags);
 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
 					XFS_DQTYPE_USER, true, &uq);
 			if (error) {
 				ASSERT(error != -ENOENT);
 				return error;
 			}
-			/*
-			 * Get the ilock in the right order.
-			 */
-			xfs_dqunlock(uq);
-			lockflags = XFS_ILOCK_SHARED;
-			xfs_ilock(ip, lockflags);
 		} else {
 			/*
 			 * Take an extra reference, because we'll return
@@ -1720,16 +1903,12 @@ xfs_qm_vop_dqalloc(
 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
 		ASSERT(O_gdqpp);
 		if (!gid_eq(inode->i_gid, gid)) {
-			xfs_iunlock(ip, lockflags);
 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
 					XFS_DQTYPE_GROUP, true, &gq);
 			if (error) {
 				ASSERT(error != -ENOENT);
 				goto error_rele;
 			}
-			xfs_dqunlock(gq);
-			lockflags = XFS_ILOCK_SHARED;
-			xfs_ilock(ip, lockflags);
 		} else {
 			ASSERT(ip->i_gdquot);
 			gq = xfs_qm_dqhold(ip->i_gdquot);
@@ -1738,16 +1917,12 @@ xfs_qm_vop_dqalloc(
 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
 		ASSERT(O_pdqpp);
 		if (ip->i_projid != prid) {
-			xfs_iunlock(ip, lockflags);
 			error = xfs_qm_dqget(mp, prid,
 					XFS_DQTYPE_PROJ, true, &pq);
 			if (error) {
 				ASSERT(error != -ENOENT);
 				goto error_rele;
 			}
-			xfs_dqunlock(pq);
-			lockflags = XFS_ILOCK_SHARED;
-			xfs_ilock(ip, lockflags);
 		} else {
 			ASSERT(ip->i_pdquot);
 			pq = xfs_qm_dqhold(ip->i_pdquot);
@@ -1755,7 +1930,6 @@ xfs_qm_vop_dqalloc(
 	}
 	trace_xfs_dquot_dqalloc(ip);
 
-	xfs_iunlock(ip, lockflags);
 	if (O_udqpp)
 		*O_udqpp = uq;
 	else
@@ -1788,24 +1962,30 @@ xfs_qm_vop_chown(
 	struct xfs_dquot	*newdq)
 {
 	struct xfs_dquot	*prevdq;
-	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
-				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
-
+	xfs_filblks_t		dblocks, rblocks;
+	bool			isrt = XFS_IS_REALTIME_INODE(ip);
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
+	ASSERT(!xfs_is_metadir_inode(ip));
 
 	/* old dquot */
 	prevdq = *IO_olddq;
 	ASSERT(prevdq);
 	ASSERT(prevdq != newdq);
 
-	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
-	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
+	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
+
+	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
+			-(xfs_qcnt_t)dblocks);
+	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
+			-(xfs_qcnt_t)rblocks);
+	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
 
 	/* the sparkling new dquot */
-	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
-	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
+	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
+	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
+	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
 
 	/*
 	 * Back when we made quota reservations for the chown, we reserved the
@@ -1814,7 +1994,8 @@ xfs_qm_vop_chown(
 	 * (having already bumped up the real counter) so that we don't have
 	 * any reservation to give back when we commit.
 	 */
-	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
+	xfs_trans_mod_dquot(tp, newdq,
+			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
 			-ip->i_delayed_blks);
 
 	/*
@@ -1825,10 +2006,15 @@ xfs_qm_vop_chown(
 	 * back now.
 	 */
 	tp->t_flags |= XFS_TRANS_DIRTY;
-	xfs_dqlock(prevdq);
-	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
-	prevdq->q_blk.reserved -= ip->i_delayed_blks;
-	xfs_dqunlock(prevdq);
+	mutex_lock(&prevdq->q_qlock);
+	if (isrt) {
+		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
+		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
+	} else {
+		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
+		prevdq->q_blk.reserved -= ip->i_delayed_blks;
+	}
+	mutex_unlock(&prevdq->q_qlock);
 
 	/*
 	 * Take an extra reference, because the inode is going to keep
@@ -1880,29 +2066,29 @@ xfs_qm_vop_create_dqattach(
 	if (!XFS_IS_QUOTA_ON(mp))
 		return;
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+	ASSERT(!xfs_is_metadir_inode(ip));
 
 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
 		ASSERT(ip->i_udquot == NULL);
 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
 
 		ip->i_udquot = xfs_qm_dqhold(udqp);
-		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
 		ASSERT(ip->i_gdquot == NULL);
 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
 
 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
-		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
 		ASSERT(ip->i_pdquot == NULL);
 		ASSERT(ip->i_projid == pdqp->q_id);
 
 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
-		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
+
+	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
 }
 
 /* Decide if this inode's dquot is near an enforcement boundary. */
@@ -1912,6 +2098,8 @@ xfs_inode_near_dquot_enforcement(
 	xfs_dqtype_t		type)
 {
 	struct xfs_dquot	*dqp;
+	struct xfs_dquot_res	*res;
+	struct xfs_dquot_pre	*pre;
 	int64_t			freesp;
 
 	/* We only care for quotas that are enabled and enforced. */
@@ -1920,21 +2108,30 @@ xfs_inode_near_dquot_enforcement(
 		return false;
 
 	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
+	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
 	    xfs_dquot_res_over_limits(&dqp->q_rtb))
 		return true;
 
+	if (XFS_IS_REALTIME_INODE(ip)) {
+		res = &dqp->q_rtb;
+		pre = &dqp->q_rtb_prealloc;
+	} else {
+		res = &dqp->q_blk;
+		pre = &dqp->q_blk_prealloc;
+	}
+
 	/* For space on the data device, check the various thresholds. */
-	if (!dqp->q_prealloc_hi_wmark)
+	if (!pre->q_prealloc_hi_wmark)
 		return false;
 
-	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
+	if (res->reserved < pre->q_prealloc_lo_wmark)
 		return false;
 
-	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
+	if (res->reserved >= pre->q_prealloc_hi_wmark)
 		return true;
 
-	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
-	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
+	freesp = pre->q_prealloc_hi_wmark - res->reserved;
+	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
 		return true;
 
 	return false;
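Note on the lockref conversion: most hunks above retire the dquot's `q_nrefs` count and `XFS_DQFLAG_FREEING` flag in favour of a `struct lockref` (`q_lockref`) plus a separate `q_qlock` mutex. A lockref couples a spinlock with a reference count and supports a "dead" state that makes all further lookups fail: xfs_qm_flush_one() takes references with lockref_get_not_dead(), the purge and reclaim paths retire a dquot with lockref_mark_dead(), and the purge error path "resurrects" it by resetting the count to zero. The following is a minimal userspace sketch of those semantics, using a pthread mutex in place of the kernel spinlock; the `obj_*` names are invented for illustration and the real lockref also has a lockless cmpxchg fast path this sketch omits.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj_ref {
	pthread_mutex_t	lock;	/* stands in for the lockref spinlock */
	int		count;	/* >= 0 while live, OBJ_DEAD once killed */
};

#define OBJ_DEAD	(-128)	/* kernel lockref treats count < 0 as dead */

/* Like lockref_get_not_dead(): take a reference unless already dying. */
static bool obj_get_not_dead(struct obj_ref *ref)
{
	bool got = false;

	pthread_mutex_lock(&ref->lock);
	if (ref->count >= 0) {
		ref->count++;
		got = true;
	}
	pthread_mutex_unlock(&ref->lock);
	return got;
}

/* Like the purge path: claim the object for freeing iff it is unused. */
static bool obj_try_kill(struct obj_ref *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->count != 0) {		/* still referenced, or already dead */
		pthread_mutex_unlock(&ref->lock);
		return false;		/* the diff returns -EAGAIN here */
	}
	ref->count = OBJ_DEAD;		/* lockref_mark_dead() */
	pthread_mutex_unlock(&ref->lock);
	return true;
}

int main(void)
{
	struct obj_ref ref = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("get:  %d\n", obj_get_not_dead(&ref));	/* 1: count 0 -> 1 */
	printf("kill: %d\n", obj_try_kill(&ref));	/* 0: still in use */
	ref.count--;					/* drop the reference */
	printf("kill: %d\n", obj_try_kill(&ref));	/* 1: marked dead */
	printf("get:  %d\n", obj_get_not_dead(&ref));	/* 0: lookups now fail */
	return 0;
}

The "resurrect the refcount from the dead" hunk in xfs_qm_dqpurge() corresponds to writing `ref.count = 0` after a failed kill, which atomically reopens the object to lookups.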
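Note on the list_lru changes in xfs_qm_dquot_isolate(): the walk callback no longer receives a `spinlock_t *lru_lock` argument because the lock moved into `struct list_lru_one` (visible in the updated `__releases(&lru->lock)` annotation), and `list_lru_del()` became the memcg-aware `list_lru_del_obj()`. The reclaim policy also changes: instead of flushing a dirty dquot from the shrinker (the deleted `out_unlock_dirty`/`LRU_RETRY` path), the callback now returns `LRU_ROTATE` so dirty or pinned dquots drift to the far end of the LRU and are retried later. A schematic isolate callback under the new signature, with invented `my_*` names, might look like this:

#include <linux/list_lru.h>

/* Placeholder predicate: "dirty or pinned" in the dquot case. */
static bool my_item_busy(struct list_head *item)
{
	return false;
}

static enum lru_status my_isolate(struct list_head *item,
		struct list_lru_one *lru, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/*
	 * Rotate busy objects to the tail of the LRU instead of flushing
	 * them here, so they get time to become clean before the walk
	 * sees them again.
	 */
	if (my_item_busy(item))
		return LRU_ROTATE;

	/* Point of no return: move to a private list, free it afterwards. */
	list_lru_isolate_move(lru, item, dispose);
	return LRU_REMOVED;
}

This is why the diff asserts `!XFS_DQ_IS_DIRTY(dqp)` later in the function: anything dirty was already rotated away by the earlier check.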
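Note on the shrinker hunks: `register_shrinker()` on an embedded `struct shrinker` was replaced kernel-wide (around Linux 6.7) by dynamically allocated shrinkers, so the count/scan callbacks recover their owner from `shrinker->private_data` instead of `container_of()`. A schematic, kernel-style sketch of the new lifecycle follows; `my_cache` and its fields are invented stand-ins for `xfs_quotainfo`, and the eviction logic is a placeholder rather than a real reclaim implementation.

#include <linux/errno.h>
#include <linux/shrinker.h>

struct my_cache {
	struct shrinker	*shrinker;
	unsigned long	nr_cached;	/* placeholder for real LRU state */
};

static unsigned long my_cache_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct my_cache *mc = shrink->private_data;

	return mc->nr_cached ? mc->nr_cached : SHRINK_EMPTY;
}

static unsigned long my_cache_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct my_cache *mc = shrink->private_data;
	unsigned long freed = sc->nr_to_scan < mc->nr_cached ?
			      sc->nr_to_scan : mc->nr_cached;

	mc->nr_cached -= freed;		/* stand-in for real eviction */
	return freed;
}

static int my_cache_init(struct my_cache *mc)
{
	/* Replaces the embedded struct + register_shrinker() pairing. */
	mc->shrinker = shrinker_alloc(0, "my-cache");
	if (!mc->shrinker)
		return -ENOMEM;

	mc->shrinker->count_objects = my_cache_count;
	mc->shrinker->scan_objects = my_cache_scan;
	mc->shrinker->private_data = mc;

	/* The callbacks may run as soon as this returns. */
	shrinker_register(mc->shrinker);
	return 0;
}

static void my_cache_exit(struct my_cache *mc)
{
	shrinker_free(mc->shrinker);	/* replaces unregister_shrinker() */
}

Splitting allocation from registration is the point of the new API: the owner can finish wiring up `private_data` before the shrinker becomes visible to memory reclaim, which the old register-the-embedded-struct pattern could not guarantee.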
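Note on the allocation-flag hunks: the conversions from `kmem_zalloc()`/`kmem_alloc()`/`kmem_free()` to `kzalloc()`/`kmalloc()`/`kfree()` are mechanical, but the flags are informative. The old XFS wrappers looped internally until an allocation succeeded; the replacements state that contract explicitly with `__GFP_NOFAIL`. The radix trees likewise move from `GFP_NOFS` to `GFP_KERNEL`, presumably safe because XFS enters transaction context under scoped `memalloc_nofs_save()`, which downgrades GFP_KERNEL allocations automatically. A minimal sketch of the replacement pattern, with hypothetical `my_info` names:

#include <linux/slab.h>

struct my_info {
	int	nitems;
};

static struct my_info *my_info_alloc(void)
{
	/*
	 * kmem_zalloc(size, 0) retried internally until success;
	 * __GFP_NOFAIL makes the "cannot fail" semantics explicit,
	 * so no NULL check is needed by the caller.
	 */
	return kzalloc(sizeof(struct my_info), GFP_KERNEL | __GFP_NOFAIL);
}

static void my_info_free(struct my_info *info)
{
	kfree(info);
}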
