-rw-r--r--  fs/xfs/libxfs/xfs_ag.c | 280
-rw-r--r--  fs/xfs/libxfs/xfs_ag.h | 136
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.c | 11
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.h | 15
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 111
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_alloc_btree.c | 31
-rw-r--r--  fs/xfs/libxfs/xfs_alloc_btree.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 910
-rw-r--r--  fs/xfs/libxfs/xfs_attr.h | 403
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 5
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 167
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.h | 8
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 15
-rw-r--r--  fs/xfs/libxfs/xfs_btree.h | 10
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 637
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.h | 40
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c | 46
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.h | 13
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c | 122
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.c | 39
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.h | 7
-rw-r--r--  fs/xfs/libxfs/xfs_rmap.c | 147
-rw-r--r--  fs/xfs/libxfs/xfs_rmap.h | 6
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.c | 46
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.h | 6
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 146
-rw-r--r--  fs/xfs/libxfs/xfs_sb.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_shared.h | 20
-rw-r--r--  fs/xfs/libxfs/xfs_types.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_types.h | 1
-rw-r--r--  fs/xfs/scrub/agheader.c | 1
-rw-r--r--  fs/xfs/scrub/agheader_repair.c | 33
-rw-r--r--  fs/xfs/scrub/alloc.c | 3
-rw-r--r--  fs/xfs/scrub/bmap.c | 21
-rw-r--r--  fs/xfs/scrub/common.c | 15
-rw-r--r--  fs/xfs/scrub/fscounters.c | 42
-rw-r--r--  fs/xfs/scrub/health.c | 2
-rw-r--r--  fs/xfs/scrub/ialloc.c | 9
-rw-r--r--  fs/xfs/scrub/refcount.c | 3
-rw-r--r--  fs/xfs/scrub/repair.c | 14
-rw-r--r--  fs/xfs/scrub/rmap.c | 3
-rw-r--r--  fs/xfs/scrub/trace.c | 3
-rw-r--r--  fs/xfs/xfs_attr_inactive.c | 2
-rw-r--r--  fs/xfs/xfs_bio_io.c | 35
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 6
-rw-r--r--  fs/xfs/xfs_buf.c | 311
-rw-r--r--  fs/xfs/xfs_buf.h | 3
-rw-r--r--  fs/xfs/xfs_buf_item.c | 97
-rw-r--r--  fs/xfs/xfs_discard.c | 6
-rw-r--r--  fs/xfs/xfs_dquot_item.c | 2
-rw-r--r--  fs/xfs/xfs_extent_busy.c | 33
-rw-r--r--  fs/xfs/xfs_extent_busy.h | 7
-rw-r--r--  fs/xfs/xfs_file.c | 70
-rw-r--r--  fs/xfs/xfs_filestream.c | 2
-rw-r--r--  fs/xfs/xfs_fsmap.c | 80
-rw-r--r--  fs/xfs/xfs_fsops.c | 24
-rw-r--r--  fs/xfs/xfs_health.c | 15
-rw-r--r--  fs/xfs/xfs_icache.c | 1156
-rw-r--r--  fs/xfs/xfs_icache.h | 58
-rw-r--r--  fs/xfs/xfs_inode.c | 234
-rw-r--r--  fs/xfs/xfs_inode.h | 9
-rw-r--r--  fs/xfs/xfs_inode_item.c | 18
-rw-r--r--  fs/xfs/xfs_inode_item.h | 2
-rw-r--r--  fs/xfs/xfs_ioctl.c | 43
-rw-r--r--  fs/xfs/xfs_iops.c | 4
-rw-r--r--  fs/xfs/xfs_iwalk.c | 84
-rw-r--r--  fs/xfs/xfs_linux.h | 2
-rw-r--r--  fs/xfs/xfs_log.c | 273
-rw-r--r--  fs/xfs/xfs_log.h | 5
-rw-r--r--  fs/xfs/xfs_log_cil.c | 138
-rw-r--r--  fs/xfs/xfs_log_priv.h | 41
-rw-r--r--  fs/xfs/xfs_log_recover.c | 61
-rw-r--r--  fs/xfs/xfs_mount.c | 136
-rw-r--r--  fs/xfs/xfs_mount.h | 110
-rw-r--r--  fs/xfs/xfs_qm.c | 10
-rw-r--r--  fs/xfs/xfs_qm.h | 1
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 54
-rw-r--r--  fs/xfs/xfs_reflink.c | 13
-rw-r--r--  fs/xfs/xfs_super.c | 10
-rw-r--r--  fs/xfs/xfs_super.h | 1
-rw-r--r--  fs/xfs/xfs_symlink.c | 9
-rw-r--r--  fs/xfs/xfs_trace.c | 2
-rw-r--r--  fs/xfs/xfs_trace.h | 115
-rw-r--r--  fs/xfs/xfs_trans.c | 6
-rw-r--r--  fs/xfs/xfs_trans.h | 4
92 files changed, 3827 insertions(+), 3036 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index c68a36688474..778ec52cce70 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -27,6 +27,276 @@
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
+#include "xfs_trace.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+
+
+/*
+ * Passive reference counting access wrappers to the perag structures. If the
+ * per-ag structure is to be freed, the freeing code is responsible for cleaning
+ * up objects with passive references before freeing the structure. This is
+ * things like cached buffers.
+ */
+struct xfs_perag *
+xfs_perag_get(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ref = 0;
+
+ rcu_read_lock();
+ pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ if (pag) {
+ ASSERT(atomic_read(&pag->pag_ref) >= 0);
+ ref = atomic_inc_return(&pag->pag_ref);
+ }
+ rcu_read_unlock();
+ trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+ return pag;
+}
+
+/*
+ * search from @first to find the next perag with the given tag set.
+ */
+struct xfs_perag *
+xfs_perag_get_tag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t first,
+ unsigned int tag)
+{
+ struct xfs_perag *pag;
+ int found;
+ int ref;
+
+ rcu_read_lock();
+ found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+ (void **)&pag, first, 1, tag);
+ if (found <= 0) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ ref = atomic_inc_return(&pag->pag_ref);
+ rcu_read_unlock();
+ trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
+ return pag;
+}
+
+void
+xfs_perag_put(
+ struct xfs_perag *pag)
+{
+ int ref;
+
+ ASSERT(atomic_read(&pag->pag_ref) > 0);
+ ref = atomic_dec_return(&pag->pag_ref);
+ trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
+ * xfs_initialize_perag_data
+ *
+ * Read in each per-ag structure so we can count up the number of
+ * allocated inodes, free inodes and used filesystem blocks as this
+ * information is no longer persistent in the superblock. Once we have
+ * this information, write it into the in-core superblock structure.
+ */
+int
+xfs_initialize_perag_data(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agcount)
+{
+ xfs_agnumber_t index;
+ struct xfs_perag *pag;
+ struct xfs_sb *sbp = &mp->m_sb;
+ uint64_t ifree = 0;
+ uint64_t ialloc = 0;
+ uint64_t bfree = 0;
+ uint64_t bfreelst = 0;
+ uint64_t btree = 0;
+ uint64_t fdblocks;
+ int error = 0;
+
+ for (index = 0; index < agcount; index++) {
+ /*
+ * read the agf, then the agi. This gets us
+ * all the information we need and populates the
+ * per-ag structures for us.
+ */
+ error = xfs_alloc_pagf_init(mp, NULL, index, 0);
+ if (error)
+ return error;
+
+ error = xfs_ialloc_pagi_init(mp, NULL, index);
+ if (error)
+ return error;
+ pag = xfs_perag_get(mp, index);
+ ifree += pag->pagi_freecount;
+ ialloc += pag->pagi_count;
+ bfree += pag->pagf_freeblks;
+ bfreelst += pag->pagf_flcount;
+ btree += pag->pagf_btreeblks;
+ xfs_perag_put(pag);
+ }
+ fdblocks = bfree + bfreelst + btree;
+
+ /*
+ * If the new summary counts are obviously incorrect, fail the
+ * mount operation because that implies the AGFs are also corrupt.
+ * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
+ * will prevent xfs_repair from fixing anything.
+ */
+ if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
+ xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* Overwrite incore superblock counters with just-read data */
+ spin_lock(&mp->m_sb_lock);
+ sbp->sb_ifree = ifree;
+ sbp->sb_icount = ialloc;
+ sbp->sb_fdblocks = fdblocks;
+ spin_unlock(&mp->m_sb_lock);
+
+ xfs_reinit_percpu_counters(mp);
+out:
+ xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
+ return error;
+}
+
+STATIC void
+__xfs_free_perag(
+ struct rcu_head *head)
+{
+ struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+ ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+ kmem_free(pag);
+}
+
+/*
+ * Free up the per-ag resources associated with the mount structure.
+ */
+void
+xfs_free_perag(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_delete(&mp->m_perag_tree, agno);
+ spin_unlock(&mp->m_perag_lock);
+ ASSERT(pag);
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+
+ cancel_delayed_work_sync(&pag->pag_blockgc_work);
+ xfs_iunlink_destroy(pag);
+ xfs_buf_hash_destroy(pag);
+
+ call_rcu(&pag->rcu_head, __xfs_free_perag);
+ }
+}
+
+int
+xfs_initialize_perag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t index;
+ xfs_agnumber_t first_initialised = NULLAGNUMBER;
+ int error;
+
+ /*
+ * Walk the current per-ag tree so we don't try to initialise AGs
+ * that already exist (growfs case). Allocate and insert all the
+ * AGs we don't find ready for initialisation.
+ */
+ for (index = 0; index < agcount; index++) {
+ pag = xfs_perag_get(mp, index);
+ if (pag) {
+ xfs_perag_put(pag);
+ continue;
+ }
+
+ pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+ if (!pag) {
+ error = -ENOMEM;
+ goto out_unwind_new_pags;
+ }
+ pag->pag_agno = index;
+ pag->pag_mount = mp;
+
+ error = radix_tree_preload(GFP_NOFS);
+ if (error)
+ goto out_free_pag;
+
+ spin_lock(&mp->m_perag_lock);
+ if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
+ WARN_ON_ONCE(1);
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ error = -EEXIST;
+ goto out_free_pag;
+ }
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+
+ /* Place kernel structure only init below this point. */
+ spin_lock_init(&pag->pag_ici_lock);
+ spin_lock_init(&pag->pagb_lock);
+ spin_lock_init(&pag->pag_state_lock);
+ INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
+ INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+ init_waitqueue_head(&pag->pagb_wait);
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
+
+ error = xfs_buf_hash_init(pag);
+ if (error)
+ goto out_remove_pag;
+
+ error = xfs_iunlink_init(pag);
+ if (error)
+ goto out_hash_destroy;
+
+ /* first new pag is fully initialized */
+ if (first_initialised == NULLAGNUMBER)
+ first_initialised = index;
+ }
+
+ index = xfs_set_inode_alloc(mp, agcount);
+
+ if (maxagi)
+ *maxagi = index;
+
+ mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
+ return 0;
+
+out_hash_destroy:
+ xfs_buf_hash_destroy(pag);
+out_remove_pag:
+ radix_tree_delete(&mp->m_perag_tree, index);
+out_free_pag:
+ kmem_free(pag);
+out_unwind_new_pags:
+ /* unwind any prior newly initialized pags */
+ for (index = first_initialised; index < agcount; index++) {
+ pag = radix_tree_delete(&mp->m_perag_tree, index);
+ if (!pag)
+ break;
+ xfs_buf_hash_destroy(pag);
+ xfs_iunlink_destroy(pag);
+ kmem_free(pag);
+ }
+ return error;
+}
static int
xfs_get_aghdr_buf(
@@ -43,7 +313,6 @@ xfs_get_aghdr_buf(
if (error)
return error;
- xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
bp->b_bn = blkno;
bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops;
@@ -510,6 +779,7 @@ xfs_ag_shrink_space(
struct xfs_buf *agibp, *agfbp;
struct xfs_agi *agi;
struct xfs_agf *agf;
+ xfs_agblock_t aglen;
int error, err2;
ASSERT(agno == mp->m_sb.sb_agcount - 1);
@@ -524,14 +794,14 @@ xfs_ag_shrink_space(
return error;
agf = agfbp->b_addr;
+ aglen = be32_to_cpu(agi->agi_length);
/* some extra paranoid checks before we shrink the ag */
if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
return -EFSCORRUPTED;
- if (delta >= agi->agi_length)
+ if (delta >= aglen)
return -EINVAL;
- args.fsbno = XFS_AGB_TO_FSB(mp, agno,
- be32_to_cpu(agi->agi_length) - delta);
+ args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
/*
* Disable perag reservations so it doesn't cause the allocation request
@@ -646,7 +916,7 @@ xfs_ag_extend_space(
* XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
* this doesn't actually exist in the rmap btree.
*/
- error = xfs_rmap_free(tp, bp, id->agno,
+ error = xfs_rmap_free(tp, bp, bp->b_pag,
be32_to_cpu(agf->agf_length) - len,
len, &XFS_RMAP_OINFO_SKIP_UPDATE);
if (error)
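
The get/put wrappers added above follow the common RCU lookup pattern: look up the structure under rcu_read_lock(), take the reference inside the critical section, and let xfs_perag_put() drop it later. A minimal usage sketch of that pairing (hypothetical caller, not part of this patch):

/* Hypothetical example: read one per-AG counter under a passive reference. */
static xfs_extlen_t
example_ag_freeblks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	xfs_extlen_t		freeblks;

	pag = xfs_perag_get(mp, agno);	/* takes a passive reference */
	if (!pag)
		return 0;
	freeblks = pag->pagf_freeblks;	/* valid once pagf_init is set */
	xfs_perag_put(pag);		/* drop the reference */
	return freeblks;
}
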
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 4535de1d88ea..4c6f9045baca 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -9,6 +9,142 @@
struct xfs_mount;
struct xfs_trans;
+struct xfs_perag;
+
+/*
+ * Per-ag infrastructure
+ */
+
+/* per-AG block reservation data structures */
+struct xfs_ag_resv {
+ /* number of blocks originally reserved here */
+ xfs_extlen_t ar_orig_reserved;
+ /* number of blocks reserved here */
+ xfs_extlen_t ar_reserved;
+ /* number of blocks originally asked for */
+ xfs_extlen_t ar_asked;
+};
+
+/*
+ * Per-ag incore structure, copies of information in agf and agi, to improve the
+ * performance of allocation group selection.
+ */
+struct xfs_perag {
+ struct xfs_mount *pag_mount; /* owner filesystem */
+ xfs_agnumber_t pag_agno; /* AG this structure belongs to */
+ atomic_t pag_ref; /* perag reference count */
+ char pagf_init; /* this agf's entry is initialized */
+ char pagi_init; /* this agi's entry is initialized */
+ char pagf_metadata; /* the agf is preferred to be metadata */
+ char pagi_inodeok; /* The agi is ok for inodes */
+ uint8_t pagf_levels[XFS_BTNUM_AGF];
+ /* # of levels in bno & cnt btree */
+ bool pagf_agflreset; /* agfl requires reset before use */
+ uint32_t pagf_flcount; /* count of blocks in freelist */
+ xfs_extlen_t pagf_freeblks; /* total free blocks */
+ xfs_extlen_t pagf_longest; /* longest free space */
+ uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
+ xfs_agino_t pagi_freecount; /* number of free inodes */
+ xfs_agino_t pagi_count; /* number of allocated inodes */
+
+ /*
+ * Inode allocation search lookup optimisation.
+ * If the pagino matches, the search for new inodes
+ * doesn't need to search the near ones again straight away
+ */
+ xfs_agino_t pagl_pagino;
+ xfs_agino_t pagl_leftrec;
+ xfs_agino_t pagl_rightrec;
+
+ int pagb_count; /* pagb slots in use */
+ uint8_t pagf_refcount_level; /* refcount btree height */
+
+ /* Blocks reserved for all kinds of metadata. */
+ struct xfs_ag_resv pag_meta_resv;
+ /* Blocks reserved for the reverse mapping btree. */
+ struct xfs_ag_resv pag_rmapbt_resv;
+
+ /* -- kernel only structures below this line -- */
+
+ /*
+ * Bitsets of per-ag metadata that have been checked and/or are sick.
+ * Callers should hold pag_state_lock before accessing this field.
+ */
+ uint16_t pag_checked;
+ uint16_t pag_sick;
+ spinlock_t pag_state_lock;
+
+ spinlock_t pagb_lock; /* lock for pagb_tree */
+ struct rb_root pagb_tree; /* ordered tree of busy extents */
+ unsigned int pagb_gen; /* generation count for pagb_tree */
+ wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
+
+ atomic_t pagf_fstrms; /* # of filestreams active in this AG */
+
+ spinlock_t pag_ici_lock; /* incore inode cache lock */
+ struct radix_tree_root pag_ici_root; /* incore inode cache root */
+ int pag_ici_reclaimable; /* reclaimable inodes */
+ unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
+
+ /* buffer cache index */
+ spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
+ struct rhashtable pag_buf_hash;
+
+ /* for rcu-safe freeing */
+ struct rcu_head rcu_head;
+
+ /* background prealloc block trimming */
+ struct delayed_work pag_blockgc_work;
+
+ /*
+ * Unlinked inode information. This incore information reflects
+ * data stored in the AGI, so callers must hold the AGI buffer lock
+ * or have some other means to control concurrency.
+ */
+ struct rhashtable pagi_unlinked_hash;
+};
+
+int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi);
+int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
+void xfs_free_perag(struct xfs_mount *mp);
+
+struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
+ unsigned int tag);
+void xfs_perag_put(struct xfs_perag *pag);
+
+/*
+ * Perag iteration APIs
+ *
+ * XXX: for_each_perag_range() usage really needs an iterator to clean up when
+ * we terminate at end_agno because we may have taken a reference to the perag
+ * beyond end_agno. Right now callers have to be careful to catch and clean that
+ * up themselves. This is not necessary for the callers of for_each_perag() and
+ * for_each_perag_from() because they terminate at sb_agcount where there are
+ * no perag structures in tree beyond end_agno.
+ */
+#define for_each_perag_range(mp, next_agno, end_agno, pag) \
+ for ((pag) = xfs_perag_get((mp), (next_agno)); \
+ (pag) != NULL && (next_agno) <= (end_agno); \
+ (next_agno) = (pag)->pag_agno + 1, \
+ xfs_perag_put(pag), \
+ (pag) = xfs_perag_get((mp), (next_agno)))
+
+#define for_each_perag_from(mp, next_agno, pag) \
+ for_each_perag_range((mp), (next_agno), (mp)->m_sb.sb_agcount, (pag))
+
+
+#define for_each_perag(mp, agno, pag) \
+ (agno) = 0; \
+ for_each_perag_from((mp), (agno), (pag))
+
+#define for_each_perag_tag(mp, agno, pag, tag) \
+ for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
+ (pag) != NULL; \
+ (agno) = (pag)->pag_agno + 1, \
+ xfs_perag_put(pag), \
+ (pag) = xfs_perag_get_tag((mp), (agno), (tag)))
struct aghdr_init_data {
/* per ag data */
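
The iteration macros above pair each xfs_perag_get()/xfs_perag_get_tag() with an xfs_perag_put() as the loop advances, so a full walk leaves no references behind. A usage sketch (hypothetical function, not in this patch):

/* Hypothetical example: sum reclaimable inodes across all AGs. */
static int
example_count_reclaimable(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			total = 0;

	for_each_perag(mp, agno, pag)
		total += pag->pag_ici_reclaimable;
	return total;
}

Note that for_each_perag() expands to an assignment statement followed by the for loop, so unlike a single statement it cannot be the unbraced body of an if/else; and, as the XXX comment above notes, breaking out of for_each_perag_range() before end_agno leaks the reference unless the caller puts it.
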
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e0d5cdc57cc2..2aa2b3484c28 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -19,7 +19,7 @@
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -250,7 +250,6 @@ xfs_ag_resv_init(
struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
- xfs_agnumber_t agno = pag->pag_agno;
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0, error2;
@@ -260,11 +259,11 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
- error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_finobt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
@@ -282,7 +281,7 @@ xfs_ag_resv_init(
mp->m_finobt_nores = true;
- error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
+ error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask,
&used);
if (error)
goto out;
@@ -300,7 +299,7 @@ xfs_ag_resv_init(
if (pag->pag_rmapbt_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index 8a8eb4bc48bb..b74b210008ea 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -18,6 +18,21 @@ void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
struct xfs_trans *tp, xfs_extlen_t len);
+static inline struct xfs_ag_resv *
+xfs_perag_resv(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ return &pag->pag_meta_resv;
+ case XFS_AG_RESV_RMAPBT:
+ return &pag->pag_rmapbt_resv;
+ default:
+ return NULL;
+ }
+}
+
/*
* RMAPBT reservation accounting wrappers. Since rmapbt blocks are sourced from
* the AGFL, they are allocated one at a time and the reservation updates don't
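
With xfs_perag_resv() the type-to-field mapping lives in one place, so reservation accounting code can handle both pools uniformly. A sketch of how a caller might use it (hypothetical helper, for illustration only):

/* Hypothetical example: remaining reserved blocks for a given type. */
static xfs_extlen_t
example_resv_remaining(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_ag_resv	*resv = xfs_perag_resv(pag, type);

	if (!resv)		/* types with no per-AG pool */
		return 0;
	return resv->ar_reserved;
}
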
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index af3d5f9271f6..6929157d8d6e 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -10,7 +10,6 @@
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
@@ -24,6 +23,7 @@
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
@@ -230,7 +230,7 @@ xfs_alloc_get_rec(
int *stat) /* output: success/failure */
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
@@ -776,7 +776,7 @@ xfs_alloc_cur_setup(
*/
if (!acur->cnt)
acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_CNT);
+ args->agbp, args->pag, XFS_BTNUM_CNT);
error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
if (error)
return error;
@@ -786,10 +786,10 @@ xfs_alloc_cur_setup(
*/
if (!acur->bnolt)
acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
+ args->agbp, args->pag, XFS_BTNUM_BNO);
if (!acur->bnogt)
acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
+ args->agbp, args->pag, XFS_BTNUM_BNO);
return i == 1 ? 0 : -ENOSPC;
}
@@ -1063,7 +1063,7 @@ xfs_alloc_ag_vextent_small(
if (fbno == NULLAGBLOCK)
goto out;
- xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
+ xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
(args->datatype & XFS_ALLOC_NOBUSY));
if (args->datatype & XFS_ALLOC_USERDATA) {
@@ -1089,7 +1089,7 @@ xfs_alloc_ag_vextent_small(
* If we're feeding an AGFL block to something that doesn't live in the
* free space, we need to clear out the OWN_AG rmap.
*/
- error = xfs_rmap_free(args->tp, args->agbp, args->agno, fbno, 1,
+ error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
&XFS_RMAP_OINFO_AG);
if (error)
goto error;
@@ -1166,7 +1166,7 @@ xfs_alloc_ag_vextent(
/* if not file data, insert new block into the reverse map btree */
if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
- error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
+ error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
args->agbno, args->len, &args->oinfo);
if (error)
return error;
@@ -1178,7 +1178,7 @@ xfs_alloc_ag_vextent(
if (error)
return error;
- ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
+ ASSERT(!xfs_extent_busy_search(args->mp, args->pag,
args->agbno, args->len));
}
@@ -1217,7 +1217,7 @@ xfs_alloc_ag_vextent_exact(
* Allocate/initialize a cursor for the by-number freespace btree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
+ args->pag, XFS_BTNUM_BNO);
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
@@ -1277,7 +1277,7 @@ xfs_alloc_ag_vextent_exact(
* Allocate/initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
@@ -1674,9 +1674,8 @@ restart:
* Allocate and initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
bno_cur = NULL;
- busy = false;
/*
* Look for an entry >= maxlen+alignment-1 blocks.
@@ -1837,7 +1836,7 @@ restart:
* Allocate and initialize a cursor for the by-block tree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
+ args->pag, XFS_BTNUM_BNO);
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
rbno, rlen, XFSA_FIXUP_CNT_OK)))
goto error0;
@@ -1896,12 +1895,13 @@ xfs_free_ag_extent(
int haveright; /* have a right neighbor */
int i;
int error;
+ struct xfs_perag *pag = agbp->b_pag;
bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
if (!xfs_rmap_should_skip_owner_update(oinfo)) {
- error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
+ error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
if (error)
goto error0;
}
@@ -1909,7 +1909,7 @@ xfs_free_ag_extent(
/*
* Allocate and initialize a cursor for the by-block btree.
*/
- bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
+ bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
@@ -1979,7 +1979,7 @@ xfs_free_ag_extent(
/*
* Now allocate and initialize a cursor for the by-size tree.
*/
- cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
+ cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
/*
* Have both left and right contiguous neighbors.
* Merge all three into a single free block.
@@ -2490,7 +2490,7 @@ xfs_exact_minlen_extent_available(
int error = 0;
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
if (error)
goto out;
@@ -2693,21 +2693,21 @@ out_no_agbp:
* Get a block from the freelist.
* Returns with the buffer for the block gotten.
*/
-int /* error */
+int
xfs_alloc_get_freelist(
- xfs_trans_t *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer containing the agf structure */
- xfs_agblock_t *bnop, /* block address retrieved from freelist */
- int btreeblk) /* destination is a AGF btree */
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agblock_t *bnop,
+ int btreeblk)
{
- struct xfs_agf *agf = agbp->b_addr;
- struct xfs_buf *agflbp;/* buffer for a.g. freelist structure */
- xfs_agblock_t bno; /* block number returned */
- __be32 *agfl_bno;
- int error;
- int logflags;
- xfs_mount_t *mp = tp->t_mountp;
- xfs_perag_t *pag; /* per allocation group data */
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_buf *agflbp;
+ xfs_agblock_t bno;
+ __be32 *agfl_bno;
+ int error;
+ int logflags;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
/*
* Freelist is empty, give up.
@@ -2817,20 +2817,20 @@ xfs_alloc_pagf_init(
/*
* Put the block on the freelist for the allocation group.
*/
-int /* error */
+int
xfs_alloc_put_freelist(
- xfs_trans_t *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer for a.g. freelist header */
- struct xfs_buf *agflbp,/* buffer for a.g. free block array */
- xfs_agblock_t bno, /* block being freed */
- int btreeblk) /* block came from a AGF btree */
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ struct xfs_buf *agflbp,
+ xfs_agblock_t bno,
+ int btreeblk)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agf *agf = agbp->b_addr;
- __be32 *blockp;/* pointer to array entry */
+ struct xfs_perag *pag;
+ __be32 *blockp;
int error;
int logflags;
- xfs_perag_t *pag; /* per allocation group data */
__be32 *agfl_bno;
int startoff;
@@ -3292,7 +3292,7 @@ error0:
int
xfs_free_extent_fix_freelist(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_buf **agbp)
{
struct xfs_alloc_arg args;
@@ -3301,7 +3301,8 @@ xfs_free_extent_fix_freelist(
memset(&args, 0, sizeof(struct xfs_alloc_arg));
args.tp = tp;
args.mp = tp->t_mountp;
- args.agno = agno;
+ args.agno = pag->pag_agno;
+ args.pag = pag;
/*
* validate that the block number is legal - the enables us to detect
@@ -3310,17 +3311,12 @@ xfs_free_extent_fix_freelist(
if (args.agno >= args.mp->m_sb.sb_agcount)
return -EFSCORRUPTED;
- args.pag = xfs_perag_get(args.mp, args.agno);
- ASSERT(args.pag);
-
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
if (error)
- goto out;
+ return error;
*agbp = args.agbp;
-out:
- xfs_perag_put(args.pag);
- return error;
+ return 0;
}
/*
@@ -3344,6 +3340,7 @@ __xfs_free_extent(
struct xfs_agf *agf;
int error;
unsigned int busy_flags = 0;
+ struct xfs_perag *pag;
ASSERT(len != 0);
ASSERT(type != XFS_AG_RESV_AGFL);
@@ -3352,33 +3349,37 @@ __xfs_free_extent(
XFS_ERRTAG_FREE_EXTENT))
return -EIO;
- error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
if (error)
- return error;
+ goto err;
agf = agbp->b_addr;
if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
error = -EFSCORRUPTED;
- goto err;
+ goto err_release;
}
/* validate the extent size is legal now we have the agf locked */
if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
error = -EFSCORRUPTED;
- goto err;
+ goto err_release;
}
error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
if (error)
- goto err;
+ goto err_release;
if (skip_discard)
busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
- xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
+ xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
+ xfs_perag_put(pag);
return 0;
-err:
+err_release:
xfs_trans_brelse(tp, agbp);
+err:
+ xfs_perag_put(pag);
return error;
}
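
Note the new calling convention: xfs_free_extent_fix_freelist() no longer looks up the perag itself, so the caller must hold a reference across the call and release it on every exit path. Reduced to a sketch, the acquire/unwind ordering used in __xfs_free_extent() above is:

	/* Sketch of the unwind pattern above, not complete code. */
	pag = xfs_perag_get(mp, agno);
	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
	if (error)
		goto err;		/* only the reference to undo */
	/* ... corruption checks and the actual free ... */
err_release:
	xfs_trans_brelse(tp, agbp);	/* undo in reverse acquisition order */
err:
	xfs_perag_put(pag);
	return error;
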
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index a4427c5775c2..e30900b6f8ba 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -214,7 +214,7 @@ int xfs_alloc_read_agfl(struct xfs_mount *mp, struct xfs_trans *tp,
int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
struct xfs_buf *, struct xfs_owner_info *);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
-int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
+int xfs_free_extent_fix_freelist(struct xfs_trans *tp, struct xfs_perag *pag,
struct xfs_buf **agbp);
xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index a43e4c50e69b..6b363f78cfa2 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
@@ -19,6 +18,7 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
+#include "xfs_ag.h"
STATIC struct xfs_btree_cur *
@@ -26,8 +26,7 @@ xfs_allocbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno,
- cur->bc_btnum);
+ cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
STATIC void
@@ -39,13 +38,12 @@ xfs_allocbt_set_root(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
- struct xfs_perag *pag = agbp->b_pag;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- pag->pagf_levels[btnum] += inc;
+ cur->bc_ag.pag->pagf_levels[btnum] += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -72,7 +70,7 @@ xfs_allocbt_alloc_block(
}
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);
new->s = cpu_to_be32(bno);
@@ -86,7 +84,6 @@ xfs_allocbt_free_block(
struct xfs_buf *bp)
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
- struct xfs_agf *agf = agbp->b_addr;
xfs_agblock_t bno;
int error;
@@ -96,7 +93,7 @@ xfs_allocbt_free_block(
return error;
atomic64_dec(&cur->bc_mp->m_allocbt_blks);
- xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;
}
@@ -225,7 +222,7 @@ xfs_allocbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -473,7 +470,7 @@ STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
@@ -486,6 +483,7 @@ xfs_allocbt_init_common(
cur->bc_mp = mp;
cur->bc_btnum = btnum;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_ag.abt.active = false;
if (btnum == XFS_BTNUM_CNT) {
cur->bc_ops = &xfs_cntbt_ops;
@@ -496,8 +494,9 @@ xfs_allocbt_init_common(
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
- cur->bc_ag.agno = agno;
- cur->bc_ag.abt.active = false;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -513,13 +512,13 @@ xfs_allocbt_init_cursor(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
struct xfs_buf *agbp, /* buffer for agf structure */
- xfs_agnumber_t agno, /* allocation group number */
+ struct xfs_perag *pag,
xfs_btnum_t btnum) /* btree identifier */
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+ cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
if (btnum == XFS_BTNUM_CNT)
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
else
@@ -535,12 +534,12 @@ struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+ cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
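
Because xfs_allocbt_init_common() now bumps pag_ref itself, a cursor independently pins its perag for as long as it lives. A hedged sketch of the expected lifetime (assuming cursor teardown drops the reference the cursor took):

	/* Hypothetical example: cursor creation and teardown. */
	struct xfs_btree_cur	*cur;
	int			i, error;

	cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
	error = xfs_alloc_lookup_ge(cur, 0, 1, &i);	/* smallest extent */
	xfs_btree_del_cursor(cur, error);	/* assumed to put cur->bc_ag.pag */
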
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index a5b998e950fe..9eb4c667a6b8 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
struct xbtree_afakeroot;
/*
@@ -46,11 +47,11 @@ struct xbtree_afakeroot;
(maxrecs) * sizeof(xfs_alloc_key_t) + \
((index) - 1) * sizeof(xfs_alloc_ptr_t)))
-extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
- struct xfs_trans *, struct xfs_buf *,
- xfs_agnumber_t, xfs_btnum_t);
+extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *bp,
+ struct xfs_perag *pag, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 96146f425e50..d9d7d5137b73 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -44,20 +44,27 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
* Internal routines when attribute list is one block.
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
-STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp);
+STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp);
/*
* Internal routines when attribute list is more than one block.
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
-STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
-STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
+STATIC void xfs_attr_restore_rmt_blk(struct xfs_da_args *args);
+STATIC int xfs_attr_node_addname(struct xfs_delattr_context *dac);
+STATIC int xfs_attr_node_addname_find_attr(struct xfs_delattr_context *dac);
+STATIC int xfs_attr_node_addname_clear_incomplete(
+ struct xfs_delattr_context *dac);
STATIC int xfs_attr_node_hasname(xfs_da_args_t *args,
struct xfs_da_state **state);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
+STATIC int xfs_attr_set_iter(struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp);
+STATIC int xfs_attr_node_removename(struct xfs_da_args *args,
+ struct xfs_da_state *state);
int
xfs_inode_hasattr(
@@ -237,27 +244,77 @@ xfs_attr_is_shortform(
}
/*
- * Attempts to set an attr in shortform, or converts short form to leaf form if
- * there is not enough room. If the attr is set, the transaction is committed
- * and set to NULL.
+ * Checks to see if a delayed attribute transaction should be rolled. If so,
+ * the transaction is finished or rolled as needed.
*/
STATIC int
-xfs_attr_set_shortform(
- struct xfs_da_args *args,
- struct xfs_buf **leaf_bp)
+xfs_attr_trans_roll(
+ struct xfs_delattr_context *dac)
{
- struct xfs_inode *dp = args->dp;
- int error, error2 = 0;
+ struct xfs_da_args *args = dac->da_args;
+ int error;
+
+ if (dac->flags & XFS_DAC_DEFER_FINISH) {
+ /*
+ * The caller wants us to finish all the deferred ops so that we
+ * avoid pinning the log tail with a large number of deferred
+ * ops.
+ */
+ dac->flags &= ~XFS_DAC_DEFER_FINISH;
+ error = xfs_defer_finish(&args->trans);
+ } else
+ error = xfs_trans_roll_inode(&args->trans, args->dp);
+
+ return error;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_buf *leaf_bp = NULL;
+ int error = 0;
+ struct xfs_delattr_context dac = {
+ .da_args = args,
+ };
+
+ do {
+ error = xfs_attr_set_iter(&dac, &leaf_bp);
+ if (error != -EAGAIN)
+ break;
+
+ error = xfs_attr_trans_roll(&dac);
+ if (error) {
+ if (leaf_bp)
+ xfs_trans_brelse(args->trans, leaf_bp);
+ return error;
+ }
+ } while (true);
+
+ return error;
+}
+
+STATIC int
+xfs_attr_sf_addname(
+ struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ int error = 0;
/*
* Try to add the attr to the attribute list in the inode.
*/
error = xfs_attr_try_sf_addname(dp, args);
- if (error != -ENOSPC) {
- error2 = xfs_trans_commit(args->trans);
- args->trans = NULL;
- return error ? error : error2;
- }
+
+ /* Should only be 0, -EEXIST or -ENOSPC */
+ if (error != -ENOSPC)
+ return error;
+
/*
* It won't fit in the shortform, transform to a leaf block. GROT:
* another possible req'mt for a double-split btree op.
@@ -269,85 +326,300 @@ xfs_attr_set_shortform(
/*
* Prevent the leaf buffer from being unlocked so that a concurrent AIL
* push cannot grab the half-baked leaf buffer and run into problems
- * with the write verifier. Once we're done rolling the transaction we
- * can release the hold and add the attr to the leaf.
+ * with the write verifier.
*/
xfs_trans_bhold(args->trans, *leaf_bp);
- error = xfs_defer_finish(&args->trans);
- xfs_trans_bhold_release(args->trans, *leaf_bp);
- if (error) {
- xfs_trans_brelse(args->trans, *leaf_bp);
- return error;
- }
- return 0;
+ /*
+ * We're still in XFS_DAS_UNINIT state here. We've converted
+ * the attr fork to leaf format and will restart with the leaf
+ * add.
+ */
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
}
/*
* Set the attribute specified in @args.
+ * This routine is meant to function as a delayed operation and may return
+ * -EAGAIN when the transaction needs to be rolled. Callers will need to
+ * handle this, and call the routine again until it returns a code other
+ * than -EAGAIN.
*/
int
-xfs_attr_set_args(
- struct xfs_da_args *args)
+xfs_attr_set_iter(
+ struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp)
{
- struct xfs_inode *dp = args->dp;
- struct xfs_buf *leaf_bp = NULL;
- int error = 0;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_buf *bp = NULL;
+ int forkoff, error = 0;
+
+ /* State machine switch */
+ switch (dac->dela_state) {
+ case XFS_DAS_UNINIT:
+ /*
+ * If the fork is shortform, attempt to add the attr. If there
+ * is no space, this converts to leaf format and returns
+ * -EAGAIN with the leaf buffer held across the roll. The caller
+ * will deal with a transaction roll error, but otherwise
+ * release the hold once we return with a clean transaction.
+ */
+ if (xfs_attr_is_shortform(dp))
+ return xfs_attr_sf_addname(dac, leaf_bp);
+ if (*leaf_bp != NULL) {
+ xfs_trans_bhold_release(args->trans, *leaf_bp);
+ *leaf_bp = NULL;
+ }
- /*
- * If the attribute list is already in leaf format, jump straight to
- * leaf handling. Otherwise, try to add the attribute to the shortform
- * list; if there's no room then convert the list to leaf format and try
- * again.
- */
- if (xfs_attr_is_shortform(dp)) {
+ if (xfs_attr_is_leaf(dp)) {
+ error = xfs_attr_leaf_try_add(args, *leaf_bp);
+ if (error == -ENOSPC) {
+ error = xfs_attr3_leaf_to_node(args);
+ if (error)
+ return error;
+
+ /*
+ * Finish any deferred work items and roll the
+ * transaction once more. The goal here is to
+ * call node_addname with the inode and
+ * transaction in the same state (inode locked
+ * and joined, transaction clean) no matter how
+ * we got to this step.
+ *
+ * At this point, we are still in
+ * XFS_DAS_UNINIT, but when we come back, we'll
+ * be a node, so we'll fall down into the node
+ * handling code below
+ */
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
+ } else if (error) {
+ return error;
+ }
+
+ dac->dela_state = XFS_DAS_FOUND_LBLK;
+ } else {
+ error = xfs_attr_node_addname_find_attr(dac);
+ if (error)
+ return error;
+
+ error = xfs_attr_node_addname(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_FOUND_NBLK;
+ }
+ return -EAGAIN;
+ case XFS_DAS_FOUND_LBLK:
+ /*
+ * If there was an out-of-line value, allocate the blocks we
+ * identified for its storage and copy the value. This is done
+ * after we create the attribute so that we don't overflow the
+ * maximum size of a transaction and/or hit a deadlock.
+ */
+
+ /* Open coded xfs_attr_rmtval_set without trans handling */
+ if ((dac->flags & XFS_DAC_LEAF_ADDNAME_INIT) == 0) {
+ dac->flags |= XFS_DAC_LEAF_ADDNAME_INIT;
+ if (args->rmtblkno > 0) {
+ error = xfs_attr_rmtval_find_space(dac);
+ if (error)
+ return error;
+ }
+ }
/*
- * If the attr was successfully set in shortform, the
- * transaction is committed and set to NULL. Otherwise, is it
- * converted from shortform to leaf, and the transaction is
- * retained.
+ * Repeat allocating remote blocks for the attr value until
+ * blkcnt drops to zero.
*/
- error = xfs_attr_set_shortform(args, &leaf_bp);
- if (error || !args->trans)
+ if (dac->blkcnt > 0) {
+ error = xfs_attr_rmtval_set_blk(dac);
+ if (error)
+ return error;
+ return -EAGAIN;
+ }
+
+ error = xfs_attr_rmtval_set_value(args);
+ if (error)
return error;
- }
- if (xfs_attr_is_leaf(dp)) {
- error = xfs_attr_leaf_addname(args);
- if (error != -ENOSPC)
+ /*
+ * If this is not a rename, clear the incomplete flag and we're
+ * done.
+ */
+ if (!(args->op_flags & XFS_DA_OP_RENAME)) {
+ if (args->rmtblkno > 0)
+ error = xfs_attr3_leaf_clearflag(args);
return error;
+ }
/*
- * Promote the attribute list to the Btree format.
+ * If this is an atomic rename operation, we must "flip" the
+ * incomplete flags on the "new" and "old" attribute/value pairs
+ * so that one disappears and one appears atomically. Then we
+ * must remove the "old" attribute/value pair.
+ *
+ * In a separate transaction, set the incomplete flag on the
+ * "old" attr and clear the incomplete flag on the "new" attr.
*/
- error = xfs_attr3_leaf_to_node(args);
+ error = xfs_attr3_leaf_flipflags(args);
if (error)
return error;
+ /*
+ * Commit the flag value change and start the next trans in
+ * series.
+ */
+ dac->dela_state = XFS_DAS_FLIP_LFLAG;
+ return -EAGAIN;
+ case XFS_DAS_FLIP_LFLAG:
+ /*
+ * Dismantle the "old" attribute/value pair by removing a
+ * "remote" value (if it exists).
+ */
+ xfs_attr_restore_rmt_blk(args);
+ error = xfs_attr_rmtval_invalidate(args);
+ if (error)
+ return error;
+
+ /* fallthrough */
+ case XFS_DAS_RM_LBLK:
+ /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
+ dac->dela_state = XFS_DAS_RM_LBLK;
+ if (args->rmtblkno) {
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_RD_LEAF;
+ return -EAGAIN;
+ }
+ /* fallthrough */
+ case XFS_DAS_RD_LEAF:
/*
- * Finish any deferred work items and roll the transaction once
- * more. The goal here is to call node_addname with the inode
- * and transaction in the same state (inode locked and joined,
- * transaction clean) no matter how we got to this step.
+ * This is the last step for leaf format. Read the block with
+ * the old attr, remove the old attr, check for shortform
+ * conversion and return.
*/
- error = xfs_defer_finish(&args->trans);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
+ &bp);
if (error)
return error;
+ xfs_attr3_leaf_remove(bp, args);
+
+ forkoff = xfs_attr_shortform_allfit(bp, dp);
+ if (forkoff)
+ error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
+ /* bp is gone due to xfs_da_shrink_inode */
+
+ return error;
+
+ case XFS_DAS_FOUND_NBLK:
+ /*
+ * Find space for remote blocks and fall into the allocation
+ * state.
+ */
+ if (args->rmtblkno > 0) {
+ error = xfs_attr_rmtval_find_space(dac);
+ if (error)
+ return error;
+ }
+
+ /* fallthrough */
+ case XFS_DAS_ALLOC_NODE:
+ /*
+ * If there was an out-of-line value, allocate the blocks we
+ * identified for its storage and copy the value. This is done
+ * after we create the attribute so that we don't overflow the
+ * maximum size of a transaction and/or hit a deadlock.
+ */
+ dac->dela_state = XFS_DAS_ALLOC_NODE;
+ if (args->rmtblkno > 0) {
+ if (dac->blkcnt > 0) {
+ error = xfs_attr_rmtval_set_blk(dac);
+ if (error)
+ return error;
+ return -EAGAIN;
+ }
+
+ error = xfs_attr_rmtval_set_value(args);
+ if (error)
+ return error;
+ }
+
/*
- * Commit the current trans (including the inode) and
- * start a new one.
+ * If this was not a rename, clear the incomplete flag and we're
+ * done.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
+ if (!(args->op_flags & XFS_DA_OP_RENAME)) {
+ if (args->rmtblkno > 0)
+ error = xfs_attr3_leaf_clearflag(args);
+ goto out;
+ }
+
+ /*
+ * If this is an atomic rename operation, we must "flip" the
+ * incomplete flags on the "new" and "old" attribute/value pairs
+ * so that one disappears and one appears atomically. Then we
+ * must remove the "old" attribute/value pair.
+ *
+ * In a separate transaction, set the incomplete flag on the
+ * "old" attr and clear the incomplete flag on the "new" attr.
+ */
+ error = xfs_attr3_leaf_flipflags(args);
+ if (error)
+ goto out;
+ /*
+ * Commit the flag value change and start the next trans in
+ * series.
+ */
+ dac->dela_state = XFS_DAS_FLIP_NFLAG;
+ return -EAGAIN;
+
+ case XFS_DAS_FLIP_NFLAG:
+ /*
+ * Dismantle the "old" attribute/value pair by removing a
+ * "remote" value (if it exists).
+ */
+ xfs_attr_restore_rmt_blk(args);
+
+ error = xfs_attr_rmtval_invalidate(args);
if (error)
return error;
- }
- error = xfs_attr_node_addname(args);
+ /* fallthrough */
+ case XFS_DAS_RM_NBLK:
+ /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
+ dac->dela_state = XFS_DAS_RM_NBLK;
+ if (args->rmtblkno) {
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_CLR_FLAG;
+ return -EAGAIN;
+ }
+
+ /* fallthrough */
+ case XFS_DAS_CLR_FLAG:
+ /*
+ * The last state for node format. Look up the old attr and
+ * remove it.
+ */
+ error = xfs_attr_node_addname_clear_incomplete(dac);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+out:
return error;
}
+
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
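
For reference, the dela_state transitions implemented by xfs_attr_set_iter() above form two chains, one per attr fork format; the FLIP/RM/RD_LEAF/CLR_FLAG states are only reached for rename (replace) operations:

/*
 * XFS_DAS_UNINIT
 *   leaf: -> XFS_DAS_FOUND_LBLK -> XFS_DAS_FLIP_LFLAG
 *            -> XFS_DAS_RM_LBLK -> XFS_DAS_RD_LEAF -> done
 *   node: -> XFS_DAS_FOUND_NBLK -> XFS_DAS_ALLOC_NODE
 *            -> XFS_DAS_FLIP_NFLAG -> XFS_DAS_RM_NBLK
 *            -> XFS_DAS_CLR_FLAG -> done
 */
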
@@ -382,16 +654,25 @@ xfs_has_attr(
*/
int
xfs_attr_remove_args(
- struct xfs_da_args *args)
+ struct xfs_da_args *args)
{
- if (!xfs_inode_hasattr(args->dp))
- return -ENOATTR;
+ int error;
+ struct xfs_delattr_context dac = {
+ .da_args = args,
+ };
- if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
- return xfs_attr_shortform_remove(args);
- if (xfs_attr_is_leaf(args->dp))
- return xfs_attr_leaf_removename(args);
- return xfs_attr_node_removename(args);
+ do {
+ error = xfs_attr_remove_iter(&dac);
+ if (error != -EAGAIN)
+ break;
+
+ error = xfs_attr_trans_roll(&dac);
+ if (error)
+ return error;
+
+ } while (true);
+
+ return error;
}
/*
@@ -559,7 +840,7 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
if (retval == -EEXIST) {
if (args->attr_flags & XATTR_CREATE)
return retval;
- retval = xfs_attr_shortform_remove(args);
+ retval = xfs_attr_sf_removename(args);
if (retval)
return retval;
/*
@@ -670,115 +951,6 @@ out_brelse:
return retval;
}
-
-/*
- * Add a name to the leaf attribute list structure
- *
- * This leaf block cannot have a "remote" value, we only call this routine
- * if bmap_one_block() says there is only one block (ie: no remote blks).
- */
-STATIC int
-xfs_attr_leaf_addname(
- struct xfs_da_args *args)
-{
- int error, forkoff;
- struct xfs_buf *bp = NULL;
- struct xfs_inode *dp = args->dp;
-
- trace_xfs_attr_leaf_addname(args);
-
- error = xfs_attr_leaf_try_add(args, bp);
- if (error)
- return error;
-
- /*
- * Commit the transaction that added the attr name so that
- * later routines can manage their own transactions.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- return error;
-
- /*
- * If there was an out-of-line value, allocate the blocks we
- * identified for its storage and copy the value. This is done
- * after we create the attribute so that we don't overflow the
- * maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_rmtval_set(args);
- if (error)
- return error;
- }
-
- if (!(args->op_flags & XFS_DA_OP_RENAME)) {
- /*
- * Added a "remote" value, just clear the incomplete flag.
- */
- if (args->rmtblkno > 0)
- error = xfs_attr3_leaf_clearflag(args);
-
- return error;
- }
-
- /*
- * If this is an atomic rename operation, we must "flip" the incomplete
- * flags on the "new" and "old" attribute/value pairs so that one
- * disappears and one appears atomically. Then we must remove the "old"
- * attribute/value pair.
- *
- * In a separate transaction, set the incomplete flag on the "old" attr
- * and clear the incomplete flag on the "new" attr.
- */
-
- error = xfs_attr3_leaf_flipflags(args);
- if (error)
- return error;
- /*
- * Commit the flag value change and start the next trans in series.
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- return error;
-
- /*
- * Dismantle the "old" attribute/value pair by removing a "remote" value
- * (if it exists).
- */
- xfs_attr_restore_rmt_blk(args);
-
- if (args->rmtblkno) {
- error = xfs_attr_rmtval_invalidate(args);
- if (error)
- return error;
-
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
- }
-
- /*
- * Read in the block containing the "old" attr, then remove the "old"
- * attr from that block (neat, huh!)
- */
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
- &bp);
- if (error)
- return error;
-
- xfs_attr3_leaf_remove(bp, args);
-
- /*
- * If the result is small enough, shrink it all into the inode.
- */
- forkoff = xfs_attr_shortform_allfit(bp, dp);
- if (forkoff)
- error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
- /* bp is gone due to xfs_da_shrink_inode */
-
- return error;
-}
-
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
@@ -909,48 +1081,26 @@ xfs_attr_node_hasname(
* External routines when attribute list size > geo->blksize
*========================================================================*/
-/*
- * Add a name to a Btree-format attribute list.
- *
- * This will involve walking down the Btree, and may involve splitting
- * leaf nodes and even splitting intermediate nodes up to and including
- * the root node (a special case of an intermediate node).
- *
- * "Remote" attribute values confuse the issue and atomic rename operations
- * add a whole extra layer of confusion on top of that.
- */
STATIC int
-xfs_attr_node_addname(
- struct xfs_da_args *args)
+xfs_attr_node_addname_find_attr(
+ struct xfs_delattr_context *dac)
{
- struct xfs_da_state *state;
- struct xfs_da_state_blk *blk;
- struct xfs_inode *dp;
- int retval, error;
-
- trace_xfs_attr_node_addname(args);
+ struct xfs_da_args *args = dac->da_args;
+ int retval;
/*
- * Fill in bucket of arguments/results/context to carry around.
- */
- dp = args->dp;
-restart:
- /*
* Search to see if name already exists, and get back a pointer
* to where it should go.
*/
- error = 0;
- retval = xfs_attr_node_hasname(args, &state);
+ retval = xfs_attr_node_hasname(args, &dac->da_state);
if (retval != -ENOATTR && retval != -EEXIST)
- goto out;
+ return retval;
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
- goto out;
+ goto error;
if (retval == -EEXIST) {
if (args->attr_flags & XATTR_CREATE)
- goto out;
+ goto error;
trace_xfs_attr_node_replace(args);
@@ -968,8 +1118,44 @@ restart:
args->rmtvaluelen = 0;
}
- retval = xfs_attr3_leaf_add(blk->bp, state->args);
- if (retval == -ENOSPC) {
+ return 0;
+error:
+ if (dac->da_state)
+ xfs_da_state_free(dac->da_state);
+ return retval;
+}
+
+/*
+ * Add a name to a Btree-format attribute list.
+ *
+ * This will involve walking down the Btree, and may involve splitting
+ * leaf nodes and even splitting intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ *
+ * "Remote" attribute values confuse the issue and atomic rename operations
+ * add a whole extra layer of confusion on top of that.
+ *
+ * This routine is meant to function as a delayed operation and may return
+ * -EAGAIN when the transaction needs to be rolled. Callers will need to
+ * handle this, and call the routine again until it returns a code other
+ * than -EAGAIN.
+ */
+STATIC int
+xfs_attr_node_addname(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = dac->da_state;
+ struct xfs_da_state_blk *blk;
+ int error;
+
+ trace_xfs_attr_node_addname(args);
+
+ blk = &state->path.blk[state->path.active-1];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+ error = xfs_attr3_leaf_add(blk->bp, state->args);
+ if (error == -ENOSPC) {
if (state->path.active == 1) {
/*
* Its really a single leaf node, but it had
@@ -981,19 +1167,16 @@ restart:
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
/*
- * Commit the node conversion and start the next
- * trans in the chain.
+ * Now that we have converted the leaf to a node, we can
+ * roll the transaction, and try xfs_attr3_leaf_add
+ * again on re-entry. No need to set dela_state to do
+ * this. dela_state is still unset by this function at
+ * this point.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
-
- goto restart;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
}
/*
@@ -1005,9 +1188,7 @@ restart:
error = xfs_da3_split(state);
if (error)
goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
} else {
/*
* Addition succeeded, update Btree hashvals.
@@ -1015,77 +1196,21 @@ restart:
xfs_da3_fixhashpath(state, &state->path);
}
- /*
- * Kill the state structure, we're done with it and need to
- * allow the buffers to come back later.
- */
- xfs_da_state_free(state);
- state = NULL;
-
- /*
- * Commit the leaf addition or btree split and start the next
- * trans in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
-
- /*
- * If there was an out-of-line value, allocate the blocks we
- * identified for its storage and copy the value. This is done
- * after we create the attribute so that we don't overflow the
- * maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_rmtval_set(args);
- if (error)
- return error;
- }
-
- if (!(args->op_flags & XFS_DA_OP_RENAME)) {
- /*
- * Added a "remote" value, just clear the incomplete flag.
- */
- if (args->rmtblkno > 0)
- error = xfs_attr3_leaf_clearflag(args);
- retval = error;
- goto out;
- }
-
- /*
- * If this is an atomic rename operation, we must "flip" the incomplete
- * flags on the "new" and "old" attribute/value pairs so that one
- * disappears and one appears atomically. Then we must remove the "old"
- * attribute/value pair.
- *
- * In a separate transaction, set the incomplete flag on the "old" attr
- * and clear the incomplete flag on the "new" attr.
- */
- error = xfs_attr3_leaf_flipflags(args);
- if (error)
- goto out;
- /*
- * Commit the flag value change and start the next trans in series
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- goto out;
-
- /*
- * Dismantle the "old" attribute/value pair by removing a "remote" value
- * (if it exists).
- */
- xfs_attr_restore_rmt_blk(args);
+out:
+ if (state)
+ xfs_da_state_free(state);
+ return error;
+}
- if (args->rmtblkno) {
- error = xfs_attr_rmtval_invalidate(args);
- if (error)
- return error;
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
- }
+STATIC int
+xfs_attr_node_addname_clear_incomplete(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = NULL;
+ int retval = 0;
+ int error = 0;
/*
* Re-find the "old" attribute entry after any split ops. The INCOMPLETE
@@ -1098,13 +1223,7 @@ restart:
if (error)
goto out;
- /*
- * Remove the name and update the hashvals in the tree.
- */
- blk = &state->path.blk[state->path.active-1];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- error = xfs_attr3_leaf_remove(blk->bp, args);
- xfs_da3_fixhashpath(state, &state->path);
+ error = xfs_attr_node_removename(args, state);
/*
* Check to see if the tree needs to be collapsed.
@@ -1190,14 +1309,16 @@ xfs_attr_leaf_mark_incomplete(
*/
STATIC
int xfs_attr_node_removename_setup(
- struct xfs_da_args *args,
- struct xfs_da_state **state)
+ struct xfs_delattr_context *dac)
{
- int error;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state **state = &dac->da_state;
+ int error;
error = xfs_attr_node_hasname(args, state);
if (error != -EEXIST)
return error;
+ error = 0;
ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
ASSERT((*state)->path.blk[(*state)->path.active - 1].magic ==
@@ -1206,97 +1327,164 @@ int xfs_attr_node_removename_setup(
if (args->rmtblkno > 0) {
error = xfs_attr_leaf_mark_incomplete(args, *state);
if (error)
- return error;
+ goto out;
- return xfs_attr_rmtval_invalidate(args);
+ error = xfs_attr_rmtval_invalidate(args);
}
+out:
+ if (error)
+ xfs_da_state_free(*state);
- return 0;
+ return error;
}
STATIC int
-xfs_attr_node_remove_rmt(
+xfs_attr_node_removename(
struct xfs_da_args *args,
struct xfs_da_state *state)
{
- int error = 0;
-
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
+ struct xfs_da_state_blk *blk;
+ int retval;
/*
- * Refill the state structure with buffers, the prior calls released our
- * buffers.
+ * Remove the name and update the hashvals in the tree.
*/
- return xfs_attr_refillstate(state);
+ blk = &state->path.blk[state->path.active-1];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+ retval = xfs_attr3_leaf_remove(blk->bp, args);
+ xfs_da3_fixhashpath(state, &state->path);
+
+ return retval;
}
/*
- * Remove a name from a B-tree attribute list.
+ * Remove the attribute specified in @args.
*
* This will involve walking down the Btree, and may involve joining
* leaf nodes and even joining intermediate nodes up to and including
* the root node (a special case of an intermediate node).
+ *
+ * This routine is meant to function as either an in-line or delayed operation,
+ * and may return -EAGAIN when the transaction needs to be rolled. Calling
+ * functions will need to handle this, and re-call the function until it
+ * returns something other than -EAGAIN.
*/
-STATIC int
-xfs_attr_node_removename(
- struct xfs_da_args *args)
+int
+xfs_attr_remove_iter(
+ struct xfs_delattr_context *dac)
{
- struct xfs_da_state *state;
- struct xfs_da_state_blk *blk;
- int retval, error;
- struct xfs_inode *dp = args->dp;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = dac->da_state;
+ int retval, error = 0;
+ struct xfs_inode *dp = args->dp;
trace_xfs_attr_node_removename(args);
- error = xfs_attr_node_removename_setup(args, &state);
- if (error)
- goto out;
+ switch (dac->dela_state) {
+ case XFS_DAS_UNINIT:
+ if (!xfs_inode_hasattr(dp))
+ return -ENOATTR;
- /*
- * If there is an out-of-line value, de-allocate the blocks.
- * This is done before we remove the attribute so that we don't
- * overflow the maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_node_remove_rmt(args, state);
- if (error)
- goto out;
- }
+ /*
+ * Shortform or leaf formats don't require transaction rolls and
+ * thus state transitions. Call the right helper and return.
+ */
+ if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
+ return xfs_attr_sf_removename(args);
- /*
- * Remove the name and update the hashvals in the tree.
- */
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- retval = xfs_attr3_leaf_remove(blk->bp, args);
- xfs_da3_fixhashpath(state, &state->path);
+ if (xfs_attr_is_leaf(dp))
+ return xfs_attr_leaf_removename(args);
- /*
- * Check to see if the tree needs to be collapsed.
- */
- if (retval && (state->path.active > 1)) {
- error = xfs_da3_join(state);
- if (error)
- goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
/*
- * Commit the Btree join operation and start a new trans.
+ * Node format may require transaction rolls. Set up the
+ * state context and fall into the state machine.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
- }
+ if (!dac->da_state) {
+ error = xfs_attr_node_removename_setup(dac);
+ if (error)
+ return error;
+ state = dac->da_state;
+ }
- /*
- * If the result is small enough, push it all into the inode.
- */
- if (xfs_attr_is_leaf(dp))
- error = xfs_attr_node_shrink(args, state);
+ /* fallthrough */
+ case XFS_DAS_RMTBLK:
+ dac->dela_state = XFS_DAS_RMTBLK;
+ /*
+ * If there is an out-of-line value, de-allocate the blocks.
+ * This is done before we remove the attribute so that we don't
+ * overflow the maximum size of a transaction and/or hit a
+ * deadlock.
+ */
+ if (args->rmtblkno > 0) {
+ /*
+ * May return -EAGAIN. Roll and repeat until all remote
+ * blocks are removed.
+ */
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error == -EAGAIN)
+ return error;
+ else if (error)
+ goto out;
+
+ /*
+ * Refill the state structure with buffers (the prior
+ * calls released our buffers) and close out this
+ * transaction before proceeding.
+ */
+ ASSERT(args->rmtblkno == 0);
+ error = xfs_attr_refillstate(state);
+ if (error)
+ goto out;
+ dac->dela_state = XFS_DAS_RM_NAME;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
+ }
+
+ /* fallthrough */
+ case XFS_DAS_RM_NAME:
+ /*
+ * If we came here fresh from a transaction roll, reattach all
+ * the buffers to the current transaction.
+ */
+ if (dac->dela_state == XFS_DAS_RM_NAME) {
+ error = xfs_attr_refillstate(state);
+ if (error)
+ goto out;
+ }
+
+ retval = xfs_attr_node_removename(args, state);
+
+ /*
+ * Check to see if the tree needs to be collapsed. If so, roll
+ * the transaction and fall into the shrink state.
+ */
+ if (retval && (state->path.active > 1)) {
+ error = xfs_da3_join(state);
+ if (error)
+ goto out;
+
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ dac->dela_state = XFS_DAS_RM_SHRINK;
+ return -EAGAIN;
+ }
+
+ /* fallthrough */
+ case XFS_DAS_RM_SHRINK:
+ /*
+ * If the result is small enough, push it all into the inode.
+ * This is our final state so it's safe to return a dirty
+ * transaction.
+ */
+ if (xfs_attr_is_leaf(dp))
+ error = xfs_attr_node_shrink(args, state);
+ ASSERT(error != -EAGAIN);
+ break;
+ default:
+ ASSERT(0);
+ error = -EINVAL;
+ goto out;
+ }
out:
if (state)
xfs_da_state_free(state);
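
The -EAGAIN contract above implies a driver loop on the caller side. Here is a
minimal sketch of that loop; it assumes the loop lives in this patch's updated
xfs_attr_remove_args() (changed elsewhere in xfs_attr.c, not shown in this
hunk), so treat the exact shape as illustrative rather than authoritative:

	int
	xfs_attr_remove_args(
		struct xfs_da_args	*args)
	{
		struct xfs_delattr_context	dac = { };
		int				error;

		xfs_delattr_context_init(&dac, args);
		do {
			error = xfs_attr_remove_iter(&dac);
			if (error != -EAGAIN)
				break;

			/* Finish deferred ops first if the iterator asked. */
			if (dac.flags & XFS_DAC_DEFER_FINISH) {
				error = xfs_defer_finish(&args->trans);
				if (error)
					break;
				dac.flags &= ~XFS_DAC_DEFER_FINISH;
			}

			/* Roll to a clean transaction, then re-enter. */
			error = xfs_trans_roll_inode(&args->trans, args->dp);
		} while (!error);

		return error;
	}

Keeping the roll in the caller is what later allows the same iterator to be
driven from deferred-operation context, where the defer machinery owns the
transaction.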
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 2b1f61987a9d..8de5d1d2733e 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -74,6 +74,406 @@ struct xfs_attr_list_context {
};
+/*
+ * ========================================================================
+ * Structure used to pass context around among the delayed routines.
+ * ========================================================================
+ */
+
+/*
+ * Below is a state machine diagram for attr remove operations. The XFS_DAS_*
+ * states indicate places where the function would return -EAGAIN, and then
+ * resume from that point after being re-called by the calling function. States
+ * marked as a "subroutine state" belong to a subroutine, and so the calling
+ * function needs to pass them back to that subroutine to allow it to finish
+ * where it left off. Otherwise they have no role in the calling function other
+ * than just passing through.
+ *
+ * xfs_attr_remove_iter()
+ *              │
+ *              v
+ *        have attr to remove? ──n──> done
+ *              │
+ *              y
+ *              │
+ *              v
+ *        are we short form? ──y──> xfs_attr_sf_removename ──> done
+ *              │
+ *              n
+ *              │
+ *              V
+ *        are we leaf form? ──y──> xfs_attr_leaf_removename ──> done
+ *              │
+ *              n
+ *              │
+ *              V
+ *   ┌── need to setup state?
+ *   │          │
+ *   n          y
+ *   │          │
+ *   │          v
+ *   │    find attr and get state
+ *   │    attr has remote blks? ──y─┐
+ *   │          │                   v
+ *   │          │          find and invalidate
+ *   │          n          the remote blocks.
+ *   │          │          mark attr incomplete
+ *   │          ├───────────────────┘
+ *   └──────────┤
+ *              │
+ *              v
+ *   Have remote blks to remove? ───y─────┐
+ *              │        ^          remove the blks
+ *              │        │                │
+ *              │        │                v
+ *              │  XFS_DAS_RMTBLK <─n── done?
+ *              │  re-enter with          │
+ *              │  one less blk to        y
+ *              │      remove             │
+ *              │                         V
+ *              │                  refill the state
+ *              n                         │
+ *              │                         v
+ *              │                  XFS_DAS_RM_NAME
+ *              │                         │
+ *              ├─────────────────────────┘
+ *              │
+ *              v
+ *       remove leaf and
+ *       update hash with
+ *   xfs_attr_node_removename
+ *              │
+ *              v
+ *           need to
+ *        shrink tree? ──n──┐
+ *              │           │
+ *              y           │
+ *              │           │
+ *              v           │
+ *          join leaf       │
+ *              │           │
+ *              v           │
+ *      XFS_DAS_RM_SHRINK   │
+ *              │           │
+ *              v           │
+ *        do the shrink     │
+ *              │           │
+ *              v           │
+ *          free state <────┘
+ *              │
+ *              v
+ *            done
+ *
+ *
+ * Below is a state machine diagram for attr set operations.
+ *
+ * The challenge in understanding this system comes from trying to absorb the
+ * state machine all at once, when really it should only be considered within
+ * the context of a single function. Once a state-sensitive function is called,
+ * the idea is that it "takes ownership" of the state machine. It is not
+ * concerned with the states that may have belonged to its calling parent, only
+ * with the states relevant to itself or to any subroutines therein. Once a
+ * calling function hands off the state machine to a subroutine, it needs to
+ * respect the simple rule that it no longer "owns" the state machine, and it
+ * is the responsibility of that calling function to propagate the -EAGAIN
+ * back up the call stack. Upon reentry, it is committed to re-calling that
+ * subroutine until it returns something other than -EAGAIN. Once that
+ * subroutine signals completion (by returning anything other than -EAGAIN),
+ * the calling function can resume using the state machine.
+ *
+ * xfs_attr_set_iter()
+ *              │
+ *              v
+ *   ┌─y─ has an attr fork?
+ *   │          │
+ *   │          n
+ *   │          │
+ *   │          V
+ *   │      add a fork
+ *   │          │
+ *   └──────────┤
+ *              │
+ *              V
+ *   ┌─── is shortform?
+ *   │          │
+ *   │          y
+ *   │          │
+ *   │          V
+ *   │   xfs_attr_set_fmt
+ *   │          │
+ *   │          V
+ *   │ xfs_attr_try_sf_addname
+ *   │          │
+ *   │          V
+ *   │      had enough ──y──> done
+ *   │        space?
+ *   n          │
+ *   │          n
+ *   │          │
+ *   │          V
+ *   │   transform to leaf
+ *   │          │
+ *   │          V
+ *   │  hold the leaf buffer
+ *   │          │
+ *   │          V
+ *   │    return -EAGAIN
+ *   │     Re-enter in
+ *   │      leaf form
+ *   │
+ *   └─> release leaf buffer
+ *         if needed
+ *              │
+ *              V
+ *   ┌───n── fork has
+ *   │      only 1 blk?
+ *   │          │
+ *   │          y
+ *   │          │
+ *   │          v
+ *   │ xfs_attr_leaf_try_add()
+ *   │          │
+ *   │          v
+ *   │      had enough ──────────────y─────────────┐
+ *   │        space?                               │
+ *   │          │                                  │
+ *   │          n                                  │
+ *   │          │                                  │
+ *   │          v                                  │
+ *   │    return -EAGAIN                           │
+ *   │     re-enter in                             │
+ *   │      node form                              │
+ *   │          │                                  │
+ *   └──────────┤                                  │
+ *              │                                  │
+ *              V                                  │
+ *  xfs_attr_node_addname_find_attr                │
+ *       determines if this                        │
+ *       is create or rename                       │
+ *      find space to store attr                   │
+ *              │                                  │
+ *              v                                  │
+ *     xfs_attr_node_addname                       │
+ *              │                                  │
+ *              v                                  │
+ *   fits in a node leaf? ────n─────┐              │
+ *              │     ^             v              │
+ *              │     │      single leaf node?     │
+ *              │     │          │       │         │
+ *              y     │          y       n         │
+ *              │     │          │       │         │
+ *              v     │          v       v         │
+ *           update   │   grow the leaf  split if  │
+ *          hashvals  └── return -EAGAIN  needed   │
+ *              │        retry leaf add    │       │
+ *              │         on reentry       │       │
+ *              ├──────────────────────────┘       │
+ *              │                                  │
+ *              v                                  │
+ *        need to alloc                            │
+ *   ┌─y── or flip flag?                           │
+ *   │          │                                  │
+ *   │          n                                  │
+ *   │          │                                  │
+ *   │          v                                  │
+ *   │         done                                │
+ *   │                                             │
+ *   │                                             │
+ *   │        XFS_DAS_FOUND_LBLK <─────────────────┘
+ *   │                 │
+ *   │                 V
+ *   │     xfs_attr_leaf_addname()
+ *   │                 │
+ *   │                 v
+ *   │      ┌──first time through?
+ *   │      │          │
+ *   │      │          y
+ *   │      │          │
+ *   │      n          v
+ *   │      │   if we have rmt blks
+ *   │      │   find space for them
+ *   │      │          │
+ *   │      └──────────┤
+ *   │                 │
+ *   │                 v
+ *   │            still have
+ *   │      ┌─n─ blks to alloc? <──┐
+ *   │      │          │           │
+ *   │      │          y           │
+ *   │      │          │           │
+ *   │      │          v           │
+ *   │      │     alloc one blk    │
+ *   │      │    return -EAGAIN ───┘
+ *   │      │   re-enter with one
+ *   │      │    less blk to alloc
+ *   │      │
+ *   │      │
+ *   │      └───> set the rmt
+ *   │              value
+ *   │                 │
+ *   │                 v
+ *   │              was this
+ *   │            a rename? ──n─┐
+ *   │                 │        │
+ *   │                 y        │
+ *   │                 │        │
+ *   │                 v        │
+ *   │        flip incomplete   │
+ *   │             flag         │
+ *   │                 │        │
+ *   │                 v        │
+ *   │       XFS_DAS_FLIP_LFLAG │
+ *   │                 │        │
+ *   │                 v        │
+ *   │         need to remove   │
+ *   │          old blks? ──n───┤
+ *   │                 │        │
+ *   │                 y        │
+ *   │                 │        │
+ *   │                 V        │
+ *   │              remove      │
+ *   │       ┌───>  old blks    │
+ *   │       │         │        │
+ *   │  XFS_DAS_RM_LBLK│        │
+ *   │       ^         │        │
+ *   │       │         v        │
+ *   │       └──y── more to     │
+ *   │              remove?     │
+ *   │                 │        │
+ *   │                 n        │
+ *   │                 │        │
+ *   │                 v        │
+ *   │        XFS_DAS_RD_LEAF   │
+ *   │                 │        │
+ *   │                 v        │
+ *   │          remove leaf     │
+ *   │                 │        │
+ *   │                 v        │
+ *   │         shrink to sf     │
+ *   │          if needed       │
+ *   │                 │        │
+ *   │                 v        │
+ *   │               done <─────┘
+ *   │
+ *   └──────> XFS_DAS_FOUND_NBLK
+ *                     │
+ *                     v
+ *       ┌─────n── need to
+ *       │       alloc blks?
+ *       │             │
+ *       │             y
+ *       │             │
+ *       │             v
+ *       │        find space
+ *       │             │
+ *       │             v
+ *       │  ┌─> XFS_DAS_ALLOC_NODE
+ *       │  │          │
+ *       │  │          v
+ *       │  │      alloc blk
+ *       │  │          │
+ *       │  │          v
+ *       │  └──y── need to alloc
+ *       │         more blocks?
+ *       │             │
+ *       │             n
+ *       │             │
+ *       │             v
+ *       │    set the rmt value
+ *       │             │
+ *       │             v
+ *       │          was this
+ *       └────────> a rename? ──n─┐
+ *                      │         │
+ *                      y         │
+ *                      │         │
+ *                      v         │
+ *             flip incomplete    │
+ *                    flag        │
+ *                      │         │
+ *                      v         │
+ *            XFS_DAS_FLIP_NFLAG  │
+ *                      │         │
+ *                      v         │
+ *                   need to      │
+ *                remove blks? ─n─┤
+ *                      │         │
+ *                      y         │
+ *                      │         │
+ *                      v         │
+ *                   remove       │
+ *        ┌────────>  old blks    │
+ *        │             │         │
+ *   XFS_DAS_RM_NBLK    │         │
+ *        ^             │         │
+ *        │             v         │
+ *        └──────y── more to      │
+ *                   remove       │
+ *                      │         │
+ *                      n         │
+ *                      │         │
+ *                      v         │
+ *            XFS_DAS_CLR_FLAG    │
+ *                      │         │
+ *                      v         │
+ *                 clear flags    │
+ *                      │         │
+ *                      ├─────────┘
+ *                      │
+ *                      v
+ *                    done
+ */
+
+/*
+ * Enum values for xfs_delattr_context.dela_state
+ *
+ * These values are used by delayed attribute operations to keep track of where
+ * they were before they returned -EAGAIN. A return code of -EAGAIN signals the
+ * calling function to roll the transaction, and then call the subroutine to
+ * finish the operation. The enum is then used by the subroutine to jump back
+ * to where it was and resume executing where it left off.
+ */
+enum xfs_delattr_state {
+ XFS_DAS_UNINIT = 0, /* No state has been set yet */
+ XFS_DAS_RMTBLK, /* Removing remote blks */
+ XFS_DAS_RM_NAME, /* Remove attr name */
+ XFS_DAS_RM_SHRINK, /* We are shrinking the tree */
+ XFS_DAS_FOUND_LBLK, /* We found leaf blk for attr */
+ XFS_DAS_FOUND_NBLK, /* We found node blk for attr */
+ XFS_DAS_FLIP_LFLAG, /* Flipped leaf INCOMPLETE attr flag */
+ XFS_DAS_RM_LBLK, /* A rename is removing leaf blocks */
+ XFS_DAS_RD_LEAF, /* Read in the new leaf */
+ XFS_DAS_ALLOC_NODE, /* We are allocating node blocks */
+ XFS_DAS_FLIP_NFLAG, /* Flipped node INCOMPLETE attr flag */
+ XFS_DAS_RM_NBLK, /* A rename is removing node blocks */
+ XFS_DAS_CLR_FLAG, /* Clear incomplete flag */
+};
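
To make the jump-back mechanics concrete, here is a minimal sketch (not a
function from this patch) of how a state-sensitive routine consumes these
values: it switches on the saved state, and fallthrough between cases lets a
re-entry continue exactly where the previous call returned -EAGAIN:

	STATIC int
	xfs_attr_example_iter(
		struct xfs_delattr_context	*dac)
	{
		switch (dac->dela_state) {
		case XFS_DAS_UNINIT:
			/* First entry: work that must precede the first roll. */
			dac->dela_state = XFS_DAS_RM_NAME;
			dac->flags |= XFS_DAC_DEFER_FINISH;
			return -EAGAIN;	/* caller finishes defers and rolls */
		case XFS_DAS_RM_NAME:
			/* Re-entered in a clean transaction; continue here. */
			break;
		default:
			ASSERT(0);
			return -EINVAL;
		}
		return 0;
	}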
+
+/*
+ * Defines for xfs_delattr_context.flags
+ */
+#define XFS_DAC_DEFER_FINISH 0x01 /* finish the transaction */
+#define XFS_DAC_LEAF_ADDNAME_INIT 0x02 /* xfs_attr_leaf_addname init */
+
+/*
+ * Context used for keeping track of delayed attribute operations
+ */
+struct xfs_delattr_context {
+ struct xfs_da_args *da_args;
+
+ /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
+ struct xfs_bmbt_irec map;
+ xfs_dablk_t lblkno;
+ int blkcnt;
+
+ /* Used in xfs_attr_node_removename to roll through removing blocks */
+ struct xfs_da_state *da_state;
+
+ /* Used to keep track of the current state of the delayed operation */
+ unsigned int flags;
+ enum xfs_delattr_state dela_state;
+};
+
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
@@ -92,6 +492,9 @@ int xfs_attr_set(struct xfs_da_args *args);
int xfs_attr_set_args(struct xfs_da_args *args);
int xfs_has_attr(struct xfs_da_args *args);
int xfs_attr_remove_args(struct xfs_da_args *args);
+int xfs_attr_remove_iter(struct xfs_delattr_context *dac);
bool xfs_attr_namecheck(const void *name, size_t length);
+void xfs_delattr_context_init(struct xfs_delattr_context *dac,
+ struct xfs_da_args *args);
#endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 556184b63061..b910bd209949 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -19,14 +19,15 @@
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
#include "xfs_attr.h"
+#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_dir2.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
/*
@@ -773,7 +774,7 @@ xfs_attr_fork_remove(
* Remove an attribute from the shortform attribute list structure.
*/
int
-xfs_attr_shortform_remove(
+xfs_attr_sf_removename(
struct xfs_da_args *args)
{
struct xfs_attr_shortform *sf;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 9b1c59f40a26..efa757f1e912 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -51,7 +51,7 @@ int xfs_attr_shortform_lookup(struct xfs_da_args *args);
int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
struct xfs_buf **leaf_bp);
-int xfs_attr_shortform_remove(struct xfs_da_args *args);
+int xfs_attr_sf_removename(struct xfs_da_args *args);
int xfs_attr_sf_findname(struct xfs_da_args *args,
struct xfs_attr_sf_entry **sfep,
unsigned int *basep);
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 48d8e9caf86f..0c8bee3abc3b 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -439,9 +439,9 @@ xfs_attr_rmtval_get(
/*
* Find a "hole" in the attribute address space large enough for us to drop the
- * new attribute's value into
+ * new attribute's value into
*/
-STATIC int
+int
xfs_attr_rmt_find_hole(
struct xfs_da_args *args)
{
@@ -468,7 +468,7 @@ xfs_attr_rmt_find_hole(
return 0;
}
-STATIC int
+int
xfs_attr_rmtval_set_value(
struct xfs_da_args *args)
{
@@ -562,69 +562,66 @@ xfs_attr_rmtval_stale(
}
/*
- * Write the value associated with an attribute into the out-of-line buffer
- * that we have defined for it.
+ * Find a hole for the attr and store it in the delayed attr context. This
+ * initializes the context to roll through allocating an attr extent for a
+ * delayed attr operation
*/
int
-xfs_attr_rmtval_set(
- struct xfs_da_args *args)
+xfs_attr_rmtval_find_space(
+ struct xfs_delattr_context *dac)
{
- struct xfs_inode *dp = args->dp;
- struct xfs_bmbt_irec map;
- xfs_dablk_t lblkno;
- int blkcnt;
- int nmap;
- int error;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_bmbt_irec *map = &dac->map;
+ int error;
- trace_xfs_attr_rmtval_set(args);
+ dac->lblkno = 0;
+ dac->blkcnt = 0;
+ args->rmtblkcnt = 0;
+ args->rmtblkno = 0;
+ memset(map, 0, sizeof(struct xfs_bmbt_irec));
error = xfs_attr_rmt_find_hole(args);
if (error)
return error;
- blkcnt = args->rmtblkcnt;
- lblkno = (xfs_dablk_t)args->rmtblkno;
- /*
- * Roll through the "value", allocating blocks on disk as required.
- */
- while (blkcnt > 0) {
- /*
- * Allocate a single extent, up to the size of the value.
- *
- * Note that we have to consider this a data allocation as we
- * write the remote attribute without logging the contents.
- * Hence we must ensure that we aren't using blocks that are on
- * the busy list so that we don't overwrite blocks which have
- * recently been freed but their transactions are not yet
- * committed to disk. If we overwrite the contents of a busy
- * extent and then crash then the block may not contain the
- * correct metadata after log recovery occurs.
- */
- nmap = 1;
- error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
- &nmap);
- if (error)
- return error;
- error = xfs_defer_finish(&args->trans);
- if (error)
- return error;
+ dac->blkcnt = args->rmtblkcnt;
+ dac->lblkno = args->rmtblkno;
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
- lblkno += map.br_blockcount;
- blkcnt -= map.br_blockcount;
+ return 0;
+}
- /*
- * Start the next trans in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- return error;
- }
+/*
+ * Write one block of the value associated with an attribute into the
+ * out-of-line buffer that we have defined for it. This is similar to a subset
+ * of xfs_attr_rmtval_set, but records the current block to the delayed attr
+ * context, and leaves transaction handling to the caller.
+ */
+int
+xfs_attr_rmtval_set_blk(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_bmbt_irec *map = &dac->map;
+ int nmap;
+ int error;
+
+ nmap = 1;
+ error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)dac->lblkno,
+ dac->blkcnt, XFS_BMAPI_ATTRFORK, args->total,
+ map, &nmap);
+ if (error)
+ return error;
+
+ ASSERT(nmap == 1);
+ ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
+ (map->br_startblock != HOLESTARTBLOCK));
- return xfs_attr_rmtval_set_value(args);
+ /* roll attribute extent map forwards */
+ dac->lblkno += map->br_blockcount;
+ dac->blkcnt -= map->br_blockcount;
+
+ return 0;
}
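
Distilled from the XFS_DAS_FOUND_NBLK/XFS_DAS_ALLOC_NODE legs of the diagram in
xfs_attr.h, the set state machine is expected to drive these helpers roughly as
sketched below; the real xfs_attr_set_iter() elsewhere in this patch carries
many more cases:

	switch (dac->dela_state) {
	case XFS_DAS_FOUND_NBLK:
		/* Reserve a dabtree address range for the remote value. */
		error = xfs_attr_rmtval_find_space(dac);
		if (error)
			return error;
		dac->dela_state = XFS_DAS_ALLOC_NODE;
		/* fallthrough */
	case XFS_DAS_ALLOC_NODE:
		/* One extent per transaction; roll between allocations. */
		if (dac->blkcnt > 0) {
			error = xfs_attr_rmtval_set_blk(dac);
			if (error)
				return error;
			dac->flags |= XFS_DAC_DEFER_FINISH;
			return -EAGAIN;
		}
		/* All blocks are mapped; copy the value into them. */
		error = xfs_attr_rmtval_set_value(dac->da_args);
		break;
	default:
		break;
	}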
/*
@@ -669,47 +666,17 @@ xfs_attr_rmtval_invalidate(
}
/*
- * Remove the value associated with an attribute by deleting the
- * out-of-line buffer that it is stored on.
- */
-int
-xfs_attr_rmtval_remove(
- struct xfs_da_args *args)
-{
- int error;
- int retval;
-
- trace_xfs_attr_rmtval_remove(args);
-
- /*
- * Keep de-allocating extents until the remote-value region is gone.
- */
- do {
- retval = __xfs_attr_rmtval_remove(args);
- if (retval && retval != -EAGAIN)
- return retval;
-
- /*
- * Close out trans and start the next one in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- return error;
- } while (retval == -EAGAIN);
-
- return 0;
-}
-
-/*
* Remove the value associated with an attribute by deleting the out-of-line
- * buffer that it is stored on. Returns EAGAIN for the caller to refresh the
- * transaction and re-call the function
+ * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
+ * transaction and re-call the function. Callers should keep calling this
+ * routine until it returns something other than -EAGAIN.
*/
int
__xfs_attr_rmtval_remove(
- struct xfs_da_args *args)
+ struct xfs_delattr_context *dac)
{
- int error, done;
+ struct xfs_da_args *args = dac->da_args;
+ int error, done;
/*
* Unmap value blocks for this attr.
@@ -719,12 +686,20 @@ __xfs_attr_rmtval_remove(
if (error)
return error;
- error = xfs_defer_finish(&args->trans);
- if (error)
- return error;
-
- if (!done)
+ /*
+ * We don't need an explicit state here to pick up where we left off; we
+ * can figure it out using the !done return code. The actual value of
+ * dac->dela_state may be something reminiscent of the calling function,
+ * but its value is irrelevant within the context of this function. Once
+ * we are done here, the next state is set as needed by the parent
+ * function.
+ */
+ if (!done) {
+ dac->flags |= XFS_DAC_DEFER_FINISH;
return -EAGAIN;
+ }
- return error;
+ args->rmtblkno = 0;
+ args->rmtblkcnt = 0;
+ return 0;
}
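
With the xfs_attr_rmtval_remove() wrapper gone, its unmap-and-roll loop moves
into the state machine drivers. Conceptually the callers now do the equivalent
of the sketch below, which mirrors the removed wrapper but adds the new
defer-finish handshake:

	do {
		error = __xfs_attr_rmtval_remove(dac);
		if (error != -EAGAIN)
			break;

		/* -EAGAIN also set XFS_DAC_DEFER_FINISH: finish, then roll. */
		error = xfs_defer_finish(&args->trans);
		if (!error)
			error = xfs_trans_roll_inode(&args->trans, args->dp);
	} while (!error);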
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 9eee615da156..61b85b918db8 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -9,10 +9,12 @@
int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
-int xfs_attr_rmtval_set(struct xfs_da_args *args);
-int xfs_attr_rmtval_remove(struct xfs_da_args *args);
int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
xfs_buf_flags_t incore_flags);
int xfs_attr_rmtval_invalidate(struct xfs_da_args *args);
-int __xfs_attr_rmtval_remove(struct xfs_da_args *args);
+int __xfs_attr_rmtval_remove(struct xfs_delattr_context *dac);
+int xfs_attr_rmt_find_hole(struct xfs_da_args *args);
+int xfs_attr_rmtval_set_value(struct xfs_da_args *args);
+int xfs_attr_rmtval_set_blk(struct xfs_delattr_context *dac);
+int xfs_attr_rmtval_find_space(struct xfs_delattr_context *dac);
#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a3e0e6f672d6..948092babb6a 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -31,6 +31,7 @@
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
@@ -1028,7 +1029,7 @@ xfs_bmap_add_attrfork_local(
/*
* Set an inode attr fork offset based on the format of the data fork.
*/
-int
+static int
xfs_bmap_set_attrforkoff(
struct xfs_inode *ip,
int size,
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f9a390ecfb1d..67641f669918 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -187,7 +187,6 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
-int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5b6fcb9b44e2..be74a6b53689 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -21,6 +21,7 @@
#include "xfs_alloc.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"
+#include "xfs_ag.h"
/*
* Cursor allocation zone.
@@ -215,7 +216,7 @@ xfs_btree_check_sptr(
{
if (level <= 0)
return false;
- return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.agno, agbno);
+ return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.pag->pag_agno, agbno);
}
/*
@@ -244,7 +245,7 @@ xfs_btree_check_ptr(
return 0;
xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
- cur->bc_ag.agno, cur->bc_btnum,
+ cur->bc_ag.pag->pag_agno, cur->bc_btnum,
level, index);
}
@@ -376,6 +377,8 @@ xfs_btree_del_cursor(
XFS_FORCED_SHUTDOWN(cur->bc_mp));
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kmem_free(cur->bc_ops);
+ if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
+ xfs_perag_put(cur->bc_ag.pag);
kmem_cache_free(xfs_btree_cur_zone, cur);
}
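
Because del_cursor now drops a perag reference for short-pointer cursors, every
cursor constructor has to take one when it stores the pag. A sketch of the
shape this takes (the per-btree cursor init functions elsewhere in this patch
are authoritative):

	/* take a reference for the cursor; it is released in del_cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;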
@@ -885,13 +888,13 @@ xfs_btree_readahead_sblock(
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
left, 1, cur->bc_ops->buf_ops);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
right, 1, cur->bc_ops->buf_ops);
rval++;
}
@@ -949,7 +952,7 @@ xfs_btree_ptr_to_daddr(
*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
} else {
agbno = be32_to_cpu(ptr->s);
- *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.agno,
+ *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno);
}
@@ -1150,7 +1153,7 @@ xfs_btree_init_block_cur(
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
owner = cur->bc_ino.ip->i_ino;
else
- owner = cur->bc_ag.agno;
+ owner = cur->bc_ag.pag->pag_agno;
xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
cur->bc_btnum, level, numrecs,
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 10e50cbacacf..4dbdc659c396 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -11,6 +11,7 @@ struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
+struct xfs_perag;
extern kmem_zone_t *xfs_btree_cur_zone;
@@ -180,11 +181,11 @@ union xfs_btree_irec {
/* Per-AG btree information. */
struct xfs_btree_cur_ag {
+ struct xfs_perag *pag;
union {
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
};
- xfs_agnumber_t agno;
union {
struct {
unsigned long nr_ops; /* # record updates */
@@ -231,6 +232,13 @@ typedef struct xfs_btree_cur
uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
xfs_btnum_t bc_btnum; /* identifies which btree type */
int bc_statoff; /* offset of btree stats array */
+
+ /*
+ * Short btree pointers need an agno to be able to turn the pointers
+ * into physical addresses for IO, so the btree cursor switches between
+ * bc_ino and bc_ag based on whether XFS_BTREE_LONG_PTRS is set for the
+ * cursor.
+ */
union {
struct xfs_btree_cur_ag bc_ag;
struct xfs_btree_cur_ino bc_ino;
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index eefdb518fe64..57d9cb632983 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -10,7 +10,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -27,6 +26,7 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
/*
* Lookup a record by ino in the btree given by cur.
@@ -105,7 +105,7 @@ xfs_inobt_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
uint64_t realfree;
@@ -172,18 +172,17 @@ xfs_inobt_insert(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t newino,
xfs_agino_t newlen,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agino_t thisino;
int i;
int error;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
for (thisino = newino;
thisino < newino + newlen;
@@ -215,10 +214,9 @@ xfs_inobt_insert(
* Verify that the number of free inodes in the AGI is correct.
*/
#ifdef DEBUG
-STATIC int
+static int
xfs_check_agi_freecount(
- struct xfs_btree_cur *cur,
- struct xfs_agi *agi)
+ struct xfs_btree_cur *cur)
{
if (cur->bc_nlevels == 1) {
xfs_inobt_rec_incore_t rec;
@@ -244,12 +242,12 @@ xfs_check_agi_freecount(
} while (i == 1);
if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
+ ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
}
return 0;
}
#else
-#define xfs_check_agi_freecount(cur, agi) 0
+#define xfs_check_agi_freecount(cur) 0
#endif
/*
@@ -520,18 +518,17 @@ xfs_inobt_insert_sprec(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
int btnum,
struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
bool merge) /* merge or replace */
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
int error;
int i;
struct xfs_inobt_rec_incore rec;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
/* the new record is pre-aligned so we know where to look */
error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
@@ -578,14 +575,14 @@ xfs_inobt_insert_sprec(
goto error;
}
- trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
+ trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
rec.ir_holemask, nrec->ir_startino,
nrec->ir_holemask);
/* merge to nrec to output the updated record */
__xfs_inobt_rec_merge(nrec, &rec);
- trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
+ trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
nrec->ir_holemask);
error = xfs_inobt_rec_check_count(mp, nrec);
@@ -606,28 +603,28 @@ error:
}
/*
- * Allocate new inodes in the allocation group specified by agbp.
- * Returns 0 if inodes were allocated in this AG; 1 if there was no space
- * in this AG; or the usual negative error code.
+ * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
+ * inodes were allocated in this AG; -EAGAIN if there was no space in this AG,
+ * so the caller knows it can try another AG; a hard -ENOSPC when over the
+ * maximum inode count threshold; or the usual negative error code otherwise.
*/
STATIC int
xfs_ialloc_ag_alloc(
struct xfs_trans *tp,
- struct xfs_buf *agbp)
+ struct xfs_buf *agbp,
+ struct xfs_perag *pag)
{
struct xfs_agi *agi;
struct xfs_alloc_arg args;
- xfs_agnumber_t agno;
int error;
xfs_agino_t newino; /* new first inode's number */
xfs_agino_t newlen; /* new number of inodes */
int isaligned = 0; /* inode allocation at stripe */
/* unit boundary */
/* init. to full chunk */
- uint16_t allocmask = (uint16_t) -1;
struct xfs_inobt_rec_incore rec;
- struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
+ uint16_t allocmask = (uint16_t) -1;
int do_sparse = 0;
memset(&args, 0, sizeof(args));
@@ -660,14 +657,13 @@ xfs_ialloc_ag_alloc(
*/
agi = agbp->b_addr;
newino = be32_to_cpu(agi->agi_newino);
- agno = be32_to_cpu(agi->agi_seqno);
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
igeo->ialloc_blks;
if (do_sparse)
goto sparse_alloc;
if (likely(newino != NULLAGINO &&
(args.agbno < be32_to_cpu(agi->agi_length)))) {
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.type = XFS_ALLOCTYPE_THIS_BNO;
args.prod = 1;
@@ -727,7 +723,7 @@ xfs_ialloc_ag_alloc(
* For now, just allocate blocks up front.
*/
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
/*
* Allocate a fixed-size extent of inodes.
*/
@@ -748,7 +744,7 @@ xfs_ialloc_ag_alloc(
if (isaligned && args.fsbno == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.alignment = igeo->cluster_align;
if ((error = xfs_alloc_vextent(&args)))
return error;
@@ -764,7 +760,7 @@ xfs_ialloc_ag_alloc(
sparse_alloc:
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.alignment = args.mp->m_sb.sb_spino_align;
args.prod = 1;
@@ -796,7 +792,7 @@ sparse_alloc:
}
if (args.fsbno == NULLFSBLOCK)
- return 1;
+ return -EAGAIN;
ASSERT(args.len == args.minlen);
@@ -809,7 +805,7 @@ sparse_alloc:
* rather than a linear progression to prevent the next generation
* number from being easily guessable.
*/
- error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
+ error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
args.agbno, args.len, prandom_u32());
if (error)
@@ -836,12 +832,12 @@ sparse_alloc:
* if necessary. If a merge does occur, rec is updated to the
* merged record.
*/
- error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
- &rec, true);
+ error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
+ XFS_BTNUM_INO, &rec, true);
if (error == -EFSCORRUPTED) {
xfs_alert(args.mp,
"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
- XFS_AGINO_TO_INO(args.mp, agno,
+ XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
rec.ir_startino),
rec.ir_holemask, rec.ir_count);
xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
@@ -861,21 +857,20 @@ sparse_alloc:
* existing record with this one.
*/
if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
- error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
- XFS_BTNUM_FINO, &rec,
- false);
+ error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
+ XFS_BTNUM_FINO, &rec, false);
if (error)
return error;
}
} else {
/* full chunk - insert new records to both btrees */
- error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
+ error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino, newlen,
XFS_BTNUM_INO);
if (error)
return error;
if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
- error = xfs_inobt_insert(args.mp, tp, agbp, newino,
+ error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino,
newlen, XFS_BTNUM_FINO);
if (error)
return error;
@@ -887,7 +882,6 @@ sparse_alloc:
*/
be32_add_cpu(&agi->agi_count, newlen);
be32_add_cpu(&agi->agi_freecount, newlen);
- pag = agbp->b_pag;
pag->pagi_freecount += newlen;
pag->pagi_count += newlen;
agi->agi_newino = cpu_to_be32(newino);
@@ -905,139 +899,6 @@ sparse_alloc:
return 0;
}
-STATIC xfs_agnumber_t
-xfs_ialloc_next_ag(
- xfs_mount_t *mp)
-{
- xfs_agnumber_t agno;
-
- spin_lock(&mp->m_agirotor_lock);
- agno = mp->m_agirotor;
- if (++mp->m_agirotor >= mp->m_maxagi)
- mp->m_agirotor = 0;
- spin_unlock(&mp->m_agirotor_lock);
-
- return agno;
-}
-
-/*
- * Select an allocation group to look for a free inode in, based on the parent
- * inode and the mode. Return the allocation group buffer.
- */
-STATIC xfs_agnumber_t
-xfs_ialloc_ag_select(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ino_t parent, /* parent directory inode number */
- umode_t mode) /* bits set to indicate file type */
-{
- xfs_agnumber_t agcount; /* number of ag's in the filesystem */
- xfs_agnumber_t agno; /* current ag number */
- int flags; /* alloc buffer locking flags */
- xfs_extlen_t ineed; /* blocks needed for inode allocation */
- xfs_extlen_t longest = 0; /* longest extent available */
- xfs_mount_t *mp; /* mount point structure */
- int needspace; /* file mode implies space allocated */
- xfs_perag_t *pag; /* per allocation group data */
- xfs_agnumber_t pagno; /* parent (starting) ag number */
- int error;
-
- /*
- * Files of these types need at least one block if length > 0
- * (and they won't fit in the inode, but that's hard to figure out).
- */
- needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
- mp = tp->t_mountp;
- agcount = mp->m_maxagi;
- if (S_ISDIR(mode))
- pagno = xfs_ialloc_next_ag(mp);
- else {
- pagno = XFS_INO_TO_AGNO(mp, parent);
- if (pagno >= agcount)
- pagno = 0;
- }
-
- ASSERT(pagno < agcount);
-
- /*
- * Loop through allocation groups, looking for one with a little
- * free space in it. Note we don't look for free inodes, exactly.
- * Instead, we include whether there is a need to allocate inodes
- * to mean that blocks must be allocated for them,
- * if none are currently free.
- */
- agno = pagno;
- flags = XFS_ALLOC_FLAG_TRYLOCK;
- for (;;) {
- pag = xfs_perag_get(mp, agno);
- if (!pag->pagi_inodeok) {
- xfs_ialloc_next_ag(mp);
- goto nextag;
- }
-
- if (!pag->pagi_init) {
- error = xfs_ialloc_pagi_init(mp, tp, agno);
- if (error)
- goto nextag;
- }
-
- if (pag->pagi_freecount) {
- xfs_perag_put(pag);
- return agno;
- }
-
- if (!pag->pagf_init) {
- error = xfs_alloc_pagf_init(mp, tp, agno, flags);
- if (error)
- goto nextag;
- }
-
- /*
- * Check that there is enough free space for the file plus a
- * chunk of inodes if we need to allocate some. If this is the
- * first pass across the AGs, take into account the potential
- * space needed for alignment of inode chunks when checking the
- * longest contiguous free space in the AG - this prevents us
- * from getting ENOSPC because we have free space larger than
- * ialloc_blks but alignment constraints prevent us from using
- * it.
- *
- * If we can't find an AG with space for full alignment slack to
- * be taken into account, we must be near ENOSPC in all AGs.
- * Hence we don't include alignment for the second pass and so
- * if we fail allocation due to alignment issues then it is most
- * likely a real ENOSPC condition.
- */
- ineed = M_IGEO(mp)->ialloc_min_blks;
- if (flags && ineed > 1)
- ineed += M_IGEO(mp)->cluster_align;
- longest = pag->pagf_longest;
- if (!longest)
- longest = pag->pagf_flcount > 0;
-
- if (pag->pagf_freeblks >= needspace + ineed &&
- longest >= ineed) {
- xfs_perag_put(pag);
- return agno;
- }
-nextag:
- xfs_perag_put(pag);
- /*
- * No point in iterating over the rest, if we're shutting
- * down.
- */
- if (XFS_FORCED_SHUTDOWN(mp))
- return NULLAGNUMBER;
- agno++;
- if (agno >= agcount)
- agno = 0;
- if (agno == pagno) {
- if (flags == 0)
- return NULLAGNUMBER;
- flags = 0;
- }
- }
-}
-
/*
* Try to retrieve the next record to the left/right from the current one.
*/
@@ -1123,15 +984,14 @@ STATIC int
xfs_dialloc_ag_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_ino_t parent,
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
- struct xfs_perag *pag = agbp->b_pag;
struct xfs_btree_cur *cur, *tcur;
struct xfs_inobt_rec_incore rec, trec;
xfs_ino_t ino;
@@ -1145,7 +1005,7 @@ xfs_dialloc_ag_inobt(
ASSERT(pag->pagi_freecount > 0);
restart_pagno:
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
/*
* If pagino is 0 (this is the root inode allocation) use newino.
* This must work because we've just allocated some.
@@ -1153,14 +1013,14 @@ xfs_dialloc_ag_inobt(
if (!pagino)
pagino = be32_to_cpu(agi->agi_newino);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
/*
* If in the same AG as the parent, try to get near the parent.
*/
- if (pagno == agno) {
+ if (pagno == pag->pag_agno) {
int doneleft; /* done, to the left */
int doneright; /* done, to the right */
@@ -1363,7 +1223,7 @@ alloc_inode:
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
- ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
rec.ir_free &= ~XFS_INOBT_MASK(offset);
rec.ir_freecount--;
error = xfs_inobt_update(cur, &rec);
@@ -1373,7 +1233,7 @@ alloc_inode:
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
pag->pagi_freecount--;
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -1568,16 +1428,16 @@ xfs_dialloc_ag_update_inobt(
* The caller selected an AG for us, and made sure that free inodes are
* available.
*/
-int
+static int
xfs_dialloc_ag(
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_ino_t parent,
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
struct xfs_btree_cur *cur; /* finobt cursor */
@@ -1589,7 +1449,7 @@ xfs_dialloc_ag(
int i;
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
- return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
+ return xfs_dialloc_ag_inobt(tp, agbp, pag, parent, inop);
/*
* If pagino is 0 (this is the root inode allocation) use newino.
@@ -1598,9 +1458,9 @@ xfs_dialloc_ag(
if (!pagino)
pagino = be32_to_cpu(agi->agi_newino);
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error_cur;
@@ -1609,7 +1469,7 @@ xfs_dialloc_ag(
* parent. If so, find the closest available inode to the parent. If
* not, consider the agi hint or find the first free inode in the AG.
*/
- if (agno == pagno)
+ if (pag->pag_agno == pagno)
error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
else
error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
@@ -1621,7 +1481,7 @@ xfs_dialloc_ag(
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
- ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
/*
* Modify or remove the finobt record.
@@ -1641,9 +1501,9 @@ xfs_dialloc_ag(
* the original freecount. If all is well, make the equivalent update to
* the inobt using the finobt record and offset information.
*/
- icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ icur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
- error = xfs_check_agi_freecount(icur, agi);
+ error = xfs_check_agi_freecount(icur);
if (error)
goto error_icur;
@@ -1657,14 +1517,14 @@ xfs_dialloc_ag(
*/
be32_add_cpu(&agi->agi_freecount, -1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- agbp->b_pag->pagi_freecount--;
+ pag->pagi_freecount--;
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
- error = xfs_check_agi_freecount(icur, agi);
+ error = xfs_check_agi_freecount(icur);
if (error)
goto error_icur;
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error_icur;
@@ -1708,54 +1568,195 @@ xfs_dialloc_roll(
/* Re-attach the quota info that we detached from prev trx. */
tp->t_dqinfo = dqinfo;
+ /*
+ * Join the buffer even on commit error so that the buffer is released
+ * when the caller cancels the transaction and doesn't have to handle
+ * this error case specially.
+ */
+ xfs_trans_bjoin(tp, agibp);
*tpp = tp;
+ return error;
+}
+
+static xfs_agnumber_t
+xfs_ialloc_next_ag(
+ xfs_mount_t *mp)
+{
+ xfs_agnumber_t agno;
+
+ spin_lock(&mp->m_agirotor_lock);
+ agno = mp->m_agirotor;
+ if (++mp->m_agirotor >= mp->m_maxagi)
+ mp->m_agirotor = 0;
+ spin_unlock(&mp->m_agirotor_lock);
+
+ return agno;
+}
+
+static bool
+xfs_dialloc_good_ag(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ umode_t mode,
+ int flags,
+ bool ok_alloc)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_extlen_t ineed;
+ xfs_extlen_t longest = 0;
+ int needspace;
+ int error;
+
+ if (!pag->pagi_inodeok)
+ return false;
+
+ if (!pag->pagi_init) {
+ error = xfs_ialloc_pagi_init(mp, tp, pag->pag_agno);
+ if (error)
+ return false;
+ }
+
+ if (pag->pagi_freecount)
+ return true;
+ if (!ok_alloc)
+ return false;
+
+ if (!pag->pagf_init) {
+ error = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, flags);
+ if (error)
+ return false;
+ }
+
+ /*
+ * Check that there is enough free space for the file plus a chunk of
+ * inodes if we need to allocate some. If this is the first pass across
+ * the AGs, take into account the potential space needed for alignment
+ * of inode chunks when checking the longest contiguous free space in
+ * the AG - this prevents us from getting ENOSPC because we have free
+ * space larger than ialloc_blks but alignment constraints prevent us
+ * from using it.
+ *
+ * If we can't find an AG with space for full alignment slack to be
+ * taken into account, we must be near ENOSPC in all AGs. Hence we
+ * don't include alignment for the second pass and so if we fail
+ * allocation due to alignment issues then it is most likely a real
+ * ENOSPC condition.
+ *
+ * XXX(dgc): this calculation is now bogus thanks to the per-ag
+ * reservations that xfs_alloc_fix_freelist() now does via
+ * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
+ * be more than large enough for the check below to succeed, but
+ * xfs_alloc_space_available() will fail because of the non-zero
+ * metadata reservation and hence we won't actually be able to allocate
+ * more inodes in this AG. We do soooo much unnecessary work near ENOSPC
+ * because of this.
+ */
+ ineed = M_IGEO(mp)->ialloc_min_blks;
+ if (flags && ineed > 1)
+ ineed += M_IGEO(mp)->cluster_align;
+ longest = pag->pagf_longest;
+ if (!longest)
+ longest = pag->pagf_flcount > 0;
+ needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
+
+ if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
+ return false;
+ return true;
+}
+
+static int
+xfs_dialloc_try_ag(
+ struct xfs_trans **tpp,
+ struct xfs_perag *pag,
+ xfs_ino_t parent,
+ xfs_ino_t *new_ino,
+ bool ok_alloc)
+{
+ struct xfs_buf *agbp;
+ xfs_ino_t ino;
+ int error;
+
+ /*
+ * Read in the AGI buffer and recheck with the AGI buffer
+ * lock held.
+ */
+ error = xfs_ialloc_read_agi(pag->pag_mount, *tpp, pag->pag_agno, &agbp);
if (error)
return error;
- xfs_trans_bjoin(tp, agibp);
- return 0;
+
+ if (!pag->pagi_freecount) {
+ if (!ok_alloc) {
+ error = -EAGAIN;
+ goto out_release;
+ }
+
+ error = xfs_ialloc_ag_alloc(*tpp, agbp, pag);
+ if (error < 0)
+ goto out_release;
+
+ /*
+ * We successfully allocated space for an inode cluster in this
+ * AG. Roll the transaction so that we can allocate one of the
+ * new inodes.
+ */
+ ASSERT(pag->pagi_freecount > 0);
+ error = xfs_dialloc_roll(tpp, agbp);
+ if (error)
+ goto out_release;
+ }
+
+ /* Allocate an inode in the found AG */
+ error = xfs_dialloc_ag(*tpp, agbp, pag, parent, &ino);
+ if (!error)
+ *new_ino = ino;
+ return error;
+
+out_release:
+ xfs_trans_brelse(*tpp, agbp);
+ return error;
}
/*
- * Select and prepare an AG for inode allocation.
+ * Allocate an on-disk inode.
*
* Mode is used to tell whether the new inode is a directory and hence where to
- * locate it.
- *
- * This function will ensure that the selected AG has free inodes available to
- * allocate from. The selected AGI will be returned locked to the caller, and it
- * will allocate more free inodes if required. If no free inodes are found or
- * can be allocated, no AGI will be returned.
+ * locate it. The on-disk inode that is allocated will be returned in @new_ino
+ * on success; otherwise an error is returned to indicate the failure (e.g.
+ * -ENOSPC).
*/
int
-xfs_dialloc_select_ag(
+xfs_dialloc(
struct xfs_trans **tpp,
xfs_ino_t parent,
umode_t mode,
- struct xfs_buf **IO_agbp)
+ xfs_ino_t *new_ino)
{
struct xfs_mount *mp = (*tpp)->t_mountp;
- struct xfs_buf *agbp;
xfs_agnumber_t agno;
- int error;
- bool noroom = false;
+ int error = 0;
xfs_agnumber_t start_agno;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
- bool okalloc = true;
-
- *IO_agbp = NULL;
+ bool ok_alloc = true;
+ int flags;
+ xfs_ino_t ino;
/*
- * We do not have an agbp, so select an initial allocation
- * group for inode allocation.
+ * Directories, symlinks, and regular files frequently allocate at least
+ * one block, so factor in that potential expansion when we examine whether
+ * an AG has enough space for file creation.
*/
- start_agno = xfs_ialloc_ag_select(*tpp, parent, mode);
- if (start_agno == NULLAGNUMBER)
- return 0;
+ if (S_ISDIR(mode))
+ start_agno = xfs_ialloc_next_ag(mp);
+ else {
+ start_agno = XFS_INO_TO_AGNO(mp, parent);
+ if (start_agno >= mp->m_maxagi)
+ start_agno = 0;
+ }
/*
* If we have already hit the ceiling of inode blocks then clear
- * okalloc so we scan all available agi structures for a free
+ * ok_alloc so we scan all available agi structures for a free
* inode.
*
* Read rough value of mp->m_icount by percpu_counter_read_positive,
@@ -1764,8 +1765,7 @@ xfs_dialloc_select_ag(
if (igeo->maxicount &&
percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
> igeo->maxicount) {
- noroom = true;
- okalloc = false;
+ ok_alloc = false;
}
/*
@@ -1774,82 +1774,36 @@ xfs_dialloc_select_ag(
* allocation groups upward, wrapping at the end.
*/
agno = start_agno;
+ flags = XFS_ALLOC_FLAG_TRYLOCK;
for (;;) {
pag = xfs_perag_get(mp, agno);
- if (!pag->pagi_inodeok) {
- xfs_ialloc_next_ag(mp);
- goto nextag;
- }
-
- if (!pag->pagi_init) {
- error = xfs_ialloc_pagi_init(mp, *tpp, agno);
- if (error)
+ if (xfs_dialloc_good_ag(*tpp, pag, mode, flags, ok_alloc)) {
+ error = xfs_dialloc_try_ag(tpp, pag, parent,
+ &ino, ok_alloc);
+ if (error != -EAGAIN)
break;
}
- /*
- * Do a first racy fast path check if this AG is usable.
- */
- if (!pag->pagi_freecount && !okalloc)
- goto nextag;
-
- /*
- * Then read in the AGI buffer and recheck with the AGI buffer
- * lock held.
- */
- error = xfs_ialloc_read_agi(mp, *tpp, agno, &agbp);
- if (error)
- break;
-
- if (pag->pagi_freecount) {
- xfs_perag_put(pag);
- goto found_ag;
- }
-
- if (!okalloc)
- goto nextag_relse_buffer;
-
- error = xfs_ialloc_ag_alloc(*tpp, agbp);
- if (error < 0) {
- xfs_trans_brelse(*tpp, agbp);
-
- if (error == -ENOSPC)
- error = 0;
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = -EFSCORRUPTED;
break;
}
-
- if (error == 0) {
- /*
- * We successfully allocated space for an inode cluster
- * in this AG. Roll the transaction so that we can
- * allocate one of the new inodes.
- */
- ASSERT(pag->pagi_freecount > 0);
- xfs_perag_put(pag);
-
- error = xfs_dialloc_roll(tpp, agbp);
- if (error) {
- xfs_buf_relse(agbp);
- return error;
+ if (++agno == mp->m_maxagi)
+ agno = 0;
+ if (agno == start_agno) {
+ if (!flags) {
+ error = -ENOSPC;
+ break;
}
- goto found_ag;
+ flags = 0;
}
-
-nextag_relse_buffer:
- xfs_trans_brelse(*tpp, agbp);
-nextag:
xfs_perag_put(pag);
- if (++agno == mp->m_sb.sb_agcount)
- agno = 0;
- if (agno == start_agno)
- return noroom ? -ENOSPC : 0;
}
+ if (!error)
+ *new_ino = ino;
xfs_perag_put(pag);
return error;
-found_ag:
- *IO_agbp = agbp;
- return 0;
}
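
For callers, the API change collapses the old two-step (select an AG and hand
back a locked AGI buffer, then allocate from it) into a single call. A hedged
sketch of the new call-site shape; the real updates to the xfs_ialloc() and
xfs_create() callers are elsewhere in this series:

	xfs_ino_t	ino;
	int		error;

	/* Picks an AG, allocates a chunk if needed, returns the new inode. */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (error)
		return error;
	/* No AGI buffer escapes; "ino" is ready to be instantiated in core. */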
/*
@@ -1935,12 +1889,12 @@ xfs_difree_inobt(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
int ilen;
@@ -1954,9 +1908,9 @@ xfs_difree_inobt(
/*
* Initialize the cursor.
*/
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -2005,7 +1959,8 @@ xfs_difree_inobt(
struct xfs_perag *pag = agbp->b_pag;
xic->deleted = true;
- xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
+ xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
+ rec.ir_startino);
xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
/*
@@ -2028,7 +1983,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(tp, agno, &rec);
+ xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
} else {
xic->deleted = false;
@@ -2044,11 +1999,11 @@ xfs_difree_inobt(
*/
be32_add_cpu(&agi->agi_freecount, 1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- agbp->b_pag->pagi_freecount++;
+ pag->pagi_freecount++;
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
}
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -2069,18 +2024,17 @@ xfs_difree_finobt(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
{
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
int offset = agino - ibtrec->ir_startino;
int error;
int i;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
if (error)
@@ -2158,7 +2112,7 @@ xfs_difree_finobt(
}
out:
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error;
@@ -2178,36 +2132,33 @@ error:
*/
int
xfs_difree(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_ino_t inode, /* inode to be freed */
- struct xfs_icluster *xic) /* cluster info if deleted */
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ xfs_ino_t inode,
+ struct xfs_icluster *xic)
{
/* REFERENCED */
xfs_agblock_t agbno; /* block number containing inode */
struct xfs_buf *agbp; /* buffer for allocation group header */
xfs_agino_t agino; /* allocation group inode number */
- xfs_agnumber_t agno; /* allocation group number */
int error; /* error return value */
- struct xfs_mount *mp; /* mount structure for filesystem */
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_inobt_rec_incore rec;/* btree record */
- mp = tp->t_mountp;
-
/*
* Break up inode number into its components.
*/
- agno = XFS_INO_TO_AGNO(mp, inode);
- if (agno >= mp->m_sb.sb_agcount) {
- xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
- __func__, agno, mp->m_sb.sb_agcount);
+ if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
+ xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
+ __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
ASSERT(0);
return -EINVAL;
}
agino = XFS_INO_TO_AGINO(mp, inode);
- if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
__func__, (unsigned long long)inode,
- (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
+ (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
ASSERT(0);
return -EINVAL;
}
@@ -2221,7 +2172,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error) {
xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
__func__, error);
@@ -2231,7 +2182,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, pag, agino, xic, &rec);
if (error)
goto error0;
@@ -2239,7 +2190,7 @@ xfs_difree(
* Fix up the free inode btree.
*/
if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
- error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
+ error = xfs_difree_finobt(mp, tp, agbp, pag, agino, &rec);
if (error)
goto error0;
}
@@ -2254,7 +2205,7 @@ STATIC int
xfs_imap_lookup(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t agino,
xfs_agblock_t agbno,
xfs_agblock_t *chunk_agbno,
@@ -2267,11 +2218,11 @@ xfs_imap_lookup(
int error;
int i;
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error) {
xfs_alert(mp,
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
- __func__, error, agno);
+ __func__, error, pag->pag_agno);
return error;
}
@@ -2281,7 +2232,7 @@ xfs_imap_lookup(
* we have a record, we need to ensure it contains the inode number
* we are looking up.
*/
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
if (!error) {
if (i)
@@ -2315,42 +2266,44 @@ xfs_imap_lookup(
*/
int
xfs_imap(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ino_t ino, /* inode to locate */
- struct xfs_imap *imap, /* location map structure */
- uint flags) /* flags for inode btree lookup */
+ struct xfs_mount *mp, /* file system mount structure */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_ino_t ino, /* inode to locate */
+ struct xfs_imap *imap, /* location map structure */
+ uint flags) /* flags for inode btree lookup */
{
- xfs_agblock_t agbno; /* block number of inode in the alloc group */
- xfs_agino_t agino; /* inode number within alloc group */
- xfs_agnumber_t agno; /* allocation group number */
- xfs_agblock_t chunk_agbno; /* first block in inode chunk */
- xfs_agblock_t cluster_agbno; /* first block in inode cluster */
- int error; /* error code */
- int offset; /* index of inode in its buffer */
- xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
+ xfs_agblock_t agbno; /* block number of inode in the alloc group */
+ xfs_agino_t agino; /* inode number within alloc group */
+ xfs_agblock_t chunk_agbno; /* first block in inode chunk */
+ xfs_agblock_t cluster_agbno; /* first block in inode cluster */
+ int error; /* error code */
+ int offset; /* index of inode in its buffer */
+ xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
+ struct xfs_perag *pag;
ASSERT(ino != NULLFSINO);
/*
* Split up the inode number into its parts.
*/
- agno = XFS_INO_TO_AGNO(mp, ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
agino = XFS_INO_TO_AGINO(mp, ino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
- if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
- ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (!pag || agbno >= mp->m_sb.sb_agblocks ||
+ ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
+ error = -EINVAL;
#ifdef DEBUG
/*
* Don't output diagnostic information for untrusted inodes
* as they can be invalid without implying corruption.
*/
if (flags & XFS_IGET_UNTRUSTED)
- return -EINVAL;
- if (agno >= mp->m_sb.sb_agcount) {
+ goto out_drop;
+ if (!pag) {
xfs_alert(mp,
"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
- __func__, agno, mp->m_sb.sb_agcount);
+ __func__, XFS_INO_TO_AGNO(mp, ino),
+ mp->m_sb.sb_agcount);
}
if (agbno >= mp->m_sb.sb_agblocks) {
xfs_alert(mp,
@@ -2358,15 +2311,15 @@ xfs_imap(
__func__, (unsigned long long)agbno,
(unsigned long)mp->m_sb.sb_agblocks);
}
- if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
xfs_alert(mp,
"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
__func__, ino,
- XFS_AGINO_TO_INO(mp, agno, agino));
+ XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
}
xfs_stack_trace();
#endif /* DEBUG */
- return -EINVAL;
+ goto out_drop;
}
/*
@@ -2377,10 +2330,10 @@ xfs_imap(
* in all cases where an untrusted inode number is passed.
*/
if (flags & XFS_IGET_UNTRUSTED) {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
&chunk_agbno, &offset_agbno, flags);
if (error)
- return error;
+ goto out_drop;
goto out_map;
}
@@ -2392,11 +2345,12 @@ xfs_imap(
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
- imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
+ imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
imap->im_len = XFS_FSB_TO_BB(mp, 1);
imap->im_boffset = (unsigned short)(offset <<
mp->m_sb.sb_inodelog);
- return 0;
+ error = 0;
+ goto out_drop;
}
/*
@@ -2408,10 +2362,10 @@ xfs_imap(
offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
&chunk_agbno, &offset_agbno, flags);
if (error)
- return error;
+ goto out_drop;
}
out_map:
@@ -2422,7 +2376,7 @@ out_map:
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
- imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
+ imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
@@ -2439,9 +2393,14 @@ out_map:
__func__, (unsigned long long) imap->im_blkno,
(unsigned long long) imap->im_len,
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
- return -EINVAL;
+ error = -EINVAL;
+ goto out_drop;
}
- return 0;
+ error = 0;
+out_drop:
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
/*
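The xfs_imap() hunks above swap a raw agno for a referenced perag and funnel every exit through a single out_drop label, so the reference taken by xfs_perag_get() is always dropped. A minimal sketch of the lifetime pattern this establishes, using only helpers visible in the diff (the diagnostics and the actual mapping work are elided):

	/*
	 * Sketch only: the shape of the new xfs_imap() reference
	 * handling, not the verbatim kernel function.
	 */
	int
	imap_pattern(struct xfs_mount *mp, xfs_ino_t ino)
	{
		struct xfs_perag	*pag;
		int			error;

		/* Returns NULL for an out-of-range AG, so check it. */
		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
		if (!pag) {
			error = -EINVAL;
			goto out_drop;
		}

		/* ... lookups key off pag->pag_agno from here on ... */
		error = 0;

	out_drop:
		if (pag)
			xfs_perag_put(pag);	/* drop the active reference */
		return error;
	}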
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 3511086a7ae1..9df7c80408ff 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -33,42 +33,14 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
}
/*
- * Allocate an inode on disk.
- * Mode is used to tell whether the new inode will need space, and whether
- * it is a directory.
- *
- * There are two phases to inode allocation: selecting an AG and ensuring
- * that it contains free inodes, followed by allocating one of the free
- * inodes. xfs_dialloc_select_ag() does the former and returns a locked AGI
- * to the caller, ensuring that followup call to xfs_dialloc_ag() will
- * have free inodes to allocate from. xfs_dialloc_ag() will return the inode
- * number of the free inode we allocated.
+ * Allocate an inode on disk. Mode is used to tell whether the new inode will
+ * need space, and whether it is a directory.
*/
-int /* error */
-xfs_dialloc_select_ag(
- struct xfs_trans **tpp, /* double pointer of transaction */
- xfs_ino_t parent, /* parent inode (directory) */
- umode_t mode, /* mode bits for new inode */
- struct xfs_buf **IO_agbp);
-
-int
-xfs_dialloc_ag(
- struct xfs_trans *tp,
- struct xfs_buf *agbp,
- xfs_ino_t parent,
- xfs_ino_t *inop);
+int xfs_dialloc(struct xfs_trans **tpp, xfs_ino_t parent, umode_t mode,
+ xfs_ino_t *new_ino);
-/*
- * Free disk inode. Carefully avoids touching the incore inode, all
- * manipulations incore are the caller's responsibility.
- * The on-disk inode is not changed by this operation, only the
- * btree (free inode mask) is changed.
- */
-int /* error */
-xfs_difree(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_ino_t inode, /* inode to be freed */
- struct xfs_icluster *ifree); /* cluster info if deleted */
+int xfs_difree(struct xfs_trans *tp, struct xfs_perag *pag,
+ xfs_ino_t ino, struct xfs_icluster *ifree);
/*
* Return the location of the inode in imap, for mapping it into a buffer.
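With xfs_dialloc_select_ag()/xfs_dialloc_ag() collapsed into a single xfs_dialloc() and xfs_difree() now taking a perag, the calling convention changes shape. A hedged caller sketch against the new prototypes above; error, tp, mp, ip, mode, parent_ino and xic are assumed to be set up as in the real callers in fs/xfs/xfs_inode.c:

	xfs_ino_t		ino;
	struct xfs_perag	*pag;

	/* Allocation: one call replaces the old two-phase pair. */
	error = xfs_dialloc(&tp, parent_ino, mode, &ino);

	/* Freeing: caller supplies a referenced perag for the inode's AG. */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	error = xfs_difree(tp, pag, ip->i_ino, &xic);
	xfs_perag_put(pag);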
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 4c5831646bd9..823a038939f8 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -20,6 +20,7 @@
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
STATIC int
xfs_inobt_get_minrecs(
@@ -34,8 +35,7 @@ xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno,
- cur->bc_btnum);
+ cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
STATIC void
@@ -102,7 +102,7 @@ __xfs_inobt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.oinfo = XFS_RMAP_OINFO_INOBT;
- args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
args.minlen = 1;
args.maxlen = 1;
args.prod = 1;
@@ -235,7 +235,7 @@ xfs_inobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_root;
}
@@ -247,7 +247,7 @@ xfs_finobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_free_root;
}
@@ -427,7 +427,7 @@ static struct xfs_btree_cur *
xfs_inobt_init_common(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
+ struct xfs_perag *pag,
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_btree_cur *cur;
@@ -449,7 +449,9 @@ xfs_inobt_init_common(
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_ag.agno = agno;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
return cur;
}
@@ -459,13 +461,13 @@ xfs_inobt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
struct xfs_agi *agi = agbp->b_addr;
- cur = xfs_inobt_init_common(mp, tp, agno, btnum);
+ cur = xfs_inobt_init_common(mp, tp, pag, btnum);
if (btnum == XFS_BTNUM_INO)
cur->bc_nlevels = be32_to_cpu(agi->agi_level);
else
@@ -479,12 +481,12 @@ struct xfs_btree_cur *
xfs_inobt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
+ cur = xfs_inobt_init_common(mp, NULL, pag, btnum);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -656,7 +658,7 @@ int
xfs_inobt_cur(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t which,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp)
@@ -667,11 +669,11 @@ xfs_inobt_cur(
ASSERT(*agi_bpp == NULL);
ASSERT(*curpp == NULL);
- error = xfs_ialloc_read_agi(mp, tp, agno, agi_bpp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
if (error)
return error;
- cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, agno, which);
+ cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
*curpp = cur;
return 0;
}
@@ -680,7 +682,7 @@ static int
xfs_inobt_count_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
{
@@ -688,7 +690,7 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur = NULL;
int error;
- error = xfs_inobt_cur(mp, tp, agno, btnum, &cur, &agbp);
+ error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
if (error)
return error;
@@ -704,14 +706,14 @@ static int
xfs_finobt_read_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *tree_blocks)
{
struct xfs_buf *agbp;
struct xfs_agi *agi;
int error;
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error)
return error;
@@ -728,7 +730,7 @@ int
xfs_finobt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -739,14 +741,14 @@ xfs_finobt_calc_reserves(
return 0;
if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
- error = xfs_finobt_read_blocks(mp, tp, agno, &tree_len);
+ error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
else
- error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO,
+ error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
&tree_len);
if (error)
return error;
- *ask += xfs_inobt_max_size(mp, agno);
+ *ask += xfs_inobt_max_size(mp, pag->pag_agno);
*used += tree_len;
return 0;
}
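A pattern worth calling out in xfs_inobt_init_common() above: the cursor now takes its own perag reference (atomic_inc(&pag->pag_ref)) rather than borrowing the caller's, so the per-AG structure stays live for as long as the cursor does. Assuming the matching xfs_perag_put() sits in the cursor teardown path in xfs_btree_del_cursor() (changed elsewhere in this series), the lifetimes look like:

	struct xfs_perag	*pag = xfs_perag_get(mp, agno);	/* caller ref */
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
	/* init_common bumped pag->pag_ref: the cursor holds its own ref */

	xfs_perag_put(pag);	/* caller's ref may be dropped early... */

	/* ...the cursor's ref keeps pag live until teardown: */
	xfs_btree_del_cursor(cur, 0);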
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index 35bbd978c272..e530c82b2217 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
/*
* Btree block header size depends on a superblock flag.
@@ -45,11 +46,11 @@ struct xfs_mount;
(maxrecs) * sizeof(xfs_inobt_key_t) + \
((index) - 1) * sizeof(xfs_inobt_ptr_t)))
-extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
- struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t,
- xfs_btnum_t);
+extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *agbp,
+ struct xfs_perag *pag, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
@@ -64,11 +65,11 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#endif /* DEBUG */
int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_perag *pag, xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
int xfs_inobt_cur(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_btnum_t btnum,
+ struct xfs_perag *pag, xfs_btnum_t btnum,
struct xfs_btree_cur **curpp, struct xfs_buf **agi_bpp);
void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index f3254a4f4cb4..04ce361688f7 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -612,7 +612,7 @@ xfs_inode_validate_extsize(
*/
if (rt_flag)
- blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+ blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
else
blocksize_bytes = mp->m_sb.sb_blocksize;
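The xfs_inode_buf.c hunk is a pure readability cleanup: the open-coded shift by sb_blocklog becomes the canonical XFS_FSB_TO_B() conversion helper. For reference, the macro is roughly the following (quoted from memory of xfs_mount.h, so treat as approximate):

	#define XFS_FSB_TO_B(mp, fsbno) \
		((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)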
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 3e15ea29fb8d..d548ea4b6aab 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -34,9 +34,6 @@ typedef uint32_t xlog_tid_t;
#define XLOG_MIN_RECORD_BSHIFT 14 /* 16384 == 1 << 14 */
#define XLOG_BIG_RECORD_BSHIFT 15 /* 32k == 1 << 15 */
#define XLOG_MAX_RECORD_BSHIFT 18 /* 256k == 1 << 18 */
-#define XLOG_BTOLSUNIT(log, b) (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
- (log)->l_mp->m_sb.sb_logsunit)
-#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)
#define XLOG_HEADER_SIZE 512
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 2037b9f23069..860a0c9801ba 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -22,6 +22,7 @@
#include "xfs_bit.h"
#include "xfs_refcount.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
@@ -46,7 +47,7 @@ xfs_refcount_lookup_le(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -63,7 +64,7 @@ xfs_refcount_lookup_ge(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_GE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -80,7 +81,7 @@ xfs_refcount_lookup_eq(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -108,7 +109,7 @@ xfs_refcount_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
xfs_agblock_t realstart;
@@ -119,7 +120,7 @@ xfs_refcount_get_rec(
xfs_refcount_btrec_to_irec(rec, irec);
- agno = cur->bc_ag.agno;
+ agno = cur->bc_ag.pag->pag_agno;
if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
goto out_bad_rec;
@@ -144,7 +145,7 @@ xfs_refcount_get_rec(
if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
goto out_bad_rec;
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
return 0;
out_bad_rec:
@@ -169,14 +170,14 @@ xfs_refcount_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_refcount_update_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -193,7 +194,7 @@ xfs_refcount_insert(
{
int error;
- trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
@@ -208,7 +209,7 @@ xfs_refcount_insert(
out_error:
if (error)
trace_xfs_refcount_insert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -234,7 +235,7 @@ xfs_refcount_delete(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.agno, &irec);
+ trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec);
error = xfs_btree_delete(cur, i);
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
error = -EFSCORRUPTED;
@@ -246,7 +247,7 @@ xfs_refcount_delete(
out_error:
if (error)
trace_xfs_refcount_delete_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -366,7 +367,7 @@ xfs_refcount_split_extent(
return 0;
*shape_changed = true;
- trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
&rcext, agbno);
/* Establish the right extent. */
@@ -391,7 +392,7 @@ xfs_refcount_split_extent(
out_error:
trace_xfs_refcount_split_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -411,7 +412,7 @@ xfs_refcount_merge_center_extents(
int found_rec;
trace_xfs_refcount_merge_center_extents(cur->bc_mp,
- cur->bc_ag.agno, left, center, right);
+ cur->bc_ag.pag->pag_agno, left, center, right);
/*
* Make sure the center and right extents are not in the btree.
@@ -468,7 +469,7 @@ xfs_refcount_merge_center_extents(
out_error:
trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -487,7 +488,7 @@ xfs_refcount_merge_left_extent(
int found_rec;
trace_xfs_refcount_merge_left_extent(cur->bc_mp,
- cur->bc_ag.agno, left, cleft);
+ cur->bc_ag.pag->pag_agno, left, cleft);
/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
if (cleft->rc_refcount > 1) {
@@ -530,7 +531,7 @@ xfs_refcount_merge_left_extent(
out_error:
trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -548,7 +549,7 @@ xfs_refcount_merge_right_extent(
int found_rec;
trace_xfs_refcount_merge_right_extent(cur->bc_mp,
- cur->bc_ag.agno, cright, right);
+ cur->bc_ag.pag->pag_agno, cright, right);
/*
* If the extent ending at agbno+aglen (cright) wasn't synthesized,
@@ -594,7 +595,7 @@ xfs_refcount_merge_right_extent(
out_error:
trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -679,13 +680,13 @@ xfs_refcount_find_left_extents(
cleft->rc_blockcount = aglen;
cleft->rc_refcount = 1;
}
- trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
left, cleft, agbno);
return error;
out_error:
trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -768,13 +769,13 @@ xfs_refcount_find_right_extents(
cright->rc_blockcount = aglen;
cright->rc_refcount = 1;
}
- trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
cright, right, agbno + aglen);
return error;
out_error:
trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -952,7 +953,7 @@ xfs_refcount_adjust_extents(
ext.rc_startblock - *agbno);
tmp.rc_refcount = 1 + adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &tmp);
+ cur->bc_ag.pag->pag_agno, &tmp);
/*
* Either cover the hole (increment) or
@@ -971,7 +972,7 @@ xfs_refcount_adjust_extents(
cur->bc_ag.refc.nr_ops++;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_ag.agno,
+ cur->bc_ag.pag->pag_agno,
tmp.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno,
tmp.rc_blockcount, oinfo);
@@ -998,7 +999,7 @@ xfs_refcount_adjust_extents(
goto skip;
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &ext);
+ cur->bc_ag.pag->pag_agno, &ext);
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
@@ -1016,7 +1017,7 @@ xfs_refcount_adjust_extents(
goto advloop;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_ag.agno,
+ cur->bc_ag.pag->pag_agno,
ext.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
oinfo);
@@ -1035,7 +1036,7 @@ advloop:
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1057,10 +1058,10 @@ xfs_refcount_adjust(
*new_agbno = agbno;
*new_aglen = aglen;
if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
- trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
else
- trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
/*
@@ -1099,7 +1100,7 @@ xfs_refcount_adjust(
return 0;
out_error:
- trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -1142,30 +1143,30 @@ xfs_refcount_finish_one(
struct xfs_btree_cur *rcur;
struct xfs_buf *agbp = NULL;
int error = 0;
- xfs_agnumber_t agno;
xfs_agblock_t bno;
xfs_agblock_t new_agbno;
unsigned long nr_ops = 0;
int shape_changes = 0;
+ struct xfs_perag *pag;
- agno = XFS_FSB_TO_AGNO(mp, startblock);
- ASSERT(agno != NULLAGNUMBER);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
bno = XFS_FSB_TO_AGBNO(mp, startblock);
trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
type, XFS_FSB_TO_AGBNO(mp, startblock),
blockcount);
- if (XFS_TEST_ERROR(false, mp,
- XFS_ERRTAG_REFCOUNT_FINISH_ONE))
- return -EIO;
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) {
+ error = -EIO;
+ goto out_drop;
+ }
/*
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_ag.agno != agno) {
+ if (rcur != NULL && rcur->bc_ag.pag != pag) {
nr_ops = rcur->bc_ag.refc.nr_ops;
shape_changes = rcur->bc_ag.refc.shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
@@ -1173,12 +1174,12 @@ xfs_refcount_finish_one(
*pcur = NULL;
}
if (rcur == NULL) {
- error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
+ error = xfs_alloc_read_agf(tp->t_mountp, tp, pag->pag_agno,
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
- return error;
+ goto out_drop;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
rcur->bc_ag.refc.nr_ops = nr_ops;
rcur->bc_ag.refc.shape_changes = shape_changes;
}
@@ -1188,12 +1189,12 @@ xfs_refcount_finish_one(
case XFS_REFCOUNT_INCREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_INCREASE, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
break;
case XFS_REFCOUNT_DECREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_DECREASE, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
break;
case XFS_REFCOUNT_ALLOC_COW:
*new_fsb = startblock + blockcount;
@@ -1210,8 +1211,10 @@ xfs_refcount_finish_one(
error = -EFSCORRUPTED;
}
if (!error && *new_len > 0)
- trace_xfs_refcount_finish_one_leftover(mp, agno, type,
+ trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type,
bno, blockcount, new_agbno, *new_len);
+out_drop:
+ xfs_perag_put(pag);
return error;
}
@@ -1294,7 +1297,7 @@ xfs_refcount_find_shared(
int have;
int error;
- trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
/* By default, skip the whole range */
@@ -1374,12 +1377,12 @@ xfs_refcount_find_shared(
done:
trace_xfs_refcount_find_shared_result(cur->bc_mp,
- cur->bc_ag.agno, *fbno, *flen);
+ cur->bc_ag.pag->pag_agno, *fbno, *flen);
out_error:
if (error)
trace_xfs_refcount_find_shared_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1476,7 +1479,7 @@ xfs_refcount_adjust_cow_extents(
tmp.rc_blockcount = aglen;
tmp.rc_refcount = 1;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &tmp);
+ cur->bc_ag.pag->pag_agno, &tmp);
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
@@ -1504,7 +1507,7 @@ xfs_refcount_adjust_cow_extents(
ext.rc_refcount = 0;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &ext);
+ cur->bc_ag.pag->pag_agno, &ext);
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
@@ -1520,7 +1523,7 @@ xfs_refcount_adjust_cow_extents(
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1566,7 +1569,7 @@ xfs_refcount_adjust_cow(
return 0;
out_error:
- trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -1580,7 +1583,7 @@ __xfs_refcount_cow_alloc(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.agno,
+ trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
agbno, aglen);
/* Add refcount btree reservation */
@@ -1597,7 +1600,7 @@ __xfs_refcount_cow_free(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.agno,
+ trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
agbno, aglen);
/* Remove refcount btree reservation */
@@ -1672,7 +1675,7 @@ xfs_refcount_recover_extent(
int
xfs_refcount_recover_cow_leftovers(
struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_trans *tp;
struct xfs_btree_cur *cur;
@@ -1704,10 +1707,10 @@ xfs_refcount_recover_cow_leftovers(
if (error)
return error;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
goto out_trans;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
/* Find all the leftover CoW staging extents. */
memset(&low, 0, sizeof(low));
@@ -1729,11 +1732,12 @@ xfs_refcount_recover_cow_leftovers(
if (error)
goto out_free;
- trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
+ trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
+ &rr->rr_rrec);
/* Free the orphan record */
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
- fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
+ fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno);
xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
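xfs_refcount_finish_one() caches a btree cursor across deferred-op items, and the AG match test above changes from comparing agno values to comparing perag pointers — valid because perag structures are per-AG singletons on a mount, so pointer equality implies agno equality. A condensed sketch of the reuse logic from these hunks (declarations elided; xfs_rmap_finish_one() further down follows the same pattern):

	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));

	rcur = *pcur;
	if (rcur != NULL && rcur->bc_ag.pag != pag) {
		/* carry op accounting over to the replacement cursor */
		nr_ops = rcur->bc_ag.refc.nr_ops;
		shape_changes = rcur->bc_ag.refc.shape_changes;
		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		error = xfs_alloc_read_agf(tp->t_mountp, tp, pag->pag_agno,
				XFS_ALLOC_FLAG_FREEING, &agbp);
		if (error)
			goto out_drop;
		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
		rcur->bc_ag.refc.nr_ops = nr_ops;
		rcur->bc_ag.refc.shape_changes = shape_changes;
	}
	/* ... adjustment work ... */
	out_drop:
		xfs_perag_put(pag);	/* every return path drops the ref */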
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 209795539c8d..9f6e9aae4da0 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -6,6 +6,13 @@
#ifndef __XFS_REFCOUNT_H__
#define __XFS_REFCOUNT_H__
+struct xfs_trans;
+struct xfs_mount;
+struct xfs_perag;
+struct xfs_btree_cur;
+struct xfs_bmbt_irec;
+struct xfs_refcount_irec;
+
extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur,
xfs_agblock_t bno, int *stat);
extern int xfs_refcount_lookup_ge(struct xfs_btree_cur *cur,
@@ -50,7 +57,7 @@ void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
void xfs_refcount_free_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
/*
* While we're adjusting the refcounts records of an extent, we have
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index a6ac60ae9421..92d336c17e83 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
@@ -20,13 +19,14 @@
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno);
+ cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
@@ -65,7 +65,7 @@ xfs_refcountbt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
+ args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
xfs_refc_block(args.mp));
args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
@@ -74,13 +74,13 @@ xfs_refcountbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto out_error;
- trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
}
- ASSERT(args.agno == cur->bc_ag.agno);
+ ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
ASSERT(args.len == 1);
new->s = cpu_to_be32(args.agbno);
@@ -105,7 +105,7 @@ xfs_refcountbt_free_block(
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
int error;
- trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
@@ -170,7 +170,7 @@ xfs_refcountbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_refcount_root;
}
@@ -316,12 +316,11 @@ static struct xfs_btree_cur *
xfs_refcountbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- ASSERT(agno != NULLAGNUMBER);
- ASSERT(agno < mp->m_sb.sb_agcount);
+ ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
cur->bc_tp = tp;
@@ -330,9 +329,12 @@ xfs_refcountbt_init_common(
cur->bc_blocklog = mp->m_sb.sb_blocklog;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
- cur->bc_ag.agno = agno;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
+
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
cur->bc_ops = &xfs_refcountbt_ops;
@@ -345,12 +347,12 @@ xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_refcountbt_init_common(mp, tp, agno);
+ cur = xfs_refcountbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
cur->bc_ag.agbp = agbp;
return cur;
@@ -361,11 +363,11 @@ struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- cur = xfs_refcountbt_init_common(mp, NULL, agno);
+ cur = xfs_refcountbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -450,7 +452,7 @@ int
xfs_refcountbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -463,8 +465,7 @@ xfs_refcountbt_calc_reserves(
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
-
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
return error;
@@ -479,7 +480,7 @@ xfs_refcountbt_calc_reserves(
* expansion. We therefore can pretend the space isn't there.
*/
if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
agblocks -= mp->m_sb.sb_logblocks;
*ask += xfs_refcountbt_max_size(mp, agblocks);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 69dc515db671..bd9ed9e1e41f 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
struct xbtree_afakeroot;
/*
@@ -46,9 +47,9 @@ struct xbtree_afakeroot;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
struct xfs_btree_cur *xfs_refcountbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
@@ -58,7 +59,7 @@ extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
- struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
+ struct xfs_trans *tp, struct xfs_perag *pag, xfs_extlen_t *ask,
xfs_extlen_t *used);
void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 10e0cf9949a2..d1dfad0204e3 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -11,6 +11,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_sb.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
@@ -21,6 +22,7 @@
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_inode.h"
+#include "xfs_ag.h"
/*
* Lookup the first record less than or equal to [bno, len, owner, offset]
@@ -79,7 +81,7 @@ xfs_rmap_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.pag->pag_agno,
irec->rm_startblock, irec->rm_blockcount,
irec->rm_owner, irec->rm_offset, irec->rm_flags);
@@ -91,7 +93,7 @@ xfs_rmap_update(
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_rmap_update_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -107,7 +109,7 @@ xfs_rmap_insert(
int i;
int error;
- trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.agno, agbno,
+ trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -133,7 +135,7 @@ xfs_rmap_insert(
done:
if (error)
trace_xfs_rmap_insert_error(rcur->bc_mp,
- rcur->bc_ag.agno, error, _RET_IP_);
+ rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -149,7 +151,7 @@ xfs_rmap_delete(
int i;
int error;
- trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.agno, agbno,
+ trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -170,7 +172,7 @@ xfs_rmap_delete(
done:
if (error)
trace_xfs_rmap_delete_error(rcur->bc_mp,
- rcur->bc_ag.agno, error, _RET_IP_);
+ rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -197,7 +199,7 @@ xfs_rmap_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
@@ -260,7 +262,7 @@ xfs_rmap_find_left_neighbor_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
- cur->bc_ag.agno, rec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -312,7 +314,7 @@ xfs_rmap_find_left_neighbor(
info.stat = stat;
trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
- cur->bc_ag.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_find_left_neighbor_helper, &info);
@@ -320,7 +322,7 @@ xfs_rmap_find_left_neighbor(
error = 0;
if (*stat)
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, irec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -336,7 +338,7 @@ xfs_rmap_lookup_le_range_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
- cur->bc_ag.agno, rec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -385,14 +387,14 @@ xfs_rmap_lookup_le_range(
info.stat = stat;
trace_xfs_rmap_lookup_le_range(cur->bc_mp,
- cur->bc_ag.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_lookup_le_range_helper, &info);
if (error == -ECANCELED)
error = 0;
if (*stat)
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, irec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -498,7 +500,7 @@ xfs_rmap_unmap(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -522,7 +524,7 @@ xfs_rmap_unmap(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, ltrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
ltoff = ltrec.rm_offset;
@@ -588,7 +590,7 @@ xfs_rmap_unmap(
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* exact match, simply remove the record from rmap tree */
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
ltrec.rm_startblock, ltrec.rm_blockcount,
ltrec.rm_owner, ltrec.rm_offset,
ltrec.rm_flags);
@@ -666,7 +668,7 @@ xfs_rmap_unmap(
else
cur->bc_rec.r.rm_offset = offset + len;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
cur->bc_rec.r.rm_startblock,
cur->bc_rec.r.rm_blockcount,
cur->bc_rec.r.rm_owner,
@@ -678,11 +680,11 @@ xfs_rmap_unmap(
}
out_done:
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_unmap_error(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_unmap_error(mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -694,7 +696,7 @@ int
xfs_rmap_free(
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo)
@@ -706,7 +708,7 @@ xfs_rmap_free(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
@@ -773,7 +775,7 @@ xfs_rmap_map(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
@@ -795,7 +797,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, ltrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
@@ -831,7 +833,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, gtrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
@@ -870,7 +872,7 @@ xfs_rmap_map(
* result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
*/
ltrec.rm_blockcount += gtrec.rm_blockcount;
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
gtrec.rm_startblock,
gtrec.rm_blockcount,
gtrec.rm_owner,
@@ -921,7 +923,7 @@ xfs_rmap_map(
cur->bc_rec.r.rm_owner = owner;
cur->bc_rec.r.rm_offset = offset;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
owner, offset, flags);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -932,11 +934,11 @@ xfs_rmap_map(
}
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_map_error(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_map_error(mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -948,7 +950,7 @@ int
xfs_rmap_alloc(
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo)
@@ -960,7 +962,7 @@ xfs_rmap_alloc(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@@ -1010,7 +1012,7 @@ xfs_rmap_convert(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -1034,7 +1036,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, PREV.rm_startblock,
+ cur->bc_ag.pag->pag_agno, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags);
@@ -1076,7 +1078,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, LEFT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, LEFT.rm_startblock,
LEFT.rm_blockcount, LEFT.rm_owner,
LEFT.rm_offset, LEFT.rm_flags);
if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
@@ -1114,7 +1116,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, RIGHT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (bno + len == RIGHT.rm_startblock &&
@@ -1132,7 +1134,7 @@ xfs_rmap_convert(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
_RET_IP_);
/* reset the cursor back to PREV */
@@ -1162,7 +1164,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1180,7 +1182,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1210,7 +1212,7 @@ xfs_rmap_convert(
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1247,7 +1249,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1326,7 +1328,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1383,7 +1385,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1414,7 +1416,7 @@ xfs_rmap_convert(
NEW = PREV;
NEW.rm_blockcount = offset - PREV.rm_offset;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
NEW.rm_startblock, NEW.rm_blockcount,
NEW.rm_owner, NEW.rm_offset,
NEW.rm_flags);
@@ -1441,7 +1443,7 @@ xfs_rmap_convert(
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1465,12 +1467,12 @@ xfs_rmap_convert(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1506,7 +1508,7 @@ xfs_rmap_convert_shared(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -1573,7 +1575,7 @@ xfs_rmap_convert_shared(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, RIGHT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
@@ -1589,7 +1591,7 @@ xfs_rmap_convert_shared(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
_RET_IP_);
/*
* Switch out based on the FILLING and CONTIG state bits.
@@ -1880,12 +1882,12 @@ xfs_rmap_convert_shared(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1923,7 +1925,7 @@ xfs_rmap_unmap_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -2072,12 +2074,12 @@ xfs_rmap_unmap_shared(
goto out_error;
}
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_unmap_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -2112,7 +2114,7 @@ xfs_rmap_map_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/* Is there a left record that abuts our range? */
@@ -2138,7 +2140,7 @@ xfs_rmap_map_shared(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, gtrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
@@ -2231,12 +2233,12 @@ xfs_rmap_map_shared(
goto out_error;
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_map_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -2362,31 +2364,32 @@ xfs_rmap_finish_one(
struct xfs_btree_cur **pcur)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_btree_cur *rcur;
struct xfs_buf *agbp = NULL;
int error = 0;
- xfs_agnumber_t agno;
struct xfs_owner_info oinfo;
xfs_agblock_t bno;
bool unwritten;
- agno = XFS_FSB_TO_AGNO(mp, startblock);
- ASSERT(agno != NULLAGNUMBER);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
bno = XFS_FSB_TO_AGBNO(mp, startblock);
- trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
+ trace_xfs_rmap_deferred(mp, pag->pag_agno, type, bno, owner, whichfork,
startoff, blockcount, state);
- if (XFS_TEST_ERROR(false, mp,
- XFS_ERRTAG_RMAP_FINISH_ONE))
- return -EIO;
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE)) {
+ error = -EIO;
+ goto out_drop;
+ }
+
/*
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_ag.agno != agno) {
+ if (rcur != NULL && rcur->bc_ag.pag != pag) {
xfs_rmap_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
@@ -2397,13 +2400,15 @@ xfs_rmap_finish_one(
* rmapbt, because a shape change could cause us to
* allocate blocks.
*/
- error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
if (error)
- return error;
- if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
- return -EFSCORRUPTED;
+ goto out_drop;
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+ error = -EFSCORRUPTED;
+ goto out_drop;
+ }
- rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
}
*pcur = rcur;
@@ -2441,6 +2446,8 @@ xfs_rmap_finish_one(
ASSERT(0);
error = -EFSCORRUPTED;
}
+out_drop:
+ xfs_perag_put(pag);
return error;
}
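The hunks above fragment the small wrappers; reassembled, the converted xfs_rmap_free() reads as below. The unchanged tail is reconstructed from surrounding context, so treat this as a sketch rather than a verbatim quote. The perag flows straight into the cursor with no agno round-trip, and xfs_rmap_alloc() is identical except that it calls xfs_rmap_map():

	int
	xfs_rmap_free(
		struct xfs_trans		*tp,
		struct xfs_buf			*agbp,
		struct xfs_perag		*pag,
		xfs_agblock_t			bno,
		xfs_extlen_t			len,
		const struct xfs_owner_info	*oinfo)
	{
		struct xfs_mount	*mp = tp->t_mountp;
		struct xfs_btree_cur	*cur;
		int			error;

		if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
			return 0;

		cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_rmap_unmap(cur, bno, len, false, oinfo);

		xfs_btree_del_cursor(cur, error);
		return error;
	}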
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index abe633403fd1..f2423cf7f1e2 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -6,6 +6,8 @@
#ifndef __XFS_RMAP_H__
#define __XFS_RMAP_H__
+struct xfs_perag;
+
static inline void
xfs_rmap_ino_bmbt_owner(
struct xfs_owner_info *oi,
@@ -113,10 +115,10 @@ xfs_owner_info_pack(
}
int xfs_rmap_alloc(struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_perag *pag, xfs_agblock_t bno, xfs_extlen_t len,
const struct xfs_owner_info *oinfo);
int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_perag *pag, xfs_agblock_t bno, xfs_extlen_t len,
const struct xfs_owner_info *oinfo);
int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 9f5bcbd834c3..f29bc71b9950 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
@@ -20,6 +19,7 @@
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -52,7 +52,7 @@ xfs_rmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno);
+ cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
@@ -64,13 +64,12 @@ xfs_rmapbt_set_root(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
- struct xfs_perag *pag = agbp->b_pag;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- pag->pagf_levels[btnum] += inc;
+ cur->bc_ag.pag->pagf_levels[btnum] += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -84,6 +83,7 @@ xfs_rmapbt_alloc_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_perag *pag = cur->bc_ag.pag;
int error;
xfs_agblock_t bno;
@@ -93,21 +93,19 @@ xfs_rmapbt_alloc_block(
if (error)
return error;
- trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
- bno, 1);
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
}
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
- false);
+ xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);
new->s = cpu_to_be32(bno);
be32_add_cpu(&agf->agf_rmap_blocks, 1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);
+ xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);
*stat = 1;
return 0;
@@ -120,12 +118,12 @@ xfs_rmapbt_free_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
- struct xfs_perag *pag;
+ struct xfs_perag *pag = cur->bc_ag.pag;
xfs_agblock_t bno;
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
- trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
@@ -133,10 +131,9 @@ xfs_rmapbt_free_block(
if (error)
return error;
- xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
- pag = cur->bc_ag.agbp->b_pag;
xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
return 0;
}
@@ -215,7 +212,7 @@ xfs_rmapbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -450,7 +447,7 @@ static struct xfs_btree_cur *
xfs_rmapbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
@@ -462,9 +459,12 @@ xfs_rmapbt_init_common(
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
- cur->bc_ag.agno = agno;
cur->bc_ops = &xfs_rmapbt_ops;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
+
return cur;
}
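The hunk above is the pattern this series applies to every AG btree cursor: the cursor now pins the perag with its own active reference so the AG structure cannot be torn down while the cursor is live. A minimal sketch of the intended lifecycle follows; the release half lives in the generic cursor teardown and is not shown in this hunk, so treat the exact teardown condition as illustrative:

	/* acquire: done once in the cursor constructor */
	atomic_inc(&pag->pag_ref);	/* reference held by the cursor */
	cur->bc_ag.pag = pag;

	/* release: done once in cursor teardown, e.g. xfs_btree_del_cursor() */
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
		xfs_perag_put(cur->bc_ag.pag);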
@@ -474,12 +474,12 @@ xfs_rmapbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_rmapbt_init_common(mp, tp, agno);
+ cur = xfs_rmapbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
cur->bc_ag.agbp = agbp;
return cur;
@@ -490,11 +490,11 @@ struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- cur = xfs_rmapbt_init_common(mp, NULL, agno);
+ cur = xfs_rmapbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -596,7 +596,7 @@ int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -609,7 +609,7 @@ xfs_rmapbt_calc_reserves(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
return error;
@@ -624,7 +624,7 @@ xfs_rmapbt_calc_reserves(
* expansion. We therefore can pretend the space isn't there.
*/
if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
agblocks -= mp->m_sb.sb_logblocks;
/* Reserve 1% of the AG or enough for 1 block per record. */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 115c3455a734..88d8d18788a2 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -43,9 +43,9 @@ struct xbtree_afakeroot;
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
struct xfs_btree_cur *xfs_rmapbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag);
void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp);
int xfs_rmapbt_maxrecs(int blocklen, int leaf);
@@ -57,6 +57,6 @@ extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_perag *pag, xfs_extlen_t *ask, xfs_extlen_t *used);
#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index dfbbcbd448c1..04f5386446db 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -15,7 +15,6 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
-#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_btree.h"
@@ -25,72 +24,12 @@
#include "xfs_refcount_btree.h"
#include "xfs_da_format.h"
#include "xfs_health.h"
+#include "xfs_ag.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
*/
-/*
- * Reference counting access wrappers to the perag structures.
- * Because we never free per-ag structures, the only thing we
- * have to protect against changes is the tree structure itself.
- */
-struct xfs_perag *
-xfs_perag_get(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_perag *pag;
- int ref = 0;
-
- rcu_read_lock();
- pag = radix_tree_lookup(&mp->m_perag_tree, agno);
- if (pag) {
- ASSERT(atomic_read(&pag->pag_ref) >= 0);
- ref = atomic_inc_return(&pag->pag_ref);
- }
- rcu_read_unlock();
- trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
- return pag;
-}
-
-/*
- * search from @first to find the next perag with the given tag set.
- */
-struct xfs_perag *
-xfs_perag_get_tag(
- struct xfs_mount *mp,
- xfs_agnumber_t first,
- int tag)
-{
- struct xfs_perag *pag;
- int found;
- int ref;
-
- rcu_read_lock();
- found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
- (void **)&pag, first, 1, tag);
- if (found <= 0) {
- rcu_read_unlock();
- return NULL;
- }
- ref = atomic_inc_return(&pag->pag_ref);
- rcu_read_unlock();
- trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
- return pag;
-}
-
-void
-xfs_perag_put(
- struct xfs_perag *pag)
-{
- int ref;
-
- ASSERT(atomic_read(&pag->pag_ref) > 0);
- ref = atomic_dec_return(&pag->pag_ref);
- trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
-}
-
/* Check all the superblock fields we care about when reading one in. */
STATIC int
xfs_validate_sb_read(
@@ -842,78 +781,6 @@ xfs_sb_mount_common(
}
/*
- * xfs_initialize_perag_data
- *
- * Read in each per-ag structure so we can count up the number of
- * allocated inodes, free inodes and used filesystem blocks as this
- * information is no longer persistent in the superblock. Once we have
- * this information, write it into the in-core superblock structure.
- */
-int
-xfs_initialize_perag_data(
- struct xfs_mount *mp,
- xfs_agnumber_t agcount)
-{
- xfs_agnumber_t index;
- xfs_perag_t *pag;
- xfs_sb_t *sbp = &mp->m_sb;
- uint64_t ifree = 0;
- uint64_t ialloc = 0;
- uint64_t bfree = 0;
- uint64_t bfreelst = 0;
- uint64_t btree = 0;
- uint64_t fdblocks;
- int error = 0;
-
- for (index = 0; index < agcount; index++) {
- /*
- * read the agf, then the agi. This gets us
- * all the information we need and populates the
- * per-ag structures for us.
- */
- error = xfs_alloc_pagf_init(mp, NULL, index, 0);
- if (error)
- return error;
-
- error = xfs_ialloc_pagi_init(mp, NULL, index);
- if (error)
- return error;
- pag = xfs_perag_get(mp, index);
- ifree += pag->pagi_freecount;
- ialloc += pag->pagi_count;
- bfree += pag->pagf_freeblks;
- bfreelst += pag->pagf_flcount;
- btree += pag->pagf_btreeblks;
- xfs_perag_put(pag);
- }
- fdblocks = bfree + bfreelst + btree;
-
- /*
- * If the new summary counts are obviously incorrect, fail the
- * mount operation because that implies the AGFs are also corrupt.
- * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
- * will prevent xfs_repair from fixing anything.
- */
- if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
- xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
- error = -EFSCORRUPTED;
- goto out;
- }
-
- /* Overwrite incore superblock counters with just-read data */
- spin_lock(&mp->m_sb_lock);
- sbp->sb_ifree = ifree;
- sbp->sb_icount = ialloc;
- sbp->sb_fdblocks = fdblocks;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_reinit_percpu_counters(mp);
-out:
- xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
- return error;
-}
-
-/*
* xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
* into the superblock buffer to be logged. It does not provide the higher
* level of locking that is needed to protect the in-core superblock from
@@ -989,17 +856,18 @@ int
xfs_update_secondary_sbs(
struct xfs_mount *mp)
{
- xfs_agnumber_t agno;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno = 1;
int saved_error = 0;
int error = 0;
LIST_HEAD (buffer_list);
/* update secondary superblocks. */
- for (agno = 1; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
struct xfs_buf *bp;
error = xfs_buf_get(mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
+ XFS_AG_DADDR(mp, pag->pag_agno, XFS_SB_DADDR),
XFS_FSS_TO_BB(mp, 1), &bp);
/*
* If we get an error reading or writing alternate superblocks,
@@ -1011,7 +879,7 @@ xfs_update_secondary_sbs(
if (error) {
xfs_warn(mp,
"error allocating secondary superblock for ag %d",
- agno);
+ pag->pag_agno);
if (!saved_error)
saved_error = error;
continue;
@@ -1032,7 +900,7 @@ xfs_update_secondary_sbs(
if (error) {
xfs_warn(mp,
"write error %d updating a secondary superblock near ag %d",
- error, agno);
+ error, pag->pag_agno);
if (!saved_error)
saved_error = error;
continue;
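for_each_perag_from() here, and the plain for_each_perag() used later in this patch, come from the new xfs_ag.h. They fold the xfs_perag_get()/xfs_perag_put() pairing into the loop header; roughly (a simplified sketch of the macros, see xfs_ag.h in this series for the exact text):

#define for_each_perag_range(mp, agno, end_agno, pag) \
	for ((pag) = xfs_perag_get((mp), (agno)); \
	     (pag) != NULL && (agno) <= (end_agno); \
	     (agno) = (pag)->pag_agno + 1, \
	     xfs_perag_put(pag), \
	     (pag) = xfs_perag_get((mp), (agno)))

#define for_each_perag_from(mp, agno, pag) \
	for_each_perag_range((mp), (agno), (mp)->m_sb.sb_agcount, (pag))

#define for_each_perag(mp, agno, pag) \
	(agno) = 0; \
	for_each_perag_from((mp), (agno), (pag))

The reference for the current pag is dropped at the top of the next iteration, which is why loops that break out early must put the final reference themselves.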
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index f79f9dc632b6..0c1602d9b53d 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -13,15 +13,6 @@ struct xfs_trans;
struct xfs_fsop_geom;
struct xfs_perag;
-/*
- * perag get/put wrappers for ref counting
- */
-extern struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
-extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
- int tag);
-extern void xfs_perag_put(struct xfs_perag *pag);
-extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
-
extern void xfs_log_sb(struct xfs_trans *tp);
extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
extern int xfs_sync_sb_buf(struct xfs_mount *mp);
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 782fdd08f759..25c4cab58851 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -22,30 +22,26 @@ struct xfs_inode;
* Buffer verifier operations are widely used, including userspace tools
*/
extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
-extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
-extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
-extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
+extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
extern const struct xfs_buf_ops xfs_finobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rtbuf_ops;
extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
-extern const struct xfs_buf_ops xfs_rtbuf_ops;
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 04801362e1a7..e8f4abee7892 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -11,6 +11,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_ag.h"
/* Find the size of the AG, in blocks. */
inline xfs_agblock_t
@@ -222,12 +223,13 @@ xfs_icount_range(
unsigned long long *max)
{
unsigned long long nr_inos = 0;
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
/* root, rtbitmap, rtsum all live in the first chunk */
*min = XFS_INODES_PER_CHUNK;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag(mp, agno, pag) {
xfs_agino_t first, last;
xfs_agino_range(mp, agno, &first, &last);
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 064bd6e8c922..0870ef6f933d 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -21,6 +21,7 @@ typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */
typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */
typedef int64_t xfs_lsn_t; /* log sequence number */
+typedef int64_t xfs_csn_t; /* CIL sequence number */
typedef uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef uint32_t xfs_dahash_t; /* dir/attr hash value */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index f96e84793cc9..be1a7e1e65f7 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -14,6 +14,7 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 23690f824ffa..e95f8c98f0f7 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -20,6 +20,7 @@
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -245,8 +246,8 @@ xrep_agf_calc_from_btrees(
int error;
/* Update the AGF counters from the bnobt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_BNO);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_BNO);
error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
if (error)
goto err;
@@ -259,8 +260,8 @@ xrep_agf_calc_from_btrees(
agf->agf_longest = cpu_to_be32(raa.longest);
/* Update the AGF counters from the cntbt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_CNT);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -268,7 +269,7 @@ xrep_agf_calc_from_btrees(
btreeblks += blocks - 1;
/* Update the AGF counters from the rmapbt. */
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -281,7 +282,7 @@ xrep_agf_calc_from_btrees(
/* Update the AGF counters from the refcountbt. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
- sc->sa.agno);
+ sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -453,7 +454,7 @@ xrep_agfl_walk_rmap(
/* Record all the OWN_AG blocks. */
if (rec->rm_owner == XFS_RMAP_OWN_AG) {
- fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
+ fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
rec->rm_startblock);
error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
if (error)
@@ -489,23 +490,23 @@ xrep_agfl_collect_blocks(
xbitmap_init(&ra.agmetablocks);
/* Find all space used by the free space btrees & rmapbt. */
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
if (error)
goto err;
xfs_btree_del_cursor(cur, error);
/* Find all blocks currently being used by the bnobt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_BNO);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_BNO);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
xfs_btree_del_cursor(cur, error);
/* Find all blocks currently being used by the cntbt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_CNT);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
@@ -805,8 +806,8 @@ xrep_agi_calc_from_btrees(
xfs_agino_t freecount;
int error;
- cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
- XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+ sc->sa.pag, XFS_BTNUM_INO);
error = xfs_ialloc_count_inodes(cur, &count, &freecount);
if (error)
goto err;
@@ -827,8 +828,8 @@ xrep_agi_calc_from_btrees(
xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
xfs_agblock_t blocks;
- cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
- XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+ sc->sa.pag, XFS_BTNUM_FINO);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 2720bd7fe53b..d5741980094a 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -15,6 +15,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub free space btrees.
@@ -93,7 +94,7 @@ xchk_allocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
xfs_extlen_t len;
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 77d5c4a0f09f..1d146c9d9de1 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -22,6 +22,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/* Set us up with an inode's bmap. */
int
@@ -514,7 +515,7 @@ xchk_bmap_check_rmap(
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
- cur->bc_ag.agno, rec->rm_startblock))
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount)
@@ -544,18 +545,18 @@ STATIC int
xchk_bmap_check_ag_rmaps(
struct xfs_scrub *sc,
int whichfork,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xchk_bmap_check_rmap_info sbcri;
struct xfs_btree_cur *cur;
struct xfs_buf *agf;
int error;
- error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
+ error = xfs_alloc_read_agf(sc->mp, sc->tp, pag->pag_agno, 0, &agf);
if (error)
return error;
- cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
+ cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);
sbcri.sc = sc;
sbcri.whichfork = whichfork;
@@ -575,6 +576,7 @@ xchk_bmap_check_rmaps(
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
bool zero_size;
int error;
@@ -607,15 +609,16 @@ xchk_bmap_check_rmaps(
(zero_size || ifp->if_nextents > 0))
return 0;
- for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
- error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
+ for_each_perag(sc->mp, agno, pag) {
+ error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
if (error)
- return error;
+ break;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
break;
}
-
- return 0;
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
/*
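Breaking out of for_each_perag() bypasses the xfs_perag_put() the macro performs between iterations, so the reference for the iteration that broke is still held. The conversions in this patch settle on one idiom for that cleanup (sketch; do_something() is a stand-in for the loop body):

	int			error = 0;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag(mp, agno, pag) {
		error = do_something(pag);
		if (error)
			break;		/* reference to pag still held */
	}
	if (pag)
		xfs_perag_put(pag);	/* drop the ref the break left behind */
	return error;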
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 6cc92291c46f..8558ca05e11d 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -12,7 +12,6 @@
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
@@ -26,6 +25,7 @@
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -460,49 +460,48 @@ xchk_ag_btcur_init(
struct xchk_ag *sa)
{
struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sa->agno;
xchk_perag_get(sc->mp, sa);
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
/* Set up a bnobt cursor for cross-referencing. */
sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno, XFS_BTNUM_BNO);
+ sa->pag, XFS_BTNUM_BNO);
}
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
/* Set up a cntbt cursor for cross-referencing. */
sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno, XFS_BTNUM_CNT);
+ sa->pag, XFS_BTNUM_CNT);
}
/* Set up an inobt cursor for cross-referencing. */
if (sa->agi_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
- agno, XFS_BTNUM_INO);
+ sa->pag, XFS_BTNUM_INO);
}
/* Set up a finobt cursor for cross-referencing. */
if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
- agno, XFS_BTNUM_FINO);
+ sa->pag, XFS_BTNUM_FINO);
}
/* Set up a rmapbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno);
+ sa->pag);
}
/* Set up a refcountbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
- sa->agf_bp, agno);
+ sa->agf_bp, sa->pag);
}
}
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index f1d1a8c58853..fd7941e04ae1 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -9,11 +9,11 @@
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -71,11 +71,11 @@ xchk_fscount_warmup(
xfs_agnumber_t agno;
int error = 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
-
+ for_each_perag(mp, agno, pag) {
+ if (xchk_should_terminate(sc, &error))
+ break;
if (pag->pagi_init && pag->pagf_init)
- goto next_loop_perag;
+ continue;
/* Lock both AG headers. */
error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
@@ -89,21 +89,15 @@ xchk_fscount_warmup(
* These are supposed to be initialized by the header read
* function.
*/
- error = -EFSCORRUPTED;
- if (!pag->pagi_init || !pag->pagf_init)
+ if (!pag->pagi_init || !pag->pagf_init) {
+ error = -EFSCORRUPTED;
break;
+ }
xfs_buf_relse(agf_bp);
agf_bp = NULL;
xfs_buf_relse(agi_bp);
agi_bp = NULL;
-next_loop_perag:
- xfs_perag_put(pag);
- pag = NULL;
- error = 0;
-
- if (xchk_should_terminate(sc, &error))
- break;
}
if (agf_bp)
@@ -196,13 +190,14 @@ retry:
fsc->ifree = 0;
fsc->fdblocks = 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
+ if (xchk_should_terminate(sc, &error))
+ break;
/* This somehow got unset since the warmup? */
if (!pag->pagi_init || !pag->pagf_init) {
- xfs_perag_put(pag);
- return -EFSCORRUPTED;
+ error = -EFSCORRUPTED;
+ break;
}
/* Count all the inodes */
@@ -216,10 +211,8 @@ retry:
fsc->fdblocks += pag->pagf_btreeblks;
} else {
error = xchk_fscount_btreeblks(sc, fsc, agno);
- if (error) {
- xfs_perag_put(pag);
+ if (error)
break;
- }
}
/*
@@ -229,12 +222,9 @@ retry:
fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
- xfs_perag_put(pag);
-
- if (xchk_should_terminate(sc, &error))
- break;
}
-
+ if (pag)
+ xfs_perag_put(pag);
if (error)
return error;
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index 3de59b5c2ce6..2e61df3bca83 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -8,7 +8,7 @@
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
#include "scrub/health.h"
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 8d9f3fb0cd22..30e568596b79 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -21,6 +21,7 @@
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub inode btrees.
@@ -103,7 +104,7 @@ xchk_iallocbt_chunk(
xfs_extlen_t len)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
bno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -163,7 +164,7 @@ xchk_iallocbt_check_cluster_ifree(
* the record, compute which fs inode we're talking about.
*/
agino = irec->ir_startino + irec_ino;
- fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.agno, agino);
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
@@ -213,7 +214,7 @@ xchk_iallocbt_check_cluster(
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_buf *cluster_bp;
unsigned int nr_inodes;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t agbno;
unsigned int cluster_index;
uint16_t cluster_mask = 0;
@@ -423,7 +424,7 @@ xchk_iallocbt_rec(
struct xchk_iallocbt *iabt = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agino_t agino;
xfs_extlen_t len;
int holecount;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 744530a66c0c..7014b7408bad 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -13,6 +13,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub reference count btrees.
@@ -333,7 +334,7 @@ xchk_refcountbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
xfs_extlen_t len;
xfs_nlink_t refcount;
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index b8202dd08939..ebb0e245aa72 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -22,6 +22,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "scrub/scrub.h"
@@ -303,7 +304,7 @@ xrep_alloc_ag_block(
return error;
if (bno == NULLAGBLOCK)
return -ENOSPC;
- xfs_extent_busy_reuse(sc->mp, sc->sa.agno, bno,
+ xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno,
1, false);
*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.agno, bno);
if (resv == XFS_AG_RESV_RMAPBT)
@@ -508,7 +509,7 @@ xrep_put_freelist(
* create an rmap for the block prior to merging it or else other
* parts will break.
*/
- error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1,
+ error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
&XFS_RMAP_OINFO_AG);
if (error)
return error;
@@ -518,7 +519,7 @@ xrep_put_freelist(
agbno, 0);
if (error)
return error;
- xfs_extent_busy_insert(sc->tp, sc->sa.agno, agbno, 1,
+ xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;
@@ -554,7 +555,7 @@ xrep_reap_block(
} else {
agf_bp = sc->sa.agf_bp;
}
- cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
+ cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);
/* Can we find any other rmappings? */
error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
@@ -576,7 +577,8 @@ xrep_reap_block(
* to run xfs_repair.
*/
if (has_other_rmap)
- error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
+ error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
+ 1, oinfo);
else if (resv == XFS_AG_RESV_AGFL)
error = xrep_put_freelist(sc, agbno);
else
@@ -891,7 +893,7 @@ xrep_find_ag_btree_roots(
fab->height = 0;
}
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
xfs_btree_del_cursor(cur, error);
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index a4f17477c5d1..fc306573f0ac 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -15,6 +15,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub reverse mapping btrees.
@@ -91,7 +92,7 @@ xchk_rmapbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
bool non_inode;
bool is_unwritten;
bool is_bmbt;
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 2c6c248be823..03882a605a3c 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -13,6 +13,7 @@
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
+#include "xfs_ag.h"
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
@@ -26,7 +27,7 @@ xchk_btree_cur_fsbno(
cur->bc_flags & XFS_BTREE_LONG_PTRS)
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
else if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
- return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno, 0);
+ return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, 0);
return NULLFSBLOCK;
}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index bfad669e6b2f..aaa7e66c42d7 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -15,10 +15,10 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
+#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
-#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index 17f36db2f792..667e297f59b1 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -9,6 +9,41 @@ static inline unsigned int bio_max_vecs(unsigned int count)
return bio_max_segs(howmany(count, PAGE_SIZE));
}
+static void
+xfs_flush_bdev_async_endio(
+ struct bio *bio)
+{
+ complete(bio->bi_private);
+}
+
+/*
+ * Submit a request for an async cache flush to run. If the request queue does
+ * not require flush operations, just skip it altogether. If the caller needs
+ * to wait for the flush completion at a later point in time, they must supply a
+ * valid completion. This will be signalled when the flush completes. The
+ * caller never sees the bio that is issued here.
+ */
+void
+xfs_flush_bdev_async(
+ struct bio *bio,
+ struct block_device *bdev,
+ struct completion *done)
+{
+ struct request_queue *q = bdev->bd_disk->queue;
+
+ if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+ complete(done);
+ return;
+ }
+
+ bio_init(bio, NULL, 0);
+ bio_set_dev(bio, bdev);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ bio->bi_private = done;
+ bio->bi_end_io = xfs_flush_bdev_async_endio;
+
+ submit_bio(bio);
+}
int
xfs_rw_bdev(
struct block_device *bdev,
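The helper leaves waiting entirely to the caller: it never touches the bio after submission and never sleeps itself. A hypothetical caller that wants a synchronous flush could look like this (function name and structure invented for illustration):

	static void example_sync_flush(struct block_device *bdev)
	{
		struct bio		bio;
		DECLARE_COMPLETION_ONSTACK(done);

		xfs_flush_bdev_async(&bio, bdev, &done);

		/* other work can overlap the in-flight flush here */

		wait_for_completion(&done);
		/*
		 * Note: if the queue has no volatile write cache, the bio was
		 * never initialised - only the completion may be inspected.
		 */
	}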
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 2988efa97e14..213a97a921bb 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -945,7 +945,7 @@ xfs_flush_unmap_range(
xfs_off_t rounding, start, end;
int error;
- rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
+ rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
@@ -1053,9 +1053,9 @@ xfs_prepare_shift(
* extent (after split) during the shift and corrupt the file. Start
* with the block just prior to the start to stabilize the boundary.
*/
- offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
+ offset = round_down(offset, mp->m_sb.sb_blocksize);
if (offset)
- offset -= (1 << mp->m_sb.sb_blocklog);
+ offset -= mp->m_sb.sb_blocksize;
/*
* Writeback and invalidate cache for the remainder of the file as we're
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 592800c8852f..8ff42b3585e0 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -10,7 +10,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
@@ -19,12 +18,10 @@
#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
+#include "xfs_ag.h"
static kmem_zone_t *xfs_buf_zone;
-#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
-
/*
* Locking orders
*
@@ -79,7 +76,7 @@ static inline int
xfs_buf_vmap_len(
struct xfs_buf *bp)
{
- return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+ return (bp->b_page_count * PAGE_SIZE);
}
/*
@@ -272,51 +269,30 @@ _xfs_buf_alloc(
return 0;
}
-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
- struct xfs_buf *bp,
- int page_count)
+static void
+xfs_buf_free_pages(
+ struct xfs_buf *bp)
{
- /* Make sure that we have a page list */
- if (bp->b_pages == NULL) {
- bp->b_page_count = page_count;
- if (page_count <= XB_PAGES) {
- bp->b_pages = bp->b_page_array;
- } else {
- bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, KM_NOFS);
- if (bp->b_pages == NULL)
- return -ENOMEM;
- }
- memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
+ uint i;
+
+ ASSERT(bp->b_flags & _XBF_PAGES);
+
+ if (xfs_buf_is_vmapped(bp))
+ vm_unmap_ram(bp->b_addr, bp->b_page_count);
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ if (bp->b_pages[i])
+ __free_page(bp->b_pages[i]);
}
- return 0;
-}
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += bp->b_page_count;
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
- struct xfs_buf *bp)
-{
- if (bp->b_pages != bp->b_page_array) {
+ if (bp->b_pages != bp->b_page_array)
kmem_free(bp->b_pages);
- bp->b_pages = NULL;
- }
+ bp->b_pages = NULL;
+ bp->b_flags &= ~_XBF_PAGES;
}
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
static void
xfs_buf_free(
struct xfs_buf *bp)
@@ -325,137 +301,103 @@ xfs_buf_free(
ASSERT(list_empty(&bp->b_lru));
- if (bp->b_flags & _XBF_PAGES) {
- uint i;
-
- if (xfs_buf_is_vmapped(bp))
- vm_unmap_ram(bp->b_addr - bp->b_offset,
- bp->b_page_count);
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page = bp->b_pages[i];
-
- __free_page(page);
- }
- if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab +=
- bp->b_page_count;
- } else if (bp->b_flags & _XBF_KMEM)
+ if (bp->b_flags & _XBF_PAGES)
+ xfs_buf_free_pages(bp);
+ else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
- _xfs_buf_free_pages(bp);
+
xfs_buf_free_maps(bp);
kmem_cache_free(xfs_buf_zone, bp);
}
-/*
- * Allocates all the pages for buffer in question and builds it's page list.
- */
-STATIC int
-xfs_buf_allocate_memory(
- struct xfs_buf *bp,
- uint flags)
+static int
+xfs_buf_alloc_kmem(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
{
- size_t size;
- size_t nbytes, offset;
- gfp_t gfp_mask = xb_to_gfp(flags);
- unsigned short page_count, i;
- xfs_off_t start, end;
- int error;
- xfs_km_flags_t kmflag_mask = 0;
+ int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
+ xfs_km_flags_t kmflag_mask = KM_NOFS;
+ size_t size = BBTOB(bp->b_length);
- /*
- * assure zeroed buffer for non-read cases.
- */
- if (!(flags & XBF_READ)) {
+ /* Assure zeroed buffer for non-read cases. */
+ if (!(flags & XBF_READ))
kmflag_mask |= KM_ZERO;
- gfp_mask |= __GFP_ZERO;
- }
- /*
- * for buffers that are contained within a single page, just allocate
- * the memory from the heap - there's no need for the complexity of
- * page arrays to keep allocation down to order 0.
- */
- size = BBTOB(bp->b_length);
- if (size < PAGE_SIZE) {
- int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
- bp->b_addr = kmem_alloc_io(size, align_mask,
- KM_NOFS | kmflag_mask);
- if (!bp->b_addr) {
- /* low memory - use alloc_page loop instead */
- goto use_alloc_page;
- }
+ bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
+ if (!bp->b_addr)
+ return -ENOMEM;
- if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
- ((unsigned long)bp->b_addr & PAGE_MASK)) {
- /* b_addr spans two pages - use alloc_page instead */
- kmem_free(bp->b_addr);
- bp->b_addr = NULL;
- goto use_alloc_page;
- }
- bp->b_offset = offset_in_page(bp->b_addr);
- bp->b_pages = bp->b_page_array;
- bp->b_pages[0] = kmem_to_page(bp->b_addr);
- bp->b_page_count = 1;
- bp->b_flags |= _XBF_KMEM;
- return 0;
+ if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+ ((unsigned long)bp->b_addr & PAGE_MASK)) {
+ /* b_addr spans two pages - use alloc_page instead */
+ kmem_free(bp->b_addr);
+ bp->b_addr = NULL;
+ return -ENOMEM;
}
+ bp->b_offset = offset_in_page(bp->b_addr);
+ bp->b_pages = bp->b_page_array;
+ bp->b_pages[0] = kmem_to_page(bp->b_addr);
+ bp->b_page_count = 1;
+ bp->b_flags |= _XBF_KMEM;
+ return 0;
+}
-use_alloc_page:
- start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- page_count = end - start;
- error = _xfs_buf_get_pages(bp, page_count);
- if (unlikely(error))
- return error;
+static int
+xfs_buf_alloc_pages(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
+{
+ gfp_t gfp_mask = __GFP_NOWARN;
+ long filled = 0;
- offset = bp->b_offset;
+ if (flags & XBF_READ_AHEAD)
+ gfp_mask |= __GFP_NORETRY;
+ else
+ gfp_mask |= GFP_NOFS;
+
+ /* Make sure that we have a page list */
+ bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
+ if (bp->b_page_count <= XB_PAGES) {
+ bp->b_pages = bp->b_page_array;
+ } else {
+ bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
+ gfp_mask);
+ if (!bp->b_pages)
+ return -ENOMEM;
+ }
bp->b_flags |= _XBF_PAGES;
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page;
- uint retries = 0;
-retry:
- page = alloc_page(gfp_mask);
- if (unlikely(page == NULL)) {
- if (flags & XBF_READ_AHEAD) {
- bp->b_page_count = i;
- error = -ENOMEM;
- goto out_free_pages;
- }
+ /* Assure zeroed buffer for non-read cases. */
+ if (!(flags & XBF_READ))
+ gfp_mask |= __GFP_ZERO;
- /*
- * This could deadlock.
- *
- * But until all the XFS lowlevel code is revamped to
- * handle buffer allocation failures we can't do much.
- */
- if (!(++retries % 100))
- xfs_err(NULL,
- "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
- current->comm, current->pid,
- __func__, gfp_mask);
-
- XFS_STATS_INC(bp->b_mount, xb_page_retries);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
+ /*
+ * Bulk filling of pages can take multiple calls. Not filling the entire
+ * array is not an allocation failure, so don't back off if we get at
+ * least one extra page.
+ */
+ for (;;) {
+ long last = filled;
+
+ filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
+ bp->b_pages);
+ if (filled == bp->b_page_count) {
+ XFS_STATS_INC(bp->b_mount, xb_page_found);
+ break;
}
- XFS_STATS_INC(bp->b_mount, xb_page_found);
+ if (filled != last)
+ continue;
- nbytes = min_t(size_t, size, PAGE_SIZE - offset);
- size -= nbytes;
- bp->b_pages[i] = page;
- offset = 0;
+ if (flags & XBF_READ_AHEAD) {
+ xfs_buf_free_pages(bp);
+ return -ENOMEM;
+ }
+
+ XFS_STATS_INC(bp->b_mount, xb_page_retries);
+ congestion_wait(BLK_RW_ASYNC, HZ / 50);
}
return 0;
-
-out_free_pages:
- for (i = 0; i < bp->b_page_count; i++)
- __free_page(bp->b_pages[i]);
- bp->b_flags &= ~_XBF_PAGES;
- return error;
}
/*
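The straddle test in xfs_buf_alloc_kmem() above compares the page frames of the first and last bytes of the heap allocation: a single-page buffer must map through exactly one page, so an allocation that happens to cross a page boundary is thrown away and the caller falls back to the page allocator. A worked instance of the arithmetic, assuming 4096-byte pages and an invented address:

	/* 512 byte allocation that starts 32 bytes before a page boundary */
	unsigned long addr = 0xffff888004321fe0UL;
	size_t size = 512;

	unsigned long first = addr & PAGE_MASK;		      /* ...321000 */
	unsigned long last  = (addr + size - 1) & PAGE_MASK;  /* ...322000 */

	if (first != last) {
		/* spans two pages: kmem_free() and use the page allocator */
	}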
@@ -469,7 +411,7 @@ _xfs_buf_map_pages(
ASSERT(bp->b_flags & _XBF_PAGES);
if (bp->b_page_count == 1) {
/* A single page buffer is always mappable */
- bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+ bp->b_addr = page_address(bp->b_pages[0]);
} else if (flags & XBF_UNMAPPED) {
bp->b_addr = NULL;
} else {
@@ -496,7 +438,6 @@ _xfs_buf_map_pages(
if (!bp->b_addr)
return -ENOMEM;
- bp->b_addr += bp->b_offset;
}
return 0;
@@ -707,7 +648,7 @@ xfs_buf_get_map(
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
- int error = 0;
+ int error;
*bpp = NULL;
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
@@ -720,17 +661,22 @@ xfs_buf_get_map(
if (error)
return error;
- error = xfs_buf_allocate_memory(new_bp, flags);
- if (error) {
- xfs_buf_free(new_bp);
- return error;
+ /*
+ * For buffers that fit entirely within a single page, first attempt to
+ * allocate the memory from the heap to minimise memory usage. If we
+ * can't get heap memory for these small buffers, we fall back to using
+ * the page allocator.
+ */
+ if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+ xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+ error = xfs_buf_alloc_pages(new_bp, flags);
+ if (error)
+ goto out_free_buf;
}
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
- if (error) {
- xfs_buf_free(new_bp);
- return error;
- }
+ if (error)
+ goto out_free_buf;
if (bp != new_bp)
xfs_buf_free(new_bp);
@@ -758,6 +704,9 @@ found:
trace_xfs_buf_get(bp, flags, _RET_IP_);
*bpp = bp;
return 0;
+out_free_buf:
+ xfs_buf_free(new_bp);
+ return error;
}
int
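xfs_buf_alloc_pages() above leans on the alloc_pages_bulk_array() contract: the allocator only fills NULL slots in the array and returns the cumulative count of populated entries, so a short return is progress rather than failure, and only two identical counts in a row mean the allocator is stuck. A compact illustration of that contract outside the buffer cache (array size and gfp flags arbitrary):

	struct page	*pages[16] = { NULL, };
	long		filled = 0;

	while (filled < ARRAY_SIZE(pages)) {
		long last = filled;

		/* returns total populated slots, previously filled included */
		filled = alloc_pages_bulk_array(GFP_NOFS | __GFP_NOWARN,
				ARRAY_SIZE(pages), pages);
		if (filled == last)
			break;	/* no forward progress: back off or fail */
	}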
@@ -950,8 +899,7 @@ xfs_buf_get_uncached(
int flags,
struct xfs_buf **bpp)
{
- unsigned long page_count;
- int error, i;
+ int error;
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
@@ -960,41 +908,25 @@ xfs_buf_get_uncached(
/* flags might contain irrelevant bits, pass only what we care about */
error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
if (error)
- goto fail;
+ return error;
- page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
- error = _xfs_buf_get_pages(bp, page_count);
+ error = xfs_buf_alloc_pages(bp, flags);
if (error)
goto fail_free_buf;
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i]) {
- error = -ENOMEM;
- goto fail_free_mem;
- }
- }
- bp->b_flags |= _XBF_PAGES;
-
error = _xfs_buf_map_pages(bp, 0);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
"%s: failed to map pages", __func__);
- goto fail_free_mem;
+ goto fail_free_buf;
}
trace_xfs_buf_get_uncached(bp, _RET_IP_);
*bpp = bp;
return 0;
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_free_maps(bp);
- kmem_cache_free(xfs_buf_zone, bp);
- fail:
+fail_free_buf:
+ xfs_buf_free(bp);
return error;
}
@@ -1722,7 +1654,6 @@ xfs_buf_offset(
if (bp->b_addr)
return bp->b_addr + offset;
- offset += bp->b_offset;
page = bp->b_pages[offset >> PAGE_SHIFT];
return page_address(page) + (offset & (PAGE_SIZE-1));
}
@@ -1958,7 +1889,7 @@ xfs_free_buftarg(
percpu_counter_destroy(&btp->bt_io_count);
list_lru_destroy(&btp->bt_lru);
- xfs_blkdev_issue_flush(btp);
+ blkdev_issue_flush(btp->bt_bdev);
kmem_free(btp);
}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 459ca34f26f5..464dc548fa23 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -167,7 +167,8 @@ struct xfs_buf {
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
- unsigned int b_offset; /* page offset in first page */
+ unsigned int b_offset; /* page offset of b_addr,
+ only for _XBF_KMEM buffers */
int b_error; /* error code on I/O */
/*
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index fb69879e4b2b..2828ce45b701 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -74,14 +74,12 @@ xfs_buf_item_straddle(
}
/*
- * This returns the number of log iovecs needed to log the
- * given buf log item.
+ * Return the number of log iovecs and space needed to log the given buf log
+ * item segment.
*
- * It calculates this as 1 iovec for the buf log format structure
- * and 1 for each stretch of non-contiguous chunks to be logged.
- * Contiguous chunks are logged in a single iovec.
- *
- * If the XFS_BLI_STALE flag has been set, then log nothing.
+ * It calculates this as 1 iovec for the buf log format structure and 1 for each
+ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
+ * in a single iovec.
*/
STATIC void
xfs_buf_item_size_segment(
@@ -168,11 +166,8 @@ slow_scan:
}
/*
- * This returns the number of log iovecs needed to log the given buf log item.
- *
- * It calculates this as 1 iovec for the buf log format structure and 1 for each
- * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
- * in a single iovec.
+ * Return the number of log iovecs and space needed to log the given buf log
+ * item.
*
* Discontiguous buffers need a format structure per region that is being
* logged. This makes the changes in the buffer appear to log recovery as though
@@ -182,7 +177,11 @@ slow_scan:
* what ends up on disk.
*
* If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
- * format structures.
+ * format structures. If the item has previously been logged and has dirty
+ * regions, we do not relog them in stale buffers. This has the effect of
+ * reducing the size of the relogged item by the amount of dirty data tracked
+ * by the log item. This can result in the committing transaction reducing the
+ * amount of space being consumed by the CIL.
*/
STATIC void
xfs_buf_item_size(
@@ -199,9 +198,9 @@ xfs_buf_item_size(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
if (bip->bli_flags & XFS_BLI_STALE) {
/*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
+ * The buffer is stale, so all we need to log is the buf log
+ * format structure with the cancel flag in it as we are never
+ * going to replay the changes tracked in the log item.
*/
trace_xfs_buf_item_size_stale(bip);
ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
@@ -216,9 +215,9 @@ xfs_buf_item_size(
if (bip->bli_flags & XFS_BLI_ORDERED) {
/*
- * The buffer has been logged just to order it.
- * It is not being included in the transaction
- * commit, so no vectors are used at all.
+ * The buffer has been logged just to order it. It is not being
+ * included in the transaction commit, so no vectors are used at
+ * all.
*/
trace_xfs_buf_item_size_ordered(bip);
*nvecs = XFS_LOG_VEC_ORDERED;
@@ -475,17 +474,8 @@ xfs_buf_item_pin(
}
/*
- * This is called to unpin the buffer associated with the buf log
- * item which was previously pinned with a call to xfs_buf_item_pin().
- *
- * Also drop the reference to the buf item for the current transaction.
- * If the XFS_BLI_STALE flag is set and we are the last reference,
- * then free up the buf log item and unlock the buffer.
- *
- * If the remove flag is set we are called from uncommit in the
- * forced-shutdown path. If that is true and the reference count on
- * the log item is going to drop to zero we need to free the item's
- * descriptor in the transaction.
+ * This is called to unpin the buffer associated with the buf log item which
+ * was previously pinned with a call to xfs_buf_item_pin().
*/
STATIC void
xfs_buf_item_unpin(
@@ -502,38 +492,35 @@ xfs_buf_item_unpin(
trace_xfs_buf_item_unpin(bip);
+ /*
+ * Drop the bli ref associated with the pin and grab the hold required
+ * for the I/O simulation failure in the abort case. We have to do this
+ * before the pin count drops because the AIL doesn't acquire a bli
+ * reference. Therefore if the refcount drops to zero, the bli could
+ * still be AIL resident and the buffer submitted for I/O (and freed on
+ * completion) at any point before we return. This can be removed once
+ * the AIL properly holds a reference on the bli.
+ */
freed = atomic_dec_and_test(&bip->bli_refcount);
-
+ if (freed && !stale && remove)
+ xfs_buf_hold(bp);
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
- if (freed && stale) {
+ /* nothing to do but drop the pin count if the bli is active */
+ if (!freed)
+ return;
+
+ if (stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_flags & XBF_STALE);
ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(list_empty(&lip->li_trans));
+ ASSERT(!bp->b_transp);
trace_xfs_buf_item_unpin_stale(bip);
- if (remove) {
- /*
- * If we are in a transaction context, we have to
- * remove the log item from the transaction as we are
- * about to release our reference to the buffer. If we
- * don't, the unlock that occurs later in
- * xfs_trans_uncommit() will try to reference the
- * buffer which we no longer have a hold on.
- */
- if (!list_empty(&lip->li_trans))
- xfs_trans_del_item(lip);
-
- /*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
- */
- bp->b_transp = NULL;
- }
-
/*
* If we get called here because of an IO error, we may or may
* not have the item on the AIL. xfs_trans_ail_delete() will
@@ -550,13 +537,13 @@ xfs_buf_item_unpin(
ASSERT(bp->b_log_item == NULL);
}
xfs_buf_relse(bp);
- } else if (freed && remove) {
+ } else if (remove) {
/*
* The buffer must be locked and held by the caller to simulate
- * an async I/O failure.
+ * an async I/O failure. We acquired the hold for this case
+ * before the buffer was unpinned.
*/
xfs_buf_lock(bp);
- xfs_buf_hold(bp);
bp->b_flags |= XBF_ASYNC;
xfs_buf_ioend_fail(bp);
}
@@ -714,7 +701,7 @@ xfs_buf_item_release(
STATIC void
xfs_buf_item_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
return xfs_buf_item_release(lip);
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f979d0d7e6cd..736df5660f1f 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -8,7 +8,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
@@ -18,6 +17,7 @@
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
STATIC int
xfs_trim_extents(
@@ -50,7 +50,7 @@ xfs_trim_extents(
goto out_put_perag;
agf = agbp->b_addr;
- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT);
/*
* Look up the longest btree in the AGF and start with it.
@@ -108,7 +108,7 @@ xfs_trim_extents(
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
*/
- if (xfs_extent_busy_search(mp, agno, fbno, flen)) {
+ if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
trace_xfs_discard_busy(mp, agno, fbno, flen);
goto next_extent;
}
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 8c1fdf37ee8f..8ed47b739b6c 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -188,7 +188,7 @@ xfs_qm_dquot_logitem_release(
STATIC void
xfs_qm_dquot_logitem_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
return xfs_qm_dquot_logitem_release(lip);
}
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index a4075685d9eb..ad22a003f959 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -11,39 +11,37 @@
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
void
xfs_extent_busy_insert(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
unsigned int flags)
{
struct xfs_extent_busy *new;
struct xfs_extent_busy *busyp;
- struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent = NULL;
new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
- new->agno = agno;
+ new->agno = pag->pag_agno;
new->bno = bno;
new->length = len;
INIT_LIST_HEAD(&new->list);
new->flags = flags;
/* trace before insert to be able to see failed inserts */
- trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);
+ trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);
- pag = xfs_perag_get(tp->t_mountp, new->agno);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
while (*rbp) {
@@ -66,7 +64,6 @@ xfs_extent_busy_insert(
list_add(&new->list, &tp->t_busy);
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
}
/*
@@ -81,21 +78,17 @@ xfs_extent_busy_insert(
int
xfs_extent_busy_search(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- struct xfs_perag *pag;
struct rb_node *rbp;
struct xfs_extent_busy *busyp;
int match = 0;
- pag = xfs_perag_get(mp, agno);
+ /* find closest start bno overlap */
spin_lock(&pag->pagb_lock);
-
rbp = pag->pagb_tree.rb_node;
-
- /* find closest start bno overlap */
while (rbp) {
busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
if (bno < busyp->bno) {
@@ -115,7 +108,6 @@ xfs_extent_busy_search(
}
}
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
return match;
}
@@ -281,17 +273,14 @@ out_force_log:
void
xfs_extent_busy_reuse(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata)
{
- struct xfs_perag *pag;
struct rb_node *rbp;
ASSERT(flen > 0);
-
- pag = xfs_perag_get(mp, agno);
spin_lock(&pag->pagb_lock);
restart:
rbp = pag->pagb_tree.rb_node;
@@ -314,7 +303,6 @@ restart:
goto restart;
}
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
}
/*
@@ -605,12 +593,11 @@ void
xfs_extent_busy_wait_all(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
DEFINE_WAIT (wait);
xfs_agnumber_t agno;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- struct xfs_perag *pag = xfs_perag_get(mp, agno);
-
+ for_each_perag(mp, agno, pag) {
do {
prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
if (RB_EMPTY_ROOT(&pag->pagb_tree))
@@ -618,8 +605,6 @@ xfs_extent_busy_wait_all(
schedule();
} while (1);
finish_wait(&pag->pagb_wait, &wait);
-
- xfs_perag_put(pag);
}
}
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 8aea07100092..4a118131059f 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -9,6 +9,7 @@
#define __XFS_EXTENT_BUSY_H__
struct xfs_mount;
+struct xfs_perag;
struct xfs_trans;
struct xfs_alloc_arg;
@@ -31,7 +32,7 @@ struct xfs_extent_busy {
};
void
-xfs_extent_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void
@@ -39,11 +40,11 @@ xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
bool do_discard);
int
-xfs_extent_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_search(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len);
void
-xfs_extent_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_reuse(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
bool
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 3c0749ab9e40..cc3cfb12df53 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -119,8 +119,8 @@ xfs_dir_fsync(
return xfs_log_force_inode(ip);
}
-static xfs_lsn_t
-xfs_fsync_lsn(
+static xfs_csn_t
+xfs_fsync_seq(
struct xfs_inode *ip,
bool datasync)
{
@@ -128,7 +128,7 @@ xfs_fsync_lsn(
return 0;
if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
return 0;
- return ip->i_itemp->ili_last_lsn;
+ return ip->i_itemp->ili_commit_seq;
}
/*
@@ -151,12 +151,12 @@ xfs_fsync_flush_log(
int *log_flushed)
{
int error = 0;
- xfs_lsn_t lsn;
+ xfs_csn_t seq;
xfs_ilock(ip, XFS_ILOCK_SHARED);
- lsn = xfs_fsync_lsn(ip, datasync);
- if (lsn) {
- error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
+ seq = xfs_fsync_seq(ip, datasync);
+ if (seq) {
+ error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
log_flushed);
spin_lock(&ip->i_itemp->ili_lock);
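The switch from ili_last_lsn to ili_commit_seq changes what fsync anchors on: the CIL checkpoint sequence the inode was last committed in, which is stable at transaction commit, rather than an LSN that isn't final until the checkpoint's commit record is written. The whole "force this inode to stable storage" path now reduces to roughly this sketch (mirroring xfs_log_force_inode() in this series):

	xfs_csn_t	seq = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		seq = ip->i_itemp->ili_commit_seq;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!seq)
		return 0;	/* nothing dirty in the log */
	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);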
@@ -197,9 +197,9 @@ xfs_file_fsync(
* inode size in case of an extending write.
*/
if (XFS_IS_REALTIME_INODE(ip))
- xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+ blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
else if (mp->m_logdev_targp != mp->m_ddev_targp)
- xfs_blkdev_issue_flush(mp->m_ddev_targp);
+ blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
/*
* Any inode that has dirty modifications in the log is pinned. The
@@ -219,7 +219,7 @@ xfs_file_fsync(
*/
if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
mp->m_logdev_targp == mp->m_ddev_targp)
- xfs_blkdev_issue_flush(mp->m_ddev_targp);
+ blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
return error;
}
@@ -384,21 +384,30 @@ restart:
}
goto restart;
}
+
/*
* If the offset is beyond the size of the file, we need to zero any
* blocks that fall between the existing EOF and the start of this
- * write. If zeroing is needed and we are currently holding the
- * iolock shared, we need to update it to exclusive which implies
- * having to redo all checks before.
+ * write. If zeroing is needed and we are currently holding the iolock
+ * shared, we need to update it to exclusive which implies having to
+ * redo all checks before.
+ *
+ * We need to serialise against EOF updates that occur in IO completions
+ * here. We want to make sure that nobody is changing the size while we
+ * do this check until we have placed an IO barrier (i.e. hold the
+ * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. The
+ * spinlock effectively forms a memory barrier once we have the
+ * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
+ * hence be able to correctly determine if we need to run zeroing.
*
- * We need to serialise against EOF updates that occur in IO
- * completions here. We want to make sure that nobody is changing the
- * size while we do this check until we have placed an IO barrier (i.e.
- * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
- * The spinlock effectively forms a memory barrier once we have the
- * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
- * and hence be able to correctly determine if we need to run zeroing.
+ * We can do an unlocked check here safely as IO completion can only
+ * extend EOF. Truncate is locked out at this point, so the EOF can
+ * not move backwards, only forwards. Hence we only need to take the
+ * slow path and spin locks when we are at or beyond the current EOF.
*/
+ if (iocb->ki_pos <= i_size_read(inode))
+ goto out;
+
spin_lock(&ip->i_flags_lock);
isize = i_size_read(inode);
if (iocb->ki_pos > isize) {
@@ -426,7 +435,7 @@ restart:
drained_dio = true;
goto restart;
}
-
+
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
NULL, &xfs_buffered_write_iomap_ops);
@@ -435,6 +444,7 @@ restart:
} else
spin_unlock(&ip->i_flags_lock);
+out:
return file_modified(file);
}
@@ -500,7 +510,17 @@ xfs_dio_write_end_io(
* other IO completions here to update the EOF. Failing to serialise
* here can result in EOF moving backwards and Bad Things Happen when
* that occurs.
+ *
+ * As IO completion only ever extends EOF, we can do an unlocked check
+ * here to avoid taking the spinlock. If we land within the current EOF,
+ * then we do not need to do an extending update at all, and we don't
+ * need to take the lock to check this. If we race with an update moving
+ * EOF, then we'll either still be beyond EOF and need to take the lock,
+ * or we'll be within EOF and we don't need to take it at all.
*/
+ if (offset + size <= i_size_read(inode))
+ goto out;
+
spin_lock(&ip->i_flags_lock);
if (offset + size > i_size_read(inode)) {
i_size_write(inode, offset + size);
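
Both hunks above add the same unlocked fast path: i_size only ever grows here (truncate is excluded), so a racy read that places the I/O inside EOF can never be invalidated, and only the extending case needs to take the spinlock and recheck. A minimal pthreads sketch of that double-checked pattern, using C11 atomics to keep the unlocked read well-defined; this models the idea, not the kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t size_lock;
static atomic_long isize = 100;		/* the "EOF" */

static void complete_io(long offset, long len)
{
	/* Unlocked fast path: I/O entirely inside EOF never extends it. */
	if (offset + len <= atomic_load(&isize))
		return;

	pthread_spin_lock(&size_lock);
	/* Recheck under the lock; a racing completion may have won. */
	if (offset + len > atomic_load(&isize))
		atomic_store(&isize, offset + len);
	pthread_spin_unlock(&size_lock);
}

int main(void)
{
	pthread_spin_init(&size_lock, PTHREAD_PROCESS_PRIVATE);
	complete_io(10, 20);	/* within EOF: the lock is never taken */
	complete_io(90, 50);	/* extends EOF to 140 under the lock */
	printf("isize = %ld\n", atomic_load(&isize));
	return 0;
}

The kernel versions do their real slow-path work (EOF zeroing, i_size_write) under the same lock that orders EOF updates; the sketch keeps only the locking skeleton.
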
@@ -749,18 +769,18 @@ write_retry:
*/
if (ret == -EDQUOT && !cleared_space) {
xfs_iunlock(ip, iolock);
- xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
+ xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
cleared_space = true;
goto write_retry;
} else if (ret == -ENOSPC && !cleared_space) {
- struct xfs_eofblocks eofb = {0};
+ struct xfs_icwalk icw = {0};
cleared_space = true;
xfs_flush_inodes(ip->i_mount);
xfs_iunlock(ip, iolock);
- eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
- xfs_blockgc_free_space(ip->i_mount, &eofb);
+ icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
+ xfs_blockgc_free_space(ip->i_mount, &icw);
goto write_retry;
}
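
The hunk above renames the scan arguments but keeps the retry shape: on the first EDQUOT or ENOSPC failure, drop the iolock, kick a garbage-collection scan, and retry the write exactly once. A compact, self-contained model of that single-retry pattern, with try_write() and reclaim_space() as hypothetical stand-ins for the real write path and blockgc calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

/* Hypothetical stand-ins: fail once with ENOSPC, then succeed. */
static int try_write(void)	{ return attempts++ ? 0 : -ENOSPC; }
static void reclaim_space(void)	{ puts("running blockgc scan"); }

static int write_with_retry(void)
{
	bool cleared_space = false;
	int ret;

write_retry:
	ret = try_write();
	if ((ret == -ENOSPC || ret == -EDQUOT) && !cleared_space) {
		cleared_space = true;	/* guarantees at most one retry */
		reclaim_space();
		goto write_retry;
	}
	return ret;
}

int main(void)
{
	printf("write returned %d\n", write_with_retry());
	return 0;
}
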
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index db23e455eb91..eed6ca5f8f91 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -9,13 +9,13 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
#include "xfs_trace.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
#include "xfs_filestream.h"
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 34f2b971ce43..7d0b09c1366e 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -24,6 +24,7 @@
#include "xfs_refcount_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rtalloc.h"
+#include "xfs_ag.h"
/* Convert an xfs_fsmap to an fsmap. */
static void
@@ -157,10 +158,10 @@ struct xfs_getfsmap_info {
struct xfs_fsmap_head *head;
struct fsmap *fsmap_recs; /* mapping records */
struct xfs_buf *agf_bp; /* AGF, for refcount queries */
+ struct xfs_perag *pag; /* AG info, if applicable */
xfs_daddr_t next_daddr; /* next daddr we expect */
u64 missing_owner; /* owner of holes */
u32 dev; /* device id */
- xfs_agnumber_t agno; /* AG number, if applicable */
struct xfs_rmap_irec low; /* low rmap key */
struct xfs_rmap_irec high; /* high rmap key */
bool last; /* last extent? */
@@ -203,14 +204,13 @@ xfs_getfsmap_is_shared(
*stat = false;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- /* rt files will have agno set to NULLAGNUMBER */
- if (info->agno == NULLAGNUMBER)
+ /* rt files will have no perag structure */
+ if (!info->pag)
return 0;
/* Are there any shared blocks here? */
flen = 0;
- cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
- info->agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag);
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
rec->rm_blockcount, &fbno, &flen, false);
@@ -311,7 +311,8 @@ xfs_getfsmap_helper(
if (info->head->fmh_entries >= info->head->fmh_count)
return -ECANCELED;
- trace_xfs_fsmap_mapping(mp, info->dev, info->agno, rec);
+ trace_xfs_fsmap_mapping(mp, info->dev,
+ info->pag ? info->pag->pag_agno : NULLAGNUMBER, rec);
fmr.fmr_device = info->dev;
fmr.fmr_physical = rec_daddr;
@@ -354,7 +355,7 @@ xfs_getfsmap_datadev_helper(
xfs_fsblock_t fsb;
xfs_daddr_t rec_daddr;
- fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.agno, rec->rm_startblock);
+ fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno, rec->rm_startblock);
rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
@@ -372,7 +373,7 @@ xfs_getfsmap_datadev_bnobt_helper(
struct xfs_rmap_irec irec;
xfs_daddr_t rec_daddr;
- rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.agno,
+ rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.pag->pag_agno,
rec->ar_startblock);
irec.rm_startblock = rec->ar_startblock;
@@ -429,8 +430,8 @@ xfs_getfsmap_logdev(
info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
info->missing_owner = XFS_FMR_OWN_FREE;
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+ trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
if (keys[0].fmr_physical > 0)
return 0;
@@ -508,8 +509,8 @@ __xfs_getfsmap_rtdev(
info->high.rm_blockcount = 0;
xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+ trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
return query_fn(tp, info);
}
@@ -572,6 +573,7 @@ __xfs_getfsmap_datadev(
void *priv)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_btree_cur *bt_cur = NULL;
xfs_fsblock_t start_fsb;
xfs_fsblock_t end_fsb;
@@ -610,20 +612,20 @@ __xfs_getfsmap_datadev(
start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);
- /* Query each AG */
- for (info->agno = start_ag; info->agno <= end_ag; info->agno++) {
+ for_each_perag_range(mp, start_ag, end_ag, pag) {
/*
* Set the AG high key from the fsmap high key if this
* is the last AG that we're querying.
*/
- if (info->agno == end_ag) {
+ info->pag = pag;
+ if (pag->pag_agno == end_ag) {
info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp,
end_fsb);
info->high.rm_offset = XFS_BB_TO_FSBT(mp,
keys[1].fmr_offset);
error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
if (error)
- goto err;
+ break;
xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
}
@@ -634,38 +636,45 @@ __xfs_getfsmap_datadev(
info->agf_bp = NULL;
}
- error = xfs_alloc_read_agf(mp, tp, info->agno, 0,
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0,
&info->agf_bp);
if (error)
- goto err;
+ break;
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno,
+ trace_xfs_fsmap_low_key(mp, info->dev, pag->pag_agno,
+ &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, pag->pag_agno,
&info->high);
error = query_fn(tp, info, &bt_cur, priv);
if (error)
- goto err;
+ break;
/*
* Set the AG low key to the start of the AG prior to
* moving on to the next AG.
*/
- if (info->agno == start_ag) {
+ if (pag->pag_agno == start_ag) {
info->low.rm_startblock = 0;
info->low.rm_owner = 0;
info->low.rm_offset = 0;
info->low.rm_flags = 0;
}
- }
- /* Report any gap at the end of the AG */
- info->last = true;
- error = query_fn(tp, info, &bt_cur, priv);
- if (error)
- goto err;
+ /*
+ * If this is the last AG, report any gap at the end of it
+ * before we drop the reference to the perag when the loop
+ * terminates.
+ */
+ if (pag->pag_agno == end_ag) {
+ info->last = true;
+ error = query_fn(tp, info, &bt_cur, priv);
+ if (error)
+ break;
+ }
+ info->pag = NULL;
+ }
-err:
if (bt_cur)
xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR :
XFS_BTREE_NOERROR);
@@ -673,6 +682,13 @@ err:
xfs_trans_brelse(tp, info->agf_bp);
info->agf_bp = NULL;
}
+ if (info->pag) {
+ xfs_perag_put(info->pag);
+ info->pag = NULL;
+ } else if (pag) {
+ /* loop termination case */
+ xfs_perag_put(pag);
+ }
return error;
}
@@ -691,7 +707,7 @@ xfs_getfsmap_datadev_rmapbt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
- info->agno);
+ info->pag);
return xfs_rmap_query_range(*curpp, &info->low, &info->high,
xfs_getfsmap_datadev_helper, info);
}
@@ -724,7 +740,7 @@ xfs_getfsmap_datadev_bnobt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_allocbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
- info->agno, XFS_BTNUM_BNO);
+ info->pag, XFS_BTNUM_BNO);
key->ar_startblock = info->low.rm_startblock;
key[1].ar_startblock = info->high.rm_startblock;
return xfs_alloc_query_range(*curpp, key, &key[1],
@@ -937,7 +953,7 @@ xfs_getfsmap(
info.dev = handlers[i].dev;
info.last = false;
- info.agno = NULLAGNUMBER;
+ info.pag = NULL;
error = handlers[i].fn(tp, dkeys, &info);
if (error)
break;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index be9cf88d2ad7..6ed29b158312 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -538,25 +538,25 @@ xfs_do_force_shutdown(
if (flags & SHUTDOWN_FORCE_UMOUNT) {
xfs_alert(mp,
-"User initiated shutdown received. Shutting down filesystem");
+"User initiated shutdown (0x%x) received. Shutting down filesystem",
+ flags);
return;
}
- xfs_notice(mp,
-"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
- __func__, flags, lnnum, fname, __return_address);
-
if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
-"Corruption of in-memory data detected. Shutting down filesystem");
+"Corruption of in-memory data (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
} else if (logerror) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
- "Log I/O Error Detected. Shutting down filesystem");
+"Log I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
} else {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "I/O Error Detected. Shutting down filesystem");
+"I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
}
xfs_alert(mp,
@@ -576,10 +576,8 @@ xfs_fs_reserve_ag_blocks(
int err2;
mp->m_finobt_nores = false;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
err2 = xfs_ag_resv_init(pag, NULL);
- xfs_perag_put(pag);
if (err2 && !error)
error = err2;
}
@@ -605,10 +603,8 @@ xfs_fs_unreserve_ag_blocks(
int error = 0;
int err2;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
err2 = xfs_ag_resv_free(pag);
- xfs_perag_put(pag);
if (err2 && !error)
error = err2;
}
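
Both reservation loops above record only the first error but keep iterating, so one failing AG does not prevent reservations from being initialised or freed in the remaining AGs. A sketch of that first-error-wins aggregation:

#include <stdio.h>

/* Pretend AG 1 fails while the others succeed. */
static int ag_resv_op(int agno)
{
	return agno == 1 ? -28 /* ENOSPC */ : 0;
}

int main(void)
{
	int error = 0;

	for (int agno = 0; agno < 3; agno++) {
		int err2 = ag_resv_op(agno);

		/* Remember only the first failure; keep walking. */
		if (err2 && !error)
			error = err2;
	}
	printf("first error: %d\n", error);
	return 0;
}
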
diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c
index 8e0cb05a7142..eb10eacabc8f 100644
--- a/fs/xfs/xfs_health.c
+++ b/fs/xfs/xfs_health.c
@@ -9,11 +9,11 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_health.h"
+#include "xfs_ag.h"
/*
* Warn about metadata corruption that we detected but haven't fixed, and
@@ -34,14 +34,12 @@ xfs_health_unmount(
return;
/* Measure AG corruption levels. */
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
xfs_ag_measure_sickness(pag, &sick, &checked);
if (sick) {
trace_xfs_ag_unfixed_corruption(mp, agno, sick);
warn = true;
}
- xfs_perag_put(pag);
}
/* Measure realtime volume corruption levels. */
@@ -231,6 +229,15 @@ xfs_inode_mark_sick(
ip->i_sick |= mask;
ip->i_checked |= mask;
spin_unlock(&ip->i_flags_lock);
+
+ /*
+ * Keep this inode around so we don't lose the sickness report. Scrub
+	 * grabs inodes with DONTCACHE assuming that most inodes are ok, which
+ * is not the case here.
+ */
+ spin_lock(&VFS_I(ip)->i_lock);
+ VFS_I(ip)->i_state &= ~I_DONTCACHE;
+ spin_unlock(&VFS_I(ip)->i_lock);
}
/* Mark parts of an inode healed. */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3c81daca0e9a..6007683482c6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -23,9 +22,65 @@
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
+#include "xfs_ag.h"
#include <linux/iversion.h>
+/* Radix tree tags for incore inode tree. */
+
+/* inode is to be reclaimed */
+#define XFS_ICI_RECLAIM_TAG 0
+/* Inode has speculative preallocations (posteof or cow) to clean. */
+#define XFS_ICI_BLOCKGC_TAG 1
+
+/*
+ * The goal for walking incore inodes. These can correspond with incore inode
+ * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
+ */
+enum xfs_icwalk_goal {
+ /* Goals that are not related to tags; these must be < 0. */
+ XFS_ICWALK_DQRELE = -1,
+
+ /* Goals directly associated with tagged inodes. */
+ XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
+ XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
+};
+
+#define XFS_ICWALK_NULL_TAG (-1U)
+
+/* Compute the inode radix tree tag for this goal. */
+static inline unsigned int
+xfs_icwalk_tag(enum xfs_icwalk_goal goal)
+{
+ return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
+}
+
+static int xfs_icwalk(struct xfs_mount *mp,
+ enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
+static int xfs_icwalk_ag(struct xfs_perag *pag,
+ enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
+
+/*
+ * Private inode cache walk flags for struct xfs_icwalk. Must not
+ * coincide with XFS_ICWALK_FLAGS_VALID.
+ */
+#define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31)
+#define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30)
+#define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29)
+
+/* Stop scanning after icw_scan_limit inodes. */
+#define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28)
+
+#define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27)
+#define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */
+
+#define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \
+ XFS_ICWALK_FLAG_DROP_GDQUOT | \
+ XFS_ICWALK_FLAG_DROP_PDQUOT | \
+ XFS_ICWALK_FLAG_SCAN_LIMIT | \
+ XFS_ICWALK_FLAG_RECLAIM_SICK | \
+ XFS_ICWALK_FLAG_UNION)
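
The private flags carve out the top bits of icw_flags while the user-visible XFS_ICWALK_FLAG_* values (defined in xfs_icache.h below) grow up from bit 0; the BUILD_BUG_ON at the end of this file asserts the two sets stay disjoint. A userspace model of the same compile-time partition check, with _Static_assert standing in for BUILD_BUG_ON and made-up flag names:

#include <stdio.h>

/* Public flags grow up from bit 0... */
#define WALK_FLAG_SYNC		(1U << 0)
#define WALK_FLAG_UID		(1U << 1)
#define WALK_FLAGS_VALID	(WALK_FLAG_SYNC | WALK_FLAG_UID)

/* ...private flags grow down from bit 31. */
#define WALK_FLAG_DROP_UDQUOT	(1U << 31)
#define WALK_FLAG_SCAN_LIMIT	(1U << 30)
#define WALK_PRIVATE_FLAGS	(WALK_FLAG_DROP_UDQUOT | WALK_FLAG_SCAN_LIMIT)

/* Fails the build the moment the two ranges meet in the middle. */
_Static_assert((WALK_PRIVATE_FLAGS & WALK_FLAGS_VALID) == 0,
	       "private walk flags overlap the public flag space");

int main(void)
{
	printf("public 0x%08x, private 0x%08x\n",
	       WALK_FLAGS_VALID, WALK_PRIVATE_FLAGS);
	return 0;
}
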
+
/*
* Allocate and initialise an xfs_inode.
*/
@@ -157,46 +212,94 @@ xfs_reclaim_work_queue(
rcu_read_unlock();
}
-static void
-xfs_perag_set_reclaim_tag(
+/*
+ * Background scanning to trim preallocated space. This is queued based on the
+ * 'speculative_prealloc_lifetime' tunable (5m by default).
+ */
+static inline void
+xfs_blockgc_queue(
struct xfs_perag *pag)
{
+ rcu_read_lock();
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
+ queue_delayed_work(pag->pag_mount->m_gc_workqueue,
+ &pag->pag_blockgc_work,
+ msecs_to_jiffies(xfs_blockgc_secs * 1000));
+ rcu_read_unlock();
+}
+
+/* Set a tag on both the AG incore inode tree and the AG radix tree. */
+static void
+xfs_perag_set_inode_tag(
+ struct xfs_perag *pag,
+ xfs_agino_t agino,
+ unsigned int tag)
+{
struct xfs_mount *mp = pag->pag_mount;
+ bool was_tagged;
lockdep_assert_held(&pag->pag_ici_lock);
- if (pag->pag_ici_reclaimable++)
+
+ was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
+ radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
+
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ pag->pag_ici_reclaimable++;
+
+ if (was_tagged)
return;
- /* propagate the reclaim tag up into the perag radix tree */
+ /* propagate the tag up into the perag radix tree */
spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
+ radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
spin_unlock(&mp->m_perag_lock);
- /* schedule periodic background inode reclaim */
- xfs_reclaim_work_queue(mp);
+ /* start background work */
+ switch (tag) {
+ case XFS_ICI_RECLAIM_TAG:
+ xfs_reclaim_work_queue(mp);
+ break;
+ case XFS_ICI_BLOCKGC_TAG:
+ xfs_blockgc_queue(pag);
+ break;
+ }
- trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+ trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
+/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
-xfs_perag_clear_reclaim_tag(
- struct xfs_perag *pag)
+xfs_perag_clear_inode_tag(
+ struct xfs_perag *pag,
+ xfs_agino_t agino,
+ unsigned int tag)
{
struct xfs_mount *mp = pag->pag_mount;
lockdep_assert_held(&pag->pag_ici_lock);
- if (--pag->pag_ici_reclaimable)
+
+ /*
+ * Reclaim can signal (with a null agino) that it cleared its own tag
+ * by removing the inode from the radix tree.
+ */
+ if (agino != NULLAGINO)
+ radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
+ else
+ ASSERT(tag == XFS_ICI_RECLAIM_TAG);
+
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ pag->pag_ici_reclaimable--;
+
+ if (radix_tree_tagged(&pag->pag_ici_root, tag))
return;
- /* clear the reclaim tag from the perag radix tree */
+ /* clear the tag from the perag radix tree */
spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
+ radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
spin_unlock(&mp->m_perag_lock);
- trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
+ trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
+}
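
xfs_perag_set_inode_tag() and xfs_perag_clear_inode_tag() maintain a two-level summary: a tag on the inode in the per-AG tree, plus a bit in the per-mount perag tree meaning "this AG has at least one tagged inode". Only the first set and the last clear within an AG touch the upper level, which is what the was_tagged and radix_tree_tagged() checks implement. A simplified, array-based model of that propagation (not the radix tree code):

#include <stdbool.h>
#include <stdio.h>

#define AG_COUNT	2
#define INO_PER_AG	8

static bool ino_tag[AG_COUNT][INO_PER_AG];	/* pag_ici_root tags */
static bool ag_tag[AG_COUNT];			/* m_perag_tree tags */

static bool ag_has_tagged(int agno)
{
	for (int i = 0; i < INO_PER_AG; i++)
		if (ino_tag[agno][i])
			return true;
	return false;
}

static void set_inode_tag(int agno, int agino)
{
	bool was_tagged = ag_has_tagged(agno);

	ino_tag[agno][agino] = true;
	if (was_tagged)
		return;		/* summary already set, nothing to do */
	ag_tag[agno] = true;	/* propagate up; kick background work here */
}

static void clear_inode_tag(int agno, int agino)
{
	ino_tag[agno][agino] = false;
	if (ag_has_tagged(agno))
		return;		/* other inodes still tagged */
	ag_tag[agno] = false;	/* last one out clears the summary */
}

int main(void)
{
	set_inode_tag(0, 3);
	set_inode_tag(0, 5);
	clear_inode_tag(0, 3);
	printf("AG 0 summary: %d\n", ag_tag[0]);	/* still 1 */
	clear_inode_tag(0, 5);
	printf("AG 0 summary: %d\n", ag_tag[0]);	/* now 0 */
	return 0;
}

The NULLAGINO special case in the kernel version exists because reclaim deletes the inode from the radix tree outright, which clears its tag as a side effect, so only the bookkeeping and upper-level propagation remain to be done.
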
/*
* We set the inode flag atomically with the radix tree tag.
@@ -204,7 +307,7 @@ xfs_perag_clear_reclaim_tag(
* can go away.
*/
void
-xfs_inode_set_reclaim_tag(
+xfs_inode_mark_reclaimable(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
@@ -214,9 +317,8 @@ xfs_inode_set_reclaim_tag(
spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
- radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_set_reclaim_tag(pag);
+ xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
@@ -224,18 +326,7 @@ xfs_inode_set_reclaim_tag(
xfs_perag_put(pag);
}
-STATIC void
-xfs_inode_clear_reclaim_tag(
- struct xfs_perag *pag,
- xfs_ino_t ino)
-{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(pag->pag_mount, ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_clear_reclaim_tag(pag);
-}
-
-static void
+static inline void
xfs_inew_wait(
struct xfs_inode *ip)
{
@@ -264,14 +355,14 @@ xfs_reinit_inode(
struct xfs_mount *mp,
struct inode *inode)
{
- int error;
- uint32_t nlink = inode->i_nlink;
- uint32_t generation = inode->i_generation;
- uint64_t version = inode_peek_iversion(inode);
- umode_t mode = inode->i_mode;
- dev_t dev = inode->i_rdev;
- kuid_t uid = inode->i_uid;
- kgid_t gid = inode->i_gid;
+ int error;
+ uint32_t nlink = inode->i_nlink;
+ uint32_t generation = inode->i_generation;
+ uint64_t version = inode_peek_iversion(inode);
+ umode_t mode = inode->i_mode;
+ dev_t dev = inode->i_rdev;
+ kuid_t uid = inode->i_uid;
+ kgid_t gid = inode->i_gid;
error = inode_init_always(mp->m_super, inode);
@@ -286,6 +377,74 @@ xfs_reinit_inode(
}
/*
+ * Carefully nudge an inode whose VFS state has been torn down back into a
+ * usable state. Drops the i_flags_lock and the rcu read lock.
+ */
+static int
+xfs_iget_recycle(
+ struct xfs_perag *pag,
+ struct xfs_inode *ip) __releases(&ip->i_flags_lock)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ int error;
+
+ trace_xfs_iget_recycle(ip);
+
+ /*
+ * We need to make it look like the inode is being reclaimed to prevent
+ * the actual reclaim workers from stomping over us while we recycle
+ * the inode. We can't clear the radix tree tag yet as it requires
+ * pag_ici_lock to be held exclusive.
+ */
+ ip->i_flags |= XFS_IRECLAIM;
+
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+ error = xfs_reinit_inode(mp, inode);
+ if (error) {
+ bool wake;
+
+ /*
+ * Re-initializing the inode failed, and we are in deep
+ * trouble. Try to re-add it to the reclaim list.
+ */
+ rcu_read_lock();
+ spin_lock(&ip->i_flags_lock);
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
+ ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
+ ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ trace_xfs_iget_recycle_fail(ip);
+ return error;
+ }
+
+ spin_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ /*
+ * Clear the per-lifetime state in the inode as we are now effectively
+ * a new inode and need to return to the initial state before reuse
+ * occurs.
+ */
+ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
+ ip->i_flags |= XFS_INEW;
+ xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ inode->i_state = I_NEW;
+ spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&pag->pag_ici_lock);
+
+ return 0;
+}
+
+/*
* If we are allocating a new inode, then check what was returned is
* actually a free, empty inode. If we are not allocating an inode,
* then check we didn't find a free inode.
@@ -348,30 +507,21 @@ xfs_iget_cache_hit(
* will not match, so check for that, too.
*/
spin_lock(&ip->i_flags_lock);
- if (ip->i_ino != ino) {
- trace_xfs_iget_skip(ip);
- XFS_STATS_INC(mp, xs_ig_frecycle);
- error = -EAGAIN;
- goto out_error;
- }
-
+ if (ip->i_ino != ino)
+ goto out_skip;
/*
* If we are racing with another cache hit that is currently
* instantiating this inode or currently recycling it out of
- * reclaimabe state, wait for the initialisation to complete
+ * reclaimable state, wait for the initialisation to complete
* before continuing.
*
* XXX(hch): eventually we should do something equivalent to
* wait_on_inode to wait for these flags to be cleared
* instead of polling for it.
*/
- if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
- trace_xfs_iget_skip(ip);
- XFS_STATS_INC(mp, xs_ig_frecycle);
- error = -EAGAIN;
- goto out_error;
- }
+ if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM))
+ goto out_skip;
/*
* Check the inode free state is valid. This also detects lookup
@@ -381,72 +531,21 @@ xfs_iget_cache_hit(
if (error)
goto out_error;
- /*
- * If IRECLAIMABLE is set, we've torn down the VFS inode already.
- * Need to carefully get it back into useable state.
- */
- if (ip->i_flags & XFS_IRECLAIMABLE) {
- trace_xfs_iget_reclaim(ip);
-
- if (flags & XFS_IGET_INCORE) {
- error = -EAGAIN;
- goto out_error;
- }
-
- /*
- * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
- * from stomping over us while we recycle the inode. We can't
- * clear the radix tree reclaimable tag yet as it requires
- * pag_ici_lock to be held exclusive.
- */
- ip->i_flags |= XFS_IRECLAIM;
-
- spin_unlock(&ip->i_flags_lock);
- rcu_read_unlock();
+ /* Skip inodes that have no vfs state. */
+ if ((flags & XFS_IGET_INCORE) &&
+ (ip->i_flags & XFS_IRECLAIMABLE))
+ goto out_skip;
- ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- error = xfs_reinit_inode(mp, inode);
- if (error) {
- bool wake;
- /*
- * Re-initializing the inode failed, and we are in deep
- * trouble. Try to re-add it to the reclaim list.
- */
- rcu_read_lock();
- spin_lock(&ip->i_flags_lock);
- wake = !!__xfs_iflags_test(ip, XFS_INEW);
- ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
- if (wake)
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
- ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
- trace_xfs_iget_reclaim_fail(ip);
- goto out_error;
- }
-
- spin_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
-
- /*
- * Clear the per-lifetime state in the inode as we are now
- * effectively a new inode and need to return to the initial
- * state before reuse occurs.
- */
- ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
- ip->i_flags |= XFS_INEW;
- xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
- inode->i_state = I_NEW;
- ip->i_sick = 0;
- ip->i_checked = 0;
-
- spin_unlock(&ip->i_flags_lock);
- spin_unlock(&pag->pag_ici_lock);
+ /* The inode fits the selection criteria; process it. */
+ if (ip->i_flags & XFS_IRECLAIMABLE) {
+ /* Drops i_flags_lock and RCU read lock. */
+ error = xfs_iget_recycle(pag, ip);
+ if (error)
+ return error;
} else {
/* If the VFS inode is being torn down, pause and try again. */
- if (!igrab(inode)) {
- trace_xfs_iget_skip(ip);
- error = -EAGAIN;
- goto out_error;
- }
+ if (!igrab(inode))
+ goto out_skip;
/* We've got a live one. */
spin_unlock(&ip->i_flags_lock);
@@ -463,13 +562,16 @@ xfs_iget_cache_hit(
return 0;
+out_skip:
+ trace_xfs_iget_skip(ip);
+ XFS_STATS_INC(mp, xs_ig_frecycle);
+ error = -EAGAIN;
out_error:
spin_unlock(&ip->i_flags_lock);
rcu_read_unlock();
return error;
}
-
static int
xfs_iget_cache_miss(
struct xfs_mount *mp,
@@ -715,207 +817,96 @@ xfs_icache_inode_is_allocated(
return 0;
}
-/*
- * The inode lookup is done in batches to keep the amount of lock traffic and
- * radix tree lookups to a minimum. The batch size is a trade off between
- * lookup reduction and stack usage. This is in the reclaim path, so we can't
- * be too greedy.
- */
-#define XFS_LOOKUP_BATCH 32
-
-/*
- * Decide if the given @ip is eligible to be a part of the inode walk, and
- * grab it if so. Returns true if it's ready to go or false if we should just
- * ignore it.
- */
-STATIC bool
-xfs_inode_walk_ag_grab(
- struct xfs_inode *ip,
- int flags)
+#ifdef CONFIG_XFS_QUOTA
+/* Decide if we want to grab this inode to drop its dquots. */
+static bool
+xfs_dqrele_igrab(
+ struct xfs_inode *ip)
{
- struct inode *inode = VFS_I(ip);
- bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
+ bool ret = false;
ASSERT(rcu_read_lock_held());
/* Check for stale RCU freed inode */
spin_lock(&ip->i_flags_lock);
if (!ip->i_ino)
- goto out_unlock_noent;
-
- /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
- __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
- goto out_unlock_noent;
- spin_unlock(&ip->i_flags_lock);
-
- /* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return false;
+ goto out_unlock;
- /* If we can't grab the inode, it must on it's way to reclaim. */
- if (!igrab(inode))
- return false;
+ /*
+ * Skip inodes that are anywhere in the reclaim machinery because we
+ * drop dquots before tagging an inode for reclamation.
+ */
+ if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE))
+ goto out_unlock;
- /* inode is valid */
- return true;
+ /*
+ * The inode looks alive; try to grab a VFS reference so that it won't
+ * get destroyed. If we got the reference, return true to say that
+ * we grabbed the inode.
+ *
+ * If we can't get the reference, then we know the inode had its VFS
+ * state torn down and hasn't yet entered the reclaim machinery. Since
+ * we also know that dquots are detached from an inode before it enters
+ * reclaim, we can skip the inode.
+ */
+ ret = igrab(VFS_I(ip)) != NULL;
-out_unlock_noent:
+out_unlock:
spin_unlock(&ip->i_flags_lock);
- return false;
+ return ret;
}
-/*
- * For a given per-AG structure @pag, grab, @execute, and rele all incore
- * inodes with the given radix tree @tag.
- */
-STATIC int
-xfs_inode_walk_ag(
- struct xfs_perag *pag,
- int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args,
- int tag)
+/* Drop this inode's dquots. */
+static void
+xfs_dqrele_inode(
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
{
- struct xfs_mount *mp = pag->pag_mount;
- uint32_t first_index;
- int last_error = 0;
- int skipped;
- bool done;
- int nr_found;
-
-restart:
- done = false;
- skipped = 0;
- first_index = 0;
- nr_found = 0;
- do {
- struct xfs_inode *batch[XFS_LOOKUP_BATCH];
- int error = 0;
- int i;
-
- rcu_read_lock();
-
- if (tag == XFS_ICI_NO_TAG)
- nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
- (void **)batch, first_index,
- XFS_LOOKUP_BATCH);
- else
- nr_found = radix_tree_gang_lookup_tag(
- &pag->pag_ici_root,
- (void **) batch, first_index,
- XFS_LOOKUP_BATCH, tag);
-
- if (!nr_found) {
- rcu_read_unlock();
- break;
- }
-
- /*
- * Grab the inodes before we drop the lock. if we found
- * nothing, nr == 0 and the loop will be skipped.
- */
- for (i = 0; i < nr_found; i++) {
- struct xfs_inode *ip = batch[i];
-
- if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
- batch[i] = NULL;
-
- /*
- * Update the index for the next lookup. Catch
- * overflows into the next AG range which can occur if
- * we have inodes in the last block of the AG and we
- * are currently pointing to the last inode.
- *
- * Because we may see inodes that are from the wrong AG
- * due to RCU freeing and reallocation, only update the
- * index if it lies in this AG. It was a race that lead
- * us to see this inode, so another lookup from the
- * same index will not find it again.
- */
- if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
- continue;
- first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
- if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- done = true;
- }
-
- /* unlock now we've grabbed the inodes. */
- rcu_read_unlock();
-
- for (i = 0; i < nr_found; i++) {
- if (!batch[i])
- continue;
- if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
- xfs_iflags_test(batch[i], XFS_INEW))
- xfs_inew_wait(batch[i]);
- error = execute(batch[i], args);
- xfs_irele(batch[i]);
- if (error == -EAGAIN) {
- skipped++;
- continue;
- }
- if (error && last_error != -EFSCORRUPTED)
- last_error = error;
- }
-
- /* bail out if the filesystem is corrupted. */
- if (error == -EFSCORRUPTED)
- break;
+ if (xfs_iflags_test(ip, XFS_INEW))
+ xfs_inew_wait(ip);
- cond_resched();
-
- } while (nr_found && !done);
-
- if (skipped) {
- delay(1);
- goto restart;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
+ xfs_qm_dqrele(ip->i_udquot);
+ ip->i_udquot = NULL;
}
- return last_error;
-}
-
-/* Fetch the next (possibly tagged) per-AG structure. */
-static inline struct xfs_perag *
-xfs_inode_walk_get_perag(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- int tag)
-{
- if (tag == XFS_ICI_NO_TAG)
- return xfs_perag_get(mp, agno);
- return xfs_perag_get_tag(mp, agno, tag);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
+ xfs_qm_dqrele(ip->i_gdquot);
+ ip->i_gdquot = NULL;
+ }
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
+ xfs_qm_dqrele(ip->i_pdquot);
+ ip->i_pdquot = NULL;
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_irele(ip);
}
/*
- * Call the @execute function on all incore inodes matching the radix tree
- * @tag.
+ * Detach all dquots from incore inodes if we can. The caller must already
+ * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
+ * not get reattached.
*/
int
-xfs_inode_walk(
+xfs_dqrele_all_inodes(
struct xfs_mount *mp,
- int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args,
- int tag)
+ unsigned int qflags)
{
- struct xfs_perag *pag;
- int error = 0;
- int last_error = 0;
- xfs_agnumber_t ag;
+ struct xfs_icwalk icw = { .icw_flags = 0 };
- ag = 0;
- while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
- ag = pag->pag_agno + 1;
- error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
- xfs_perag_put(pag);
- if (error) {
- last_error = error;
- if (error == -EFSCORRUPTED)
- break;
- }
- }
- return last_error;
+ if (qflags & XFS_UQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
+ if (qflags & XFS_GQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
+ if (qflags & XFS_PQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;
+
+ return xfs_icwalk(mp, XFS_ICWALK_DQRELE, &icw);
}
+#else
+# define xfs_dqrele_igrab(ip) (false)
+# define xfs_dqrele_inode(ip, priv) ((void)0)
+#endif /* CONFIG_XFS_QUOTA */
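
The #else stubs above let the generic walk code call xfs_dqrele_igrab() and xfs_dqrele_inode() without any #ifdef at the call sites; with quota compiled out they collapse to constants, so the DQRELE goal never selects an inode. A tiny model of this config-gated stub pattern, using hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#ifdef WITH_QUOTA
static bool dqrele_igrab(int ino) { return ino % 2 == 0; }
#else
/* Compiled-out variant: statically false, call sites stay #ifdef-free. */
# define dqrele_igrab(ino)	(false)
#endif

int main(void)
{
	for (int ino = 0; ino < 4; ino++)
		printf("inode %d grabbed: %d\n", ino, dqrele_igrab(ino));
	return 0;
}
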
/*
* Grab the inode for reclaim exclusively.
@@ -935,8 +926,9 @@ xfs_inode_walk(
* Return true if we grabbed it, false otherwise.
*/
static bool
-xfs_reclaim_inode_grab(
- struct xfs_inode *ip)
+xfs_reclaim_igrab(
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
{
ASSERT(rcu_read_lock_held());
@@ -947,6 +939,14 @@ xfs_reclaim_inode_grab(
spin_unlock(&ip->i_flags_lock);
return false;
}
+
+ /* Don't reclaim a sick inode unless the caller asked for it. */
+ if (ip->i_sick &&
+ (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
+ spin_unlock(&ip->i_flags_lock);
+ return false;
+ }
+
__xfs_iflags_set(ip, XFS_IRECLAIM);
spin_unlock(&ip->i_flags_lock);
return true;
@@ -1002,6 +1002,8 @@ reclaim:
spin_lock(&ip->i_flags_lock);
ip->i_flags = XFS_IRECLAIM;
ip->i_ino = 0;
+ ip->i_sick = 0;
+ ip->i_checked = 0;
spin_unlock(&ip->i_flags_lock);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1018,7 +1020,7 @@ reclaim:
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ino)))
ASSERT(0);
- xfs_perag_clear_reclaim_tag(pag);
+ xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
spin_unlock(&pag->pag_ici_lock);
/*
@@ -1030,7 +1032,7 @@ reclaim:
* unlocked after the lookup before we go ahead and free it.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_qm_dqdetach(ip);
+ ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
ASSERT(xfs_inode_clean(ip));
@@ -1045,108 +1047,30 @@ out:
xfs_iflags_clear(ip, XFS_IRECLAIM);
}
-/*
- * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
- * corrupted, we still want to try to reclaim all the inodes. If we don't,
- * then a shut down during filesystem unmount reclaim walk leak all the
- * unreclaimed inodes.
- *
- * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
- * so that callers that want to block until all dirty inodes are written back
- * and reclaimed can sanely loop.
- */
-static void
-xfs_reclaim_inodes_ag(
- struct xfs_mount *mp,
- int *nr_to_scan)
+/* Reclaim sick inodes if we're unmounting or the fs went down. */
+static inline bool
+xfs_want_reclaim_sick(
+ struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t ag = 0;
-
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
- unsigned long first_index = 0;
- int done = 0;
- int nr_found = 0;
-
- ag = pag->pag_agno + 1;
-
- first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
- do {
- struct xfs_inode *batch[XFS_LOOKUP_BATCH];
- int i;
-
- rcu_read_lock();
- nr_found = radix_tree_gang_lookup_tag(
- &pag->pag_ici_root,
- (void **)batch, first_index,
- XFS_LOOKUP_BATCH,
- XFS_ICI_RECLAIM_TAG);
- if (!nr_found) {
- done = 1;
- rcu_read_unlock();
- break;
- }
-
- /*
- * Grab the inodes before we drop the lock. if we found
- * nothing, nr == 0 and the loop will be skipped.
- */
- for (i = 0; i < nr_found; i++) {
- struct xfs_inode *ip = batch[i];
-
- if (done || !xfs_reclaim_inode_grab(ip))
- batch[i] = NULL;
-
- /*
- * Update the index for the next lookup. Catch
- * overflows into the next AG range which can
- * occur if we have inodes in the last block of
- * the AG and we are currently pointing to the
- * last inode.
- *
- * Because we may see inodes that are from the
- * wrong AG due to RCU freeing and
- * reallocation, only update the index if it
- * lies in this AG. It was a race that lead us
- * to see this inode, so another lookup from
- * the same index will not find it again.
- */
- if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
- pag->pag_agno)
- continue;
- first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
- if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- done = 1;
- }
-
- /* unlock now we've grabbed the inodes. */
- rcu_read_unlock();
-
- for (i = 0; i < nr_found; i++) {
- if (batch[i])
- xfs_reclaim_inode(batch[i], pag);
- }
-
- *nr_to_scan -= XFS_LOOKUP_BATCH;
- cond_resched();
- } while (nr_found && !done && *nr_to_scan > 0);
-
- if (done)
- first_index = 0;
- WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
- xfs_perag_put(pag);
- }
+ return (mp->m_flags & XFS_MOUNT_UNMOUNTING) ||
+ (mp->m_flags & XFS_MOUNT_NORECOVERY) ||
+ XFS_FORCED_SHUTDOWN(mp);
}
void
xfs_reclaim_inodes(
struct xfs_mount *mp)
{
- int nr_to_scan = INT_MAX;
+ struct xfs_icwalk icw = {
+ .icw_flags = 0,
+ };
+
+ if (xfs_want_reclaim_sick(mp))
+ icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
xfs_ail_push_all_sync(mp->m_ail);
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}
}
@@ -1160,13 +1084,21 @@ xfs_reclaim_inodes(
long
xfs_reclaim_inodes_nr(
struct xfs_mount *mp,
- int nr_to_scan)
+ unsigned long nr_to_scan)
{
+ struct xfs_icwalk icw = {
+ .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT,
+ .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
+ };
+
+ if (xfs_want_reclaim_sick(mp))
+ icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
+
/* kick background reclaimer and push the AIL */
xfs_reclaim_work_queue(mp);
xfs_ail_push_all(mp->m_ail);
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
return 0;
}
@@ -1174,13 +1106,13 @@ xfs_reclaim_inodes_nr(
* Return the number of reclaimable inodes in the filesystem for
* the shrinker to determine how much to reclaim.
*/
-int
+long
xfs_reclaim_inodes_count(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t ag = 0;
- int reclaimable = 0;
+ long reclaimable = 0;
while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
ag = pag->pag_agno + 1;
@@ -1191,20 +1123,20 @@ xfs_reclaim_inodes_count(
}
STATIC bool
-xfs_inode_match_id(
+xfs_icwalk_match_id(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
- !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
+ !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
return false;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
- !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
+ !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
return false;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- ip->i_projid != eofb->eof_prid)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
+ ip->i_projid != icw->icw_prid)
return false;
return true;
@@ -1215,20 +1147,20 @@ xfs_inode_match_id(
* criteria match. This is for global/internal scans only.
*/
STATIC bool
-xfs_inode_match_id_union(
+xfs_icwalk_match_id_union(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
- uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
+ uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
return true;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
- gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
+ gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
return true;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- ip->i_projid == eofb->eof_prid)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
+ ip->i_projid == icw->icw_prid)
return true;
return false;
@@ -1236,29 +1168,29 @@ xfs_inode_match_id_union(
/*
* Is this inode @ip eligible for eof/cow block reclamation, given some
- * filtering parameters @eofb? The inode is eligible if @eofb is null or
+ * filtering parameters @icw? The inode is eligible if @icw is null or
* if the predicate functions match.
*/
static bool
-xfs_inode_matches_eofb(
+xfs_icwalk_match(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
bool match;
- if (!eofb)
+ if (!icw)
return true;
- if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
- match = xfs_inode_match_id_union(ip, eofb);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
+ match = xfs_icwalk_match_id_union(ip, icw);
else
- match = xfs_inode_match_id(ip, eofb);
+ match = xfs_icwalk_match_id(ip, icw);
if (!match)
return false;
/* skip the inode if the file size is too small */
- if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
- XFS_ISIZE(ip) < eofb->eof_min_file_size)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
+ XFS_ISIZE(ip) < icw->icw_min_file_size)
return false;
return true;
@@ -1276,22 +1208,20 @@ xfs_reclaim_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_reclaim_work);
- int nr_to_scan = INT_MAX;
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
xfs_reclaim_work_queue(mp);
}
STATIC int
xfs_inode_free_eofblocks(
struct xfs_inode *ip,
- void *args,
+ struct xfs_icwalk *icw,
unsigned int *lockflags)
{
- struct xfs_eofblocks *eofb = args;
bool wait;
- wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+ wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
return 0;
@@ -1303,7 +1233,7 @@ xfs_inode_free_eofblocks(
if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
return 0;
- if (!xfs_inode_matches_eofb(ip, eofb))
+ if (!xfs_icwalk_match(ip, icw))
return 0;
/*
@@ -1326,22 +1256,6 @@ xfs_inode_free_eofblocks(
return 0;
}
-/*
- * Background scanning to trim preallocated space. This is queued based on the
- * 'speculative_prealloc_lifetime' tunable (5m by default).
- */
-static inline void
-xfs_blockgc_queue(
- struct xfs_perag *pag)
-{
- rcu_read_lock();
- if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
- queue_delayed_work(pag->pag_mount->m_gc_workqueue,
- &pag->pag_blockgc_work,
- msecs_to_jiffies(xfs_blockgc_secs * 1000));
- rcu_read_unlock();
-}
-
static void
xfs_blockgc_set_iflag(
struct xfs_inode *ip,
@@ -1349,7 +1263,6 @@ xfs_blockgc_set_iflag(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
- int tagged;
ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
@@ -1366,24 +1279,8 @@ xfs_blockgc_set_iflag(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
- radix_tree_tag_set(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- if (!tagged) {
- /* propagate the blockgc tag up into the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_set(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
-
- /* kick off background trimming */
- xfs_blockgc_queue(pag);
-
- trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
- _RET_IP_);
- }
+ xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
@@ -1419,19 +1316,8 @@ xfs_blockgc_clear_iflag(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
- /* clear the blockgc tag from the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
- trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
- _RET_IP_);
- }
+ xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
@@ -1492,14 +1378,13 @@ xfs_prep_free_cowblocks(
STATIC int
xfs_inode_free_cowblocks(
struct xfs_inode *ip,
- void *args,
+ struct xfs_icwalk *icw,
unsigned int *lockflags)
{
- struct xfs_eofblocks *eofb = args;
bool wait;
int ret = 0;
- wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+ wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
return 0;
@@ -1507,7 +1392,7 @@ xfs_inode_free_cowblocks(
if (!xfs_prep_free_cowblocks(ip))
return 0;
- if (!xfs_inode_matches_eofb(ip, eofb))
+ if (!xfs_icwalk_match(ip, icw))
return 0;
/*
@@ -1554,14 +1439,6 @@ xfs_inode_clear_cowblocks_tag(
return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
-#define for_each_perag_tag(mp, next_agno, pag, tag) \
- for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
- (pag) != NULL; \
- (next_agno) = (pag)->pag_agno + 1, \
- xfs_perag_put(pag), \
- (pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
-
-
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
@@ -1586,23 +1463,66 @@ xfs_blockgc_start(
xfs_blockgc_queue(pag);
}
+/* Don't try to run block gc on an inode that's in any of these states. */
+#define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
+ XFS_IRECLAIMABLE | \
+ XFS_IRECLAIM)
+/*
+ * Decide if the given @ip is eligible for garbage collection of speculative
+ * preallocations, and grab it if so. Returns true if it's ready to go or
+ * false if we should just ignore it.
+ */
+static bool
+xfs_blockgc_igrab(
+ struct xfs_inode *ip)
+{
+ struct inode *inode = VFS_I(ip);
+
+ ASSERT(rcu_read_lock_held());
+
+ /* Check for stale RCU freed inode */
+ spin_lock(&ip->i_flags_lock);
+ if (!ip->i_ino)
+ goto out_unlock_noent;
+
+ if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
+ goto out_unlock_noent;
+ spin_unlock(&ip->i_flags_lock);
+
+ /* nothing to sync during shutdown */
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return false;
+
+	/* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ return false;
+
+ /* inode is valid */
+ return true;
+
+out_unlock_noent:
+ spin_unlock(&ip->i_flags_lock);
+ return false;
+}
+
/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
struct xfs_inode *ip,
- void *args)
+ struct xfs_icwalk *icw)
{
unsigned int lockflags = 0;
int error;
- error = xfs_inode_free_eofblocks(ip, args, &lockflags);
+ error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
if (error)
goto unlock;
- error = xfs_inode_free_cowblocks(ip, args, &lockflags);
+ error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
if (lockflags)
xfs_iunlock(ip, lockflags);
+ xfs_irele(ip);
return error;
}
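
xfs_blockgc_scan_inode() threads a single lockflags word through both helpers: each helper ORs in whatever locks it took, and the caller drops everything in one place even when a helper fails partway through. A minimal sketch of that accumulate-then-unlock-once pattern:

#include <stdio.h>

#define LOCK_IOLOCK	(1 << 0)
#define LOCK_MMAPLOCK	(1 << 1)

/* Each step may take locks; it records what it took in *lockflags. */
static int free_eofblocks(unsigned int *lockflags)
{
	*lockflags |= LOCK_IOLOCK;	/* pretend we took the iolock */
	return 0;
}

static int free_cowblocks(unsigned int *lockflags)
{
	*lockflags |= LOCK_MMAPLOCK;	/* took another lock... */
	return -5;			/* ...then failed (EIO here) */
}

int main(void)
{
	unsigned int lockflags = 0;
	int error;

	error = free_eofblocks(&lockflags);
	if (!error)
		error = free_cowblocks(&lockflags);

	/* One unlock site covers every path, error or not. */
	if (lockflags)
		printf("dropping locks 0x%x, error %d\n", lockflags, error);
	return 0;
}
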
@@ -1618,8 +1538,7 @@ xfs_blockgc_worker(
if (!sb_start_write_trylock(mp->m_super))
return;
- error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
- XFS_ICI_BLOCKGC_TAG);
+ error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
if (error)
xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
pag->pag_agno, error);
@@ -1633,12 +1552,11 @@ xfs_blockgc_worker(
int
xfs_blockgc_free_space(
struct xfs_mount *mp,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
+ trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
- return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
- XFS_ICI_BLOCKGC_TAG);
+ return xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
}
/*
@@ -1648,7 +1566,7 @@ xfs_blockgc_free_space(
* scan.
*
* Callers must not hold any inode's ILOCK. If requesting a synchronous scan
- * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
+ * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
* MMAPLOCK.
*/
int
@@ -1657,9 +1575,9 @@ xfs_blockgc_free_dquots(
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp,
struct xfs_dquot *pdqp,
- unsigned int eof_flags)
+ unsigned int iwalk_flags)
{
- struct xfs_eofblocks eofb = {0};
+ struct xfs_icwalk icw = {0};
bool do_work = false;
if (!udqp && !gdqp && !pdqp)
@@ -1669,40 +1587,260 @@ xfs_blockgc_free_dquots(
* Run a scan to free blocks using the union filter to cover all
* applicable quotas in a single scan.
*/
- eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;
+ icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
- eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
- eofb.eof_flags |= XFS_EOF_FLAGS_UID;
+ icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
+ icw.icw_flags |= XFS_ICWALK_FLAG_UID;
do_work = true;
}
if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
- eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
- eofb.eof_flags |= XFS_EOF_FLAGS_GID;
+ icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
+ icw.icw_flags |= XFS_ICWALK_FLAG_GID;
do_work = true;
}
if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
- eofb.eof_prid = pdqp->q_id;
- eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
+ icw.icw_prid = pdqp->q_id;
+ icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
do_work = true;
}
if (!do_work)
return 0;
- return xfs_blockgc_free_space(mp, &eofb);
+ return xfs_blockgc_free_space(mp, &icw);
}
/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
struct xfs_inode *ip,
- unsigned int eof_flags)
+ unsigned int iwalk_flags)
{
return xfs_blockgc_free_dquots(ip->i_mount,
xfs_inode_dquot(ip, XFS_DQTYPE_USER),
xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
- xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
+ xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
+}
+
+/* XFS Inode Cache Walking Code */
+
+/*
+ * The inode lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade off between
+ * lookup reduction and stack usage. This is in the reclaim path, so we can't
+ * be too greedy.
+ */
+#define XFS_LOOKUP_BATCH 32
+
+
+/*
+ * Decide if we want to grab this inode in anticipation of doing work towards
+ * the goal.
+ */
+static inline bool
+xfs_icwalk_igrab(
+ enum xfs_icwalk_goal goal,
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
+{
+ switch (goal) {
+ case XFS_ICWALK_DQRELE:
+ return xfs_dqrele_igrab(ip);
+ case XFS_ICWALK_BLOCKGC:
+ return xfs_blockgc_igrab(ip);
+ case XFS_ICWALK_RECLAIM:
+ return xfs_reclaim_igrab(ip, icw);
+ default:
+ return false;
+ }
+}
+
+/*
+ * Process an inode. Each processing function must handle any state changes
+ * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
+ */
+static inline int
+xfs_icwalk_process_inode(
+ enum xfs_icwalk_goal goal,
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ struct xfs_icwalk *icw)
+{
+ int error = 0;
+
+ switch (goal) {
+ case XFS_ICWALK_DQRELE:
+ xfs_dqrele_inode(ip, icw);
+ break;
+ case XFS_ICWALK_BLOCKGC:
+ error = xfs_blockgc_scan_inode(ip, icw);
+ break;
+ case XFS_ICWALK_RECLAIM:
+ xfs_reclaim_inode(ip, pag);
+ break;
+ }
+ return error;
+}
+
+/*
+ * For a given per-AG structure @pag and a goal, grab qualifying inodes and
+ * process them in some manner.
+ */
+static int
+xfs_icwalk_ag(
+ struct xfs_perag *pag,
+ enum xfs_icwalk_goal goal,
+ struct xfs_icwalk *icw)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+ uint32_t first_index;
+ int last_error = 0;
+ int skipped;
+ bool done;
+ int nr_found;
+
+restart:
+ done = false;
+ skipped = 0;
+ if (goal == XFS_ICWALK_RECLAIM)
+ first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
+ else
+ first_index = 0;
+ nr_found = 0;
+ do {
+ struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+ unsigned int tag = xfs_icwalk_tag(goal);
+ int error = 0;
+ int i;
+
+ rcu_read_lock();
+
+ if (tag == XFS_ICWALK_NULL_TAG)
+ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+ (void **)batch, first_index,
+ XFS_LOOKUP_BATCH);
+ else
+ nr_found = radix_tree_gang_lookup_tag(
+ &pag->pag_ici_root,
+ (void **) batch, first_index,
+ XFS_LOOKUP_BATCH, tag);
+
+ if (!nr_found) {
+ done = true;
+ rcu_read_unlock();
+ break;
+ }
+
+ /*
+		 * Grab the inodes before we drop the lock. If we found
+ * nothing, nr == 0 and the loop will be skipped.
+ */
+ for (i = 0; i < nr_found; i++) {
+ struct xfs_inode *ip = batch[i];
+
+ if (done || !xfs_icwalk_igrab(goal, ip, icw))
+ batch[i] = NULL;
+
+ /*
+ * Update the index for the next lookup. Catch
+ * overflows into the next AG range which can occur if
+ * we have inodes in the last block of the AG and we
+ * are currently pointing to the last inode.
+ *
+ * Because we may see inodes that are from the wrong AG
+ * due to RCU freeing and reallocation, only update the
+		 * index if it lies in this AG. It was a race that led
+ * us to see this inode, so another lookup from the
+ * same index will not find it again.
+ */
+ if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+ continue;
+ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+ if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+ done = true;
+ }
+
+		/* unlock now that we've grabbed the inodes. */
+ rcu_read_unlock();
+
+ for (i = 0; i < nr_found; i++) {
+ if (!batch[i])
+ continue;
+ error = xfs_icwalk_process_inode(goal, batch[i], pag,
+ icw);
+ if (error == -EAGAIN) {
+ skipped++;
+ continue;
+ }
+ if (error && last_error != -EFSCORRUPTED)
+ last_error = error;
+ }
+
+ /* bail out if the filesystem is corrupted. */
+ if (error == -EFSCORRUPTED)
+ break;
+
+ cond_resched();
+
+ if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
+ icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
+ if (icw->icw_scan_limit <= 0)
+ break;
+ }
+ } while (nr_found && !done);
+
+ if (goal == XFS_ICWALK_RECLAIM) {
+ if (done)
+ first_index = 0;
+ WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
+ }
+
+ if (skipped) {
+ delay(1);
+ goto restart;
+ }
+ return last_error;
+}
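
xfs_icwalk_ag() above processes inodes in batches of XFS_LOOKUP_BATCH, advancing first_index past each inode it saw and treating an unsigned wrap of the cursor as end-of-AG; for the reclaim goal the cursor is also persisted in the perag so the next scan resumes where this one stopped. A userspace sketch of that batched, resumable lookup, with a sorted array standing in for radix_tree_gang_lookup():

#include <stdbool.h>
#include <stdio.h>

#define LOOKUP_BATCH	4

/* Sorted inode numbers in one AG; stands in for the radix tree. */
static const unsigned int inos[] = { 3, 9, 17, 21, 40, 41, 42, 250 };
#define NR_INOS	(sizeof(inos) / sizeof(inos[0]))

/* Return up to LOOKUP_BATCH entries with number >= first_index. */
static int gang_lookup(unsigned int first_index, unsigned int *batch)
{
	int nr = 0;

	for (unsigned int i = 0; i < NR_INOS && nr < LOOKUP_BATCH; i++)
		if (inos[i] >= first_index)
			batch[nr++] = inos[i];
	return nr;
}

int main(void)
{
	unsigned int batch[LOOKUP_BATCH];
	unsigned int first_index = 0;	/* resumable cursor */
	bool done = false;
	int nr_found;

	do {
		nr_found = gang_lookup(first_index, batch);
		if (!nr_found)
			break;
		for (int i = 0; i < nr_found; i++) {
			printf("process inode %u\n", batch[i]);
			/*
			 * Advance past this inode; if the cursor wraps we
			 * have walked off the end of the keyspace, like the
			 * AGINO overflow check in xfs_icwalk_ag().
			 */
			first_index = batch[i] + 1;
			if (first_index < batch[i])
				done = true;
		}
	} while (nr_found && !done);
	return 0;
}

The skipped/delay(1)/restart handling and the icw_scan_limit accounting are omitted here; they layer on top of the same loop.
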
+
+/* Fetch the next (possibly tagged) per-AG structure. */
+static inline struct xfs_perag *
+xfs_icwalk_get_perag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ enum xfs_icwalk_goal goal)
+{
+ unsigned int tag = xfs_icwalk_tag(goal);
+
+ if (tag == XFS_ICWALK_NULL_TAG)
+ return xfs_perag_get(mp, agno);
+ return xfs_perag_get_tag(mp, agno, tag);
+}
+
+/* Walk all incore inodes to achieve a given goal. */
+static int
+xfs_icwalk(
+ struct xfs_mount *mp,
+ enum xfs_icwalk_goal goal,
+ struct xfs_icwalk *icw)
+{
+ struct xfs_perag *pag;
+ int error = 0;
+ int last_error = 0;
+ xfs_agnumber_t agno = 0;
+
+ while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
+ agno = pag->pag_agno + 1;
+ error = xfs_icwalk_ag(pag, goal, icw);
+ xfs_perag_put(pag);
+ if (error) {
+ last_error = error;
+ if (error == -EFSCORRUPTED)
+ break;
+ }
+ }
+ return last_error;
+ BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index d1fddb152420..c751cc32dc46 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -9,22 +9,27 @@
struct xfs_mount;
struct xfs_perag;
-struct xfs_eofblocks {
- __u32 eof_flags;
- kuid_t eof_uid;
- kgid_t eof_gid;
- prid_t eof_prid;
- __u64 eof_min_file_size;
+struct xfs_icwalk {
+ __u32 icw_flags;
+ kuid_t icw_uid;
+ kgid_t icw_gid;
+ prid_t icw_prid;
+ __u64 icw_min_file_size;
+ long icw_scan_limit;
};
-/*
- * tags for inode radix tree
- */
-#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
- in xfs_inode_walk */
-#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
-/* Inode has speculative preallocations (posteof or cow) to clean. */
-#define XFS_ICI_BLOCKGC_TAG 1
+/* Flags that reflect xfs_fs_eofblocks functionality. */
+#define XFS_ICWALK_FLAG_SYNC (1U << 0) /* sync/wait mode scan */
+#define XFS_ICWALK_FLAG_UID (1U << 1) /* filter by uid */
+#define XFS_ICWALK_FLAG_GID (1U << 2) /* filter by gid */
+#define XFS_ICWALK_FLAG_PRID (1U << 3) /* filter by project id */
+#define XFS_ICWALK_FLAG_MINFILESIZE (1U << 4) /* filter by min file size */
+
+#define XFS_ICWALK_FLAGS_VALID (XFS_ICWALK_FLAG_SYNC | \
+ XFS_ICWALK_FLAG_UID | \
+ XFS_ICWALK_FLAG_GID | \
+ XFS_ICWALK_FLAG_PRID | \
+ XFS_ICWALK_FLAG_MINFILESIZE)
/*
* Flags for xfs_iget()
@@ -34,11 +39,6 @@ struct xfs_eofblocks {
#define XFS_IGET_DONTCACHE 0x4
#define XFS_IGET_INCORE 0x8 /* don't read from disk or reinit */
-/*
- * flags for AG inode iterator
- */
-#define XFS_INODE_WALK_INEW_WAIT 0x1 /* wait on new inodes */
-
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -49,16 +49,16 @@ void xfs_inode_free(struct xfs_inode *ip);
void xfs_reclaim_worker(struct work_struct *work);
void xfs_reclaim_inodes(struct xfs_mount *mp);
-int xfs_reclaim_inodes_count(struct xfs_mount *mp);
-long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
+long xfs_reclaim_inodes_count(struct xfs_mount *mp);
+long xfs_reclaim_inodes_nr(struct xfs_mount *mp, unsigned long nr_to_scan);
-void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+void xfs_inode_mark_reclaimable(struct xfs_inode *ip);
int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
- unsigned int eof_flags);
-int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int eof_flags);
-int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_eofblocks *eofb);
+ unsigned int iwalk_flags);
+int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
+int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icw);
void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -68,9 +68,11 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
void xfs_blockgc_worker(struct work_struct *work);
-int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args, int tag);
+#ifdef CONFIG_XFS_QUOTA
+int xfs_dqrele_all_inodes(struct xfs_mount *mp, unsigned int qflags);
+#else
+# define xfs_dqrele_all_inodes(mp, qflags) (0)
+#endif
int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, bool *inuse);
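
With the renamed structure and flag namespace above, a filtered scan is built by filling in a struct xfs_icwalk and passing it to xfs_blockgc_free_space(). A hedged in-kernel sketch (the uid value is illustrative; error handling elided):

	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC |
				  XFS_ICWALK_FLAG_UID,
		.icw_uid	= make_kuid(current_user_ns(), 1000),
	};
	int			error;

	/* Synchronously trim speculative preallocations for uid 1000. */
	error = xfs_blockgc_free_space(mp, &icw);
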
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1db99a909b23..a835ceb79ba5 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -11,7 +11,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
@@ -35,6 +34,7 @@
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
+#include "xfs_ag.h"
kmem_zone_t *xfs_inode_zone;
@@ -45,7 +45,8 @@ kmem_zone_t *xfs_inode_zone;
#define XFS_ITRUNC_MAX_EXTENTS 2
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
-STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
+STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
+ struct xfs_inode *);
/*
* helper function to extract extent size hint from inode
@@ -778,7 +779,7 @@ xfs_inode_inherit_flags2(
* Initialise a newly allocated inode and return the in-core inode to the
* caller locked exclusively.
*/
-static int
+int
xfs_init_new_inode(
struct user_namespace *mnt_userns,
struct xfs_trans *tp,
@@ -915,57 +916,6 @@ xfs_init_new_inode(
}
/*
- * Allocates a new inode from disk and return a pointer to the incore copy. This
- * routine will internally commit the current transaction and allocate a new one
- * if we needed to allocate more on-disk free inodes to perform the requested
- * operation.
- *
- * If we are allocating quota inodes, we do not have a parent inode to attach to
- * or associate with (i.e. dp == NULL) because they are not linked into the
- * directory structure - they are attached directly to the superblock - and so
- * have no parent.
- */
-int
-xfs_dir_ialloc(
- struct user_namespace *mnt_userns,
- struct xfs_trans **tpp,
- struct xfs_inode *dp,
- umode_t mode,
- xfs_nlink_t nlink,
- dev_t rdev,
- prid_t prid,
- bool init_xattrs,
- struct xfs_inode **ipp)
-{
- struct xfs_buf *agibp;
- xfs_ino_t parent_ino = dp ? dp->i_ino : 0;
- xfs_ino_t ino;
- int error;
-
- ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
-
- /*
- * Call the space management code to pick the on-disk inode to be
- * allocated.
- */
- error = xfs_dialloc_select_ag(tpp, parent_ino, mode, &agibp);
- if (error)
- return error;
-
- if (!agibp)
- return -ENOSPC;
-
- /* Allocate an inode from the selected AG */
- error = xfs_dialloc_ag(*tpp, agibp, parent_ino, &ino);
- if (error)
- return error;
- ASSERT(ino != NULLFSINO);
-
- return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
- prid, init_xattrs, ipp);
-}
-
-/*
* Decrement the link count on an inode & log the change. If this causes the
* link count to go to zero, move the inode to AGI unlinked list so that it can
* be freed when the last active reference goes away via xfs_inactive().
@@ -1022,6 +972,7 @@ xfs_create(
struct xfs_dquot *pdqp = NULL;
struct xfs_trans_res *tres;
uint resblks;
+ xfs_ino_t ino;
trace_xfs_create(dp, name);
@@ -1078,14 +1029,16 @@ xfs_create(
 * entry pointing to them, but a directory also has the "." entry
* pointing to itself.
*/
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
- prid, init_xattrs, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
+ is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
if (error)
goto out_trans_cancel;
/*
* Now we join the directory inode to the transaction. We do not do it
- * earlier because xfs_dir_ialloc might commit the previous transaction
+ * earlier because xfs_dialloc might commit the previous transaction
* (and release all the locks). An error from here on will result in
* the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
@@ -1175,6 +1128,7 @@ xfs_create_tmpfile(
struct xfs_dquot *pdqp = NULL;
struct xfs_trans_res *tres;
uint resblks;
+ xfs_ino_t ino;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -1199,8 +1153,10 @@ xfs_create_tmpfile(
if (error)
goto out_release_dquots;
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid,
- false, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
+ 0, 0, prid, false, &ip);
if (error)
goto out_trans_cancel;
@@ -1315,7 +1271,11 @@ xfs_link(
* Handle initial link state of O_TMPFILE inode
*/
if (VFS_I(sip)->i_nlink == 0) {
- error = xfs_iunlink_remove(tp, sip);
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, sip);
+ xfs_perag_put(pag);
if (error)
goto error_return;
}
@@ -1716,7 +1676,7 @@ xfs_inactive(
*/
if (VFS_I(ip)->i_mode == 0) {
ASSERT(ip->i_df.if_broot_bytes == 0);
- return;
+ goto out;
}
mp = ip->i_mount;
@@ -1724,11 +1684,11 @@ xfs_inactive(
/* If this is a read-only mount, don't do this (would generate I/O) */
if (mp->m_flags & XFS_MOUNT_RDONLY)
- return;
+ goto out;
/* Metadata inodes require explicit resource cleanup. */
if (xfs_is_metadata_inode(ip))
- return;
+ goto out;
/* Try to clean out the cow blocks if there are any. */
if (xfs_inode_has_cow_data(ip))
@@ -1747,7 +1707,7 @@ xfs_inactive(
if (xfs_can_free_eofblocks(ip, true))
xfs_free_eofblocks(ip);
- return;
+ goto out;
}
if (S_ISREG(VFS_I(ip)->i_mode) &&
@@ -1757,14 +1717,14 @@ xfs_inactive(
error = xfs_qm_dqattach(ip);
if (error)
- return;
+ goto out;
if (S_ISLNK(VFS_I(ip)->i_mode))
error = xfs_inactive_symlink(ip);
else if (truncate)
error = xfs_inactive_truncate(ip);
if (error)
- return;
+ goto out;
/*
* If there are attributes associated with the file then blow them away
@@ -1774,7 +1734,7 @@ xfs_inactive(
if (XFS_IFORK_Q(ip)) {
error = xfs_attr_inactive(ip);
if (error)
- return;
+ goto out;
}
ASSERT(!ip->i_afp);
@@ -1783,12 +1743,12 @@ xfs_inactive(
/*
* Free the inode.
*/
- error = xfs_inactive_ifree(ip);
- if (error)
- return;
+ xfs_inactive_ifree(ip);
+out:
/*
- * Release the dquots held by inode, if any.
+ * We're done making metadata updates for this inode, so we can release
+ * the attached dquots.
*/
xfs_qm_dqdetach(ip);
}
@@ -2008,7 +1968,7 @@ xfs_iunlink_destroy(
STATIC int
xfs_iunlink_update_bucket(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_buf *agibp,
unsigned int bucket_index,
xfs_agino_t new_agino)
@@ -2017,10 +1977,10 @@ xfs_iunlink_update_bucket(
xfs_agino_t old_value;
int offset;
- ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
+ ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
+ trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
old_value, new_agino);
/*
@@ -2044,7 +2004,7 @@ xfs_iunlink_update_bucket(
STATIC void
xfs_iunlink_update_dinode(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_buf *ibp,
struct xfs_dinode *dip,
@@ -2054,9 +2014,9 @@ xfs_iunlink_update_dinode(
struct xfs_mount *mp = tp->t_mountp;
int offset;
- ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+ ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
- trace_xfs_iunlink_update_dinode(mp, agno, agino,
+ trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
be32_to_cpu(dip->di_next_unlinked), next_agino);
dip->di_next_unlinked = cpu_to_be32(next_agino);
@@ -2074,7 +2034,7 @@ STATIC int
xfs_iunlink_update_inode(
struct xfs_trans *tp,
struct xfs_inode *ip,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t next_agino,
xfs_agino_t *old_next_agino)
{
@@ -2084,7 +2044,7 @@ xfs_iunlink_update_inode(
xfs_agino_t old_value;
int error;
- ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+ ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
if (error)
@@ -2093,7 +2053,7 @@ xfs_iunlink_update_inode(
/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);
- if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
@@ -2116,7 +2076,7 @@ xfs_iunlink_update_inode(
}
/* Ok, update the new pointer. */
- xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
ibp, dip, &ip->i_imap, next_agino);
return 0;
out:
@@ -2137,10 +2097,10 @@ xfs_iunlink(
struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_agi *agi;
struct xfs_buf *agibp;
xfs_agino_t next_agino;
- xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
int error;
@@ -2149,10 +2109,12 @@ xfs_iunlink(
ASSERT(VFS_I(ip)->i_mode != 0);
trace_xfs_iunlink(ip);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+
/* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, agno, &agibp);
+ error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
if (error)
- return error;
+ goto out;
agi = agibp->b_addr;
/*
@@ -2162,9 +2124,10 @@ xfs_iunlink(
*/
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, agno, next_agino)) {
+ !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
xfs_buf_mark_corrupt(agibp);
- return -EFSCORRUPTED;
+ error = -EFSCORRUPTED;
+ goto out;
}
if (next_agino != NULLAGINO) {
@@ -2174,23 +2137,26 @@ xfs_iunlink(
* There is already another inode in the bucket, so point this
* inode to the current head of the list.
*/
- error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
+ error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
&old_agino);
if (error)
- return error;
+ goto out;
ASSERT(old_agino == NULLAGINO);
/*
* agino has been unlinked, add a backref from the next inode
* back to agino.
*/
- error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
+ error = xfs_iunlink_add_backref(pag, agino, next_agino);
if (error)
- return error;
+ goto out;
}
/* Point the head of the list to point to this inode. */
- return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
+ error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
+out:
+ xfs_perag_put(pag);
+ return error;
}
/* Return the imap, dinode pointer, and buffer for an inode. */
@@ -2238,14 +2204,13 @@ xfs_iunlink_map_ino(
STATIC int
xfs_iunlink_map_prev(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t head_agino,
xfs_agino_t target_agino,
xfs_agino_t *agino,
struct xfs_imap *imap,
struct xfs_dinode **dipp,
- struct xfs_buf **bpp,
- struct xfs_perag *pag)
+ struct xfs_buf **bpp)
{
struct xfs_mount *mp = tp->t_mountp;
xfs_agino_t next_agino;
@@ -2257,7 +2222,8 @@ xfs_iunlink_map_prev(
/* See if our backref cache can find it faster. */
*agino = xfs_iunlink_lookup_backref(pag, target_agino);
if (*agino != NULLAGINO) {
- error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
+ error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
+ dipp, bpp);
if (error)
return error;
@@ -2273,7 +2239,7 @@ xfs_iunlink_map_prev(
WARN_ON_ONCE(1);
}
- trace_xfs_iunlink_map_prev_fallback(mp, agno);
+ trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
/* Otherwise, walk the entire bucket until we find it. */
next_agino = head_agino;
@@ -2284,8 +2250,8 @@ xfs_iunlink_map_prev(
xfs_trans_brelse(tp, *bpp);
*agino = next_agino;
- error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
- bpp);
+ error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
+ dipp, bpp);
if (error)
return error;
@@ -2294,7 +2260,7 @@ xfs_iunlink_map_prev(
* Make sure this pointer is valid and isn't an obvious
* infinite loop.
*/
- if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
+ if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
next_agino == unlinked_agino) {
XFS_CORRUPTION_ERROR(__func__,
XFS_ERRLEVEL_LOW, mp,
@@ -2314,6 +2280,7 @@ xfs_iunlink_map_prev(
STATIC int
xfs_iunlink_remove(
struct xfs_trans *tp,
+ struct xfs_perag *pag,
struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
@@ -2321,7 +2288,6 @@ xfs_iunlink_remove(
struct xfs_buf *agibp;
struct xfs_buf *last_ibp;
struct xfs_dinode *last_dip = NULL;
- xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
xfs_agino_t next_agino;
xfs_agino_t head_agino;
@@ -2331,7 +2297,7 @@ xfs_iunlink_remove(
trace_xfs_iunlink_remove(ip);
/* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, agno, &agibp);
+ error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
if (error)
return error;
agi = agibp->b_addr;
@@ -2341,7 +2307,7 @@ xfs_iunlink_remove(
* go on. Make sure the head pointer isn't garbage.
*/
head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (!xfs_verify_agino(mp, agno, head_agino)) {
+ if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
agi, sizeof(*agi));
return -EFSCORRUPTED;
@@ -2352,7 +2318,7 @@ xfs_iunlink_remove(
* the old pointer value so that we can update whatever was previous
* to us in the list to point to whatever was next in the list.
*/
- error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
+ error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
if (error)
return error;
@@ -2364,8 +2330,7 @@ xfs_iunlink_remove(
* this inode's backref to point from the next inode.
*/
if (next_agino != NULLAGINO) {
- error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
- NULLAGINO);
+ error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
if (error)
return error;
}
@@ -2375,14 +2340,13 @@ xfs_iunlink_remove(
xfs_agino_t prev_agino;
/* We need to search the list for the inode being freed. */
- error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
- &prev_agino, &imap, &last_dip, &last_ibp,
- agibp->b_pag);
+ error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
+ &prev_agino, &imap, &last_dip, &last_ibp);
if (error)
return error;
/* Point the previous inode on the list to the next inode. */
- xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
+ xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
last_dip, &imap, next_agino);
/*
@@ -2398,7 +2362,7 @@ xfs_iunlink_remove(
}
/* Point the head of the list to the next unlinked inode. */
- return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
+ return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
next_agino);
}
@@ -2409,12 +2373,11 @@ xfs_iunlink_remove(
*/
static void
xfs_ifree_mark_inode_stale(
- struct xfs_buf *bp,
+ struct xfs_perag *pag,
struct xfs_inode *free_ip,
xfs_ino_t inum)
{
- struct xfs_mount *mp = bp->b_mount;
- struct xfs_perag *pag = bp->b_pag;
+ struct xfs_mount *mp = pag->pag_mount;
struct xfs_inode_log_item *iip;
struct xfs_inode *ip;
@@ -2504,10 +2467,11 @@ out_iflags_unlock:
* inodes that are in memory - they all must be marked stale and attached to
* the cluster buffer.
*/
-STATIC int
+static int
xfs_ifree_cluster(
- struct xfs_inode *free_ip,
struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_inode *free_ip,
struct xfs_icluster *xic)
{
struct xfs_mount *mp = free_ip->i_mount;
@@ -2569,7 +2533,7 @@ xfs_ifree_cluster(
* already marked XFS_ISTALE.
*/
for (i = 0; i < igeo->inodes_per_cluster; i++)
- xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
+ xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
xfs_trans_stale_inode_buf(tp, bp);
xfs_trans_binval(tp, bp);
@@ -2592,9 +2556,11 @@ xfs_ifree(
struct xfs_trans *tp,
struct xfs_inode *ip)
{
- int error;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
struct xfs_icluster xic = { 0 };
struct xfs_inode_log_item *iip = ip->i_itemp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(VFS_I(ip)->i_nlink == 0);
@@ -2602,16 +2568,18 @@ xfs_ifree(
ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
ASSERT(ip->i_nblocks == 0);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+
/*
* Pull the on-disk inode from the AGI unlinked list.
*/
- error = xfs_iunlink_remove(tp, ip);
+ error = xfs_iunlink_remove(tp, pag, ip);
if (error)
- return error;
+ goto out;
- error = xfs_difree(tp, ip->i_ino, &xic);
+ error = xfs_difree(tp, pag, ip->i_ino, &xic);
if (error)
- return error;
+ goto out;
/*
* Free any local-format data sitting around before we reset the
@@ -2626,7 +2594,7 @@ xfs_ifree(
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
ip->i_diflags = 0;
- ip->i_diflags2 = ip->i_mount->m_ino_geo.new_diflags2;
+ ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
ip->i_forkoff = 0; /* mark the attr fork not in use */
ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
@@ -2645,8 +2613,9 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (xic.deleted)
- error = xfs_ifree_cluster(ip, tp, &xic);
-
+ error = xfs_ifree_cluster(tp, pag, ip, &xic);
+out:
+ xfs_perag_put(pag);
return error;
}
@@ -2664,7 +2633,7 @@ xfs_iunpin(
trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
/* Give the log a push to start the unpinning I/O */
- xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
+ xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
}
@@ -3250,8 +3219,13 @@ xfs_rename(
* in future.
*/
if (wip) {
+ struct xfs_perag *pag;
+
ASSERT(VFS_I(wip)->i_nlink == 0);
- error = xfs_iunlink_remove(tp, wip);
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, wip);
+ xfs_perag_put(pag);
if (error)
goto out_trans_cancel;
@@ -3673,16 +3647,16 @@ int
xfs_log_force_inode(
struct xfs_inode *ip)
{
- xfs_lsn_t lsn = 0;
+ xfs_csn_t seq = 0;
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip))
- lsn = ip->i_itemp->ili_last_lsn;
+ seq = ip->i_itemp->ili_commit_seq;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- if (!lsn)
+ if (!seq)
return 0;
- return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
+ return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
}
/*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ca826cfba91c..4b6703dbffb8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -431,11 +431,10 @@ void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
-int xfs_dir_ialloc(struct user_namespace *mnt_userns,
- struct xfs_trans **tpp, struct xfs_inode *dp,
- umode_t mode, xfs_nlink_t nlink, dev_t dev,
- prid_t prid, bool need_xattr,
- struct xfs_inode **ipp);
+int xfs_init_new_inode(struct user_namespace *mnt_userns, struct xfs_trans *tp,
+ struct xfs_inode *pip, xfs_ino_t ino, umode_t mode,
+ xfs_nlink_t nlink, dev_t rdev, prid_t prid, bool init_xattrs,
+ struct xfs_inode **ipp);
static inline int
xfs_itruncate_extents(
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6764d12342da..35de30849fcc 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -28,6 +28,20 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
return container_of(lip, struct xfs_inode_log_item, ili_item);
}
+/*
+ * The logged size of an inode fork is always the current size of the inode
+ * fork. This means that when an inode fork is relogged, the size of the logged
+ * region is determined by the current state, not the combination of the
+ * previously logged state + the current state. This relogging behaviour
+ * differs from most other log items, which retain the size of the
+ * previously logged changes when smaller regions are relogged.
+ *
+ * Hence for operations that remove data from the inode fork (e.g. shortform
+ * dir/attr removal, extent format extent removal, etc.), the relogged inode
+ * gets -smaller- rather than staying the same size as the previously logged
+ * region, and this can result in the committing transaction reducing the
+ * amount of space being consumed by the CIL.
+ */
STATIC void
xfs_inode_item_data_fork_size(
struct xfs_inode_log_item *iip,
@@ -629,9 +643,9 @@ xfs_inode_item_committed(
STATIC void
xfs_inode_item_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
- INODE_ITEM(lip)->ili_last_lsn = commit_lsn;
+ INODE_ITEM(lip)->ili_commit_seq = seq;
return xfs_inode_item_release(lip);
}
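
To make the relogging comment above concrete, a worked example with hypothetical sizes:

	/*
	 * A shortform directory fork logged at 120 bytes pins 120 bytes
	 * of CIL space. Remove an entry and relog: the fork is now, say,
	 * 96 bytes, so the item's CIL footprint drops to 96 bytes. The
	 * committing transaction has *reduced* CIL space usage, which
	 * items whose logged regions only ever grow can never do.
	 */
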
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 4b926e32831c..403b45ab9aa2 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -33,7 +33,7 @@ struct xfs_inode_log_item {
unsigned int ili_fields; /* fields to be logged */
unsigned int ili_fsync_fields; /* logged since last fsync */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
- xfs_lsn_t ili_last_lsn; /* lsn at last transaction */
+ xfs_csn_t ili_commit_seq; /* last transaction commit */
};
static inline int xfs_inode_clean(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7b1796cede10..65270e63c032 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1875,7 +1875,7 @@ out:
static inline int
xfs_fs_eofblocks_from_user(
struct xfs_fs_eofblocks *src,
- struct xfs_eofblocks *dst)
+ struct xfs_icwalk *dst)
{
if (src->eof_version != XFS_EOFBLOCKS_VERSION)
return -EINVAL;
@@ -1887,21 +1887,32 @@ xfs_fs_eofblocks_from_user(
memchr_inv(src->pad64, 0, sizeof(src->pad64)))
return -EINVAL;
- dst->eof_flags = src->eof_flags;
- dst->eof_prid = src->eof_prid;
- dst->eof_min_file_size = src->eof_min_file_size;
-
- dst->eof_uid = INVALID_UID;
+ dst->icw_flags = 0;
+ if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
+ dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
+ if (src->eof_flags & XFS_EOF_FLAGS_UID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_UID;
+ if (src->eof_flags & XFS_EOF_FLAGS_GID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_GID;
+ if (src->eof_flags & XFS_EOF_FLAGS_PRID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
+ if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
+ dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;
+
+ dst->icw_prid = src->eof_prid;
+ dst->icw_min_file_size = src->eof_min_file_size;
+
+ dst->icw_uid = INVALID_UID;
if (src->eof_flags & XFS_EOF_FLAGS_UID) {
- dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
- if (!uid_valid(dst->eof_uid))
+ dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
+ if (!uid_valid(dst->icw_uid))
return -EINVAL;
}
- dst->eof_gid = INVALID_GID;
+ dst->icw_gid = INVALID_GID;
if (src->eof_flags & XFS_EOF_FLAGS_GID) {
- dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
- if (!gid_valid(dst->eof_gid))
+ dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
+ if (!gid_valid(dst->icw_gid))
return -EINVAL;
}
return 0;
@@ -2164,8 +2175,8 @@ xfs_file_ioctl(
return xfs_errortag_clearall(mp);
case XFS_IOC_FREE_EOFBLOCKS: {
- struct xfs_fs_eofblocks eofb;
- struct xfs_eofblocks keofb;
+ struct xfs_fs_eofblocks eofb;
+ struct xfs_icwalk icw;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -2176,14 +2187,14 @@ xfs_file_ioctl(
if (copy_from_user(&eofb, arg, sizeof(eofb)))
return -EFAULT;
- error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
+ error = xfs_fs_eofblocks_from_user(&eofb, &icw);
if (error)
return error;
- trace_xfs_ioc_free_eofblocks(mp, &keofb, _RET_IP_);
+ trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);
sb_start_write(mp->m_super);
- error = xfs_blockgc_free_space(mp, &keofb);
+ error = xfs_blockgc_free_space(mp, &icw);
sb_end_write(mp->m_super);
return error;
}
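
For context, the conversion above services the XFS_IOC_FREE_EOFBLOCKS interface. A hedged userspace sketch of invoking it (the mount point is illustrative; this requires CAP_SYS_ADMIN, and the structure comes from the xfsprogs xfs/xfs_fs.h header):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs_fs.h>

	int main(void)
	{
		struct xfs_fs_eofblocks eofb;
		int fd, ret;

		memset(&eofb, 0, sizeof(eofb));	/* pad bytes must be zero */
		eofb.eof_version = XFS_EOFBLOCKS_VERSION;
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;	/* wait for the scan */

		fd = open("/mnt/xfs", O_RDONLY);	/* any fd on the fs */
		if (fd < 0)
			return 1;
		ret = ioctl(fd, XFS_IOC_FREE_EOFBLOCKS, &eofb);
		return ret ? 1 : 0;
	}
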
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index dfe24b7f26e5..93c082db04b7 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -543,7 +543,7 @@ xfs_stat_blksize(
* always return the realtime extent size.
*/
if (XFS_IS_REALTIME_INODE(ip))
- return xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
+ return XFS_FSB_TO_B(mp, xfs_get_extsz_hint(ip));
/*
* Allow large block sizes to be reported to userspace programs if the
@@ -560,7 +560,7 @@ xfs_stat_blksize(
*/
if (mp->m_flags & XFS_MOUNT_LARGEIO) {
if (mp->m_swidth)
- return mp->m_swidth << mp->m_sb.sb_blocklog;
+ return XFS_FSB_TO_B(mp, mp->m_swidth);
if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
return 1U << mp->m_allocsize_log;
}
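
The two hunks above swap open-coded shifts for the XFS_FSB_TO_B() helper; behaviour is unchanged. The relationship, roughly (a sketch, not the exact macro text):

	/* Filesystem blocks to bytes: these two lines are equivalent. */
	bytes = (uint64_t)fsbs << mp->m_sb.sb_blocklog;
	bytes = XFS_FSB_TO_B(mp, fsbs);
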
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index c4a340f1f1e1..917d51eefee3 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -21,6 +21,7 @@
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"
+#include "xfs_ag.h"
/*
* Walking Inodes in the Filesystem
@@ -51,6 +52,7 @@ struct xfs_iwalk_ag {
struct xfs_mount *mp;
struct xfs_trans *tp;
+ struct xfs_perag *pag;
/* Where do we start the traversal? */
xfs_ino_t startino;
@@ -90,7 +92,7 @@ struct xfs_iwalk_ag {
STATIC void
xfs_iwalk_ichunk_ra(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_inobt_rec_incore *irec)
{
struct xfs_ino_geometry *igeo = M_IGEO(mp);
@@ -106,7 +108,7 @@ xfs_iwalk_ichunk_ra(
imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
if (imask & ~irec->ir_free) {
- xfs_btree_reada_bufs(mp, agno, agbno,
+ xfs_btree_reada_bufs(mp, pag->pag_agno, agbno,
igeo->blocks_per_cluster,
&xfs_inode_buf_ops);
}
@@ -174,26 +176,25 @@ xfs_iwalk_free(
/* For each inuse inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
- struct xfs_iwalk_ag *iwag)
+ struct xfs_iwalk_ag *iwag)
{
- struct xfs_mount *mp = iwag->mp;
- struct xfs_trans *tp = iwag->tp;
- xfs_ino_t ino;
- unsigned int i, j;
- xfs_agnumber_t agno;
- int error;
+ struct xfs_mount *mp = iwag->mp;
+ struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
+ xfs_ino_t ino;
+ unsigned int i, j;
+ int error;
- agno = XFS_INO_TO_AGNO(mp, iwag->startino);
for (i = 0; i < iwag->nr_recs; i++) {
struct xfs_inobt_rec_incore *irec = &iwag->recs[i];
- trace_xfs_iwalk_ag_rec(mp, agno, irec);
+ trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);
if (xfs_pwork_want_abort(&iwag->pwork))
return 0;
if (iwag->inobt_walk_fn) {
- error = iwag->inobt_walk_fn(mp, tp, agno, irec,
+ error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,
iwag->data);
if (error)
return error;
@@ -211,7 +212,8 @@ xfs_iwalk_ag_recs(
continue;
/* Otherwise call our function. */
- ino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino + j);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
+ irec->ir_startino + j);
error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
if (error)
return error;
@@ -257,7 +259,6 @@ xfs_iwalk_del_inobt(
STATIC int
xfs_iwalk_ag_start(
struct xfs_iwalk_ag *iwag,
- xfs_agnumber_t agno,
xfs_agino_t agino,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp,
@@ -265,12 +266,13 @@ xfs_iwalk_ag_start(
{
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
struct xfs_inobt_rec_incore *irec;
int error;
/* Set up a fresh cursor and empty the inobt cache. */
iwag->nr_recs = 0;
- error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
+ error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, curpp, agi_bpp);
if (error)
return error;
@@ -304,7 +306,7 @@ xfs_iwalk_ag_start(
if (XFS_IS_CORRUPT(mp, *has_more != 1))
return -EFSCORRUPTED;
- iwag->lastino = XFS_AGINO_TO_INO(mp, agno,
+ iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
irec->ir_startino + XFS_INODES_PER_CHUNK - 1);
/*
@@ -345,7 +347,6 @@ out_advance:
STATIC int
xfs_iwalk_run_callbacks(
struct xfs_iwalk_ag *iwag,
- xfs_agnumber_t agno,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp,
int *has_more)
@@ -376,7 +377,7 @@ xfs_iwalk_run_callbacks(
return 0;
/* ...and recreate the cursor just past where we left off. */
- error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
+ error = xfs_inobt_cur(mp, tp, iwag->pag, XFS_BTNUM_INO, curpp, agi_bpp);
if (error)
return error;
@@ -390,17 +391,17 @@ xfs_iwalk_ag(
{
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
struct xfs_buf *agi_bp = NULL;
struct xfs_btree_cur *cur = NULL;
- xfs_agnumber_t agno;
xfs_agino_t agino;
int has_more;
int error = 0;
/* Set up our cursor at the right place in the inode btree. */
- agno = XFS_INO_TO_AGNO(mp, iwag->startino);
+ ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino));
agino = XFS_INO_TO_AGINO(mp, iwag->startino);
- error = xfs_iwalk_ag_start(iwag, agno, agino, &cur, &agi_bp, &has_more);
+ error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);
while (!error && has_more) {
struct xfs_inobt_rec_incore *irec;
@@ -417,7 +418,7 @@ xfs_iwalk_ag(
break;
/* Make sure that we always move forward. */
- rec_fsino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino);
+ rec_fsino = XFS_AGINO_TO_INO(mp, pag->pag_agno, irec->ir_startino);
if (iwag->lastino != NULLFSINO &&
XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
error = -EFSCORRUPTED;
@@ -438,7 +439,7 @@ xfs_iwalk_ag(
* walking the inodes.
*/
if (iwag->iwalk_fn)
- xfs_iwalk_ichunk_ra(mp, agno, irec);
+ xfs_iwalk_ichunk_ra(mp, pag, irec);
/*
* If there's space in the buffer for more records, increment
@@ -458,15 +459,14 @@ xfs_iwalk_ag(
* we would be if we had been able to increment like above.
*/
ASSERT(has_more);
- error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp,
- &has_more);
+ error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
}
if (iwag->nr_recs == 0 || error)
goto out;
/* Walk the unprocessed records in the cache. */
- error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp, &has_more);
+ error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
out:
xfs_iwalk_del_inobt(tp, &cur, &agi_bp, error);
@@ -555,6 +555,7 @@ xfs_iwalk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -565,15 +566,19 @@ xfs_iwalk(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
+ iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
+ iwag.pag = NULL;
}
+ if (iwag.pag)
+ xfs_perag_put(pag);
xfs_iwalk_free(&iwag);
return error;
}
@@ -598,6 +603,7 @@ xfs_iwalk_ag_work(
error = xfs_iwalk_ag(iwag);
xfs_iwalk_free(iwag);
out:
+ xfs_perag_put(iwag->pag);
kmem_free(iwag);
return error;
}
@@ -617,6 +623,7 @@ xfs_iwalk_threaded(
void *data)
{
struct xfs_pwork_ctl pctl;
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -627,7 +634,7 @@ xfs_iwalk_threaded(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
struct xfs_iwalk_ag *iwag;
if (xfs_pwork_ctl_want_abort(&pctl))
@@ -635,17 +642,25 @@ xfs_iwalk_threaded(
iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
iwag->mp = mp;
+
+ /*
+ * perag is being handed off to async work, so take another
+ * reference for the async work to release.
+ */
+ atomic_inc(&pag->pag_ref);
+ iwag->pag = pag;
iwag->iwalk_fn = iwalk_fn;
iwag->data = data;
iwag->startino = startino;
iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
iwag->lastino = NULLFSINO;
xfs_pwork_queue(&pctl, &iwag->pwork);
- startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
+ startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
}
-
+ if (pag)
+ xfs_perag_put(pag);
if (polled)
xfs_pwork_poll(&pctl);
return xfs_pwork_destroy(&pctl);
@@ -715,6 +730,7 @@ xfs_inobt_walk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -725,15 +741,19 @@ xfs_inobt_walk(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
+ iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
- iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
+ iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
+ iwag.pag = NULL;
}
+ if (iwag.pag)
+ xfs_perag_put(pag);
xfs_iwalk_free(&iwag);
return error;
}
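
All three loops above follow the for_each_perag_from() contract: the iterator holds a perag reference across the body and drops it when advancing, so breaking out early leaves one reference for the caller to put; hence the trailing `if (pag) xfs_perag_put(pag)` (or iwag.pag checks), and the explicit atomic_inc() where a reference is handed off to async work. The iteration shape, roughly (a sketch, not the exact macro body):

	for (pag = xfs_perag_get(mp, agno);
	     pag != NULL;
	     agno = pag->pag_agno + 1,
	     xfs_perag_put(pag),
	     pag = xfs_perag_get(mp, agno)) {
		/* body runs with a reference to 'pag' held */
	}
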
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 7688663b9773..c174262a074e 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -196,6 +196,8 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, unsigned int op);
+void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
+ struct completion *done);
#define ASSERT_ALWAYS(expr) \
(likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
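
The xfs_flush_bdev_async() helper declared above lets a caller kick off a device cache flush and overlap other work until it completes. The CIL push code later in this patch uses exactly this shape:

	struct bio		bio;
	DECLARE_COMPLETION_ONSTACK(bdev_flush);

	/* Start the flush, overlap checkpoint formatting with the I/O. */
	xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
			&bdev_flush);

	/* ... pull log vectors off the CIL and format them ... */

	wait_for_completion(&bdev_flush);
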
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a002425377b5..36fa2650b081 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -502,6 +502,7 @@ __xlog_state_release_iclog(
iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
xlog_verify_tail_lsn(log, iclog, tail_lsn);
/* cycle incremented when incrementing curr_block */
+ trace_xlog_iclog_syncing(iclog, _RET_IP_);
return true;
}
@@ -513,13 +514,14 @@ __xlog_state_release_iclog(
 * Flush iclog to disk if this is the last reference to the given iclog and
 * it is in the WANT_SYNC state.
*/
-static int
+int
xlog_state_release_iclog(
struct xlog *log,
struct xlog_in_core *iclog)
{
lockdep_assert_held(&log->l_icloglock);
+ trace_xlog_iclog_release(iclog, _RET_IP_);
if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
@@ -533,23 +535,6 @@ xlog_state_release_iclog(
return 0;
}
-void
-xfs_log_release_iclog(
- struct xlog_in_core *iclog)
-{
- struct xlog *log = iclog->ic_log;
- bool sync = false;
-
- if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
- if (iclog->ic_state != XLOG_STATE_IOERROR)
- sync = __xlog_state_release_iclog(log, iclog);
- spin_unlock(&log->l_icloglock);
- }
-
- if (sync)
- xlog_sync(log, iclog);
-}
-
/*
* Mount a log filesystem
*
@@ -770,6 +755,9 @@ xfs_log_mount_finish(
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
+ /* Make sure the log is dead if we're returning failure. */
+ ASSERT(!error || (mp->m_log->l_flags & XLOG_IO_ERROR));
+
return error;
}
@@ -786,16 +774,19 @@ xfs_log_mount_cancel(
}
/*
- * Wait for the iclog to be written disk, or return an error if the log has been
- * shut down.
+ * Wait for the iclog and all prior iclogs to be written to disk as required by the
+ * log force state machine. Waiting on ic_force_wait ensures iclog completions
+ * have been ordered and callbacks run before we are woken here, hence
+ * guaranteeing that all the iclogs up to this one are on stable storage.
*/
-static int
+int
xlog_wait_on_iclog(
struct xlog_in_core *iclog)
__releases(iclog->ic_log->l_icloglock)
{
struct xlog *log = iclog->ic_log;
+ trace_xlog_iclog_wait_on(iclog, _RET_IP_);
if (!XLOG_FORCED_SHUTDOWN(log) &&
iclog->ic_state != XLOG_STATE_ACTIVE &&
iclog->ic_state != XLOG_STATE_DIRTY) {
@@ -818,9 +809,7 @@ xlog_wait_on_iclog(
static int
xlog_write_unmount_record(
struct xlog *log,
- struct xlog_ticket *ticket,
- xfs_lsn_t *lsn,
- uint flags)
+ struct xlog_ticket *ticket)
{
struct xfs_unmount_log_format ulf = {
.magic = XLOG_UNMOUNT_TYPE,
@@ -837,7 +826,15 @@ xlog_write_unmount_record(
/* account for space used by record data */
ticket->t_curr_res -= sizeof(ulf);
- return xlog_write(log, &vec, ticket, lsn, NULL, flags, false);
+
+ /*
+ * For external log devices, we need to flush the data device cache
+ * first to ensure all metadata writeback is on stable storage before we
+ * stamp the tail LSN into the unmount record.
+ */
+ if (log->l_targ != log->l_mp->m_ddev_targp)
+ blkdev_issue_flush(log->l_targ->bt_bdev);
+ return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
}
/*
@@ -851,15 +848,13 @@ xlog_unmount_write(
struct xfs_mount *mp = log->l_mp;
struct xlog_in_core *iclog;
struct xlog_ticket *tic = NULL;
- xfs_lsn_t lsn;
- uint flags = XLOG_UNMOUNT_TRANS;
int error;
error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
if (error)
goto out_err;
- error = xlog_write_unmount_record(log, tic, &lsn, flags);
+ error = xlog_write_unmount_record(log, tic);
/*
* At this point, we're umounting anyway, so there's no point in
* transitioning log state to IOERROR. Just continue...
@@ -876,6 +871,11 @@ out_err:
else
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_state == XLOG_STATE_IOERROR);
+ /*
+ * Ensure the journal is fully flushed and on stable storage once the
+ * iclog containing the unmount record is written.
+ */
+ iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
error = xlog_state_release_iclog(log, iclog);
xlog_wait_on_iclog(iclog);
@@ -1401,6 +1401,11 @@ xlog_alloc_log(
xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
+ if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
+ log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
+ else
+ log->l_iclog_roundoff = BBSIZE;
+
xlog_grant_head_init(&log->l_reserve_head);
xlog_grant_head_init(&log->l_write_head);
@@ -1479,7 +1484,6 @@ xlog_alloc_log(
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_log = log;
atomic_set(&iclog->ic_refcnt, 0);
- spin_lock_init(&iclog->ic_callback_lock);
INIT_LIST_HEAD(&iclog->ic_callbacks);
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
@@ -1546,8 +1550,7 @@ xlog_commit_record(
if (XLOG_FORCED_SHUTDOWN(log))
return -EIO;
- error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS,
- false);
+ error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS);
if (error)
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
return error;
@@ -1753,10 +1756,10 @@ xlog_write_iclog(
struct xlog *log,
struct xlog_in_core *iclog,
uint64_t bno,
- unsigned int count,
- bool need_flush)
+ unsigned int count)
{
ASSERT(bno < log->l_logBBsize);
+ trace_xlog_iclog_write(iclog, _RET_IP_);
/*
* We lock the iclogbufs here so that we can serialise against I/O
@@ -1792,10 +1795,12 @@ xlog_write_iclog(
* writeback throttle from throttling log writes behind background
* metadata writeback and causing priority inversions.
*/
- iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC |
- REQ_IDLE | REQ_FUA;
- if (need_flush)
+ iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
+ if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
+ if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
+ iclog->ic_bio.bi_opf |= REQ_FUA;
+ iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
@@ -1854,29 +1859,15 @@ xlog_calc_iclog_size(
uint32_t *roundoff)
{
uint32_t count_init, count;
- bool use_lsunit;
-
- use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
- log->l_mp->m_sb.sb_logsunit > 1;
/* Add for LR header */
count_init = log->l_iclog_hsize + iclog->ic_offset;
+ count = roundup(count_init, log->l_iclog_roundoff);
- /* Round out the log write size */
- if (use_lsunit) {
- /* we have a v2 stripe unit to use */
- count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
- } else {
- count = BBTOB(BTOBB(count_init));
- }
-
- ASSERT(count >= count_init);
*roundoff = count - count_init;
- if (use_lsunit)
- ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
- else
- ASSERT(*roundoff < BBTOB(1));
+ ASSERT(count >= count_init);
+ ASSERT(*roundoff < log->l_iclog_roundoff);
return count;
}
@@ -1912,9 +1903,9 @@ xlog_sync(
unsigned int roundoff; /* roundoff to BB or stripe */
uint64_t bno;
unsigned int size;
- bool need_flush = true, split = false;
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
+ trace_xlog_iclog_sync(iclog, _RET_IP_);
count = xlog_calc_iclog_size(log, iclog, &roundoff);
@@ -1937,10 +1928,8 @@ xlog_sync(
bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
/* Do we need to split this write into 2 parts? */
- if (bno + BTOBB(count) > log->l_logBBsize) {
+ if (bno + BTOBB(count) > log->l_logBBsize)
xlog_split_iclog(log, &iclog->ic_header, bno, count);
- split = true;
- }
 /* calculate the checksum */
iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
@@ -1961,22 +1950,8 @@ xlog_sync(
be64_to_cpu(iclog->ic_header.h_lsn));
}
#endif
-
- /*
- * Flush the data device before flushing the log to make sure all meta
- * data written back from the AIL actually made it to disk before
- * stamping the new log tail LSN into the log buffer. For an external
- * log we need to issue the flush explicitly, and unfortunately
- * synchronously here; for an internal log we can simply use the block
- * layer state machine for preflushes.
- */
- if (log->l_targ != log->l_mp->m_ddev_targp || split) {
- xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
- need_flush = false;
- }
-
xlog_verify_iclog(log, iclog, count);
- xlog_write_iclog(log, iclog, bno, count, need_flush);
+ xlog_write_iclog(log, iclog, bno, count);
}
/*
@@ -2158,13 +2133,16 @@ static int
xlog_write_calc_vec_length(
struct xlog_ticket *ticket,
struct xfs_log_vec *log_vector,
- bool need_start_rec)
+ uint optype)
{
struct xfs_log_vec *lv;
- int headers = need_start_rec ? 1 : 0;
+ int headers = 0;
int len = 0;
int i;
+ if (optype & XLOG_START_TRANS)
+ headers++;
+
for (lv = log_vector; lv; lv = lv->lv_next) {
/* we don't write ordered log vectors */
if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
@@ -2384,8 +2362,7 @@ xlog_write(
struct xlog_ticket *ticket,
xfs_lsn_t *start_lsn,
struct xlog_in_core **commit_iclog,
- uint flags,
- bool need_start_rec)
+ uint optype)
{
struct xlog_in_core *iclog = NULL;
struct xfs_log_vec *lv = log_vector;
@@ -2413,8 +2390,9 @@ xlog_write(
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
- len = xlog_write_calc_vec_length(ticket, log_vector, need_start_rec);
- *start_lsn = 0;
+ len = xlog_write_calc_vec_length(ticket, log_vector, optype);
+ if (start_lsn)
+ *start_lsn = 0;
while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
void *ptr;
int log_offset;
@@ -2427,8 +2405,8 @@ xlog_write(
ASSERT(log_offset <= iclog->ic_size - 1);
ptr = iclog->ic_datap + log_offset;
- /* start_lsn is the first lsn written to. That's all we need. */
- if (!*start_lsn)
+ /* start_lsn is the first lsn written to. */
+ if (start_lsn && !*start_lsn)
*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
/*
@@ -2441,6 +2419,7 @@ xlog_write(
int copy_len;
int copy_off;
bool ordered = false;
+ bool wrote_start_rec = false;
/* ordered log vectors have no regions to write */
if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
@@ -2458,13 +2437,15 @@ xlog_write(
* write a start record. Only do this for the first
* iclog we write to.
*/
- if (need_start_rec) {
+ if (optype & XLOG_START_TRANS) {
xlog_write_start_rec(ptr, ticket);
xlog_write_adv_cnt(&ptr, &len, &log_offset,
sizeof(struct xlog_op_header));
+ optype &= ~XLOG_START_TRANS;
+ wrote_start_rec = true;
}
- ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
+ ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype);
if (!ophdr)
return -EIO;
@@ -2495,14 +2476,13 @@ xlog_write(
}
copy_len += sizeof(struct xlog_op_header);
record_cnt++;
- if (need_start_rec) {
+ if (wrote_start_rec) {
copy_len += sizeof(struct xlog_op_header);
record_cnt++;
- need_start_rec = false;
}
data_cnt += contwr ? copy_len : 0;
- error = xlog_write_copy_finish(log, iclog, flags,
+ error = xlog_write_copy_finish(log, iclog, optype,
&record_cnt, &data_cnt,
&partial_copy,
&partial_copy_len,
@@ -2546,7 +2526,7 @@ next_lv:
spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
if (commit_iclog) {
- ASSERT(flags & XLOG_COMMIT_TRANS);
+ ASSERT(optype & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
} else {
error = xlog_state_release_iclog(log, iclog);
@@ -2562,6 +2542,7 @@ xlog_state_activate_iclog(
int *iclogs_changed)
{
ASSERT(list_empty_careful(&iclog->ic_callbacks));
+ trace_xlog_iclog_activate(iclog, _RET_IP_);
/*
 * If the number of ops in this iclog indicates it just contains the
@@ -2652,6 +2633,8 @@ xlog_state_clean_iclog(
{
int iclogs_changed = 0;
+ trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
+
dirty_iclog->ic_state = XLOG_STATE_DIRTY;
xlog_state_activate_iclogs(log, &iclogs_changed);
@@ -2711,6 +2694,7 @@ xlog_state_set_callback(
struct xlog_in_core *iclog,
xfs_lsn_t header_lsn)
{
+ trace_xlog_iclog_callback(iclog, _RET_IP_);
iclog->ic_state = XLOG_STATE_CALLBACK;
ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
@@ -2776,43 +2760,6 @@ xlog_state_iodone_process_iclog(
}
}
-/*
- * Keep processing entries in the iclog callback list until we come around and
- * it is empty. We need to atomically see that the list is empty and change the
- * state to DIRTY so that we don't miss any more callbacks being added.
- *
- * This function is called with the icloglock held and returns with it held. We
- * drop it while running callbacks, however, as holding it over thousands of
- * callbacks is unnecessary and causes excessive contention if we do.
- */
-static void
-xlog_state_do_iclog_callbacks(
- struct xlog *log,
- struct xlog_in_core *iclog)
- __releases(&log->l_icloglock)
- __acquires(&log->l_icloglock)
-{
- spin_unlock(&log->l_icloglock);
- spin_lock(&iclog->ic_callback_lock);
- while (!list_empty(&iclog->ic_callbacks)) {
- LIST_HEAD(tmp);
-
- list_splice_init(&iclog->ic_callbacks, &tmp);
-
- spin_unlock(&iclog->ic_callback_lock);
- xlog_cil_process_committed(&tmp);
- spin_lock(&iclog->ic_callback_lock);
- }
-
- /*
- * Pick up the icloglock while still holding the callback lock so we
- * serialise against anyone trying to add more callbacks to this iclog
- * now we've finished processing.
- */
- spin_lock(&log->l_icloglock);
- spin_unlock(&iclog->ic_callback_lock);
-}
-
STATIC void
xlog_state_do_callback(
struct xlog *log)
@@ -2841,6 +2788,8 @@ xlog_state_do_callback(
repeats++;
do {
+ LIST_HEAD(cb_list);
+
if (xlog_state_iodone_process_iclog(log, iclog,
&ioerror))
break;
@@ -2850,13 +2799,15 @@ xlog_state_do_callback(
iclog = iclog->ic_next;
continue;
}
+ list_splice_init(&iclog->ic_callbacks, &cb_list);
+ spin_unlock(&log->l_icloglock);
- /*
- * Running callbacks will drop the icloglock which means
- * we'll have to run at least one more complete loop.
- */
+ trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
+ xlog_cil_process_committed(&cb_list);
+ trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
cycled_icloglock = true;
- xlog_state_do_iclog_callbacks(log, iclog);
+
+ spin_lock(&log->l_icloglock);
if (XLOG_FORCED_SHUTDOWN(log))
wake_up_all(&iclog->ic_force_wait);
else
@@ -2902,6 +2853,7 @@ xlog_state_done_syncing(
spin_lock(&log->l_icloglock);
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
+ trace_xlog_iclog_sync_done(iclog, _RET_IP_);
/*
* If we got an error, either on the first buffer, or in the case of
@@ -2975,6 +2927,8 @@ restart:
atomic_inc(&iclog->ic_refcnt); /* prevents sync */
log_offset = iclog->ic_offset;
+ trace_xlog_iclog_get_space(iclog, _RET_IP_);
+
/* On the 1st write to an iclog, figure out lsn. This works
* if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
* committing to. If the offset is set, that's how many blocks
@@ -3140,6 +3094,7 @@ xlog_state_switch_iclogs(
{
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
assert_spin_locked(&log->l_icloglock);
+ trace_xlog_iclog_switch(iclog, _RET_IP_);
if (!eventual_size)
eventual_size = iclog->ic_offset;
@@ -3152,9 +3107,8 @@ xlog_state_switch_iclogs(
log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
/* Round up to next log-sunit */
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
- log->l_mp->m_sb.sb_logsunit > 1) {
- uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
+ if (log->l_iclog_roundoff > BBSIZE) {
+ uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
}
@@ -3223,6 +3177,8 @@ xfs_log_force(
if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
+ trace_xlog_iclog_force(iclog, _RET_IP_);
+
if (iclog->ic_state == XLOG_STATE_DIRTY ||
(iclog->ic_state == XLOG_STATE_ACTIVE &&
atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
@@ -3281,14 +3237,13 @@ out_error:
}
static int
-__xfs_log_force_lsn(
- struct xfs_mount *mp,
+xlog_force_lsn(
+ struct xlog *log,
xfs_lsn_t lsn,
uint flags,
int *log_flushed,
bool already_slept)
{
- struct xlog *log = mp->m_log;
struct xlog_in_core *iclog;
spin_lock(&log->l_icloglock);
@@ -3297,6 +3252,7 @@ __xfs_log_force_lsn(
goto out_error;
while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+ trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
iclog = iclog->ic_next;
if (iclog == log->l_iclog)
goto out_unlock;
@@ -3321,8 +3277,6 @@ __xfs_log_force_lsn(
if (!already_slept &&
(iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
- XFS_STATS_INC(mp, xs_log_force_sleep);
-
xlog_wait(&iclog->ic_prev->ic_write_wait,
&log->l_icloglock);
return -EAGAIN;
@@ -3360,25 +3314,29 @@ out_error:
* to disk, that thread will wake up all threads waiting on the queue.
*/
int
-xfs_log_force_lsn(
+xfs_log_force_seq(
struct xfs_mount *mp,
- xfs_lsn_t lsn,
+ xfs_csn_t seq,
uint flags,
int *log_flushed)
{
+ struct xlog *log = mp->m_log;
+ xfs_lsn_t lsn;
int ret;
- ASSERT(lsn != 0);
+ ASSERT(seq != 0);
XFS_STATS_INC(mp, xs_log_force);
- trace_xfs_log_force(mp, lsn, _RET_IP_);
+ trace_xfs_log_force(mp, seq, _RET_IP_);
- lsn = xlog_cil_force_lsn(mp->m_log, lsn);
+ lsn = xlog_cil_force_seq(log, seq);
if (lsn == NULLCOMMITLSN)
return 0;
- ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
- if (ret == -EAGAIN)
- ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
+ ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
+ if (ret == -EAGAIN) {
+ XFS_STATS_INC(mp, xs_log_force_sleep);
+ ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
+ }
return ret;
}
@@ -3407,12 +3365,11 @@ xfs_log_ticket_get(
* Figure out the total log space unit (in bytes) that would be
* required for a log ticket.
*/
-int
-xfs_log_calc_unit_res(
- struct xfs_mount *mp,
+static int
+xlog_calc_unit_res(
+ struct xlog *log,
int unit_bytes)
{
- struct xlog *log = mp->m_log;
int iclog_space;
uint num_headers;
@@ -3488,18 +3445,20 @@ xfs_log_calc_unit_res(
/* for commit-rec LR header - note: padding will subsume the ophdr */
unit_bytes += log->l_iclog_hsize;
- /* for roundoff padding for transaction data and one for commit record */
- if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
- /* log su roundoff */
- unit_bytes += 2 * mp->m_sb.sb_logsunit;
- } else {
- /* BB roundoff */
- unit_bytes += 2 * BBSIZE;
- }
+ /* roundoff padding for transaction data and one for commit record */
+ unit_bytes += 2 * log->l_iclog_roundoff;
return unit_bytes;
}
+int
+xfs_log_calc_unit_res(
+ struct xfs_mount *mp,
+ int unit_bytes)
+{
+ return xlog_calc_unit_res(mp->m_log, unit_bytes);
+}
+
/*
* Allocate and initialise a new log ticket.
*/
@@ -3516,7 +3475,7 @@ xlog_ticket_alloc(
tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL);
- unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
+ unit_res = xlog_calc_unit_res(log, unit_bytes);
atomic_set(&tic->t_ref, 1);
tic->t_task = current;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 044e02cb8921..813b972e9788 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -106,7 +106,7 @@ struct xfs_item_ops;
struct xfs_trans;
int xfs_log_force(struct xfs_mount *mp, uint flags);
-int xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
+int xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
int *log_forced);
int xfs_log_mount(struct xfs_mount *mp,
struct xfs_buftarg *log_target,
@@ -117,7 +117,6 @@ void xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
-void xfs_log_release_iclog(struct xlog_in_core *iclog);
int xfs_log_reserve(struct xfs_mount *mp,
int length,
int count,
@@ -132,8 +131,6 @@ bool xfs_log_writable(struct xfs_mount *mp);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
-void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn, bool regrant);
void xlog_cil_process_committed(struct list_head *list);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
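
With the prototype change above, callers force the log by CIL checkpoint sequence instead of by LSN; xfs_log_force_seq() first translates the sequence via xlog_cil_force_seq() and only then walks iclogs. The caller-side pattern, as xfs_log_force_inode() earlier in this patch demonstrates:

	xfs_csn_t	seq = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		seq = ip->i_itemp->ili_commit_seq;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (seq)
		error = xfs_log_force_seq(ip->i_mount, seq,
				XFS_LOG_SYNC, NULL);
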
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index b0ef071b3cb5..b128aaa9b870 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -656,6 +656,8 @@ xlog_cil_push_work(
struct xfs_log_vec lvhdr = { NULL };
xfs_lsn_t commit_lsn;
xfs_lsn_t push_seq;
+ struct bio bio;
+ DECLARE_COMPLETION_ONSTACK(bdev_flush);
new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
@@ -668,9 +670,14 @@ xlog_cil_push_work(
ASSERT(push_seq <= ctx->sequence);
/*
- * Wake up any background push waiters now this context is being pushed.
+ * As we are about to switch to a new, empty CIL context, we no longer
+ * need to throttle tasks on CIL space overruns. Wake any waiters that
+ * the hard push throttle may have caught so they can start committing
+ * to the new context. The ctx->xc_push_lock provides the serialisation
+ * necessary for safely using the lockless waitqueue_active() check in
+ * this context.
*/
- if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ if (waitqueue_active(&cil->xc_push_wait))
wake_up_all(&cil->xc_push_wait);
/*
@@ -719,10 +726,19 @@ xlog_cil_push_work(
spin_unlock(&cil->xc_push_lock);
/*
- * pull all the log vectors off the items in the CIL, and
- * remove the items from the CIL. We don't need the CIL lock
- * here because it's only needed on the transaction commit
- * side which is currently locked out by the flush lock.
+ * The CIL is stable at this point - nothing new will be added to it
+ * because we hold the flush lock exclusively. Hence we can now issue
+ * a cache flush to ensure all the completed metadata in the journal we
+ * are about to overwrite is on stable storage.
+ */
+ xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
+ &bdev_flush);
+
+ /*
+ * Pull all the log vectors off the items in the CIL, and remove the
+ * items from the CIL. We don't need the CIL lock here because it's only
+ * needed on the transaction commit side which is currently locked out
+ * by the flush lock.
*/
lv = NULL;
num_iovecs = 0;
@@ -772,7 +788,7 @@ xlog_cil_push_work(
* that higher sequences will wait for us to write out a commit record
* before they do.
*
- * xfs_log_force_lsn requires us to mirror the new sequence into the cil
+ * xfs_log_force_seq requires us to mirror the new sequence into the cil
* structure atomically with the addition of this sequence to the
* committing list. This also ensures that we can do unlocked checks
* against the current sequence in log forces without risking
@@ -806,7 +822,14 @@ xlog_cil_push_work(
lvhdr.lv_iovecp = &lhdr;
lvhdr.lv_next = ctx->lv_chain;
- error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true);
+ /*
+ * Before we format and submit the first iclog, we have to ensure that
+ * the metadata writeback ordering cache flush is complete.
+ */
+ wait_for_completion(&bdev_flush);
+
+ error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL,
+ XLOG_START_TRANS);
if (error)
goto out_abort_free_ticket;
@@ -850,15 +873,21 @@ restart:
xfs_log_ticket_ungrant(log, tic);
- spin_lock(&commit_iclog->ic_callback_lock);
+ /*
+ * Once we attach the ctx to the iclog, a shutdown can process the
+ * iclog, run the callbacks and free the ctx. The only thing preventing
+ * this potential UAF situation here is that we are holding the
+ * icloglock. Hence we cannot access the ctx once we have attached the
+ * callbacks and dropped the icloglock.
+ */
+ spin_lock(&log->l_icloglock);
if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
- spin_unlock(&commit_iclog->ic_callback_lock);
+ spin_unlock(&log->l_icloglock);
goto out_abort;
}
ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
- spin_unlock(&commit_iclog->ic_callback_lock);
/*
* now the checkpoint commit is complete and we've attached the
@@ -870,8 +899,50 @@ restart:
wake_up_all(&cil->xc_commit_wait);
spin_unlock(&cil->xc_push_lock);
- /* release the hounds! */
- xfs_log_release_iclog(commit_iclog);
+ /*
+ * If the checkpoint spans multiple iclogs, wait for all previous iclogs
+ * to complete before we submit the commit_iclog. We can't use state
+ * checks for this - ACTIVE can be either a past completed iclog or a
+ * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
+ * past or future iclog awaiting IO or ordered IO completion to be run.
+ * In the latter case, if it's a future iclog and we wait on it, the we
+	 * In the latter case, if it's a future iclog and we wait on it, then we
+ * wakeup until this commit_iclog is written to disk. Hence we use the
+ * iclog header lsn and compare it to the commit lsn to determine if we
+ * need to wait on iclogs or not.
+ *
+ * NOTE: It is not safe to reference the ctx after this check as we drop
+ * the icloglock if we have to wait for completion of other iclogs.
+ */
+ if (ctx->start_lsn != commit_lsn) {
+ xfs_lsn_t plsn;
+
+ plsn = be64_to_cpu(commit_iclog->ic_prev->ic_header.h_lsn);
+ if (plsn && XFS_LSN_CMP(plsn, commit_lsn) < 0) {
+ /*
+ * Waiting on ic_force_wait orders the completion of
+ * iclogs older than ic_prev. Hence we only need to wait
+ * on the most recent older iclog here.
+ */
+ xlog_wait_on_iclog(commit_iclog->ic_prev);
+ spin_lock(&log->l_icloglock);
+ }
+
+ /*
+ * We need to issue a pre-flush so that the ordering for this
+ * checkpoint is correctly preserved down to stable storage.
+ */
+ commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+ }
+
+ /*
+ * The commit iclog must be written to stable storage to guarantee
+ * journal IO vs metadata writeback IO is correctly ordered on stable
+ * storage.
+ */
+ commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
+ xlog_state_release_iclog(log, commit_iclog);
+ spin_unlock(&log->l_icloglock);
return;
out_skip:
@@ -907,7 +978,7 @@ xlog_cil_push_background(
ASSERT(!list_empty(&cil->xc_cil));
/*
- * don't do a background push if we haven't used up all the
+ * Don't do a background push if we haven't used up all the
* space available yet.
*/
if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
@@ -931,9 +1002,16 @@ xlog_cil_push_background(
/*
* If we are well over the space limit, throttle the work that is being
- * done until the push work on this context has begun.
+ * done until the push work on this context has begun. Enforce the hard
+ * throttle on all transaction commits once it has been activated, even
+ * if the committing transactions have resulted in the space usage
+ * dipping back down under the hard limit.
+ *
+	 * The cil->xc_push_lock provides the serialisation necessary for safely
+ * using the lockless waitqueue_active() check in this context.
*/
- if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
+ if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
+ waitqueue_active(&cil->xc_push_wait)) {
trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
ASSERT(cil->xc_ctx->space_used < log->l_logsize);
xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
@@ -1008,16 +1086,14 @@ xlog_cil_empty(
* allowed again.
*/
void
-xfs_log_commit_cil(
- struct xfs_mount *mp,
+xlog_cil_commit(
+ struct xlog *log,
struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn,
+ xfs_csn_t *commit_seq,
bool regrant)
{
- struct xlog *log = mp->m_log;
struct xfs_cil *cil = log->l_cilp;
struct xfs_log_item *lip, *next;
- xfs_lsn_t xc_commit_lsn;
/*
* Do all necessary memory allocation before we lock the CIL.
@@ -1031,10 +1107,6 @@ xfs_log_commit_cil(
xlog_cil_insert_items(log, tp);
- xc_commit_lsn = cil->xc_ctx->sequence;
- if (commit_lsn)
- *commit_lsn = xc_commit_lsn;
-
if (regrant && !XLOG_FORCED_SHUTDOWN(log))
xfs_log_ticket_regrant(log, tp->t_ticket);
else
@@ -1057,8 +1129,10 @@ xfs_log_commit_cil(
list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
xfs_trans_del_item(lip);
if (lip->li_ops->iop_committing)
- lip->li_ops->iop_committing(lip, xc_commit_lsn);
+ lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
}
+ if (commit_seq)
+ *commit_seq = cil->xc_ctx->sequence;
/* xlog_cil_push_background() releases cil->xc_ctx_lock */
xlog_cil_push_background(log);
@@ -1075,9 +1149,9 @@ xfs_log_commit_cil(
* iclog flush is necessary following this call.
*/
xfs_lsn_t
-xlog_cil_force_lsn(
+xlog_cil_force_seq(
struct xlog *log,
- xfs_lsn_t sequence)
+ xfs_csn_t sequence)
{
struct xfs_cil *cil = log->l_cilp;
struct xfs_cil_ctx *ctx;
@@ -1173,21 +1247,17 @@ bool
xfs_log_item_in_current_chkpt(
struct xfs_log_item *lip)
{
- struct xfs_cil_ctx *ctx;
+ struct xfs_cil_ctx *ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
if (list_empty(&lip->li_cil))
return false;
- ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
-
/*
* li_seq is written on the first commit of a log item to record the
* first checkpoint it is written to. Hence if it is different to the
* current sequence, we're in a new checkpoint.
*/
- if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
- return false;
- return true;
+ return lip->li_seq == ctx->sequence;
}
/*
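The push-work hunks above overlap a device cache flush with checkpoint formatting: the flush is issued as soon as the CIL is stable and is only waited on immediately before the first iclog is written, so the flush latency hides behind log vector processing. A condensed sketch of that pattern, using only calls that appear in the hunks above:

	struct bio bio;
	DECLARE_COMPLETION_ONSTACK(bdev_flush);

	/* CIL is stable: start the data device cache flush asynchronously */
	xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
			&bdev_flush);

	/* ... pull log vectors off the CIL, build the checkpoint header ... */

	/* the flush must complete before the first iclog reaches the disk */
	wait_for_completion(&bdev_flush);
	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL,
			XLOG_START_TRANS);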
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1c6fdbf3d506..4c41bbfa33b0 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -50,6 +50,16 @@ enum xlog_iclog_state {
XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */
};
+#define XLOG_STATE_STRINGS \
+ { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \
+ { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \
+ { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \
+ { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \
+ { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \
+ { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \
+ { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" }
+
+
/*
* Log ticket flags
*/
@@ -133,6 +143,9 @@ enum xlog_iclog_state {
#define XLOG_COVER_OPS 5
+#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
+
/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX 15
@@ -201,10 +214,8 @@ typedef struct xlog_in_core {
u32 ic_size;
u32 ic_offset;
enum xlog_iclog_state ic_state;
+ unsigned int ic_flags;
char *ic_datap; /* pointer to iclog data */
-
- /* Callback structures need their own cacheline */
- spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
struct list_head ic_callbacks;
/* reference counts need their own cacheline */
@@ -230,7 +241,7 @@ struct xfs_cil;
struct xfs_cil_ctx {
struct xfs_cil *cil;
- xfs_lsn_t sequence; /* chkpt sequence # */
+ xfs_csn_t sequence; /* chkpt sequence # */
xfs_lsn_t start_lsn; /* first LSN of chkpt commit */
xfs_lsn_t commit_lsn; /* chkpt commit record lsn */
struct xlog_ticket *ticket; /* chkpt ticket */
@@ -268,10 +279,10 @@ struct xfs_cil {
struct xfs_cil_ctx *xc_ctx;
spinlock_t xc_push_lock ____cacheline_aligned_in_smp;
- xfs_lsn_t xc_push_seq;
+ xfs_csn_t xc_push_seq;
struct list_head xc_committing;
wait_queue_head_t xc_commit_wait;
- xfs_lsn_t xc_current_sequence;
+ xfs_csn_t xc_current_sequence;
struct work_struct xc_push_work;
wait_queue_head_t xc_push_wait; /* background push throttle */
} ____cacheline_aligned_in_smp;
@@ -436,6 +447,8 @@ struct xlog {
#endif
/* log recovery lsn tracking (for buffer submission) */
xfs_lsn_t l_recovery_lsn;
+
+ uint32_t l_iclog_roundoff;/* padding roundoff */
};
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
@@ -478,13 +491,14 @@ void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void xlog_print_trans(struct xfs_trans *);
int xlog_write(struct xlog *log, struct xfs_log_vec *log_vector,
struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
- struct xlog_in_core **commit_iclog, uint flags,
- bool need_start_rec);
+ struct xlog_in_core **commit_iclog, uint optype);
int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
struct xlog_in_core **iclog, xfs_lsn_t *lsn);
void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+
/*
* When we crack an atomic LSN, we sample it first so that the value will not
* change while we are cracking it into the component values. This means we
@@ -547,19 +561,18 @@ int xlog_cil_init(struct xlog *log);
void xlog_cil_init_post_recovery(struct xlog *log);
void xlog_cil_destroy(struct xlog *log);
bool xlog_cil_empty(struct xlog *log);
+void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
+ xfs_csn_t *commit_seq, bool regrant);
/*
* CIL force routines
*/
-xfs_lsn_t
-xlog_cil_force_lsn(
- struct xlog *log,
- xfs_lsn_t sequence);
+xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
static inline void
xlog_cil_force(struct xlog *log)
{
- xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+ xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}
/*
@@ -582,6 +595,8 @@ xlog_wait(
remove_wait_queue(wq, &wait);
}
+int xlog_wait_on_iclog(struct xlog_in_core *iclog);
+
/*
* The LSN is valid so long as it is behind the current LSN. If it isn't, this
* means that the next log record that includes this metadata could have a
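XLOG_ICL_NEED_FLUSH and XLOG_ICL_NEED_FUA are consumed when the iclog bio is built; the submission side is outside this section, so the following is only an illustrative sketch of how the flags would translate into block layer request flags:

	/*
	 * Illustrative only: the real translation happens in the iclog
	 * submission path, which is not shown in this section.
	 */
	unsigned int op_flags = REQ_OP_WRITE | REQ_SYNC;

	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
		op_flags |= REQ_PREFLUSH;
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		op_flags |= REQ_FUA;
	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);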
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index e5dd1c0c2f03..1721fce2ec94 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -25,6 +25,7 @@
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
+#include "xfs_ag.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -2457,8 +2458,10 @@ xlog_finish_defer_ops(
error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
- if (error)
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return error;
+ }
/*
* Transfer to this new transaction all the dfops we captured
@@ -2741,21 +2744,17 @@ STATIC void
xlog_recover_process_iunlinks(
struct xlog *log)
{
- xfs_mount_t *mp;
- xfs_agnumber_t agno;
- xfs_agi_t *agi;
- struct xfs_buf *agibp;
- xfs_agino_t agino;
- int bucket;
- int error;
-
- mp = log->l_mp;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+ struct xfs_agi *agi;
+ struct xfs_buf *agibp;
+ xfs_agino_t agino;
+ int bucket;
+ int error;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- /*
- * Find the agi for this ag.
- */
- error = xfs_read_agi(mp, NULL, agno, &agibp);
+ for_each_perag(mp, agno, pag) {
+ error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
if (error) {
/*
* AGI is b0rked. Don't process it.
@@ -2781,7 +2780,7 @@ xlog_recover_process_iunlinks(
agino = be32_to_cpu(agi->agi_unlinked[bucket]);
while (agino != NULLAGINO) {
agino = xlog_recover_process_one_iunlink(mp,
- agno, agino, bucket);
+ pag->pag_agno, agino, bucket);
cond_resched();
}
}
@@ -3452,6 +3451,7 @@ xlog_recover_finish(
* this) before we get around to xfs_log_mount_cancel.
*/
xlog_recover_cancel_intents(log);
+ xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
xfs_alert(log->l_mp, "Failed to recover intents");
return error;
}
@@ -3493,27 +3493,28 @@ xlog_recover_cancel(
*/
STATIC void
xlog_recover_check_summary(
- struct xlog *log)
+ struct xlog *log)
{
- xfs_mount_t *mp;
- struct xfs_buf *agfbp;
- struct xfs_buf *agibp;
- xfs_agnumber_t agno;
- uint64_t freeblks;
- uint64_t itotal;
- uint64_t ifree;
- int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_perag *pag;
+ struct xfs_buf *agfbp;
+ struct xfs_buf *agibp;
+ xfs_agnumber_t agno;
+ uint64_t freeblks;
+ uint64_t itotal;
+ uint64_t ifree;
+ int error;
-	mp = log->l_mp;
freeblks = 0LL;
itotal = 0LL;
ifree = 0LL;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
+ for_each_perag(mp, agno, pag) {
+ error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
if (error) {
xfs_alert(mp, "%s agf read failed agno %d error %d",
- __func__, agno, error);
+ __func__, pag->pag_agno, error);
} else {
struct xfs_agf *agfp = agfbp->b_addr;
@@ -3522,10 +3523,10 @@ xlog_recover_check_summary(
xfs_buf_relse(agfbp);
}
- error = xfs_read_agi(mp, NULL, agno, &agibp);
+ error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
if (error) {
xfs_alert(mp, "%s agi read failed agno %d error %d",
- __func__, agno, error);
+ __func__, pag->pag_agno, error);
} else {
struct xfs_agi *agi = agibp->b_addr;
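Both recovery loops above move from a raw agno loop to for_each_perag(), which takes a perag reference for each iteration and drops it when the loop advances. One consequence, visible in the xfs_reflink.c hunk later in this patch: breaking out of the loop early leaves the current reference held, so the loop body must drop it itself. The general shape:

	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag(mp, agno, pag) {
		error = process_ag(mp, pag);	/* hypothetical per-AG work */
		if (error) {
			xfs_perag_put(pag);	/* early exit keeps the ref */
			break;
		}
	}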
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bdfee1943796..d0755494597f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -32,6 +32,7 @@
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
+#include "xfs_ag.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
@@ -119,41 +120,6 @@ xfs_uuid_unmount(
mutex_unlock(&xfs_uuid_table_mutex);
}
-
-STATIC void
-__xfs_free_perag(
- struct rcu_head *head)
-{
- struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
-
- ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
- ASSERT(atomic_read(&pag->pag_ref) == 0);
- kmem_free(pag);
-}
-
-/*
- * Free up the per-ag resources associated with the mount structure.
- */
-STATIC void
-xfs_free_perag(
- xfs_mount_t *mp)
-{
- xfs_agnumber_t agno;
- struct xfs_perag *pag;
-
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- spin_lock(&mp->m_perag_lock);
- pag = radix_tree_delete(&mp->m_perag_tree, agno);
- spin_unlock(&mp->m_perag_lock);
- ASSERT(pag);
- ASSERT(atomic_read(&pag->pag_ref) == 0);
- cancel_delayed_work_sync(&pag->pag_blockgc_work);
- xfs_iunlink_destroy(pag);
- xfs_buf_hash_destroy(pag);
- call_rcu(&pag->rcu_head, __xfs_free_perag);
- }
-}
-
/*
* Check size of device based on the (data/realtime) block count.
* Note: this check is used by the growfs code as well as mount.
@@ -172,96 +138,6 @@ xfs_sb_validate_fsb_count(
return 0;
}
-int
-xfs_initialize_perag(
- xfs_mount_t *mp,
- xfs_agnumber_t agcount,
- xfs_agnumber_t *maxagi)
-{
- xfs_agnumber_t index;
- xfs_agnumber_t first_initialised = NULLAGNUMBER;
- xfs_perag_t *pag;
- int error = -ENOMEM;
-
- /*
- * Walk the current per-ag tree so we don't try to initialise AGs
- * that already exist (growfs case). Allocate and insert all the
- * AGs we don't find ready for initialisation.
- */
- for (index = 0; index < agcount; index++) {
- pag = xfs_perag_get(mp, index);
- if (pag) {
- xfs_perag_put(pag);
- continue;
- }
-
- pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
- if (!pag) {
- error = -ENOMEM;
- goto out_unwind_new_pags;
- }
- pag->pag_agno = index;
- pag->pag_mount = mp;
- spin_lock_init(&pag->pag_ici_lock);
- INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
- INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
-
- error = xfs_buf_hash_init(pag);
- if (error)
- goto out_free_pag;
- init_waitqueue_head(&pag->pagb_wait);
- spin_lock_init(&pag->pagb_lock);
- pag->pagb_count = 0;
- pag->pagb_tree = RB_ROOT;
-
- error = radix_tree_preload(GFP_NOFS);
- if (error)
- goto out_hash_destroy;
-
- spin_lock(&mp->m_perag_lock);
- if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
- WARN_ON_ONCE(1);
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- error = -EEXIST;
- goto out_hash_destroy;
- }
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- /* first new pag is fully initialized */
- if (first_initialised == NULLAGNUMBER)
- first_initialised = index;
- error = xfs_iunlink_init(pag);
- if (error)
- goto out_hash_destroy;
- spin_lock_init(&pag->pag_state_lock);
- }
-
- index = xfs_set_inode_alloc(mp, agcount);
-
- if (maxagi)
- *maxagi = index;
-
- mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
- return 0;
-
-out_hash_destroy:
- xfs_buf_hash_destroy(pag);
-out_free_pag:
- kmem_free(pag);
-out_unwind_new_pags:
- /* unwind any prior newly initialized pags */
- for (index = first_initialised; index < agcount; index++) {
- pag = radix_tree_delete(&mp->m_perag_tree, index);
- if (!pag)
- break;
- xfs_buf_hash_destroy(pag);
- xfs_iunlink_destroy(pag);
- kmem_free(pag);
- }
- return error;
-}
-
/*
* xfs_readsb
*
@@ -983,9 +859,17 @@ xfs_mountfs(
/*
* Finish recovering the file system. This part needed to be delayed
* until after the root and real-time bitmap inodes were consistently
- * read in.
+ * read in. Temporarily create per-AG space reservations for metadata
+ * btree shape changes because space freeing transactions (for inode
+ * inactivation) require the per-AG reservation in lieu of reserving
+ * blocks.
*/
+ error = xfs_fs_reserve_ag_blocks(mp);
+ if (error && error == -ENOSPC)
+ xfs_warn(mp,
+ "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
error = xfs_log_mount_finish(mp);
+ xfs_fs_unreserve_ag_blocks(mp);
if (error) {
xfs_warn(mp, "log mount finish failed");
goto out_rtunmount;
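With xfs_initialize_perag() and xfs_free_perag() removed from xfs_mount.c (presumably relocated alongside the new xfs_ag.h include used throughout this patch), perag access in the mount paths goes through the reference-counted lookup helpers. The basic pattern, as in the removed initialisation code:

	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);	/* takes a reference; NULL if absent */
	if (pag) {
		/* ... use pag->pag_agno, pag->pagf_freeblks, ... */
		xfs_perag_put(pag);
	}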
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bb67274ee23f..c78b63fe779a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -12,6 +12,7 @@ struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
+struct xfs_perag;
/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
@@ -297,117 +298,12 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
-/* per-AG block reservation data structures*/
-struct xfs_ag_resv {
- /* number of blocks originally reserved here */
- xfs_extlen_t ar_orig_reserved;
- /* number of blocks reserved here */
- xfs_extlen_t ar_reserved;
- /* number of blocks originally asked for */
- xfs_extlen_t ar_asked;
-};
-
-/*
- * Per-ag incore structure, copies of information in agf and agi, to improve the
- * performance of allocation group selection.
- */
-typedef struct xfs_perag {
- struct xfs_mount *pag_mount; /* owner filesystem */
- xfs_agnumber_t pag_agno; /* AG this structure belongs to */
- atomic_t pag_ref; /* perag reference count */
- char pagf_init; /* this agf's entry is initialized */
- char pagi_init; /* this agi's entry is initialized */
- char pagf_metadata; /* the agf is preferred to be metadata */
- char pagi_inodeok; /* The agi is ok for inodes */
- uint8_t pagf_levels[XFS_BTNUM_AGF];
- /* # of levels in bno & cnt btree */
- bool pagf_agflreset; /* agfl requires reset before use */
- uint32_t pagf_flcount; /* count of blocks in freelist */
- xfs_extlen_t pagf_freeblks; /* total free blocks */
- xfs_extlen_t pagf_longest; /* longest free space */
- uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
- xfs_agino_t pagi_freecount; /* number of free inodes */
- xfs_agino_t pagi_count; /* number of allocated inodes */
-
- /*
- * Inode allocation search lookup optimisation.
- * If the pagino matches, the search for new inodes
- * doesn't need to search the near ones again straight away
- */
- xfs_agino_t pagl_pagino;
- xfs_agino_t pagl_leftrec;
- xfs_agino_t pagl_rightrec;
-
- /*
- * Bitsets of per-ag metadata that have been checked and/or are sick.
- * Callers should hold pag_state_lock before accessing this field.
- */
- uint16_t pag_checked;
- uint16_t pag_sick;
- spinlock_t pag_state_lock;
-
- spinlock_t pagb_lock; /* lock for pagb_tree */
- struct rb_root pagb_tree; /* ordered tree of busy extents */
- unsigned int pagb_gen; /* generation count for pagb_tree */
- wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
-
- atomic_t pagf_fstrms; /* # of filestreams active in this AG */
-
- spinlock_t pag_ici_lock; /* incore inode cache lock */
- struct radix_tree_root pag_ici_root; /* incore inode cache root */
- int pag_ici_reclaimable; /* reclaimable inodes */
- unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
-
- /* buffer cache index */
- spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
- struct rhashtable pag_buf_hash;
-
- /* for rcu-safe freeing */
- struct rcu_head rcu_head;
- int pagb_count; /* pagb slots in use */
-
- /* Blocks reserved for all kinds of metadata. */
- struct xfs_ag_resv pag_meta_resv;
- /* Blocks reserved for the reverse mapping btree. */
- struct xfs_ag_resv pag_rmapbt_resv;
-
- /* background prealloc block trimming */
- struct delayed_work pag_blockgc_work;
-
- /* reference count */
- uint8_t pagf_refcount_level;
-
- /*
- * Unlinked inode information. This incore information reflects
- * data stored in the AGI, so callers must hold the AGI buffer lock
- * or have some other means to control concurrency.
- */
- struct rhashtable pagi_unlinked_hash;
-} xfs_perag_t;
-
-static inline struct xfs_ag_resv *
-xfs_perag_resv(
- struct xfs_perag *pag,
- enum xfs_ag_resv_type type)
-{
- switch (type) {
- case XFS_AG_RESV_METADATA:
- return &pag->pag_meta_resv;
- case XFS_AG_RESV_RMAPBT:
- return &pag->pag_rmapbt_resv;
- default:
- return NULL;
- }
-}
-
-int xfs_buf_hash_init(xfs_perag_t *pag);
-void xfs_buf_hash_destroy(xfs_perag_t *pag);
+int xfs_buf_hash_init(struct xfs_perag *pag);
+void xfs_buf_hash_destroy(struct xfs_perag *pag);
extern void xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
-extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
- xfs_agnumber_t *maxagi);
extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
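The reservation structures and the xfs_perag_resv() helper removed here keep their shape at their new home: callers select a reservation by type and must handle a NULL return for types without a per-AG reservation. A usage sketch (the wrapper function is hypothetical):

	static xfs_extlen_t
	foo_resv_held(struct xfs_perag *pag, enum xfs_ag_resv_type type)
	{
		struct xfs_ag_resv	*resv = xfs_perag_resv(pag, type);

		if (!resv)
			return 0;	/* XFS_AG_RESV_NONE etc: nothing held */
		return resv->ar_reserved;	/* blocks currently held back */
	}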
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 4bf949a89d0d..fe341f3fd419 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -23,6 +23,8 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
+#include "xfs_ag.h"
+#include "xfs_ialloc.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -787,8 +789,12 @@ xfs_qm_qino_alloc(
return error;
if (need_alloc) {
- error = xfs_dir_ialloc(&init_user_ns, &tp, NULL, S_IFREG, 1, 0,
- 0, false, ipp);
+ xfs_ino_t ino;
+
+ error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
+ if (!error)
+ error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
+ S_IFREG, 1, 0, 0, false, ipp);
if (error) {
xfs_trans_cancel(tp);
return error;
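The quota inode allocation above, like the symlink path later in this patch, replaces the single combined allocation call with an explicit two-phase sequence: xfs_dialloc() selects and allocates the on-disk inode number, then xfs_init_new_inode() constructs the incore inode within the same transaction. The general shape, error handling elided:

	xfs_ino_t	ino;

	/* phase 1: allocate an inode number (0 means no parent hint) */
	error = xfs_dialloc(&tp, dp ? dp->i_ino : 0, mode, &ino);
	/* phase 2: initialise the new inode in the same transaction */
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				1, 0, prid, false, &ip);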
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index e3dabab44097..ebbb484c49dc 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -142,7 +142,6 @@ extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
/* dquot stuff */
extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
-extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
/* quota ops */
extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 11f1e2fbf22f..13a56e1ea15c 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -201,7 +201,8 @@ xfs_qm_scall_quotaoff(
* depend on the quota inodes (and other things) being valid as long as
* we keep the lock(s).
*/
- xfs_qm_dqrele_all_inodes(mp, flags);
+ error = xfs_dqrele_all_inodes(mp, flags);
+ ASSERT(!error);
/*
* Next we make the changes in the quota flag in the mount struct.
@@ -747,54 +748,3 @@ xfs_qm_scall_getquota_next(
xfs_qm_dqput(dqp);
return error;
}
-
-STATIC int
-xfs_dqrele_inode(
- struct xfs_inode *ip,
- void *args)
-{
- uint *flags = args;
-
- /* skip quota inodes */
- if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
- ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_gdquot == NULL);
- ASSERT(ip->i_pdquot == NULL);
- return 0;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
- xfs_qm_dqrele(ip->i_udquot);
- ip->i_udquot = NULL;
- }
- if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
- xfs_qm_dqrele(ip->i_gdquot);
- ip->i_gdquot = NULL;
- }
- if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
- xfs_qm_dqrele(ip->i_pdquot);
- ip->i_pdquot = NULL;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
-}
-
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- *
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff.
- */
-void
-xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
-{
- ASSERT(mp->m_quotainfo);
- xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
- &flags, XFS_ICI_NO_TAG);
-}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 060695d6d56a..c256104772cb 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -27,7 +27,7 @@
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -144,7 +144,7 @@ xfs_reflink_find_shared(
if (error)
return error;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
@@ -755,16 +755,19 @@ int
xfs_reflink_recover_cow(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
int error = 0;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- error = xfs_refcount_recover_cow_leftovers(mp, agno);
- if (error)
+ for_each_perag(mp, agno, pag) {
+ error = xfs_refcount_recover_cow_leftovers(mp, pag);
+ if (error) {
+ xfs_perag_put(pag);
break;
+ }
}
return error;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index a2dab05332ac..2c9e26a44546 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -36,6 +36,7 @@
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
+#include "xfs_ag.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -339,13 +340,6 @@ xfs_blkdev_put(
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
-void
-xfs_blkdev_issue_flush(
- xfs_buftarg_t *buftarg)
-{
- blkdev_issue_flush(buftarg->bt_bdev);
-}
-
STATIC void
xfs_close_devices(
struct xfs_mount *mp)
@@ -667,7 +661,7 @@ xfs_fs_destroy_inode(
* reclaim path handles this more efficiently than we can here, so
* simply let background reclaim tear down all inodes.
*/
- xfs_inode_set_reclaim_tag(ip);
+ xfs_inode_mark_reclaimable(ip);
}
static void
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index d2b40dc60dfc..167d23f92ffe 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -87,7 +87,6 @@ struct xfs_buftarg;
struct block_device;
extern void xfs_flush_inodes(struct xfs_mount *mp);
-extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
xfs_agnumber_t agcount);
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 99fbec32c10a..1525636f4065 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -21,6 +21,7 @@
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
+#include "xfs_ialloc.h"
/* ----- Kernel only functions below ----- */
int
@@ -161,6 +162,7 @@ xfs_symlink(
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
uint resblks;
+ xfs_ino_t ino;
*ipp = NULL;
@@ -223,8 +225,11 @@ xfs_symlink(
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, S_IFLNK | (mode & ~S_IFMT),
- 1, 0, prid, false, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, S_IFLNK, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino,
+ S_IFLNK | (mode & ~S_IFMT), 1, 0, prid,
+ false, &ip);
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 9b8d703dc9fd..7e01e00550ac 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -30,6 +30,8 @@
#include "xfs_fsmap.h"
#include "xfs_btree_staging.h"
#include "xfs_icache.h"
+#include "xfs_ag.h"
+#include "xfs_ag_resv.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 808ae337b222..f9d8d605f9b1 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -24,6 +24,7 @@ struct xlog_ticket;
struct xlog_recover;
struct xlog_recover_item;
struct xlog_rec_header;
+struct xlog_in_core;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
@@ -37,7 +38,7 @@ struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
struct xfs_dqtrx;
-struct xfs_eofblocks;
+struct xfs_icwalk;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
@@ -153,10 +154,8 @@ DEFINE_EVENT(xfs_perag_class, name, \
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_blockgc);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_blockgc);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
DECLARE_EVENT_CLASS(xfs_ag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -632,8 +631,8 @@ DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip), \
TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
-DEFINE_INODE_EVENT(xfs_iget_reclaim);
-DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
+DEFINE_INODE_EVENT(xfs_iget_recycle);
+DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
@@ -1914,7 +1913,6 @@ DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work);
-DEFINE_ATTR_EVENT(xfs_attr_leaf_addname);
DEFINE_ATTR_EVENT(xfs_attr_leaf_create);
DEFINE_ATTR_EVENT(xfs_attr_leaf_compact);
DEFINE_ATTR_EVENT(xfs_attr_leaf_get);
@@ -1944,7 +1942,6 @@ DEFINE_ATTR_EVENT(xfs_attr_refillstate);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_get);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_set);
-DEFINE_ATTR_EVENT(xfs_attr_rmtval_remove);
#define DEFINE_DA_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
@@ -3730,7 +3727,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot,
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
- __entry->agno = cur->bc_ag.agno;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = cur->bc_ag.afake->af_root;
__entry->levels = cur->bc_ag.afake->af_levels;
__entry->blocks = cur->bc_ag.afake->af_blocks;
@@ -3845,7 +3842,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
} else {
- __entry->agno = cur->bc_ag.agno;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = be32_to_cpu(ptr->s);
}
__entry->nr_records = nr_records;
@@ -3887,10 +3884,10 @@ DEFINE_EVENT(xfs_timestamp_range_class, name, \
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range);
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range);
-DECLARE_EVENT_CLASS(xfs_eofblocks_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb,
+DECLARE_EVENT_CLASS(xfs_icwalk_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw,
unsigned long caller_ip),
- TP_ARGS(mp, eofb, caller_ip),
+ TP_ARGS(mp, icw, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(__u32, flags)
@@ -3898,35 +3895,97 @@ DECLARE_EVENT_CLASS(xfs_eofblocks_class,
__field(uint32_t, gid)
__field(prid_t, prid)
__field(__u64, min_file_size)
+ __field(long, scan_limit)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->flags = eofb ? eofb->eof_flags : 0;
- __entry->uid = eofb ? from_kuid(mp->m_super->s_user_ns,
- eofb->eof_uid) : 0;
- __entry->gid = eofb ? from_kgid(mp->m_super->s_user_ns,
- eofb->eof_gid) : 0;
- __entry->prid = eofb ? eofb->eof_prid : 0;
- __entry->min_file_size = eofb ? eofb->eof_min_file_size : 0;
+ __entry->flags = icw ? icw->icw_flags : 0;
+ __entry->uid = icw ? from_kuid(mp->m_super->s_user_ns,
+ icw->icw_uid) : 0;
+ __entry->gid = icw ? from_kgid(mp->m_super->s_user_ns,
+ icw->icw_gid) : 0;
+ __entry->prid = icw ? icw->icw_prid : 0;
+ __entry->min_file_size = icw ? icw->icw_min_file_size : 0;
+ __entry->scan_limit = icw ? icw->icw_scan_limit : 0;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu caller %pS",
+ TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %ld caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->flags,
__entry->uid,
__entry->gid,
__entry->prid,
__entry->min_file_size,
+ __entry->scan_limit,
(char *)__entry->caller_ip)
);
-#define DEFINE_EOFBLOCKS_EVENT(name) \
-DEFINE_EVENT(xfs_eofblocks_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb, \
+#define DEFINE_ICWALK_EVENT(name) \
+DEFINE_EVENT(xfs_icwalk_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, \
unsigned long caller_ip), \
- TP_ARGS(mp, eofb, caller_ip))
-DEFINE_EOFBLOCKS_EVENT(xfs_ioc_free_eofblocks);
-DEFINE_EOFBLOCKS_EVENT(xfs_blockgc_free_space);
+ TP_ARGS(mp, icw, caller_ip))
+DEFINE_ICWALK_EVENT(xfs_ioc_free_eofblocks);
+DEFINE_ICWALK_EVENT(xfs_blockgc_free_space);
+
+TRACE_DEFINE_ENUM(XLOG_STATE_ACTIVE);
+TRACE_DEFINE_ENUM(XLOG_STATE_WANT_SYNC);
+TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING);
+TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC);
+TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK);
+TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY);
+TRACE_DEFINE_ENUM(XLOG_STATE_IOERROR);
+
+DECLARE_EVENT_CLASS(xlog_iclog_class,
+ TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip),
+ TP_ARGS(iclog, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(uint32_t, state)
+ __field(int32_t, refcount)
+ __field(uint32_t, offset)
+ __field(unsigned long long, lsn)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = iclog->ic_log->l_mp->m_super->s_dev;
+ __entry->state = iclog->ic_state;
+ __entry->refcount = atomic_read(&iclog->ic_refcnt);
+ __entry->offset = iclog->ic_offset;
+ __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
+ __entry->refcount,
+ __entry->offset,
+ __entry->lsn,
+ (char *)__entry->caller_ip)
+
+);
+
+#define DEFINE_ICLOG_EVENT(name) \
+DEFINE_EVENT(xlog_iclog_class, name, \
+ TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), \
+ TP_ARGS(iclog, caller_ip))
+
+DEFINE_ICLOG_EVENT(xlog_iclog_activate);
+DEFINE_ICLOG_EVENT(xlog_iclog_clean);
+DEFINE_ICLOG_EVENT(xlog_iclog_callback);
+DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_start);
+DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_done);
+DEFINE_ICLOG_EVENT(xlog_iclog_force);
+DEFINE_ICLOG_EVENT(xlog_iclog_force_lsn);
+DEFINE_ICLOG_EVENT(xlog_iclog_get_space);
+DEFINE_ICLOG_EVENT(xlog_iclog_release);
+DEFINE_ICLOG_EVENT(xlog_iclog_switch);
+DEFINE_ICLOG_EVENT(xlog_iclog_sync);
+DEFINE_ICLOG_EVENT(xlog_iclog_syncing);
+DEFINE_ICLOG_EVENT(xlog_iclog_sync_done);
+DEFINE_ICLOG_EVENT(xlog_iclog_want_sync);
+DEFINE_ICLOG_EVENT(xlog_iclog_wait_on);
+DEFINE_ICLOG_EVENT(xlog_iclog_write);
#endif /* _TRACE_XFS_H */
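The TRACE_DEFINE_ENUM() lines export the iclog state enum values to the tracing core, which is what allows __print_symbolic() in xlog_iclog_class to render the XLOG_STATE_STRINGS names instead of raw numbers. The call sites for these events live in xfs_log.c and are not part of this section; an illustrative call and a made-up example of its output:

	/* illustrative call site; the real ones are added in xfs_log.c */
	trace_xlog_iclog_release(iclog, _RET_IP_);

	dev 253:0 state XLOG_STATE_ACTIVE refcnt 1 offset 512 lsn 0x100000002 caller xlog_state_release_iclog+0x2b/0xd0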
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 586f2992b789..87bffd12c20c 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -839,7 +839,7 @@ __xfs_trans_commit(
bool regrant)
{
struct xfs_mount *mp = tp->t_mountp;
- xfs_lsn_t commit_lsn = -1;
+ xfs_csn_t commit_seq = 0;
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -881,7 +881,7 @@ __xfs_trans_commit(
xfs_trans_apply_sb_deltas(tp);
xfs_trans_apply_dquot_deltas(tp);
- xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
+ xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
xfs_trans_free(tp);
@@ -890,7 +890,7 @@ __xfs_trans_commit(
* log out now and wait for it.
*/
if (sync) {
- error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
+ error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
XFS_STATS_INC(mp, xs_trans_sync);
} else {
XFS_STATS_INC(mp, xs_trans_async);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ee42d98d9011..50da47f23a07 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -43,7 +43,7 @@ struct xfs_log_item {
struct list_head li_cil; /* CIL pointers */
struct xfs_log_vec *li_lv; /* active log vector */
struct xfs_log_vec *li_lv_shadow; /* standby vector */
- xfs_lsn_t li_seq; /* CIL commit seq */
+ xfs_csn_t li_seq; /* CIL commit seq */
};
/*
@@ -69,7 +69,7 @@ struct xfs_item_ops {
void (*iop_pin)(struct xfs_log_item *);
void (*iop_unpin)(struct xfs_log_item *, int remove);
uint (*iop_push)(struct xfs_log_item *, struct list_head *);
- void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn);
+ void (*iop_committing)(struct xfs_log_item *lip, xfs_csn_t seq);
void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
int (*iop_recover)(struct xfs_log_item *lip,
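With iop_committing now taking the checkpoint sequence, log item implementations record the sequence of the checkpoint that commits them rather than an LSN. A sketch for a hypothetical item type (names invented; real items also release locks and references here):

	STATIC void
	xfs_foo_item_committing(
		struct xfs_log_item	*lip,
		xfs_csn_t		seq)
	{
		/* remember which CIL checkpoint this item was committed in */
		FOO_ITEM(lip)->foo_commit_seq = seq;
		/* release the item, as iop_release would */
		xfs_foo_item_release(lip);
	}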