Diffstat (limited to 'fs/xfs/libxfs')
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c             |  41
-rw-r--r--  fs/xfs/libxfs/xfs_alloc_btree.c       |  52
-rw-r--r--  fs/xfs/libxfs/xfs_bmap_btree.c        |  32
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c             |  33
-rw-r--r--  fs/xfs/libxfs/xfs_btree.h             |  41
-rw-r--r--  fs/xfs/libxfs/xfs_format.h            |   2
-rw-r--r--  fs/xfs/libxfs/xfs_group.c             |  17
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c            |  31
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c      |  24
-rw-r--r--  fs/xfs/libxfs/xfs_log_recover.h       |   4
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c          |   4
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.c    |  18
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.c        |  67
-rw-r--r--  fs/xfs/libxfs/xfs_rtrefcount_btree.c  |  18
-rw-r--r--  fs/xfs/libxfs/xfs_rtrmap_btree.c      |  67
15 files changed, 222 insertions(+), 229 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 7839efe050bf..000cc7f4a3ce 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -3444,16 +3444,41 @@ xfs_alloc_read_agf(
set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
}
+
#ifdef DEBUG
- else if (!xfs_is_shutdown(mp)) {
- ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
- ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
- ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
- ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
- ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level));
- ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level));
+ /*
+ * It's possible for the AGF to be out of sync if the block device is
+ * silently dropping writes. This can happen in fstests with dmflakey
+ * enabled, which allows the buffer to be cleaned and reclaimed by
+ * memory pressure and then re-read from disk here. We will get a
+ * stale version of the AGF from disk, and nothing good can happen from
+ * here. Hence if we detect this situation, immediately shut down the
+ * filesystem.
+ *
+ * This can also happen if we are already in the middle of a forced
+ * shutdown, so don't bother checking if we are already shut down.
+ */
+ if (!xfs_is_shutdown(pag_mount(pag))) {
+ bool ok = true;
+
+ ok &= pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks);
+ ok &= pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks);
+ ok &= pag->pagf_flcount == be32_to_cpu(agf->agf_flcount);
+ ok &= pag->pagf_longest == be32_to_cpu(agf->agf_longest);
+ ok &= pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level);
+ ok &= pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level);
+
+ if (XFS_IS_CORRUPT(pag_mount(pag), !ok)) {
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
+ xfs_trans_brelse(tp, agfbp);
+ xfs_force_shutdown(pag_mount(pag),
+ SHUTDOWN_CORRUPT_ONDISK);
+ return -EFSCORRUPTED;
+ }
}
-#endif
+#endif /* DEBUG */
+
if (agfbpp)
*agfbpp = agfbp;
else
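
The new DEBUG block above folds every in-core vs on-disk comparison into a single flag and only then decides whether to mark the AG sick and shut down. Below is a minimal user-space sketch of that accumulate-and-check pattern; the structures and field names in it are invented for illustration and are not the kernel's xfs_perag or xfs_agf layouts.

/*
 * Accumulate-and-check sketch: compare every in-core field against its
 * on-disk counterpart, fold the results into one flag, and take a single
 * corruption path if anything disagreed. Purely illustrative types.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct incore_counters { uint32_t freeblks, flcount, longest; };
struct ondisk_counters { uint32_t freeblks, flcount, longest; };

static int check_counters(const struct incore_counters *mem,
			  const struct ondisk_counters *disk)
{
	bool ok = true;

	/* &= keeps evaluating every check; nothing is short-circuited away */
	ok &= mem->freeblks == disk->freeblks;
	ok &= mem->flcount == disk->flcount;
	ok &= mem->longest == disk->longest;

	if (!ok) {
		fprintf(stderr, "in-core/on-disk mismatch, shutting down\n");
		return -1;	/* stands in for -EFSCORRUPTED + forced shutdown */
	}
	return 0;
}

int main(void)
{
	struct incore_counters mem = { 100, 3, 50 };
	struct ondisk_counters disk = { 100, 3, 49 };	/* stale 'longest' */

	return check_counters(&mem, &disk) ? 1 : 0;
}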
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index a4ac37ba5d51..fa1f03c1331e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -186,35 +186,32 @@ xfs_allocbt_init_ptr_from_cur(
ptr->s = agf->agf_cnt_root;
}
-STATIC int64_t
-xfs_bnobt_key_diff(
+STATIC int
+xfs_bnobt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
const struct xfs_alloc_rec *kp = &key->alloc;
- return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
+ return cmp_int(be32_to_cpu(kp->ar_startblock),
+ rec->ar_startblock);
}
-STATIC int64_t
-xfs_cntbt_key_diff(
+STATIC int
+xfs_cntbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
const struct xfs_alloc_rec *kp = &key->alloc;
- int64_t diff;
- diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
- if (diff)
- return diff;
-
- return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
+ return cmp_int(be32_to_cpu(kp->ar_blockcount), rec->ar_blockcount) ?:
+ cmp_int(be32_to_cpu(kp->ar_startblock), rec->ar_startblock);
}
-STATIC int64_t
-xfs_bnobt_diff_two_keys(
+STATIC int
+xfs_bnobt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -222,29 +219,24 @@ xfs_bnobt_diff_two_keys(
{
ASSERT(!mask || mask->alloc.ar_startblock);
- return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
- be32_to_cpu(k2->alloc.ar_startblock);
+ return cmp_int(be32_to_cpu(k1->alloc.ar_startblock),
+ be32_to_cpu(k2->alloc.ar_startblock));
}
-STATIC int64_t
-xfs_cntbt_diff_two_keys(
+STATIC int
+xfs_cntbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{
- int64_t diff;
-
ASSERT(!mask || (mask->alloc.ar_blockcount &&
mask->alloc.ar_startblock));
- diff = be32_to_cpu(k1->alloc.ar_blockcount) -
- be32_to_cpu(k2->alloc.ar_blockcount);
- if (diff)
- return diff;
-
- return be32_to_cpu(k1->alloc.ar_startblock) -
- be32_to_cpu(k2->alloc.ar_startblock);
+ return cmp_int(be32_to_cpu(k1->alloc.ar_blockcount),
+ be32_to_cpu(k2->alloc.ar_blockcount)) ?:
+ cmp_int(be32_to_cpu(k1->alloc.ar_startblock),
+ be32_to_cpu(k2->alloc.ar_startblock));
}
static xfs_failaddr_t
@@ -438,9 +430,9 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
.init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
- .key_diff = xfs_bnobt_key_diff,
+ .cmp_key_with_cur = xfs_bnobt_cmp_key_with_cur,
.buf_ops = &xfs_bnobt_buf_ops,
- .diff_two_keys = xfs_bnobt_diff_two_keys,
+ .cmp_two_keys = xfs_bnobt_cmp_two_keys,
.keys_inorder = xfs_bnobt_keys_inorder,
.recs_inorder = xfs_bnobt_recs_inorder,
.keys_contiguous = xfs_allocbt_keys_contiguous,
@@ -468,9 +460,9 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
- .key_diff = xfs_cntbt_key_diff,
+ .cmp_key_with_cur = xfs_cntbt_cmp_key_with_cur,
.buf_ops = &xfs_cntbt_buf_ops,
- .diff_two_keys = xfs_cntbt_diff_two_keys,
+ .cmp_two_keys = xfs_cntbt_cmp_two_keys,
.keys_inorder = xfs_cntbt_keys_inorder,
.recs_inorder = xfs_cntbt_recs_inorder,
.keys_contiguous = NULL, /* not needed right now */
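
The by-size (cntbt) comparator above chains two three-way compares with the GNU ?: extension so the secondary key only decides ties. Here is a stand-alone sketch of that idiom, with CMP_INT() as a local stand-in for the kernel's cmp_int() helper, assumed to behave like (a > b) - (a < b):

#include <stdint.h>
#include <stdio.h>

#define CMP_INT(a, b)	(((a) > (b)) - ((a) < (b)))

struct extent { uint32_t blockcount; uint32_t startblock; };

/* by-size ordering with startblock as the tie-breaker, like the cntbt */
static int cmp_extents(const struct extent *k, const struct extent *r)
{
	/* "x ?: y" is the GNU extension the kernel code also relies on */
	return CMP_INT(k->blockcount, r->blockcount) ?:
	       CMP_INT(k->startblock, r->startblock);
}

int main(void)
{
	struct extent a = { .blockcount = 8, .startblock = 100 };
	struct extent b = { .blockcount = 8, .startblock = 200 };

	/* same size, so the startblock tie-breaker decides: prints -1 */
	printf("%d\n", cmp_extents(&a, &b));
	return 0;
}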
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 908d7b050e9c..188feac04b60 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -369,38 +369,26 @@ xfs_bmbt_init_rec_from_cur(
xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
-STATIC int64_t
-xfs_bmbt_key_diff(
+STATIC int
+xfs_bmbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
- return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
- cur->bc_rec.b.br_startoff;
+ return cmp_int(be64_to_cpu(key->bmbt.br_startoff),
+ cur->bc_rec.b.br_startoff);
}
-STATIC int64_t
-xfs_bmbt_diff_two_keys(
+STATIC int
+xfs_bmbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{
- uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
- uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
-
ASSERT(!mask || mask->bmbt.br_startoff);
- /*
- * Note: This routine previously casted a and b to int64 and subtracted
- * them to generate a result. This lead to problems if b was the
- * "maximum" key value (all ones) being signed incorrectly, hence this
- * somewhat less efficient version.
- */
- if (a > b)
- return 1;
- if (b > a)
- return -1;
- return 0;
+ return cmp_int(be64_to_cpu(k1->bmbt.br_startoff),
+ be64_to_cpu(k2->bmbt.br_startoff));
}
static xfs_failaddr_t
@@ -647,8 +635,8 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
.init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
- .key_diff = xfs_bmbt_key_diff,
- .diff_two_keys = xfs_bmbt_diff_two_keys,
+ .cmp_key_with_cur = xfs_bmbt_cmp_key_with_cur,
+ .cmp_two_keys = xfs_bmbt_cmp_two_keys,
.buf_ops = &xfs_bmbt_buf_ops,
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
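
The comment removed from xfs_bmbt_diff_two_keys() explains why the subtraction-based comparators were fragile: subtracting 64-bit keys can wrap and report the wrong sign when one key is near the maximum value. A small demonstration with made-up key values:

#include <stdint.h>
#include <stdio.h>

#define CMP_INT(a, b)	(((a) > (b)) - ((a) < (b)))

int main(void)
{
	uint64_t small = 1;
	uint64_t huge  = UINT64_MAX;	/* the "all ones" maximum key */

	/* unsigned subtraction wraps to 2, so 'small' appears to be bigger */
	int64_t by_subtraction = (int64_t)(small - huge);
	int by_comparison = CMP_INT(small, huge);

	printf("subtraction says %s\n",
	       by_subtraction > 0 ? "small > huge (wrong)" : "small <= huge");
	printf("comparison  says %d (negative, i.e. small < huge)\n",
	       by_comparison);
	return 0;
}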
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 299ce7fd11b0..a61211d253f1 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1985,7 +1985,7 @@ xfs_btree_lookup(
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* current btree block */
- int64_t diff; /* difference for the current key */
+ int cmp_r; /* current key comparison result */
int error; /* error return value */
int keyno; /* current key number */
int level; /* level in the btree */
@@ -2013,13 +2013,13 @@ xfs_btree_lookup(
* on the lookup record, then follow the corresponding block
* pointer down to the next level.
*/
- for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+ for (level = cur->bc_nlevels - 1, cmp_r = 1; level >= 0; level--) {
/* Get the block we need to do the lookup on. */
error = xfs_btree_lookup_get_block(cur, level, pp, &block);
if (error)
goto error0;
- if (diff == 0) {
+ if (cmp_r == 0) {
/*
* If we already had a key match at a higher level, we
* know we need to use the first entry in this block.
@@ -2065,15 +2065,16 @@ xfs_btree_lookup(
keyno, block, &key);
/*
- * Compute difference to get next direction:
+ * Compute comparison result to get next
+ * direction:
* - less than, move right
* - greater than, move left
* - equal, we're done
*/
- diff = cur->bc_ops->key_diff(cur, kp);
- if (diff < 0)
+ cmp_r = cur->bc_ops->cmp_key_with_cur(cur, kp);
+ if (cmp_r < 0)
low = keyno + 1;
- else if (diff > 0)
+ else if (cmp_r > 0)
high = keyno - 1;
else
break;
@@ -2089,7 +2090,7 @@ xfs_btree_lookup(
* If we moved left, need the previous key number,
* unless there isn't one.
*/
- if (diff > 0 && --keyno < 1)
+ if (cmp_r > 0 && --keyno < 1)
keyno = 1;
pp = xfs_btree_ptr_addr(cur, keyno, block);
@@ -2102,7 +2103,7 @@ xfs_btree_lookup(
}
/* Done with the search. See if we need to adjust the results. */
- if (dir != XFS_LOOKUP_LE && diff < 0) {
+ if (dir != XFS_LOOKUP_LE && cmp_r < 0) {
keyno++;
/*
* If ge search and we went off the end of the block, but it's
@@ -2125,14 +2126,14 @@ xfs_btree_lookup(
*stat = 1;
return 0;
}
- } else if (dir == XFS_LOOKUP_LE && diff > 0)
+ } else if (dir == XFS_LOOKUP_LE && cmp_r > 0)
keyno--;
cur->bc_levels[0].ptr = keyno;
/* Return if we succeeded or not. */
if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
*stat = 0;
- else if (dir != XFS_LOOKUP_EQ || diff == 0)
+ else if (dir != XFS_LOOKUP_EQ || cmp_r == 0)
*stat = 1;
else
*stat = 0;
@@ -5058,7 +5059,7 @@ xfs_btree_simple_query_range(
int error;
ASSERT(cur->bc_ops->init_high_key_from_rec);
- ASSERT(cur->bc_ops->diff_two_keys);
+ ASSERT(cur->bc_ops->cmp_two_keys);
/*
* Find the leftmost record. The btree cursor must be set
@@ -5352,15 +5353,15 @@ xfs_btree_count_blocks(
}
/* Compare two btree pointers. */
-int64_t
-xfs_btree_diff_two_ptrs(
+int
+xfs_btree_cmp_two_ptrs(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *a,
const union xfs_btree_ptr *b)
{
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
- return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
- return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
+ return cmp_int(be64_to_cpu(a->l), be64_to_cpu(b->l));
+ return cmp_int(be32_to_cpu(a->s), be32_to_cpu(b->s));
}
struct xfs_btree_has_records {
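
xfs_btree_lookup() in the hunk above drives its binary search purely off the sign of the comparator: negative moves the search right, positive moves it left, zero means the key was found. A self-contained sketch of that loop over a plain sorted array, with keyno standing in for the 1-based key index:

#include <stdint.h>
#include <stdio.h>

#define CMP_INT(a, b)	(((a) > (b)) - ((a) < (b)))

/* returns the 1-based position of 'want', or 0 if it is not present */
static int demo_lookup(const uint32_t *keys, int nkeys, uint32_t want)
{
	int low = 1, high = nkeys, cmp_r = 1, keyno = 0;

	while (low <= high) {
		keyno = (low + high) / 2;

		/* compare the block's key with the search key */
		cmp_r = CMP_INT(keys[keyno - 1], want);
		if (cmp_r < 0)		/* key too small: move right */
			low = keyno + 1;
		else if (cmp_r > 0)	/* key too big: move left */
			high = keyno - 1;
		else			/* exact match */
			break;
	}
	return cmp_r == 0 ? keyno : 0;
}

int main(void)
{
	uint32_t keys[] = { 3, 7, 19, 24, 42 };

	printf("found 19 at keyno %d\n", demo_lookup(keys, 5, 19));
	printf("found  8 at keyno %d\n", demo_lookup(keys, 5, 8));
	return 0;
}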
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 355b304696e6..60e78572e725 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -171,20 +171,23 @@ struct xfs_btree_ops {
void (*init_high_key_from_rec)(union xfs_btree_key *key,
const union xfs_btree_rec *rec);
- /* difference between key value and cursor value */
- int64_t (*key_diff)(struct xfs_btree_cur *cur,
- const union xfs_btree_key *key);
+ /*
+ * Compare key value and cursor value -- positive if key > cur,
+ * negative if key < cur, and zero if equal.
+ */
+ int (*cmp_key_with_cur)(struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key);
/*
- * Difference between key2 and key1 -- positive if key1 > key2,
- * negative if key1 < key2, and zero if equal. If the @mask parameter
- * is non NULL, each key field to be used in the comparison must
- * contain a nonzero value.
+ * Compare key1 and key2 -- positive if key1 > key2, negative if
+ * key1 < key2, and zero if equal. If the @mask parameter is non NULL,
+ * each key field to be used in the comparison must contain a nonzero
+ * value.
*/
- int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
- const union xfs_btree_key *key1,
- const union xfs_btree_key *key2,
- const union xfs_btree_key *mask);
+ int (*cmp_two_keys)(struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask);
const struct xfs_buf_ops *buf_ops;
@@ -516,9 +519,9 @@ struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
int level, struct xfs_buf **bpp);
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr);
-int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
- const union xfs_btree_ptr *a,
- const union xfs_btree_ptr *b);
+int xfs_btree_cmp_two_ptrs(struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *a,
+ const union xfs_btree_ptr *b);
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
union xfs_btree_ptr *ptr, int lr);
@@ -546,7 +549,7 @@ xfs_btree_keycmp_lt(
const union xfs_btree_key *key1,
const union xfs_btree_key *key2)
{
- return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
+ return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) < 0;
}
static inline bool
@@ -555,7 +558,7 @@ xfs_btree_keycmp_gt(
const union xfs_btree_key *key1,
const union xfs_btree_key *key2)
{
- return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
+ return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) > 0;
}
static inline bool
@@ -564,7 +567,7 @@ xfs_btree_keycmp_eq(
const union xfs_btree_key *key1,
const union xfs_btree_key *key2)
{
- return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
+ return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) == 0;
}
static inline bool
@@ -602,7 +605,7 @@ xfs_btree_masked_keycmp_lt(
const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{
- return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
+ return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) < 0;
}
static inline bool
@@ -612,7 +615,7 @@ xfs_btree_masked_keycmp_gt(
const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{
- return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
+ return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) > 0;
}
static inline bool
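
The cmp_two_keys contract documented in the header hunk above also defines the mask convention: when a mask is supplied, only fields with a nonzero mask value take part in the comparison. A sketch of a comparator written to that contract, using an invented two-field key type:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CMP_INT(a, b)	(((a) > (b)) - ((a) < (b)))

struct demo_key { uint64_t primary; uint64_t secondary; };

static int demo_cmp_two_keys(const struct demo_key *k1,
			     const struct demo_key *k2,
			     const struct demo_key *mask)
{
	int d;

	/* the primary field must always take part in the comparison */
	assert(!mask || mask->primary);

	d = CMP_INT(k1->primary, k2->primary);
	if (d)
		return d;

	if (!mask || mask->secondary)
		d = CMP_INT(k1->secondary, k2->secondary);
	return d;
}

int main(void)
{
	struct demo_key a = { 10, 5 }, b = { 10, 9 };
	struct demo_key primary_only = { 1, 0 };

	printf("full compare: %d\n", demo_cmp_two_keys(&a, &b, NULL));		/* -1 */
	printf("masked:       %d\n", demo_cmp_two_keys(&a, &b, &primary_only));	/*  0 */
	return 0;
}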
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 9566a7623365..779dac59b1f3 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -112,7 +112,7 @@ typedef struct xfs_sb {
uint16_t sb_sectsize; /* volume sector size, bytes */
uint16_t sb_inodesize; /* inode size, bytes */
uint16_t sb_inopblock; /* inodes per block */
- char sb_fname[XFSLABEL_MAX]; /* file system name */
+ char sb_fname[XFSLABEL_MAX] __nonstring; /* file system name */
uint8_t sb_blocklog; /* log2 of sb_blocksize */
uint8_t sb_sectlog; /* log2 of sb_sectsize */
uint8_t sb_inodelog; /* log2 of sb_inodesize */
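
The __nonstring annotation added to sb_fname marks it as a fixed-size byte array that need not be NUL-terminated, so it has to be handled with explicit lengths rather than str*() routines. In the kernel the macro is understood to map to GCC's "nonstring" variable attribute; the sketch below carries its own fallback so it builds anywhere, and the label handling is illustrative only:

#include <stdio.h>
#include <string.h>

#if defined(__has_attribute)
# if __has_attribute(nonstring)
#  define __nonstring __attribute__((nonstring))
# endif
#endif
#ifndef __nonstring
# define __nonstring
#endif

#define LABEL_MAX 12

struct demo_sb {
	char fname[LABEL_MAX] __nonstring;	/* may use all 12 bytes, no NUL */
};

int main(void)
{
	struct demo_sb sb;
	const char *label = "mylongvolume";	/* exactly 12 chars, no room for a NUL */

	/* copy by size, not by terminator */
	memcpy(sb.fname, label, sizeof(sb.fname));

	/* print with an explicit length instead of relying on a terminator */
	printf("label: %.*s\n", LABEL_MAX, sb.fname);
	return 0;
}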
diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c
index e9d76bcdc820..792f76d2e2a0 100644
--- a/fs/xfs/libxfs/xfs_group.c
+++ b/fs/xfs/libxfs/xfs_group.c
@@ -163,7 +163,8 @@ xfs_group_free(
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
- kfree(xg->xg_busy_extents);
+ if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
+ kfree(xg->xg_busy_extents);
#endif
if (uninit)
@@ -171,7 +172,8 @@ xfs_group_free(
/* drop the mount's active reference */
xfs_group_rele(xg);
- XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0);
+ XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) > 0);
+ XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) < 0);
kfree_rcu_mightsleep(xg);
}
@@ -189,9 +191,11 @@ xfs_group_insert(
xg->xg_type = type;
#ifdef __KERNEL__
- xg->xg_busy_extents = xfs_extent_busy_alloc();
- if (!xg->xg_busy_extents)
- return -ENOMEM;
+ if (xfs_group_has_extent_busy(mp, type)) {
+ xg->xg_busy_extents = xfs_extent_busy_alloc();
+ if (!xg->xg_busy_extents)
+ return -ENOMEM;
+ }
spin_lock_init(&xg->xg_state_lock);
xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
@@ -210,7 +214,8 @@ xfs_group_insert(
out_drain:
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
- kfree(xg->xg_busy_extents);
+ if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
+ kfree(xg->xg_busy_extents);
#endif
return error;
}
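
The xfs_group.c hunks gate both the allocation and the freeing of xg_busy_extents on the same xfs_group_has_extent_busy() predicate, so group types that never allocate the object never free it either. A generic sketch of that allocate-and-free-under-one-predicate pattern; the types and the needs_cache() rule are invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum group_type { GROUP_PLAIN, GROUP_CACHED };

struct group {
	enum group_type	type;
	void		*cache;		/* only allocated for GROUP_CACHED */
};

static bool needs_cache(enum group_type type)
{
	return type == GROUP_CACHED;
}

static int group_init(struct group *g, enum group_type type)
{
	g->type = type;
	g->cache = NULL;
	if (needs_cache(type)) {
		g->cache = malloc(64);
		if (!g->cache)
			return -1;
	}
	return 0;
}

static void group_free(struct group *g)
{
	/* mirror the predicate used at init time */
	if (needs_cache(g->type))
		free(g->cache);
}

int main(void)
{
	struct group a, b;

	if (group_init(&a, GROUP_PLAIN) || group_init(&b, GROUP_CACHED))
		return 1;
	group_free(&a);
	group_free(&b);
	puts("ok");
	return 0;
}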
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 0c47b5c6ca7d..750111634d9f 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2801,12 +2801,35 @@ xfs_ialloc_read_agi(
set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
}
+#ifdef DEBUG
/*
- * It's possible for these to be out of sync if
- * we are in the middle of a forced shutdown.
+ * It's possible for the AGI to be out of sync if the block device is
+ * silently dropping writes. This can happen in fstests with dmflakey
+ * enabled, which allows the buffer to be cleaned and reclaimed by
+ * memory pressure and then re-read from disk here. We will get a
+ * stale version of the AGI from disk, and nothing good can happen from
+ * here. Hence if we detect this situation, immediately shut down the
+ * filesystem.
+ *
+ * This can also happen if we are already in the middle of a forced
+ * shutdown, so don't bother checking if we are already shut down.
*/
- ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
- xfs_is_shutdown(pag_mount(pag)));
+ if (!xfs_is_shutdown(pag_mount(pag))) {
+ bool ok = true;
+
+ ok &= pag->pagi_freecount == be32_to_cpu(agi->agi_freecount);
+ ok &= pag->pagi_count == be32_to_cpu(agi->agi_count);
+
+ if (XFS_IS_CORRUPT(pag_mount(pag), !ok)) {
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
+ xfs_trans_brelse(tp, agibp);
+ xfs_force_shutdown(pag_mount(pag),
+ SHUTDOWN_CORRUPT_ONDISK);
+ return -EFSCORRUPTED;
+ }
+ }
+#endif /* DEBUG */
+
if (agibpp)
*agibpp = agibp;
else
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 6f270d8f4270..100afdd66cdd 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -265,17 +265,17 @@ xfs_finobt_init_ptr_from_cur(
ptr->s = agi->agi_free_root;
}
-STATIC int64_t
-xfs_inobt_key_diff(
+STATIC int
+xfs_inobt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
- return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
- cur->bc_rec.i.ir_startino;
+ return cmp_int(be32_to_cpu(key->inobt.ir_startino),
+ cur->bc_rec.i.ir_startino);
}
-STATIC int64_t
-xfs_inobt_diff_two_keys(
+STATIC int
+xfs_inobt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -283,8 +283,8 @@ xfs_inobt_diff_two_keys(
{
ASSERT(!mask || mask->inobt.ir_startino);
- return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
- be32_to_cpu(k2->inobt.ir_startino);
+ return cmp_int(be32_to_cpu(k1->inobt.ir_startino),
+ be32_to_cpu(k2->inobt.ir_startino));
}
static xfs_failaddr_t
@@ -430,9 +430,9 @@ const struct xfs_btree_ops xfs_inobt_ops = {
.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
- .key_diff = xfs_inobt_key_diff,
+ .cmp_key_with_cur = xfs_inobt_cmp_key_with_cur,
.buf_ops = &xfs_inobt_buf_ops,
- .diff_two_keys = xfs_inobt_diff_two_keys,
+ .cmp_two_keys = xfs_inobt_cmp_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
.keys_contiguous = xfs_inobt_keys_contiguous,
@@ -460,9 +460,9 @@ const struct xfs_btree_ops xfs_finobt_ops = {
.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
- .key_diff = xfs_inobt_key_diff,
+ .cmp_key_with_cur = xfs_inobt_cmp_key_with_cur,
.buf_ops = &xfs_finobt_buf_ops,
- .diff_two_keys = xfs_inobt_diff_two_keys,
+ .cmp_two_keys = xfs_inobt_cmp_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
.keys_contiguous = xfs_inobt_keys_contiguous,
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 66c7916fb5cd..95de23095030 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -104,7 +104,7 @@ struct xlog_recover_item {
struct list_head ri_list;
int ri_cnt; /* count of regions found */
int ri_total; /* total regions */
- struct xfs_log_iovec *ri_buf; /* ptr to regions buffer */
+ struct kvec *ri_buf; /* ptr to regions buffer */
const struct xlog_recover_item_ops *ri_ops;
};
@@ -117,7 +117,7 @@ struct xlog_recover {
struct list_head r_itemq; /* q for items */
};
-#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].i_addr)
+#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].iov_base)
#define XLOG_RECOVER_CRCPASS 0
#define XLOG_RECOVER_PASS1 1
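
With ri_buf switched from struct xfs_log_iovec to struct kvec, region data is reached through iov_base/iov_len, which is why ITEM_TYPE() now dereferences ri_buf[0].iov_base. A user-space sketch of that access pattern using local stand-in structures rather than the kernel headers:

#include <stddef.h>
#include <stdio.h>

struct demo_kvec {
	void	*iov_base;	/* start of the region */
	size_t	iov_len;	/* length of the region */
};

struct demo_recover_item {
	int			ri_cnt;
	struct demo_kvec	*ri_buf;	/* array of regions */
};

/* same shape as ITEM_TYPE(): the item type lives in the first region */
#define DEMO_ITEM_TYPE(i)	(*(unsigned short *)(i)->ri_buf[0].iov_base)

int main(void)
{
	unsigned short type = 0x1236;	/* arbitrary demo value */
	struct demo_kvec region = { .iov_base = &type, .iov_len = sizeof(type) };
	struct demo_recover_item item = { .ri_cnt = 1, .ri_buf = &region };

	printf("item type: 0x%x\n", DEMO_ITEM_TYPE(&item));
	return 0;
}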
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index cebe83f7842a..897784037483 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -2099,9 +2099,7 @@ xfs_refcount_recover_cow_leftovers(
* recording the CoW debris we cancel the (empty) transaction
* and everything goes away cleanly.
*/
- error = xfs_trans_alloc_empty(mp, &tp);
- if (error)
- return error;
+ tp = xfs_trans_alloc_empty(mp);
if (isrt) {
xfs_rtgroup_lock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT);
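
The hunk above reflects xfs_trans_alloc_empty() now handing back the transaction directly instead of an error code plus an out parameter, the usual simplification once a constructor can no longer fail. A generic before/after sketch of that API shape with an invented context type; in the kernel, a never-failing allocation is assumed to back the new form:

#include <stdio.h>
#include <stdlib.h>

struct demo_ctx { int id; };

/* old shape: returns an error, fills *ctxp on success */
static int demo_ctx_alloc_old(int id, struct demo_ctx **ctxp)
{
	struct demo_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return -1;
	ctx->id = id;
	*ctxp = ctx;
	return 0;
}

/* new shape: cannot fail (by assumption), so just return the object */
static struct demo_ctx *demo_ctx_alloc_new(int id)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		abort();	/* stand-in for a never-failing allocation */
	ctx->id = id;
	return ctx;
}

int main(void)
{
	struct demo_ctx *a, *b = demo_ctx_alloc_new(2);

	if (demo_ctx_alloc_old(1, &a))
		return 1;
	printf("%d %d\n", a->id, b->id);
	free(a);
	free(b);
	return 0;
}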
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 54505fee1852..06da3ca14727 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -174,8 +174,8 @@ xfs_refcountbt_init_ptr_from_cur(
ptr->s = agf->agf_refcount_root;
}
-STATIC int64_t
-xfs_refcountbt_key_diff(
+STATIC int
+xfs_refcountbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
@@ -185,11 +185,11 @@ xfs_refcountbt_key_diff(
start = xfs_refcount_encode_startblock(irec->rc_startblock,
irec->rc_domain);
- return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
+ return cmp_int(be32_to_cpu(kp->rc_startblock), start);
}
-STATIC int64_t
-xfs_refcountbt_diff_two_keys(
+STATIC int
+xfs_refcountbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -197,8 +197,8 @@ xfs_refcountbt_diff_two_keys(
{
ASSERT(!mask || mask->refc.rc_startblock);
- return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
- be32_to_cpu(k2->refc.rc_startblock);
+ return cmp_int(be32_to_cpu(k1->refc.rc_startblock),
+ be32_to_cpu(k2->refc.rc_startblock));
}
STATIC xfs_failaddr_t
@@ -339,9 +339,9 @@ const struct xfs_btree_ops xfs_refcountbt_ops = {
.init_high_key_from_rec = xfs_refcountbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_refcountbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_refcountbt_init_ptr_from_cur,
- .key_diff = xfs_refcountbt_key_diff,
+ .cmp_key_with_cur = xfs_refcountbt_cmp_key_with_cur,
.buf_ops = &xfs_refcountbt_buf_ops,
- .diff_two_keys = xfs_refcountbt_diff_two_keys,
+ .cmp_two_keys = xfs_refcountbt_cmp_two_keys,
.keys_inorder = xfs_refcountbt_keys_inorder,
.recs_inorder = xfs_refcountbt_recs_inorder,
.keys_contiguous = xfs_refcountbt_keys_contiguous,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 2cab694ac58a..bf16aee50d73 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -243,38 +243,22 @@ static inline uint64_t offset_keymask(uint64_t offset)
return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}
-STATIC int64_t
-xfs_rmapbt_key_diff(
+STATIC int
+xfs_rmapbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
struct xfs_rmap_irec *rec = &cur->bc_rec.r;
const struct xfs_rmap_key *kp = &key->rmap;
- __u64 x, y;
- int64_t d;
- d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
- if (d)
- return d;
-
- x = be64_to_cpu(kp->rm_owner);
- y = rec->rm_owner;
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
-
- x = offset_keymask(be64_to_cpu(kp->rm_offset));
- y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
- return 0;
+ return cmp_int(be32_to_cpu(kp->rm_startblock), rec->rm_startblock) ?:
+ cmp_int(be64_to_cpu(kp->rm_owner), rec->rm_owner) ?:
+ cmp_int(offset_keymask(be64_to_cpu(kp->rm_offset)),
+ offset_keymask(xfs_rmap_irec_offset_pack(rec)));
}
-STATIC int64_t
-xfs_rmapbt_diff_two_keys(
+STATIC int
+xfs_rmapbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -282,36 +266,31 @@ xfs_rmapbt_diff_two_keys(
{
const struct xfs_rmap_key *kp1 = &k1->rmap;
const struct xfs_rmap_key *kp2 = &k2->rmap;
- int64_t d;
- __u64 x, y;
+ int d;
/* Doesn't make sense to mask off the physical space part */
ASSERT(!mask || mask->rmap.rm_startblock);
- d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
- be32_to_cpu(kp2->rm_startblock);
+ d = cmp_int(be32_to_cpu(kp1->rm_startblock),
+ be32_to_cpu(kp2->rm_startblock));
if (d)
return d;
if (!mask || mask->rmap.rm_owner) {
- x = be64_to_cpu(kp1->rm_owner);
- y = be64_to_cpu(kp2->rm_owner);
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
+ d = cmp_int(be64_to_cpu(kp1->rm_owner),
+ be64_to_cpu(kp2->rm_owner));
+ if (d)
+ return d;
}
if (!mask || mask->rmap.rm_offset) {
/* Doesn't make sense to allow offset but not owner */
ASSERT(!mask || mask->rmap.rm_owner);
- x = offset_keymask(be64_to_cpu(kp1->rm_offset));
- y = offset_keymask(be64_to_cpu(kp2->rm_offset));
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
+ d = cmp_int(offset_keymask(be64_to_cpu(kp1->rm_offset)),
+ offset_keymask(be64_to_cpu(kp2->rm_offset)));
+ if (d)
+ return d;
}
return 0;
@@ -515,9 +494,9 @@ const struct xfs_btree_ops xfs_rmapbt_ops = {
.init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rmapbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_rmapbt_init_ptr_from_cur,
- .key_diff = xfs_rmapbt_key_diff,
+ .cmp_key_with_cur = xfs_rmapbt_cmp_key_with_cur,
.buf_ops = &xfs_rmapbt_buf_ops,
- .diff_two_keys = xfs_rmapbt_diff_two_keys,
+ .cmp_two_keys = xfs_rmapbt_cmp_two_keys,
.keys_inorder = xfs_rmapbt_keys_inorder,
.recs_inorder = xfs_rmapbt_recs_inorder,
.keys_contiguous = xfs_rmapbt_keys_contiguous,
@@ -632,9 +611,9 @@ const struct xfs_btree_ops xfs_rmapbt_mem_ops = {
.init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rmapbt_init_rec_from_cur,
.init_ptr_from_cur = xfbtree_init_ptr_from_cur,
- .key_diff = xfs_rmapbt_key_diff,
+ .cmp_key_with_cur = xfs_rmapbt_cmp_key_with_cur,
.buf_ops = &xfs_rmapbt_mem_buf_ops,
- .diff_two_keys = xfs_rmapbt_diff_two_keys,
+ .cmp_two_keys = xfs_rmapbt_cmp_two_keys,
.keys_inorder = xfs_rmapbt_keys_inorder,
.recs_inorder = xfs_rmapbt_recs_inorder,
.keys_contiguous = xfs_rmapbt_keys_contiguous,
diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.c b/fs/xfs/libxfs/xfs_rtrefcount_btree.c
index 3db5e7a4a945..ac11e94b42ae 100644
--- a/fs/xfs/libxfs/xfs_rtrefcount_btree.c
+++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.c
@@ -156,8 +156,8 @@ xfs_rtrefcountbt_init_ptr_from_cur(
ptr->l = 0;
}
-STATIC int64_t
-xfs_rtrefcountbt_key_diff(
+STATIC int
+xfs_rtrefcountbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
@@ -167,11 +167,11 @@ xfs_rtrefcountbt_key_diff(
start = xfs_refcount_encode_startblock(irec->rc_startblock,
irec->rc_domain);
- return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
+ return cmp_int(be32_to_cpu(kp->rc_startblock), start);
}
-STATIC int64_t
-xfs_rtrefcountbt_diff_two_keys(
+STATIC int
+xfs_rtrefcountbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -179,8 +179,8 @@ xfs_rtrefcountbt_diff_two_keys(
{
ASSERT(!mask || mask->refc.rc_startblock);
- return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
- be32_to_cpu(k2->refc.rc_startblock);
+ return cmp_int(be32_to_cpu(k1->refc.rc_startblock),
+ be32_to_cpu(k2->refc.rc_startblock));
}
static xfs_failaddr_t
@@ -387,9 +387,9 @@ const struct xfs_btree_ops xfs_rtrefcountbt_ops = {
.init_high_key_from_rec = xfs_rtrefcountbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rtrefcountbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_rtrefcountbt_init_ptr_from_cur,
- .key_diff = xfs_rtrefcountbt_key_diff,
+ .cmp_key_with_cur = xfs_rtrefcountbt_cmp_key_with_cur,
.buf_ops = &xfs_rtrefcountbt_buf_ops,
- .diff_two_keys = xfs_rtrefcountbt_diff_two_keys,
+ .cmp_two_keys = xfs_rtrefcountbt_cmp_two_keys,
.keys_inorder = xfs_rtrefcountbt_keys_inorder,
.recs_inorder = xfs_rtrefcountbt_recs_inorder,
.keys_contiguous = xfs_rtrefcountbt_keys_contiguous,
diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.c b/fs/xfs/libxfs/xfs_rtrmap_btree.c
index 9bdc2cbfc113..55f903165769 100644
--- a/fs/xfs/libxfs/xfs_rtrmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rtrmap_btree.c
@@ -185,38 +185,22 @@ static inline uint64_t offset_keymask(uint64_t offset)
return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}
-STATIC int64_t
-xfs_rtrmapbt_key_diff(
+STATIC int
+xfs_rtrmapbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
struct xfs_rmap_irec *rec = &cur->bc_rec.r;
const struct xfs_rmap_key *kp = &key->rmap;
- __u64 x, y;
- int64_t d;
- d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
- if (d)
- return d;
-
- x = be64_to_cpu(kp->rm_owner);
- y = rec->rm_owner;
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
-
- x = offset_keymask(be64_to_cpu(kp->rm_offset));
- y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
- return 0;
+ return cmp_int(be32_to_cpu(kp->rm_startblock), rec->rm_startblock) ?:
+ cmp_int(be64_to_cpu(kp->rm_owner), rec->rm_owner) ?:
+ cmp_int(offset_keymask(be64_to_cpu(kp->rm_offset)),
+ offset_keymask(xfs_rmap_irec_offset_pack(rec)));
}
-STATIC int64_t
-xfs_rtrmapbt_diff_two_keys(
+STATIC int
+xfs_rtrmapbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
@@ -224,36 +208,31 @@ xfs_rtrmapbt_diff_two_keys(
{
const struct xfs_rmap_key *kp1 = &k1->rmap;
const struct xfs_rmap_key *kp2 = &k2->rmap;
- int64_t d;
- __u64 x, y;
+ int d;
/* Doesn't make sense to mask off the physical space part */
ASSERT(!mask || mask->rmap.rm_startblock);
- d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
- be32_to_cpu(kp2->rm_startblock);
+ d = cmp_int(be32_to_cpu(kp1->rm_startblock),
+ be32_to_cpu(kp2->rm_startblock));
if (d)
return d;
if (!mask || mask->rmap.rm_owner) {
- x = be64_to_cpu(kp1->rm_owner);
- y = be64_to_cpu(kp2->rm_owner);
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
+ d = cmp_int(be64_to_cpu(kp1->rm_owner),
+ be64_to_cpu(kp2->rm_owner));
+ if (d)
+ return d;
}
if (!mask || mask->rmap.rm_offset) {
/* Doesn't make sense to allow offset but not owner */
ASSERT(!mask || mask->rmap.rm_owner);
- x = offset_keymask(be64_to_cpu(kp1->rm_offset));
- y = offset_keymask(be64_to_cpu(kp2->rm_offset));
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
+ d = cmp_int(offset_keymask(be64_to_cpu(kp1->rm_offset)),
+ offset_keymask(be64_to_cpu(kp2->rm_offset)));
+ if (d)
+ return d;
}
return 0;
@@ -511,9 +490,9 @@ const struct xfs_btree_ops xfs_rtrmapbt_ops = {
.init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur,
- .key_diff = xfs_rtrmapbt_key_diff,
+ .cmp_key_with_cur = xfs_rtrmapbt_cmp_key_with_cur,
.buf_ops = &xfs_rtrmapbt_buf_ops,
- .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
+ .cmp_two_keys = xfs_rtrmapbt_cmp_two_keys,
.keys_inorder = xfs_rtrmapbt_keys_inorder,
.recs_inorder = xfs_rtrmapbt_recs_inorder,
.keys_contiguous = xfs_rtrmapbt_keys_contiguous,
@@ -620,9 +599,9 @@ const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
.init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
.init_ptr_from_cur = xfbtree_init_ptr_from_cur,
- .key_diff = xfs_rtrmapbt_key_diff,
+ .cmp_key_with_cur = xfs_rtrmapbt_cmp_key_with_cur,
.buf_ops = &xfs_rtrmapbt_mem_buf_ops,
- .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
+ .cmp_two_keys = xfs_rtrmapbt_cmp_two_keys,
.keys_inorder = xfs_rtrmapbt_keys_inorder,
.recs_inorder = xfs_rtrmapbt_recs_inorder,
.keys_contiguous = xfs_rtrmapbt_keys_contiguous,