 fs/xfs/libxfs/xfs_refcount.c | 1452 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 993 insertions(+), 459 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 45b1c3b4e047..2484dc9f6d7e 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2016 Oracle. All Rights Reserved.
- *
* Author: Darrick J. Wong <darrick.wong@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "xfs.h"
#include "xfs_fs.h"
@@ -23,20 +9,27 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
-#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_refcount.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
+#include "xfs_health.h"
+#include "xfs_refcount_item.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtrefcount_btree.h"
+
+struct kmem_cache *xfs_refcount_intent_cache;
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
@@ -47,11 +40,9 @@ enum xfs_refc_adjust_op {
};
STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
/*
* Look up the first record less than or equal to [bno, len] in the btree
@@ -60,13 +51,16 @@ STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
int
xfs_refcount_lookup_le(
struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+ trace_xfs_refcount_lookup(cur,
+ xfs_refcount_encode_startblock(bno, domain),
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
+ cur->bc_rec.rc.rc_domain = domain;
return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
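
Note on the new rc_domain argument: the lookup helpers above and below pass the domain through xfs_refcount_encode_startblock(), which folds it into the otherwise unused high bit of the on-disk startblock (see the decode in xfs_refcount_btrec_to_irec further down, and the BUILD_BUG_ONs near the end of this patch that keep group sizes below that bit). A minimal sketch of that encoding, reconstructed here from the decode path; the authoritative helper lives in xfs_refcount.h:

/*
 * Sketch only (reconstructed from the decode path below, not part of this
 * patch): fold the record domain into the unused high bit of the startblock.
 */
static inline uint32_t
xfs_refcount_encode_startblock(
	xfs_agblock_t		startblock,
	enum xfs_refc_domain	domain)
{
	uint32_t		start;

	/*
	 * Group sizes are kept below XFS_REFC_COWFLAG, so shared records
	 * never set the high bit and it is free to carry the CoW flag.
	 */
	start = startblock & ~XFS_REFC_COWFLAG;
	if (domain == XFS_REFC_DOMAIN_COW)
		start |= XFS_REFC_COWFLAG;

	return start;
}
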
@@ -77,27 +71,137 @@ xfs_refcount_lookup_le(
int
xfs_refcount_lookup_ge(
struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+ trace_xfs_refcount_lookup(cur,
+ xfs_refcount_encode_startblock(bno, domain),
XFS_LOOKUP_GE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
+ cur->bc_rec.rc.rc_domain = domain;
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
+/*
+ * Look up the first record equal to [bno, len] in the btree
+ * given by cur.
+ */
+int
+xfs_refcount_lookup_eq(
+ struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
+ xfs_agblock_t bno,
+ int *stat)
+{
+ trace_xfs_refcount_lookup(cur,
+ xfs_refcount_encode_startblock(bno, domain),
+ XFS_LOOKUP_LE);
+ cur->bc_rec.rc.rc_startblock = bno;
+ cur->bc_rec.rc.rc_blockcount = 0;
+ cur->bc_rec.rc.rc_domain = domain;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
/* Convert on-disk record to in-core format. */
-static inline void
+void
xfs_refcount_btrec_to_irec(
- union xfs_btree_rec *rec,
+ const union xfs_btree_rec *rec,
struct xfs_refcount_irec *irec)
{
- irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock);
+ uint32_t start;
+
+ start = be32_to_cpu(rec->refc.rc_startblock);
+ if (start & XFS_REFC_COWFLAG) {
+ start &= ~XFS_REFC_COWFLAG;
+ irec->rc_domain = XFS_REFC_DOMAIN_COW;
+ } else {
+ irec->rc_domain = XFS_REFC_DOMAIN_SHARED;
+ }
+
+ irec->rc_startblock = start;
irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
}
+/* Simple checks for refcount records. */
+xfs_failaddr_t
+xfs_refcount_check_irec(
+ struct xfs_perag *pag,
+ const struct xfs_refcount_irec *irec)
+{
+ if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX)
+ return __this_address;
+
+ if (!xfs_refcount_check_domain(irec))
+ return __this_address;
+
+ /* check for valid extent range, including overflow */
+ if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount))
+ return __this_address;
+
+ if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX)
+ return __this_address;
+
+ return NULL;
+}
+
+xfs_failaddr_t
+xfs_rtrefcount_check_irec(
+ struct xfs_rtgroup *rtg,
+ const struct xfs_refcount_irec *irec)
+{
+ if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX)
+ return __this_address;
+
+ if (!xfs_refcount_check_domain(irec))
+ return __this_address;
+
+ /* check for valid extent range, including overflow */
+ if (!xfs_verify_rgbext(rtg, irec->rc_startblock, irec->rc_blockcount))
+ return __this_address;
+
+ if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX)
+ return __this_address;
+
+ return NULL;
+}
+
+static inline xfs_failaddr_t
+xfs_refcount_check_btrec(
+ struct xfs_btree_cur *cur,
+ const struct xfs_refcount_irec *irec)
+{
+ if (xfs_btree_is_rtrefcount(cur->bc_ops))
+ return xfs_rtrefcount_check_irec(to_rtg(cur->bc_group), irec);
+ return xfs_refcount_check_irec(to_perag(cur->bc_group), irec);
+}
+
+static inline int
+xfs_refcount_complain_bad_rec(
+ struct xfs_btree_cur *cur,
+ xfs_failaddr_t fa,
+ const struct xfs_refcount_irec *irec)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+
+ if (xfs_btree_is_rtrefcount(cur->bc_ops)) {
+ xfs_warn(mp,
+ "RT Refcount BTree record corruption in rtgroup %u detected at %pS!",
+ cur->bc_group->xg_gno, fa);
+ } else {
+ xfs_warn(mp,
+ "Refcount BTree record corruption in AG %d detected at %pS!",
+ cur->bc_group->xg_gno, fa);
+ }
+ xfs_warn(mp,
+ "Start block 0x%x, block count 0x%x, references 0x%x",
+ irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+}
+
/*
* Get the data from the pointed-to record.
*/
@@ -108,15 +212,20 @@ xfs_refcount_get_rec(
int *stat)
{
union xfs_btree_rec *rec;
+ xfs_failaddr_t fa;
int error;
error = xfs_btree_get_rec(cur, &rec, stat);
- if (!error && *stat == 1) {
- xfs_refcount_btrec_to_irec(rec, irec);
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno,
- irec);
- }
- return error;
+ if (error || !*stat)
+ return error;
+
+ xfs_refcount_btrec_to_irec(rec, irec);
+ fa = xfs_refcount_check_btrec(cur, irec);
+ if (fa)
+ return xfs_refcount_complain_bad_rec(cur, fa, irec);
+
+ trace_xfs_refcount_get(cur, irec);
+ return 0;
}
/*
@@ -130,16 +239,20 @@ xfs_refcount_update(
struct xfs_refcount_irec *irec)
{
union xfs_btree_rec rec;
+ uint32_t start;
int error;
- trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
- rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
+ trace_xfs_refcount_update(cur, irec);
+
+ start = xfs_refcount_encode_startblock(irec->rc_startblock,
+ irec->rc_domain);
+ rec.refc.rc_startblock = cpu_to_be32(start);
rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
+
error = xfs_btree_update(cur, &rec);
if (error)
- trace_xfs_refcount_update_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_update_error(cur, error, _RET_IP_);
return error;
}
@@ -148,7 +261,7 @@ xfs_refcount_update(
* by [bno, len, refcount].
* This either works (return 0) or gets an EFSCORRUPTED error.
*/
-STATIC int
+int
xfs_refcount_insert(
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *irec,
@@ -156,16 +269,25 @@ xfs_refcount_insert(
{
int error;
- trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
+ trace_xfs_refcount_insert(cur, irec);
+
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
+ cur->bc_rec.rc.rc_domain = irec->rc_domain;
+
error = xfs_btree_insert(cur, i);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (error)
+ goto out_error;
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+
out_error:
if (error)
- trace_xfs_refcount_insert_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_insert_error(cur, error, _RET_IP_);
return error;
}
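
The same error-handling conversion recurs throughout the rest of the file: the old XFS_WANT_CORRUPTED_GOTO() macro is open-coded so that, besides returning -EFSCORRUPTED, the corruption is also recorded for the health-tracking subsystem via xfs_btree_mark_sick(). Schematically, both forms as they appear in the hunk above:

	/* Old form, removed by this patch: */
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);

	/* New form: record the corruption in the btree health state too. */
	if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}
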
@@ -187,17 +309,25 @@ xfs_refcount_delete(
error = xfs_refcount_get_rec(cur, &irec, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
- trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ trace_xfs_refcount_delete(cur, &irec);
error = xfs_btree_delete(cur, i);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (error)
goto out_error;
- error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
+ error = xfs_refcount_lookup_ge(cur, irec.rc_domain, irec.rc_startblock,
+ &found_rec);
out_error:
if (error)
- trace_xfs_refcount_delete_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_delete_error(cur, error, _RET_IP_);
return error;
}
@@ -292,6 +422,7 @@ xfs_refc_next(
STATIC int
xfs_refcount_split_extent(
struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
xfs_agblock_t agbno,
bool *shape_changed)
{
@@ -300,7 +431,7 @@ xfs_refcount_split_extent(
int error;
*shape_changed = false;
- error = xfs_refcount_lookup_le(cur, agbno, &found_rec);
+ error = xfs_refcount_lookup_le(cur, domain, agbno, &found_rec);
if (error)
goto out_error;
if (!found_rec)
@@ -309,13 +440,18 @@ xfs_refcount_split_extent(
error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (rcext.rc_domain != domain)
+ return 0;
if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
return 0;
*shape_changed = true;
- trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
- &rcext, agbno);
+ trace_xfs_refcount_split_extent(cur, &rcext, agbno);
/* Establish the right extent. */
tmp = rcext;
@@ -331,12 +467,15 @@ xfs_refcount_split_extent(
error = xfs_refcount_insert(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
return error;
out_error:
- trace_xfs_refcount_split_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_split_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -350,14 +489,15 @@ xfs_refcount_merge_center_extents(
struct xfs_refcount_irec *center,
struct xfs_refcount_irec *right,
unsigned long long extlen,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
int found_rec;
- trace_xfs_refcount_merge_center_extents(cur->bc_mp,
- cur->bc_private.a.agno, left, center, right);
+ trace_xfs_refcount_merge_center_extents(cur, left, center, right);
+
+ ASSERT(left->rc_domain == center->rc_domain);
+ ASSERT(right->rc_domain == center->rc_domain);
/*
* Make sure the center and right extents are not in the btree.
@@ -367,31 +507,46 @@ xfs_refcount_merge_center_extents(
* call removes the center and the second one removes the right
* extent.
*/
- error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_ge(cur, center->rc_domain,
+ center->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (center->rc_refcount > 1) {
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
- error = xfs_refcount_lookup_le(cur, left->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_le(cur, left->rc_domain,
+ left->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount = extlen;
error = xfs_refcount_update(cur, left);
@@ -402,8 +557,7 @@ xfs_refcount_merge_center_extents(
return error;
out_error:
- trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_center_extents_error(cur, error, _RET_IP_);
return error;
}
@@ -421,31 +575,42 @@ xfs_refcount_merge_left_extent(
int error;
int found_rec;
- trace_xfs_refcount_merge_left_extent(cur->bc_mp,
- cur->bc_private.a.agno, left, cleft);
+ trace_xfs_refcount_merge_left_extent(cur, left, cleft);
+
+ ASSERT(left->rc_domain == cleft->rc_domain);
/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
if (cleft->rc_refcount > 1) {
- error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_le(cur, cleft->rc_domain,
+ cleft->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
- error = xfs_refcount_lookup_le(cur, left->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_le(cur, left->rc_domain,
+ left->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount += cleft->rc_blockcount;
error = xfs_refcount_update(cur, left);
@@ -457,8 +622,7 @@ xfs_refcount_merge_left_extent(
return error;
out_error:
- trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_left_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -470,40 +634,50 @@ xfs_refcount_merge_right_extent(
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *right,
struct xfs_refcount_irec *cright,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
int found_rec;
- trace_xfs_refcount_merge_right_extent(cur->bc_mp,
- cur->bc_private.a.agno, cright, right);
+ trace_xfs_refcount_merge_right_extent(cur, cright, right);
+
+ ASSERT(right->rc_domain == cright->rc_domain);
/*
* If the extent ending at agbno+aglen (cright) wasn't synthesized,
* remove it.
*/
if (cright->rc_refcount > 1) {
- error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_le(cur, cright->rc_domain,
+ cright->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the right extent. */
- error = xfs_refcount_lookup_le(cur, right->rc_startblock,
- &found_rec);
+ error = xfs_refcount_lookup_le(cur, right->rc_domain,
+ right->rc_startblock, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
right->rc_startblock -= cright->rc_blockcount;
right->rc_blockcount += cright->rc_blockcount;
@@ -515,13 +689,10 @@ xfs_refcount_merge_right_extent(
return error;
out_error:
- trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_right_extent_error(cur, error, _RET_IP_);
return error;
}
-#define XFS_FIND_RCEXT_SHARED 1
-#define XFS_FIND_RCEXT_COW 2
/*
* Find the left extent and the one after it (cleft). This function assumes
* that we've already split any extent crossing agbno.
@@ -531,16 +702,16 @@ xfs_refcount_find_left_extents(
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *left,
struct xfs_refcount_irec *cleft,
+ enum xfs_refc_domain domain,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- int flags)
+ xfs_extlen_t aglen)
{
struct xfs_refcount_irec tmp;
int error;
int found_rec;
left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
- error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
+ error = xfs_refcount_lookup_le(cur, domain, agbno - 1, &found_rec);
if (error)
goto out_error;
if (!found_rec)
@@ -549,13 +720,15 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
- if (xfs_refc_next(&tmp) != agbno)
+ if (tmp.rc_domain != domain)
return 0;
- if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
- return 0;
- if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
+ if (xfs_refc_next(&tmp) != agbno)
return 0;
/* We have a left extent; retrieve (or invent) the next right one */
*left = tmp;
@@ -567,8 +740,14 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+
+ if (tmp.rc_domain != domain)
+ goto not_found;
/* if tmp starts at the end of our range, just use that */
if (tmp.rc_startblock == agbno)
@@ -586,8 +765,10 @@ xfs_refcount_find_left_extents(
cleft->rc_blockcount = min(aglen,
tmp.rc_startblock - agbno);
cleft->rc_refcount = 1;
+ cleft->rc_domain = domain;
}
} else {
+not_found:
/*
* No extents, so pretend that there's one covering the whole
* range.
@@ -595,14 +776,13 @@ xfs_refcount_find_left_extents(
cleft->rc_startblock = agbno;
cleft->rc_blockcount = aglen;
cleft->rc_refcount = 1;
+ cleft->rc_domain = domain;
}
- trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
- left, cleft, agbno);
+ trace_xfs_refcount_find_left_extent(cur, left, cleft, agbno);
return error;
out_error:
- trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_find_left_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -615,16 +795,16 @@ xfs_refcount_find_right_extents(
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *right,
struct xfs_refcount_irec *cright,
+ enum xfs_refc_domain domain,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- int flags)
+ xfs_extlen_t aglen)
{
struct xfs_refcount_irec tmp;
int error;
int found_rec;
right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
- error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
+ error = xfs_refcount_lookup_ge(cur, domain, agbno + aglen, &found_rec);
if (error)
goto out_error;
if (!found_rec)
@@ -633,13 +813,15 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
- if (tmp.rc_startblock != agbno + aglen)
- return 0;
- if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
+ if (tmp.rc_domain != domain)
return 0;
- if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
+ if (tmp.rc_startblock != agbno + aglen)
return 0;
/* We have a right extent; retrieve (or invent) the next left one */
*right = tmp;
@@ -651,8 +833,14 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+
+ if (tmp.rc_domain != domain)
+ goto not_found;
/* if tmp ends at the end of our range, just use that */
if (xfs_refc_next(&tmp) == agbno + aglen)
@@ -670,8 +858,10 @@ xfs_refcount_find_right_extents(
cright->rc_blockcount = right->rc_startblock -
cright->rc_startblock;
cright->rc_refcount = 1;
+ cright->rc_domain = domain;
}
} else {
+not_found:
/*
* No extents, so pretend that there's one covering the whole
* range.
@@ -679,35 +869,160 @@ xfs_refcount_find_right_extents(
cright->rc_startblock = agbno;
cright->rc_blockcount = aglen;
cright->rc_refcount = 1;
+ cright->rc_domain = domain;
}
- trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
- cright, right, agbno + aglen);
+ trace_xfs_refcount_find_right_extent(cur, cright, right,
+ agbno + aglen);
return error;
out_error:
- trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_find_right_extent_error(cur, error, _RET_IP_);
return error;
}
/* Is this extent valid? */
static inline bool
xfs_refc_valid(
- struct xfs_refcount_irec *rc)
+ const struct xfs_refcount_irec *rc)
{
return rc->rc_startblock != NULLAGBLOCK;
}
+static inline xfs_nlink_t
+xfs_refc_merge_refcount(
+ const struct xfs_refcount_irec *irec,
+ enum xfs_refc_adjust_op adjust)
+{
+ /* Once a record hits XFS_REFC_REFCOUNT_MAX, it is pinned forever */
+ if (irec->rc_refcount == XFS_REFC_REFCOUNT_MAX)
+ return XFS_REFC_REFCOUNT_MAX;
+ return irec->rc_refcount + adjust;
+}
+
+static inline bool
+xfs_refc_want_merge_center(
+ const struct xfs_refcount_irec *left,
+ const struct xfs_refcount_irec *cleft,
+ const struct xfs_refcount_irec *cright,
+ const struct xfs_refcount_irec *right,
+ bool cleft_is_cright,
+ enum xfs_refc_adjust_op adjust,
+ unsigned long long *ulenp)
+{
+ unsigned long long ulen = left->rc_blockcount;
+ xfs_nlink_t new_refcount;
+
+ /*
+ * To merge with a center record, both shoulder records must be
+ * adjacent to the record we want to adjust. This is only true if
+ * find_left and find_right made all four records valid.
+ */
+ if (!xfs_refc_valid(left) || !xfs_refc_valid(right) ||
+ !xfs_refc_valid(cleft) || !xfs_refc_valid(cright))
+ return false;
+
+ /* There must only be one record for the entire range. */
+ if (!cleft_is_cright)
+ return false;
+
+ /* The shoulder record refcounts must match the new refcount. */
+ new_refcount = xfs_refc_merge_refcount(cleft, adjust);
+ if (left->rc_refcount != new_refcount)
+ return false;
+ if (right->rc_refcount != new_refcount)
+ return false;
+
+ /*
+ * The new record cannot exceed the max length. ulen is a ULL as the
+ * individual record block counts can be up to (u32 - 1) in length
+ * hence we need to catch u32 addition overflows here.
+ */
+ ulen += cleft->rc_blockcount + right->rc_blockcount;
+ if (ulen >= XFS_REFC_LEN_MAX)
+ return false;
+
+ *ulenp = ulen;
+ return true;
+}
+
+static inline bool
+xfs_refc_want_merge_left(
+ const struct xfs_refcount_irec *left,
+ const struct xfs_refcount_irec *cleft,
+ enum xfs_refc_adjust_op adjust)
+{
+ unsigned long long ulen = left->rc_blockcount;
+ xfs_nlink_t new_refcount;
+
+ /*
+ * For a left merge, the left shoulder record must be adjacent to the
+ * start of the range. If this is true, find_left made left and cleft
+ * contain valid contents.
+ */
+ if (!xfs_refc_valid(left) || !xfs_refc_valid(cleft))
+ return false;
+
+ /* Left shoulder record refcount must match the new refcount. */
+ new_refcount = xfs_refc_merge_refcount(cleft, adjust);
+ if (left->rc_refcount != new_refcount)
+ return false;
+
+ /*
+ * The new record cannot exceed the max length. ulen is a ULL as the
+ * individual record block counts can be up to (u32 - 1) in length
+ * hence we need to catch u32 addition overflows here.
+ */
+ ulen += cleft->rc_blockcount;
+ if (ulen >= XFS_REFC_LEN_MAX)
+ return false;
+
+ return true;
+}
+
+static inline bool
+xfs_refc_want_merge_right(
+ const struct xfs_refcount_irec *cright,
+ const struct xfs_refcount_irec *right,
+ enum xfs_refc_adjust_op adjust)
+{
+ unsigned long long ulen = right->rc_blockcount;
+ xfs_nlink_t new_refcount;
+
+ /*
+ * For a right merge, the right shoulder record must be adjacent to the
+ * end of the range. If this is true, find_right made cright and right
+ * contain valid contents.
+ */
+ if (!xfs_refc_valid(right) || !xfs_refc_valid(cright))
+ return false;
+
+ /* Right shoulder record refcount must match the new refcount. */
+ new_refcount = xfs_refc_merge_refcount(cright, adjust);
+ if (right->rc_refcount != new_refcount)
+ return false;
+
+ /*
+ * The new record cannot exceed the max length. ulen is a ULL as the
+ * individual record block counts can be up to (u32 - 1) in length
+ * hence we need to catch u32 addition overflows here.
+ */
+ ulen += cright->rc_blockcount;
+ if (ulen >= XFS_REFC_LEN_MAX)
+ return false;
+
+ return true;
+}
+
/*
* Try to merge with any extents on the boundaries of the adjustment range.
*/
STATIC int
xfs_refcount_merge_extents(
struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
xfs_agblock_t *agbno,
xfs_extlen_t *aglen,
enum xfs_refc_adjust_op adjust,
- int flags,
bool *shape_changed)
{
struct xfs_refcount_irec left = {0}, cleft = {0};
@@ -722,12 +1037,12 @@ xfs_refcount_merge_extents(
* just below (agbno + aglen) [cright], and just above (agbno + aglen)
* [right].
*/
- error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
- *aglen, flags);
+ error = xfs_refcount_find_left_extents(cur, &left, &cleft, domain,
+ *agbno, *aglen);
if (error)
return error;
- error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
- *aglen, flags);
+ error = xfs_refcount_find_right_extents(cur, &right, &cright, domain,
+ *agbno, *aglen);
if (error)
return error;
@@ -739,23 +1054,15 @@ xfs_refcount_merge_extents(
(cleft.rc_blockcount == cright.rc_blockcount);
/* Try to merge left, cleft, and right. cleft must == cright. */
- ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
- right.rc_blockcount;
- if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
- xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
- left.rc_refcount == cleft.rc_refcount + adjust &&
- right.rc_refcount == cleft.rc_refcount + adjust &&
- ulen < MAXREFCEXTLEN) {
+ if (xfs_refc_want_merge_center(&left, &cleft, &cright, &right, cequal,
+ adjust, &ulen)) {
*shape_changed = true;
return xfs_refcount_merge_center_extents(cur, &left, &cleft,
- &right, ulen, agbno, aglen);
+ &right, ulen, aglen);
}
/* Try to merge left and cleft. */
- ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
- if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
- left.rc_refcount == cleft.rc_refcount + adjust &&
- ulen < MAXREFCEXTLEN) {
+ if (xfs_refc_want_merge_left(&left, &cleft, adjust)) {
*shape_changed = true;
error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
agbno, aglen);
@@ -771,16 +1078,13 @@ xfs_refcount_merge_extents(
}
/* Try to merge cright and right. */
- ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
- if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
- right.rc_refcount == cright.rc_refcount + adjust &&
- ulen < MAXREFCEXTLEN) {
+ if (xfs_refc_want_merge_right(&cright, &right, adjust)) {
*shape_changed = true;
return xfs_refcount_merge_right_extent(cur, &right, &cright,
- agbno, aglen);
+ aglen);
}
- return error;
+ return 0;
}
/*
@@ -795,25 +1099,45 @@ xfs_refcount_still_have_space(
{
unsigned long overhead;
- overhead = cur->bc_private.a.priv.refc.shape_changes *
- xfs_allocfree_log_count(cur->bc_mp, 1);
+ /*
+ * Worst case estimate: full splits of the free space and rmap btrees
+ * to handle each of the shape changes to the refcount btree.
+ */
+ overhead = xfs_allocfree_block_count(cur->bc_mp,
+ cur->bc_refc.shape_changes);
+ overhead += cur->bc_maxlevels;
overhead *= cur->bc_mp->m_sb.sb_blocksize;
/*
* Only allow 2 refcount extent updates per transaction if the
* refcount continue update "error" has been injected.
*/
- if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
- XFS_TEST_ERROR(false, cur->bc_mp,
- XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
+ if (cur->bc_refc.nr_ops > 2 &&
+ XFS_TEST_ERROR(cur->bc_mp, XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;
- if (cur->bc_private.a.priv.refc.nr_ops == 0)
+ if (cur->bc_refc.nr_ops == 0)
return true;
else if (overhead > cur->bc_tp->t_log_res)
return false;
- return cur->bc_tp->t_log_res - overhead >
- cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
+ return cur->bc_tp->t_log_res - overhead >
+ cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
+}
+
+/* Schedule an extent free. */
+static int
+xrefc_free_extent(
+ struct xfs_btree_cur *cur,
+ struct xfs_refcount_irec *rec)
+{
+ unsigned int flags = 0;
+
+ if (xfs_btree_is_rtrefcount(cur->bc_ops))
+ flags |= XFS_FREE_EXTENT_REALTIME;
+
+ return xfs_free_extent_later(cur->bc_tp,
+ xfs_gbno_to_fsb(cur->bc_group, rec->rc_startblock),
+ rec->rc_blockcount, NULL, XFS_AG_RESV_NONE, flags);
}
/*
@@ -827,20 +1151,18 @@ xfs_refcount_adjust_extents(
struct xfs_btree_cur *cur,
xfs_agblock_t *agbno,
xfs_extlen_t *aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
- struct xfs_owner_info *oinfo)
+ enum xfs_refc_adjust_op adj)
{
struct xfs_refcount_irec ext, tmp;
int error;
int found_rec, found_tmp;
- xfs_fsblock_t fsbno;
/* Merging did all the work already. */
if (*aglen == 0)
return 0;
- error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
+ error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_SHARED, *agbno,
+ &found_rec);
if (error)
goto out_error;
@@ -848,10 +1170,11 @@ xfs_refcount_adjust_extents(
error = xfs_refcount_get_rec(cur, &ext, &found_rec);
if (error)
goto out_error;
- if (!found_rec) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+ if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) {
+ ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
+ ext.rc_domain = XFS_REFC_DOMAIN_SHARED;
}
/*
@@ -864,70 +1187,88 @@ xfs_refcount_adjust_extents(
tmp.rc_blockcount = min(*aglen,
ext.rc_startblock - *agbno);
tmp.rc_refcount = 1 + adj;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &tmp);
+ tmp.rc_domain = XFS_REFC_DOMAIN_SHARED;
+
+ trace_xfs_refcount_modify_extent(cur, &tmp);
/*
* Either cover the hole (increment) or
* delete the range (decrement).
*/
+ cur->bc_refc.nr_ops++;
if (tmp.rc_refcount) {
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
- cur->bc_private.a.priv.refc.nr_ops++;
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ found_tmp != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
} else {
- fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_private.a.agno,
- tmp.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- tmp.rc_blockcount, oinfo);
+ error = xrefc_free_extent(cur, &tmp);
+ if (error)
+ goto out_error;
}
(*agbno) += tmp.rc_blockcount;
(*aglen) -= tmp.rc_blockcount;
- error = xfs_refcount_lookup_ge(cur, *agbno,
+ /* Stop if there's nothing left to modify */
+ if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
+ break;
+
+ /* Move the cursor to the start of ext. */
+ error = xfs_refcount_lookup_ge(cur,
+ XFS_REFC_DOMAIN_SHARED, *agbno,
&found_rec);
if (error)
goto out_error;
}
- /* Stop if there's nothing left to modify */
- if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
- break;
+ /*
+ * A previous step trimmed agbno/aglen such that the end of the
+ * range would not be in the middle of the record. If this is
+ * no longer the case, something is seriously wrong with the
+ * btree. Make sure we never feed the synthesized record into
+ * the processing loop below.
+ */
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) ||
+ XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/*
* Adjust the reference count and either update the tree
* (incr) or free the blocks (decr).
*/
- if (ext.rc_refcount == MAXREFCOUNT)
+ if (ext.rc_refcount == XFS_REFC_REFCOUNT_MAX)
goto skip;
ext.rc_refcount += adj;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &ext);
+ trace_xfs_refcount_modify_extent(cur, &ext);
+ cur->bc_refc.nr_ops++;
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
goto out_error;
- cur->bc_private.a.priv.refc.nr_ops++;
} else if (ext.rc_refcount == 1) {
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
- cur->bc_private.a.priv.refc.nr_ops++;
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
goto advloop;
} else {
- fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_private.a.agno,
- ext.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- ext.rc_blockcount, oinfo);
+ error = xrefc_free_extent(cur, &ext);
+ if (error)
+ goto out_error;
}
skip:
@@ -942,8 +1283,7 @@ advloop:
return error;
out_error:
- trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -951,37 +1291,31 @@ out_error:
STATIC int
xfs_refcount_adjust(
struct xfs_btree_cur *cur,
- xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- xfs_agblock_t *new_agbno,
- xfs_extlen_t *new_aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
- struct xfs_owner_info *oinfo)
+ xfs_agblock_t *agbno,
+ xfs_extlen_t *aglen,
+ enum xfs_refc_adjust_op adj)
{
bool shape_changed;
int shape_changes = 0;
int error;
- *new_agbno = agbno;
- *new_aglen = aglen;
if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
- trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
- agbno, aglen);
+ trace_xfs_refcount_increase(cur, *agbno, *aglen);
else
- trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
- agbno, aglen);
+ trace_xfs_refcount_decrease(cur, *agbno, *aglen);
/*
* Ensure that no rcextents cross the boundary of the adjustment range.
*/
- error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
+ error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
+ *agbno, &shape_changed);
if (error)
goto out_error;
if (shape_changed)
shape_changes++;
- error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
+ error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
+ *agbno + *aglen, &shape_changed);
if (error)
goto out_error;
if (shape_changed)
@@ -990,44 +1324,52 @@ xfs_refcount_adjust(
/*
* Try to merge with the left or right extents of the range.
*/
- error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
- XFS_FIND_RCEXT_SHARED, &shape_changed);
+ error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED,
+ agbno, aglen, adj, &shape_changed);
if (error)
goto out_error;
if (shape_changed)
shape_changes++;
if (shape_changes)
- cur->bc_private.a.priv.refc.shape_changes++;
+ cur->bc_refc.shape_changes++;
/* Now that we've taken care of the ends, adjust the middle extents */
- error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
- adj, dfops, oinfo);
+ error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj);
if (error)
goto out_error;
return 0;
out_error:
- trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
- error, _RET_IP_);
+ trace_xfs_refcount_adjust_error(cur, error, _RET_IP_);
return error;
}
-/* Clean up after calling xfs_refcount_finish_one. */
-void
-xfs_refcount_finish_one_cleanup(
- struct xfs_trans *tp,
- struct xfs_btree_cur *rcur,
- int error)
+/*
+ * Set up a continuation of a deferred refcount operation by updating the intent.
+ * Checks to make sure we're not going to run off the end of the AG.
+ */
+static inline int
+xfs_refcount_continue_op(
+ struct xfs_btree_cur *cur,
+ struct xfs_refcount_intent *ri,
+ xfs_agblock_t new_agbno)
{
- struct xfs_buf *agbp;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_perag *pag = to_perag(cur->bc_group);
- if (rcur == NULL)
- return;
- agbp = rcur->bc_private.a.agbp;
- xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- if (error)
- xfs_trans_brelse(tp, agbp);
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno,
+ ri->ri_blockcount))) {
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+ }
+
+ ri->ri_startblock = xfs_agbno_to_fsb(pag, new_agbno);
+
+ ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount));
+ ASSERT(pag_agno(pag) == XFS_FSB_TO_AGNO(mp, ri->ri_startblock));
+
+ return 0;
}
/*
@@ -1040,160 +1382,249 @@ xfs_refcount_finish_one_cleanup(
int
xfs_refcount_finish_one(
struct xfs_trans *tp,
- struct xfs_defer_ops *dfops,
- enum xfs_refcount_intent_type type,
- xfs_fsblock_t startblock,
- xfs_extlen_t blockcount,
- xfs_fsblock_t *new_fsb,
- xfs_extlen_t *new_len,
+ struct xfs_refcount_intent *ri,
struct xfs_btree_cur **pcur)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *rcur;
+ struct xfs_btree_cur *rcur = *pcur;
struct xfs_buf *agbp = NULL;
int error = 0;
- xfs_agnumber_t agno;
xfs_agblock_t bno;
- xfs_agblock_t new_agbno;
unsigned long nr_ops = 0;
int shape_changes = 0;
- agno = XFS_FSB_TO_AGNO(mp, startblock);
- ASSERT(agno != NULLAGNUMBER);
- bno = XFS_FSB_TO_AGBNO(mp, startblock);
+ bno = XFS_FSB_TO_AGBNO(mp, ri->ri_startblock);
- trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
- type, XFS_FSB_TO_AGBNO(mp, startblock),
- blockcount);
+ trace_xfs_refcount_deferred(mp, ri);
- if (XFS_TEST_ERROR(false, mp,
- XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+ if (XFS_TEST_ERROR(mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
return -EIO;
/*
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
- rcur = *pcur;
- if (rcur != NULL && rcur->bc_private.a.agno != agno) {
- nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
- shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
- xfs_refcount_finish_one_cleanup(tp, rcur, 0);
+ if (rcur != NULL && rcur->bc_group != ri->ri_group) {
+ nr_ops = rcur->bc_refc.nr_ops;
+ shape_changes = rcur->bc_refc.shape_changes;
+ xfs_btree_del_cursor(rcur, 0);
rcur = NULL;
*pcur = NULL;
}
if (rcur == NULL) {
- error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
+ struct xfs_perag *pag = to_perag(ri->ri_group);
+
+ error = xfs_alloc_read_agf(pag, tp,
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
return error;
- if (!agbp)
- return -EFSCORRUPTED;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
- if (!rcur) {
- error = -ENOMEM;
- goto out_cur;
- }
- rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
- rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
+ *pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
+ rcur->bc_refc.nr_ops = nr_ops;
+ rcur->bc_refc.shape_changes = shape_changes;
}
- *pcur = rcur;
- switch (type) {
+ switch (ri->ri_type) {
case XFS_REFCOUNT_INCREASE:
- error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_INCREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_refcount_continue_op(rcur, ri, bno);
break;
case XFS_REFCOUNT_DECREASE:
- error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_DECREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_refcount_continue_op(rcur, ri, bno);
break;
case XFS_REFCOUNT_ALLOC_COW:
- *new_fsb = startblock + blockcount;
- *new_len = 0;
- error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
break;
case XFS_REFCOUNT_FREE_COW:
- *new_fsb = startblock + blockcount;
- *new_len = 0;
- error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
break;
default:
ASSERT(0);
- error = -EFSCORRUPTED;
+ return -EFSCORRUPTED;
}
- if (!error && *new_len > 0)
- trace_xfs_refcount_finish_one_leftover(mp, agno, type,
- bno, blockcount, new_agbno, *new_len);
+ if (!error && ri->ri_blockcount > 0)
+ trace_xfs_refcount_finish_one_leftover(mp, ri);
return error;
+}
+
+/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent. Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+ struct xfs_btree_cur *cur,
+ struct xfs_refcount_intent *ri,
+ xfs_agblock_t new_agbno)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rtgroup *rtg = to_rtg(ri->ri_group);
+
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+ ri->ri_blockcount))) {
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+ }
+
+ ri->ri_startblock = xfs_rgbno_to_rtb(rtg, new_agbno);
+
+ ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+ return 0;
+}
+
+/*
+ * Process one of the deferred realtime refcount operations. We pass back the
+ * btree cursor to maintain our lock on the btree between calls.
+ */
+int
+xfs_rtrefcount_finish_one(
+ struct xfs_trans *tp,
+ struct xfs_refcount_intent *ri,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rtgroup *rtg = to_rtg(ri->ri_group);
+ struct xfs_btree_cur *rcur = *pcur;
+ int error = 0;
+ xfs_rgblock_t bno;
+ unsigned long nr_ops = 0;
+ int shape_changes = 0;
+
+ bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock);
+
+ trace_xfs_refcount_deferred(mp, ri);
+
+ if (XFS_TEST_ERROR(mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+ return -EIO;
+
+ /*
+ * If we haven't gotten a cursor or the cursor AG doesn't match
+ * the startblock, get one now.
+ */
+ if (rcur != NULL && rcur->bc_group != ri->ri_group) {
+ nr_ops = rcur->bc_refc.nr_ops;
+ shape_changes = rcur->bc_refc.shape_changes;
+ xfs_btree_del_cursor(rcur, 0);
+ rcur = NULL;
+ *pcur = NULL;
+ }
+ if (rcur == NULL) {
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+ xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);
+ *pcur = rcur = xfs_rtrefcountbt_init_cursor(tp, rtg);
-out_cur:
- xfs_trans_brelse(tp, agbp);
+ rcur->bc_refc.nr_ops = nr_ops;
+ rcur->bc_refc.shape_changes = shape_changes;
+ }
+ switch (ri->ri_type) {
+ case XFS_REFCOUNT_INCREASE:
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_INCREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+ break;
+ case XFS_REFCOUNT_DECREASE:
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_DECREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+ break;
+ case XFS_REFCOUNT_ALLOC_COW:
+ error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
+ break;
+ case XFS_REFCOUNT_FREE_COW:
+ error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ if (!error && ri->ri_blockcount > 0)
+ trace_xfs_refcount_finish_one_leftover(mp, ri);
return error;
}
/*
* Record a refcount intent for later processing.
*/
-static int
+static void
__xfs_refcount_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
+ bool isrt,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount)
{
struct xfs_refcount_intent *ri;
- trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
- type, XFS_FSB_TO_AGBNO(mp, startblock),
- blockcount);
-
- ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
- KM_SLEEP | KM_NOFS);
+ ri = kmem_cache_alloc(xfs_refcount_intent_cache,
+ GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
+ ri->ri_realtime = isrt;
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
- return 0;
+ xfs_refcount_defer_add(tp, ri);
}
/*
* Increase the reference count of the blocks backing a file's extent.
*/
-int
+void
xfs_refcount_increase_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
+ bool isrt,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ if (!xfs_has_reflink(tp->t_mountp))
+ return;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
- PREV->br_startblock, PREV->br_blockcount);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, isrt, PREV->br_startblock,
+ PREV->br_blockcount);
}
/*
* Decrease the reference count of the blocks backing a file's extent.
*/
-int
+void
xfs_refcount_decrease_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
+ bool isrt,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ if (!xfs_has_reflink(tp->t_mountp))
+ return;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
- PREV->br_startblock, PREV->br_blockcount);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, isrt, PREV->br_startblock,
+ PREV->br_blockcount);
}
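
With xfs_refcount_increase_extent() and xfs_refcount_decrease_extent() now returning void and queuing intents directly on the transaction, callers no longer thread a struct xfs_defer_ops through or check a return value. A hypothetical caller, purely for illustration (the function name and arguments here are invented):

/* Illustration only -- not part of this patch. */
static void
example_remap_refcounts(
	struct xfs_trans	*tp,
	bool			isrt,
	struct xfs_bmbt_irec	*new,
	struct xfs_bmbt_irec	*old)
{
	/* Defer a refcount bump for the blocks now shared by two files. */
	xfs_refcount_increase_extent(tp, isrt, new);

	/* Defer a refcount drop for the blocks being unmapped. */
	xfs_refcount_decrease_extent(tp, isrt, old);
}
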
/*
@@ -1218,15 +1649,15 @@ xfs_refcount_find_shared(
int have;
int error;
- trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
- agbno, aglen);
+ trace_xfs_refcount_find_shared(cur, agbno, aglen);
/* By default, skip the whole range */
*fbno = NULLAGBLOCK;
*flen = 0;
/* Try to find a refcount extent that crosses the start */
- error = xfs_refcount_lookup_le(cur, agbno, &have);
+ error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, agbno,
+ &have);
if (error)
goto out_error;
if (!have) {
@@ -1240,7 +1671,13 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
+ goto done;
/* If the extent ends before the start, look at the next one */
if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
@@ -1252,7 +1689,13 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
+ goto done;
}
/* If the extent starts after the range we want, bail out */
@@ -1280,21 +1723,24 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
- if (tmp.rc_startblock >= agbno + aglen ||
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED ||
+ tmp.rc_startblock >= agbno + aglen ||
tmp.rc_startblock != *fbno + *flen)
break;
*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
}
done:
- trace_xfs_refcount_find_shared_result(cur->bc_mp,
- cur->bc_private.a.agno, *fbno, *flen);
+ trace_xfs_refcount_find_shared_result(cur, *fbno, *flen);
out_error:
if (error)
- trace_xfs_refcount_find_shared_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_find_shared_error(cur, error, _RET_IP_);
return error;
}
@@ -1355,9 +1801,7 @@ xfs_refcount_adjust_cow_extents(
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
- struct xfs_owner_info *oinfo)
+ enum xfs_refc_adjust_op adj)
{
struct xfs_refcount_irec ext, tmp;
int error;
@@ -1367,55 +1811,81 @@ xfs_refcount_adjust_cow_extents(
return 0;
/* Find any overlapping refcount records */
- error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
+ error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_COW, agbno,
+ &found_rec);
if (error)
goto out_error;
error = xfs_refcount_get_rec(cur, &ext, &found_rec);
if (error)
goto out_error;
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec &&
+ ext.rc_domain != XFS_REFC_DOMAIN_COW)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (!found_rec) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
- XFS_REFC_COW_START;
+ ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
+ ext.rc_domain = XFS_REFC_DOMAIN_COW;
}
switch (adj) {
case XFS_REFCOUNT_ADJUST_COW_ALLOC:
/* Adding a CoW reservation, there should be nothing here. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock >= agbno + aglen, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ agbno + aglen > ext.rc_startblock)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
tmp.rc_startblock = agbno;
tmp.rc_blockcount = aglen;
tmp.rc_refcount = 1;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &tmp);
+ tmp.rc_domain = XFS_REFC_DOMAIN_COW;
+
+ trace_xfs_refcount_modify_extent(cur, &tmp);
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
case XFS_REFCOUNT_ADJUST_COW_FREE:
/* Removing a CoW reservation, there should be one extent. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock == agbno, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_blockcount == aglen, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_refcount == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ext.rc_refcount = 0;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &ext);
+ trace_xfs_refcount_modify_extent(cur, &ext);
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ xfs_btree_mark_sick(cur);
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
default:
ASSERT(0);
@@ -1423,8 +1893,7 @@ xfs_refcount_adjust_cow_extents(
return error;
out_error:
- trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -1436,44 +1905,41 @@ xfs_refcount_adjust_cow(
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops)
+ enum xfs_refc_adjust_op adj)
{
bool shape_changed;
int error;
- agbno += XFS_REFC_COW_START;
-
/*
* Ensure that no rcextents cross the boundary of the adjustment range.
*/
- error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
+ error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
+ agbno, &shape_changed);
if (error)
goto out_error;
- error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
+ error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
+ agbno + aglen, &shape_changed);
if (error)
goto out_error;
/*
* Try to merge with the left or right extents of the range.
*/
- error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
- XFS_FIND_RCEXT_COW, &shape_changed);
+ error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_COW, &agbno,
+ &aglen, adj, &shape_changed);
if (error)
goto out_error;
/* Now that we've taken care of the ends, adjust the middle extents */
- error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
- dfops, NULL);
+ error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
if (error)
goto out_error;
return 0;
out_error:
- trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
- error, _RET_IP_);
+ trace_xfs_refcount_adjust_cow_error(cur, error, _RET_IP_);
return error;
}
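
The removal of the "agbno += XFS_REFC_COW_START" adjustment above is the heart of the domain rework: CoW staging records used to be kept apart from shared records by offsetting their startblock by a fixed amount, whereas now the distinction travels with the in-core record as rc_domain and is folded into the on-disk startblock only at encode/decode time. Roughly:

	/* Old scheme (removed): CoW records live at offset startblocks. */
	agbno += XFS_REFC_COW_START;
	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);

	/* New scheme: the caller states the record domain explicitly. */
	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
			agbno, &shape_changed);
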
@@ -1484,30 +1950,13 @@ STATIC int
__xfs_refcount_cow_alloc(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
- int error;
-
- trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
- agbno, aglen);
+ trace_xfs_refcount_cow_increase(rcur, agbno, aglen);
/* Add refcount btree reservation */
- error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
- if (error)
- return error;
-
- /* Add rmap entry */
- if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
- error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
- rcur->bc_private.a.agno,
- agbno, aglen, XFS_RMAP_OWN_COW);
- if (error)
- return error;
- }
-
- return error;
+ return xfs_refcount_adjust_cow(rcur, agbno, aglen,
+ XFS_REFCOUNT_ADJUST_COW_ALLOC);
}
/*
@@ -1517,60 +1966,50 @@ STATIC int
__xfs_refcount_cow_free(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
- int error;
-
- trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
- agbno, aglen);
+ trace_xfs_refcount_cow_decrease(rcur, agbno, aglen);
/* Remove refcount btree reservation */
- error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
- if (error)
- return error;
-
- /* Remove rmap entry */
- if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
- error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
- rcur->bc_private.a.agno,
- agbno, aglen, XFS_RMAP_OWN_COW);
- if (error)
- return error;
- }
-
- return error;
+ return xfs_refcount_adjust_cow(rcur, agbno, aglen,
+ XFS_REFCOUNT_ADJUST_COW_FREE);
}
/* Record a CoW staging extent in the refcount btree. */
-int
+void
xfs_refcount_alloc_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ struct xfs_mount *mp = tp->t_mountp;
+
+ if (!xfs_has_reflink(mp))
+ return;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
- fsb, len);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, isrt, fsb, len);
+
+ /* Add rmap entry */
+ xfs_rmap_alloc_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
-int
+void
xfs_refcount_free_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ struct xfs_mount *mp = tp->t_mountp;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
- fsb, len);
+ if (!xfs_has_reflink(mp))
+ return;
+
+ /* Remove rmap entry */
+ xfs_rmap_free_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, isrt, fsb, len);
}
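
For orientation, both helpers are now void, transaction-based front ends: they queue deferred intents (a refcount adjustment plus an OWN_COW rmap update) rather than modifying the btrees directly. A minimal caller-side sketch, in which the wrapper name and control flow are purely illustrative and only the two calls themselves come from this patch:

/* Illustrative only: stage an extent for CoW, then retire the staging record. */
static void
example_cow_stage_and_retire(
	struct xfs_trans	*tp,
	bool			isrt,
	xfs_fsblock_t		fsb,
	xfs_extlen_t		len)
{
	/* Defer a +1 adjustment in the CoW domain and an OWN_COW rmap entry. */
	xfs_refcount_alloc_cow_extent(tp, isrt, fsb, len);

	/* ... copy-on-write I/O and remapping happen elsewhere ... */

	/* Defer removal of the rmap entry and of the CoW staging record. */
	xfs_refcount_free_cow_extent(tp, isrt, fsb, len);
}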
struct xfs_refcount_recovery {
@@ -1582,42 +2021,70 @@ struct xfs_refcount_recovery {
STATIC int
xfs_refcount_recover_extent(
struct xfs_btree_cur *cur,
- union xfs_btree_rec *rec,
+ const union xfs_btree_rec *rec,
void *priv)
{
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
- if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ be32_to_cpu(rec->refc.rc_refcount) != 1)) {
+ xfs_btree_mark_sick(cur);
return -EFSCORRUPTED;
+ }
- rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
+ rr = kmalloc(sizeof(struct xfs_refcount_recovery),
+ GFP_KERNEL | __GFP_NOFAIL);
+ INIT_LIST_HEAD(&rr->rr_list);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
- list_add_tail(&rr->rr_list, debris);
+ if (xfs_refcount_check_btrec(cur, &rr->rr_rrec) != NULL ||
+ XFS_IS_CORRUPT(cur->bc_mp,
+ rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) {
+ xfs_btree_mark_sick(cur);
+ kfree(rr);
+ return -EFSCORRUPTED;
+ }
+
+ list_add_tail(&rr->rr_list, debris);
return 0;
}
/* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ struct xfs_group *xg)
{
+ struct xfs_mount *mp = xg->xg_mount;
+ bool isrt = xg->xg_type == XG_TYPE_RTG;
struct xfs_trans *tp;
struct xfs_btree_cur *cur;
- struct xfs_buf *agbp;
+ struct xfs_buf *agbp = NULL;
struct xfs_refcount_recovery *rr, *n;
struct list_head debris;
- union xfs_btree_irec low;
- union xfs_btree_irec high;
- struct xfs_defer_ops dfops;
+ union xfs_btree_irec low = {
+ .rc.rc_domain = XFS_REFC_DOMAIN_COW,
+ };
+ union xfs_btree_irec high = {
+ .rc.rc_domain = XFS_REFC_DOMAIN_COW,
+ .rc.rc_startblock = -1U,
+ };
xfs_fsblock_t fsb;
- xfs_agblock_t agbno;
int error;
- if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
- return -EOPNOTSUPP;
+ /* reflink filesystems must not have groups larger than 2^31-1 blocks */
+ BUILD_BUG_ON(XFS_MAX_RGBLOCKS >= XFS_REFC_COWFLAG);
+ BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG);
+
+ if (isrt) {
+ if (!xfs_has_rtgroups(mp))
+ return 0;
+ if (xfs_group_max_blocks(xg) >= XFS_MAX_RGBLOCKS)
+ return -EOPNOTSUPP;
+ } else {
+ if (xfs_group_max_blocks(xg) > XFS_MAX_CRC_AG_BLOCKS)
+ return -EOPNOTSUPP;
+ }
INIT_LIST_HEAD(&debris);
@@ -1631,31 +2098,29 @@ xfs_refcount_recover_cow_leftovers(
* recording the CoW debris we cancel the (empty) transaction
* and everything goes away cleanly.
*/
- error = xfs_trans_alloc_empty(mp, &tp);
- if (error)
- return error;
+ tp = xfs_trans_alloc_empty(mp);
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
- if (error)
- goto out_trans;
- if (!agbp) {
- error = -ENOMEM;
- goto out_trans;
+ if (isrt) {
+ xfs_rtgroup_lock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT);
+ cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(xg));
+ } else {
+ error = xfs_alloc_read_agf(to_perag(xg), tp, 0, &agbp);
+ if (error)
+ goto out_trans;
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, to_perag(xg));
}
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
/* Find all the leftover CoW staging extents. */
- memset(&low, 0, sizeof(low));
- memset(&high, 0, sizeof(high));
- low.rc.rc_startblock = XFS_REFC_COW_START;
- high.rc.rc_startblock = -1U;
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
- if (error)
- goto out_cursor;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- xfs_trans_brelse(tp, agbp);
+ xfs_btree_del_cursor(cur, error);
+ if (agbp)
+ xfs_trans_brelse(tp, agbp);
+ else
+ xfs_rtgroup_unlock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT);
xfs_trans_cancel(tp);
+ if (error)
+ goto out_free;
/* Now iterate the list to free the leftovers */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1664,48 +2129,117 @@ xfs_refcount_recover_cow_leftovers(
if (error)
goto out_free;
- trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
-
/* Free the orphan record */
- xfs_defer_init(&dfops, &fsb);
- agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
- fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
- error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
+ fsb = xfs_gbno_to_fsb(xg, rr->rr_rrec.rc_startblock);
+ xfs_refcount_free_cow_extent(tp, isrt, fsb,
rr->rr_rrec.rc_blockcount);
- if (error)
- goto out_defer;
/* Free the block. */
- xfs_bmap_add_free(mp, &dfops, fsb,
- rr->rr_rrec.rc_blockcount, NULL);
-
- error = xfs_defer_finish(&tp, &dfops, NULL);
+ error = xfs_free_extent_later(tp, fsb,
+ rr->rr_rrec.rc_blockcount, NULL,
+ XFS_AG_RESV_NONE,
+ isrt ? XFS_FREE_EXTENT_REALTIME : 0);
if (error)
- goto out_defer;
+ goto out_trans;
error = xfs_trans_commit(tp);
if (error)
goto out_free;
list_del(&rr->rr_list);
- kmem_free(rr);
+ kfree(rr);
}
return error;
-out_defer:
- xfs_defer_cancel(&dfops);
out_trans:
xfs_trans_cancel(tp);
out_free:
/* Free the leftover list */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
list_del(&rr->rr_list);
- kmem_free(rr);
+ kfree(rr);
}
return error;
+}
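
A mount-time caller is expected to walk every allocation group (and realtime group, when reflink on the realtime device is enabled) and hand each struct xfs_group to the function above. A rough sketch, where the group lookup helpers are stand-ins for whatever reference the caller already holds and only the recover call itself comes from this patch:

/* Illustrative only: sweep all data-device groups for stale CoW staging extents. */
static int
example_recover_all_cow(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	int			error;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/* stand-in lookup; real callers already have a group reference */
		struct xfs_group	*xg = xfs_group_get(mp, agno, XG_TYPE_AG);

		error = xfs_refcount_recover_cow_leftovers(xg);
		xfs_group_put(xg);
		if (error)
			return error;
	}
	return 0;
}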
+
+/*
+ * Scan part of the keyspace of the refcount records and tell us if the area
+ * has no records, is fully mapped by records, or is partially filled.
+ */
+int
+xfs_refcount_has_records(
+ struct xfs_btree_cur *cur,
+ enum xfs_refc_domain domain,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ enum xbtree_recpacking *outcome)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
-out_cursor:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- xfs_trans_brelse(tp, agbp);
- goto out_trans;
+ memset(&low, 0, sizeof(low));
+ low.rc.rc_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rc.rc_startblock = bno + len - 1;
+ low.rc.rc_domain = high.rc.rc_domain = domain;
+
+ return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
+}
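
The result is reported through enum xbtree_recpacking, so a caller can distinguish an untouched range from a partially or fully covered one. A hedged example of the kind of check a caller might build on top of it; the wrapper is hypothetical, while the query call, domain constant, and EMPTY outcome are from this patch:

/* Illustrative only: report whether any shared refcount records overlap [bno, bno + len). */
static int
example_range_is_unshared(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*unshared)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_refcount_has_records(cur, XFS_REFC_DOMAIN_SHARED,
			bno, len, &outcome);
	if (error)
		return error;

	/* XBTREE_RECPACKING_EMPTY: no record in the shared domain touches the range. */
	*unshared = (outcome == XBTREE_RECPACKING_EMPTY);
	return 0;
}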
+
+struct xfs_refcount_query_range_info {
+ xfs_refcount_query_range_fn fn;
+ void *priv;
+};
+
+/* Format btree record and pass to our callback. */
+STATIC int
+xfs_refcount_query_range_helper(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_refcount_query_range_info *query = priv;
+ struct xfs_refcount_irec irec;
+ xfs_failaddr_t fa;
+
+ xfs_refcount_btrec_to_irec(rec, &irec);
+ fa = xfs_refcount_check_btrec(cur, &irec);
+ if (fa)
+ return xfs_refcount_complain_bad_rec(cur, fa, &irec);
+
+ return query->fn(cur, &irec, query->priv);
+}
+
+/* Find all refcount records between two keys. */
+int
+xfs_refcount_query_range(
+ struct xfs_btree_cur *cur,
+ const struct xfs_refcount_irec *low_rec,
+ const struct xfs_refcount_irec *high_rec,
+ xfs_refcount_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_irec low_brec = { .rc = *low_rec };
+ union xfs_btree_irec high_brec = { .rc = *high_rec };
+ struct xfs_refcount_query_range_info query = { .priv = priv, .fn = fn };
+
+ return xfs_btree_query_range(cur, &low_brec, &high_brec,
+ xfs_refcount_query_range_helper, &query);
+}
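
Because both keys are expressed as struct xfs_refcount_irec and only the (domain, startblock) part acts as the key, a caller simply fills those fields and supplies a callback. A small hypothetical example that counts CoW staging records in a range:

/* Illustrative only: callback that tallies each record handed back by the query. */
static int
example_count_helper(
	struct xfs_btree_cur		*cur,
	const struct xfs_refcount_irec	*irec,
	void				*priv)
{
	unsigned int			*count = priv;

	(*count)++;
	return 0;
}

/* Illustrative only: count CoW staging records overlapping [bno, bno + len). */
static int
example_count_cow_records(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		*count)
{
	struct xfs_refcount_irec	low = {
		.rc_domain	= XFS_REFC_DOMAIN_COW,
		.rc_startblock	= bno,
	};
	struct xfs_refcount_irec	high = {
		.rc_domain	= XFS_REFC_DOMAIN_COW,
		.rc_startblock	= bno + len - 1,
	};

	*count = 0;
	return xfs_refcount_query_range(cur, &low, &high,
			example_count_helper, count);
}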
+
+int __init
+xfs_refcount_intent_init_cache(void)
+{
+ xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
+ sizeof(struct xfs_refcount_intent),
+ 0, 0, NULL);
+
+ return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_refcount_intent_destroy_cache(void)
+{
+ kmem_cache_destroy(xfs_refcount_intent_cache);
+ xfs_refcount_intent_cache = NULL;
}
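
The intent cache is created once at module load and torn down at unload; a condensed sketch of the pairing, with illustrative wrapper names (in practice this sits alongside the other per-intent cache constructors):

/* Illustrative only: module-lifetime pairing of the intent cache helpers. */
static int __init
example_refcount_caches_init(void)
{
	return xfs_refcount_intent_init_cache();
}

static void
example_refcount_caches_exit(void)
{
	xfs_refcount_intent_destroy_cache();
}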