Diffstat (limited to 'fs/xfs/libxfs/xfs_refcount_btree.c')
| -rw-r--r-- | fs/xfs/libxfs/xfs_refcount_btree.c | 164 |
1 file changed, 81 insertions, 83 deletions
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index e1f789866683..06da3ca14727 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -16,6 +16,7 @@
 #include "xfs_refcount.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
+#include "xfs_health.h"
 #include "xfs_trace.h"
 #include "xfs_trans.h"
 #include "xfs_bit.h"
@@ -29,7 +30,7 @@ xfs_refcountbt_dup_cursor(
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_ag.agbp, cur->bc_ag.pag);
+			cur->bc_ag.agbp, to_perag(cur->bc_group));
 }
 
 STATIC void
@@ -67,23 +68,20 @@ xfs_refcountbt_alloc_block(
 	memset(&args, 0, sizeof(args));
 	args.tp = cur->bc_tp;
 	args.mp = cur->bc_mp;
-	args.type = XFS_ALLOCTYPE_NEAR_BNO;
-	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
-			xfs_refc_block(args.mp));
+	args.pag = to_perag(cur->bc_group);
 	args.oinfo = XFS_RMAP_OINFO_REFC;
 	args.minlen = args.maxlen = args.prod = 1;
 	args.resv = XFS_AG_RESV_METADATA;
 
-	error = xfs_alloc_vextent(&args);
+	error = xfs_alloc_vextent_near_bno(&args,
+			xfs_agbno_to_fsb(args.pag, xfs_refc_block(args.mp)));
 	if (error)
 		goto out_error;
-	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
-			args.agbno, 1);
 	if (args.fsbno == NULLFSBLOCK) {
 		*stat = 0;
 		return 0;
 	}
-	ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
+	ASSERT(args.agno == cur->bc_group->xg_gno);
 	ASSERT(args.len == 1);
 
 	new->s = cpu_to_be32(args.agbno);
@@ -106,18 +104,11 @@ xfs_refcountbt_free_block(
 	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
-	int			error;
 
-	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
-			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
 	be32_add_cpu(&agf->agf_refcount_blocks, -1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
-	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
-			XFS_AG_RESV_METADATA);
-	if (error)
-		return error;
-
-	return error;
+	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
+			&XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA, 0);
 }
 
 STATIC int
@@ -178,13 +169,13 @@ xfs_refcountbt_init_ptr_from_cur(
 {
 	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_refcount_root;
 }
 
-STATIC int64_t
-xfs_refcountbt_key_diff(
+STATIC int
+xfs_refcountbt_cmp_key_with_cur(
 	struct xfs_btree_cur		*cur,
 	const union xfs_btree_key	*key)
 {
@@ -194,17 +185,20 @@ xfs_refcountbt_key_diff(
 
 	start = xfs_refcount_encode_startblock(irec->rc_startblock,
 			irec->rc_domain);
-	return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
+	return cmp_int(be32_to_cpu(kp->rc_startblock), start);
 }
 
-STATIC int64_t
-xfs_refcountbt_diff_two_keys(
+STATIC int
+xfs_refcountbt_cmp_two_keys(
 	struct xfs_btree_cur		*cur,
 	const union xfs_btree_key	*k1,
-	const union xfs_btree_key	*k2)
+	const union xfs_btree_key	*k2,
+	const union xfs_btree_key	*mask)
 {
-	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
-			be32_to_cpu(k2->refc.rc_startblock);
+	ASSERT(!mask || mask->refc.rc_startblock);
+
+	return cmp_int(be32_to_cpu(k1->refc.rc_startblock),
+		       be32_to_cpu(k2->refc.rc_startblock));
}
 
 STATIC xfs_failaddr_t
@@ -222,18 +216,29 @@ xfs_refcountbt_verify(
 	if (!xfs_has_reflink(mp))
 		return __this_address;
 
-	fa = xfs_btree_sblock_v5hdr_verify(bp);
+	fa = xfs_btree_agblock_v5hdr_verify(bp);
 	if (fa)
 		return fa;
 
 	level = be16_to_cpu(block->bb_level);
-	if (pag && pag->pagf_init) {
-		if (level >= pag->pagf_refcount_level)
+	if (pag && xfs_perag_initialised_agf(pag)) {
+		unsigned int	maxlevel = pag->pagf_refcount_level;
+
+#ifdef CONFIG_XFS_ONLINE_REPAIR
+		/*
+		 * Online repair could be rewriting the refcount btree, so
+		 * we'll validate against the larger of either tree while this
+		 * is going on.
+		 */
+		maxlevel = max_t(unsigned int, maxlevel,
+				pag->pagf_repair_refcount_level);
+#endif
+		if (level >= maxlevel)
 			return __this_address;
 	} else if (level >= mp->m_refc_maxlevels)
 		return __this_address;
 
-	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
+	return xfs_btree_agblock_verify(bp, mp->m_refc_mxr[level != 0]);
 }
 
 STATIC void
@@ -242,7 +247,7 @@ xfs_refcountbt_read_verify(
 {
 	xfs_failaddr_t	fa;
 
-	if (!xfs_btree_sblock_verify_crc(bp))
+	if (!xfs_btree_agblock_verify_crc(bp))
 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
 	else {
 		fa = xfs_refcountbt_verify(bp);
@@ -266,7 +271,7 @@ xfs_refcountbt_write_verify(
 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
 		return;
 	}
-	xfs_btree_sblock_calc_crc(bp);
+	xfs_btree_agblock_calc_crc(bp);
 
 }
 
@@ -299,9 +304,30 @@ xfs_refcountbt_recs_inorder(
 		be32_to_cpu(r2->refc.rc_startblock);
 }
 
-static const struct xfs_btree_ops xfs_refcountbt_ops = {
+STATIC enum xbtree_key_contig
+xfs_refcountbt_keys_contiguous(
+	struct xfs_btree_cur		*cur,
+	const union xfs_btree_key	*key1,
+	const union xfs_btree_key	*key2,
+	const union xfs_btree_key	*mask)
+{
+	ASSERT(!mask || mask->refc.rc_startblock);
+
+	return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
+				 be32_to_cpu(key2->refc.rc_startblock));
+}
+
+const struct xfs_btree_ops xfs_refcountbt_ops = {
+	.name			= "refcount",
+	.type			= XFS_BTREE_TYPE_AG,
+
 	.rec_len		= sizeof(struct xfs_refcount_rec),
 	.key_len		= sizeof(struct xfs_refcount_key),
+	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,
+
+	.lru_refs		= XFS_REFC_BTREE_REF,
+	.statoff		= XFS_STATS_CALC_INDEX(xs_refcbt_2),
+	.sick_mask		= XFS_SICK_AG_REFCNTBT,
 
 	.dup_cursor		= xfs_refcountbt_dup_cursor,
 	.set_root		= xfs_refcountbt_set_root,
@@ -313,43 +339,19 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
 	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
 	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
 	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
-	.key_diff		= xfs_refcountbt_key_diff,
+	.cmp_key_with_cur	= xfs_refcountbt_cmp_key_with_cur,
 	.buf_ops		= &xfs_refcountbt_buf_ops,
-	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
+	.cmp_two_keys		= xfs_refcountbt_cmp_two_keys,
 	.keys_inorder		= xfs_refcountbt_keys_inorder,
 	.recs_inorder		= xfs_refcountbt_recs_inorder,
+	.keys_contiguous	= xfs_refcountbt_keys_contiguous,
 };
 
 /*
- * Initialize a new refcount btree cursor.
+ * Create a new refcount btree cursor.
+ *
+ * For staging cursors tp and agbp are NULL.
  */
-static struct xfs_btree_cur *
-xfs_refcountbt_init_common(
-	struct xfs_mount	*mp,
-	struct xfs_trans	*tp,
-	struct xfs_perag	*pag)
-{
-	struct xfs_btree_cur	*cur;
-
-	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
-
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
-			mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
-	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
-
-	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
-	/* take a reference for the cursor */
-	atomic_inc(&pag->pag_ref);
-	cur->bc_ag.pag = pag;
-
-	cur->bc_ag.refc.nr_ops = 0;
-	cur->bc_ag.refc.shape_changes = 0;
-	cur->bc_ops = &xfs_refcountbt_ops;
-	return cur;
-}
-
-/* Create a btree cursor. */
 struct xfs_btree_cur *
 xfs_refcountbt_init_cursor(
 	struct xfs_mount	*mp,
@@ -357,26 +359,21 @@ xfs_refcountbt_init_cursor(
 	struct xfs_buf		*agbp,
 	struct xfs_perag	*pag)
 {
-	struct xfs_agf		*agf = agbp->b_addr;
 	struct xfs_btree_cur	*cur;
 
-	cur = xfs_refcountbt_init_common(mp, tp, pag);
-	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
-	cur->bc_ag.agbp = agbp;
-	return cur;
-}
+	ASSERT(pag_agno(pag) < mp->m_sb.sb_agcount);
 
-/* Create a btree cursor with a fake root for staging. */
-struct xfs_btree_cur *
-xfs_refcountbt_stage_cursor(
-	struct xfs_mount	*mp,
-	struct xbtree_afakeroot	*afake,
-	struct xfs_perag	*pag)
-{
-	struct xfs_btree_cur	*cur;
+	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops,
+			mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
+	cur->bc_group = xfs_group_hold(pag_group(pag));
+	cur->bc_refc.nr_ops = 0;
+	cur->bc_refc.shape_changes = 0;
+	cur->bc_ag.agbp = agbp;
+	if (agbp) {
+		struct xfs_agf		*agf = agbp->b_addr;
 
-	cur = xfs_refcountbt_init_common(mp, NULL, pag);
-	xfs_btree_stage_afakeroot(cur, afake);
+		cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
+	}
 	return cur;
 }
 
@@ -401,7 +398,7 @@ xfs_refcountbt_commit_staged_btree(
 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
 				    XFS_AGF_REFCOUNT_ROOT |
 				    XFS_AGF_REFCOUNT_LEVEL);
-	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
+	xfs_btree_commit_afakeroot(cur, tp, agbp);
 }
 
 /* Calculate number of records in a refcount btree block. */
@@ -419,9 +416,10 @@ xfs_refcountbt_block_maxrecs(
 /*
  * Calculate the number of records in a refcount btree block.
  */
-int
+unsigned int
 xfs_refcountbt_maxrecs(
-	int			blocklen,
+	struct xfs_mount	*mp,
+	unsigned int		blocklen,
 	bool			leaf)
 {
 	blocklen -= XFS_REFCOUNT_BLOCK_LEN;
@@ -516,7 +514,7 @@ xfs_refcountbt_calc_reserves(
 	 * never be available for the kinds of things that would require btree
 	 * expansion.  We therefore can pretend the space isn't there.
	 */
-	if (xfs_ag_contains_log(mp, pag->pag_agno))
+	if (xfs_ag_contains_log(mp, pag_agno(pag)))
 		agblocks -= mp->m_sb.sb_logblocks;
 
 	*ask += xfs_refcountbt_max_size(mp, agblocks);
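The diff above folds the old xfs_refcountbt_stage_cursor() into the single xfs_refcountbt_init_cursor(), which now notes that "for staging cursors tp and agbp are NULL". The sketch below is illustrative only and not part of the patch: the wrapper function and its afake parameter are hypothetical, while xfs_refcountbt_init_cursor() and xfs_btree_stage_afakeroot() are the calls visible in the diff itself.

```c
/*
 * Hypothetical caller sketch (not from this patch) showing how the single
 * constructor is presumably used now that the dedicated staging constructor
 * is gone.  Only the two xfs_* calls are taken from the diff above.
 */
static struct xfs_btree_cur *
example_refcountbt_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	struct xbtree_afakeroot	*afake)	/* non-NULL for a staging cursor */
{
	struct xfs_btree_cur	*cur;

	if (!afake) {
		/* Regular cursor: bc_nlevels is read from the AGF buffer. */
		return xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
	}

	/* Staging cursor: tp and agbp are NULL, then attach the fake root. */
	cur = xfs_refcountbt_init_cursor(mp, NULL, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
```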
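The key comparison callbacks also change shape here: key_diff/diff_two_keys returned an int64_t difference, while the new cmp_key_with_cur/cmp_two_keys return a plain int with the usual negative/zero/positive meaning via cmp_int(). A minimal stand-in, assuming the conventional (a > b) - (a < b) definition (the kernel supplies its own macro), would be:

```c
/*
 * Stand-in for the kernel's cmp_int() helper, assuming the conventional
 * three-way comparison.  Unlike the old (int64_t) subtraction, the result
 * is only ever -1, 0, or 1, which is all the new ->cmp_two_keys and
 * ->cmp_key_with_cur contracts require.
 */
#define example_cmp_int(a, b)	(((a) > (b)) - ((a) < (b)))
```

The new signatures also take a mask argument; per the ASSERTs in the diff, a non-NULL mask may only select rc_startblock, since that is the sole field these keys compare on.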
