Diffstat (limited to 'fs/xfs/libxfs/xfs_bmap_btree.c')
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap_btree.c | 289
1 file changed, 187 insertions, 102 deletions
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 71f2d50f7823..908d7b050e9c 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -26,6 +26,22 @@ static struct kmem_cache *xfs_bmbt_cur_cache;
+void
+xfs_bmbt_init_block(
+        struct xfs_inode        *ip,
+        struct xfs_btree_block  *buf,
+        struct xfs_buf          *bp,
+        __u16                   level,
+        __u16                   numrecs)
+{
+        if (bp)
+                xfs_btree_init_buf(ip->i_mount, bp, &xfs_bmbt_ops, level,
+                                numrecs, ip->i_ino);
+        else
+                xfs_btree_init_block(ip->i_mount, buf, &xfs_bmbt_ops, level,
+                                numrecs, ip->i_ino);
+}
+
 /*
  * Convert on-disk form of btree root to in-memory form.
  */
@@ -44,17 +60,15 @@ xfs_bmdr_to_bmbt(
         xfs_bmbt_key_t          *tkp;
         __be64                  *tpp;
-        xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
-                                 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
-                                 XFS_BTREE_LONG_PTRS);
+        xfs_bmbt_init_block(ip, rblock, NULL, 0, 0);
         rblock->bb_level = dblock->bb_level;
         ASSERT(be16_to_cpu(rblock->bb_level) > 0);
         rblock->bb_numrecs = dblock->bb_numrecs;
         dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
-        fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
-        tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
-        fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
-        tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
+        fkp = xfs_bmdr_key_addr(dblock, 1);
+        tkp = xfs_bmbt_key_addr(mp, rblock, 1);
+        fpp = xfs_bmdr_ptr_addr(dblock, 1, dmxr);
+        tpp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
         dmxr = be16_to_cpu(dblock->bb_numrecs);
         memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
         memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
@@ -154,10 +168,10 @@ xfs_bmbt_to_bmdr(
         dblock->bb_level = rblock->bb_level;
         dblock->bb_numrecs = rblock->bb_numrecs;
         dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
-        fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
-        tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
-        fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
-        tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
+        fkp = xfs_bmbt_key_addr(mp, rblock, 1);
+        tkp = xfs_bmdr_key_addr(dblock, 1);
+        fpp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
+        tpp = xfs_bmdr_ptr_addr(dblock, 1, dmxr);
         dmxr = be16_to_cpu(dblock->bb_numrecs);
         memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
         memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
@@ -171,13 +185,8 @@ xfs_bmbt_dup_cursor(
         new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
                         cur->bc_ino.ip, cur->bc_ino.whichfork);
-
-        /*
-         * Copy the firstblock, dfops, and flags values,
-         * since init cursor doesn't get them.
-         */
-        new->bc_ino.flags = cur->bc_ino.flags;
-
+        new->bc_flags |= (cur->bc_flags &
+                (XFS_BTREE_BMBT_INVALID_OWNER | XFS_BTREE_BMBT_WASDEL));
         return new;
 }
@@ -189,10 +198,10 @@ xfs_bmbt_update_cursor(
         ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
                (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));
-        dst->bc_ino.allocated += src->bc_ino.allocated;
+        dst->bc_bmap.allocated += src->bc_bmap.allocated;
         dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;
-        src->bc_ino.allocated = 0;
+        src->bc_bmap.allocated = 0;
 }
 STATIC int
@@ -211,7 +220,7 @@ xfs_bmbt_alloc_block(
         xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
                         cur->bc_ino.whichfork);
         args.minlen = args.maxlen = args.prod = 1;
-        args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
+        args.wasdel = cur->bc_flags & XFS_BTREE_BMBT_WASDEL;
         if (!args.wasdel && args.tp->t_blk_res == 0)
                 return -ENOSPC;
@@ -247,7 +256,7 @@ xfs_bmbt_alloc_block(
         }
         ASSERT(args.len == 1);
-        cur->bc_ino.allocated++;
+        cur->bc_bmap.allocated++;
         cur->bc_ino.ip->i_nblocks++;
         xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
         xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
@@ -273,7 +282,7 @@ xfs_bmbt_free_block(
         xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
         error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
-                        XFS_AG_RESV_NONE, false);
+                        XFS_AG_RESV_NONE, 0);
         if (error)
                 return error;
@@ -360,14 +369,6 @@ xfs_bmbt_init_rec_from_cur(
         xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
 }
-STATIC void
-xfs_bmbt_init_ptr_from_cur(
-        struct xfs_btree_cur    *cur,
-        union xfs_btree_ptr     *ptr)
-{
-        ptr->l = 0;
-}
-
 STATIC int64_t
 xfs_bmbt_key_diff(
         struct xfs_btree_cur    *cur,
@@ -419,7 +420,7 @@ xfs_bmbt_verify(
                  * XXX: need a better way of verifying the owner here. Right now
                  * just make sure there has been one set.
                  */
-                fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+                fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
                 if (fa)
                         return fa;
         }
@@ -435,7 +436,7 @@ xfs_bmbt_verify(
         if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
                 return __this_address;
-        return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
+        return xfs_btree_fsblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
 }
 static void
@@ -444,7 +445,7 @@ xfs_bmbt_read_verify(
 {
         xfs_failaddr_t  fa;
-        if (!xfs_btree_lblock_verify_crc(bp))
+        if (!xfs_btree_fsblock_verify_crc(bp))
                 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
         else {
                 fa = xfs_bmbt_verify(bp);
@@ -468,7 +469,7 @@ xfs_bmbt_write_verify(
                 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                 return;
         }
-        xfs_btree_lblock_calc_crc(bp);
+        xfs_btree_fsblock_calc_crc(bp);
 }
 const struct xfs_buf_ops xfs_bmbt_buf_ops = {
@@ -515,9 +516,126 @@ xfs_bmbt_keys_contiguous(
                                  be64_to_cpu(key2->bmbt.br_startoff));
 }
-static const struct xfs_btree_ops xfs_bmbt_ops = {
+static inline void
+xfs_bmbt_move_ptrs(
+        struct xfs_mount        *mp,
+        struct xfs_btree_block  *broot,
+        short                   old_size,
+        size_t                  new_size,
+        unsigned int            numrecs)
+{
+        void                    *dptr;
+        void                    *sptr;
+
+        sptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, old_size);
+        dptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, new_size);
+        memmove(dptr, sptr, numrecs * sizeof(xfs_bmbt_ptr_t));
+}
+
+/*
+ * Reallocate the space for if_broot based on the number of records. Move the
+ * records and pointers in if_broot to fit the new size. When shrinking this
+ * will eliminate holes between the records and pointers created by the caller.
+ * When growing this will create holes to be filled in by the caller.
+ *
+ * The caller must not request to add more records than would fit in the
+ * on-disk inode root. If the if_broot is currently NULL, then if we are
+ * adding records, one will be allocated. The caller must also not request
+ * that the number of records go below zero, although it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * whichfork -- which inode fork to change
+ * new_numrecs -- the new number of records requested for the if_broot array
+ *
+ * Returns the incore btree root block.
+ */
+struct xfs_btree_block *
+xfs_bmap_broot_realloc(
+        struct xfs_inode        *ip,
+        int                     whichfork,
+        unsigned int            new_numrecs)
+{
+        struct xfs_mount        *mp = ip->i_mount;
+        struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
+        struct xfs_btree_block  *broot;
+        unsigned int            new_size;
+        unsigned int            old_size = ifp->if_broot_bytes;
+
+        /*
+         * Block mapping btrees do not support storing zero records; if this
+         * happens, the fork is being changed to FMT_EXTENTS. Free the broot
+         * and get out.
+         */
+        if (new_numrecs == 0)
+                return xfs_broot_realloc(ifp, 0);
+
+        new_size = xfs_bmap_broot_space_calc(mp, new_numrecs);
+
+        /* Handle the nop case quietly. */
+        if (new_size == old_size)
+                return ifp->if_broot;
+
+        if (new_size > old_size) {
+                unsigned int    old_numrecs;
+
+                /*
+                 * If there wasn't any memory allocated before, just
+                 * allocate it now and get out.
+                 */
+                if (old_size == 0)
+                        return xfs_broot_realloc(ifp, new_size);
+
+                /*
+                 * If there is already an existing if_broot, then we need
+                 * to realloc() it and shift the pointers to their new
+                 * location. The records don't change location because
+                 * they are kept butted up against the btree block header.
+                 */
+                old_numrecs = xfs_bmbt_maxrecs(mp, old_size, false);
+                broot = xfs_broot_realloc(ifp, new_size);
+                ASSERT(xfs_bmap_bmdr_space(broot) <=
+                        xfs_inode_fork_size(ip, whichfork));
+                xfs_bmbt_move_ptrs(mp, broot, old_size, new_size, old_numrecs);
+                return broot;
+        }
+
+        /*
+         * We're reducing, but not totally eliminating, numrecs. In this case,
+         * we are shrinking the if_broot buffer, so it must already exist.
+         */
+        ASSERT(ifp->if_broot != NULL && old_size > 0 && new_size > 0);
+
+        /*
+         * Shrink the btree root by moving the bmbt pointers, since they are
+         * not butted up against the btree block header, then reallocating
+         * broot.
+         */
+        xfs_bmbt_move_ptrs(mp, ifp->if_broot, old_size, new_size, new_numrecs);
+        broot = xfs_broot_realloc(ifp, new_size);
+        ASSERT(xfs_bmap_bmdr_space(broot) <=
+                xfs_inode_fork_size(ip, whichfork));
+        return broot;
+}
+
+static struct xfs_btree_block *
+xfs_bmbt_broot_realloc(
+        struct xfs_btree_cur    *cur,
+        unsigned int            new_numrecs)
+{
+        return xfs_bmap_broot_realloc(cur->bc_ino.ip, cur->bc_ino.whichfork,
+                        new_numrecs);
+}
+
+const struct xfs_btree_ops xfs_bmbt_ops = {
+        .name                   = "bmap",
+        .type                   = XFS_BTREE_TYPE_INODE,
+
         .rec_len                = sizeof(xfs_bmbt_rec_t),
         .key_len                = sizeof(xfs_bmbt_key_t),
+        .ptr_len                = XFS_BTREE_LONG_PTR_LEN,
+
+        .lru_refs               = XFS_BMAP_BTREE_REF,
+        .statoff                = XFS_STATS_CALC_INDEX(xs_bmbt_2),
         .dup_cursor             = xfs_bmbt_dup_cursor,
         .update_cursor          = xfs_bmbt_update_cursor,
@@ -529,44 +647,19 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
         .init_key_from_rec      = xfs_bmbt_init_key_from_rec,
         .init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
         .init_rec_from_cur      = xfs_bmbt_init_rec_from_cur,
-        .init_ptr_from_cur      = xfs_bmbt_init_ptr_from_cur,
         .key_diff               = xfs_bmbt_key_diff,
         .diff_two_keys          = xfs_bmbt_diff_two_keys,
         .buf_ops                = &xfs_bmbt_buf_ops,
         .keys_inorder           = xfs_bmbt_keys_inorder,
         .recs_inorder           = xfs_bmbt_recs_inorder,
         .keys_contiguous        = xfs_bmbt_keys_contiguous,
+        .broot_realloc          = xfs_bmbt_broot_realloc,
 };
-static struct xfs_btree_cur *
-xfs_bmbt_init_common(
-        struct xfs_mount        *mp,
-        struct xfs_trans        *tp,
-        struct xfs_inode        *ip,
-        int                     whichfork)
-{
-        struct xfs_btree_cur    *cur;
-
-        ASSERT(whichfork != XFS_COW_FORK);
-
-        cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
-                        mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
-        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
-
-        cur->bc_ops = &xfs_bmbt_ops;
-        cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
-        if (xfs_has_crc(mp))
-                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
-        cur->bc_ino.ip = ip;
-        cur->bc_ino.allocated = 0;
-        cur->bc_ino.flags = 0;
-
-        return cur;
-}
-
 /*
- * Allocate a new bmap btree cursor.
+ * Create a new bmap btree cursor.
+ *
+ * For staging cursors -1 in passed in whichfork.
  */
 struct xfs_btree_cur *
 xfs_bmbt_init_cursor(
@@ -575,15 +668,34 @@
         struct xfs_inode        *ip,
         int                     whichfork)
 {
-        struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
         struct xfs_btree_cur    *cur;
+        unsigned int            maxlevels;
-        cur = xfs_bmbt_init_common(mp, tp, ip, whichfork);
+        ASSERT(whichfork != XFS_COW_FORK);
-        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
-        cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
+        /*
+         * The Data fork always has larger maxlevel, so use that for staging
+         * cursors.
+         */
+        switch (whichfork) {
+        case XFS_STAGING_FORK:
+                maxlevels = mp->m_bm_maxlevels[XFS_DATA_FORK];
+                break;
+        default:
+                maxlevels = mp->m_bm_maxlevels[whichfork];
+                break;
+        }
+        cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bmbt_ops, maxlevels,
+                        xfs_bmbt_cur_cache);
+        cur->bc_ino.ip = ip;
         cur->bc_ino.whichfork = whichfork;
+        cur->bc_bmap.allocated = 0;
+        if (whichfork != XFS_STAGING_FORK) {
+                struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
+                cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+                cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
+        }
         return cur;
 }
@@ -599,33 +711,6 @@ xfs_bmbt_block_maxrecs(
 }
 /*
- * Allocate a new bmap btree cursor for reloading an inode block mapping data
- * structure. Note that callers can use the staged cursor to reload extents
- * format inode forks if they rebuild the iext tree and commit the staged
- * cursor immediately.
- */
-struct xfs_btree_cur *
-xfs_bmbt_stage_cursor(
-        struct xfs_mount        *mp,
-        struct xfs_inode        *ip,
-        struct xbtree_ifakeroot *ifake)
-{
-        struct xfs_btree_cur    *cur;
-        struct xfs_btree_ops    *ops;
-
-        /* data fork always has larger maxheight */
-        cur = xfs_bmbt_init_common(mp, NULL, ip, XFS_DATA_FORK);
-        cur->bc_nlevels = ifake->if_levels;
-        cur->bc_ino.forksize = ifake->if_fork_size;
-
-        /* Don't let anyone think we're attached to the real fork yet. */
-        cur->bc_ino.whichfork = -1;
-        xfs_btree_stage_ifakeroot(cur, ifake, &ops);
-        ops->update_cursor = NULL;
-        return cur;
-}
-
-/*
  * Swap in the new inode fork root. Once we pass this point the newly rebuilt
  * mappings are in place and we have to kill off any old btree blocks.
  */
@@ -665,19 +750,19 @@ xfs_bmbt_commit_staged_btree(
                 break;
         }
         xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
-        xfs_btree_commit_ifakeroot(cur, tp, whichfork, &xfs_bmbt_ops);
+        xfs_btree_commit_ifakeroot(cur, tp, whichfork);
 }
 /*
  * Calculate number of records in a bmap btree block.
  */
-int
+unsigned int
 xfs_bmbt_maxrecs(
         struct xfs_mount        *mp,
-        int                     blocklen,
-        int                     leaf)
+        unsigned int            blocklen,
+        bool                    leaf)
 {
-        blocklen -= XFS_BMBT_BLOCK_LEN(mp);
+        blocklen -= xfs_bmbt_block_len(mp);
         return xfs_bmbt_block_maxrecs(blocklen, leaf);
 }
@@ -751,7 +836,7 @@ xfs_bmbt_change_owner(
         ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
         cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
-        cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;
+        cur->bc_flags |= XFS_BTREE_BMBT_INVALID_OWNER;
         error = xfs_btree_change_owner(cur, new_owner, buffer_list);
         xfs_btree_del_cursor(cur, error);
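
As a reading aid for the broot resizing hunks above, here is a minimal user-space sketch of the layout idea they rely on: keys stay butted up against the block header, while the pointer array lives at an offset derived from the current buffer size, so growing or shrinking the root only requires a memmove of the pointers plus a reallocation, done in the right order. Everything below (struct demo_hdr, demo_key_t, demo_ptr_t, demo_root_realloc and the helpers) is a hypothetical stand-in written for illustration, not the kernel's types or API; the real code operates on ifp->if_broot through xfs_broot_realloc(), xfs_bmap_broot_ptr_addr() and xfs_bmap_broot_space_calc().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical, simplified root block: header, then keys[], then ptrs[]. */
struct demo_hdr { uint32_t level; uint32_t numrecs; };
typedef uint64_t demo_key_t;
typedef uint64_t demo_ptr_t;

/* Records that fit in a root buffer of 'bytes' bytes (one key + one ptr each). */
static unsigned int demo_maxrecs(size_t bytes)
{
        return (bytes - sizeof(struct demo_hdr)) /
               (sizeof(demo_key_t) + sizeof(demo_ptr_t));
}

/* Buffer size needed for 'nrecs' records (cf. xfs_bmap_broot_space_calc). */
static size_t demo_space(unsigned int nrecs)
{
        return sizeof(struct demo_hdr) +
               nrecs * (sizeof(demo_key_t) + sizeof(demo_ptr_t));
}

/*
 * Keys are butted up against the header; the pointer array starts after as
 * many key slots as the *current* buffer size can hold, which is why resizing
 * the buffer requires moving the pointers (cf. xfs_bmap_broot_ptr_addr).
 */
static demo_ptr_t *demo_ptr_base(void *root, size_t bytes)
{
        return (demo_ptr_t *)((char *)root + sizeof(struct demo_hdr) +
                        demo_maxrecs(bytes) * sizeof(demo_key_t));
}

/* Resize the root, relocating the pointers like xfs_bmbt_move_ptrs() does. */
static struct demo_hdr *demo_root_realloc(struct demo_hdr *root,
                size_t old_bytes, unsigned int new_nrecs)
{
        size_t new_bytes = demo_space(new_nrecs);

        if (new_bytes > old_bytes) {
                /* Grow: enlarge first, then slide the old pointer slots up. */
                unsigned int old_nrecs = demo_maxrecs(old_bytes);

                root = realloc(root, new_bytes);
                assert(root);
                memmove(demo_ptr_base(root, new_bytes),
                        demo_ptr_base(root, old_bytes),
                        old_nrecs * sizeof(demo_ptr_t));
        } else if (new_bytes < old_bytes) {
                /* Shrink: slide the surviving pointers down, then trim. */
                memmove(demo_ptr_base(root, new_bytes),
                        demo_ptr_base(root, old_bytes),
                        new_nrecs * sizeof(demo_ptr_t));
                root = realloc(root, new_bytes);
                assert(root);
        }
        return root;
}

int main(void)
{
        size_t bytes = demo_space(2);
        struct demo_hdr *root = calloc(1, bytes);
        demo_key_t *keys;
        demo_ptr_t *ptrs;
        unsigned int i;

        assert(root);
        keys = (demo_key_t *)(root + 1);
        ptrs = demo_ptr_base(root, bytes);

        /* Two records: keys 100/200 point at "blocks" 1000/2000. */
        root->level = 1;
        root->numrecs = 2;
        keys[0] = 100; ptrs[0] = 1000;
        keys[1] = 200; ptrs[1] = 2000;

        /* Grow to five record slots, then shrink back down to three. */
        root = demo_root_realloc(root, bytes, 5);
        root = demo_root_realloc(root, demo_space(5), 3);

        keys = (demo_key_t *)(root + 1);
        ptrs = demo_ptr_base(root, demo_space(3));
        for (i = 0; i < root->numrecs; i++)
                printf("rec %u: key %llu -> ptr %llu\n", i,
                       (unsigned long long)keys[i],
                       (unsigned long long)ptrs[i]);

        free(root);
        return 0;
}

Note that the ordering mirrors xfs_bmap_broot_realloc() in the diff: when growing, the buffer is enlarged before the pointers slide up to their new offset; when shrinking, the pointers slide down before the buffer is trimmed.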