Diffstat (limited to 'fs/ufs/balloc.c')
| -rw-r--r-- | fs/ufs/balloc.c | 137 |
1 file changed, 56 insertions, 81 deletions
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 075d3d9114c8..194ed3ab945e 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -33,6 +33,29 @@ static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *
 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
 
+static void adjust_free_blocks(struct super_block *sb,
+                               struct ufs_cylinder_group *ucg,
+                               struct ufs_cg_private_info *ucpi,
+                               unsigned fragment, int delta)
+{
+        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+
+        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+                ufs_clusteracct(sb, ucpi, fragment, delta);
+
+        fs32_add(sb, &ucg->cg_cs.cs_nbfree, delta);
+        uspi->cs_total.cs_nbfree += delta;
+        fs32_add(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, delta);
+
+        if (uspi->fs_magic != UFS2_MAGIC) {
+                unsigned cylno = ufs_cbtocylno(fragment);
+
+                fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
+                                          ufs_cbtorpos(fragment)), delta);
+                fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), delta);
+        }
+}
+
 /*
  * Free 'count' fragments from fragment number 'fragment'
  */
@@ -43,7 +66,6 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
         struct ufs_cg_private_info * ucpi;
         struct ufs_cylinder_group * ucg;
         unsigned cgno, bit, end_bit, bbase, blkmap, i;
-        u64 blkno;
 
         sb = inode->i_sb;
         uspi = UFS_SB(sb)->s_uspi;
@@ -51,7 +73,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
         UFSD("ENTER, fragment %llu, count %u\n",
              (unsigned long long)fragment, count);
 
-        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
+        if (ufs_fragnum(fragment) + count > uspi->s_fpb)
                 ufs_error (sb, "ufs_free_fragments", "internal error");
 
         mutex_lock(&UFS_SB(sb)->s_lock);
@@ -94,23 +116,11 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
         /*
          * Trying to reassemble free fragments into block
          */
-        blkno = ufs_fragstoblks (bbase);
-        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
+        if (ubh_isblockset(uspi, ucpi, bbase)) {
                 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                 uspi->cs_total.cs_nffree -= uspi->s_fpb;
                 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
-                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
-                        ufs_clusteracct (sb, ucpi, blkno, 1);
-                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
-                uspi->cs_total.cs_nbfree++;
-                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
-                if (uspi->fs_magic != UFS2_MAGIC) {
-                        unsigned cylno = ufs_cbtocylno (bbase);
-
-                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
-                                                  ufs_cbtorpos(bbase)), 1);
-                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
-                }
+                adjust_free_blocks(sb, ucg, ucpi, bbase, 1);
         }
 
         ubh_mark_buffer_dirty (USPI_UBH(uspi));
@@ -139,7 +149,6 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
         struct ufs_cg_private_info * ucpi;
         struct ufs_cylinder_group * ucg;
         unsigned overflow, cgno, bit, end_bit, i;
-        u64 blkno;
 
         sb = inode->i_sb;
         uspi = UFS_SB(sb)->s_uspi;
@@ -181,26 +190,12 @@ do_more:
         }
 
         for (i = bit; i < end_bit; i += uspi->s_fpb) {
-                blkno = ufs_fragstoblks(i);
-                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
+                if (ubh_isblockset(uspi, ucpi, i)) {
                         ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                 }
-                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+                ubh_setblock(uspi, ucpi, i);
                 inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
-                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
-                        ufs_clusteracct (sb, ucpi, blkno, 1);
-
-                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
-                uspi->cs_total.cs_nbfree++;
-                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
-
-                if (uspi->fs_magic != UFS2_MAGIC) {
-                        unsigned cylno = ufs_cbtocylno(i);
-
-                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
-                                                  ufs_cbtorpos(i)), 1);
-                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
-                }
+                adjust_free_blocks(sb, ucg, ucpi, i, 1);
         }
 
         ubh_mark_buffer_dirty (USPI_UBH(uspi));
@@ -234,12 +229,13 @@ failed:
  * situated at the end of file.
  *
  * We can come here from ufs_writepage or ufs_prepare_write,
- * locked_page is argument of these functions, so we already lock it.
+ * locked_folio is argument of these functions, so we already lock it.
  */
 static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                                unsigned int count, sector_t oldb,
-                               sector_t newb, struct page *locked_page)
+                               sector_t newb, struct folio *locked_folio)
 {
+        struct folio *folio;
         const unsigned blks_per_page =
                 1 << (PAGE_SHIFT - inode->i_blkbits);
         const unsigned mask = blks_per_page - 1;
@@ -247,42 +243,39 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
         pgoff_t index, cur_index, last_index;
         unsigned pos, j, lblock;
         sector_t end, i;
-        struct page *page;
         struct buffer_head *head, *bh;
 
         UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
               inode->i_ino, count,
              (unsigned long long)oldb, (unsigned long long)newb);
 
-        BUG_ON(!locked_page);
-        BUG_ON(!PageLocked(locked_page));
+        BUG_ON(!folio_test_locked(locked_folio));
 
-        cur_index = locked_page->index;
+        cur_index = locked_folio->index;
         end = count + beg;
         last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
 
         for (i = beg; i < end; i = (i | mask) + 1) {
                 index = i >> (PAGE_SHIFT - inode->i_blkbits);
 
                 if (likely(cur_index != index)) {
-                        page = ufs_get_locked_page(mapping, index);
-                        if (!page)/* it was truncated */
+                        folio = ufs_get_locked_folio(mapping, index);
+                        if (!folio) /* it was truncated */
                                 continue;
-                        if (IS_ERR(page)) {/* or EIO */
+                        if (IS_ERR(folio)) {/* or EIO */
                                 ufs_error(inode->i_sb, __func__,
                                           "read of page %llu failed\n",
                                           (unsigned long long)index);
                                 continue;
                         }
                 } else
-                        page = locked_page;
+                        folio = locked_folio;
 
-                head = page_buffers(page);
+                head = folio_buffers(folio);
                 bh = head;
                 pos = i & mask;
                 for (j = 0; j < pos; ++j)
                         bh = bh->b_this_page;
-
                 if (unlikely(index == last_index))
                         lblock = end & mask;
                 else
@@ -295,14 +288,10 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 
                         if (!buffer_mapped(bh))
                                         map_bh(bh, inode->i_sb, oldb + pos);
-                        if (!buffer_uptodate(bh)) {
-                                ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-                                wait_on_buffer(bh);
-                                if (!buffer_uptodate(bh)) {
-                                        ufs_error(inode->i_sb, __func__,
-                                                  "read of block failed\n");
-                                        break;
-                                }
+                        if (bh_read(bh, 0) < 0) {
+                                ufs_error(inode->i_sb, __func__,
+                                          "read of block failed\n");
+                                break;
                         }
 
                         UFSD(" change from %llu to %llu, pos %u\n",
@@ -317,7 +306,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                 } while (bh != head);
 
                 if (likely(cur_index != index))
-                        ufs_put_locked_page(page);
+                        ufs_put_locked_folio(folio);
         }
         UFSD("EXIT\n");
 }
@@ -343,7 +332,7 @@ static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
 
 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                            u64 goal, unsigned count, int *err,
-                           struct page *locked_page)
+                           struct folio *locked_folio)
 {
         struct super_block * sb;
         struct ufs_sb_private_info * uspi;
@@ -423,7 +412,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                 if (result) {
                         ufs_clear_frags(inode, result + oldcount,
-                                        newcount - oldcount, locked_page != NULL);
+                                        newcount - oldcount, locked_folio != NULL);
                         *err = 0;
                         write_seqlock(&UFS_I(inode)->meta_lock);
                         ufs_cpu_to_data_ptr(sb, p, result);
@@ -447,7 +436,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                                     fragment + count);
                 read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
-                                locked_page != NULL);
+                                locked_folio != NULL);
                 mutex_unlock(&UFS_SB(sb)->s_lock);
                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
                 return result;
@@ -468,11 +457,11 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
         result = ufs_alloc_fragments (inode, cgno, goal, request, err);
         if (result) {
                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
-                                locked_page != NULL);
+                                locked_folio != NULL);
                 mutex_unlock(&UFS_SB(sb)->s_lock);
                 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                    uspi->s_sbbase + tmp,
-                                   uspi->s_sbbase + result, locked_page);
+                                   uspi->s_sbbase + result, locked_folio);
                 *err = 0;
                 write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_cpu_to_data_ptr(sb, p, result);
@@ -704,7 +693,7 @@ static u64 ufs_alloccg_block(struct inode *inode,
         struct super_block * sb;
         struct ufs_sb_private_info * uspi;
         struct ufs_cylinder_group * ucg;
-        u64 result, blkno;
+        u64 result;
 
         UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
 
@@ -722,7 +711,7 @@ static u64 ufs_alloccg_block(struct inode *inode,
         /*
          * If the requested block is available, use it.
          */
-        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
+        if (ubh_isblockset(uspi, ucpi, goal)) {
                 result = goal;
                 goto gotit;
         }
@@ -735,22 +724,8 @@ norot:
 gotit:
         if (!try_add_frags(inode, uspi->s_fpb))
                 return 0;
-        blkno = ufs_fragstoblks(result);
-        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
-        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
-                ufs_clusteracct (sb, ucpi, blkno, -1);
-
-        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
-        uspi->cs_total.cs_nbfree--;
-        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
-
-        if (uspi->fs_magic != UFS2_MAGIC) {
-                unsigned cylno = ufs_cbtocylno((unsigned)result);
-
-                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
-                                          ufs_cbtorpos((unsigned)result)), 1);
-                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
-        }
+        ubh_clrblock(uspi, ucpi, result);
+        adjust_free_blocks(sb, ucg, ucpi, result, -1);
 
         UFSD("EXIT, result %llu\n", (unsigned long long)result);
 
@@ -869,12 +844,12 @@ static u64 ufs_bitmap_search(struct super_block *sb,
 }
 
 static void ufs_clusteracct(struct super_block * sb,
-        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
+        struct ufs_cg_private_info * ucpi, unsigned frag, int cnt)
 {
-        struct ufs_sb_private_info * uspi;
+        struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
         int i, start, end, forw, back;
+        unsigned blkno = ufs_fragstoblks(frag);
 
-        uspi = UFS_SB(sb)->s_uspi;
         if (uspi->s_contigsumsize <= 0)
                 return;
 
