Diffstat (limited to 'fs/hfs/btree.c')
-rw-r--r--	fs/hfs/btree.c	90
1 file changed, 64 insertions, 26 deletions
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 19017d296173..7bc425283d49 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	struct hfs_btree *tree;
 	struct hfs_btree_header_rec *head;
 	struct address_space *mapping;
-	struct page *page;
+	struct folio *folio;
+	struct buffer_head *bh;
 	unsigned int size;
+	u16 dblock;
+	sector_t start_block;
+	loff_t offset;
 
 	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
 	if (!tree)
@@ -38,7 +42,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	tree->inode = iget_locked(sb, id);
 	if (!tree->inode)
 		goto free_tree;
-	BUG_ON(!(tree->inode->i_state & I_NEW));
+	BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW));
 	{
 	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
 	HFS_I(tree->inode)->flags = 0;
@@ -75,12 +79,41 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	unlock_new_inode(tree->inode);
 
 	mapping = tree->inode->i_mapping;
-	page = read_mapping_page(mapping, 0, NULL);
-	if (IS_ERR(page))
+	folio = filemap_grab_folio(mapping, 0);
+	if (IS_ERR(folio))
 		goto free_inode;
 
+	folio_zero_range(folio, 0, folio_size(folio));
+
+	dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0);
+	start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div);
+
+	size = folio_size(folio);
+	offset = 0;
+	while (size > 0) {
+		size_t len;
+
+		bh = sb_bread(sb, start_block);
+		if (!bh) {
+			pr_err("unable to read tree header\n");
+			goto put_folio;
+		}
+
+		len = min_t(size_t, folio_size(folio), sb->s_blocksize);
+		memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize);
+
+		brelse(bh);
+
+		start_block++;
+		offset += len;
+		size -= len;
+	}
+
+	folio_mark_uptodate(folio);
+
 	/* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) +
+					       sizeof(struct hfs_bnode_desc));
 	tree->root = be32_to_cpu(head->root);
 	tree->leaf_count = be32_to_cpu(head->leaf_count);
 	tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -94,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 
 	size = tree->node_size;
 	if (!is_power_of_2(size))
-		goto fail_page;
+		goto fail_folio;
 	if (!tree->node_count)
-		goto fail_page;
+		goto fail_folio;
 	switch (id) {
 	case HFS_EXT_CNID:
 		if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
 			pr_err("invalid extent max_key_len %d\n",
 			       tree->max_key_len);
-			goto fail_page;
+			goto fail_folio;
 		}
 		break;
 	case HFS_CAT_CNID:
 		if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
 			pr_err("invalid catalog max_key_len %d\n",
 			       tree->max_key_len);
-			goto fail_page;
+			goto fail_folio;
 		}
 		break;
 	default:
@@ -119,12 +152,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	tree->node_size_shift = ffs(size) - 1;
 	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	kunmap(page);
-	put_page(page);
+	kunmap_local(head);
+	folio_unlock(folio);
+	folio_put(folio);
 	return tree;
 
-fail_page:
-	put_page(page);
+fail_folio:
+	kunmap_local(head);
+put_folio:
+	folio_unlock(folio);
+	folio_put(folio);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfs_aops;
 	iput(tree->inode);
@@ -169,7 +206,8 @@ void hfs_btree_write(struct hfs_btree *tree)
 		return;
 	/* Load the header */
 	page = node->page[0];
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+					       sizeof(struct hfs_bnode_desc));
 
 	head->root = cpu_to_be32(tree->root);
 	head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -180,7 +218,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	head->attributes = cpu_to_be32(tree->attributes);
 	head->depth = cpu_to_be16(tree->depth);
 
-	kunmap(page);
+	kunmap_local(head);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
 }
@@ -221,7 +259,7 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
 }
 
 /* Make sure @tree has enough space for the @rsvd_nodes */
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
 {
 	struct inode *inode = tree->inode;
 	u32 count;
@@ -268,7 +306,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
 	off += node->page_offset;
 	pagep = node->page + (off >> PAGE_SHIFT);
-	data = kmap(*pagep);
+	data = kmap_local_page(*pagep);
 	off &= ~PAGE_MASK;
 	idx = 0;
 
@@ -281,7 +319,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 						idx += i;
 						data[off] |= m;
 						set_page_dirty(*pagep);
-						kunmap(*pagep);
+						kunmap_local(data);
 						tree->free_nodes--;
 						mark_inode_dirty(tree->inode);
 						hfs_bnode_put(node);
@@ -290,14 +328,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 				}
 			}
 			if (++off >= PAGE_SIZE) {
-				kunmap(*pagep);
-				data = kmap(*++pagep);
+				kunmap_local(data);
+				data = kmap_local_page(*++pagep);
 				off = 0;
 			}
 			idx += 8;
 			len--;
 		}
-		kunmap(*pagep);
+		kunmap_local(data);
 		nidx = node->next;
 		if (!nidx) {
 			printk(KERN_DEBUG "create new bmap node...\n");
@@ -313,7 +351,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		off = off16;
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 	}
 }
@@ -326,7 +364,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	u32 nidx;
 	u8 *data, byte, m;
 
-	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+	hfs_dbg("node %u\n", node->this);
 	tree = node->tree;
 	nidx = node->this;
 	node = hfs_bnode_find(tree, 0);
@@ -360,20 +398,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	}
 	off += node->page_offset + nidx / 8;
 	page = node->page[off >> PAGE_SHIFT];
-	data = kmap(page);
+	data = kmap_local_page(page);
 	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
 		pr_crit("trying to free free bnode %u(%d)\n",
 			node->this, node->type);
-		kunmap(page);
+		kunmap_local(data);
 		hfs_bnode_put(node);
 		return;
 	}
 	data[off] = byte & ~m;
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(data);
 	hfs_bnode_put(node);
 	tree->free_nodes++;
 	mark_inode_dirty(tree->inode);
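
Note on the header-read loop in hfs_btree_open() above: instead of read_mapping_page(), the open path now grabs and zeroes a folio, fills it directly from the buffer cache one filesystem block at a time, and only then marks it uptodate. A minimal standalone sketch of that pattern follows. It is illustrative only, not part of the patch: fill_folio_from_blocks() is a hypothetical helper name, and it caps each copy at the bytes remaining in the folio rather than always copying a full block.

/*
 * Illustrative sketch only: read consecutive disk blocks into a locked
 * folio through the buffer cache, the same pattern hfs_btree_open() now
 * uses for the B-tree header node. Not part of the patch.
 */
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>

static int fill_folio_from_blocks(struct super_block *sb, struct folio *folio,
				  sector_t start_block)
{
	size_t offset = 0;
	size_t remaining = folio_size(folio);

	while (remaining > 0) {
		struct buffer_head *bh = sb_bread(sb, start_block);
		size_t len = min_t(size_t, remaining, sb->s_blocksize);

		if (!bh)
			return -EIO;	/* folio stays locked and !uptodate */

		/* memcpy_to_folio() handles highmem mapping for the copy */
		memcpy_to_folio(folio, offset, bh->b_data, len);
		brelse(bh);

		start_block++;
		offset += len;
		remaining -= len;
	}
	folio_mark_uptodate(folio);	/* contents are now fully valid */
	return 0;
}

On failure this mirrors the patch's put_folio path: the folio is left locked and not uptodate, so the caller is expected to unlock and drop it.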
