Diffstat (limited to 'fs/hfsplus/btree.c')
 fs/hfsplus/btree.c | 221
 1 file changed, 177 insertions, 44 deletions
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 0c6540c91167..229f25dc7c49 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * linux/fs/hfsplus/btree.c
  *
@@ -15,6 +16,118 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
+/*
+ * Initial source code of clump size calculation is gotten
+ * from http://opensource.apple.com/tarballs/diskdev_cmds/
+ */
+#define CLUMP_ENTRIES 15
+
+static short clumptbl[CLUMP_ENTRIES * 3] = {
+/*
+ *          Volume      Attributes       Catalog        Extents
+ *           Size       Clump (MB)      Clump (MB)     Clump (MB)
+ */
+        /*   1GB */        4,              4,              4,
+        /*   2GB */        6,              6,              4,
+        /*   4GB */        8,              8,              4,
+        /*   8GB */       11,             11,              5,
+        /*
+         * For volumes 16GB and larger, we want to make sure that a full OS
+         * install won't require fragmentation of the Catalog or Attributes
+         * B-trees. We do this by making the clump sizes sufficiently large,
+         * and by leaving a gap after the B-trees for them to grow into.
+         *
+         * For SnowLeopard 10A298, a FullNetInstall with all packages selected
+         * results in:
+         * Catalog B-tree Header
+         *      nodeSize:          8192
+         *      totalNodes:       31616
+         *      freeNodes:         1978
+         * (used = 231.55 MB)
+         * Attributes B-tree Header
+         *      nodeSize:          8192
+         *      totalNodes:       63232
+         *      freeNodes:          958
+         * (used = 486.52 MB)
+         *
+         * We also want Time Machine backup volumes to have a sufficiently
+         * large clump size to reduce fragmentation.
+         *
+         * The series of numbers for Catalog and Attribute form a geometric
+         * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
+         * the previous term. For Attributes (16GB to 512GB), each term is
+         * 4**(1/5) times the previous term. For 1TB to 16TB, each term is
+         * 2**(1/5) times the previous term.
+         */
+        /*  16GB */       64,             32,              5,
+        /*  32GB */       84,             49,              6,
+        /*  64GB */      111,             74,              7,
+        /* 128GB */      147,            111,              8,
+        /* 256GB */      194,            169,              9,
+        /* 512GB */      256,            256,             11,
+        /*   1TB */      294,            294,             14,
+        /*   2TB */      338,            338,             16,
+        /*   4TB */      388,            388,             20,
+        /*   8TB */      446,            446,             25,
+        /*  16TB */      512,            512,             32
+};
+
+u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
+                                  u64 sectors, int file_id)
+{
+        u32 mod = max(node_size, block_size);
+        u32 clump_size;
+        int column;
+        int i;
+
+        /* Figure out which column of the above table to use for this file. */
+        switch (file_id) {
+        case HFSPLUS_ATTR_CNID:
+                column = 0;
+                break;
+        case HFSPLUS_CAT_CNID:
+                column = 1;
+                break;
+        default:
+                column = 2;
+                break;
+        }
+
+        /*
+         * The default clump size is 0.8% of the volume size. And
+         * it must also be a multiple of the node and block size.
+         */
+        if (sectors < 0x200000) {
+                clump_size = sectors << 2;      /* 0.8 % */
+                if (clump_size < (8 * node_size))
+                        clump_size = 8 * node_size;
+        } else {
+                /* turn exponent into table index... */
+                for (i = 0, sectors = sectors >> 22;
+                     sectors && (i < CLUMP_ENTRIES - 1);
+                     ++i, sectors = sectors >> 1) {
+                        /* empty body */
+                }
+
+                clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
+        }
+
+        /*
+         * Round the clump size to a multiple of node and block size.
+         * NOTE: This rounds down.
+         */
+        clump_size /= mod;
+        clump_size *= mod;
+
+        /*
+         * Rounding down could have rounded down to 0 if the block size was
+         * greater than the clump size. If so, just use one block or node.
+         */
+        if (clump_size == 0)
+                clump_size = mod;
+
+        return clump_size;
+}
 /* Get a reference to a B*Tree and do some initial checks */
 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 {
@@ -50,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
                goto free_inode;
 
        /* Load the header */
-       head = (struct hfs_btree_header_rec *)(kmap(page) +
+       head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
                        sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
@@ -124,15 +237,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        tree->node_size_shift = ffs(size) - 1;
 
        tree->pages_per_bnode =
-               (tree->node_size + PAGE_CACHE_SIZE - 1) >>
-               PAGE_CACHE_SHIFT;
+               (tree->node_size + PAGE_SIZE - 1) >>
+               PAGE_SHIFT;
 
-       kunmap(page);
-       page_cache_release(page);
+       kunmap_local(head);
+       put_page(page);
        return tree;
 
 fail_page:
-       page_cache_release(page);
+       kunmap_local(head);
+       put_page(page);
 free_inode:
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
@@ -178,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree)
                return -EIO;
        /* Load the header */
        page = node->page[0];
-       head = (struct hfs_btree_header_rec *)(kmap(page) +
+       head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
                        sizeof(struct hfs_bnode_desc));
 
        head->root = cpu_to_be32(tree->root);
@@ -190,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree)
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);
 
-       kunmap(page);
+       kunmap_local(head);
        set_page_dirty(page);
        hfs_bnode_put(node);
        return 0;
@@ -229,26 +343,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
        return node;
 }
 
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
 {
-       struct hfs_bnode *node, *next_node;
-       struct page **pagep;
-       u32 nidx, idx;
-       unsigned off;
-       u16 off16;
-       u16 len;
-       u8 *data, byte, m;
-       int i;
+       struct inode *inode = tree->inode;
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+       u32 count;
+       int res;
 
-       while (!tree->free_nodes) {
-               struct inode *inode = tree->inode;
-               struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
-               u32 count;
-               int res;
+       if (rsvd_nodes <= 0)
+               return 0;
 
-               res = hfsplus_file_extend(inode);
+       while (tree->free_nodes < rsvd_nodes) {
+               res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
                if (res)
-                       return ERR_PTR(res);
+                       return res;
                hip->phys_size = inode->i_size =
                        (loff_t)hip->alloc_blocks <<
                                HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
@@ -256,9 +365,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                        hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
-               tree->free_nodes = count - tree->node_count;
+               tree->free_nodes += count - tree->node_count;
                tree->node_count = count;
        }
+       return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+       struct hfs_bnode *node, *next_node;
+       struct page **pagep;
+       u32 nidx, idx;
+       unsigned off;
+       u16 off16;
+       u16 len;
+       u8 *data, byte, m;
+       int i, res;
+
+       res = hfs_bmap_reserve(tree, 1);
+       if (res)
+               return ERR_PTR(res);
 
        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
@@ -267,10 +393,16 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;
 
+       if (!is_bnode_offset_valid(node, off)) {
+               hfs_bnode_put(node);
+               return ERR_PTR(-EIO);
+       }
+       len = check_and_correct_requested_length(node, off, len);
+
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-       data = kmap(*pagep);
-       off &= ~PAGE_CACHE_MASK;
+       pagep = node->page + (off >> PAGE_SHIFT);
+       data = kmap_local_page(*pagep);
+       off &= ~PAGE_MASK;
        idx = 0;
 
        for (;;) {
@@ -282,7 +414,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
-                                               kunmap(*pagep);
+                                               kunmap_local(data);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
@@ -291,18 +423,18 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                        }
                                }
                        }
-                       if (++off >= PAGE_CACHE_SIZE) {
-                               kunmap(*pagep);
-                               data = kmap(*++pagep);
+                       if (++off >= PAGE_SIZE) {
+                               kunmap_local(data);
+                               data = kmap_local_page(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
-               kunmap(*pagep);
+               kunmap_local(data);
                nidx = node->next;
                if (!nidx) {
-                       hfs_dbg(BNODE_MOD, "create new bmap node\n");
+                       hfs_dbg("create new bmap node\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
@@ -314,9 +446,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
-               pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-               data = kmap(*pagep);
-               off &= ~PAGE_CACHE_MASK;
+               pagep = node->page + (off >> PAGE_SHIFT);
+               data = kmap_local_page(*pagep);
+               off &= ~PAGE_MASK;
        }
 }
 
@@ -328,7 +460,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
        u32 nidx;
        u8 *data, byte, m;
 
-       hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+       hfs_dbg("node %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
@@ -341,14 +473,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
                nidx -= len * 8;
                i = node->next;
-               hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. "
                                        "bmap not found!\n",
                                node->this);
+                       hfs_bnode_put(node);
                        return;
                }
+               hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
@@ -363,22 +496,22 @@ void hfs_bmap_free(struct hfs_bnode *node)
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
-       page = node->page[off >> PAGE_CACHE_SHIFT];
-       data = kmap(page);
-       off &= ~PAGE_CACHE_MASK;
+       page = node->page[off >> PAGE_SHIFT];
+       data = kmap_local_page(page);
+       off &= ~PAGE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                pr_crit("trying to free free bnode "
                                "%u(%d)\n",
                        node->this, node->type);
-               kunmap(page);
+               kunmap_local(data);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
-       kunmap(page);
+       kunmap_local(data);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
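
Note: the clump-size helper added above can be exercised on its own. The following is a minimal userspace sketch, not the kernel code: it restates the table lookup and rounding with plain C types, the column argument stands in for the HFSPLUS_ATTR_CNID/HFSPLUS_CAT_CNID switch, and the volume size is assumed to be given in 512-byte sectors (the sectors < 0x200000 cutoff is then exactly 1 GB). For a 2 TB volume with 8 KB catalog nodes and 4 KB allocation blocks it lands on the 2TB row, i.e. a 338 MB catalog clump, which is already a multiple of max(node_size, block_size).

#include <stdio.h>
#include <stdint.h>

#define CLUMP_ENTRIES 15

/* Same table as in the patch: attributes, catalog, extents clump sizes in MB. */
static const short clumptbl[CLUMP_ENTRIES * 3] = {
        /*   1GB */   4,   4,  4,
        /*   2GB */   6,   6,  4,
        /*   4GB */   8,   8,  4,
        /*   8GB */  11,  11,  5,
        /*  16GB */  64,  32,  5,
        /*  32GB */  84,  49,  6,
        /*  64GB */ 111,  74,  7,
        /* 128GB */ 147, 111,  8,
        /* 256GB */ 194, 169,  9,
        /* 512GB */ 256, 256, 11,
        /*   1TB */ 294, 294, 14,
        /*   2TB */ 338, 338, 16,
        /*   4TB */ 388, 388, 20,
        /*   8TB */ 446, 446, 25,
        /*  16TB */ 512, 512, 32
};

/* Userspace restatement of the lookup; column 0 = attributes, 1 = catalog, 2 = extents. */
static uint32_t calc_clump_size(uint32_t block_size, uint32_t node_size,
                                uint64_t sectors, int column)
{
        uint32_t mod = node_size > block_size ? node_size : block_size;
        uint32_t clump_size;
        int i;

        if (sectors < 0x200000) {
                /* Small volume: 0.8% of the volume size, but at least 8 nodes. */
                clump_size = (uint32_t)(sectors << 2);
                if (clump_size < 8 * node_size)
                        clump_size = 8 * node_size;
        } else {
                /* Turn the size exponent into a table row: 16GB -> row 4, 2TB -> row 11, ... */
                for (i = 0, sectors >>= 22;
                     sectors && i < CLUMP_ENTRIES - 1;
                     ++i, sectors >>= 1)
                        ;
                clump_size = clumptbl[column + i * 3] * 1024 * 1024;
        }

        /* Round down to a multiple of both node and block size... */
        clump_size /= mod;
        clump_size *= mod;
        /* ...but never below one node/block. */
        return clump_size ? clump_size : mod;
}

int main(void)
{
        /* 2 TB volume expressed in 512-byte sectors, 8 KB catalog nodes, 4 KB blocks. */
        uint64_t sectors = (2ULL << 40) / 512;
        uint32_t clump = calc_clump_size(4096, 8192, sectors, 1);

        /* Prints 354418688 bytes, i.e. the 338 MB catalog entry of the 2TB row. */
        printf("catalog clump = %u bytes (%u MB)\n",
               (unsigned int)clump, (unsigned int)(clump >> 20));
        return 0;
}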
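
Note: hfs_bmap_reserve() keeps extending the B-tree file until the map covers at least rsvd_nodes free nodes, so a caller can reserve everything an operation may allocate before it starts modifying the tree; hfs_bmap_alloc() itself now only reserves the single node it is about to hand out. Because that loop can run while some nodes are still free, the accounting becomes "+=" instead of "=", otherwise the nodes that were already free would be dropped each time the file grows. A toy model of just that accounting (the toy_btree type and the fixed 8-node growth step are made up for illustration):

#include <stdio.h>

/* Toy stand-in for struct hfs_btree: only the node accounting is modelled. */
struct toy_btree {
        unsigned int node_count;        /* total nodes backed by the tree file */
        unsigned int free_nodes;        /* nodes not yet handed out */
};

/* Mirrors the reservation loop; "extending the file" just adds 8 nodes here. */
static void toy_bmap_reserve(struct toy_btree *tree, unsigned int rsvd_nodes)
{
        while (tree->free_nodes < rsvd_nodes) {
                unsigned int count = tree->node_count + 8;

                /*
                 * "+=" keeps the nodes that were already free; assigning
                 * "count - node_count" instead would cap free_nodes at the
                 * per-extension increment, and a larger reservation could
                 * never be satisfied in this model.
                 */
                tree->free_nodes += count - tree->node_count;
                tree->node_count = count;
        }
}

int main(void)
{
        struct toy_btree tree = { .node_count = 64, .free_nodes = 3 };

        /* Ask for 10 free nodes: one 8-node extension on top of the 3 left over. */
        toy_bmap_reserve(&tree, 10);
        printf("node_count=%u free_nodes=%u\n", tree.node_count, tree.free_nodes);
        return 0;
}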
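
Note: with the reservation split out, the rest of hfs_bmap_alloc() is the allocation-map walk: scan the map bytes for one that is not 0xff, probe that byte's bits MSB-first (the same orientation hfs_bmap_free() uses via m = 1 << (~nidx & 7)), set the first clear bit and turn its position into a node index. The sketch below shows that scan over a plain byte array; the alloc_map_bit() helper is hypothetical and stands in for the kernel's walk over kmapped node pages and chained map nodes.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Find the first clear bit in the map, MSB-first within each byte, mark it
 * used and return its index; return -1 if every node in the map is taken.
 */
static int alloc_map_bit(uint8_t *map, size_t len)
{
        size_t off;
        int i;

        for (off = 0; off < len; off++) {
                uint8_t byte = map[off];
                uint8_t m;

                if (byte == 0xff)
                        continue;       /* all eight nodes in this byte are in use */

                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                        if (!(byte & m)) {
                                map[off] |= m;          /* mark the node allocated */
                                return (int)(off * 8 + i);
                        }
                }
        }
        return -1;
}

int main(void)
{
        uint8_t map[4];

        memset(map, 0, sizeof(map));
        map[0] = 0xff;          /* nodes 0-7 already allocated */
        map[1] = 0xc0;          /* nodes 8 and 9 allocated */

        /* First clear bit is bit 2 of byte 1, so this prints "allocated node 10". */
        printf("allocated node %d\n", alloc_map_bit(map, sizeof(map)));
        return 0;
}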
