Diffstat (limited to 'fs/hfsplus/bnode.c')
 fs/hfsplus/bnode.c | 345
 1 file changed, 197 insertions(+), 148 deletions(-)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 11c860204520..191661af9677 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * linux/fs/hfsplus/bnode.c
  *
@@ -17,29 +18,42 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
+
 /* Copy a specified range of bytes from the raw data of a node */
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
 {
 	struct page **pagep;
-	int l;
+	u32 l;
+
+	if (!is_bnode_offset_valid(node, off))
+		return;
+
+	if (len == 0) {
+		pr_err("requested zero length: "
+		       "NODE: id %u, type %#x, height %u, "
+		       "node_size %u, offset %u, len %u\n",
+		       node->this, node->type, node->height,
+		       node->tree->node_size, off, len);
+		return;
+	}
+
+	len = check_and_correct_requested_length(node, off, len);
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
-	memcpy(buf, kmap(*pagep) + off, l);
-	kunmap(*pagep);
+	l = min_t(u32, len, PAGE_SIZE - off);
+	memcpy_from_page(buf, *pagep, off, l);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min(len, (int)PAGE_CACHE_SIZE);
-		memcpy(buf, kmap(*++pagep), l);
-		kunmap(*pagep);
+		l = min_t(u32, len, PAGE_SIZE);
+		memcpy_from_page(buf, *++pagep, 0, l);
 	}
 }
 
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
 {
 	__be16 data;
 	/* TODO: optimize later... */
@@ -47,7 +61,7 @@ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
 	return be16_to_cpu(data);
 }
 
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
 {
 	u8 data;
 	/* TODO: optimize later... */
@@ -55,10 +69,10 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
 	return data;
 }
 
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
 {
 	struct hfs_btree *tree;
-	int key_len;
+	u32 key_len;
 
 	tree = node->tree;
 	if (node->type == HFS_NODE_LEAF ||
@@ -68,113 +82,141 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
 	else
 		key_len = tree->max_key_len + 2;
 
+	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
+		memset(key, 0, sizeof(hfsplus_btree_key));
+		pr_err("hfsplus: Invalid key length: %u\n", key_len);
+		return;
+	}
+
 	hfs_bnode_read(node, key, off, key_len);
 }
 
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
 {
 	struct page **pagep;
-	int l;
+	u32 l;
+
+	if (!is_bnode_offset_valid(node, off))
+		return;
+
+	if (len == 0) {
+		pr_err("requested zero length: "
+		       "NODE: id %u, type %#x, height %u, "
+		       "node_size %u, offset %u, len %u\n",
+		       node->this, node->type, node->height,
+		       node->tree->node_size, off, len);
+		return;
+	}
+
+	len = check_and_correct_requested_length(node, off, len);
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
-	memcpy(kmap(*pagep) + off, buf, l);
+	l = min_t(u32, len, PAGE_SIZE - off);
+	memcpy_to_page(*pagep, off, buf, l);
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min(len, (int)PAGE_CACHE_SIZE);
-		memcpy(kmap(*++pagep), buf, l);
+		l = min_t(u32, len, PAGE_SIZE);
+		memcpy_to_page(*++pagep, 0, buf, l);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 }
 
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
 {
 	__be16 v = cpu_to_be16(data);
 	/* TODO: optimize later... */
 	hfs_bnode_write(node, &v, off, 2);
 }
 
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
 {
 	struct page **pagep;
-	int l;
+	u32 l;
+
+	if (!is_bnode_offset_valid(node, off))
+		return;
+
+	if (len == 0) {
+		pr_err("requested zero length: "
+		       "NODE: id %u, type %#x, height %u, "
+		       "node_size %u, offset %u, len %u\n",
+		       node->this, node->type, node->height,
+		       node->tree->node_size, off, len);
+		return;
+	}
+
+	len = check_and_correct_requested_length(node, off, len);
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
-	memset(kmap(*pagep) + off, 0, l);
+	l = min_t(u32, len, PAGE_SIZE - off);
+	memzero_page(*pagep, off, l);
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
-		l = min(len, (int)PAGE_CACHE_SIZE);
-		memset(kmap(*++pagep), 0, l);
+		l = min_t(u32, len, PAGE_SIZE);
+		memzero_page(*++pagep, 0, l);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 }
 
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
-		    struct hfs_bnode *src_node, int src, int len)
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+		    struct hfs_bnode *src_node, u32 src, u32 len)
 {
-	struct hfs_btree *tree;
 	struct page **src_page, **dst_page;
-	int l;
+	u32 l;
 
-	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
 	if (!len)
 		return;
-	tree = src_node->tree;
+
+	len = check_and_correct_requested_length(src_node, src, len);
+	len = check_and_correct_requested_length(dst_node, dst, len);
+
 	src += src_node->page_offset;
 	dst += dst_node->page_offset;
-	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
-	src &= ~PAGE_CACHE_MASK;
-	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
-	dst &= ~PAGE_CACHE_MASK;
+	src_page = src_node->page + (src >> PAGE_SHIFT);
+	src &= ~PAGE_MASK;
+	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+	dst &= ~PAGE_MASK;
 
 	if (src == dst) {
-		l = min(len, (int)PAGE_CACHE_SIZE - src);
-		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
-		kunmap(*src_page);
+		l = min_t(u32, len, PAGE_SIZE - src);
+		memcpy_page(*dst_page, src, *src_page, src, l);
 		set_page_dirty(*dst_page);
-		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
-			l = min(len, (int)PAGE_CACHE_SIZE);
-			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
-			kunmap(*src_page);
+			l = min_t(u32, len, PAGE_SIZE);
+			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
 		}
 	} else {
 		void *src_ptr, *dst_ptr;
 
 		do {
-			src_ptr = kmap(*src_page) + src;
-			dst_ptr = kmap(*dst_page) + dst;
-			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
-				l = PAGE_CACHE_SIZE - src;
+			dst_ptr = kmap_local_page(*dst_page) + dst;
+			src_ptr = kmap_local_page(*src_page) + src;
+			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+				l = PAGE_SIZE - src;
 				src = 0;
 				dst += l;
 			} else {
-				l = PAGE_CACHE_SIZE - dst;
+				l = PAGE_SIZE - dst;
 				src += l;
 				dst = 0;
 			}
 			l = min(len, l);
 			memcpy(dst_ptr, src_ptr, l);
-			kunmap(*src_page);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 			if (!dst)
 				dst_page++;
 			else
@@ -183,110 +225,117 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 	}
 }
 
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
 {
 	struct page **src_page, **dst_page;
-	int l;
+	void *src_ptr, *dst_ptr;
+	u32 l;
 
-	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
 	if (!len)
 		return;
+
+	len = check_and_correct_requested_length(node, src, len);
+	len = check_and_correct_requested_length(node, dst, len);
+
 	src += node->page_offset;
 	dst += node->page_offset;
 	if (dst > src) {
 		src += len - 1;
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src = (src & ~PAGE_CACHE_MASK) + 1;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src = (src & ~PAGE_MASK) + 1;
 		dst += len - 1;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst = (dst & ~PAGE_CACHE_MASK) + 1;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst = (dst & ~PAGE_MASK) + 1;
 
 		if (src == dst) {
 			while (src < len) {
-				memmove(kmap(*dst_page), kmap(*src_page), src);
-				kunmap(*src_page);
+				dst_ptr = kmap_local_page(*dst_page);
+				src_ptr = kmap_local_page(*src_page);
+				memmove(dst_ptr, src_ptr, src);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 				len -= src;
-				src = PAGE_CACHE_SIZE;
+				src = PAGE_SIZE;
 				src_page--;
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src,
-				kmap(*src_page) + src, len);
-			kunmap(*src_page);
+			dst_ptr = kmap_local_page(*dst_page);
+			src_ptr = kmap_local_page(*src_page);
+			memmove(dst_ptr + src, src_ptr + src, len);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 		} else {
-			void *src_ptr, *dst_ptr;
-
 			do {
-				src_ptr = kmap(*src_page) + src;
-				dst_ptr = kmap(*dst_page) + dst;
+				dst_ptr = kmap_local_page(*dst_page) + dst;
+				src_ptr = kmap_local_page(*src_page) + src;
 				if (src < dst) {
 					l = src;
-					src = PAGE_CACHE_SIZE;
+					src = PAGE_SIZE;
 					dst -= l;
 				} else {
 					l = dst;
 					src -= l;
-					dst = PAGE_CACHE_SIZE;
+					dst = PAGE_SIZE;
 				}
 				l = min(len, l);
 				memmove(dst_ptr - l, src_ptr - l, l);
-				kunmap(*src_page);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
-				if (dst == PAGE_CACHE_SIZE)
+				kunmap_local(dst_ptr);
+				if (dst == PAGE_SIZE)
 					dst_page--;
 				else
 					src_page--;
 			} while ((len -= l));
 		}
 	} else {
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src &= ~PAGE_CACHE_MASK;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst &= ~PAGE_CACHE_MASK;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src &= ~PAGE_MASK;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst &= ~PAGE_MASK;
 
 		if (src == dst) {
-			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src,
-				kmap(*src_page) + src, l);
-			kunmap(*src_page);
+			l = min_t(u32, len, PAGE_SIZE - src);
+
+			dst_ptr = kmap_local_page(*dst_page) + src;
+			src_ptr = kmap_local_page(*src_page) + src;
+			memmove(dst_ptr, src_ptr, l);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 
 			while ((len -= l) != 0) {
-				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page),
-					kmap(*++src_page), l);
-				kunmap(*src_page);
+				l = min_t(u32, len, PAGE_SIZE);
+				dst_ptr = kmap_local_page(*++dst_page);
+				src_ptr = kmap_local_page(*++src_page);
+				memmove(dst_ptr, src_ptr, l);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 			}
 		} else {
-			void *src_ptr, *dst_ptr;
-
 			do {
-				src_ptr = kmap(*src_page) + src;
-				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src <
-						PAGE_CACHE_SIZE - dst) {
-					l = PAGE_CACHE_SIZE - src;
+				dst_ptr = kmap_local_page(*dst_page) + dst;
+				src_ptr = kmap_local_page(*src_page) + src;
+				if (PAGE_SIZE - src <
+				    PAGE_SIZE - dst) {
+					l = PAGE_SIZE - src;
 					src = 0;
 					dst += l;
 				} else {
-					l = PAGE_CACHE_SIZE - dst;
+					l = PAGE_SIZE - dst;
 					src += l;
 					dst = 0;
 				}
 				l = min(len, l);
 				memmove(dst_ptr, src_ptr, l);
-				kunmap(*src_page);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 				if (!dst)
 					dst_page++;
 				else
@@ -302,16 +351,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
 	__be32 cnid;
 	int i, off, key_off;
 
-	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
+	hfs_dbg("node %d\n", node->this);
 	hfs_bnode_read(node, &desc, 0, sizeof(desc));
-	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
+	hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
 		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
 		desc.type, desc.height, be16_to_cpu(desc.num_recs));
 
 	off = node->tree->node_size - 2;
 	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
 		key_off = hfs_bnode_read_u16(node, off);
-		hfs_dbg(BNODE_MOD, " %d", key_off);
+		hfs_dbg(" key_off %d", key_off);
 		if (i && node->type == HFS_NODE_INDEX) {
 			int tmp;
 
@@ -320,17 +369,17 @@ void hfs_bnode_dump(struct hfs_bnode *node)
 				tmp = hfs_bnode_read_u16(node, key_off) + 2;
 			else
 				tmp = node->tree->max_key_len + 2;
-			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
+			hfs_dbg(" (%d", tmp);
 			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
-			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+			hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
 		} else if (i && node->type == HFS_NODE_LEAF) {
 			int tmp;
 
 			tmp = hfs_bnode_read_u16(node, key_off);
-			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
+			hfs_dbg(" (%d)", tmp);
 		}
 	}
-	hfs_dbg_cont(BNODE_MOD, "\n");
+	hfs_dbg("\n");
 }
 
 void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -366,7 +415,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 
 	/* move down? */
 	if (!node->prev && !node->next)
-		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
+		hfs_dbg("btree delete level\n");
 	if (!node->parent) {
 		tree->root = 0;
 		tree->depth = 0;
@@ -386,9 +435,8 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		pr_err("request for non-existent node "
-		       "%d in B*Tree\n",
-		       cnid);
+		pr_err("request for non-existent node %d in B*Tree\n",
+		       cnid);
 		return NULL;
 	}
 
@@ -401,7 +449,6 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 
 static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 {
-	struct super_block *sb;
 	struct hfs_bnode *node, *node2;
 	struct address_space *mapping;
 	struct page *page;
@@ -409,13 +456,11 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		pr_err("request for non-existent node "
-		       "%d in B*Tree\n",
-		       cnid);
+		pr_err("request for non-existent node %d in B*Tree\n",
+		       cnid);
 		return NULL;
 	}
 
-	sb = tree->inode->i_sb;
 	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
 	       sizeof(struct page *);
 	node = kzalloc(size, GFP_KERNEL);
@@ -425,7 +470,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	node->this = cnid;
 	set_bit(HFS_BNODE_NEW, &node->flags);
 	atomic_set(&node->refcnt, 1);
-	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
+	hfs_dbg("cnid %d, node %d, refcnt 1\n",
 		node->tree->cnid, node->this);
 	init_waitqueue_head(&node->lock_wq);
 	spin_lock(&tree->hash_lock);
@@ -436,6 +481,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 		tree->node_hash[hash] = node;
 		tree->node_hash_cnt++;
 	} else {
+		hfs_bnode_get(node2);
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
 		wait_event(node2->lock_wq,
@@ -446,17 +492,12 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid << tree->node_size_shift;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
 		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
-		if (PageError(page)) {
-			page_cache_release(page);
-			goto fail;
-		}
-		page_cache_release(page);
 		node->page[i] = page;
 	}
 
@@ -470,7 +511,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
 {
 	struct hfs_bnode **p;
 
-	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
+	hfs_dbg("cnid %d, node %d, refcnt %d\n",
 		node->tree->cnid, node->this, atomic_read(&node->refcnt));
 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
 	     *p && *p != node; p = &(*p)->next_hash)
@@ -508,14 +549,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 		if (!test_bit(HFS_BNODE_NEW, &node->flags))
 			return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
-			node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
 	node->type = desc->type;
 	node->height = desc->height;
-	kunmap(node->page[0]);
+	kunmap_local(desc);
 
 	switch (node->type) {
 	case HFS_NODE_HEADER:
@@ -568,13 +609,11 @@ node_error:
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-#if 0
 	int i;
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
-#endif
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -601,14 +640,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 	}
 
 	pagep = node->page;
-	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	memzero_page(*pagep, node->page_offset,
+		     min_t(int, PAGE_SIZE, tree->node_size));
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memzero_page(*++pagep, 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 	clear_bit(HFS_BNODE_NEW, &node->flags);
 	wake_up(&node->lock_wq);
@@ -620,7 +657,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
 {
 	if (node) {
 		atomic_inc(&node->refcnt);
-		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
+		hfs_dbg("cnid %d, node %d, refcnt %d\n",
 			node->tree->cnid, node->this,
 			atomic_read(&node->refcnt));
 	}
@@ -633,7 +670,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		struct hfs_btree *tree = node->tree;
 		int i;
 
-		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
+		hfs_dbg("cnid %d, node %d, refcnt %d\n",
 			node->tree->cnid, node->this,
 			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
@@ -648,8 +685,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
 			hfs_bnode_unhash(node);
 			spin_unlock(&tree->hash_lock);
-			hfs_bnode_clear(node, 0,
-				PAGE_CACHE_SIZE * tree->pages_per_bnode);
+			if (hfs_bnode_need_zeroout(tree))
+				hfs_bnode_clear(node, 0, tree->node_size);
 			hfs_bmap_free(node);
 			hfs_bnode_free(node);
 			return;
@@ -658,3 +695,15 @@ void hfs_bnode_put(struct hfs_bnode *node)
 	}
 }
 
+/*
+ * Unused nodes have to be zeroed if this is the catalog tree and
+ * a corresponding flag in the volume header is set.
+ */
+bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
+{
+	struct super_block *sb = tree->inode->i_sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
+
+	return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
+}
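Note: the bounds-check helpers this patch calls, is_bnode_offset_valid() and check_and_correct_requested_length(), are introduced outside fs/hfsplus/bnode.c and do not appear in the hunks above. A minimal sketch of what they are expected to do, assuming static inline helpers in a shared header that follow the pr_err() reporting style used by this patch (the in-tree versions may differ in placement and exact wording):

/* Sketch only: report and reject an offset beyond the end of the node. */
static inline bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
{
	bool is_valid = off < node->tree->node_size;

	if (!is_valid) {
		pr_err("requested invalid offset: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off);
	}

	return is_valid;
}

/* Sketch only: clamp a request so that [off, off + len) stays inside the node. */
static inline u32 check_and_correct_requested_length(struct hfs_bnode *node,
						     u32 off, u32 len)
{
	u32 node_size = node->tree->node_size;

	if (!is_bnode_offset_valid(node, off))
		return 0;

	/* off < node_size here, so node_size - off cannot underflow */
	if (len > node_size - off) {
		u32 new_len = node_size - off;

		pr_err("requested length has been corrected: "
		       "NODE: id %u, type %#x, height %u, node_size %u, "
		       "offset %u, requested_len %u, corrected_len %u\n",
		       node->this, node->type, node->height,
		       node_size, off, len, new_len);
		return new_len;
	}

	return len;
}

With helpers along these lines, a corrupted record offset turns each hfs_bnode_read()/hfs_bnode_write()/hfs_bnode_clear() call into a logged no-op or a shortened copy instead of an out-of-bounds access on the b-tree node, which appears to be the point of this change.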

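For reference, the open-coded kmap()/memcpy()/kunmap() triples are replaced above by the page copy helpers from include/linux/highmem.h. Simplified, they behave as follows (the in-tree versions also carry VM_BUG_ON() checks that the range stays within one page):

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

One detail worth noting in the loops that still map pages by hand: kmap_local_page() mappings must be released in reverse (stack) order, which is why the rewritten code consistently maps dst_page before src_page and then calls kunmap_local(src_ptr) before kunmap_local(dst_ptr).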