Diffstat (limited to 'fs/btrfs/lzo.c')
-rw-r--r-- | fs/btrfs/lzo.c | 147
1 file changed, 86 insertions(+), 61 deletions(-)
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1c396ac167aa..4758f66da449 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -58,9 +58,6 @@
  *     0x1000   | SegHdr N+1| Data payload N+1 ...                |
  */
 
-#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
-#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
-
 struct workspace {
 	void *mem;
 	void *buf;	/* where decompressed data goes */
@@ -68,7 +65,14 @@ struct workspace {
 	struct list_head list;
 };
 
-static struct workspace_manager wsm;
+static u32 workspace_buf_length(const struct btrfs_fs_info *fs_info)
+{
+	return lzo1x_worst_compress(fs_info->sectorsize);
+}
+static u32 workspace_cbuf_length(const struct btrfs_fs_info *fs_info)
+{
+	return lzo1x_worst_compress(fs_info->sectorsize);
+}
 
 void lzo_free_workspace(struct list_head *ws)
 {
@@ -80,7 +84,7 @@ void lzo_free_workspace(struct list_head *ws)
 	kfree(workspace);
 }
 
-struct list_head *lzo_alloc_workspace(unsigned int level)
+struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info)
 {
 	struct workspace *workspace;
 
@@ -89,8 +93,8 @@ struct list_head *lzo_alloc_workspace(unsigned int level)
 		return ERR_PTR(-ENOMEM);
 
 	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
-	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
-	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
+	workspace->buf = kvmalloc(workspace_buf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
+	workspace->cbuf = kvmalloc(workspace_cbuf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
 	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
 		goto fail;
 
@@ -128,19 +132,21 @@ static inline size_t read_compress_length(const char *buf)
  *
  * Will allocate new pages when needed.
  */
-static int copy_compressed_data_to_page(char *compressed_data,
+static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
+					char *compressed_data,
 					size_t compressed_size,
 					struct folio **out_folios,
 					unsigned long max_nr_folio,
-					u32 *cur_out,
-					const u32 sectorsize)
+					u32 *cur_out)
 {
+	const u32 sectorsize = fs_info->sectorsize;
+	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
 	u32 sector_bytes_left;
 	u32 orig_out;
 	struct folio *cur_folio;
 	char *kaddr;
 
-	if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
+	if ((*cur_out >> min_folio_shift) >= max_nr_folio)
 		return -E2BIG;
 
 	/*
@@ -149,18 +155,17 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	 */
 	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
 
-	cur_folio = out_folios[*cur_out / PAGE_SIZE];
+	cur_folio = out_folios[*cur_out >> min_folio_shift];
 	/* Allocate a new page */
 	if (!cur_folio) {
-		cur_folio = btrfs_alloc_compr_folio();
+		cur_folio = btrfs_alloc_compr_folio(fs_info);
 		if (!cur_folio)
 			return -ENOMEM;
-		out_folios[*cur_out / PAGE_SIZE] = cur_folio;
+		out_folios[*cur_out >> min_folio_shift] = cur_folio;
 	}
 
-	kaddr = kmap_local_folio(cur_folio, 0);
-	write_compress_length(kaddr + offset_in_page(*cur_out),
-			      compressed_size);
+	kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));
+	write_compress_length(kaddr, compressed_size);
 	*cur_out += LZO_LEN;
 
 	orig_out = *cur_out;
@@ -172,20 +177,20 @@ static int copy_compressed_data_to_page(char *compressed_data,
 
 		kunmap_local(kaddr);
 
-		if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
+		if ((*cur_out >> min_folio_shift) >= max_nr_folio)
 			return -E2BIG;
 
-		cur_folio = out_folios[*cur_out / PAGE_SIZE];
+		cur_folio = out_folios[*cur_out >> min_folio_shift];
 		/* Allocate a new page */
 		if (!cur_folio) {
-			cur_folio = btrfs_alloc_compr_folio();
+			cur_folio = btrfs_alloc_compr_folio(fs_info);
 			if (!cur_folio)
 				return -ENOMEM;
-			out_folios[*cur_out / PAGE_SIZE] = cur_folio;
+			out_folios[*cur_out >> min_folio_shift] = cur_folio;
 		}
 		kaddr = kmap_local_folio(cur_folio, 0);
 
-		memcpy(kaddr + offset_in_page(*cur_out),
+		memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),
 		       compressed_data + *cur_out - orig_out, copy_len);
 
 		*cur_out += copy_len;
@@ -209,12 +214,15 @@ out:
 	return 0;
 }
 
-int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
 			u64 start, struct folio **folios, unsigned long *out_folios,
 			unsigned long *total_in, unsigned long *total_out)
 {
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
-	const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize;
+	const u32 sectorsize = fs_info->sectorsize;
+	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+	struct address_space *mapping = inode->vfs_inode.i_mapping;
 	struct folio *folio_in = NULL;
 	char *sizes_ptr;
 	const unsigned long max_nr_folio = *out_folios;
@@ -252,21 +260,20 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
 		/* Compress at most one sector of data each time */
 		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
 		ASSERT(in_len);
-		data_in = kmap_local_folio(folio_in, 0);
-		ret = lzo1x_1_compress(data_in +
-				       offset_in_page(cur_in), in_len,
+		data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
+		ret = lzo1x_1_compress(data_in, in_len,
 				       workspace->cbuf, &out_len,
 				       workspace->mem);
 		kunmap_local(data_in);
-		if (ret < 0) {
-			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
+		if (unlikely(ret < 0)) {
+			/* lzo1x_1_compress never fails. */
 			ret = -EIO;
 			goto out;
 		}
 
-		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
+		ret = copy_compressed_data_to_page(fs_info, workspace->cbuf, out_len,
 						   folios, max_nr_folio,
-						   &cur_out, sectorsize);
+						   &cur_out);
 		if (ret < 0)
 			goto out;
 
@@ -281,8 +288,8 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
 			goto out;
 		}
 
-		/* Check if we have reached page boundary */
-		if (PAGE_ALIGNED(cur_in)) {
+		/* Check if we have reached folio boundary. */
+		if (IS_ALIGNED(cur_in, min_folio_size)) {
 			folio_put(folio_in);
 			folio_in = NULL;
 		}
@@ -299,7 +306,7 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
 out:
 	if (folio_in)
 		folio_put(folio_in);
-	*out_folios = DIV_ROUND_UP(cur_out, PAGE_SIZE);
+	*out_folios = DIV_ROUND_UP(cur_out, min_folio_size);
 	return ret;
 }
 
@@ -311,15 +318,16 @@ out:
 static void copy_compressed_segment(struct compressed_bio *cb,
 				    char *dest, u32 len, u32 *cur_in)
 {
+	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
+	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
 	u32 orig_in = *cur_in;
 
 	while (*cur_in < orig_in + len) {
-		struct folio *cur_folio;
-		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
-					  orig_in + len - *cur_in);
+		struct folio *cur_folio = cb->compressed_folios[*cur_in >> min_folio_shift];
+		u32 copy_len = min_t(u32, orig_in + len - *cur_in,
+				     folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
 
 		ASSERT(copy_len);
-		cur_folio = cb->compressed_folios[*cur_in / PAGE_SIZE];
 
 		memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
 				  offset_in_folio(cur_folio, *cur_in), copy_len);
@@ -333,6 +341,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
 	const u32 sectorsize = fs_info->sectorsize;
+	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
 	char *kaddr;
 	int ret;
 	/* Compressed data length, can be unaligned */
@@ -354,11 +363,14 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	 * and all sectors should be used.
 	 * If this happens, it means the compressed extent is corrupted.
 	 */
-	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
-	    round_up(len_in, sectorsize) < cb->compressed_len) {
+	if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
+		     round_up(len_in, sectorsize) < cb->compressed_len)) {
+		struct btrfs_inode *inode = cb->bbio.inode;
+
 		btrfs_err(fs_info,
-			"invalid lzo header, lzo len %u compressed len %u",
-			len_in, cb->compressed_len);
+"lzo header invalid, root %llu inode %llu offset %llu lzo len %u compressed len %u",
+			  btrfs_root_id(inode->root), btrfs_ino(inode),
+			  cb->start, len_in, cb->compressed_len);
 		return -EUCLEAN;
 	}
 
@@ -376,20 +388,24 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		 */
 		ASSERT(cur_in / sectorsize == (cur_in + LZO_LEN - 1) / sectorsize);
 
-		cur_folio = cb->compressed_folios[cur_in / PAGE_SIZE];
+		cur_folio = cb->compressed_folios[cur_in >> min_folio_shift];
 		ASSERT(cur_folio);
 		kaddr = kmap_local_folio(cur_folio, 0);
-		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
+		seg_len = read_compress_length(kaddr + offset_in_folio(cur_folio, cur_in));
 		kunmap_local(kaddr);
 		cur_in += LZO_LEN;
 
-		if (seg_len > WORKSPACE_CBUF_LENGTH) {
+		if (unlikely(seg_len > workspace_cbuf_length(fs_info))) {
+			struct btrfs_inode *inode = cb->bbio.inode;
+
 			/*
 			 * seg_len shouldn't be larger than we have allocated
 			 * for workspace->cbuf
 			 */
-			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
-					seg_len);
+			btrfs_err(fs_info,
+			"lzo segment too big, root %llu inode %llu offset %llu len %u",
+				  btrfs_root_id(inode->root), btrfs_ino(inode),
+				  cb->start, seg_len);
 			return -EIO;
 		}
 
@@ -399,8 +415,13 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		/* Decompress the data */
 		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
 					    workspace->buf, &out_len);
-		if (ret != LZO_E_OK) {
-			btrfs_err(fs_info, "failed to decompress");
+		if (unlikely(ret != LZO_E_OK)) {
+			struct btrfs_inode *inode = cb->bbio.inode;
+
+			btrfs_err(fs_info,
+		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
+				  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+				  cb->start);
 			return -EIO;
 		}
 
@@ -426,27 +447,27 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 }
 
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
 		size_t destlen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
-	struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
+	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
 	const u32 sectorsize = fs_info->sectorsize;
 	size_t in_len;
 	size_t out_len;
-	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
+	size_t max_segment_len = workspace_buf_length(fs_info);
 	int ret = 0;
 
-	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
+	if (unlikely(srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2))
 		return -EUCLEAN;
 
 	in_len = read_compress_length(data_in);
-	if (in_len != srclen)
+	if (unlikely(in_len != srclen))
 		return -EUCLEAN;
 	data_in += LZO_LEN;
 
 	in_len = read_compress_length(data_in);
-	if (in_len != srclen - LZO_LEN * 2) {
+	if (unlikely(in_len != srclen - LZO_LEN * 2)) {
 		ret = -EUCLEAN;
 		goto out;
 	}
@@ -454,25 +475,29 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
 	out_len = sectorsize;
 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
-	if (ret != LZO_E_OK) {
-		pr_warn("BTRFS: decompress failed!\n");
+	if (unlikely(ret != LZO_E_OK)) {
+		struct btrfs_inode *inode = folio_to_inode(dest_folio);
+
+		btrfs_err(fs_info,
+		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
+			  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+			  folio_pos(dest_folio));
 		ret = -EIO;
 		goto out;
 	}
 
 	ASSERT(out_len <= sectorsize);
-	memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+	memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, out_len);
 
 	/* Early end, considered as an error. */
 	if (unlikely(out_len < destlen)) {
 		ret = -EIO;
-		memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
+		folio_zero_range(dest_folio, dest_pgoff + out_len, destlen - out_len);
 	}
 out:
 	return ret;
 }
 
-const struct btrfs_compress_op btrfs_lzo_compress = {
-	.workspace_manager	= &wsm,
+const struct btrfs_compress_levels btrfs_lzo_compress = {
 	.max_level		= 1,
 	.default_level		= 1,
 };
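The recurring change in this patch is replacing PAGE_SIZE-based addressing of the compressed folio array (*cur_out / PAGE_SIZE, offset_in_page()) with *cur_out >> min_folio_shift and offset_in_folio(), where min_folio_shift = PAGE_SHIFT + fs_info->block_min_order, and sizing the workspace buffers from fs_info->sectorsize instead of PAGE_SIZE. The small userspace sketch below only illustrates that arithmetic; the 4K PAGE_SHIFT, the example block_min_order, and the x + x/16 + 64 + 3 worst-case bound are stand-ins for the kernel's lzo1x_worst_compress() and folio helpers, not code from this diff.

/*
 * Standalone sketch of the offset math used in this patch. Assumptions:
 * 4K base pages (PAGE_SHIFT = 12) and the common LZO1X worst-case bound
 * x + x/16 + 64 + 3; both stand in for the kernel definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12u

/* Worst-case LZO1X output size for an x-byte input (assumed formula). */
static unsigned int lzo1x_worst(unsigned int x)
{
        return x + x / 16 + 64 + 3;
}

/* Folio index for a byte offset, replacing "offset / PAGE_SIZE". */
static unsigned int folio_index(unsigned int offset, unsigned int block_min_order)
{
        unsigned int min_folio_shift = PAGE_SHIFT + block_min_order;

        return offset >> min_folio_shift;
}

/*
 * Offset inside a minimum-size folio, analogous to what offset_in_folio()
 * returns for such a folio.
 */
static unsigned int folio_offset(unsigned int offset, unsigned int block_min_order)
{
        unsigned int min_folio_shift = PAGE_SHIFT + block_min_order;

        return offset & ((1u << min_folio_shift) - 1);
}

int main(void)
{
        /* Example: 16K sectors backed by 16K minimum folios (order 2). */
        unsigned int sectorsize = 16384;
        unsigned int block_min_order = 2;
        unsigned int cur_out = 0x5004;

        printf("workspace buf length: %u bytes\n", lzo1x_worst(sectorsize));
        printf("offset 0x%x -> folio %u, in-folio offset 0x%x\n",
               cur_out, folio_index(cur_out, block_min_order),
               folio_offset(cur_out, block_min_order));
        return 0;
}

With block_min_order = 0 the index and offset reduce to the old per-page behaviour, which is why the shift-based form can replace the PAGE_SIZE division throughout the file.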
