author    Linus Torvalds <torvalds@linux-foundation.org>  2017-09-09 13:27:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-09 13:27:51 -0700
commit    66ba772ee3119849fcdd8ac9766c6c25ede4a982 (patch)
tree      5d7b174b1a34f331b96ff1b55e6a78e4bf74c883 /fs/btrfs/compression.c
parent    126e76ffbf78d9e948b641aadb265d16c57f5a3d (diff)
parent    db95c876c568cef951fbbd4c0118cb5386e4bb99 (diff)
Merge branch 'for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "The changes range through all types: cleanups, core changes, sanity
  checks, fixes, other user visible changes, detailed list below:

   - deprecated: user transaction ioctl

   - mount option ssd does not change allocation alignments

   - degraded read-write mount is allowed if all the raid profile
     constraints are met, now based on more accurate check

   - defrag: do not reset compression afterwards; the NOCOMPRESS flag
     can now be overridden by defrag

   - prep work for better extent reference tracking (related to the
     qgroup slowness with balance)

   - prep work for compression heuristics

   - memory allocation reductions (may help latencies on a loaded
     system)

   - better accounting for io waiting states

   - error handling improvements (removed BUGs)

   - added more sanity checks for shared refs

   - fix readdir vs pagefault deadlock under some circumstances

   - fix for 'no-hole' mode, certain combination of compressed and
     inline extents

   - send: fix emission of invalid clone operations

   - fixup file mode if setting acls fail

   - more fixes from fuzzing

   - other cleanups"

* 'for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (104 commits)
  btrfs: submit superblock io with REQ_META and REQ_PRIO
  btrfs: remove unnecessary memory barrier in btrfs_direct_IO
  btrfs: remove superfluous chunk_tree argument from btrfs_alloc_dev_extent
  btrfs: Remove chunk_objectid parameter of btrfs_alloc_dev_extent
  btrfs: pass fs_info to btrfs_del_root instead of tree_root
  Btrfs: add one more sanity check for shared ref type
  Btrfs: remove BUG_ON in __add_tree_block
  Btrfs: remove BUG() in add_data_reference
  Btrfs: remove BUG() in print_extent_item
  Btrfs: remove BUG() in btrfs_extent_inline_ref_size
  Btrfs: convert to use btrfs_get_extent_inline_ref_type
  Btrfs: add a helper to retrive extent inline ref type
  btrfs: scrub: simplify scrub worker initialization
  btrfs: scrub: clean up division in scrub_find_csum
  btrfs: scrub: clean up division in __scrub_mark_bitmap
  btrfs: scrub: use bool for flush_all_writes
  btrfs: preserve i_mode if __btrfs_set_acl() fails
  btrfs: Remove extraneous chunk_objectid variable
  btrfs: Remove chunk_objectid argument from btrfs_make_block_group
  btrfs: Remove extra parentheses from condition in copy_items()
  ...
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--  fs/btrfs/compression.c | 35 +++++++++++++++++++++++++++++++++-
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d2ef9ac2a630..883ecc58fd0d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -825,7 +825,7 @@ static void free_workspace(int type, struct list_head *workspace)
 	int *free_ws = &btrfs_comp_ws[idx].free_ws;
 
 	spin_lock(ws_lock);
-	if (*free_ws < num_online_cpus()) {
+	if (*free_ws <= num_online_cpus()) {
 		list_add(workspace, idle_ws);
 		(*free_ws)++;
 		spin_unlock(ws_lock);
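
The one-character change above relaxes the bound on cached compression
workspaces: with '<', the idle list stopped growing at num_online_cpus();
with '<=', one extra workspace can stay cached instead of being freed and
reallocated under load. As a minimal user-space sketch of the same bounded
free-list pattern (all names here - ws_cache, ws_get, ws_put, limit - are
hypothetical stand-ins, not the btrfs API):

	#include <pthread.h>
	#include <stdlib.h>

	struct workspace {
		struct workspace *next;
		void *buf;               /* scratch memory for (de)compression */
	};

	struct ws_cache {
		pthread_mutex_t lock;
		struct workspace *idle;  /* singly linked idle list */
		int free_ws;             /* number of cached idle workspaces */
		int limit;               /* plays the role of num_online_cpus() */
	};

	/* Return a workspace to the cache, or destroy it if the cache is
	 * full. The "<=" mirrors the kernel change above: the idle list may
	 * grow one entry past the limit. */
	static void ws_put(struct ws_cache *c, struct workspace *ws)
	{
		pthread_mutex_lock(&c->lock);
		if (c->free_ws <= c->limit) {
			ws->next = c->idle;
			c->idle = ws;
			c->free_ws++;
			pthread_mutex_unlock(&c->lock);
			return;
		}
		pthread_mutex_unlock(&c->lock);
		free(ws->buf);
		free(ws);
	}

	/* Take a cached workspace if one is idle; the caller allocates a
	 * fresh one when NULL is returned. */
	static struct workspace *ws_get(struct ws_cache *c)
	{
		struct workspace *ws = NULL;

		pthread_mutex_lock(&c->lock);
		if (c->idle) {
			ws = c->idle;
			c->idle = ws->next;
			c->free_ws--;
		}
		pthread_mutex_unlock(&c->lock);
		return ws;
	}
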
@@ -1047,3 +1047,36 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 
 	return 1;
 }
+
+/*
+ * Compression heuristic.
+ *
+ * For now it's a naive and optimistic 'return true', we'll extend the logic to
+ * quickly (compared to direct compression) detect data characteristics
+ * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
+ * data.
+ *
+ * The following types of analysis can be performed:
+ * - detect mostly zero data
+ * - detect data with low "byte set" size (text, etc)
+ * - detect data with low/high "core byte" set
+ *
+ * Return non-zero if the compression should be done, 0 otherwise.
+ */
+int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
+{
+	u64 index = start >> PAGE_SHIFT;
+	u64 end_index = end >> PAGE_SHIFT;
+	struct page *page;
+	int ret = 1;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		kmap(page);
+		kunmap(page);
+		put_page(page);
+		index++;
+	}
+
+	return ret;
+}
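
To make the comment's list of planned analyses concrete, here is a hedged
user-space sketch of two of them: "mostly zero data" detection and a small
"byte set" size check. This is not the heuristic that the kernel eventually
implements; the function names and thresholds are illustrative assumptions
only:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Sketch of "detect mostly zero data": long zero runs compress
	 * extremely well. The 90% cutoff is an assumed threshold. */
	static bool mostly_zero(const uint8_t *buf, size_t len)
	{
		size_t zeros = 0;

		for (size_t i = 0; i < len; i++)
			if (buf[i] == 0)
				zeros++;
		return zeros * 10 >= len * 9;
	}

	/* Sketch of "detect data with low byte set size": data drawn from a
	 * small alphabet (text, logs, config files) tends to compress well.
	 * The cutoff of 64 distinct byte values is an assumed threshold. */
	static bool small_byte_set(const uint8_t *buf, size_t len)
	{
		bool seen[256] = { false };
		size_t distinct = 0;

		for (size_t i = 0; i < len; i++) {
			if (!seen[buf[i]]) {
				seen[buf[i]] = true;
				if (++distinct > 64)
					return false;
			}
		}
		return true;
	}

Either check firing would map to the "return non-zero" case of
btrfs_compress_heuristic(); sampling a few bytes per page rather than
scanning whole extents keeps such a heuristic cheap compared to simply
attempting the compression.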