summaryrefslogtreecommitdiff
path: root/fs/btrfs
diff options
context:
space:
mode:
authorDennis Zhou <dennis@kernel.org>2019-02-04 15:20:01 -0500
committerDavid Sterba <dsterba@suse.com>2019-02-25 14:13:31 +0100
commit1666edabc8b4c2c521021b2ec2fcf2010a592b48 (patch)
tree01d4b3c0a87124c998dc38ebc77110f4dd5b020f /fs/btrfs
parent10b94a51cafb28d47ee6912248ed698c9ac183be (diff)
btrfs: add helper methods for workspace manager init and cleanup
Workspace manager init and cleanup code is open coded inside a for loop over the compression types. This forces each compression type to rely on the same workspace manager implementation. This patch creates helper methods that will be the generic implementation for btrfs workspace management. Reviewed-by: Nikolay Borisov <nborisov@suse.com> Reviewed-by: Josef Bacik <josef@toxicpanda.com> Signed-off-by: Dennis Zhou <dennis@kernel.org> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/compression.c82
1 file changed, 43 insertions, 39 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3b069f02903b..c24bf6911fea 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -796,31 +796,42 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zstd_compress,
};
-void __init btrfs_init_compress(void)
+static void btrfs_init_workspace_manager(int type)
{
+ struct workspace_manager *wsman = &wsm[type];
struct list_head *workspace;
- int i;
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
- wsm[i].ops = btrfs_compress_op[i];
+ wsman->ops = btrfs_compress_op[type];
- INIT_LIST_HEAD(&wsm[i].idle_ws);
- spin_lock_init(&wsm[i].ws_lock);
- atomic_set(&wsm[i].total_ws, 0);
- init_waitqueue_head(&wsm[i].ws_wait);
+ INIT_LIST_HEAD(&wsman->idle_ws);
+ spin_lock_init(&wsman->ws_lock);
+ atomic_set(&wsman->total_ws, 0);
+ init_waitqueue_head(&wsman->ws_wait);
- /*
- * Preallocate one workspace for each compression type so
- * we can guarantee forward progress in the worst case
- */
- workspace = wsm[i].ops->alloc_workspace();
- if (IS_ERR(workspace)) {
- pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
- } else {
- atomic_set(&wsm[i].total_ws, 1);
- wsm[i].free_ws = 1;
- list_add(workspace, &wsm[i].idle_ws);
- }
+ /*
+ * Preallocate one workspace for each compression type so we can
+ * guarantee forward progress in the worst case
+ */
+ workspace = wsman->ops->alloc_workspace();
+ if (IS_ERR(workspace)) {
+ pr_warn(
+ "BTRFS: cannot preallocate compression workspace, will try later\n");
+ } else {
+ atomic_set(&wsman->total_ws, 1);
+ wsman->free_ws = 1;
+ list_add(workspace, &wsman->idle_ws);
+ }
+}
+
+static void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+{
+ struct list_head *ws;
+
+ while (!list_empty(&wsman->idle_ws)) {
+ ws = wsman->idle_ws.next;
+ list_del(ws);
+ wsman->ops->free_workspace(ws);
+ atomic_dec(&wsman->total_ws);
}
}
@@ -941,24 +952,6 @@ wake:
}
/*
- * cleanup function for module exit
- */
-static void free_workspaces(void)
-{
- struct list_head *workspace;
- int i;
-
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
- while (!list_empty(&wsm[i].idle_ws)) {
- workspace = wsm[i].idle_ws.next;
- list_del(workspace);
- wsm[i].ops->free_workspace(workspace);
- atomic_dec(&wsm[i].total_ws);
- }
- }
-}
-
-/*
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
*
@@ -1050,9 +1043,20 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
return ret;
}
+void __init btrfs_init_compress(void)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+ btrfs_init_workspace_manager(i);
+}
+
void __cold btrfs_exit_compress(void)
{
- free_workspaces();
+ int i;
+
+ for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+ btrfs_cleanup_workspace_manager(&wsm[i]);
}
/*