Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/compression.c  |  11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7034cf2749e6..3b069f02903b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -775,6 +775,7 @@ const struct btrfs_compress_op btrfs_heuristic_compress = {
 };
 
 struct workspace_manager {
+        const struct btrfs_compress_op *ops;
         struct list_head idle_ws;
         spinlock_t ws_lock;
         /* Number of free workspaces */
@@ -801,6 +802,8 @@ void __init btrfs_init_compress(void)
         int i;
 
         for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
+                wsm[i].ops = btrfs_compress_op[i];
+
                 INIT_LIST_HEAD(&wsm[i].idle_ws);
                 spin_lock_init(&wsm[i].ws_lock);
                 atomic_set(&wsm[i].total_ws, 0);
@@ -810,7 +813,7 @@ void __init btrfs_init_compress(void)
                  * Preallocate one workspace for each compression type so
                  * we can guarantee forward progress in the worst case
                  */
-                workspace = btrfs_compress_op[i]->alloc_workspace();
+                workspace = wsm[i].ops->alloc_workspace();
                 if (IS_ERR(workspace)) {
                         pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
                 } else {
@@ -873,7 +876,7 @@ again:
          * context of btrfs_compress_bio/btrfs_compress_pages
          */
         nofs_flag = memalloc_nofs_save();
-        workspace = btrfs_compress_op[type]->alloc_workspace();
+        workspace = wsm[type].ops->alloc_workspace();
         memalloc_nofs_restore(nofs_flag);
 
         if (IS_ERR(workspace)) {
@@ -931,7 +934,7 @@ static void free_workspace(int type, struct list_head *workspace)
         }
         spin_unlock(ws_lock);
 
-        btrfs_compress_op[type]->free_workspace(workspace);
+        wsm[type].ops->free_workspace(workspace);
         atomic_dec(total_ws);
 wake:
         cond_wake_up(ws_wait);
@@ -949,7 +952,7 @@ static void free_workspaces(void)
                 while (!list_empty(&wsm[i].idle_ws)) {
                         workspace = wsm[i].idle_ws.next;
                         list_del(workspace);
-                        btrfs_compress_op[i]->free_workspace(workspace);
+                        wsm[i].ops->free_workspace(workspace);
                         atomic_dec(&wsm[i].total_ws);
                 }
         }
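
For readers less familiar with the kernel source, the sketch below is an illustrative, self-contained userspace C analogue of the pattern this patch applies. It is not part of the patch, and every demo_* name is invented for the example: each workspace manager caches a pointer to its compression ops table once at init time, and later alloc/free paths dispatch through that cached pointer instead of re-indexing the global ops array each time.

/*
 * Illustrative userspace sketch (not kernel code): cache the per-type ops
 * pointer in the manager at init, then dispatch alloc/free through it.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_compress_op {
        void *(*alloc_workspace)(void);
        void (*free_workspace)(void *ws);
        const char *name;
};

static void *zlib_alloc(void) { return malloc(64); }
static void zlib_free(void *ws) { free(ws); }
static void *lzo_alloc(void) { return malloc(32); }
static void lzo_free(void *ws) { free(ws); }

static const struct demo_compress_op demo_zlib_ops = {
        .alloc_workspace = zlib_alloc,
        .free_workspace = zlib_free,
        .name = "zlib",
};
static const struct demo_compress_op demo_lzo_ops = {
        .alloc_workspace = lzo_alloc,
        .free_workspace = lzo_free,
        .name = "lzo",
};

/* Analogue of btrfs_compress_op[]: one ops table per compression type. */
static const struct demo_compress_op *demo_compress_op[] = {
        &demo_zlib_ops,
        &demo_lzo_ops,
};
#define DEMO_NR_MANAGERS (sizeof(demo_compress_op) / sizeof(demo_compress_op[0]))

/* Analogue of struct workspace_manager with the newly added ops member. */
struct demo_workspace_manager {
        const struct demo_compress_op *ops;
        void *prealloc_ws;
};

static struct demo_workspace_manager demo_wsm[DEMO_NR_MANAGERS];

static void demo_init(void)
{
        size_t i;

        for (i = 0; i < DEMO_NR_MANAGERS; i++) {
                /* Cache the ops pointer once at init time... */
                demo_wsm[i].ops = demo_compress_op[i];
                /* ...and use it for every later call instead of the array. */
                demo_wsm[i].prealloc_ws = demo_wsm[i].ops->alloc_workspace();
        }
}

static void demo_exit(void)
{
        size_t i;

        for (i = 0; i < DEMO_NR_MANAGERS; i++)
                demo_wsm[i].ops->free_workspace(demo_wsm[i].prealloc_ws);
}

int main(void)
{
        demo_init();
        for (size_t i = 0; i < DEMO_NR_MANAGERS; i++)
                printf("manager %zu uses %s ops\n", i, demo_wsm[i].ops->name);
        demo_exit();
        return 0;
}

The sketch builds with any C compiler and simply prints which ops table each manager ended up bound to; the point is the single assignment in demo_init(), which mirrors the wsm[i].ops = btrfs_compress_op[i] line added by the patch.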