Diffstat (limited to 'drivers/block/zram/zcomp.c')
-rw-r--r--  drivers/block/zram/zcomp.c | 257
1 file changed, 257 insertions, 0 deletions
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
new file mode 100644
index 000000000000..b1bd1daa0060
--- /dev/null
+++ b/drivers/block/zram/zcomp.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/cpuhotplug.h>
+#include <linux/vmalloc.h>
+#include <linux/sysfs.h>
+
+#include "zcomp.h"
+
+#include "backend_lzo.h"
+#include "backend_lzorle.h"
+#include "backend_lz4.h"
+#include "backend_lz4hc.h"
+#include "backend_zstd.h"
+#include "backend_deflate.h"
+#include "backend_842.h"
+
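+/* NULL-terminated table of compiled-in compression backends. */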
+static const struct zcomp_ops *backends[] = {
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
+ &backend_lzorle,
+ &backend_lzo,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
+ &backend_lz4,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
+ &backend_lz4hc,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
+ &backend_zstd,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
+ &backend_deflate,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
+ &backend_842,
+#endif
+ NULL
+};
+
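+/* Tear down a per-CPU stream: the backend context and both buffers. */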
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ comp->ops->destroy_ctx(&zstrm->ctx);
+ vfree(zstrm->local_copy);
+ vfree(zstrm->buffer);
+ zstrm->buffer = NULL;
+}
+
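+/* Set up a per-CPU stream: backend context plus working buffers. */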
+static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ int ret;
+
+ ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
+ if (ret)
+ return ret;
+
+ zstrm->local_copy = vzalloc(PAGE_SIZE);
+ /*
+ * Allocate two pages: one for compressed data, plus one extra in case
+ * the compressed size ends up larger than the original page.
+ */
+ zstrm->buffer = vzalloc(2 * PAGE_SIZE);
+ if (!zstrm->buffer || !zstrm->local_copy) {
+ zcomp_strm_free(comp, zstrm);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static const struct zcomp_ops *lookup_backend_ops(const char *comp)
+{
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
+bool zcomp_available_algorithm(const char *comp)
+{
+ return lookup_backend_ops(comp) != NULL;
+}
+
+/* show available compressors */
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
+ if (!strcmp(comp, backends[i]->name)) {
+ at += sysfs_emit_at(buf, at, "[%s] ",
+ backends[i]->name);
+ } else {
+ at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
+ }
+ }
+
+ at += sysfs_emit_at(buf, at, "\n");
+ return at;
+}
+
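+/*
+ * Return the local CPU's stream with its mutex held. Callers must
+ * release it with zcomp_stream_put().
+ */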
+struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
+{
+ for (;;) {
+ struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
+
+ /*
+ * Inspired by zswap
+ *
+ * The stream is returned with its ->lock mutex held, which prevents
+ * zcomp_cpu_dead() from releasing the stream under us. However, there
+ * is still a race window between raw_cpu_ptr() and mutex_lock(),
+ * during which we could have been migrated away from a CPU that has
+ * already destroyed its stream. If so, unlock and retry on the
+ * current CPU.
+ */
+ mutex_lock(&zstrm->lock);
+ if (likely(zstrm->buffer))
+ return zstrm;
+ mutex_unlock(&zstrm->lock);
+ }
+}
+
+void zcomp_stream_put(struct zcomp_strm *zstrm)
+{
+ mutex_unlock(&zstrm->lock);
+}
+
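+/*
+ * Compress one PAGE_SIZE source page into zstrm->buffer. On success the
+ * compressed length is returned via @dst_len.
+ */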
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
+{
+ struct zcomp_req req = {
+ .src = src,
+ .dst = zstrm->buffer,
+ .src_len = PAGE_SIZE,
+ .dst_len = 2 * PAGE_SIZE,
+ };
+ int ret;
+
+ might_sleep();
+ ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
+ if (!ret)
+ *dst_len = req.dst_len;
+ return ret;
+}
+
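+/* Decompress @src_len bytes from @src into the PAGE_SIZE buffer at @dst. */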
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
+{
+ struct zcomp_req req = {
+ .src = src,
+ .dst = dst,
+ .src_len = src_len,
+ .dst_len = PAGE_SIZE,
+ };
+
+ might_sleep();
+ return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
+}
+
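+/*
+ * CPU hotplug callbacks for the CPUHP_ZCOMP_PREPARE state. Instances are
+ * added in zcomp_init() and removed in zcomp_destroy().
+ */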
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
+{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
+ int ret;
+
+ ret = zcomp_strm_init(comp, zstrm);
+ if (ret)
+ pr_err("Can't allocate a compression stream\n");
+ return ret;
+}
+
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
+
+ mutex_lock(&zstrm->lock);
+ zcomp_strm_free(comp, zstrm);
+ mutex_unlock(&zstrm->lock);
+ return 0;
+}
+
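+/*
+ * Allocate per-CPU streams, apply the backend parameters and register
+ * this zcomp instance with the CPU hotplug state machine.
+ */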
+static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
+{
+ int ret, cpu;
+
+ comp->stream = alloc_percpu(struct zcomp_strm);
+ if (!comp->stream)
+ return -ENOMEM;
+
+ comp->params = params;
+ ret = comp->ops->setup_params(comp->params);
+ if (ret)
+ goto cleanup;
+
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
+
+ ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ if (ret < 0)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ comp->ops->release_params(comp->params);
+ free_percpu(comp->stream);
+ return ret;
+}
+
+void zcomp_destroy(struct zcomp *comp)
+{
+ cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ comp->ops->release_params(comp->params);
+ free_percpu(comp->stream);
+ kfree(comp);
+}
+
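+/* Look up @alg among the available backends and set up a zcomp for it. */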
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
+{
+ struct zcomp *comp;
+ int error;
+
+ /*
+ * The backends array has a sentinel NULL value, so its minimum size
+ * is 1. To be valid, the array must contain at least one compression
+ * backend in addition to the NULL sentinel.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
+
+ comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
+
+ comp->ops = lookup_backend_ops(alg);
+ if (!comp->ops) {
+ kfree(comp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ error = zcomp_init(comp, params);
+ if (error) {
+ kfree(comp);
+ return ERR_PTR(error);
+ }
+ return comp;
+}
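
For orientation, below is a minimal caller sketch, not part of the patch, showing how the API above fits together. It assumes a struct zcomp_params already prepared by the caller, a PAGE_SIZE source buffer, that the CPUHP_ZCOMP_PREPARE callbacks (zcomp_cpu_up_prepare()/zcomp_cpu_dead()) have been registered by the driver before zcomp_create() is called, and that the "lzo-rle" backend is enabled; demo_compress_page() is a hypothetical name used only for illustration.

/* Hypothetical caller sketch; assumes "zcomp.h" and <linux/err.h> are included. */
static int demo_compress_page(struct zcomp_params *params, const void *page_src)
{
	struct zcomp *comp;
	struct zcomp_strm *zstrm;
	unsigned int dst_len;
	int err;

	comp = zcomp_create("lzo-rle", params);
	if (IS_ERR(comp))
		return PTR_ERR(comp);

	/* The stream comes back with its mutex held. */
	zstrm = zcomp_stream_get(comp);
	err = zcomp_compress(comp, zstrm, page_src, &dst_len);
	/* On success, zstrm->buffer holds dst_len bytes of compressed data. */
	zcomp_stream_put(zstrm);

	zcomp_destroy(comp);
	return err;
}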