Diffstat (limited to 'drivers/crypto/img-hash.c')
-rw-r--r--	drivers/crypto/img-hash.c	114
1 file changed, 50 insertions(+), 64 deletions(-)
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index aa4c7b2af3e2..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -157,9 +157,9 @@ static inline void img_hash_write(struct img_hash_dev *hdev,
writel_relaxed(value, hdev->io_base + offset);
}
-static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
+static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
- return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
+ return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
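The hunk above is an endianness-annotation fix: the hardware's result queue delivers big-endian words, so the accessor now returns __be32 via cpu_to_be32(), letting sparse type-check every consumer. On any given architecture be32_to_cpu() and cpu_to_be32() perform the same byte swap, so the generated code is unchanged; only the annotation direction differs. A minimal sketch of the pattern (read_be_word() is a made-up helper, not part of this driver):

	/* Sketch: tag a device word as big-endian for sparse. readl() yields
	 * a host-endian u32; cpu_to_be32() types the swapped value as __be32
	 * so stores into a __be32 digest buffer type-check. */
	static inline __be32 read_be_word(void __iomem *reg)
	{
		return cpu_to_be32(readl(reg));
	}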
@@ -209,7 +209,7 @@ static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
static void img_hash_dma_callback(void *data)
{
- struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+ struct img_hash_dev *hdev = data;
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->bufcnt) {
@@ -283,10 +283,10 @@ static int img_hash_finish(struct ahash_request *req)
static void img_hash_copy_hash(struct ahash_request *req)
{
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
- u32 *hash = (u32 *)ctx->digest;
+ __be32 *hash = (__be32 *)ctx->digest;
int i;
- for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
+ for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
hash[i] = img_hash_read_result_queue(ctx->hdev);
}
@@ -308,7 +308,7 @@ static void img_hash_finish_req(struct ahash_request *req, int err)
DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
if (req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
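Replacing the open-coded req->base.complete(&req->base, err) call with ahash_request_complete() decouples the driver from the completion callback's signature, which was reworked to take the request's data pointer instead of the request itself. The upstream helpers are roughly:

	/* Roughly what the helpers expand to after the completion-signature
	 * rework: the callback receives the caller's data pointer. */
	static inline void crypto_request_complete(struct crypto_async_request *req,
						   int err)
	{
		req->complete(req->data, err);
	}

	static inline void ahash_request_complete(struct ahash_request *req, int err)
	{
		crypto_request_complete(&req->base, err);
	}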
@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
static void img_hash_dma_task(unsigned long d)
{
struct img_hash_dev *hdev = (struct img_hash_dev *)d;
- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ struct img_hash_request_ctx *ctx;
u8 *addr;
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
- if (!hdev->req || !ctx->sg)
+ if (!hdev->req)
+ return;
+
+ ctx = ahash_request_ctx(hdev->req);
+ if (!ctx->sg)
return;
addr = sg_virt(ctx->sg);
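The reshuffle in img_hash_dma_task() fixes an ordering hazard: the old code derived ctx from hdev->req before checking hdev->req for NULL. Short-circuit evaluation kept ctx->sg from being loaded, but computing a context pointer from a NULL request is already undefined behavior. Condensed:

	/* Before: ctx is derived from hdev->req prior to the NULL check. */
	ctx = ahash_request_ctx(hdev->req);
	if (!hdev->req || !ctx->sg)
		return;

	/* After: validate the request first, then derive the context. */
	if (!hdev->req)
		return;
	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;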
@@ -432,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
return 0;
}
@@ -487,8 +491,9 @@ static int img_hash_init(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
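This is the first of several hunks that replace direct writes to rctx->fallback_req fields with the ahash request helpers. Beyond the MAY_SLEEP flag, ahash_request_set_callback() now also forwards the caller's completion handler and context, so the fallback can complete asynchronously on the caller's behalf. The repeated pattern could be factored as below (img_prepare_fallback() is a hypothetical helper, not part of this patch):

	/* Sketch: mirror the caller's request state onto a fallback request.
	 * A NULL src/result and zero nbytes are fine for init/import/export. */
	static void img_prepare_fallback(struct ahash_request *req,
					 struct ahash_request *fbreq,
					 struct crypto_ahash *fallback)
	{
		ahash_request_set_tfm(fbreq, fallback);
		ahash_request_set_callback(fbreq,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
					   req->base.complete, req->base.data);
		ahash_request_set_crypt(fbreq, req->src, req->result, req->nbytes);
	}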
@@ -522,7 +527,7 @@ static int img_hash_handle_queue(struct img_hash_dev *hdev,
return res;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
hdev->req = req;
@@ -551,10 +556,10 @@ static int img_hash_update(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -566,9 +571,10 @@ static int img_hash_final(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -580,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
+
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -596,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -609,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -674,14 +683,12 @@ static int img_hash_digest(struct ahash_request *req)
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = -ENOMEM;
ctx->fallback = crypto_alloc_ahash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("img_hash: Could not load fallback driver.\n");
- err = PTR_ERR(ctx->fallback);
- goto err;
+ return PTR_ERR(ctx->fallback);
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
@@ -689,29 +696,26 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
IMG_HASH_DMA_THRESHOLD);
return 0;
-
-err:
- return err;
}
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha1-generic");
+ return img_hash_cra_init(tfm, "sha1-lib");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha224-generic");
+ return img_hash_cra_init(tfm, "sha224-lib");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha256-generic");
+ return img_hash_cra_init(tfm, "sha256-lib");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
@@ -928,7 +932,7 @@ finish:
img_hash_finish_req(hdev->req, err);
}
-static const struct of_device_id img_hash_match[] = {
+static const struct of_device_id img_hash_match[] __maybe_unused = {
{ .compatible = "img,hash-accelerator" },
{}
};
@@ -967,8 +971,7 @@ static int img_hash_probe(struct platform_device *pdev)
}
/* Write port (DMA or CPU) */
- hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+ hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
if (IS_ERR(hdev->cpu_addr)) {
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
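devm_platform_get_and_ioremap_resource() folds the platform_get_resource() + devm_ioremap_resource() pair into a single call while still handing back the struct resource for callers that need it. Typical usage (variable names are illustrative):

	struct resource *res;
	void __iomem *regs;

	/* Look up MEM resource 1 and ioremap it; res receives the resource
	 * for callers that also need its start address or size. */
	regs = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);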
@@ -989,31 +992,23 @@ static int img_hash_probe(struct platform_device *pdev)
}
dev_dbg(dev, "using IRQ channel %d\n", irq);
- hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
+ hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
if (IS_ERR(hdev->hash_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->hash_clk);
goto res_err;
}
- hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
if (IS_ERR(hdev->sys_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->sys_clk);
goto res_err;
}
- err = clk_prepare_enable(hdev->hash_clk);
- if (err)
- goto res_err;
-
- err = clk_prepare_enable(hdev->sys_clk);
- if (err)
- goto clk_err;
-
err = img_hash_dma_init(hdev);
if (err)
- goto dma_err;
+ goto res_err;
dev_dbg(dev, "using %s for DMA transfers\n",
dma_chan_name(hdev->dma_lch));
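devm_clk_get_enabled() gets, prepares, and enables the clock in one step and registers a devres action that disables and unprepares it on driver detach. That is why the explicit clk_prepare_enable() calls and the clk_err/dma_err unwind labels can be deleted outright in the hunks that follow. A sketch of the shape this leaves probe code in (dev_err_probe() shown for brevity; the driver itself keeps dev_err() plus a goto):

	/* Sketch: managed clock acquisition; no unwind label needed. */
	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, "hash");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "clock initialization failed\n");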
@@ -1034,10 +1029,6 @@ err_algs:
list_del(&hdev->list);
spin_unlock(&img_hash.lock);
dma_release_channel(hdev->dma_lch);
-dma_err:
- clk_disable_unprepare(hdev->sys_clk);
-clk_err:
- clk_disable_unprepare(hdev->hash_clk);
res_err:
tasklet_kill(&hdev->done_task);
tasklet_kill(&hdev->dma_task);
@@ -1045,7 +1036,7 @@ res_err:
return err;
}
-static int img_hash_remove(struct platform_device *pdev)
+static void img_hash_remove(struct platform_device *pdev)
{
struct img_hash_dev *hdev;
@@ -1060,11 +1051,6 @@ static int img_hash_remove(struct platform_device *pdev)
tasklet_kill(&hdev->dma_task);
dma_release_channel(hdev->dma_lch);
-
- clk_disable_unprepare(hdev->hash_clk);
- clk_disable_unprepare(hdev->sys_clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
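The remove callback now returns void because the platform core ignored the return value anyway; teardown must simply succeed (or at worst warn). Depending on the kernel version, a void-returning callback is wired up as .remove_new or directly as .remove in the platform_driver. Sketch (the foo_* names are hypothetical):

	static void foo_remove(struct platform_device *pdev)
	{
		struct foo_dev *priv = platform_get_drvdata(pdev);

		/* Teardown only; there is no error path to report upward. */
		tasklet_kill(&priv->done_task);
		dma_release_channel(priv->dma_lch);
	}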
@@ -1107,7 +1093,7 @@ static struct platform_driver img_hash_driver = {
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
- .of_match_table = of_match_ptr(img_hash_match),
+ .of_match_table = img_hash_match,
}
};
module_platform_driver(img_hash_driver);