Diffstat (limited to 'arch/s390/crypto/paes_s390.c')
 arch/s390/crypto/paes_s390.c | 423 +++++++++++++++------
 1 file changed, 296 insertions(+), 127 deletions(-)
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 55ee5567a5ea..511093713a6f 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -34,14 +34,22 @@
* is called. As paes can handle different kinds of key blobs
* and padding is also possible, the limits need to be generous.
*/
-#define PAES_MIN_KEYSIZE 16
-#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
+#define PAES_MIN_KEYSIZE 16
+#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
+#define PAES_256_PROTKEY_SIZE (32 + 32) /* key + verification pattern */
+#define PXTS_256_PROTKEY_SIZE (32 + 32 + 32) /* k1 + k2 + verification pattern */
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+struct paes_protkey {
+ u32 type;
+ u32 len;
+ u8 protkey[PXTS_256_PROTKEY_SIZE];
+};
+
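The arithmetic behind the two new defines, as a compile-time sketch (user-space C11, not part of the patch):

#include <assert.h>

#define PAES_256_PROTKEY_SIZE (32 + 32)       /* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE (32 + 32 + 32)  /* k1 + k2 + verification pattern */

static_assert(PAES_256_PROTKEY_SIZE == 64, "single protected key");
static_assert(PXTS_256_PROTKEY_SIZE == 96, "double (XTS) protected key");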
struct key_blob {
/*
* Small keys will be stored in the keybuf. Larger keys are
@@ -55,31 +63,43 @@ struct key_blob {
unsigned int keylen;
};
-static inline int _key_to_kb(struct key_blob *kb,
- const u8 *key,
- unsigned int keylen)
+/*
+ * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
+ * information.
+ * @returns the size of the clearkey token
+ */
+static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
{
- struct clearkey_header {
+ struct clrkey_token {
u8 type;
u8 res0[3];
u8 version;
u8 res1[3];
u32 keytype;
u32 len;
- } __packed * h;
+ u8 key[];
+ } __packed *token = (struct clrkey_token *)dest;
+
+ token->type = 0x00;
+ token->version = 0x02;
+ token->keytype = (cklen - 8) >> 3;
+ token->len = cklen;
+ memcpy(token->key, ck, cklen);
+
+ return sizeof(*token) + cklen;
+}
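A worked sketch of the token this helper builds, assuming a 32-byte (AES-256) clear key; the struct mirrors the one above, and the program runs in user space rather than against any kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct clrkey_token {
	uint8_t  type;        /* 0x00 */
	uint8_t  res0[3];
	uint8_t  version;     /* 0x02 = TOKVER_CLEAR_KEY */
	uint8_t  res1[3];
	uint32_t keytype;     /* (cklen - 8) >> 3: 1/2/3 for AES-128/192/256 */
	uint32_t len;         /* clear key length in bytes */
	uint8_t  key[];
} __attribute__((packed));

int main(void)
{
	uint8_t ck[32] = { 0 };  /* dummy AES-256 clear key */
	uint8_t buf[sizeof(struct clrkey_token) + sizeof(ck)];
	struct clrkey_token *token = (struct clrkey_token *)buf;

	memset(buf, 0, sizeof(buf));
	token->type = 0x00;
	token->version = 0x02;
	token->keytype = (sizeof(ck) - 8) >> 3;  /* == 3 for AES-256 */
	token->len = sizeof(ck);
	memcpy(token->key, ck, sizeof(ck));

	/* 16-byte header + 32-byte key = 48, the value make_clrkey_token() returns */
	printf("token size: %zu\n", sizeof(*token) + sizeof(ck));
	return 0;
}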
+static inline int _key_to_kb(struct key_blob *kb,
+ const u8 *key,
+ unsigned int keylen)
+{
switch (keylen) {
case 16:
case 24:
case 32:
/* clear key value, prepare pkey clear key token in keybuf */
memset(kb->keybuf, 0, sizeof(kb->keybuf));
- h = (struct clearkey_header *) kb->keybuf;
- h->version = 0x02; /* TOKVER_CLEAR_KEY */
- h->keytype = (keylen - 8) >> 3;
- h->len = keylen;
- memcpy(kb->keybuf + sizeof(*h), key, keylen);
- kb->keylen = sizeof(*h) + keylen;
+ kb->keylen = make_clrkey_token(key, keylen, kb->keybuf);
kb->key = kb->keybuf;
break;
default:
@@ -99,6 +119,40 @@ static inline int _key_to_kb(struct key_blob *kb,
return 0;
}
+static inline int _xts_key_to_kb(struct key_blob *kb,
+ const u8 *key,
+ unsigned int keylen)
+{
+ size_t cklen = keylen / 2;
+
+ memset(kb->keybuf, 0, sizeof(kb->keybuf));
+
+ switch (keylen) {
+ case 32:
+ case 64:
+ /* clear key value, prepare pkey clear key tokens in keybuf */
+ kb->key = kb->keybuf;
+ kb->keylen = make_clrkey_token(key, cklen, kb->key);
+ kb->keylen += make_clrkey_token(key + cklen, cklen,
+ kb->key + kb->keylen);
+ break;
+ default:
+ /* other key material, let pkey handle this */
+ if (keylen <= sizeof(kb->keybuf)) {
+ kb->key = kb->keybuf;
+ } else {
+ kb->key = kmalloc(keylen, GFP_KERNEL);
+ if (!kb->key)
+ return -ENOMEM;
+ }
+ memcpy(kb->key, key, keylen);
+ kb->keylen = keylen;
+ break;
+ }
+
+ return 0;
+}
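The blob sizes produced for the two clear-key cases follow directly from the token layout; a minimal user-space check of that arithmetic (the header size of 16 is taken from struct clrkey_token above):

#include <assert.h>

/* kb->keylen as produced by _xts_key_to_kb() for clear XTS keys (sketch) */
static unsigned int xts_kb_len(unsigned int keylen)
{
	unsigned int cklen = keylen / 2;  /* each half is one AES key */

	return 2 * (16 + cklen);          /* two clearkey tokens back to back */
}

int main(void)
{
	assert(xts_kb_len(32) == 64);     /* 2 x AES-128 tokens */
	assert(xts_kb_len(64) == 96);     /* 2 x AES-256 tokens */
	return 0;
}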
+
static inline void _free_kb_keybuf(struct key_blob *kb)
{
if (kb->key && kb->key != kb->keybuf
    && kb->keylen > sizeof(kb->keybuf)) {
@@ -106,53 +160,53 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
kfree_sensitive(kb->key);
kb->key = NULL;
}
+ memzero_explicit(kb->keybuf, sizeof(kb->keybuf));
}
struct s390_paes_ctx {
struct key_blob kb;
- struct pkey_protkey pk;
+ struct paes_protkey pk;
spinlock_t pk_lock;
unsigned long fc;
};
struct s390_pxts_ctx {
- struct key_blob kb[2];
- struct pkey_protkey pk[2];
+ struct key_blob kb;
+ struct paes_protkey pk[2];
spinlock_t pk_lock;
unsigned long fc;
};
-static inline int __paes_keyblob2pkey(struct key_blob *kb,
- struct pkey_protkey *pk)
+static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen,
+ struct paes_protkey *pk)
{
- int i, ret;
+ int i, rc = -EIO;
- /* try three times in case of failure */
- for (i = 0; i < 3; i++) {
- if (i > 0 && ret == -EAGAIN && in_task())
+ /* try three times in case of busy card */
+ for (i = 0; rc && i < 3; i++) {
+ if (rc == -EBUSY && in_task()) {
if (msleep_interruptible(1000))
return -EINTR;
- ret = pkey_keyblob2pkey(kb->key, kb->keylen,
- pk->protkey, &pk->len, &pk->type);
- if (ret == 0)
- break;
+ }
+ rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len,
+ &pk->type);
}
- return ret;
+ return rc;
}
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
- int ret;
- struct pkey_protkey pkey;
+ struct paes_protkey pk;
+ int rc;
- pkey.len = sizeof(pkey.protkey);
- ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
- if (ret)
- return ret;
+ pk.len = sizeof(pk.protkey);
+ rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk);
+ if (rc)
+ return rc;
spin_lock_bh(&ctx->pk_lock);
- memcpy(&ctx->pk, &pkey, sizeof(pkey));
+ memcpy(&ctx->pk, &pk, sizeof(pk));
spin_unlock_bh(&ctx->pk_lock);
return 0;
@@ -177,8 +231,8 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
- int rc;
unsigned long fc;
+ int rc;
rc = __paes_convert_key(ctx);
if (rc)
@@ -198,8 +252,8 @@ static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int rc;
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -213,19 +267,19 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes, n, k;
- int ret;
struct {
- u8 key[MAXPROTKEYSIZE];
+ u8 key[PAES_256_PROTKEY_SIZE];
} param;
+ struct skcipher_walk walk;
+ unsigned int nbytes, n, k;
+ int rc;
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
+ rc = skcipher_walk_virt(&walk, req, false);
+ if (rc)
+ return rc;
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
while ((nbytes = walk.nbytes) != 0) {
@@ -234,16 +288,16 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
k = cpacf_km(ctx->fc | modifier, &param,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
- return ret;
+ return rc;
}
static int ecb_paes_encrypt(struct skcipher_request *req)
@@ -292,8 +346,8 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
- int rc;
unsigned long fc;
+ int rc;
rc = __paes_convert_key(ctx);
if (rc)
@@ -313,8 +367,8 @@ static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int rc;
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -328,21 +382,21 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes, n, k;
- int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
- u8 key[MAXPROTKEYSIZE];
+ u8 key[PAES_256_PROTKEY_SIZE];
} param;
+ struct skcipher_walk walk;
+ unsigned int nbytes, n, k;
+ int rc;
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
+ rc = skcipher_walk_virt(&walk, req, false);
+ if (rc)
+ return rc;
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
while ((nbytes = walk.nbytes) != 0) {
@@ -352,17 +406,17 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k) {
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
- return ret;
+ return rc;
}
static int cbc_paes_encrypt(struct skcipher_request *req)
@@ -397,8 +451,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->kb[0].key = NULL;
- ctx->kb[1].key = NULL;
+ ctx->kb.key = NULL;
spin_lock_init(&ctx->pk_lock);
return 0;
@@ -408,24 +461,51 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
- _free_kb_keybuf(&ctx->kb[0]);
- _free_kb_keybuf(&ctx->kb[1]);
+ _free_kb_keybuf(&ctx->kb);
}
static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
- struct pkey_protkey pkey0, pkey1;
+ struct paes_protkey pk0, pk1;
+ size_t split_keylen;
+ int rc;
- pkey0.len = sizeof(pkey0.protkey);
- pkey1.len = sizeof(pkey1.protkey);
+ pk0.len = sizeof(pk0.protkey);
+ pk1.len = sizeof(pk1.protkey);
- if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
- __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+ rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0);
+ if (rc)
+ return rc;
+
+ switch (pk0.type) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_256:
+ /* second keytoken required */
+ if (ctx->kb.keylen % 2)
+ return -EINVAL;
+ split_keylen = ctx->kb.keylen / 2;
+
+ rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen,
+ split_keylen, &pk1);
+ if (rc)
+ return rc;
+
+ if (pk0.type != pk1.type)
+ return -EINVAL;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ case PKEY_KEYTYPE_AES_XTS_256:
+ /* single key */
+ pk1.type = 0;
+ break;
+ default:
+ /* unsupported protected keytype */
return -EINVAL;
+ }
spin_lock_bh(&ctx->pk_lock);
- memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
- memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+ ctx->pk[0] = pk0;
+ ctx->pk[1] = pk1;
spin_unlock_bh(&ctx->pk_lock);
return 0;
@@ -434,17 +514,30 @@ static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
+ int rc;
- if (__xts_paes_convert_key(ctx))
- return -EINVAL;
-
- if (ctx->pk[0].type != ctx->pk[1].type)
- return -EINVAL;
+ rc = __xts_paes_convert_key(ctx);
+ if (rc)
+ return rc;
/* Pick the correct function code based on the protected key type */
- fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
- (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
- CPACF_KM_PXTS_256 : 0;
+ switch (ctx->pk[0].type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KM_PXTS_128;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KM_PXTS_256;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ fc = CPACF_KM_PXTS_128_FULL;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ fc = CPACF_KM_PXTS_256_FULL;
+ break;
+ default:
+ fc = 0;
+ break;
+ }
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
@@ -453,24 +546,19 @@ static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
}
static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int xts_key_len)
+ unsigned int in_keylen)
{
- int rc;
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
- unsigned int ckey_len, key_len;
+ unsigned int ckey_len;
+ int rc;
- if (xts_key_len % 2)
+ if ((in_keylen == 32 || in_keylen == 64) &&
+ xts_verify_key(tfm, in_key, in_keylen))
return -EINVAL;
- key_len = xts_key_len / 2;
-
- _free_kb_keybuf(&ctx->kb[0]);
- _free_kb_keybuf(&ctx->kb[1]);
- rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
- if (rc)
- return rc;
- rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
+ _free_kb_keybuf(&ctx->kb);
+ rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen);
if (rc)
return rc;
@@ -479,6 +567,13 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
return rc;
/*
+ * It is not possible with a single protected key (e.g. full AES-XTS) to
+ * check whether k1 and k2 are the same.
+ */
+ if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 ||
+ ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256)
+ return 0;
+ /*
* xts_verify_key verifies the key length is not odd and makes
* sure that the two keys are not the same. This can be done
* on the two protected keys as well
@@ -490,28 +585,82 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
return xts_verify_key(tfm, ckey, 2*ckey_len);
}
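For reference, only two key lengths take the clear-key path through _xts_key_to_kb(); everything else is handed to pkey as an opaque blob. A small user-space sketch of that check (the helper name is made up for illustration):

#include <assert.h>
#include <stdbool.h>

static bool is_clear_xts_keylen(unsigned int keylen)
{
	return keylen == 32 || keylen == 64;  /* 2 x AES-128 or 2 x AES-256 */
}

int main(void)
{
	assert(is_clear_xts_keylen(32));
	assert(is_clear_xts_keylen(64));
	assert(!is_clear_xts_keylen(48));  /* 2 x AES-192: no PXTS-192, opaque path */
	return 0;
}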
-static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+static int paes_xts_crypt_full(struct skcipher_request *req,
+ unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned int keylen, offset, nbytes, n, k;
+ struct {
+ u8 key[64];
+ u8 tweak[16];
+ u8 nap[16];
+ u8 wkvp[32];
+ } fxts_param = {
+ .nap = {0},
+ };
struct skcipher_walk walk;
+ int rc;
+
+ rc = skcipher_walk_virt(&walk, req, false);
+ if (rc)
+ return rc;
+
+ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
+ offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen);
+ memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
+ sizeof(fxts_param.wkvp));
+ spin_unlock_bh(&ctx->pk_lock);
+ memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak));
+ fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+
+ while ((nbytes = walk.nbytes) != 0) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset,
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ if (k)
+ rc = skcipher_walk_done(&walk, nbytes - k);
+ if (k < n) {
+ if (__xts_paes_convert_key(ctx))
+ return skcipher_walk_done(&walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(fxts_param.key + offset, ctx->pk[0].protkey,
+ keylen);
+ memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
+ sizeof(fxts_param.wkvp));
+ spin_unlock_bh(&ctx->pk_lock);
+ }
+ }
+
+ return rc;
+}
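The full-XTS parameter block differs from the split-key layout further below. A user-space sketch of the layout (mirroring fxts_param above): since cpacf_km() is handed fxts_param.key + offset, the XTS-128 case presents a 96-byte block starting at its 32-byte key, while XTS-256 uses all 128 bytes:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of fxts_param above; all members are byte arrays, so no padding. */
struct fxts_param {
	uint8_t key[64];    /* XTS-128: 32-byte key at offset 32; XTS-256: all 64 */
	uint8_t tweak[16];  /* copied from walk.iv */
	uint8_t nap[16];    /* next alpha power, seeded with 0x01 (little-endian) */
	uint8_t wkvp[32];   /* wrapping-key verification pattern */
};

int main(void)
{
	assert(offsetof(struct fxts_param, tweak) == 64);
	assert(offsetof(struct fxts_param, wkvp) == 96);
	assert(sizeof(struct fxts_param) == 128);
	return 0;
}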
+
+static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int keylen, offset, nbytes, n, k;
- int ret;
struct {
- u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
+ u8 key[PAES_256_PROTKEY_SIZE];
u8 tweak[16];
u8 block[16];
u8 bit[16];
u8 xts[16];
} pcc_param;
struct {
- u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
+ u8 key[PAES_256_PROTKEY_SIZE];
u8 init[16];
} xts_param;
+ struct skcipher_walk walk;
+ int rc;
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
+ rc = skcipher_walk_virt(&walk, req, false);
+ if (rc)
+ return rc;
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
@@ -531,7 +680,7 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__xts_paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
@@ -542,7 +691,24 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
}
}
- return ret;
+ return rc;
+}
+
+static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (ctx->fc) {
+ case CPACF_KM_PXTS_128:
+ case CPACF_KM_PXTS_256:
+ return paes_xts_crypt(req, modifier);
+ case CPACF_KM_PXTS_128_FULL:
+ case CPACF_KM_PXTS_256_FULL:
+ return paes_xts_crypt_full(req, modifier);
+ default:
+ return -EINVAL;
+ }
}
static int xts_paes_encrypt(struct skcipher_request *req)
@@ -592,8 +758,8 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
- int rc;
unsigned long fc;
+ int rc;
rc = __paes_convert_key(ctx);
if (rc)
@@ -614,8 +780,8 @@ static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int rc;
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -645,19 +811,19 @@ static int ctr_paes_crypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
- struct skcipher_walk walk;
- unsigned int nbytes, n, k;
- int ret, locked;
struct {
- u8 key[MAXPROTKEYSIZE];
+ u8 key[PAES_256_PROTKEY_SIZE];
} param;
+ struct skcipher_walk walk;
+ unsigned int nbytes, n, k;
+ int rc, locked;
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
+ rc = skcipher_walk_virt(&walk, req, false);
+ if (rc)
+ return rc;
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
locked = mutex_trylock(&ctrblk_lock);
@@ -674,7 +840,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
if (__paes_convert_key(ctx)) {
@@ -683,7 +849,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
return skcipher_walk_done(&walk, -EIO);
}
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
@@ -703,15 +869,15 @@ static int ctr_paes_crypt(struct skcipher_request *req)
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
spin_unlock_bh(&ctx->pk_lock);
}
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes);
+ rc = skcipher_walk_done(&walk, nbytes);
}
- return ret;
+ return rc;
}
static struct skcipher_alg ctr_paes_alg = {
@@ -751,7 +917,7 @@ static void paes_s390_fini(void)
static int __init paes_s390_init(void)
{
- int ret;
+ int rc;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
@@ -761,23 +927,23 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
- ret = crypto_register_skcipher(&ecb_paes_alg);
- if (ret)
+ rc = crypto_register_skcipher(&ecb_paes_alg);
+ if (rc)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
- ret = crypto_register_skcipher(&cbc_paes_alg);
- if (ret)
+ rc = crypto_register_skcipher(&cbc_paes_alg);
+ if (rc)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
- ret = crypto_register_skcipher(&xts_paes_alg);
- if (ret)
+ rc = crypto_register_skcipher(&xts_paes_alg);
+ if (rc)
goto out_err;
}
@@ -786,24 +952,27 @@ static int __init paes_s390_init(void)
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
- ret = -ENOMEM;
+ rc = -ENOMEM;
goto out_err;
}
- ret = crypto_register_skcipher(&ctr_paes_alg);
- if (ret)
+ rc = crypto_register_skcipher(&ctr_paes_alg);
+ if (rc)
goto out_err;
}
return 0;
out_err:
paes_s390_fini();
- return ret;
+ return rc;
}
module_init(paes_s390_init);
module_exit(paes_s390_fini);
-MODULE_ALIAS_CRYPTO("paes");
+MODULE_ALIAS_CRYPTO("ecb(paes)");
+MODULE_ALIAS_CRYPTO("cbc(paes)");
+MODULE_ALIAS_CRYPTO("ctr(paes)");
+MODULE_ALIAS_CRYPTO("xts(paes)");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");