Diffstat (limited to 'arch/s390/crypto')
 arch/s390/crypto/aes_s390.c   | 156
 arch/s390/crypto/crc32be-vx.S |   1
 arch/s390/crypto/crc32le-vx.S |   6
 arch/s390/crypto/des_s390.c   |  28
 arch/s390/crypto/prng.c       | 135
5 files changed, 220 insertions, 106 deletions
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index dd456725189f..d00f84add5f4 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/fips.h>
 #include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
                     kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
         unsigned int n, nbytes;
         int ret, locked;
 
-        locked = spin_trylock(&ctrblk_lock);
+        locked = mutex_trylock(&ctrblk_lock);
 
         ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
         while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                 ret = blkcipher_walk_done(desc, walk, nbytes - n);
         }
         if (locked)
-                spin_unlock(&ctrblk_lock);
+                mutex_unlock(&ctrblk_lock);
         /*
          * final block may be < AES_BLOCK_SIZE, copy only nbytes
          */
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
         return 0;
 }
 
-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
-                              unsigned int len)
+static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+                           unsigned int len)
 {
         memset(gw, 0, sizeof(*gw));
         gw->walk_bytes_remain = len;
         scatterwalk_start(&gw->walk, sg);
 }
 
-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
+{
+        struct scatterlist *nextsg;
+
+        gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+        while (!gw->walk_bytes) {
+                nextsg = sg_next(gw->walk.sg);
+                if (!nextsg)
+                        return 0;
+                scatterwalk_start(&gw->walk, nextsg);
+                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+                                                   gw->walk_bytes_remain);
+        }
+        gw->walk_ptr = scatterwalk_map(&gw->walk);
+        return gw->walk_bytes;
+}
+
+static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
+                                             unsigned int nbytes)
+{
+        gw->walk_bytes_remain -= nbytes;
+        scatterwalk_unmap(&gw->walk);
+        scatterwalk_advance(&gw->walk, nbytes);
+        scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+        gw->walk_ptr = NULL;
+}
+
+static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
         int n;
 
-        /* minbytesneeded <= AES_BLOCK_SIZE */
         if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
                 gw->ptr = gw->buf;
                 gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
                 goto out;
         }
 
-        gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
-        if (!gw->walk_bytes) {
-                scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-                                                   gw->walk_bytes_remain);
+        if (!_gcm_sg_clamp_and_map(gw)) {
+                gw->ptr = NULL;
+                gw->nbytes = 0;
+                goto out;
         }
-        gw->walk_ptr = scatterwalk_map(&gw->walk);
 
         if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
                 gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
                 n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
                 memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
                 gw->buf_bytes += n;
-                gw->walk_bytes_remain -= n;
-                scatterwalk_unmap(&gw->walk);
-                scatterwalk_advance(&gw->walk, n);
-                scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-
+                _gcm_sg_unmap_and_advance(gw, n);
                 if (gw->buf_bytes >= minbytesneeded) {
                         gw->ptr = gw->buf;
                         gw->nbytes = gw->buf_bytes;
                         goto out;
                 }
-
-                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-                                                   gw->walk_bytes_remain);
-                if (!gw->walk_bytes) {
-                        scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-                        gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-                                                           gw->walk_bytes_remain);
+                if (!_gcm_sg_clamp_and_map(gw)) {
+                        gw->ptr = NULL;
+                        gw->nbytes = 0;
+                        goto out;
                 }
-                gw->walk_ptr = scatterwalk_map(&gw->walk);
         }
 
 out:
         return gw->nbytes;
 }
 
-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
-        int n;
+        if (gw->walk_bytes_remain == 0) {
+                gw->ptr = NULL;
+                gw->nbytes = 0;
+                goto out;
+        }
 
+        if (!_gcm_sg_clamp_and_map(gw)) {
+                gw->ptr = NULL;
+                gw->nbytes = 0;
+                goto out;
+        }
+
+        if (gw->walk_bytes >= minbytesneeded) {
+                gw->ptr = gw->walk_ptr;
+                gw->nbytes = gw->walk_bytes;
+                goto out;
+        }
+
+        scatterwalk_unmap(&gw->walk);
+        gw->walk_ptr = NULL;
+
+        gw->ptr = gw->buf;
+        gw->nbytes = sizeof(gw->buf);
+
+out:
+        return gw->nbytes;
+}
+
+static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
         if (gw->ptr == NULL)
-                return;
+                return 0;
 
         if (gw->ptr == gw->buf) {
-                n = gw->buf_bytes - bytesdone;
+                int n = gw->buf_bytes - bytesdone;
+
                 if (n > 0) {
                         memmove(gw->buf, gw->buf + bytesdone, n);
-                        gw->buf_bytes -= n;
+                        gw->buf_bytes = n;
                 } else
                         gw->buf_bytes = 0;
-        } else {
-                gw->walk_bytes_remain -= bytesdone;
-                scatterwalk_unmap(&gw->walk);
-                scatterwalk_advance(&gw->walk, bytesdone);
-                scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-        }
+        } else
+                _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+        return bytesdone;
+}
+
+static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+        int i, n;
+
+        if (gw->ptr == NULL)
+                return 0;
+
+        if (gw->ptr == gw->buf) {
+                for (i = 0; i < bytesdone; i += n) {
+                        if (!_gcm_sg_clamp_and_map(gw))
+                                return i;
+                        n = min(gw->walk_bytes, bytesdone - i);
+                        memcpy(gw->walk_ptr, gw->buf + i, n);
+                        _gcm_sg_unmap_and_advance(gw, n);
+                }
+        } else
+                _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+        return bytesdone;
 }
 
 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
         unsigned int pclen = req->cryptlen;
         int ret = 0;
 
-        unsigned int len, in_bytes, out_bytes,
+        unsigned int n, len, in_bytes, out_bytes,
                      min_bytes, bytes, aad_bytes, pc_bytes;
         struct gcm_sg_walk gw_in, gw_out;
         u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
         *(u32 *)(param.j0 + ivsize) = 1;
         memcpy(param.k, ctx->key, ctx->key_len);
 
-        gcm_sg_walk_start(&gw_in, req->src, len);
-        gcm_sg_walk_start(&gw_out, req->dst, len);
+        gcm_walk_start(&gw_in, req->src, len);
+        gcm_walk_start(&gw_out, req->dst, len);
 
         do {
                 min_bytes = min_t(unsigned int,
                                   aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
-                in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
-                out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+                in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
+                out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
                 bytes = min(in_bytes, out_bytes);
 
                 if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
                           gw_in.ptr + aad_bytes, pc_bytes,
                           gw_in.ptr, aad_bytes);
 
-                gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
-                gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+                n = aad_bytes + pc_bytes;
+                if (gcm_in_walk_done(&gw_in, n) != n)
+                        return -ENOMEM;
+                if (gcm_out_walk_done(&gw_out, n) != n)
+                        return -ENOMEM;
                 aadlen -= aad_bytes;
                 pclen -= pc_bytes;
         } while (aadlen + pclen > 0);
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 2bf01ba44107..0099044e2c86 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -207,5 +207,6 @@ ENTRY(crc32_be_vgfm_16)
 .Ldone:
         VLGVF   %r2,%v2,3
         BR_EX   %r14
+ENDPROC(crc32_be_vgfm_16)
 
 .previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 7d6f568bd3ad..71caf0f4ec08 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -105,13 +105,14 @@ ENTRY(crc32_le_vgfm_16)
         larl    %r5,.Lconstants_CRC_32_LE
         j       crc32_le_vgfm_generic
+ENDPROC(crc32_le_vgfm_16)
 
 ENTRY(crc32c_le_vgfm_16)
         larl    %r5,.Lconstants_CRC_32C_LE
         j       crc32_le_vgfm_generic
+ENDPROC(crc32c_le_vgfm_16)
 
-
-crc32_le_vgfm_generic:
+ENTRY(crc32_le_vgfm_generic)
         /* Load CRC-32 constants */
         VLM     CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
 
@@ -267,5 +268,6 @@ crc32_le_vgfm_generic:
 .Ldone:
         VLGVF   %r2,%v2,2
         BR_EX   %r14
+ENDPROC(crc32_le_vgfm_generic)
 
 .previous
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d15383d0ff1..374b42fc7637 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/fips.h>
+#include <linux/mutex.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
 #include <asm/cpacf.h>
@@ -21,7 +22,7 @@
 #define DES3_KEY_SIZE   (3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -224,24 +225,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
                        unsigned int key_len)
 {
         struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+        int err;
 
-        if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-            crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-                          DES_KEY_SIZE)) &&
-            (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
-
-        /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
-        if (fips_enabled &&
-            !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-              crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-                            DES_KEY_SIZE) &&
-              crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
-                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
+        err = __des3_verify_key(&tfm->crt_flags, key);
+        if (unlikely(err))
+                return err;
 
         memcpy(ctx->key, key, key_len);
         return 0;
@@ -387,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
         unsigned int n, nbytes;
         int ret, locked;
 
-        locked = spin_trylock(&ctrblk_lock);
+        locked = mutex_trylock(&ctrblk_lock);
 
         ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
         while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -404,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
                 ret = blkcipher_walk_done(desc, walk, nbytes - n);
         }
         if (locked)
-                spin_unlock(&ctrblk_lock);
+                mutex_unlock(&ctrblk_lock);
         /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
         if (nbytes) {
                 cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index a97a1802cfb4..12cca467af7d 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -61,6 +61,7 @@ static unsigned int prng_reseed_limit;
 module_param_named(reseed_limit, prng_reseed_limit, int, 0);
 MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
 
+static bool trng_available;
 
 /*
  * Any one who considers arithmetical methods of producing random digits is,
@@ -115,46 +116,68 @@ static const u8 initial_parm_block[32] __initconst = {
 
 /*
  * generate_entropy:
- * This algorithm produces 64 bytes of entropy data based on 1024
- * individual stckf() invocations assuming that each stckf() value
- * contributes 0.25 bits of entropy. So the caller gets 256 bit
- * entropy per 64 byte or 4 bits entropy per byte.
+ * This function fills a given buffer with random bytes. The entropy within
+ * the random bytes given back is assumed to have at least 50% - meaning
+ * a 64 bytes buffer has at least 64 * 8 / 2 = 256 bits of entropy.
+ * Within the function the entropy generation is done in junks of 64 bytes.
+ * So the caller should also ask for buffer fill in multiples of 64 bytes.
+ * The generation of the entropy is based on the assumption that every stckf()
+ * invocation produces 0.5 bits of entropy. To accumulate 256 bits of entropy
+ * at least 512 stckf() values are needed. The entropy relevant part of the
+ * stckf value is bit 51 (counting starts at the left with bit nr 0) so
+ * here we use the lower 4 bytes and exor the values into 2k of bufferspace.
+ * To be on the save side, if there is ever a problem with stckf() the
+ * other half of the page buffer is filled with bytes from urandom via
+ * get_random_bytes(), so this function consumes 2k of urandom for each
+ * requested 64 bytes output data. Finally the buffer page is condensed into
+ * a 64 byte value by hashing with a SHA512 hash.
 */
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
        int n, ret = 0;
-       u8 *pg, *h, hash[64];
-
-       /* allocate 2 pages */
-       pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
+       u8 *pg, pblock[80] = {
+               /* 8 x 64 bit init values */
+               0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08,
+               0xBB, 0x67, 0xAE, 0x85, 0x84, 0xCA, 0xA7, 0x3B,
+               0x3C, 0x6E, 0xF3, 0x72, 0xFE, 0x94, 0xF8, 0x2B,
+               0xA5, 0x4F, 0xF5, 0x3A, 0x5F, 0x1D, 0x36, 0xF1,
+               0x51, 0x0E, 0x52, 0x7F, 0xAD, 0xE6, 0x82, 0xD1,
+               0x9B, 0x05, 0x68, 0x8C, 0x2B, 0x3E, 0x6C, 0x1F,
+               0x1F, 0x83, 0xD9, 0xAB, 0xFB, 0x41, 0xBD, 0x6B,
+               0x5B, 0xE0, 0xCD, 0x19, 0x13, 0x7E, 0x21, 0x79,
+               /* 128 bit counter total message bit length */
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 };
+
+       /* allocate one page stckf buffer */
+       pg = (u8 *) __get_free_page(GFP_KERNEL);
        if (!pg) {
                prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
                return -ENOMEM;
        }
 
+       /* fill the ebuf in chunks of 64 byte each */
        while (nbytes) {
-               /* fill pages with urandom bytes */
-               get_random_bytes(pg, 2*PAGE_SIZE);
-               /* exor pages with 1024 stckf values */
-               for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
-                       u64 *p = ((u64 *)pg) + n;
+               /* fill lower 2k with urandom bytes */
+               get_random_bytes(pg, PAGE_SIZE / 2);
+               /* exor upper 2k with 512 stckf values, offset 4 bytes each */
+               for (n = 0; n < 512; n++) {
+                       int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
+                       u64 *p = (u64 *)(pg + offset);
                        *p ^= get_tod_clock_fast();
                }
-               n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
-               if (n < sizeof(hash))
-                       h = hash;
-               else
-                       h = ebuf;
-               /* hash over the filled pages */
-               cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
-               if (n < sizeof(hash))
-                       memcpy(ebuf, hash, n);
+               /* hash over the filled page */
+               cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
+               n = (nbytes < 64) ? nbytes : 64;
+               memcpy(ebuf, pblock, n);
                ret += n;
                ebuf += n;
                nbytes -= n;
        }
 
-       free_pages((unsigned long)pg, 1);
+       memzero_explicit(pblock, sizeof(pblock));
+       memzero_explicit(pg, PAGE_SIZE);
+       free_page((unsigned long)pg);
        return ret;
}
@@ -344,8 +367,8 @@ static int __init prng_sha512_selftest(void)
 
 static int __init prng_sha512_instantiate(void)
 {
-       int ret, datalen;
-       u8 seed[64 + 32 + 16];
+       int ret, datalen, seedlen;
+       u8 seed[128 + 16];
 
        pr_debug("prng runs in SHA-512 mode "
                 "with chunksize=%d and reseed_limit=%u\n",
@@ -368,16 +391,36 @@ static int __init prng_sha512_instantiate(void)
        if (ret)
                goto outfree;
 
-       /* generate initial seed bytestring, with 256 + 128 bits entropy */
-       ret = generate_entropy(seed, 64 + 32);
-       if (ret != 64 + 32)
-               goto outfree;
-       /* followed by 16 bytes of unique nonce */
-       get_tod_clock_ext(seed + 64 + 32);
+       /* generate initial seed, we need at least 256 + 128 bits entropy. */
+       if (trng_available) {
+               /*
+                * Trng available, so use it. The trng works in chunks of
+                * 32 bytes and produces 100% entropy. So we pull 64 bytes
+                * which gives us 512 bits entropy.
+                */
+               seedlen = 2 * 32;
+               cpacf_trng(NULL, 0, seed, seedlen);
+       } else {
+               /*
+                * No trng available, so use the generate_entropy() function.
+                * This function works in 64 byte junks and produces
+                * 50% entropy. So we pull 2*64 bytes which gives us 512 bits
+                * of entropy.
+                */
+               seedlen = 2 * 64;
+               ret = generate_entropy(seed, seedlen);
+               if (ret != seedlen)
+                       goto outfree;
+       }
+
+       /* append the seed by 16 bytes of unique nonce */
+       get_tod_clock_ext(seed + seedlen);
+       seedlen += 16;
 
-       /* initial seed of the prno drng */
+       /* now initial seed of the prno drng */
        cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-                  &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+                  &prng_data->prnows, NULL, 0, seed, seedlen);
+       memzero_explicit(seed, sizeof(seed));
 
        /* if fips mode is enabled, generate a first block of random
           bytes for the FIPS 140-2 Conditional Self Test */
@@ -405,17 +448,26 @@ static void prng_sha512_deinstantiate(void)
 
 static int prng_sha512_reseed(void)
 {
-       int ret;
+       int ret, seedlen;
        u8 seed[64];
 
-       /* fetch 256 bits of fresh entropy */
-       ret = generate_entropy(seed, sizeof(seed));
-       if (ret != sizeof(seed))
-               return ret;
+       /* We need at least 256 bits of fresh entropy for reseeding */
+       if (trng_available) {
+               /* trng produces 256 bits entropy in 32 bytes */
+               seedlen = 32;
+               cpacf_trng(NULL, 0, seed, seedlen);
+       } else {
+               /* generate_entropy() produces 256 bits entropy in 64 bytes */
+               seedlen = 64;
+               ret = generate_entropy(seed, seedlen);
+               if (ret != sizeof(seed))
+                       return ret;
+       }
 
        /* do a reseed of the prno drng with this bytestring */
        cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-                  &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+                  &prng_data->prnows, NULL, 0, seed, seedlen);
+       memzero_explicit(seed, sizeof(seed));
 
        return 0;
}
@@ -592,6 +644,7 @@ static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
                        ret = -EFAULT;
                        break;
                }
+               memzero_explicit(p, n);
                ubuf += n;
                nbytes -= n;
                ret += n;
@@ -773,6 +826,10 @@ static int __init prng_init(void)
        if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
                return -EOPNOTSUPP;
 
+       /* check if TRNG subfunction is available */
+       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+               trng_available = true;
+
        /* choose prng mode */
        if (prng_mode != PRNG_MODE_TDES) {
                /* check for MSA5 support for PRNO operations */
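The new generate_entropy() comment in the prng.c hunk above describes the fallback scheme: fill the lower 2 KiB of a page with urandom bytes as a backstop, XOR 512 TOD-clock (stckf) samples into the upper 2 KiB at 4-byte offsets, and condense the whole page into a 64-byte value with SHA-512. Below is a rough, user-space-only sketch of that data flow, not the kernel code: condense_entropy() and read_timestamp() are hypothetical names, clock_gettime() stands in for the s390 stckf/get_tod_clock_fast() sample, reading /dev/urandom stands in for get_random_bytes(), and OpenSSL's SHA512() stands in for the CPACF cpacf_klmd(CPACF_KLMD_SHA_512, ...) call. It illustrates the layout only and is not a usable entropy source.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <openssl/sha.h>        /* SHA512(); link with -lcrypto */

#define POOL_SIZE 4096          /* stand-in for one 4 KiB page */

/* Hypothetical stand-in for the stckf/get_tod_clock_fast() TOD sample. */
static uint64_t read_timestamp(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/*
 * Condense one pool into a 64-byte digest, mirroring the scheme in the
 * generate_entropy() comment: urandom backstop in the lower half, 512
 * timestamp samples XORed into overlapping 8-byte words of the upper half,
 * then one SHA-512 over the whole pool.
 */
static int condense_entropy(uint8_t out[64])
{
        uint8_t pool[POOL_SIZE];
        FILE *f = fopen("/dev/urandom", "rb");

        if (!f)
                return -1;
        if (fread(pool, 1, POOL_SIZE / 2, f) != POOL_SIZE / 2) {
                fclose(f);
                return -1;
        }
        fclose(f);

        /* 512 samples, 4 bytes apart, so neighbouring 8-byte words overlap */
        for (int n = 0; n < 512; n++) {
                int offset = POOL_SIZE / 2 + n * 4 - 4;
                uint64_t v, t = read_timestamp();

                memcpy(&v, pool + offset, sizeof(v));   /* avoid unaligned access */
                v ^= t;
                memcpy(pool + offset, &v, sizeof(v));
        }

        SHA512(pool, POOL_SIZE, out);   /* 64-byte digest = one output chunk */
        return 0;
}

int main(void)
{
        uint8_t digest[64];

        if (condense_entropy(digest) == 0)
                printf("first digest byte: %02x\n", digest[0]);
        return 0;
}

The kernel version repeats this per 64 requested output bytes and wipes the pool and hash state afterwards (the memzero_explicit() calls added in the hunk); the sketch omits that loop and only shows a single condensation step.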