Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/Kconfig                    |    2
-rw-r--r-- | arch/s390/configs/debug_defconfig    |    1
-rw-r--r-- | arch/s390/configs/defconfig          |    1
-rw-r--r-- | arch/s390/crypto/Makefile            |    1
-rw-r--r-- | arch/s390/crypto/hmac_s390.c         |   12
-rw-r--r-- | arch/s390/crypto/paes_s390.c         |    2
-rw-r--r-- | arch/s390/crypto/phmac_s390.c        | 1048
-rw-r--r-- | arch/s390/crypto/sha.h               |    3
-rw-r--r-- | arch/s390/crypto/sha3_256_s390.c     |   24
-rw-r--r-- | arch/s390/crypto/sha3_512_s390.c     |   25
-rw-r--r-- | arch/s390/include/asm/cpacf.h        |    4
-rw-r--r-- | arch/s390/include/asm/entry-common.h |   10
-rw-r--r-- | arch/s390/include/asm/kvm_host.h     |    3
-rw-r--r-- | arch/s390/include/asm/percpu.h       |    5
-rw-r--r-- | arch/s390/kvm/kvm-s390.c             |   51
-rw-r--r-- | arch/s390/kvm/vsie.c                 |   17
-rw-r--r-- | arch/s390/mm/dump_pagetables.c       |    2
17 files changed, 1163 insertions, 48 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 25a773e6596e..f0c0469e553d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -131,6 +131,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE + select ARCH_MODULE_NEEDS_WEAK_PER_CPU select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC @@ -198,7 +199,6 @@ config S390 select HAVE_GUP_FAST select HAVE_FENTRY select HAVE_FTRACE_GRAPH_FUNC - select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_FREGS diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index a8e84e80552d..6b33429f1c4d 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -816,6 +816,7 @@ CONFIG_PKEY_EP11=m CONFIG_PKEY_PCKMO=m CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m +CONFIG_CRYPTO_PHMAC_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_CRYPTO_KRB5=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index a209b6ae4791..b75eb2775850 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -803,6 +803,7 @@ CONFIG_PKEY_EP11=m CONFIG_PKEY_PCKMO=m CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m +CONFIG_CRYPTO_PHMAC_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_CRYPTO_KRB5=m diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 1e5a1038d491..998f4b656b18 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -11,4 +11,5 @@ obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o obj-$(CONFIG_S390_PRNG) += prng.o obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o +obj-$(CONFIG_CRYPTO_PHMAC_S390) += phmac_s390.o obj-y += arch_random.o diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c index 93a1098d9f8d..58444da9b004 100644 --- a/arch/s390/crypto/hmac_s390.c +++ b/arch/s390/crypto/hmac_s390.c @@ -290,6 +290,7 @@ static int s390_hmac_export(struct shash_desc *desc, void *out) struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); unsigned int bs = crypto_shash_blocksize(desc->tfm); unsigned int ds = bs / 2; + u64 lo = ctx->buflen[0]; union { u8 *u8; u64 *u64; @@ -301,9 +302,10 @@ static int s390_hmac_export(struct shash_desc *desc, void *out) else memcpy(p.u8, ctx->param, ds); p.u8 += ds; - put_unaligned(ctx->buflen[0], p.u64++); + lo += bs; + put_unaligned(lo, p.u64++); if (ds == SHA512_DIGEST_SIZE) - put_unaligned(ctx->buflen[1], p.u64); + put_unaligned(ctx->buflen[1] + (lo < bs), p.u64); return err; } @@ -316,14 +318,16 @@ static int s390_hmac_import(struct shash_desc *desc, const void *in) const u8 *u8; const u64 *u64; } p = { .u8 = in }; + u64 lo; int err; err = s390_hmac_sha2_init(desc); memcpy(ctx->param, p.u8, ds); p.u8 += ds; - ctx->buflen[0] = get_unaligned(p.u64++); + lo = get_unaligned(p.u64++); + ctx->buflen[0] = lo - bs; if (ds == SHA512_DIGEST_SIZE) - ctx->buflen[1] = get_unaligned(p.u64); + ctx->buflen[1] = get_unaligned(p.u64) - (lo < bs); if (ctx->buflen[0] | ctx->buflen[1]) ctx->gr0.ikp = 1; return err; diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 8a340c16acb4..a624a43a2b54 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -1633,7 +1633,7 @@ static int __init paes_s390_init(void) /* with this pseudo devie alloc and start a crypto engine */ 
paes_crypto_engine = crypto_engine_alloc_init_and_set(paes_dev.this_device, - true, NULL, false, MAX_QLEN); + true, false, MAX_QLEN); if (!paes_crypto_engine) { rc = -ENOMEM; goto out_err; diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c new file mode 100644 index 000000000000..7ecfdc4fba2d --- /dev/null +++ b/arch/s390/crypto/phmac_s390.c @@ -0,0 +1,1048 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2025 + * + * s390 specific HMAC support for protected keys. + */ + +#define KMSG_COMPONENT "phmac_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/cpacf.h> +#include <asm/pkey.h> +#include <crypto/engine.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/atomic.h> +#include <linux/cpufeature.h> +#include <linux/delay.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +static struct crypto_engine *phmac_crypto_engine; +#define MAX_QLEN 10 + +/* + * A simple hash walk helper + */ + +struct hash_walk_helper { + struct crypto_hash_walk walk; + const u8 *walkaddr; + int walkbytes; +}; + +/* + * Prepare hash walk helper. + * Set up the base hash walk, fill walkaddr and walkbytes. + * Returns 0 on success or negative value on error. + */ +static inline int hwh_prepare(struct ahash_request *req, + struct hash_walk_helper *hwh) +{ + hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk); + if (hwh->walkbytes < 0) + return hwh->walkbytes; + hwh->walkaddr = hwh->walk.data; + return 0; +} + +/* + * Advance hash walk helper by n bytes. + * Progress the walkbytes and walkaddr fields by n bytes. + * If walkbytes is then 0, pull next hunk from hash walk + * and update walkbytes and walkaddr. + * If n is negative, unmap hash walk and return error. + * Returns 0 on success or negative value on error. + */ +static inline int hwh_advance(struct hash_walk_helper *hwh, int n) +{ + if (n < 0) + return crypto_hash_walk_done(&hwh->walk, n); + + hwh->walkbytes -= n; + hwh->walkaddr += n; + if (hwh->walkbytes > 0) + return 0; + + hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0); + if (hwh->walkbytes < 0) + return hwh->walkbytes; + + hwh->walkaddr = hwh->walk.data; + return 0; +} + +/* + * KMAC param block layout for sha2 function codes: + * The layout of the param block for the KMAC instruction depends on the + * blocksize of the used hashing sha2-algorithm function codes. The param block + * contains the hash chaining value (cv), the input message bit-length (imbl) + * and the hmac-secret (key). To prevent code duplication, the sizes of all + * these are calculated based on the blocksize. 
+ * + * param-block: + * +-------+ + * | cv | + * +-------+ + * | imbl | + * +-------+ + * | key | + * +-------+ + * + * sizes: + * part | sh2-alg | calculation | size | type + * -----+---------+-------------+------+-------- + * cv | 224/256 | blocksize/2 | 32 | u64[8] + * | 384/512 | | 64 | u128[8] + * imbl | 224/256 | blocksize/8 | 8 | u64 + * | 384/512 | | 16 | u128 + * key | 224/256 | blocksize | 96 | u8[96] + * | 384/512 | | 160 | u8[160] + */ + +#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE +#define MAX_IMBL_SIZE sizeof(u128) +#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE + +#define SHA2_CV_SIZE(bs) ((bs) >> 1) +#define SHA2_IMBL_SIZE(bs) ((bs) >> 3) + +#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs)) +#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs)) + +#define PHMAC_MAX_KEYSIZE 256 +#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32) +#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32) +#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE + +/* phmac protected key struct */ +struct phmac_protkey { + u32 type; + u32 len; + u8 protkey[PHMAC_MAX_PK_SIZE]; +}; + +#define PK_STATE_NO_KEY 0 +#define PK_STATE_CONVERT_IN_PROGRESS 1 +#define PK_STATE_VALID 2 + +/* phmac tfm context */ +struct phmac_tfm_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[PHMAC_MAX_KEYSIZE]; + unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk holds the protected key */ + struct phmac_protkey pk; +}; + +union kmac_gr0 { + unsigned long reg; + struct { + unsigned long : 48; + unsigned long ikp : 1; + unsigned long iimp : 1; + unsigned long ccup : 1; + unsigned long : 6; + unsigned long fc : 7; + }; +}; + +struct kmac_sha2_ctx { + u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE]; + union kmac_gr0 gr0; + u8 buf[MAX_BLOCK_SIZE]; + u64 buflen[2]; +}; + +/* phmac request context */ +struct phmac_req_ctx { + struct hash_walk_helper hwh; + struct kmac_sha2_ctx kmac_ctx; + bool final; +}; + +/* + * Pkey 'token' struct used to derive a protected key value from a clear key. 
+ */ +struct hmac_clrkey_token { + u8 type; + u8 res0[3]; + u8 version; + u8 res1[3]; + u32 keytype; + u32 len; + u8 key[]; +} __packed; + +static int hash_key(const u8 *in, unsigned int inlen, + u8 *digest, unsigned int digestsize) +{ + unsigned long func; + union { + struct sha256_paramblock { + u32 h[8]; + u64 mbl; + } sha256; + struct sha512_paramblock { + u64 h[8]; + u128 mbl; + } sha512; + } __packed param; + +#define PARAM_INIT(x, y, z) \ + param.sha##x.h[0] = SHA##y ## _H0; \ + param.sha##x.h[1] = SHA##y ## _H1; \ + param.sha##x.h[2] = SHA##y ## _H2; \ + param.sha##x.h[3] = SHA##y ## _H3; \ + param.sha##x.h[4] = SHA##y ## _H4; \ + param.sha##x.h[5] = SHA##y ## _H5; \ + param.sha##x.h[6] = SHA##y ## _H6; \ + param.sha##x.h[7] = SHA##y ## _H7; \ + param.sha##x.mbl = (z) + + switch (digestsize) { + case SHA224_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 224, inlen * 8); + break; + case SHA256_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 256, inlen * 8); + break; + case SHA384_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 384, inlen * 8); + break; + case SHA512_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 512, inlen * 8); + break; + default: + return -EINVAL; + } + +#undef PARAM_INIT + + cpacf_klmd(func, ¶m, in, inlen); + + memcpy(digest, ¶m, digestsize); + + return 0; +} + +/* + * make_clrkey_token() - wrap the clear key into a pkey clearkey token. + */ +static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen, + unsigned int digestsize, u8 *dest) +{ + struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest; + unsigned int blocksize; + int rc; + + token->type = 0x00; + token->version = 0x02; + switch (digestsize) { + case SHA224_DIGEST_SIZE: + case SHA256_DIGEST_SIZE: + token->keytype = PKEY_KEYTYPE_HMAC_512; + blocksize = 64; + break; + case SHA384_DIGEST_SIZE: + case SHA512_DIGEST_SIZE: + token->keytype = PKEY_KEYTYPE_HMAC_1024; + blocksize = 128; + break; + default: + return -EINVAL; + } + token->len = blocksize; + + if (clrkeylen > blocksize) { + rc = hash_key(clrkey, clrkeylen, token->key, digestsize); + if (rc) + return rc; + } else { + memcpy(token->key, clrkey, clrkeylen); + } + + return 0; +} + +/* + * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct + * a clear key token digestible by pkey from a clear key value. + */ +static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx, + const u8 *key, unsigned int keylen) +{ + if (keylen > sizeof(tfm_ctx->keybuf)) + return -EINVAL; + + memcpy(tfm_ctx->keybuf, key, keylen); + tfm_ctx->keylen = keylen; + + return 0; +} + +/* + * Convert the raw key material into a protected key via PKEY api. + * This function may sleep - don't call in non-sleeping context. + */ +static inline int convert_key(const u8 *key, unsigned int keylen, + struct phmac_protkey *pk) +{ + int rc, i; + + pk->len = sizeof(pk->protkey); + + /* + * In case of a busy card retry with increasing delay + * of 200, 400, 800 and 1600 ms - in total 3 s. + */ + for (rc = -EIO, i = 0; rc && i < 5; i++) { + if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) { + rc = -EINTR; + goto out; + } + rc = pkey_key2protkey(key, keylen, + pk->protkey, &pk->len, &pk->type, + PKEY_XFLAG_NOMEMALLOC); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * (Re-)Convert the raw key material from the tfm ctx into a protected + * key via convert_key() function. Update the pk_state, pk_type, pk_len + * and the protected key in the tfm context. 
+ * Please note this function may be invoked concurrently with the very + * same tfm context. The pk_lock spinlock in the context ensures an + * atomic update of the pk and the pk state but does not guarantee any + * order of update. So a fresh converted valid protected key may get + * updated with an 'old' expired key value. As the cpacf instructions + * detect this, refuse to operate with an invalid key and the calling + * code triggers a (re-)conversion this does no harm. This may lead to + * unnecessary additional conversion but never to invalid data on the + * hash operation. + */ +static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx) +{ + struct phmac_protkey pk; + int rc; + + spin_lock_bh(&tfm_ctx->pk_lock); + tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&tfm_ctx->pk_lock); + + rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk); + + /* update context */ + spin_lock_bh(&tfm_ctx->pk_lock); + if (rc) { + tfm_ctx->pk_state = rc; + } else { + tfm_ctx->pk_state = PK_STATE_VALID; + tfm_ctx->pk = pk; + } + spin_unlock_bh(&tfm_ctx->pk_lock); + + memzero_explicit(&pk, sizeof(pk)); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize + */ +static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo, + u64 buflen_hi, unsigned int blocksize) +{ + u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize); + + switch (blocksize) { + case SHA256_BLOCK_SIZE: + *(u64 *)imbl = buflen_lo * BITS_PER_BYTE; + break; + case SHA512_BLOCK_SIZE: + *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3; + break; + default: + break; + } +} + +static int phmac_kmac_update(struct ahash_request *req, bool maysleep) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int offset, k, n; + int rc = 0; + + /* + * The walk is always mapped when this function is called. + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + + while (hwh->walkbytes > 0) { + /* check sha2 context buffer */ + offset = ctx->buflen[0] % bs; + if (offset + hwh->walkbytes < bs) + goto store; + + if (offset) { + /* fill ctx buffer up to blocksize and process this block */ + n = bs - offset; + memcpy(ctx->buf + offset, hwh->walkaddr, n); + ctx->gr0.iimp = 1; + for (;;) { + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); + if (likely(k == bs)) + break; + if (unlikely(k > 0)) { + /* + * Can't deal with hunks smaller than blocksize. + * And kmac should always return the nr of + * processed bytes as 0 or a multiple of the + * blocksize. 
+ */ + rc = -EIO; + goto out; + } + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + ctx->buflen[0] += n; + if (ctx->buflen[0] < n) + ctx->buflen[1]++; + rc = hwh_advance(hwh, n); + if (unlikely(rc)) + goto out; + offset = 0; + } + + /* process as many blocks as possible from the walk */ + while (hwh->walkbytes >= bs) { + n = (hwh->walkbytes / bs) * bs; + ctx->gr0.iimp = 1; + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n); + if (likely(k > 0)) { + ctx->buflen[0] += k; + if (ctx->buflen[0] < k) + ctx->buflen[1]++; + rc = hwh_advance(hwh, k); + if (unlikely(rc)) + goto out; + } + if (unlikely(k < n)) { + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + } + +store: + /* store incomplete block in context buffer */ + if (hwh->walkbytes) { + memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes); + ctx->buflen[0] += hwh->walkbytes; + if (ctx->buflen[0] < hwh->walkbytes) + ctx->buflen[1]++; + rc = hwh_advance(hwh, hwh->walkbytes); + if (unlikely(rc)) + goto out; + } + + } /* end of while (hwh->walkbytes > 0) */ + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_kmac_final(struct ahash_request *req, bool maysleep) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + unsigned int ds = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int k, n; + int rc = 0; + + n = ctx->buflen[0] % bs; + ctx->gr0.iimp = 0; + kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs); + for (;;) { + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n); + if (likely(k == n)) + break; + if (unlikely(k > 0)) { + /* Can't deal with hunks smaller than blocksize. */ + rc = -EIO; + goto out; + } + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + + memcpy(req->result, ctx->param, ds); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + unsigned int bs = crypto_ahash_blocksize(tfm); + int rc = 0; + + /* zero request context (includes the kmac sha2 context) */ + memset(req_ctx, 0, sizeof(*req_ctx)); + + /* + * setkey() should have set a valid fc into the tfm context. + * Copy this function code into the gr0 field of the kmac context. 
+ */ + if (!tfm_ctx->fc) { + rc = -ENOKEY; + goto out; + } + kmac_ctx->gr0.fc = tfm_ctx->fc; + + /* + * Copy the pk from tfm ctx into kmac ctx. The protected key + * may be outdated but update() and final() will handle this. + */ + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_update(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc; + + /* prep the walk in the request context */ + rc = hwh_prepare(req, hwh); + if (rc) + goto out; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_update(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) { + hwh_advance(hwh, rc); + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_final(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + int rc = 0; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_final(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. 
+ */ + if (rc == 0 || rc == -EKEYEXPIRED) { + req->nbytes = 0; + req_ctx->final = true; + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + +out: + if (rc != -EINPROGRESS) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_finup(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc; + + /* prep the walk in the request context */ + rc = hwh_prepare(req, hwh); + if (rc) + goto out; + + /* Try synchronous operations if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_update(req, false); + if (rc == 0) + req->nbytes = 0; + } + if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_final(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + req_ctx->final = true; + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + hwh_advance(hwh, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_digest(struct ahash_request *req) +{ + int rc; + + rc = phmac_init(req); + if (rc) + goto out; + + rc = phmac_finup(req); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + unsigned int ds = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int tmpkeylen; + u8 *tmpkey = NULL; + int rc = 0; + + if (!crypto_ahash_tested(tfm)) { + /* + * selftest running: key is a raw hmac clear key and needs + * to get embedded into a 'clear key token' in order to have + * it correctly processed by the pkey module. 
+ */ + tmpkeylen = sizeof(struct hmac_clrkey_token) + bs; + tmpkey = kzalloc(tmpkeylen, GFP_KERNEL); + if (!tmpkey) { + rc = -ENOMEM; + goto out; + } + rc = make_clrkey_token(key, keylen, ds, tmpkey); + if (rc) + goto out; + keylen = tmpkeylen; + key = tmpkey; + } + + /* copy raw key into tfm context */ + rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen); + if (rc) + goto out; + + /* convert raw key into protected key */ + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + + /* set function code in tfm context, check for valid pk type */ + switch (ds) { + case SHA224_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224; + break; + case SHA256_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256; + break; + case SHA384_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384; + break; + case SHA512_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512; + break; + default: + tfm_ctx->fc = 0; + rc = -EINVAL; + } + +out: + kfree(tmpkey); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_export(struct ahash_request *req, void *out) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + + memcpy(out, ctx, sizeof(*ctx)); + + return 0; +} + +static int phmac_import(struct ahash_request *req, const void *in) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + + memset(req_ctx, 0, sizeof(*req_ctx)); + memcpy(ctx, in, sizeof(*ctx)); + + return 0; +} + +static int phmac_init_tfm(struct crypto_ahash *tfm) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); + spin_lock_init(&tfm_ctx->pk_lock); + + crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx)); + + return 0; +} + +static void phmac_exit_tfm(struct crypto_ahash *tfm) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + + memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf)); + memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk)); +} + +static int phmac_do_one_request(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc = -EINVAL; + + /* + * Three kinds of requests come in here: + * update when req->nbytes > 0 and req_ctx->final is false + * final when req->nbytes = 0 and req_ctx->final is true + * finup when req->nbytes > 0 and req_ctx->final is true + * For update and finup the hwh walk needs to be prepared and + * up to date but the actual nr of bytes in req->nbytes may be + * any non zero number. For final there is no hwh walk needed. + */ + + if (req->nbytes) { + rc = phmac_kmac_update(req, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell scheduler to voluntarily give up the CPU here. 
+ */ + pr_debug("rescheduling request\n"); + cond_resched(); + return -ENOSPC; + } else if (rc) { + hwh_advance(hwh, rc); + goto out; + } + req->nbytes = 0; + } + + if (req_ctx->final) { + rc = phmac_kmac_final(req, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell scheduler to voluntarily give up the CPU here. + */ + pr_debug("rescheduling request\n"); + cond_resched(); + return -ENOSPC; + } + } + +out: + if (rc || req_ctx->final) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&tfm_ctx->via_engine_ctr); + crypto_finalize_hash_request(engine, req, rc); + local_bh_enable(); + return rc; +} + +#define S390_ASYNC_PHMAC_ALG(x) \ +{ \ + .base = { \ + .init = phmac_init, \ + .update = phmac_update, \ + .final = phmac_final, \ + .finup = phmac_finup, \ + .digest = phmac_digest, \ + .setkey = phmac_setkey, \ + .import = phmac_import, \ + .export = phmac_export, \ + .init_tfm = phmac_init_tfm, \ + .exit_tfm = phmac_exit_tfm, \ + .halg = { \ + .digestsize = SHA##x##_DIGEST_SIZE, \ + .statesize = sizeof(struct kmac_sha2_ctx), \ + .base = { \ + .cra_name = "phmac(sha" #x ")", \ + .cra_driver_name = "phmac_s390_sha" #x, \ + .cra_blocksize = SHA##x##_BLOCK_SIZE, \ + .cra_priority = 400, \ + .cra_flags = CRYPTO_ALG_ASYNC | \ + CRYPTO_ALG_NO_FALLBACK, \ + .cra_ctxsize = sizeof(struct phmac_tfm_ctx), \ + .cra_module = THIS_MODULE, \ + }, \ + }, \ + }, \ + .op = { \ + .do_one_request = phmac_do_one_request, \ + }, \ +} + +static struct phmac_alg { + unsigned int fc; + struct ahash_engine_alg alg; + bool registered; +} phmac_algs[] = { + { + .fc = CPACF_KMAC_PHMAC_SHA_224, + .alg = S390_ASYNC_PHMAC_ALG(224), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_256, + .alg = S390_ASYNC_PHMAC_ALG(256), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_384, + .alg = S390_ASYNC_PHMAC_ALG(384), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_512, + .alg = S390_ASYNC_PHMAC_ALG(512), + } +}; + +static struct miscdevice phmac_dev = { + .name = "phmac", + .minor = MISC_DYNAMIC_MINOR, +}; + +static void s390_phmac_exit(void) +{ + struct phmac_alg *phmac; + int i; + + if (phmac_crypto_engine) { + crypto_engine_stop(phmac_crypto_engine); + crypto_engine_exit(phmac_crypto_engine); + } + + for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) { + phmac = &phmac_algs[i]; + if (phmac->registered) + crypto_engine_unregister_ahash(&phmac->alg); + } + + misc_deregister(&phmac_dev); +} + +static int __init s390_phmac_init(void) +{ + struct phmac_alg *phmac; + int i, rc; + + /* for selftest cpacf klmd subfunction is needed */ + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256)) + return -ENODEV; + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512)) + return -ENODEV; + + /* register a simple phmac pseudo misc device */ + rc = misc_register(&phmac_dev); + if (rc) + return rc; + + /* with this pseudo device alloc and start a crypto engine */ + phmac_crypto_engine = + crypto_engine_alloc_init_and_set(phmac_dev.this_device, + true, false, MAX_QLEN); + if (!phmac_crypto_engine) { + rc = -ENOMEM; + goto out_err; + } + rc = crypto_engine_start(phmac_crypto_engine); + if (rc) { + crypto_engine_exit(phmac_crypto_engine); + phmac_crypto_engine = NULL; + goto out_err; + } + + for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) { + phmac = &phmac_algs[i]; + if 
(!cpacf_query_func(CPACF_KMAC, phmac->fc)) + continue; + rc = crypto_engine_register_ahash(&phmac->alg); + if (rc) + goto out_err; + phmac->registered = true; + pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name); + } + + return 0; + +out_err: + s390_phmac_exit(); + return rc; +} + +module_init(s390_phmac_init); +module_exit(s390_phmac_exit); + +MODULE_ALIAS_CRYPTO("phmac(sha224)"); +MODULE_ALIAS_CRYPTO("phmac(sha256)"); +MODULE_ALIAS_CRYPTO("phmac(sha384)"); +MODULE_ALIAS_CRYPTO("phmac(sha512)"); + +MODULE_DESCRIPTION("S390 HMAC driver for protected keys"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index d757ccbce2b4..cadb4b13622a 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -27,6 +27,9 @@ struct s390_sha_ctx { u64 state[SHA512_DIGEST_SIZE / sizeof(u64)]; u64 count_hi; } sha512; + struct { + __le64 state[SHA3_STATE_SIZE / sizeof(u64)]; + } sha3; }; int func; /* KIMD function to use */ bool first_message_part; diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c index 4a7731ac6bcd..03bb4f4bab70 100644 --- a/arch/s390/crypto/sha3_256_s390.c +++ b/arch/s390/crypto/sha3_256_s390.c @@ -35,23 +35,33 @@ static int sha3_256_init(struct shash_desc *desc) static int sha3_256_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - struct sha3_state *octx = out; + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; if (sctx->first_message_part) { - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->first_message_part = 0; + memset(out, 0, SHA3_STATE_SIZE); + return 0; } - memcpy(octx->st, sctx->state, sizeof(octx->st)); + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++); return 0; } static int sha3_256_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++)); sctx->count = 0; - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); sctx->first_message_part = 0; sctx->func = CPACF_KIMD_SHA3_256; diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c index 018f02fff444..a5c9690eecb1 100644 --- a/arch/s390/crypto/sha3_512_s390.c +++ b/arch/s390/crypto/sha3_512_s390.c @@ -34,24 +34,33 @@ static int sha3_512_init(struct shash_desc *desc) static int sha3_512_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - struct sha3_state *octx = out; - + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; if (sctx->first_message_part) { - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->first_message_part = 0; + memset(out, 0, SHA3_STATE_SIZE); + return 0; } - memcpy(octx->st, sctx->state, sizeof(octx->st)); + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++); return 0; } static int sha3_512_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++)); sctx->count = 0; - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); sctx->first_message_part = 0; sctx->func = 
CPACF_KIMD_SHA3_512; diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 54cb97603ec0..4bc5317fbb12 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -129,6 +129,10 @@ #define CPACF_KMAC_HMAC_SHA_256 0x71 #define CPACF_KMAC_HMAC_SHA_384 0x72 #define CPACF_KMAC_HMAC_SHA_512 0x73 +#define CPACF_KMAC_PHMAC_SHA_224 0x78 +#define CPACF_KMAC_PHMAC_SHA_256 0x79 +#define CPACF_KMAC_PHMAC_SHA_384 0x7a +#define CPACF_KMAC_PHMAC_SHA_512 0x7b /* * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 35555c944630..979af986a8fe 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -59,4 +59,14 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare +static __always_inline bool arch_in_rcu_eqs(void) +{ + if (IS_ENABLED(CONFIG_KVM)) + return current->flags & PF_VCPU; + + return false; +} + +#define arch_in_rcu_eqs arch_in_rcu_eqs + #endif diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index cb89e54ada25..f870d09515cc 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -716,6 +716,9 @@ extern char sie_exit; bool kvm_s390_pv_is_protected(struct kvm *kvm); bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu); +extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb, + u64 *gprs, unsigned long gasce); + extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 84f6b8357b45..96af7d964014 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -16,10 +16,9 @@ * For 64 bit module code, the module may be more than 4G above the * per cpu area, use weak definitions to force the compiler to * generate external references. + * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU + * in the Kconfig. */ -#if defined(MODULE) -#define ARCH_NEEDS_WEAK_PER_CPU -#endif /* * We use a compare-and-swap loop since that uses less cpu cycles than diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 78c9a310efa5..bf6fa8b9ca73 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5063,6 +5063,30 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) return vcpu_post_run_handle_fault(vcpu); } +int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb, + u64 *gprs, unsigned long gasce) +{ + int ret; + + guest_state_enter_irqoff(); + + /* + * The guest_state_{enter,exit}_irqoff() functions inform lockdep and + * tracing that entry to the guest will enable host IRQs, and exit from + * the guest will disable host IRQs. + * + * We must not use lockdep/tracing/RCU in this critical section, so we + * use the low-level arch_local_irq_*() helpers to enable/disable IRQs. 
+ */ + arch_local_irq_enable(); + ret = sie64a(scb, gprs, gasce); + arch_local_irq_disable(); + + guest_state_exit_irqoff(); + + return ret; +} + #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK) static int __vcpu_run(struct kvm_vcpu *vcpu) { @@ -5083,20 +5107,27 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) kvm_vcpu_srcu_read_unlock(vcpu); /* * As PF_VCPU will be used in fault handler, between - * guest_enter and guest_exit should be no uaccess. + * guest_timing_enter_irqoff and guest_timing_exit_irqoff + * should be no uaccess. */ - local_irq_disable(); - guest_enter_irqoff(); - __disable_cpu_timer_accounting(vcpu); - local_irq_enable(); if (kvm_s390_pv_cpu_is_protected(vcpu)) { memcpy(sie_page->pv_grregs, vcpu->run->s.regs.gprs, sizeof(sie_page->pv_grregs)); } - exit_reason = sie64a(vcpu->arch.sie_block, - vcpu->run->s.regs.gprs, - vcpu->arch.gmap->asce); + + local_irq_disable(); + guest_timing_enter_irqoff(); + __disable_cpu_timer_accounting(vcpu); + + exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block, + vcpu->run->s.regs.gprs, + vcpu->arch.gmap->asce); + + __enable_cpu_timer_accounting(vcpu); + guest_timing_exit_irqoff(); + local_irq_enable(); + if (kvm_s390_pv_cpu_is_protected(vcpu)) { memcpy(vcpu->run->s.regs.gprs, sie_page->pv_grregs, @@ -5112,10 +5143,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; } } - local_irq_disable(); - __enable_cpu_timer_accounting(vcpu); - guest_exit_irqoff(); - local_irq_enable(); kvm_vcpu_srcu_read_lock(vcpu); rc = vcpu_post_run(vcpu, exit_reason); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 13a9661d2b28..347268f89f2f 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -1170,10 +1170,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) vcpu->arch.sie_block->fpf & FPF_BPBC) set_thread_flag(TIF_ISOLATE_BP_GUEST); - local_irq_disable(); - guest_enter_irqoff(); - local_irq_enable(); - /* * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking * and VCPU requests also hinder the vSIE from running and lead @@ -1183,15 +1179,16 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; current->thread.gmap_int_code = 0; barrier(); - if (!kvm_s390_vcpu_sie_inhibited(vcpu)) - rc = sie64a(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce); + if (!kvm_s390_vcpu_sie_inhibited(vcpu)) { + local_irq_disable(); + guest_timing_enter_irqoff(); + rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce); + guest_timing_exit_irqoff(); + local_irq_enable(); + } barrier(); vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; - local_irq_disable(); - guest_exit_irqoff(); - local_irq_enable(); - /* restore guest state for bp isolation override */ if (!guest_bp_isolation) clear_thread_flag(TIF_ISOLATE_BP_GUEST); diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index ac604b176660..9af2aae0a515 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -247,11 +247,9 @@ static int ptdump_show(struct seq_file *m, void *v) .marker = markers, }; - get_online_mems(); mutex_lock(&cpa_mutex); ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); mutex_unlock(&cpa_mutex); - put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); |