-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom,prng.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom-qce.yaml | 2
-rw-r--r--  Documentation/driver-api/crypto/iaa/iaa-crypto.rst | 9
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/arm/configs/pxa_defconfig | 1
-rw-r--r--  arch/loongarch/configs/loongson3_defconfig | 1
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 2
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 2
-rw-r--r--  arch/m68k/configs/atari_defconfig | 2
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 2
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 2
-rw-r--r--  arch/m68k/configs/mac_defconfig | 2
-rw-r--r--  arch/m68k/configs/multi_defconfig | 2
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 2
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 2
-rw-r--r--  arch/m68k/configs/q40_defconfig | 2
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 2
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 2
-rw-r--r--  arch/mips/configs/bigsur_defconfig | 1
-rw-r--r--  arch/mips/configs/decstation_64_defconfig | 2
-rw-r--r--  arch/mips/configs/decstation_defconfig | 2
-rw-r--r--  arch/mips/configs/decstation_r4k_defconfig | 2
-rw-r--r--  arch/mips/configs/ip27_defconfig | 1
-rw-r--r--  arch/mips/configs/ip30_defconfig | 1
-rw-r--r--  arch/powerpc/crypto/aes-gcm-p10-glue.c | 9
-rw-r--r--  arch/s390/configs/debug_defconfig | 2
-rw-r--r--  arch/s390/configs/defconfig | 2
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c | 1
-rw-r--r--  arch/x86/crypto/aes-gcm-avx10-x86_64.S | 119
-rw-r--r--  arch/x86/crypto/aes-xts-avx-x86_64.S | 329
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 10
-rw-r--r--  arch/x86/crypto/blowfish_glue.c | 1
-rw-r--r--  arch/x86/crypto/camellia_glue.c | 1
-rw-r--r--  arch/x86/crypto/des3_ede_glue.c | 1
-rw-r--r--  arch/x86/crypto/twofish_glue.c | 1
-rw-r--r--  crypto/Kconfig | 18
-rw-r--r--  crypto/Makefile | 2
-rw-r--r--  crypto/aegis128-core.c | 2
-rw-r--r--  crypto/ahash.c | 158
-rw-r--r--  crypto/algapi.c | 31
-rw-r--r--  crypto/anubis.c | 14
-rw-r--r--  crypto/aria_generic.c | 37
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c | 10
-rw-r--r--  crypto/fips.c | 4
-rw-r--r--  crypto/keywrap.c | 320
-rw-r--r--  crypto/khazad.c | 17
-rw-r--r--  crypto/proc.c | 9
-rw-r--r--  crypto/seed.c | 48
-rw-r--r--  crypto/sig.c | 4
-rw-r--r--  crypto/skcipher.c | 369
-rw-r--r--  crypto/tcrypt.c | 4
-rw-r--r--  crypto/tea.c | 83
-rw-r--r--  crypto/testmgr.c | 26
-rw-r--r--  crypto/testmgr.h | 192
-rw-r--r--  crypto/vmac.c | 696
-rw-r--r--  drivers/crypto/Kconfig | 17
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/bcm/spu.c | 7
-rw-r--r--  drivers/crypto/caam/blob_gen.c | 3
-rw-r--r--  drivers/crypto/ccp/dbc.c | 53
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 13
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 291
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 3
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 161
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 11
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 13
-rw-r--r--  drivers/crypto/hisilicon/zip/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/zip/dae_main.c | 262
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 8
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 52
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 2
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 3
-rw-r--r--  drivers/crypto/n2_asm.S | 96
-rw-r--r--  drivers/crypto/n2_core.c | 2168
-rw-r--r--  drivers/crypto/n2_core.h | 232
-rw-r--r--  drivers/crypto/omap-aes.c | 34
-rw-r--r--  drivers/crypto/omap-aes.h | 6
-rw-r--r--  drivers/crypto/omap-des.c | 40
-rw-r--r--  drivers/crypto/qce/aead.c | 2
-rw-r--r--  drivers/crypto/qce/core.c | 129
-rw-r--r--  drivers/crypto/qce/core.h | 9
-rw-r--r--  drivers/crypto/qce/dma.c | 22
-rw-r--r--  drivers/crypto/qce/dma.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 2
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c | 7
-rw-r--r--  drivers/crypto/tegra/tegra-se-hash.c | 7
-rw-r--r--  include/crypto/gf128mul.h | 6
-rw-r--r--  include/crypto/internal/hash.h | 23
-rw-r--r--  include/crypto/internal/skcipher.h | 14
-rw-r--r--  include/linux/hisi_acc_qm.h | 8
-rw-r--r--  include/linux/verification.h | 2
-rw-r--r--  kernel/padata.c | 45
-rw-r--r--  lib/crypto/aesgcm.c | 2
-rw-r--r--  lib/crypto/gf128mul.c | 75
-rw-r--r--  lib/rhashtable.c | 12
97 files changed, 1361 insertions, 5063 deletions
diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
index 0304f074cf08..08fe6a707a37 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
@@ -13,12 +13,14 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs8300-inline-crypto-engine
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine
- qcom,sc7280-inline-crypto-engine
- qcom,sm8450-inline-crypto-engine
- qcom,sm8550-inline-crypto-engine
- qcom,sm8650-inline-crypto-engine
+ - qcom,sm8750-inline-crypto-engine
- const: qcom,inline-crypto-engine
reg:
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
index 048b769a73c0..5e6f8b642545 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
@@ -17,12 +17,17 @@ properties:
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
+ - qcom,ipq5332-trng
+ - qcom,ipq5424-trng
+ - qcom,ipq9574-trng
+ - qcom,qcs8300-trng
- qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng
- qcom,sm8450-trng
- qcom,sm8550-trng
- qcom,sm8650-trng
+ - qcom,sm8750-trng
- const: qcom,trng
reg:
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index 62310add2e44..3ed56d9d378e 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -45,6 +45,7 @@ properties:
- items:
- enum:
+ - qcom,qcs8300-qce
- qcom,sa8775p-qce
- qcom,sc7280-qce
- qcom,sm6350-qce
@@ -53,6 +54,7 @@ properties:
- qcom,sm8450-qce
- qcom,sm8550-qce
- qcom,sm8650-qce
+ - qcom,sm8750-qce
- const: qcom,sm8150-qce
- const: qcom,qce
diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
index bba40158dd5c..8e50b900d51c 100644
--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -272,7 +272,7 @@ The available attributes are:
echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
Async mode without interrupts (caller must poll) can be enabled by
- writing 'async' to it::
+ writing 'async' to it (please see Caveat)::
echo async > /sys/bus/dsa/drivers/crypto/sync_mode
@@ -283,6 +283,13 @@ The available attributes are:
The default mode is 'sync'.
+ Caveat: since the only mechanism that iaa_crypto currently implements
+ for async polling without interrupts is via the 'sync' mode as
+ described earlier, writing 'async' to
+ '/sys/bus/dsa/drivers/crypto/sync_mode' will internally enable the
+ 'sync' mode. This is to ensure correct iaa_crypto behavior until true
+ async polling without interrupts is enabled in iaa_crypto.
+
.. _iaa_default_config:
IAA Default Configuration
diff --git a/MAINTAINERS b/MAINTAINERS
index 0451718a5a0c..7429295d259c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -20164,7 +20164,7 @@ F: net/rfkill/
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
M: Herbert Xu <herbert@gondor.apana.org.au>
-L: netdev@vger.kernel.org
+L: linux-crypto@vger.kernel.org
S: Maintained
F: include/linux/rhashtable-types.h
F: include/linux/rhashtable.h
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 38916ac4bce4..de0ac8f521d7 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -652,7 +652,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 2052452e241a..a3d513919484 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -1029,7 +1029,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index e5acb7b52550..dbf2ea561c85 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -579,7 +579,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -589,7 +588,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 89fc22636bb5..b0fd199cc0a4 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -536,7 +536,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -546,7 +545,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 9dfba6488b72..bb5b2d3b6c10 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -556,7 +556,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -566,7 +565,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 5250447e249b..8315a13bab73 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 0f99307f9caf..350370657e5f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -538,7 +538,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -548,7 +547,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 36e5f493654a..f942b4755702 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -565,7 +564,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 382b0e2e20dc..b1eaad02efab 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -642,7 +642,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -652,7 +651,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index d91ec0cc23c8..6309a4442bb3 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 4cde4ee78a85..3feb0731f814 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -529,7 +529,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -539,7 +538,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index d49c01e591e1..ea04b1b0da7d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -555,7 +554,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 468f2545b93e..f52d9af92153 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index ea38b1f2453d..f348447824da 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index e463a9acae03..f7c4b3529a2c 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -222,7 +222,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 92a1d0aea38c..da51b9731db0 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -177,10 +177,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index db214fcebcbe..424e3f011fc2 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 15b769e96d5b..cfc8bf791792 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 4714074c8bd7..b08a199767d1 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -305,7 +305,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA512=m
diff --git a/arch/mips/configs/ip30_defconfig b/arch/mips/configs/ip30_defconfig
index 178d61645cea..270181a7320a 100644
--- a/arch/mips/configs/ip30_defconfig
+++ b/arch/mips/configs/ip30_defconfig
@@ -176,7 +176,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
index f37b3d13fc53..679f52794baf 100644
--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -214,7 +214,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
- struct scatter_walk assoc_sg_walk;
struct skcipher_walk walk;
u8 *assocmem = NULL;
u8 *assoc;
@@ -234,8 +233,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
- scatterwalk_start(&assoc_sg_walk, req->src);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
} else {
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -253,10 +251,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
vsx_end();
- if (!assocmem)
- scatterwalk_unmap(assoc);
- else
- kfree(assocmem);
+ kfree(assocmem);
if (enc)
ret = skcipher_walk_aead_encrypt(&walk, req, false);
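Note: the simplification above works because ppc64 kernels never enable HIGHMEM, so sg_virt() yields a directly usable kernel address and no map/unmap pair is needed; the cleanup then collapses to an unconditional kfree(), which is NULL-safe. A minimal sketch of the resulting shape (assoc handling only; the copy helper shown for the non-contiguous fallback is assumed to be the standard scatterwalk_map_and_copy(), since that hunk is not visible here):

    u8 *assocmem = NULL;
    u8 *assoc;

    if (req->src->length >= assoclen && req->src->length) {
        /* First entry is contiguous and !HIGHMEM: use it in place. */
        assoc = sg_virt(req->src);
    } else {
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;

        assocmem = kmalloc(assoclen, flags);
        if (!assocmem)
            return -ENOMEM;
        assoc = assocmem;
        scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
    }

    /* ... run GHASH over assoc ... */

    kfree(assocmem);    /* kfree(NULL) is a no-op, so no flag is needed */
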
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 931c0a974ed8..d6beec5292a0 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -770,7 +770,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@@ -782,7 +781,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index d521aabc31d7..8cfbfb10bba8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -756,7 +756,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@@ -768,7 +767,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index c19d8e3d96a3..01fa568dc5fc 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -240,7 +240,6 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
.cra_priority = 400,
.cra_name = "__aegis128",
diff --git a/arch/x86/crypto/aes-gcm-avx10-x86_64.S b/arch/x86/crypto/aes-gcm-avx10-x86_64.S
index 97e0ee515fc5..02ee11083d4f 100644
--- a/arch/x86/crypto/aes-gcm-avx10-x86_64.S
+++ b/arch/x86/crypto/aes-gcm-avx10-x86_64.S
@@ -88,7 +88,7 @@
// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
- .octa 0x000102030405060708090a0b0c0d0e0f
+ .octa 0x000102030405060708090a0b0c0d0e0f
// This is the GHASH reducing polynomial without its constant term, i.e.
// x^128 + x^7 + x^2 + x, represented using the backwards mapping
@@ -384,8 +384,8 @@
vpshufd $0xd3, H_CUR_XMM, %xmm0
vpsrad $31, %xmm0, %xmm0
vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
- vpand .Lgfpoly_and_internal_carrybit(%rip), %xmm0, %xmm0
- vpxor %xmm0, H_CUR_XMM, H_CUR_XMM
+ // H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
+ vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM
// Load the gfpoly constant.
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY
@@ -562,6 +562,32 @@
vpxord RNDKEY0, V3, V3
.endm
+// Do the last AES round for four vectors of counter blocks V0-V3, XOR source
+// data with the resulting keystream, and write the result to DST and
+// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
+.macro _aesenclast_and_xor_4x
+ // XOR the source data with the last round key, saving the result in
+ // GHASHDATA[0-3]. This reduces latency by taking advantage of the
+ // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
+ vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0
+ vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1
+ vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2
+ vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3
+
+ // Do the last AES round. This handles the XOR with the source data
+ // too, as per the optimization described above.
+ vaesenclast GHASHDATA0, V0, GHASHDATA0
+ vaesenclast GHASHDATA1, V1, GHASHDATA1
+ vaesenclast GHASHDATA2, V2, GHASHDATA2
+ vaesenclast GHASHDATA3, V3, GHASHDATA3
+
+ // Store the en/decrypted data to DST.
+ vmovdqu8 GHASHDATA0, 0*VL(DST)
+ vmovdqu8 GHASHDATA1, 1*VL(DST)
+ vmovdqu8 GHASHDATA2, 2*VL(DST)
+ vmovdqu8 GHASHDATA3, 3*VL(DST)
+.endm
+
// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
// const u32 le_ctr[4], u8 ghash_acc[16],
// const u8 *src, u8 *dst, int datalen);
@@ -640,7 +666,7 @@
// LE_CTR contains the next set of little-endian counter blocks.
.set LE_CTR, V12
- // RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-5] contain cached AES round keys,
+ // RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
.set RNDKEY0, V13
@@ -650,15 +676,10 @@
.set RNDKEY_M7, V17
.set RNDKEY_M6, V18
.set RNDKEY_M5, V19
-
- // RNDKEYLAST[0-3] temporarily store the last AES round key XOR'd with
- // the corresponding block of source data. This is useful because
- // vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), and key ^ b can
- // be computed in parallel with the AES rounds.
- .set RNDKEYLAST0, V20
- .set RNDKEYLAST1, V21
- .set RNDKEYLAST2, V22
- .set RNDKEYLAST3, V23
+ .set RNDKEY_M4, V20
+ .set RNDKEY_M3, V21
+ .set RNDKEY_M2, V22
+ .set RNDKEY_M1, V23
// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
// cannot coincide with anything used for AES encryption, since for
@@ -713,7 +734,7 @@
// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
// loop and also ensures that at least one write always occurs to
// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
- sub $4*VL, DATALEN
+ add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32
jl .Lcrypt_loop_4x_done\@
// Load powers of the hash key.
@@ -748,26 +769,15 @@
add $16, %rax
cmp %rax, RNDKEYLAST_PTR
jne 1b
- vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
- vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
- vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
- vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
- vaesenclast RNDKEYLAST0, V0, GHASHDATA0
- vaesenclast RNDKEYLAST1, V1, GHASHDATA1
- vaesenclast RNDKEYLAST2, V2, GHASHDATA2
- vaesenclast RNDKEYLAST3, V3, GHASHDATA3
- vmovdqu8 GHASHDATA0, 0*VL(DST)
- vmovdqu8 GHASHDATA1, 1*VL(DST)
- vmovdqu8 GHASHDATA2, 2*VL(DST)
- vmovdqu8 GHASHDATA3, 3*VL(DST)
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, DATALEN
+ _aesenclast_and_xor_4x
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, DATALEN
jl .Lghash_last_ciphertext_4x\@
.endif
// Cache as many additional AES round keys as possible.
-.irp i, 9,8,7,6,5
+.irp i, 9,8,7,6,5,4,3,2,1
vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr
@@ -799,50 +809,17 @@
_vaesenc_4x RNDKEY
128:
- // XOR the source data with the last round key, saving the result in
- // RNDKEYLAST[0-3]. This reduces latency by taking advantage of the
- // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
-.if \enc
- vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
- vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
- vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
- vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
-.else
- vpxord GHASHDATA0, RNDKEYLAST, RNDKEYLAST0
- vpxord GHASHDATA1, RNDKEYLAST, RNDKEYLAST1
- vpxord GHASHDATA2, RNDKEYLAST, RNDKEYLAST2
- vpxord GHASHDATA3, RNDKEYLAST, RNDKEYLAST3
-.endif
-
// Finish the AES encryption of the counter blocks in V0-V3, interleaved
// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
-.irp i, 9,8,7,6,5
+.irp i, 9,8,7,6,5,4,3,2,1
+ _ghash_step_4x (9 - \i)
_vaesenc_4x RNDKEY_M\i
- _ghash_step_4x (9 - \i)
-.endr
-.irp i, 4,3,2,1
- vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY
- _vaesenc_4x RNDKEY
- _ghash_step_4x (9 - \i)
.endr
_ghash_step_4x 9
-
- // Do the last AES round. This handles the XOR with the source data
- // too, as per the optimization described above.
- vaesenclast RNDKEYLAST0, V0, GHASHDATA0
- vaesenclast RNDKEYLAST1, V1, GHASHDATA1
- vaesenclast RNDKEYLAST2, V2, GHASHDATA2
- vaesenclast RNDKEYLAST3, V3, GHASHDATA3
-
- // Store the en/decrypted data to DST.
- vmovdqu8 GHASHDATA0, 0*VL(DST)
- vmovdqu8 GHASHDATA1, 1*VL(DST)
- vmovdqu8 GHASHDATA2, 2*VL(DST)
- vmovdqu8 GHASHDATA3, 3*VL(DST)
-
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, DATALEN
+ _aesenclast_and_xor_4x
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, DATALEN
jge .Lcrypt_loop_4x\@
.if \enc
@@ -856,7 +833,7 @@
.Lcrypt_loop_4x_done\@:
// Undo the extra subtraction by 4*VL and check whether data remains.
- add $4*VL, DATALEN
+ sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32
jz .Ldone\@
// The data length isn't a multiple of 4*VL. Process the remaining data
@@ -940,7 +917,7 @@
// GHASH. However, any such blocks are all-zeroes, and the values that
// they're multiplied with are also all-zeroes. Therefore they just add
// 0 * 0 = 0 to the final GHASH result, which makes no difference.
- vmovdqu8 (POWERS_PTR), H_POW1
+ vmovdqu8 (POWERS_PTR), H_POW1
.if \enc
vmovdqu8 V0, V1{%k1}{z}
.endif
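Note: _aesenclast_and_xor_4x above relies on the identity vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), which holds because the last AES round is a fixed function of the state (SubBytes + ShiftRows) followed by an XOR with the round key, so an extra XOR commutes into the key operand and can be computed in parallel with the AES rounds. A toy standalone C model of just that algebra (f() is an arbitrary stand-in for the fixed per-state function; nothing here is kernel API):

    #include <assert.h>
    #include <stdint.h>

    /* Model of one AES last round on a single byte of "state":
     * aesenclast(state, key) = f(state) ^ key.  Any fixed function f
     * of the state suffices to demonstrate the identity. */
    static uint8_t f(uint8_t s)
    {
        return (uint8_t)(s * 167 + 13);   /* arbitrary stand-in */
    }

    static uint8_t aesenclast_model(uint8_t state, uint8_t key)
    {
        return f(state) ^ key;
    }

    int main(void)
    {
        for (unsigned s = 0; s < 256; s++)
            for (unsigned k = 0; k < 256; k++)
                for (unsigned b = 0; b < 256; b++)
                    /* XOR-ing the data into the key first equals
                     * XOR-ing it into the output afterwards. */
                    assert((aesenclast_model(s, k) ^ b) ==
                           aesenclast_model(s, k ^ b));
        return 0;
    }
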
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
index 48f97b79f7a9..8a3e23fbcf85 100644
--- a/arch/x86/crypto/aes-xts-avx-x86_64.S
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -80,22 +80,6 @@
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
.text
-// Function parameters
-.set KEY, %rdi // Initially points to crypto_aes_ctx, then is
- // advanced to point to 7th-from-last round key
-.set SRC, %rsi // Pointer to next source data
-.set DST, %rdx // Pointer to next destination data
-.set LEN, %ecx // Remaining length in bytes
-.set LEN8, %cl
-.set LEN64, %rcx
-.set TWEAK, %r8 // Pointer to next tweak
-
-// %rax holds the AES key length in bytes.
-.set KEYLEN, %eax
-.set KEYLEN64, %rax
-
-// %r9-r11 are available as temporaries.
-
.macro _define_Vi i
.if VL == 16
.set V\i, %xmm\i
@@ -112,41 +96,31 @@
// Define register aliases V0-V15, or V0-V31 if all 32 SIMD registers
// are available, that map to the xmm, ymm, or zmm registers according
// to the selected Vector Length (VL).
- _define_Vi 0
- _define_Vi 1
- _define_Vi 2
- _define_Vi 3
- _define_Vi 4
- _define_Vi 5
- _define_Vi 6
- _define_Vi 7
- _define_Vi 8
- _define_Vi 9
- _define_Vi 10
- _define_Vi 11
- _define_Vi 12
- _define_Vi 13
- _define_Vi 14
- _define_Vi 15
+.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ _define_Vi \i
+.endr
.if USE_AVX10
- _define_Vi 16
- _define_Vi 17
- _define_Vi 18
- _define_Vi 19
- _define_Vi 20
- _define_Vi 21
- _define_Vi 22
- _define_Vi 23
- _define_Vi 24
- _define_Vi 25
- _define_Vi 26
- _define_Vi 27
- _define_Vi 28
- _define_Vi 29
- _define_Vi 30
- _define_Vi 31
+.irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ _define_Vi \i
+.endr
.endif
+ // Function parameters
+ .set KEY, %rdi // Initially points to crypto_aes_ctx, then is
+ // advanced to point to 7th-from-last round key
+ .set SRC, %rsi // Pointer to next source data
+ .set DST, %rdx // Pointer to next destination data
+ .set LEN, %ecx // Remaining length in bytes
+ .set LEN8, %cl
+ .set LEN64, %rcx
+ .set TWEAK, %r8 // Pointer to next tweak
+
+ // %rax holds the AES key length in bytes.
+ .set KEYLEN, %eax
+ .set KEYLEN64, %rax
+
+ // %r9-r11 are available as temporaries.
+
// V0-V3 hold the data blocks during the main loop, or temporary values
// otherwise. V4-V5 hold temporary values.
@@ -214,6 +188,7 @@
.endm
// Move a vector between memory and a register.
+// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -234,11 +209,12 @@
.endm
// XOR two vectors together.
+// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
-.if USE_AVX10
- vpxord \src1, \src2, \dst
-.else
+.if VL < 64
vpxor \src1, \src2, \dst
+.else
+ vpxord \src1, \src2, \dst
.endif
.endm
@@ -259,8 +235,12 @@
vpshufd $0x13, \src, \tmp
vpaddq \src, \src, \dst
vpsrad $31, \tmp, \tmp
+.if USE_AVX10
+ vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst
+.else
vpand GF_POLY_XMM, \tmp, \tmp
vpxor \tmp, \dst, \dst
+.endif
.endm
// Given the XTS tweak(s) in the vector \src, compute the next vector of
@@ -369,9 +349,14 @@
// Do one step in computing the next set of tweaks using the VPCLMULQDQ method
// (the same method _next_tweakvec uses for VL > 16). This means multiplying
-// each tweak by x^(4*VL/16) independently. Since 4*VL/16 is a multiple of 8
-// when VL > 16 (which it is here), the needed shift amounts are byte-aligned,
-// which allows the use of vpsrldq and vpslldq to do 128-bit wide shifts.
+// each tweak by x^(4*VL/16) independently.
+//
+// Since 4*VL/16 is a multiple of 8 when VL > 16 (which it is here), the needed
+// shift amounts are byte-aligned, which allows the use of vpsrldq and vpslldq
+// to do 128-bit wide shifts. The 128-bit left shift (vpslldq) saves
+// instructions directly. The 128-bit right shift (vpsrldq) performs better
+// than a 64-bit right shift on Intel CPUs in the context where it is used here,
+// because it runs on a different execution port from the AES instructions.
.macro _tweak_step_pclmul i
.if \i == 0
vpsrldq $(128 - 4*VL/16) / 8, TWEAK0, NEXT_TWEAK0
@@ -406,7 +391,7 @@
// \i that include at least 0 through 19, then 1000 which signals the last step.
//
// This is used to interleave the computation of the next set of tweaks with the
-// AES en/decryptions, which increases performance in some cases.
+// AES en/decryptions, which increases performance in some cases. Clobbers V5.
.macro _tweak_step i
.if VL == 16
_tweak_step_mulx \i
@@ -443,9 +428,10 @@
// the last round needs different instructions.
//
// An alternative approach would be to roll up all the round loops. We
- // don't do that because it isn't compatible with caching the round keys
- // in registers which we do when possible (see below), and also because
- // it seems unwise to rely *too* heavily on the CPU's branch predictor.
+ // don't do that because (a) it isn't compatible with caching the round
+ // keys in registers which we do when possible (see below), (b) we
+ // interleave the AES rounds with the XTS tweak computation, and (c) it
+ // seems unwise to rely *too* heavily on the CPU's branch predictor.
lea OFFS-16(KEY, KEYLEN64, 4), KEY
// If all 32 SIMD registers are available, cache all the round keys.
@@ -472,90 +458,94 @@
.endif
.endm
-// Do a single round of AES encryption (if \enc==1) or decryption (if \enc==0)
-// on the block(s) in \data using the round key(s) in \key. The register length
-// determines the number of AES blocks en/decrypted.
-.macro _vaes enc, last, key, data
+// Do a single non-last round of AES encryption (if \enc==1) or decryption (if
+// \enc==0) on the block(s) in \data using the round key(s) in \key. The
+// register length determines the number of AES blocks en/decrypted.
+.macro _vaes enc, key, data
.if \enc
-.if \last
- vaesenclast \key, \data, \data
-.else
vaesenc \key, \data, \data
-.endif
-.else
-.if \last
- vaesdeclast \key, \data, \data
.else
vaesdec \key, \data, \data
.endif
+.endm
+
+// Same as _vaes, but does the last round.
+.macro _vaeslast enc, key, data
+.if \enc
+ vaesenclast \key, \data, \data
+.else
+ vaesdeclast \key, \data, \data
.endif
.endm
-// Do a single round of AES en/decryption on the block(s) in \data, using the
-// same key for all block(s). The round key is loaded from the appropriate
-// register or memory location for round \i. May clobber V4.
-.macro _vaes_1x enc, last, i, xmm_suffix, data
+// Do a single non-last round of AES en/decryption on the block(s) in \data,
+// using the same key for all block(s). The round key is loaded from the
+// appropriate register or memory location for round \i. May clobber \tmp.
+.macro _vaes_1x enc, i, xmm_suffix, data, tmp
.if USE_AVX10
- _vaes \enc, \last, KEY\i\xmm_suffix, \data
+ _vaes \enc, KEY\i\xmm_suffix, \data
.else
.ifnb \xmm_suffix
- _vaes \enc, \last, (\i-7)*16(KEY), \data
+ _vaes \enc, (\i-7)*16(KEY), \data
.else
- _vbroadcast128 (\i-7)*16(KEY), V4
- _vaes \enc, \last, V4, \data
+ _vbroadcast128 (\i-7)*16(KEY), \tmp
+ _vaes \enc, \tmp, \data
.endif
.endif
.endm
-// Do a single round of AES en/decryption on the blocks in registers V0-V3,
-// using the same key for all blocks. The round key is loaded from the
+// Do a single non-last round of AES en/decryption on the blocks in registers
+// V0-V3, using the same key for all blocks. The round key is loaded from the
// appropriate register or memory location for round \i. In addition, does two
-// steps of the computation of the next set of tweaks. May clobber V4.
-.macro _vaes_4x enc, last, i
+// steps of the computation of the next set of tweaks. May clobber V4 and V5.
+.macro _vaes_4x enc, i
.if USE_AVX10
_tweak_step (2*(\i-5))
- _vaes \enc, \last, KEY\i, V0
- _vaes \enc, \last, KEY\i, V1
+ _vaes \enc, KEY\i, V0
+ _vaes \enc, KEY\i, V1
_tweak_step (2*(\i-5) + 1)
- _vaes \enc, \last, KEY\i, V2
- _vaes \enc, \last, KEY\i, V3
+ _vaes \enc, KEY\i, V2
+ _vaes \enc, KEY\i, V3
.else
_vbroadcast128 (\i-7)*16(KEY), V4
_tweak_step (2*(\i-5))
- _vaes \enc, \last, V4, V0
- _vaes \enc, \last, V4, V1
+ _vaes \enc, V4, V0
+ _vaes \enc, V4, V1
_tweak_step (2*(\i-5) + 1)
- _vaes \enc, \last, V4, V2
- _vaes \enc, \last, V4, V3
+ _vaes \enc, V4, V2
+ _vaes \enc, V4, V3
.endif
.endm
// Do tweaked AES en/decryption (i.e., XOR with \tweak, then AES en/decrypt,
// then XOR with \tweak again) of the block(s) in \data. To process a single
// block, use xmm registers and set \xmm_suffix=_XMM. To process a vector of
-// length VL, use V* registers and leave \xmm_suffix empty. May clobber V4.
-.macro _aes_crypt enc, xmm_suffix, tweak, data
+// length VL, use V* registers and leave \xmm_suffix empty. Clobbers \tmp.
+.macro _aes_crypt enc, xmm_suffix, tweak, data, tmp
_xor3 KEY0\xmm_suffix, \tweak, \data
cmp $24, KEYLEN
jl .Laes128\@
je .Laes192\@
- _vaes_1x \enc, 0, 1, \xmm_suffix, \data
- _vaes_1x \enc, 0, 2, \xmm_suffix, \data
+ _vaes_1x \enc, 1, \xmm_suffix, \data, tmp=\tmp
+ _vaes_1x \enc, 2, \xmm_suffix, \data, tmp=\tmp
.Laes192\@:
- _vaes_1x \enc, 0, 3, \xmm_suffix, \data
- _vaes_1x \enc, 0, 4, \xmm_suffix, \data
+ _vaes_1x \enc, 3, \xmm_suffix, \data, tmp=\tmp
+ _vaes_1x \enc, 4, \xmm_suffix, \data, tmp=\tmp
.Laes128\@:
- _vaes_1x \enc, 0, 5, \xmm_suffix, \data
- _vaes_1x \enc, 0, 6, \xmm_suffix, \data
- _vaes_1x \enc, 0, 7, \xmm_suffix, \data
- _vaes_1x \enc, 0, 8, \xmm_suffix, \data
- _vaes_1x \enc, 0, 9, \xmm_suffix, \data
- _vaes_1x \enc, 0, 10, \xmm_suffix, \data
- _vaes_1x \enc, 0, 11, \xmm_suffix, \data
- _vaes_1x \enc, 0, 12, \xmm_suffix, \data
- _vaes_1x \enc, 0, 13, \xmm_suffix, \data
- _vaes_1x \enc, 1, 14, \xmm_suffix, \data
- _vpxor \tweak, \data, \data
+.irp i, 5,6,7,8,9,10,11,12,13
+ _vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp
+.endr
+.if USE_AVX10
+ vpxord KEY14\xmm_suffix, \tweak, \tmp
+.else
+.ifnb \xmm_suffix
+ vpxor 7*16(KEY), \tweak, \tmp
+.else
+ _vbroadcast128 7*16(KEY), \tmp
+ vpxor \tweak, \tmp, \tmp
+.endif
+.endif
+ _vaeslast \enc, \tmp, \data
.endm
.macro _aes_xts_crypt enc
@@ -581,7 +571,7 @@
// Compute the first set of tweaks TWEAK[0-3].
_compute_first_set_of_tweaks
- sub $4*VL, LEN
+ add $-4*VL, LEN // shorter than 'sub 4*VL' when VL=32
jl .Lhandle_remainder\@
.Lmain_loop\@:
@@ -589,10 +579,10 @@
// XOR each source block with its tweak and the zero-th round key.
.if USE_AVX10
- vmovdqu8 0*VL(SRC), V0
- vmovdqu8 1*VL(SRC), V1
- vmovdqu8 2*VL(SRC), V2
- vmovdqu8 3*VL(SRC), V3
+ _vmovdqu 0*VL(SRC), V0
+ _vmovdqu 1*VL(SRC), V1
+ _vmovdqu 2*VL(SRC), V2
+ _vmovdqu 3*VL(SRC), V3
vpternlogd $0x96, TWEAK0, KEY0, V0
vpternlogd $0x96, TWEAK1, KEY0, V1
vpternlogd $0x96, TWEAK2, KEY0, V2
@@ -612,28 +602,43 @@
je .Laes192\@
// Do all the AES rounds on the data blocks, interleaved with
// the computation of the next set of tweaks.
- _vaes_4x \enc, 0, 1
- _vaes_4x \enc, 0, 2
+ _vaes_4x \enc, 1
+ _vaes_4x \enc, 2
.Laes192\@:
- _vaes_4x \enc, 0, 3
- _vaes_4x \enc, 0, 4
+ _vaes_4x \enc, 3
+ _vaes_4x \enc, 4
.Laes128\@:
- _vaes_4x \enc, 0, 5
- _vaes_4x \enc, 0, 6
- _vaes_4x \enc, 0, 7
- _vaes_4x \enc, 0, 8
- _vaes_4x \enc, 0, 9
- _vaes_4x \enc, 0, 10
- _vaes_4x \enc, 0, 11
- _vaes_4x \enc, 0, 12
- _vaes_4x \enc, 0, 13
- _vaes_4x \enc, 1, 14
-
- // XOR in the tweaks again.
- _vpxor TWEAK0, V0, V0
- _vpxor TWEAK1, V1, V1
- _vpxor TWEAK2, V2, V2
- _vpxor TWEAK3, V3, V3
+.irp i, 5,6,7,8,9,10,11,12,13
+ _vaes_4x \enc, \i
+.endr
+ // Do the last AES round, then XOR the results with the tweaks again.
+ // Reduce latency by doing the XOR before the vaesenclast, utilizing the
+ // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a)
+ // (and likewise for vaesdeclast).
+.if USE_AVX10
+ _tweak_step 18
+ _tweak_step 19
+ vpxord TWEAK0, KEY14, V4
+ vpxord TWEAK1, KEY14, V5
+ _vaeslast \enc, V4, V0
+ _vaeslast \enc, V5, V1
+ vpxord TWEAK2, KEY14, V4
+ vpxord TWEAK3, KEY14, V5
+ _vaeslast \enc, V4, V2
+ _vaeslast \enc, V5, V3
+.else
+ _vbroadcast128 7*16(KEY), V4
+ _tweak_step 18 // uses V5
+ _tweak_step 19 // uses V5
+ vpxor TWEAK0, V4, V5
+ _vaeslast \enc, V5, V0
+ vpxor TWEAK1, V4, V5
+ _vaeslast \enc, V5, V1
+ vpxor TWEAK2, V4, V5
+ vpxor TWEAK3, V4, V4
+ _vaeslast \enc, V5, V2
+ _vaeslast \enc, V4, V3
+.endif
// Store the destination blocks.
_vmovdqu V0, 0*VL(DST)
@@ -644,9 +649,9 @@
// Finish computing the next set of tweaks.
_tweak_step 1000
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, LEN
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, LEN
jge .Lmain_loop\@
// Check for the uncommon case where the data length isn't a multiple of
@@ -670,7 +675,7 @@
jl .Lvec_at_a_time_done\@
.Lvec_at_a_time\@:
_vmovdqu (SRC), V0
- _aes_crypt \enc, , TWEAK0, V0
+ _aes_crypt \enc, , TWEAK0, V0, tmp=V1
_vmovdqu V0, (DST)
_next_tweakvec TWEAK0, V0, V1, TWEAK0
add $VL, SRC
@@ -687,7 +692,7 @@
jl .Lblock_at_a_time_done\@
.Lblock_at_a_time\@:
vmovdqu (SRC), %xmm0
- _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
_next_tweak TWEAK0_XMM, %xmm0, TWEAK0_XMM
add $16, SRC
@@ -715,7 +720,7 @@
// Do it now by advancing the tweak and decrypting the last full block.
_next_tweak TWEAK0_XMM, %xmm0, TWEAK1_XMM
vmovdqu (SRC), %xmm0
- _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
.endif
.if USE_AVX10
@@ -758,47 +763,49 @@
vpblendvb %xmm3, %xmm0, %xmm1, %xmm0
.endif
// En/decrypt again and store the last full block.
- _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
jmp .Ldone\@
.endm
// void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
// u8 iv[AES_BLOCK_SIZE]);
+//
+// Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes
+// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10.
SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
- vmovdqu (%rsi), %xmm0
- vpxor (%rdi), %xmm0, %xmm0
- movl 480(%rdi), %eax // AES key length
- lea -16(%rdi, %rax, 4), %rdi
- cmp $24, %eax
+ .set TWEAK_KEY, %rdi
+ .set IV, %rsi
+ .set KEYLEN, %eax
+ .set KEYLEN64, %rax
+
+ vmovdqu (IV), %xmm0
+ vpxor (TWEAK_KEY), %xmm0, %xmm0
+ movl 480(TWEAK_KEY), KEYLEN
+ lea -16(TWEAK_KEY, KEYLEN64, 4), TWEAK_KEY
+ cmp $24, KEYLEN
jl .Lencrypt_iv_aes128
je .Lencrypt_iv_aes192
- vaesenc -6*16(%rdi), %xmm0, %xmm0
- vaesenc -5*16(%rdi), %xmm0, %xmm0
+ vaesenc -6*16(TWEAK_KEY), %xmm0, %xmm0
+ vaesenc -5*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes192:
- vaesenc -4*16(%rdi), %xmm0, %xmm0
- vaesenc -3*16(%rdi), %xmm0, %xmm0
+ vaesenc -4*16(TWEAK_KEY), %xmm0, %xmm0
+ vaesenc -3*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes128:
- vaesenc -2*16(%rdi), %xmm0, %xmm0
- vaesenc -1*16(%rdi), %xmm0, %xmm0
- vaesenc 0*16(%rdi), %xmm0, %xmm0
- vaesenc 1*16(%rdi), %xmm0, %xmm0
- vaesenc 2*16(%rdi), %xmm0, %xmm0
- vaesenc 3*16(%rdi), %xmm0, %xmm0
- vaesenc 4*16(%rdi), %xmm0, %xmm0
- vaesenc 5*16(%rdi), %xmm0, %xmm0
- vaesenc 6*16(%rdi), %xmm0, %xmm0
- vaesenclast 7*16(%rdi), %xmm0, %xmm0
- vmovdqu %xmm0, (%rsi)
+.irp i, -2,-1,0,1,2,3,4,5,6
+ vaesenc \i*16(TWEAK_KEY), %xmm0, %xmm0
+.endr
+ vaesenclast 7*16(TWEAK_KEY), %xmm0, %xmm0
+ vmovdqu %xmm0, (IV)
RET
SYM_FUNC_END(aes_xts_encrypt_iv)
// Below are the actual AES-XTS encryption and decryption functions,
// instantiated from the above macro. They all have the following prototype:
//
-// void (*xts_asm_func)(const struct crypto_aes_ctx *key,
-// const u8 *src, u8 *dst, unsigned int len,
-// u8 tweak[AES_BLOCK_SIZE]);
+// void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
+// const u8 *src, u8 *dst, int len,
+// u8 tweak[AES_BLOCK_SIZE]);
//
// |key| is the data key. |tweak| contains the next tweak; the encryption of
// the original IV with the tweak key was already done. This function supports
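Note: _next_tweak above (the vpshufd/vpsrad mask plus the GF-polynomial AND/XOR, folded into a single vpternlogd on AVX10) multiplies each tweak by x in GF(2^128). A byte-wise standalone C reference for the same operation, using the standard XTS convention of a little-endian tweak and reduction constant 0x87 (a model for illustration, not the kernel's implementation):

    #include <assert.h>
    #include <stdint.h>

    /* Multiply a 16-byte XTS tweak by x in GF(2^128) with the
     * polynomial x^128 + x^7 + x^2 + x + 1.  The tweak is little-endian:
     * byte 0 holds the lowest-order coefficients, so the bit shifted out
     * of byte 15's MSB is reduced back into byte 0. */
    static void xts_mul_x(uint8_t t[16])
    {
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
            uint8_t next_carry = t[i] >> 7;
            t[i] = (uint8_t)((t[i] << 1) | carry);
            carry = next_carry;
        }
        if (carry)
            t[0] ^= 0x87;   /* x^128 = x^7 + x^2 + x + 1 */
    }

    int main(void)
    {
        uint8_t t[16] = { 1 };          /* the polynomial "1" */
        xts_mul_x(t);                   /* 1 * x = x */
        assert(t[0] == 2);

        uint8_t u[16] = { 0 };
        u[15] = 0x80;                   /* x^127 */
        xts_mul_x(u);                   /* x^128 reduces to 0x87 */
        assert(u[0] == 0x87 && u[15] == 0);
        return 0;
    }
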
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index fbf43482e1f5..11e95fc62636 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -505,7 +505,7 @@ static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key,
typedef void (*xts_encrypt_iv_func)(const struct crypto_aes_ctx *tweak_key,
u8 iv[AES_BLOCK_SIZE]);
typedef void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE]);
/* This handles cases where the source and/or destination span pages. */
@@ -624,14 +624,14 @@ static void aesni_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
}
static void aesni_xts_encrypt(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_enc(key, dst, src, len, tweak);
}
static void aesni_xts_decrypt(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_dec(key, dst, src, len, tweak);
@@ -790,10 +790,10 @@ asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
\
asmlinkage void \
aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
- u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+ u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
asmlinkage void \
aes_xts_decrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
- u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+ u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
\
static int xts_encrypt_##suffix(struct skcipher_request *req) \
{ \
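Note: the unsigned int -> int change for len matches the new assembly bookkeeping: the length is pre-decremented by 4*VL ('add $-4*VL', chosen because -128 fits in a sign-extended imm8 when VL=32 while +128 needs an imm32) and then tested with signed jl/jge, so the value must be allowed to go negative. The equivalent loop shape in C (an illustrative sketch; VL stands for the vector length, as in the assembly):

    /* Process full 4*VL-byte chunks, then the tail.  The length is
     * pre-decremented so each loop test is a simple sign check, which
     * requires a signed type that may legitimately go negative. */
    static void crypt_all(int len)
    {
        len -= 4 * VL;              /* 'add $-4*VL, DATALEN' */
        while (len >= 0) {          /* 'jl' skips the loop entirely */
            /* ... en/decrypt one 4*VL-byte chunk ... */
            len -= 4 * VL;          /* 'jge' loops at the bottom */
        }
        len += 4 * VL;              /* undo the extra subtraction */
        if (len != 0) {
            /* ... handle the remaining < 4*VL bytes ... */
        }
    }
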
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 552f2df0643f..26c5f2ee5d10 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -94,7 +94,6 @@ static struct crypto_alg bf_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index f110708c8038..3bd37d664121 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1313,7 +1313,6 @@ static struct crypto_alg camellia_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index abb8b1fe123b..e88439d3828e 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -291,7 +291,6 @@ static struct crypto_alg des3_ede_cipher = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0614beece279..4c67184dc573 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -68,7 +68,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/Kconfig b/crypto/Kconfig
index b459e8a23acc..74ae5f52b784 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -684,14 +684,6 @@ config CRYPTO_HCTR2
See https://eprint.iacr.org/2021/1441
-config CRYPTO_KEYWRAP
- tristate "KW (AES Key Wrap)"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
- and RFC3394) without padding.
-
config CRYPTO_LRW
tristate "LRW (Liskov Rivest Wagner)"
select CRYPTO_LIB_GF128MUL
@@ -1029,16 +1021,6 @@ config CRYPTO_STREEBOG
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
-config CRYPTO_VMAC
- tristate "VMAC"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- VMAC is a message authentication algorithm designed for
- very high speed on 64-bit architectures.
-
- See https://fastcrypto.org/vmac for further information.
-
config CRYPTO_WP512
tristate "Whirlpool"
select CRYPTO_HASH
diff --git a/crypto/Makefile b/crypto/Makefile
index ffd94c7f2643..f67e853c4690 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -69,7 +69,6 @@ obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
-obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -95,7 +94,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
-obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 4fdb53435827..6cbff298722b 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -516,7 +516,6 @@ static struct aead_alg crypto_aegis128_alg_generic = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
@@ -535,7 +534,6 @@ static struct aead_alg crypto_aegis128_alg_simd = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bcd9de009a91..b08b89ec26ec 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -27,6 +27,93 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+struct crypto_hash_walk {
+ char *data;
+
+ unsigned int offset;
+ unsigned int flags;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+};
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+ unsigned int offset = walk->offset;
+ unsigned int nbytes = min(walk->entrylen,
+ ((unsigned int)(PAGE_SIZE)) - offset);
+
+ walk->data = kmap_local_page(walk->pg);
+ walk->data += offset;
+ walk->entrylen -= nbytes;
+ return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+ struct scatterlist *sg;
+
+ sg = walk->sg;
+ walk->offset = sg->offset;
+ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->offset = offset_in_page(walk->offset);
+ walk->entrylen = sg->length;
+
+ if (walk->entrylen > walk->total)
+ walk->entrylen = walk->total;
+ walk->total -= walk->entrylen;
+
+ return hash_walk_next(walk);
+}
+
+static int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk)
+{
+ walk->total = req->nbytes;
+
+ if (!walk->total) {
+ walk->entrylen = 0;
+ return 0;
+ }
+
+ walk->sg = req->src;
+ walk->flags = req->base.flags;
+
+ return hash_walk_new_entry(walk);
+}
+
+static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+ walk->data -= walk->offset;
+
+ kunmap_local(walk->data);
+ crypto_yield(walk->flags);
+
+ if (err)
+ return err;
+
+ if (walk->entrylen) {
+ walk->offset = 0;
+ walk->pg++;
+ return hash_walk_next(walk);
+ }
+
+ if (!walk->total)
+ return 0;
+
+ walk->sg = sg_next(walk->sg);
+
+ return hash_walk_new_entry(walk);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
* algorithm), this returns the underlying shash tfm.
@@ -137,77 +224,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
return 0;
}
-static int hash_walk_next(struct crypto_hash_walk *walk)
-{
- unsigned int offset = walk->offset;
- unsigned int nbytes = min(walk->entrylen,
- ((unsigned int)(PAGE_SIZE)) - offset);
-
- walk->data = kmap_local_page(walk->pg);
- walk->data += offset;
- walk->entrylen -= nbytes;
- return nbytes;
-}
-
-static int hash_walk_new_entry(struct crypto_hash_walk *walk)
-{
- struct scatterlist *sg;
-
- sg = walk->sg;
- walk->offset = sg->offset;
- walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
- walk->offset = offset_in_page(walk->offset);
- walk->entrylen = sg->length;
-
- if (walk->entrylen > walk->total)
- walk->entrylen = walk->total;
- walk->total -= walk->entrylen;
-
- return hash_walk_next(walk);
-}
-
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
-{
- walk->data -= walk->offset;
-
- kunmap_local(walk->data);
- crypto_yield(walk->flags);
-
- if (err)
- return err;
-
- if (walk->entrylen) {
- walk->offset = 0;
- walk->pg++;
- return hash_walk_next(walk);
- }
-
- if (!walk->total)
- return 0;
-
- walk->sg = sg_next(walk->sg);
-
- return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
-
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk)
-{
- walk->total = req->nbytes;
-
- if (!walk->total) {
- walk->entrylen = 0;
- return 0;
- }
-
- walk->sg = req->src;
- walk->flags = req->base.flags;
-
- return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 16f7c7a9d8ab..5318c214debb 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -407,6 +407,7 @@ EXPORT_SYMBOL_GPL(crypto_remove_final);
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
+ bool test_started = false;
LIST_HEAD(algs_to_put);
int err;
@@ -418,17 +419,19 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
- bool test_started = crypto_boot_test_finished();
-
+ test_started = crypto_boot_test_finished();
larval->test_started = test_started;
- if (test_started)
- crypto_schedule_test(larval);
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
- crypto_remove_final(&algs_to_put);
+
+ if (test_started)
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
@@ -642,10 +645,8 @@ int crypto_register_instance(struct crypto_template *tmpl,
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
- else if (larval) {
+ else if (larval)
larval->test_started = true;
- crypto_schedule_test(larval);
- }
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
@@ -655,7 +656,12 @@ unlock:
if (IS_ERR(larval))
return PTR_ERR(larval);
- crypto_remove_final(&algs_to_put);
+
+ if (larval)
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
@@ -1016,6 +1022,8 @@ static void __init crypto_start_tests(void)
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return;
+ set_crypto_boot_test_finished();
+
for (;;) {
struct crypto_larval *larval = NULL;
struct crypto_alg *q;
@@ -1038,7 +1046,6 @@ static void __init crypto_start_tests(void)
l->test_started = true;
larval = l;
- crypto_schedule_test(larval);
break;
}
@@ -1046,9 +1053,9 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
- }
- set_crypto_boot_test_finished();
+ crypto_schedule_test(larval);
+ }
}
static int __init crypto_algapi_init(void)
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 9f0cf61bbc6e..886e7c913688 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -33,7 +33,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
@@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
/* * map cipher key to initial key state (mu): */
for (i = 0; i < N; i++)
- kappa[i] = be32_to_cpu(key[i]);
+ kappa[i] = get_unaligned_be32(&in_key[4 * i]);
/*
* generate R + 1 round keys:
@@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
- u8 *ciphertext, const u8 *plaintext, const int R)
+ u8 *dst, const u8 *src, const int R)
{
- const __be32 *src = (const __be32 *)plaintext;
- __be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
@@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
- state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
+ state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
@@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
*/
for (i = 0; i < 4; i++)
- dst[i] = cpu_to_be32(inter[i]);
+ put_unaligned_be32(inter[i], &dst[4 * i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
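The anubis conversion above — drop the __be32 casts and cra_alignmask, and read big-endian words through the unaligned accessors instead — is the same pattern applied to aria, khazad, seed and tea further down. A standalone, endian-independent model of what get_unaligned_be32() computes (the in-kernel helper is arch-optimized; the byte composition below is only a portable reference):

#include <stdint.h>
#include <stdio.h>

/* portable model of the kernel's get_unaligned_be32() */
static uint32_t get_unaligned_be32_model(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* works at any byte offset, so the cipher no longer needs
	 * cra_alignmask to force aligned buffers */
	const uint8_t key[5] = { 0xff, 0x00, 0x01, 0x02, 0x03 };

	printf("0x%08x\n", get_unaligned_be32_model(&key[1]));	/* 0x00010203 */
	return 0;
}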
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index d96dfc4fdde6..bd359d3313c2 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -15,6 +15,7 @@
*/
#include <crypto/aria.h>
+#include <linux/unaligned.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
@@ -27,7 +28,6 @@ static const u32 key_rc[20] = {
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
- const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
@@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
ck = &key_rc[(key_len - 16) / 2];
- w0[0] = be32_to_cpu(key[0]);
- w0[1] = be32_to_cpu(key[1]);
- w0[2] = be32_to_cpu(key[2]);
- w0[3] = be32_to_cpu(key[3]);
+ w0[0] = get_unaligned_be32(&in_key[0]);
+ w0[1] = get_unaligned_be32(&in_key[4]);
+ w0[2] = get_unaligned_be32(&in_key[8]);
+ w0[3] = get_unaligned_be32(&in_key[12]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
@@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
- w1[0] = be32_to_cpu(key[4]);
- w1[1] = be32_to_cpu(key[5]);
+ w1[0] = get_unaligned_be32(&in_key[16]);
+ w1[1] = get_unaligned_be32(&in_key[20]);
if (key_len > 24) {
- w1[2] = be32_to_cpu(key[6]);
- w1[3] = be32_to_cpu(key[7]);
+ w1[2] = get_unaligned_be32(&in_key[24]);
+ w1[3] = get_unaligned_be32(&in_key[28]);
} else {
w1[2] = 0;
w1[3] = 0;
@@ -195,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
- reg0 = be32_to_cpu(src[0]);
- reg1 = be32_to_cpu(src[1]);
- reg2 = be32_to_cpu(src[2]);
- reg3 = be32_to_cpu(src[3]);
+ reg0 = get_unaligned_be32(&in[0]);
+ reg1 = get_unaligned_be32(&in[4]);
+ reg2 = get_unaligned_be32(&in[8]);
+ reg3 = get_unaligned_be32(&in[12]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
@@ -241,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
- dst[0] = cpu_to_be32(reg0);
- dst[1] = cpu_to_be32(reg1);
- dst[2] = cpu_to_be32(reg2);
- dst[3] = cpu_to_be32(reg3);
+ put_unaligned_be32(reg0, &out[0]);
+ put_unaligned_be32(reg1, &out[4]);
+ put_unaligned_be32(reg2, &out[8]);
+ put_unaligned_be32(reg3, &out[12]);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
@@ -284,7 +282,6 @@ static struct crypto_alg aria_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 43af5fa510c0..ba2d9d1ea235 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -18,16 +18,6 @@
#include "asymmetric_keys.h"
-const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
- [VERIFYING_MODULE_SIGNATURE] = "mod sig",
- [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig",
- [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig",
- [VERIFYING_KEY_SIGNATURE] = "key sig",
- [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig",
- [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig",
-};
-EXPORT_SYMBOL_GPL(key_being_used_for);
-
static LIST_HEAD(asymmetric_key_parsers);
static DECLARE_RWSEM(asymmetric_key_parsers_sem);
diff --git a/crypto/fips.c b/crypto/fips.c
index 8a784018ebfc..a58e7750f532 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
+#include <linux/string_choices.h>
#include <generated/utsrelease.h>
int fips_enabled;
@@ -24,8 +25,7 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
static int fips_enable(char *str)
{
fips_enabled = !!simple_strtol(str, NULL, 0);
- printk(KERN_INFO "fips mode: %s\n",
- fips_enabled ? "enabled" : "disabled");
+ pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}
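str_enabled_disabled() comes from the newly included <linux/string_choices.h>; the proc.c hunk below makes the matching switch to str_yes_no() and str_no_yes(). These helpers are just named ternaries, modelled here in userspace:

#include <stdio.h>

/* userspace models of the <linux/string_choices.h> helpers */
static const char *str_enabled_disabled(int v)
{
	return v ? "enabled" : "disabled";
}

static const char *str_yes_no(int v)
{
	return v ? "yes" : "no";
}

static const char *str_no_yes(int v)
{
	/* inverted sense, as used for the FIPS-internal flag below */
	return v ? "no" : "yes";
}

int main(void)
{
	printf("fips mode: %s\n", str_enabled_disabled(1));
	printf("internal : %s\n", str_yes_no(0));
	printf("fips     : %s\n", str_no_yes(0));
	return 0;
}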
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
deleted file mode 100644
index 385ffdfd5a9b..000000000000
--- a/crypto/keywrap.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Key Wrapping: RFC3394 / NIST SP800-38F
- *
- * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL2
- * are required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * Note for using key wrapping:
- *
- * * The result of the encryption operation is the ciphertext starting
- * with the 2nd semiblock. The first semiblock is provided as the IV.
- * The IV used to start the encryption operation is the default IV.
- *
- * * The input for the decryption is the first semiblock handed in as an
- * IV. The ciphertext is the data starting with the 2nd semiblock. The
- * return code of the decryption operation will be EBADMSG in case an
- * integrity error occurs.
- *
- * To obtain the full result of an encryption as expected by SP800-38F, the
- * caller must allocate a buffer of plaintext + 8 bytes:
- *
- * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
- * u8 data[datalen];
- * u8 *iv = data;
- * u8 *pt = data + crypto_skcipher_ivsize(tfm);
- * <ensure that pt contains the plaintext of size ptlen>
- * sg_init_one(&sg, pt, ptlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
- *
- * ==> After encryption, data now contains full KW result as per SP800-38F.
- *
- * In case of decryption, ciphertext now already has the expected length
- * and must be segmented appropriately:
- *
- * unsigned int datalen = CTLEN;
- * u8 data[datalen];
- * <ensure that data contains full ciphertext>
- * u8 *iv = data;
- * u8 *ct = data + crypto_skcipher_ivsize(tfm);
- * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- * sg_init_one(&sg, ct, ctlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
- *
- * ==> After decryption (which hopefully does not return EBADMSG), the ct
- * pointer now points to the plaintext of size ctlen.
- *
- * Note 2: KWP is not implemented as this would defy in-place operation.
- * Anyone who wants to wrap non-aligned data should simply pad
- * the input with zeros to fill it up to the 8-byte boundary.
- */
-
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-
-struct crypto_kw_block {
-#define SEMIBSIZE 8
- __be64 A;
- __be64 R;
-};
-
-/*
- * Fast-forward the SGL to the "end" length minus SEMIBSIZE.
- * The start position in the SGL reached by the fast-forward is
- * returned via the walk variable.
- */
-static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
- struct scatterlist *sg,
- unsigned int end)
-{
- unsigned int skip = 0;
-
- /* The caller should only operate on full SEMIBLOCKs. */
- BUG_ON(end < SEMIBSIZE);
-
- skip = end - SEMIBSIZE;
- while (sg) {
- if (sg->length > skip) {
- scatterwalk_start(walk, sg);
- scatterwalk_advance(walk, skip);
- break;
- }
-
- skip -= sg->length;
- sg = sg_next(sg);
- }
-}
-
-static int crypto_kw_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 6 * ((req->cryptlen) >> 3);
- unsigned int i;
- int ret = 0;
-
- /*
- * Require at least 2 semiblocks (note: the 3rd semiblock that is
- * required by SP800-38F is the IV).
- */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
- /* Place the IV into block A */
- memcpy(&block.A, req->iv, SEMIBSIZE);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- while (nbytes) {
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t--;
- /* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
-
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
- ret = -EBADMSG;
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return ret;
-}
-
-static int crypto_kw_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 1;
- unsigned int i;
-
- /*
- * Require at least 2 semiblocks (note: the 3rd semiblock that is
- * required by SP800-38F is the IV, which occupies the first semiblock).
- * This means that the dst memory must be one semiblock larger than src.
- * Also ensure that the given data is aligned to a semiblock boundary.
- */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
- /*
- * Place the predefined IV into block A -- for encrypt, the caller
- * does not need to provide an IV, but must fetch the final IV.
- */
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- scatterwalk_start(&src_walk, src);
- scatterwalk_start(&dst_walk, dst);
-
- while (nbytes) {
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t++;
-
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* establish the IV for the caller to pick up */
- memcpy(req->iv, &block.A, SEMIBSIZE);
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return 0;
-}
-
-static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- inst = skcipher_alloc_instance_simple(tmpl, tb);
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- alg = skcipher_ialg_simple(inst);
-
- err = -EINVAL;
- /* Section 5.1 requirement for KW */
- if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
- goto out_free_inst;
-
- inst->alg.base.cra_blocksize = SEMIBSIZE;
- inst->alg.base.cra_alignmask = 0;
- inst->alg.ivsize = SEMIBSIZE;
-
- inst->alg.encrypt = crypto_kw_encrypt;
- inst->alg.decrypt = crypto_kw_decrypt;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err) {
-out_free_inst:
- inst->free(inst);
- }
-
- return err;
-}
-
-static struct crypto_template crypto_kw_tmpl = {
- .name = "kw",
- .create = crypto_kw_create,
- .module = THIS_MODULE,
-};
-
-static int __init crypto_kw_init(void)
-{
- return crypto_register_template(&crypto_kw_tmpl);
-}
-
-static void __exit crypto_kw_exit(void)
-{
- crypto_unregister_template(&crypto_kw_tmpl);
-}
-
-subsys_initcall(crypto_kw_init);
-module_exit(crypto_kw_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
-MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
-MODULE_ALIAS_CRYPTO("kw");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 70cafe73f974..7ad338ca2c18 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
@@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
- /* key is supposed to be 32-bit aligned */
- K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
- K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
+ K2 = get_unaligned_be64(&in_key[0]);
+ K1 = get_unaligned_be64(&in_key[8]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
+ u8 *dst, const u8 *src)
{
- const __be64 *src = (const __be64 *)plaintext;
- __be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
- state = be64_to_cpu(*src) ^ roundKey[0];
+ state = get_unaligned_be64(src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
@@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
- *dst = cpu_to_be64(state);
+ put_unaligned_be64(state, dst);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
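Besides the accessor switch, the khazad hunk collapses two 32-bit big-endian loads plus a shift-or into a single get_unaligned_be64(). A standalone check, using portable models of both helpers, that the rewrite is value-preserving:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_model(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t get_unaligned_be64_model(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	const uint8_t in_key[8] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
	};
	uint64_t k_old = ((uint64_t)be32_model(&in_key[0]) << 32) |
			 be32_model(&in_key[4]);
	uint64_t k_new = get_unaligned_be64_model(in_key);

	printf("old == new: %d\n", k_old == k_new);	/* prints 1 */
	return 0;
}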
diff --git a/crypto/proc.c b/crypto/proc.c
index 56c7c78df297..522b27d90d29 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -47,13 +47,10 @@ static int c_show(struct seq_file *m, void *p)
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
seq_printf(m, "internal : %s\n",
- (alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
- "yes" : "no");
- if (fips_enabled) {
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL));
+ if (fips_enabled)
seq_printf(m, "fips : %s\n",
- (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ?
- "no" : "yes");
- }
+ str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL));
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
diff --git a/crypto/seed.c b/crypto/seed.c
index d0506ade2a5f..d05d8ed909fa 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
@@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
- const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
- x1 = be32_to_cpu(key[0]);
- x2 = be32_to_cpu(key[1]);
- x3 = be32_to_cpu(key[2]);
- x4 = be32_to_cpu(key[3]);
+ x1 = get_unaligned_be32(&in_key[0]);
+ x2 = get_unaligned_be32(&in_key[4]);
+ x3 = get_unaligned_be32(&in_key[8]);
+ x4 = get_unaligned_be32(&in_key[12]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
@@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
@@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
/* decrypt a block of text */
@@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
@@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
@@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/sig.c b/crypto/sig.c
index 5e1f1f739da2..dfc7cae90802 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -15,8 +15,6 @@
#include "internal.h"
-#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
-
static void crypto_sig_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_sig *sig = __crypto_sig_tfm(tfm);
@@ -73,7 +71,7 @@ static const struct crypto_type crypto_sig_type = {
.report = crypto_sig_report,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_SIG_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
};
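CRYPTO_ALG_TYPE_MASK is 0x0000000f, so this widens the sig lookup mask from 0xe to 0xf: bit 0 of the algorithm type is no longer ignored when matching, and the type must now match exactly. A toy model of the flag test used during lookup (the TYPE_A/TYPE_B values are hypothetical; only the mask comes from <linux/crypto.h>):

#include <stdio.h>

#define CRYPTO_ALG_TYPE_MASK	0x0000000fU	/* from <linux/crypto.h> */

/* hypothetical type values that differ only in bit 0 */
#define TYPE_A	0x4U
#define TYPE_B	0x5U

/* models the type check performed during algorithm lookup */
static int type_matches(unsigned int cra_flags, unsigned int type,
			unsigned int mask)
{
	return !((cra_flags ^ type) & mask);
}

int main(void)
{
	/* old maskset 0xe ignored bit 0, conflating TYPE_A with TYPE_B */
	printf("mask 0xe: %d\n", type_matches(TYPE_A, TYPE_B, 0xeU));
	/* new maskset 0xf requires an exact type match */
	printf("mask 0xf: %d\n",
	       type_matches(TYPE_A, TYPE_B, CRYPTO_ALG_TYPE_MASK));
	return 0;
}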
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f74e4d0d87a2..a9eb2dcf2898 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,7 +17,6 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
@@ -29,19 +28,10 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
- SKCIPHER_WALK_PHYS = 1 << 0,
- SKCIPHER_WALK_SLOW = 1 << 1,
- SKCIPHER_WALK_COPY = 1 << 2,
- SKCIPHER_WALK_DIFF = 1 << 3,
- SKCIPHER_WALK_SLEEP = 1 << 4,
-};
-
-struct skcipher_walk_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- u8 *data;
- u8 buffer[];
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
};
static const struct crypto_type crypto_skcipher_type;
@@ -73,16 +63,6 @@ static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
-{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
@@ -91,30 +71,44 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- u8 *addr;
+ u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = skcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize,
- (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+ scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return 0;
}
-int skcipher_walk_done(struct skcipher_walk *walk, int err)
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
- unsigned int n = walk->nbytes;
- unsigned int nbytes = 0;
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
if (!n)
goto finish;
- if (likely(err >= 0)) {
- n -= err;
- nbytes = walk->total - n;
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
}
- if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
unmap_src:
@@ -126,34 +120,36 @@ unmap_src:
skcipher_map_dst(walk);
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
- } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (err > 0) {
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
- err = -EINVAL;
- nbytes = 0;
+ res = -EINVAL;
+ total = 0;
} else
n = skcipher_done_slow(walk, n);
}
- if (err > 0)
- err = 0;
+ if (res > 0)
+ res = 0;
- walk->total = nbytes;
+ walk->total = total;
walk->nbytes = 0;
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
- if (nbytes) {
- crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
- CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ scatterwalk_done(&walk->in, 0, total);
+ scatterwalk_done(&walk->out, 1, total);
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
return skcipher_walk_next(walk);
}
@@ -162,9 +158,6 @@ finish:
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
- if (walk->flags & SKCIPHER_WALK_PHYS)
- goto out;
-
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
@@ -173,104 +166,29 @@ finish:
free_page((unsigned long)walk->page);
out:
- return err;
+ return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err)
-{
- struct skcipher_walk_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- u8 *data;
-
- if (err)
- goto done;
-
- data = p->data;
- if (!data) {
- data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
- data = skcipher_get_spot(data, walk->stride);
- }
-
- scatterwalk_copychunks(data, &p->dst, p->len, 1);
-
- if (offset_in_page(p->data) + p->len + walk->stride >
- PAGE_SIZE)
- free_page((unsigned long)p->data);
-
-done:
- list_del(&p->entry);
- kfree(p);
- }
-
- if (!err && walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_complete);
-
-static void skcipher_queue_write(struct skcipher_walk *walk,
- struct skcipher_walk_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
- struct skcipher_walk_buffer *p;
- unsigned a;
unsigned n;
u8 *buffer;
- void *v;
-
- if (!phys) {
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (buffer)
- goto ok;
- }
-
- /* Start with the minimum alignment of kmalloc. */
- a = crypto_tfm_ctx_alignment() - 1;
- n = bsize;
-
- if (phys) {
- /* Calculate the minimum alignment of p->buffer. */
- a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
- n += sizeof(*p);
- }
-
- /* Minimum size to align p->buffer by alignmask. */
- n += alignmask & ~a;
- /* Minimum size to ensure p->buffer does not straddle a page. */
- n += (bsize - 1) & ~(alignmask | a);
-
- v = kzalloc(n, skcipher_walk_gfp(walk));
- if (!v)
- return skcipher_walk_done(walk, -ENOMEM);
-
- if (phys) {
- p = v;
- p->len = bsize;
- skcipher_queue_write(walk, p);
- buffer = p->buffer;
- } else {
- walk->buffer = v;
- buffer = v;
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
}
-
-ok:
walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
walk->src.virt.addr = walk->dst.virt.addr;
scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
@@ -283,7 +201,6 @@ ok:
static int skcipher_next_copy(struct skcipher_walk *walk)
{
- struct skcipher_walk_buffer *p;
u8 *tmp = walk->page;
skcipher_map_src(walk);
@@ -292,24 +209,6 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
walk->src.virt.addr = tmp;
walk->dst.virt.addr = tmp;
-
- if (!(walk->flags & SKCIPHER_WALK_PHYS))
- return 0;
-
- p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
- if (!p)
- return -ENOMEM;
-
- p->data = walk->page;
- p->len = walk->nbytes;
- skcipher_queue_write(walk, p);
-
- if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
- PAGE_SIZE)
- walk->page = NULL;
- else
- walk->page += walk->nbytes;
-
return 0;
}
@@ -317,16 +216,10 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)scatterwalk_page(&walk->in) -
+ (u8 *)scatterwalk_page(&walk->out);
skcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
@@ -343,10 +236,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
- int err;
-
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
n = walk->total;
bsize = min(walk->stride, max(n, walk->blocksize));
@@ -358,9 +247,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
return skcipher_walk_done(walk, -EINVAL);
slow_path:
- err = skcipher_next_slow(walk, bsize);
- goto set_phys_lowmem;
+ return skcipher_next_slow(walk, bsize);
}
+ walk->nbytes = n;
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
if (!walk->page) {
@@ -370,58 +259,30 @@ slow_path:
if (!walk->page)
goto slow_path;
}
-
- walk->nbytes = min_t(unsigned, n,
- PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
- err = skcipher_next_copy(walk);
- goto set_phys_lowmem;
+ return skcipher_next_copy(walk);
}
- walk->nbytes = n;
-
return skcipher_next_fast(walk);
-
-set_phys_lowmem:
- if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
- unsigned a = crypto_tfm_ctx_alignment() - 1;
unsigned alignmask = walk->alignmask;
unsigned ivsize = walk->ivsize;
- unsigned bs = walk->stride;
- unsigned aligned_bs;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask + 1);
-
- /* Minimum size to align buffer by alignmask. */
- size = alignmask & ~a;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- size += ivsize;
- else {
- size += aligned_bs + ivsize;
-
- /* Minimum size to ensure buffer does not straddle a page. */
- size += (bs - 1) & ~(alignmask | a);
- }
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
return -ENOMEM;
- iv = PTR_ALIGN(walk->buffer, alignmask + 1);
- iv = skcipher_get_spot(iv, bs) + aligned_bs;
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
@@ -444,16 +305,22 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
return skcipher_walk_next(walk);
}
-static int skcipher_walk_skcipher(struct skcipher_walk *walk,
- struct skcipher_request *req)
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req, bool atomic)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const struct skcipher_alg *alg =
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -461,13 +328,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
- walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- SKCIPHER_WALK_SLEEP : 0;
-
- walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->ivsize = crypto_skcipher_ivsize(tfm);
- walk->alignmask = crypto_skcipher_alignmask(tfm);
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_skcipher_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->ivsize = alg->co.ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
if (alg->co.base.cra_type != &crypto_skcipher_type)
walk->stride = alg->co.chunksize;
@@ -476,50 +344,24 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
return skcipher_walk_first(walk);
}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
-{
- int err;
-
- might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- err = skcipher_walk_skcipher(walk, req);
-
- walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
-
- return err;
-}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- walk->flags |= SKCIPHER_WALK_PHYS;
-
- INIT_LIST_HEAD(&walk->buffers);
-
- return skcipher_walk_skcipher(walk, req);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_async);
-
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int err;
+ const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
@@ -529,22 +371,17 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
scatterwalk_done(&walk->in, 0, walk->total);
scatterwalk_done(&walk->out, 0, walk->total);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- walk->flags |= SKCIPHER_WALK_SLEEP;
- else
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-
- walk->blocksize = crypto_aead_blocksize(tfm);
- walk->stride = crypto_aead_chunksize(tfm);
- walk->ivsize = crypto_aead_ivsize(tfm);
- walk->alignmask = crypto_aead_alignmask(tfm);
-
- err = skcipher_walk_first(walk);
-
- if (atomic)
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_aead_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->stride = alg->chunksize;
+ walk->ivsize = alg->ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
- return err;
+ return skcipher_walk_first(walk);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
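With the PHYS flag, skcipher_walk_async() and skcipher_walk_complete() gone, skcipher_walk_virt() is the single entry point, and the kernel-doc added above fixes the skcipher_walk_done() contract. The canonical consumer loop looks roughly like this sketch (kernel-style pseudocode rather than a compilable driver; demo_encrypt is an illustrative name and the actual cipher step is elided):

static int demo_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		/*
		 * Cipher the largest whole-block portion of walk.nbytes,
		 * reading walk.src.virt.addr, writing walk.dst.virt.addr.
		 */

		/* hand back the byte count *not* processed, per the doc */
		err = skcipher_walk_done(&walk, walk.nbytes % bsize);
	}
	return err;
}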
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e9e7dceb606e..e1a74cb2cfbe 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1738,10 +1738,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("hmac(rmd160)"));
break;
- case 109:
- ret = min(ret, tcrypt_test("vmac64(aes)"));
- break;
-
case 111:
ret = min(ret, tcrypt_test("hmac(sha3-224)"));
break;
diff --git a/crypto/tea.c b/crypto/tea.c
index 896f863f3067..b315da8c89eb 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
@@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
sum -= TEA_DELTA;
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
@@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static struct crypto_alg tea_algs[3] = { {
@@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
@@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
@@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1f5f48ab18c7..e61490ba4095 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2885,18 +2885,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
if (ivsize) {
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
- if (vec->generates_iv && !enc)
- memcpy(iv, vec->iv_out, ivsize);
- else if (vec->iv)
+ if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
} else {
- if (vec->generates_iv) {
- pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
- driver, vec_name);
- return -EINVAL;
- }
iv = NULL;
}
@@ -3133,10 +3126,6 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
- /* Keywrap isn't supported here yet as it handles its IV differently. */
- if (strncmp(algname, "kw(", 3) == 0)
- return 0;
-
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
@@ -5409,13 +5398,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "kw(aes)",
- .test = alg_test_skcipher,
- .fips_allowed = 1,
- .suite = {
- .cipher = __VECS(aes_kw_tv_template)
- }
- }, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
@@ -5750,12 +5732,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(streebog512_tv_template)
}
}, {
- .alg = "vmac64(aes)",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(vmac64_aes_tv_template)
- }
- }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 430d33d9ac13..d754ab997186 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,8 +59,6 @@ struct hash_testvec {
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
* @fips_skip: Skip the test vector in FIPS mode
- * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
- * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
* @setkey_error: Expected error from setkey()
* @crypt_error: Expected error from encrypt() and decrypt()
*/
@@ -74,7 +72,6 @@ struct cipher_testvec {
unsigned short klen;
unsigned int len;
bool fips_skip;
- bool generates_iv;
int setkey_error;
int crypt_error;
};
@@ -8561,159 +8558,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac64_string1[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
-};
-
-static const char vmac64_string2[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c',
-};
-
-static const char vmac64_string3[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
-};
-
-static const char vmac64_string4[33] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
- 'z',
-};
-
-static const char vmac64_string5[143] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
- ']', '%', '9', '2', '7', '!', 'A',
-};
-
-static const char vmac64_string6[145] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'p', 't', '*', '7', 'l', 'i', '!', '#',
- 'w', '0', 'z', '/', '4', 'A', 'n',
-};
-
-static const struct hash_testvec vmac64_aes_tv_template[] = {
- { /* draft-krovetz-vmac-01 test vector 1 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
- .psize = 16,
- .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
- }, { /* draft-krovetz-vmac-01 test vector 2 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
- .psize = 19,
- .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
- }, { /* draft-krovetz-vmac-01 test vector 3 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 64,
- .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
- }, { /* draft-krovetz-vmac-01 test vector 4 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 316,
- .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string4,
- .psize = sizeof(vmac64_string4),
- .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string5,
- .psize = sizeof(vmac64_string5),
- .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string6,
- .psize = sizeof(vmac64_string6),
- .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
- },
-};
-
/*
* SHA384 HMAC test vectors from RFC4231
*/
@@ -24349,42 +24193,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
};
/*
- * All key wrapping test vectors taken from
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
- *
- * Note: as documented in keywrap.c, the ivout for encryption is the first
- * semiblock of the ciphertext from the test vector. For decryption, iv is
- * the first semiblock of the ciphertext.
- */
-static const struct cipher_testvec aes_kw_tv_template[] = {
- {
- .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
- "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
- .klen = 16,
- .ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea"
- "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f",
- .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
- "\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
- .len = 16,
- .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
- .generates_iv = true,
- }, {
- .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
- "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
- "\x03\x86\xf9\x32\x78\x6e\xf7\x96"
- "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f",
- .klen = 32,
- .ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa"
- "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa",
- .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
- "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
- .len = 16,
- .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
- .generates_iv = true,
- },
-};
-
-/*
* ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
* test vectors, taken from Appendix B.2.9 and B.2.10:
* http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
diff --git a/crypto/vmac.c b/crypto/vmac.c
deleted file mode 100644
index 2ea384645ecf..000000000000
--- a/crypto/vmac.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * VMAC: Message Authentication Code using Universal Hashing
- *
- * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
- *
- * Copyright (c) 2009, Intel Corporation.
- * Copyright (c) 2018, Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-/*
- * Derived from:
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is hereby placed in the public domain.
- * The authors offer no warranty. Use at your own risk.
- * Last modified: 17 APR 08, 1700 PDT
- */
-
-#include <linux/unaligned.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <asm/byteorder.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must be 2^i for any 3 < i < 13. Standard = 128 */
-#define VMAC_NONCEBYTES 16
-
-/* per-transform (per-key) context */
-struct vmac_tfm_ctx {
- struct crypto_cipher *cipher;
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
-};
-
-/* per-request context */
-struct vmac_desc_ctx {
- union {
- u8 partial[VMAC_NHBYTES]; /* partial block */
- __le64 partial_words[VMAC_NHBYTES / 8];
- };
- unsigned int partial_size; /* size of the partial block */
- bool first_block_processed;
- u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
- union {
- u8 bytes[VMAC_NONCEBYTES];
- __be64 pads[VMAC_NONCEBYTES / 8];
- } nonce;
- unsigned int nonce_size; /* nonce bytes filled so far */
-};
-
-/*
- * Constants and masks
- */
-#define UINT64_C(x) x##ULL
-static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
-static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
-static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
-static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
-static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
-
-#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
-
-#ifdef __LITTLE_ENDIAN
-#define INDEX_HIGH 1
-#define INDEX_LOW 0
-#else
-#define INDEX_HIGH 0
-#define INDEX_LOW 1
-#endif
-
-/*
- * The following routines are used in this implementation. They are
- * written via macros to simulate zero-overhead call-by-reference.
- *
- * MUL64: 64x64->128-bit multiplication
- * PMUL64: assumes top bits cleared on inputs
- * ADD128: 128x128->128-bit addition
- */
-
-#define ADD128(rh, rl, ih, il) \
- do { \
- u64 _il = (il); \
- (rl) += (_il); \
- if ((rl) < (_il)) \
- (rh)++; \
- (rh) += (ih); \
- } while (0)
-
-#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
-
-#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m >> 32), (m << 32)); \
- } while (0)
-
-#define MUL64(rh, rl, i1, i2) \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m1 = MUL32(_i1, _i2>>32); \
- u64 m2 = MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
- ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
- } while (0)
-
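
For reference, MUL64 above is the standard schoolbook split of a
64x64->128-bit multiply into four 32x32->64-bit products. A minimal
sketch that cross-checks the identity (mul64_check is a hypothetical
name; unsigned __int128 assumes a 64-bit GCC/Clang target):

	/* Sketch only: with i1 = 2^32*a + b and i2 = 2^32*c + d,
	 * i1*i2 = 2^64*(a*c) + 2^32*(a*d + b*c) + b*d.
	 */
	#include <assert.h>
	#include <stdint.h>

	static void mul64_check(uint64_t i1, uint64_t i2)
	{
		uint64_t m1 = (uint64_t)(uint32_t)i1 * (uint32_t)(i2 >> 32); /* b*c */
		uint64_t m2 = (uint64_t)(uint32_t)(i1 >> 32) * (uint32_t)i2; /* a*d */
		uint64_t rh = (uint64_t)(uint32_t)(i1 >> 32) * (uint32_t)(i2 >> 32);
		uint64_t rl = (uint64_t)(uint32_t)i1 * (uint32_t)i2;
		unsigned __int128 got, ref;
		uint64_t t;

		t = rl + (m1 << 32);		/* fold low half of b*c */
		rh += (m1 >> 32) + (t < rl);	/* high half plus carry */
		rl = t;
		t = rl + (m2 << 32);		/* fold low half of a*d */
		rh += (m2 >> 32) + (t < rl);
		rl = t;

		got = ((unsigned __int128)rh << 64) | rl;
		ref = (unsigned __int128)i1 * i2;
		assert(got == ref);
	}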
-/*
- * For highest performance the L1 NH and L2 polynomial hashes should be
- * carefully implemented to take advantage of one's target architecture.
- * Here these two hash functions are defined multiple times: once for
- * 64-bit architectures, once for 32-bit SSE2 architectures, and once
- * for the remaining (32-bit) architectures.
- * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
- * Optionally, nh_vmac_nhbytes can be defined (for multiples of
- * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
- * NH computations at once).
- */
-
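
For reference, the NH compression these macros implement can be written
as (notation mine, not from the source):

	NH(m, k) = sum over even i of ((m[i] + k[i]) mod 2^64) * ((m[i+1] + k[i+1]) mod 2^64)

accumulated without reduction as a 128-bit value in (rh, rl); the
unrolled variants differ only in how many word pairs each loop
iteration consumes.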
-#ifdef CONFIG_64BIT
-
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-
-#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-#endif
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- do { \
- u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
- /* compute ab*cd, put bd into result registers */ \
- PMUL64(t3h, t3l, al, kh); \
- PMUL64(t2h, t2l, ah, kl); \
- PMUL64(t1h, t1l, ah, 2*kh); \
- PMUL64(ah, al, al, kl); \
- /* add 2 * ac to result */ \
- ADD128(ah, al, t1h, t1l); \
- /* add together ad + bc */ \
- ADD128(t2h, t2l, t3h, t3l); \
- /* now (ah,al), (t2l,2*t2h) need summing */ \
- /* first add the high registers, carrying into t2h */ \
- ADD128(t2h, ah, z, t2l); \
- /* double t2h and add top bit of ah */ \
- t2h = 2 * t2h + (ah >> 63); \
- ah &= m63; \
- /* now add the low registers */ \
- ADD128(ah, al, mh, ml); \
- ADD128(ah, al, z, t2h); \
- } while (0)
-
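
The algebra behind poly_step, sketched for reference (not from the
source): writing a = 2^64*ah + al and k = 2^64*kh + kl,

	a*k = 2^128*(ah*kh) + 2^64*(ah*kl + al*kh) + al*kl

and modulo p127 = 2^127 - 1 the weight 2^128 collapses to 2, which is
why the ah*kh product is taken once against the pre-doubled key word
(2*kh). The cross terms are folded with ADD128, the bit at position 127
wraps back to position 0 (the doubling of t2h plus the m63 mask on ah),
and (mh, ml) is added last; the mpoly mask applied at key-setup time
keeps every intermediate below 2^128.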
-#else /* ! CONFIG_64BIT */
-
-#ifndef nh_16
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- u64 t1, t2, m1, m2, t; \
- int i; \
- rh = rl = t = 0; \
- for (i = 0; i < nw; i += 2) { \
- t1 = pe64_to_cpup(mp+i) + kp[i]; \
- t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
- m2 = MUL32(t1 >> 32, t2); \
- m1 = MUL32(t1, t2 >> 32); \
- ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
- MUL32(t1, t2)); \
- rh += (u64)(u32)(m1 >> 32) \
- + (u32)(m2 >> 32); \
- t += (u64)(u32)m1 + (u32)m2; \
- } \
- ADD128(rh, rl, (t >> 32), (t << 32)); \
- } while (0)
-#endif
-
-static void poly_step_func(u64 *ahi, u64 *alo,
- const u64 *kh, const u64 *kl,
- const u64 *mh, const u64 *ml)
-{
-#define a0 (*(((u32 *)alo)+INDEX_LOW))
-#define a1 (*(((u32 *)alo)+INDEX_HIGH))
-#define a2 (*(((u32 *)ahi)+INDEX_LOW))
-#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
-#define k0 (*(((u32 *)kl)+INDEX_LOW))
-#define k1 (*(((u32 *)kl)+INDEX_HIGH))
-#define k2 (*(((u32 *)kh)+INDEX_LOW))
-#define k3 (*(((u32 *)kh)+INDEX_HIGH))
-
- u64 p, q, t;
- u32 t2;
-
- p = MUL32(a3, k3);
- p += p;
- p += *(u64 *)mh;
- p += MUL32(a0, k2);
- p += MUL32(a1, k1);
- p += MUL32(a2, k0);
- t = (u32)(p);
- p >>= 32;
- p += MUL32(a0, k3);
- p += MUL32(a1, k2);
- p += MUL32(a2, k1);
- p += MUL32(a3, k0);
- t |= ((u64)((u32)p & 0x7fffffff)) << 32;
- p >>= 31;
- p += (u64)(((u32 *)ml)[INDEX_LOW]);
- p += MUL32(a0, k0);
- q = MUL32(a1, k3);
- q += MUL32(a2, k2);
- q += MUL32(a3, k1);
- q += q;
- p += q;
- t2 = (u32)(p);
- p >>= 32;
- p += (u64)(((u32 *)ml)[INDEX_HIGH]);
- p += MUL32(a0, k1);
- p += MUL32(a1, k0);
- q = MUL32(a2, k3);
- q += MUL32(a3, k2);
- q += q;
- p += q;
- *(u64 *)(alo) = (p << 32) | t2;
- p >>= 32;
- *(u64 *)(ahi) = p + t;
-
-#undef a0
-#undef a1
-#undef a2
-#undef a3
-#undef k0
-#undef k1
-#undef k2
-#undef k3
-}
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
-
-#endif /* end of specialized NH and poly definitions */
-
-/* At least nh_16 is defined. Define the others as needed here. */
-#ifndef nh_16_2
-#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_16(mp, kp, nw, rh, rl); \
- nh_16(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-#ifndef nh_vmac_nhbytes
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- nh_16(mp, kp, nw, rh, rl)
-#endif
-#ifndef nh_vmac_nhbytes_2
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
- nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-
-static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
-{
- u64 rh, rl, t, z = 0;
-
- /* fully reduce (p1,p2)+(len,0) mod p127 */
- t = p1 >> 63;
- p1 &= m63;
- ADD128(p1, p2, len, t);
- /* At this point, (p1,p2) is at most 2^127+(len<<64) */
- t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
- ADD128(p1, p2, z, t);
- p1 &= m63;
-
- /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
- t = p1 + (p2 >> 32);
- t += (t >> 32);
- t += (u32)t > 0xfffffffeu;
- p1 += (t >> 32);
- p2 += (p1 << 32);
-
- /* compute (p1+k1)%p64 and (p2+k2)%p64 */
- p1 += k1;
- p1 += (0 - (p1 < k1)) & 257;
- p2 += k2;
- p2 += (0 - (p2 < k2)) & 257;
-
- /* compute (p1+k1)*(p2+k2)%p64 */
- MUL64(rh, rl, p1, p2);
- t = rh >> 56;
- ADD128(t, rl, z, rh);
- rh <<= 8;
- ADD128(t, rl, z, rh);
- t += t << 8;
- rl += t;
- rl += (0 - (rl < t)) & 257;
- rl += (0 - (rl > p64-1)) & 257;
- return rl;
-}
-
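
A sketch of the identities this reduction relies on (not from the
source): since 2^64 is congruent to 257 (mod p64), a 128-bit value
rh*2^64 + rl reduces to 257*rh + rl = rh + (rh << 8) + rl. The code
stashes the top 8 bits of rh in t before the shift so nothing is lost,
folds the resulting carry word with the same identity (t += t << 8),
and finishes with up to two conditional fix-ups, where adding 257
modulo 2^64 is equivalent to subtracting p64.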
-/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
-static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx,
- const __le64 *mptr, unsigned int blocks)
-{
- const u64 *kptr = tctx->nhkey;
- const u64 pkh = tctx->polykey[0];
- const u64 pkl = tctx->polykey[1];
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
- u64 rh, rl;
-
- if (!dctx->first_block_processed) {
- dctx->first_block_processed = true;
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- ADD128(ch, cl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- blocks--;
- }
-
- while (blocks--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
-
- dctx->polytmp[0] = ch;
- dctx->polytmp[1] = cl;
-}
-
-static int vmac_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- __be64 out[2];
- u8 in[16] = { 0 };
- unsigned int i;
- int err;
-
- if (keylen != VMAC_KEY_LEN)
- return -EINVAL;
-
- err = crypto_cipher_setkey(tctx->cipher, key, keylen);
- if (err)
- return err;
-
- /* Fill nh key */
- in[0] = 0x80;
- for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->nhkey[i] = be64_to_cpu(out[0]);
- tctx->nhkey[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- }
-
- /* Fill poly key */
- in[0] = 0xC0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
- tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
- in[15]++;
- }
-
- /* Fill ip key */
- in[0] = 0xE0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
- do {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->l3key[i] = be64_to_cpu(out[0]);
- tctx->l3key[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
- }
-
- return 0;
-}
-
-static int vmac_init(struct shash_desc *desc)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->partial_size = 0;
- dctx->first_block_processed = false;
- memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
- dctx->nonce_size = 0;
- return 0;
-}
-
-static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int n;
-
- /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
- if (dctx->nonce_size < VMAC_NONCEBYTES) {
- n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
- memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
- dctx->nonce_size += n;
- p += n;
- len -= n;
- }
-
- if (dctx->partial_size) {
- n = min(len, VMAC_NHBYTES - dctx->partial_size);
- memcpy(&dctx->partial[dctx->partial_size], p, n);
- dctx->partial_size += n;
- p += n;
- len -= n;
- if (dctx->partial_size == VMAC_NHBYTES) {
- vhash_blocks(tctx, dctx, dctx->partial_words, 1);
- dctx->partial_size = 0;
- }
- }
-
- if (len >= VMAC_NHBYTES) {
- n = round_down(len, VMAC_NHBYTES);
- /* TODO: 'p' may be misaligned here */
- vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
- p += n;
- len -= n;
- }
-
- if (len) {
- memcpy(dctx->partial, p, len);
- dctx->partial_size = len;
- }
-
- return 0;
-}
-
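Because the nonce travels in-band, a caller would drive the resulting
"vmac64(aes)" shash roughly as follows (a hedged sketch: key, nonce,
and msg are caller-provided buffers, and error handling is elided):

	/* Sketch only; requires <crypto/hash.h>. */
	struct crypto_shash *tfm;
	u8 tag[8];				/* VMAC_TAG_LEN / 8 */

	tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
	crypto_shash_setkey(tfm, key, 16);	/* AES-128 key */
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		crypto_shash_init(desc);
		crypto_shash_update(desc, nonce, 16);	/* nonce first; top bit clear */
		crypto_shash_update(desc, msg, msg_len);
		crypto_shash_final(desc, tag);		/* 64-bit tag */
	}
	crypto_free_shash(tfm);
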
-static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx)
-{
- unsigned int partial = dctx->partial_size;
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
-
- /* L1 and L2-hash the final block if needed */
- if (partial) {
- /* Zero-pad to next 128-bit boundary */
- unsigned int n = round_up(partial, 16);
- u64 rh, rl;
-
- memset(&dctx->partial[partial], 0, n - partial);
- nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
- rh &= m62;
- if (dctx->first_block_processed)
- poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
- rh, rl);
- else
- ADD128(ch, cl, rh, rl);
- }
-
- /* L3-hash the 128-bit output of L2-hash */
- return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
-}
-
-static int vmac_final(struct shash_desc *desc, u8 *out)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- int index;
- u64 hash, pad;
-
- if (dctx->nonce_size != VMAC_NONCEBYTES)
- return -EINVAL;
-
- /*
- * The VMAC specification requires a nonce at least 1 bit shorter than
- * the block cipher's block length, so we actually only accept a 127-bit
- * nonce. We define the unused bit to be the first one and require that
- * it be 0, so the needed prepending of a 0 bit is implicit.
- */
- if (dctx->nonce.bytes[0] & 0x80)
- return -EINVAL;
-
- /* Finish calculating the VHASH of the message */
- hash = vhash_final(tctx, dctx);
-
- /* Generate pseudorandom pad by encrypting the nonce */
- BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
- index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
- dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
- crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
- dctx->nonce.bytes);
- pad = be64_to_cpu(dctx->nonce.pads[index]);
-
- /* The VMAC is the sum of VHASH and the pseudorandom pad */
- put_unaligned_be64(hash + pad, out);
- return 0;
-}
-
-static int vmac_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- tctx->cipher = cipher;
- return 0;
-}
-
-static void vmac_exit_tfm(struct crypto_tfm *tfm)
-{
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(tctx->cipher);
-}
-
-static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct shash_instance *inst;
- struct crypto_cipher_spawn *spawn;
- struct crypto_alg *alg;
- u32 mask;
- int err;
-
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
- spawn = shash_instance_ctx(inst);
-
- err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, mask);
- if (err)
- goto err_free_inst;
- alg = crypto_spawn_cipher_alg(spawn);
-
- err = -EINVAL;
- if (alg->cra_blocksize != VMAC_NONCEBYTES)
- goto err_free_inst;
-
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
- if (err)
- goto err_free_inst;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
- inst->alg.base.cra_init = vmac_init_tfm;
- inst->alg.base.cra_exit = vmac_exit_tfm;
-
- inst->alg.descsize = sizeof(struct vmac_desc_ctx);
- inst->alg.digestsize = VMAC_TAG_LEN / 8;
- inst->alg.init = vmac_init;
- inst->alg.update = vmac_update;
- inst->alg.final = vmac_final;
- inst->alg.setkey = vmac_setkey;
-
- inst->free = shash_free_singlespawn_instance;
-
- err = shash_register_instance(tmpl, inst);
- if (err) {
-err_free_inst:
- shash_free_singlespawn_instance(inst);
- }
- return err;
-}
-
-static struct crypto_template vmac64_tmpl = {
- .name = "vmac64",
- .create = vmac_create,
- .module = THIS_MODULE,
-};
-
-static int __init vmac_module_init(void)
-{
- return crypto_register_template(&vmac64_tmpl);
-}
-
-static void __exit vmac_module_exit(void)
-{
- crypto_unregister_template(&vmac64_tmpl);
-}
-
-subsys_initcall(vmac_module_init);
-module_exit(vmac_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac64");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0a9cdd31cbd9..19ab145f912e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,23 +200,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..fef18ffdb128 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
static const struct file_operations dbc_fops = {
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 96fde9437b4b..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1209,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1396,6 +1395,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1428,6 +1438,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 19c1b5d3c954..d3f5d108b898 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -276,7 +282,7 @@ enum qm_alg_type {
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -333,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -396,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -501,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -654,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -688,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
return -EIO;
}
@@ -855,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -1052,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1540,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1562,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1630,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1662,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+		dev_err(dev, "failed to send command(0x%x) to all VFs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1692,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1700,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1711,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1741,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1842,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
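For clarity, the interface message word used by these helpers carries
the command in the low 32 bits and the payload in the high 32 bits; a
minimal sketch with hypothetical helper names:

	/* Illustrative only: (u64)msg = (u32)data << QM_IFC_DATA_SHIFT | cmd */
	static inline u64 qm_ifc_pack(enum qm_ifc_cmd cmd, u32 data)
	{
		return (u64)data << QM_IFC_DATA_SHIFT | cmd;
	}

	static inline void qm_ifc_unpack(u64 msg, enum qm_ifc_cmd *cmd, u32 *data)
	{
		*cmd = msg & QM_IFC_CMD_MASK;
		if (data)
			*data = msg >> QM_IFC_DATA_SHIFT;
	}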
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1864,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2156,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2475,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2843,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3607,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3617,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -4109,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4122,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4137,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4298,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4527,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
	 * Check whether there is an ECC mbit error. If it occurs, we need to
* wait for soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4675,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4709,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4732,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4749,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
	 * Whether or not the message is received successfully, the VF needs
	 * to ack the PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4795,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
	 * Get the msg from the source by sending a mailbox. Whether or not the
	 * message is received successfully, the destination needs to ack the
	 * source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4822,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -5167,6 +5277,20 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
return qm_pre_store_caps(qm);
}
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
+}
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5186,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5205,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5247,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5461,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5598,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 356188bee6fb..4b9970230822 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ bool fallback;
};
/* SEC request of Crypto */
@@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ae9ebbb4103d..66bc07da9eb6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ dev_err(dev, "AUTH key length error!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1202,27 +1195,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
	 * the counter must be set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
+ cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ /* Hardware does not handle cases where authsize is less than 4 bytes */
+ if (unlikely(sz < MIN_MAC_LEN)) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
+ req->cryptlen <= authsize))) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
}
@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 8ec5333bb5aa..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1180,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
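
The new dev_is_abnormal callback gives the QM core a uniform way to ask whether a device has hit a shutdown-class error without knowing each accelerator's register layout. A hypothetical consumer (sketch only; the exact call site in qm.c is assumed here):

    /* Ask the accelerator-specific hook before driving the device,
     * e.g. while preparing a controller reset.
     */
    if (qm->err_ini->dev_is_abnormal && qm->err_ini->dev_is_abnormal(qm))
        return -EIO;    /* device needs recovery first */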
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* +1 accounts for the '\n' separator that may be appended below */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear any pending dae hw error source */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
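
dae_main.c leans on readl_relaxed_poll_timeout() from <linux/iopoll.h> for both the memory-init and AXI-shutdown waits. Only as an illustrative expansion (the real macro also supports an initial sleep and uses slightly different sleep bounds), readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) behaves roughly like:

    ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

    for (;;) {
        val = readl_relaxed(addr);
        if (cond)
            return 0;                       /* condition met */
        if (ktime_after(ktime_get(), deadline)) {
            val = readl_relaxed(addr);      /* one final re-read */
            return (cond) ? 0 : -ETIMEDOUT;
        }
        usleep_range(delay_us / 2, delay_us);   /* back off between reads */
    }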
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 2fecf346c3c9..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 9239b251c2d7..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
u32 err_status;
+ /* Get the latest device hardware error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
@@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
- return ACC_ERR_NEED_RESET;
+ zip_result = ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
}
- hisi_zip_clear_hw_err_status(qm, err_status);
}
- return ACC_ERR_RECOVERED;
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
}
static void hisi_zip_err_info_init(struct hisi_qm *qm)
@@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
.get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1264,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1301,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
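
The tail of hisi_zip_qm_init() is reshaped into the kernel's single-exit goto idiom, so every failure after hisi_qm_init() funnels through one unwind point instead of duplicating the cleanup. The generic shape (names hypothetical):

    ret = step_that_needs_unwind(qm);
    if (ret)
        goto err_qm_uninit;

    return 0;

err_qm_uninit:
    hisi_qm_uninit(qm);     /* the one place that undoes qm init */
    return ret;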
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 9e557649e5d0..c3776b0de51d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 449c6d3ab2db..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
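
The ixp4xx change pairs each successful of_parse_phandle_with_fixed_args() with of_node_put(): the helper returns args.np with its refcount raised, and dropping the reference once the args[] values have been copied out prevents a device-node leak. The general pattern:

    struct of_phandle_args spec;
    int qid;

    if (of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, &spec))
        return -ENODEV;
    qid = spec.args[0];
    of_node_put(spec.np);   /* balance the reference taken by the parser */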
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index 14c302d2db79..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at least
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_info("queue_alloc() of %d fails\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
- iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* DES: ECB CBC and CFB are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* 3DES: ECB CBC and CFB are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* AES: ECB CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_encrypt_chaining,
- },
- },
-
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
- .hash_init = (u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
- .hash_init = (u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
- .hash_init = (u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
- .hash_init = (u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
- mutex_unlock(&spu_lock);
- return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
- sprintf(p->irq_name, "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
- int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
- id = mdesc_get_property(mdesc, tgt, "id", NULL);
- if (table[*id] != NULL) {
- dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
- dev->dev.of_node);
- return -EINVAL;
- }
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
- printk("NO 'ino'\n");
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
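
The removed driver's grab_global_resources()/release_global_resources() pair above is a standard kernel idiom: a mutex-protected reference count so that the first user performs one-time setup and the last user tears it down. A minimal sketch of the idiom; my_setup() and my_teardown() are illustrative placeholders, not anything from the driver:

#include <linux/mutex.h>

int my_setup(void);		/* placeholder one-time setup */
void my_teardown(void);		/* placeholder final teardown */

static DEFINE_MUTEX(res_lock);
static int res_ref;

static int grab_resources(void)
{
	int err = 0;

	mutex_lock(&res_lock);
	if (res_ref++)
		goto out;		/* already set up by an earlier user */

	err = my_setup();
	if (err)
		res_ref--;		/* setup failed: undo the count */
out:
	mutex_unlock(&res_lock);
	return err;
}

static void release_resources(void)
{
	mutex_lock(&res_lock);
	if (!--res_ref)
		my_teardown();		/* last user gone: tear down */
	mutex_unlock(&res_lock);
}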
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
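
The CONTROL_* definitions above pack an entire CWQ job description into one 64-bit control word. A hedged illustration of assembling such a word for an AES-128-CBC encrypt job, using only the masks and shifts listed; the exact encoding conventions (for instance, any bias applied to the length field) lived in the removed n2_core.c and are not reproduced here:

/* Illustrative only; field conventions are from the header above. */
static inline u64 cwq_control_word_example(unsigned int len)
{
	u64 ctl = 0;

	ctl |= ((u64)len << CONTROL_LEN_SHIFT) & CONTROL_LEN;
	ctl |= (ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC)
		<< CONTROL_ENC_TYPE_SHIFT;
	ctl |= OPCODE_ENCRYPT << CONTROL_OPCODE_SHIFT;
	ctl |= CONTROL_ENCRYPT;				/* encrypt direction */
	ctl |= CONTROL_START_OF_BLOCK | CONTROL_END_OF_BLOCK;

	return ctl;
}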
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e27b84616743..551dd32a8db0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..41d67780fd45 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
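
This conversion (and the identical omap-des change below) replaces a scatter_walk with a plain byte offset into the current scatterlist entry. A condensed sketch of the resulting PIO walk; write_word_to_hw() is a placeholder for the device register write, and as in the driver the caller must guarantee enough mapped, word-aligned data is available (the driver's BUG_ON()s enforce this):

#include <linux/scatterlist.h>

void write_word_to_hw(u32 w);	/* placeholder register write */

static void pio_feed_block(struct scatterlist **sgp, unsigned int *offp,
			   unsigned int nwords)
{
	struct scatterlist *sg = *sgp;
	unsigned int off = *offp;
	u32 *src = sg_virt(sg) + off;
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		write_word_to_hw(*src);
		off += 4;
		if (off == sg->length) {	/* entry consumed: move on */
			sg = sg_next(sg);
			off = 0;
			if (sg)
				src = sg_virt(sg);
		} else {
			src++;
		}
	}
	*sgp = sg;
	*offp = off;
}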
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 498cbd585ed1..a099460d5f21 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -40,8 +39,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -152,8 +149,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e228a31fe28d..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ ops->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
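
The qce rework above swaps the spinlock/tasklet pair for a mutex and a work item, which is what permits the scoped_guard() conversions from linux/cleanup.h: the lock is released automatically at the end of the scope, including on early return. A minimal sketch of the pattern with an illustrative device structure:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	bool busy;
	void *req;
};

static int try_take_slot(struct my_dev *dev, void *req)
{
	scoped_guard(mutex, &dev->lock) {
		if (dev->busy)
			return -EBUSY;	/* guard releases the mutex here */
		dev->busy = true;
		dev->req = req;
	}
	/* mutex already released; start the hardware outside the lock */
	return 0;
}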
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: work run to complete finished requests
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
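
The dma.c change follows the same devm pattern as the clock and algorithm-registration conversions: pair an acquisition with devm_add_action_or_reset(), which runs the release callback on driver detach, or immediately if registering the action itself fails. A generic sketch with placeholder my_resource_*() helpers:

struct my_resource;
int my_resource_setup(struct my_resource *res);		/* placeholder */
void my_resource_teardown(struct my_resource *res);	/* placeholder */

static void my_resource_release(void *data)
{
	struct my_resource *res = data;

	my_resource_teardown(res);
}

static int devm_my_resource_get(struct device *dev, struct my_resource *res)
{
	int ret;

	ret = my_resource_setup(res);
	if (ret)
		return ret;

	/* on failure this calls my_resource_release(res) before returning */
	return devm_add_action_or_reset(dev, my_resource_release, res);
}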
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9d130592cc0a..d734c9a56786 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..0b5cdd5676b1 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
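
Both Tegra fixes (here and in tegra-se-hash below) enforce the same rule: if the init step of a digest request fails, propagate the error instead of queueing a half-initialized request to the engine. Schematically, with placeholder helpers:

int my_hash_init(struct ahash_request *req);		/* placeholder */
int my_transfer_to_engine(struct ahash_request *req);	/* placeholder */

static int my_digest(struct ahash_request *req)
{
	int ret = my_hash_init(req);

	if (ret)
		return ret;	/* never queue a request whose init failed */

	return my_transfer_to_engine(req);
}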
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 81330c6446f6..b0853f7cada0 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
-/* A slow generic version of gf_mul, implemented for lle and bbe
+/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
-void gf128mul_bbe(be128 *a, const be128 *b);
-
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
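
With the bbe variants gone, callers are left with the lle convention (the one GHASH-style users need). A brief sketch of the surviving API, both the slow generic multiply and the 4k-table form; buffers are illustrative:

#include <crypto/gf128mul.h>

static void mul_slow(be128 *acc, const be128 *h)
{
	gf128mul_lle(acc, h);		/* acc <- acc * h, slow generic path */
}

static int mul_with_4k_table(be128 *acc, const be128 *h)
{
	struct gf128mul_4k *t = gf128mul_init_4k_lle(h);

	if (!t)
		return -ENOMEM;
	gf128mul_4k_lle(acc, t);	/* acc <- acc * h via the 4k table */
	gf128mul_free_4k(t);
	return 0;
}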
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 58967593b6b4..84da3424decc 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -12,20 +12,6 @@
#include <crypto/hash.h>
struct ahash_request;
-struct scatterlist;
-
-struct crypto_hash_walk {
- char *data;
-
- unsigned int offset;
- unsigned int flags;
-
- struct page *pg;
- unsigned int entrylen;
-
- unsigned int total;
- struct scatterlist *sg;
-};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -57,15 +43,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
- return !(walk->entrylen | walk->total);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 7ae42afdcf3e..4f49621d3eb6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -11,7 +11,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
-#include <linux/list.h>
#include <linux/types.h>
/*
@@ -58,12 +57,6 @@ struct crypto_lskcipher_spawn {
struct skcipher_walk {
union {
struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
void *addr;
} virt;
} src, dst;
@@ -74,8 +67,6 @@ struct skcipher_walk {
struct scatter_walk out;
unsigned int total;
- struct list_head buffers;
-
u8 *page;
u8 *buffer;
u8 *oiv;
@@ -205,17 +196,14 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err);
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
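
For reference, what survives the skcipher_walk cleanup is the usual virtual-address loop; the removed phys/async/buffers machinery had no remaining users. A schematic block-cipher loop, with MY_BLOCK_SIZE and my_cipher_process() as placeholders:

#include <crypto/internal/skcipher.h>

#define MY_BLOCK_SIZE 16	/* illustrative block size */

void my_cipher_process(u8 *dst, const u8 *src, unsigned int len);

static int my_skcipher_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		/* process whole blocks; the walk handles the remainder */
		unsigned int n = walk.nbytes - (walk.nbytes % MY_BLOCK_SIZE);

		my_cipher_process(walk.dst.virt.addr, walk.src.virt.addr, n);
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}

	return err;
}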
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 6dbd0d49628f..99fcf65d575f 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -97,6 +97,8 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+#define QM_DEV_ALG_MAX_LEN 256
+
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
@@ -122,6 +124,7 @@ enum qm_hw_ver {
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
QM_HW_V3 = 0x30,
+ QM_HW_V4 = 0x50,
};
enum qm_fun_type {
@@ -156,6 +159,7 @@ enum qm_cap_bits {
QM_SUPPORT_MB_COMMAND,
QM_SUPPORT_SVA_PREFETCH,
QM_SUPPORT_RPM,
+ QM_SUPPORT_DAE,
};
struct qm_dev_alg {
@@ -266,6 +270,8 @@ struct hisi_qm_err_ini {
void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ bool (*dev_is_abnormal)(struct hisi_qm *qm);
+ int (*set_priv_status)(struct hisi_qm *qm);
};
struct hisi_qm_cap_info {
@@ -392,6 +398,8 @@ struct hisi_qm {
struct mutex mailbox_lock;
+ struct mutex ifc_lock;
+
const struct hisi_qm_hw_ops *ops;
struct qm_debug debug;
diff --git a/include/linux/verification.h b/include/linux/verification.h
index cb2d47f28091..4f3022d081c3 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -38,8 +38,6 @@ enum key_being_used_for {
VERIFYING_UNSPECIFIED_SIGNATURE,
NR__KEY_BEING_USED_FOR
};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
struct key;
diff --git a/kernel/padata.c b/kernel/padata.c
index d51bbc76b227..418987056340 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -47,6 +47,22 @@ struct padata_mt_job_state {
static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);
+static inline void padata_get_pd(struct parallel_data *pd)
+{
+ refcount_inc(&pd->refcnt);
+}
+
+static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
+{
+ if (refcount_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
+}
+
+static inline void padata_put_pd(struct parallel_data *pd)
+{
+ padata_put_pd_cnt(pd, 1);
+}
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
if ((pinst->flags & PADATA_RESET))
goto out;
- refcount_inc(&pd->refcnt);
+ padata_get_pd(pd);
padata->pd = pd;
padata->cb_cpu = *cb_cpu;
@@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
smp_mb();
reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
- if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+ if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+ /*
+ * Other contexts (e.g. padata_serial_worker) can finish the request
+ * and drop the last reference, so to avoid a use-after-free take a
+ * pd reference here and put it once reorder_work has finished.
+ */
+ padata_get_pd(pd);
queue_work(pinst->serial_wq, &pd->reorder_work);
+ }
}
static void invoke_padata_reorder(struct work_struct *work)
@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
local_bh_enable();
+ /* Pairs with the reference taken when reorder_work was queued */
+ padata_put_pd(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
}
local_bh_enable();
- if (refcount_sub_and_test(cnt, &pd->refcnt))
- padata_free_pd(pd);
+ padata_put_pd_cnt(pd, cnt);
}
/**
@@ -681,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
synchronize_rcu();
list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
- if (refcount_dec_and_test(&ps->opd->refcnt))
- padata_free_pd(ps->opd);
+ padata_put_pd(ps->opd);
pinst->flags &= ~PADATA_RESET;
@@ -970,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
pinst = kobj2pinst(kobj);
pentry = attr2pentry(attr);
- if (pentry->show)
+ if (pentry->store)
ret = pentry->store(pinst, attr, buf, count);
return ret;
@@ -1121,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
if (!ps)
return;
+ /*
+ * Wait for all _do_serial calls to finish to avoid touching
+ * freed pd's and ps's.
+ */
+ synchronize_rcu();
+
mutex_lock(&ps->pinst->lock);
list_del(&ps->list);
pd = rcu_dereference_protected(ps->pd, 1);
- if (refcount_dec_and_test(&pd->refcnt))
- padata_free_pd(pd);
+ padata_put_pd(pd);
mutex_unlock(&ps->pinst->lock);
kfree(ps);
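
The padata changes above all enforce one lifetime rule: a context that queues asynchronous work touching an object must hold a reference across the queueing, and the work function drops it when done. The same rule in generic form; names are illustrative:

#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct work_struct work;
	refcount_t ref;
};

static void my_obj_put(struct my_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}

static void my_work_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	/* ... use obj safely; the queued reference keeps it alive ... */
	my_obj_put(obj);	/* pairs with the get taken before queueing */
}

static void kick_work(struct my_obj *obj)
{
	refcount_inc(&obj->ref);	/* keep obj alive until the work runs */
	queue_work(system_wq, &obj->work);
}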
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 6bba6473fdf3..902e49410aaf 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -697,7 +697,7 @@ static int __init libaesgcm_init(void)
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
- u8 buf[sizeof(ptext12)];
+ static u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
index 8f8c45e0cdcf..fbe72cb3453a 100644
--- a/lib/crypto/gf128mul.c
+++ b/lib/crypto/gf128mul.c
@@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b)
}
EXPORT_SYMBOL(gf128mul_lle);
-void gf128mul_bbe(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_bbe(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[7]);
- if (ch & 0x40)
- be128_xor(r, r, &p[6]);
- if (ch & 0x20)
- be128_xor(r, r, &p[5]);
- if (ch & 0x10)
- be128_xor(r, r, &p[4]);
- if (ch & 0x08)
- be128_xor(r, r, &p[3]);
- if (ch & 0x04)
- be128_xor(r, r, &p[2]);
- if (ch & 0x02)
- be128_xor(r, r, &p[1]);
- if (ch & 0x01)
- be128_xor(r, r, &p[0]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_bbe(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_bbe);
-
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
@@ -380,28 +342,6 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_bbe);
-
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
@@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 0;
-
- *r = t->t[ap[0]];
- while (++i < 16) {
- gf128mul_x8_bbe(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_bbe);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b..0e9a1d4cf89b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
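
Condensed, the rhashtable fix makes the insert path count only inserts that actually happened and defers the growth check until the bucket lock is dropped. A schematic of the new flow, compressing rhashtable_try_insert() and its helpers into one illustrative function (try_insert_locked() stands in for the lookup/insert pair):

static void insert_and_maybe_grow(struct rhashtable *ht,
				  struct bucket_table *tbl,
				  struct rhash_lock_head __rcu **bkt,
				  struct rhash_head *obj)
{
	unsigned long flags;
	bool inserted;

	flags = rht_lock(tbl, bkt);
	inserted = try_insert_locked(bkt, obj);	/* placeholder */
	if (inserted)
		atomic_inc(&ht->nelems);	/* count successful inserts only */
	rht_unlock(tbl, bkt, flags);

	/* grow check and work scheduling now run without the bucket lock */
	if (inserted && rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);
}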