-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom-qce.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom-qce.yaml | 123
-rw-r--r--  MAINTAINERS | 33
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8550.dtsi | 2
-rw-r--r--  arch/arm64/crypto/aes-neonbs-core.S | 9
-rw-r--r--  arch/powerpc/crypto/Kconfig | 17
-rw-r--r--  arch/powerpc/crypto/Makefile | 13
-rw-r--r--  arch/powerpc/crypto/aes-gcm-p10-glue.c | 343
-rw-r--r--  arch/powerpc/crypto/aes-gcm-p10.S | 1521
-rw-r--r--  arch/powerpc/crypto/aesp8-ppc.pl | 585
-rw-r--r--  arch/powerpc/crypto/ghashp8-ppc.pl | 370
-rw-r--r--  arch/powerpc/crypto/ppc-xlate.pl | 229
-rw-r--r--  arch/powerpc/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/crypto/aegis128-aesni-asm.S | 6
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 198
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S | 254
-rw-r--r--  arch/x86/crypto/aria-aesni-avx-asm_64.S | 28
-rw-r--r--  arch/x86/crypto/aria-aesni-avx2-asm_64.S | 28
-rw-r--r--  arch/x86/crypto/aria-gfni-avx512-asm_64.S | 24
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx-asm_64.S | 30
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 30
-rw-r--r--  arch/x86/crypto/camellia-x86_64-asm_64.S | 6
-rw-r--r--  arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 38
-rw-r--r--  arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 32
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S | 16
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 70
-rw-r--r--  arch/x86/crypto/des3_ede-asm_64.S | 96
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S | 4
-rw-r--r--  arch/x86/crypto/sha1_avx2_x86_64_asm.S | 25
-rw-r--r--  arch/x86/crypto/sha256-avx-asm.S | 16
-rw-r--r--  arch/x86/crypto/sha256-avx2-asm.S | 54
-rw-r--r--  arch/x86/crypto/sha256-ssse3-asm.S | 16
-rw-r--r--  arch/x86/crypto/sha512-avx-asm.S | 8
-rw-r--r--  arch/x86/crypto/sha512-avx2-asm.S | 16
-rw-r--r--  arch/x86/crypto/sha512-ssse3-asm.S | 8
-rw-r--r--  arch/x86/kvm/svm/sev.c | 1
-rw-r--r--  crypto/acompress.c | 81
-rw-r--r--  crypto/aead.c | 98
-rw-r--r--  crypto/ahash.c | 144
-rw-r--r--  crypto/akcipher.c | 52
-rw-r--r--  crypto/algapi.c | 219
-rw-r--r--  crypto/algif_hash.c | 19
-rw-r--r--  crypto/api.c | 63
-rw-r--r--  crypto/async_tx/async_pq.c | 10
-rw-r--r--  crypto/async_tx/async_tx.c | 4
-rw-r--r--  crypto/compress.h | 26
-rw-r--r--  crypto/cryptd.c | 34
-rw-r--r--  crypto/crypto_user_stat.c | 183
-rw-r--r--  crypto/drbg.c | 2
-rw-r--r--  crypto/fips.c | 11
-rw-r--r--  crypto/hash.h | 40
-rw-r--r--  crypto/hmac.c | 15
-rw-r--r--  crypto/internal.h | 10
-rw-r--r--  crypto/jitterentropy-kcapi.c | 51
-rw-r--r--  crypto/jitterentropy.c | 144
-rw-r--r--  crypto/jitterentropy.h | 1
-rw-r--r--  crypto/kpp.c | 53
-rw-r--r--  crypto/rng.c | 65
-rw-r--r--  crypto/scompress.c | 39
-rw-r--r--  crypto/shash.c | 181
-rw-r--r--  crypto/skcipher.c | 113
-rw-r--r--  crypto/tcrypt.c | 11
-rw-r--r--  crypto/testmgr.c | 272
-rw-r--r--  crypto/testmgr.h | 47
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 29
-rw-r--r--  drivers/char/hw_random/xgene-rng.c | 46
-rw-r--r--  drivers/crypto/Kconfig | 21
-rw-r--r--  drivers/crypto/Makefile | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/aspeed/aspeed-acry.c | 14
-rw-r--r--  drivers/crypto/atmel-aes.c | 16
-rw-r--r--  drivers/crypto/atmel-sha.c | 34
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 2
-rw-r--r--  drivers/crypto/atmel-tdes.c | 15
-rw-r--r--  drivers/crypto/caam/caamalg.c | 21
-rw-r--r--  drivers/crypto/caam/caamhash.c | 10
-rw-r--r--  drivers/crypto/caam/caampkc.c | 6
-rw-r--r--  drivers/crypto/caam/caamrng.c | 6
-rw-r--r--  drivers/crypto/caam/ctrl.c | 112
-rw-r--r--  drivers/crypto/caam/debugfs.c | 12
-rw-r--r--  drivers/crypto/caam/debugfs.h | 7
-rw-r--r--  drivers/crypto/caam/dpseci-debugfs.c | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 61
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 1
-rw-r--r--  drivers/crypto/ccp/Makefile | 3
-rw-r--r--  drivers/crypto/ccp/platform-access.c | 215
-rw-r--r--  drivers/crypto/ccp/platform-access.h | 35
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 38
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 11
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 16
-rw-r--r--  drivers/crypto/ccp/sev-dev.h | 2
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 10
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 11
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 17
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 4
-rw-r--r--  drivers/crypto/hifn_795x.c | 24
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 7
-rw-r--r--  drivers/crypto/hisilicon/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 1
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 1
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 1
-rw-r--r--  drivers/crypto/hisilicon/trng/Makefile | 3
-rw-r--r--  drivers/crypto/hisilicon/trng/trng-stb.c | 176
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 1
-rw-r--r--  drivers/crypto/img-hash.c | 7
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 39
-rw-r--r--  drivers/crypto/intel/Kconfig | 5
-rw-r--r--  drivers/crypto/intel/Makefile | 5
-rw-r--r--  drivers/crypto/intel/ixp4xx/Kconfig | 14
-rw-r--r--  drivers/crypto/intel/ixp4xx/Makefile | 2
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c (renamed from drivers/crypto/ixp4xx_crypto.c) | 15
-rw-r--r--  drivers/crypto/intel/keembay/Kconfig (renamed from drivers/crypto/keembay/Kconfig) | 0
-rw-r--r--  drivers/crypto/intel/keembay/Makefile (renamed from drivers/crypto/keembay/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-aes-core.c (renamed from drivers/crypto/keembay/keembay-ocs-aes-core.c) | 2
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-ecc.c (renamed from drivers/crypto/keembay/keembay-ocs-ecc.c) | 0
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c (renamed from drivers/crypto/keembay/keembay-ocs-hcu-core.c) | 0
-rw-r--r--  drivers/crypto/intel/keembay/ocs-aes.c (renamed from drivers/crypto/keembay/ocs-aes.c) | 0
-rw-r--r--  drivers/crypto/intel/keembay/ocs-aes.h (renamed from drivers/crypto/keembay/ocs-aes.h) | 0
-rw-r--r--  drivers/crypto/intel/keembay/ocs-hcu.c (renamed from drivers/crypto/keembay/ocs-hcu.c) | 0
-rw-r--r--  drivers/crypto/intel/keembay/ocs-hcu.h (renamed from drivers/crypto/keembay/ocs-hcu.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/Kconfig (renamed from drivers/crypto/qat/Kconfig) | 0
-rw-r--r--  drivers/crypto/intel/qat/Makefile (renamed from drivers/crypto/qat/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/Makefile (renamed from drivers/crypto/qat/qat_4xxx/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c (renamed from drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c) | 62
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h (renamed from drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h) | 9
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c (renamed from drivers/crypto/qat/qat_4xxx/adf_drv.c) | 31
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/Makefile (renamed from drivers/crypto/qat/qat_c3xxx/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c (renamed from drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c) | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h (renamed from drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c (renamed from drivers/crypto/qat/qat_c3xxx/adf_drv.c) | 24
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/Makefile (renamed from drivers/crypto/qat/qat_c3xxxvf/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_drv.c) | 13
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/Makefile (renamed from drivers/crypto/qat/qat_c62x/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c (renamed from drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c) | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h (renamed from drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c (renamed from drivers/crypto/qat/qat_c62x/adf_drv.c) | 24
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/Makefile (renamed from drivers/crypto/qat/qat_c62xvf/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c (renamed from drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h (renamed from drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c (renamed from drivers/crypto/qat/qat_c62xvf/adf_drv.c) | 13
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile (renamed from drivers/crypto/qat/qat_common/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h (renamed from drivers/crypto/qat/qat_common/adf_accel_devices.h) | 5
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c (renamed from drivers/crypto/qat/qat_common/adf_accel_engine.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c (renamed from drivers/crypto/qat/qat_common/adf_admin.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_aer.c (renamed from drivers/crypto/qat/qat_common/adf_aer.c) | 39
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.c (renamed from drivers/crypto/qat/qat_common/adf_cfg.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.h (renamed from drivers/crypto/qat/qat_common/adf_cfg.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_common.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_common.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_strings.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_user.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_user.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h (renamed from drivers/crypto/qat/qat_common/adf_common_drv.h) | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c (renamed from drivers/crypto/qat/qat_common/adf_ctl_drv.c) | 30
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c (renamed from drivers/crypto/qat/qat_common/adf_dev_mgr.c) | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_config.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_config.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_config.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_config.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_dc.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_dc.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_hw_data.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_pfvf.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_pfvf.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_dc.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_dc.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_hw_data.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_pfvf.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_pfvf.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_pm.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_pm.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c (renamed from drivers/crypto/qat/qat_common/adf_hw_arbiter.c) | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c (renamed from drivers/crypto/qat/qat_common/adf_init.c) | 96
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c (renamed from drivers/crypto/qat/qat_common/adf_isr.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_msg.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_utils.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_utils.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c (renamed from drivers/crypto/qat/qat_common/adf_sriov.c) | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c (renamed from drivers/crypto/qat/qat_common/adf_sysfs.c) | 23
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport.c (renamed from drivers/crypto/qat/qat_common/adf_transport.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport.h (renamed from drivers/crypto/qat/qat_common/adf_transport.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h (renamed from drivers/crypto/qat/qat_common/adf_transport_access_macros.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_debug.c (renamed from drivers/crypto/qat/qat_common/adf_transport_debug.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_internal.h (renamed from drivers/crypto/qat/qat_common/adf_transport_internal.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_vf_isr.c (renamed from drivers/crypto/qat/qat_common/adf_vf_isr.c) | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_comp.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_la.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_pke.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hal.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hal.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h (renamed from drivers/crypto/qat/qat_common/icp_qat_uclo.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs.c (renamed from drivers/crypto/qat/qat_common/qat_algs.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs_send.c (renamed from drivers/crypto/qat/qat_common/qat_algs_send.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs_send.h (renamed from drivers/crypto/qat/qat_common/qat_algs_send.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_asym_algs.c (renamed from drivers/crypto/qat/qat_common/qat_asym_algs.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.c (renamed from drivers/crypto/qat/qat_common/qat_bl.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.h (renamed from drivers/crypto/qat/qat_common/qat_bl.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_algs.c (renamed from drivers/crypto/qat/qat_common/qat_comp_algs.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_req.h (renamed from drivers/crypto/qat/qat_common/qat_comp_req.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.c (renamed from drivers/crypto/qat/qat_common/qat_compression.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.h (renamed from drivers/crypto/qat/qat_common/qat_compression.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.c (renamed from drivers/crypto/qat/qat_common/qat_crypto.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.h (renamed from drivers/crypto/qat/qat_common/qat_crypto.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c (renamed from drivers/crypto/qat/qat_common/qat_hal.c) | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c (renamed from drivers/crypto/qat/qat_common/qat_uclo.c) | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/Makefile (renamed from drivers/crypto/qat/qat_dh895xcc/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c) | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h (renamed from drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_drv.c) | 24
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/Makefile (renamed from drivers/crypto/qat/qat_dh895xccvf/Makefile) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h) | 0
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_drv.c) | 13
-rw-r--r--  drivers/crypto/mxs-dcp.c | 21
-rw-r--r--  drivers/crypto/qce/core.c | 23
-rw-r--r--  drivers/crypto/qce/core.h | 1
-rw-r--r--  drivers/crypto/sa2ul.c | 6
-rw-r--r--  drivers/crypto/sahara.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 361
-rw-r--r--  drivers/i2c/busses/Kconfig | 5
-rw-r--r--  drivers/i2c/busses/i2c-designware-amdpsp.c | 205
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 1
-rw-r--r--  drivers/tee/amdtee/call.c | 2
-rw-r--r--  drivers/tee/amdtee/shm_pool.c | 2
-rw-r--r--  include/crypto/acompress.h | 132
-rw-r--r--  include/crypto/aead.h | 22
-rw-r--r--  include/crypto/akcipher.h | 102
-rw-r--r--  include/crypto/algapi.h | 93
-rw-r--r--  include/crypto/hash.h | 95
-rw-r--r--  include/crypto/internal/acompress.h | 43
-rw-r--r--  include/crypto/internal/hash.h | 2
-rw-r--r--  include/crypto/internal/scompress.h | 15
-rw-r--r--  include/crypto/kpp.h | 73
-rw-r--r--  include/crypto/rng.h | 65
-rw-r--r--  include/crypto/skcipher.h | 22
-rw-r--r--  include/crypto/utils.h | 73
-rw-r--r--  include/linux/crypto.h | 236
-rw-r--r--  include/linux/psp-platform-access.h | 65
-rw-r--r--  include/linux/psp-sev.h | 8
-rw-r--r--  include/linux/psp.h | 29
-rw-r--r--  kernel/padata.c | 4
-rw-r--r--  lib/crypto/utils.c | 2
257 files changed, 6776 insertions, 2730 deletions
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
deleted file mode 100644
index fdd53b184ba8..000000000000
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Qualcomm crypto engine driver
-
-Required properties:
-
-- compatible : should be "qcom,crypto-v5.1"
-- reg : specifies base physical address and size of the registers map
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "iface" clocks register interface
- "bus" clocks data transfer interface
- "core" clocks rest of the crypto block
-- dmas : DMA specifiers for tx and rx dma channels. For more see
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names : DMA request names should be "rx" and "tx"
-
-Example:
- crypto@fd45a000 {
- compatible = "qcom,crypto-v5.1";
- reg = <0xfd45a000 0x6000>;
- clocks = <&gcc GCC_CE2_AHB_CLK>,
- <&gcc GCC_CE2_AXI_CLK>,
- <&gcc GCC_CE2_CLK>;
- clock-names = "iface", "bus", "core";
- dmas = <&cryptobam 2>, <&cryptobam 3>;
- dma-names = "rx", "tx";
- };
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
new file mode 100644
index 000000000000..e375bd981300
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/qcom-qce.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm crypto engine driver
+
+maintainers:
+ - Bhupesh Sharma <bhupesh.sharma@linaro.org>
+
+description:
+ This document defines the binding for the QCE crypto
+ controller found on Qualcomm parts.
+
+properties:
+ compatible:
+ oneOf:
+ - const: qcom,crypto-v5.1
+ deprecated: true
+ description: Kept only for ABI backward compatibility
+
+ - const: qcom,crypto-v5.4
+ deprecated: true
+ description: Kept only for ABI backward compatibility
+
+ - items:
+ - enum:
+ - qcom,ipq6018-qce
+ - qcom,ipq8074-qce
+ - qcom,msm8996-qce
+ - qcom,sdm845-qce
+ - const: qcom,ipq4019-qce
+ - const: qcom,qce
+
+ - items:
+ - enum:
+ - qcom,sm8250-qce
+ - qcom,sm8350-qce
+ - qcom,sm8450-qce
+ - qcom,sm8550-qce
+ - const: qcom,sm8150-qce
+ - const: qcom,qce
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: iface clocks register interface.
+ - description: bus clocks data transfer interface.
+ - description: core clocks rest of the crypto block.
+
+ clock-names:
+ items:
+ - const: iface
+ - const: bus
+ - const: core
+
+ iommus:
+ minItems: 1
+ maxItems: 8
+ description:
+ phandle to apps_smmu node with sid mask.
+
+ interconnects:
+ maxItems: 1
+ description:
+ Interconnect path between qce crypto and main memory.
+
+ interconnect-names:
+ const: memory
+
+ dmas:
+ items:
+ - description: DMA specifiers for rx dma channel.
+ - description: DMA specifiers for tx dma channel.
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,crypto-v5.1
+ - qcom,crypto-v5.4
+ - qcom,ipq4019-qce
+
+ then:
+ required:
+ - clocks
+ - clock-names
+
+required:
+ - compatible
+ - reg
+ - dmas
+ - dma-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-apq8084.h>
+ crypto-engine@fd45a000 {
+ compatible = "qcom,ipq6018-qce", "qcom,ipq4019-qce", "qcom,qce";
+ reg = <0xfd45a000 0x6000>;
+ clocks = <&gcc GCC_CE2_AHB_CLK>,
+ <&gcc GCC_CE2_AXI_CLK>,
+ <&gcc GCC_CE2_CLK>;
+ clock-names = "iface", "bus", "core";
+ dmas = <&cryptobam 2>, <&cryptobam 3>;
+ dma-names = "rx", "tx";
+ iommus = <&apps_smmu 0x584 0x0011>,
+ <&apps_smmu 0x586 0x0011>,
+ <&apps_smmu 0x594 0x0011>,
+ <&apps_smmu 0x596 0x0011>;
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index f0abf21883af..cd69cdfc23a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2269,7 +2269,7 @@ F: arch/arm/boot/dts/intel-ixp*
F: arch/arm/mach-ixp4xx/
F: drivers/bus/intel-ixp4xx-eb.c
F: drivers/clocksource/timer-ixp4xx.c
-F: drivers/crypto/ixp4xx_crypto.c
+F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
@@ -10391,7 +10391,7 @@ INTEL IXP4XX CRYPTO SUPPORT
M: Corentin Labbe <clabbe@baylibre.com>
L: linux-crypto@vger.kernel.org
S: Maintained
-F: drivers/crypto/ixp4xx_crypto.c
+F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
INTEL ISHTP ECLITE DRIVER
M: Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
@@ -10426,11 +10426,11 @@ INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
-F: drivers/crypto/keembay/Kconfig
-F: drivers/crypto/keembay/Makefile
-F: drivers/crypto/keembay/keembay-ocs-aes-core.c
-F: drivers/crypto/keembay/ocs-aes.c
-F: drivers/crypto/keembay/ocs-aes.h
+F: drivers/crypto/intel/keembay/Kconfig
+F: drivers/crypto/intel/keembay/Makefile
+F: drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+F: drivers/crypto/intel/keembay/ocs-aes.c
+F: drivers/crypto/intel/keembay/ocs-aes.h
INTEL KEEM BAY OCS ECC CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
@@ -10438,20 +10438,20 @@ M: Prabhjot Khurana <prabhjot.khurana@intel.com>
M: Mark Gross <mgross@linux.intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-ecc.yaml
-F: drivers/crypto/keembay/Kconfig
-F: drivers/crypto/keembay/Makefile
-F: drivers/crypto/keembay/keembay-ocs-ecc.c
+F: drivers/crypto/intel/keembay/Kconfig
+F: drivers/crypto/intel/keembay/Makefile
+F: drivers/crypto/intel/keembay/keembay-ocs-ecc.c
INTEL KEEM BAY OCS HCU CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Declan Murphy <declan.murphy@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
-F: drivers/crypto/keembay/Kconfig
-F: drivers/crypto/keembay/Makefile
-F: drivers/crypto/keembay/keembay-ocs-hcu-core.c
-F: drivers/crypto/keembay/ocs-hcu.c
-F: drivers/crypto/keembay/ocs-hcu.h
+F: drivers/crypto/intel/keembay/Kconfig
+F: drivers/crypto/intel/keembay/Makefile
+F: drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+F: drivers/crypto/intel/keembay/ocs-hcu.c
+F: drivers/crypto/intel/keembay/ocs-hcu.h
INTEL THUNDER BAY EMMC PHY DRIVER
M: Nandhini Srikandan <nandhini.srikandan@intel.com>
@@ -17027,7 +17027,7 @@ QAT DRIVER
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
L: qat-linux@intel.com
S: Supported
-F: drivers/crypto/qat/
+F: drivers/crypto/intel/qat/
QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -17295,6 +17295,7 @@ M: Thara Gopinath <thara.gopinath@gmail.com>
L: linux-crypto@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/crypto/qcom-qce.yaml
F: drivers/crypto/qce/
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index f110d6cc195d..6e9bad8f6f33 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -1850,7 +1850,7 @@
};
crypto: crypto@1de0000 {
- compatible = "qcom,sm8550-qce";
+ compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
reg = <0x0 0x01dfa000 0x0 0x6000>;
dmas = <&cryptobam 4>, <&cryptobam 5>;
dma-names = "rx", "tx";
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index 7278a37c2d5c..baf450717b24 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
.text
@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
.endm
.align 4
-SYM_FUNC_START(aesbs_ecb_encrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_ecb_encrypt)
.align 4
-SYM_FUNC_START(aesbs_ecb_decrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_ecb_decrypt)
@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
ret
.endm
-SYM_FUNC_START(aesbs_xts_encrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_xts_encrypt)
-SYM_FUNC_START(aesbs_xts_decrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_xts_decrypt)
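
The four NEON entry points above are reached from the C glue code through function pointers, which is why they move from SYM_FUNC_START to SYM_TYPED_FUNC_START: with CONFIG_CFI_CLANG, every indirect call site checks a hash of the callee's type, and the typed macro makes the assembler emit a matching hash for the assembly symbol. A minimal C sketch of the pattern being protected follows; the prototype and names are illustrative assumptions, not the actual glue code.

#include <linux/types.h>

/*
 * Sketch only: an indirect call into an assembly routine. Under kCFI
 * the compiler instruments the call with a check that the target
 * carries the type hash of aesbs_crypt_fn, which SYM_TYPED_FUNC_START
 * provides for the .S symbols patched above.
 */
typedef void (*aesbs_crypt_fn)(u8 out[], u8 const in[], u8 const rk[],
			       int rounds, int blocks);

static void ecb_do_crypt(aesbs_crypt_fn fn, u8 *dst, const u8 *src,
			 const u8 *rk, int rounds, int blocks)
{
	fn(dst, src, rk, rounds, blocks);	/* CFI-checked indirect call */
}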
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index c1b964447401..7113f9355165 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -94,4 +94,21 @@ config CRYPTO_AES_PPC_SPE
architecture specific assembler implementations that work on 1KB
tables or 256 bytes S-boxes.
+config CRYPTO_AES_GCM_P10
+ tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+ depends on PPC64 && CPU_LITTLE_ENDIAN
+ select CRYPTO_LIB_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_AEAD
+ default m
+ help
+ AEAD cipher: AES cipher algorithms (FIPS-197)
+ GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)
+ Architecture: powerpc64 using:
+ - little-endian
+ - Power10 or later features
+
+ Support for cryptographic acceleration instructions on Power10 or
+ later CPU. This module supports stitched acceleration for AES/GCM.
+
endmenu
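
The AEAD this option provides is reached through the generic crypto API under the algorithm name "gcm(aes)". As a hedged usage sketch, not code from this merge: assuming a 128-bit key, a 16-byte tag, and a single buffer laid out as associated data followed by plaintext (with room for the tag), a kernel caller could drive it roughly like this.

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_gcm_encrypt(u8 *buf, unsigned int assoclen,
			    unsigned int cryptlen, const u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* buf holds assoc || plaintext; the tag is appended on encrypt */
	sg_init_one(&sg, buf, assoclen + cryptlen + 16);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}

On Power10 hardware the aes_gcm_p10 implementation added by this series binds to this name through its cra_priority of 2100; elsewhere the request falls back to another gcm(aes) provider.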
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 4808d97fede5..05c7486f42c5 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
+obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@@ -21,3 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
+aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
+
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+ $(call if_changed,perl)
+
+OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
new file mode 100644
index 000000000000..bd3475f5348d
--- /dev/null
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Glue code for accelerated AES-GCM stitched implementation for ppc64le.
+ *
+ * Copyright 2022- IBM Inc. All rights reserved
+ */
+
+#include <asm/unaligned.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define PPC_ALIGN 16
+#define GCM_IV_SIZE 12
+
+MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("aes");
+
+asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+ void *key);
+asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
+asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
+ void *rkey, u8 *iv, void *Xi);
+asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
+ void *rkey, u8 *iv, void *Xi);
+asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
+asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
+ unsigned char *aad, unsigned int alen);
+
+struct aes_key {
+ u8 key[AES_MAX_KEYLENGTH];
+ u64 rounds;
+};
+
+struct gcm_ctx {
+ u8 iv[16];
+ u8 ivtag[16];
+ u8 aad_hash[16];
+ u64 aadLen;
+ u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */
+};
+struct Hash_ctx {
+ u8 H[16]; /* subkey */
+ u8 Htable[256]; /* Xi, Hash table(offset 32) */
+};
+
+struct p10_aes_gcm_ctx {
+ struct aes_key enc_key;
+};
+
+static void vsx_begin(void)
+{
+ preempt_disable();
+ enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+ disable_kernel_vsx();
+ preempt_enable();
+}
+
+static void set_subkey(unsigned char *hash)
+{
+ *(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
+ *(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
+}
+
+/*
+ * Compute aad if any.
+ * - Hash aad and copy to Xi.
+ */
+static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
+ unsigned char *aad, int alen)
+{
+ int i;
+ u8 nXi[16] = {0, };
+
+ gctx->aadLen = alen;
+ i = alen & ~0xf;
+ if (i) {
+ gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
+ aad += i;
+ alen -= i;
+ }
+ if (alen) {
+ for (i = 0; i < alen; i++)
+ nXi[i] ^= aad[i];
+
+ memset(gctx->aad_hash, 0, 16);
+ gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
+ } else {
+ memcpy(gctx->aad_hash, nXi, 16);
+ }
+
+ memcpy(hash->Htable, gctx->aad_hash, 16);
+}
+
+static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
+ struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
+{
+ __be32 counter = cpu_to_be32(1);
+
+ aes_p8_encrypt(hash->H, hash->H, rdkey);
+ set_subkey(hash->H);
+ gcm_init_htable(hash->Htable+32, hash->H);
+
+ *((__be32 *)(iv+12)) = counter;
+
+ gctx->Plen = 0;
+
+ /*
+ * Encrypt counter vector as iv tag and increment counter.
+ */
+ aes_p8_encrypt(iv, gctx->ivtag, rdkey);
+
+ counter = cpu_to_be32(2);
+ *((__be32 *)(iv+12)) = counter;
+ memcpy(gctx->iv, iv, 16);
+
+ gctx->aadLen = assoclen;
+ memset(gctx->aad_hash, 0, 16);
+ if (assoclen)
+ set_aad(gctx, hash, assoc, assoclen);
+}
+
+static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
+{
+ int i;
+ unsigned char len_ac[16 + PPC_ALIGN];
+ unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
+ __be64 clen = cpu_to_be64(len << 3);
+ __be64 alen = cpu_to_be64(gctx->aadLen << 3);
+
+ if (len == 0 && gctx->aadLen == 0) {
+ memcpy(hash->Htable, gctx->ivtag, 16);
+ return;
+ }
+
+ /*
+ * Len is in bits.
+ */
+ *((__be64 *)(aclen)) = alen;
+ *((__be64 *)(aclen+8)) = clen;
+
+ /*
+ * hash (AAD len and len)
+ */
+ gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
+
+ for (i = 0; i < 16; i++)
+ hash->Htable[i] ^= gctx->ivtag[i];
+}
+
+static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ vsx_begin();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ vsx_end();
+
+ return ret ? -EINVAL : 0;
+}
+
+static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+ u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
+ struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
+ u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
+ struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
+ struct scatter_walk assoc_sg_walk;
+ struct skcipher_walk walk;
+ u8 *assocmem = NULL;
+ u8 *assoc;
+ unsigned int assoclen = req->assoclen;
+ unsigned int cryptlen = req->cryptlen;
+ unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
+ unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
+ int ret;
+ unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
+ u8 otag[16];
+ int total_processed = 0;
+
+ memset(databuf, 0, sizeof(databuf));
+ memset(hashbuf, 0, sizeof(hashbuf));
+ memset(ivbuf, 0, sizeof(ivbuf));
+ memcpy(iv, req->iv, GCM_IV_SIZE);
+
+ /* Linearize assoc, if not already linear */
+ if (req->src->length >= assoclen && req->src->length) {
+ scatterwalk_start(&assoc_sg_walk, req->src);
+ assoc = scatterwalk_map(&assoc_sg_walk);
+ } else {
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ /* assoc can be any length, so must be on heap */
+ assocmem = kmalloc(assoclen, flags);
+ if (unlikely(!assocmem))
+ return -ENOMEM;
+ assoc = assocmem;
+
+ scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
+ }
+
+ vsx_begin();
+ gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
+ vsx_end();
+
+ if (!assocmem)
+ scatterwalk_unmap(assoc);
+ else
+ kfree(assocmem);
+
+ if (enc)
+ ret = skcipher_walk_aead_encrypt(&walk, req, false);
+ else
+ ret = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ while (walk.nbytes > 0 && ret == 0) {
+
+ vsx_begin();
+ if (enc)
+ aes_p10_gcm_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ walk.nbytes,
+ &ctx->enc_key, gctx->iv, hash->Htable);
+ else
+ aes_p10_gcm_decrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ walk.nbytes,
+ &ctx->enc_key, gctx->iv, hash->Htable);
+ vsx_end();
+
+ total_processed += walk.nbytes;
+ ret = skcipher_walk_done(&walk, 0);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Finalize hash */
+ vsx_begin();
+ finish_tag(gctx, hash, total_processed);
+ vsx_end();
+
+ /* copy Xi to end of dst */
+ if (enc)
+ scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen,
+ auth_tag_len, 1);
+ else {
+ scatterwalk_map_and_copy(otag, req->src,
+ req->assoclen + cryptlen - auth_tag_len,
+ auth_tag_len, 0);
+
+ if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
+ memzero_explicit(hash->Htable, 16);
+ return -EBADMSG;
+ }
+ }
+
+ return 0;
+}
+
+static int p10_aes_gcm_encrypt(struct aead_request *req)
+{
+ return p10_aes_gcm_crypt(req, 1);
+}
+
+static int p10_aes_gcm_decrypt(struct aead_request *req)
+{
+ return p10_aes_gcm_crypt(req, 0);
+}
+
+static struct aead_alg gcm_aes_alg = {
+ .ivsize = GCM_IV_SIZE,
+ .maxauthsize = 16,
+
+ .setauthsize = set_authsize,
+ .setkey = p10_aes_gcm_setkey,
+ .encrypt = p10_aes_gcm_encrypt,
+ .decrypt = p10_aes_gcm_decrypt,
+
+ .base.cra_name = "gcm(aes)",
+ .base.cra_driver_name = "aes_gcm_p10",
+ .base.cra_priority = 2100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx),
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init p10_init(void)
+{
+ return crypto_register_aead(&gcm_aes_alg);
+}
+
+static void __exit p10_exit(void)
+{
+ crypto_unregister_aead(&gcm_aes_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
+module_exit(p10_exit);
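
The assembly file added next stitches counter-mode AES with GHASH. Each GHASH multiply is computed with vpmsumd carryless multiplies split into low, middle and high partial products before reduction modulo the GCM polynomial, as its header comment describes. The following plain-C model is a reference sketch of that split only, not the kernel's code: clmul64() is a hypothetical stand-in for one vpmsumd half, and the reduction step is omitted.

#include <stdint.h>

struct u128 { uint64_t hi, lo; };

/* Bit-by-bit carryless (GF(2)) multiply: models one 64x64 product. */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };
	int i;

	for (i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}

/*
 * X * H split into the three partial products the assembly combines:
 * L = X.l * H.l, H = X.h * H.h, M = X.h * H.l ^ X.l * H.h.
 */
static void ghash_partials(uint64_t xh, uint64_t xl,
			   uint64_t hh, uint64_t hl,
			   struct u128 *L, struct u128 *M, struct u128 *H)
{
	struct u128 m2;

	*L = clmul64(xl, hl);
	*H = clmul64(xh, hh);
	*M = clmul64(xh, hl);
	m2 = clmul64(xl, hh);
	M->hi ^= m2.hi;
	M->lo ^= m2.lo;
}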
diff --git a/arch/powerpc/crypto/aes-gcm-p10.S b/arch/powerpc/crypto/aes-gcm-p10.S
new file mode 100644
index 000000000000..a51f4b265308
--- /dev/null
+++ b/arch/powerpc/crypto/aes-gcm-p10.S
@@ -0,0 +1,1521 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+ #
+ # Accelerated AES-GCM stitched implementation for ppc64le.
+ #
+ # Copyright 2022- IBM Inc. All rights reserved
+ #
+ #===================================================================================
+ # Written by Danny Tsen <dtsen@linux.ibm.com>
+ #
+ # GHASH is based on the Karatsuba multiplication method.
+ #
+ # Xi xor X1
+ #
+ # X1 * H^4 + X2 * H^3 + X3 * H^2 + X4 * H =
+ # (X1.h * H4.h + X1.l * H4.l + X1 * H4) +
+ # (X2.h * H3.h + X2.l * H3.l + X2 * H3) +
+ # (X3.h * H2.h + X3.l * H2.l + X3 * H2) +
+ # (X4.h * H.h + X4.l * H.l + X4 * H)
+ #
+ # Xi = v0
+ # H Poly = v2
+ # Hash keys = v3 - v14
+ # ( H.l, H, H.h)
+ # ( H^2.l, H^2, H^2.h)
+ # ( H^3.l, H^3, H^3.h)
+ # ( H^4.l, H^4, H^4.h)
+ #
+ # v30 is IV
+ # v31 - counter 1
+ #
+ # AES used,
+ # vs0 - vs14 for round keys
+ # v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted)
+ #
+ # This implementation uses stitched AES-GCM approach to improve overall performance.
+ # AES is implemented with 8x blocks and GHASH is using 2 4x blocks.
+ #
+ # ===================================================================================
+ #
+
+#include <asm/ppc_asm.h>
+#include <linux/linkage.h>
+
+.machine "any"
+.text
+
+ # 4x loops
+ # v15 - v18 - input states
+ # vs1 - vs9 - round keys
+ #
+.macro Loop_aes_middle4x
+ xxlor 19+32, 1, 1
+ xxlor 20+32, 2, 2
+ xxlor 21+32, 3, 3
+ xxlor 22+32, 4, 4
+
+ vcipher 15, 15, 19
+ vcipher 16, 16, 19
+ vcipher 17, 17, 19
+ vcipher 18, 18, 19
+
+ vcipher 15, 15, 20
+ vcipher 16, 16, 20
+ vcipher 17, 17, 20
+ vcipher 18, 18, 20
+
+ vcipher 15, 15, 21
+ vcipher 16, 16, 21
+ vcipher 17, 17, 21
+ vcipher 18, 18, 21
+
+ vcipher 15, 15, 22
+ vcipher 16, 16, 22
+ vcipher 17, 17, 22
+ vcipher 18, 18, 22
+
+ xxlor 19+32, 5, 5
+ xxlor 20+32, 6, 6
+ xxlor 21+32, 7, 7
+ xxlor 22+32, 8, 8
+
+ vcipher 15, 15, 19
+ vcipher 16, 16, 19
+ vcipher 17, 17, 19
+ vcipher 18, 18, 19
+
+ vcipher 15, 15, 20
+ vcipher 16, 16, 20
+ vcipher 17, 17, 20
+ vcipher 18, 18, 20
+
+ vcipher 15, 15, 21
+ vcipher 16, 16, 21
+ vcipher 17, 17, 21
+ vcipher 18, 18, 21
+
+ vcipher 15, 15, 22
+ vcipher 16, 16, 22
+ vcipher 17, 17, 22
+ vcipher 18, 18, 22
+
+ xxlor 23+32, 9, 9
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+.endm
+
+ # 8x loops
+ # v15 - v22 - input states
+ # vs1 - vs9 - round keys
+ #
+.macro Loop_aes_middle8x
+ xxlor 23+32, 1, 1
+ xxlor 24+32, 2, 2
+ xxlor 25+32, 3, 3
+ xxlor 26+32, 4, 4
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ vcipher 15, 15, 25
+ vcipher 16, 16, 25
+ vcipher 17, 17, 25
+ vcipher 18, 18, 25
+ vcipher 19, 19, 25
+ vcipher 20, 20, 25
+ vcipher 21, 21, 25
+ vcipher 22, 22, 25
+
+ vcipher 15, 15, 26
+ vcipher 16, 16, 26
+ vcipher 17, 17, 26
+ vcipher 18, 18, 26
+ vcipher 19, 19, 26
+ vcipher 20, 20, 26
+ vcipher 21, 21, 26
+ vcipher 22, 22, 26
+
+ xxlor 23+32, 5, 5
+ xxlor 24+32, 6, 6
+ xxlor 25+32, 7, 7
+ xxlor 26+32, 8, 8
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ vcipher 15, 15, 25
+ vcipher 16, 16, 25
+ vcipher 17, 17, 25
+ vcipher 18, 18, 25
+ vcipher 19, 19, 25
+ vcipher 20, 20, 25
+ vcipher 21, 21, 25
+ vcipher 22, 22, 25
+
+ vcipher 15, 15, 26
+ vcipher 16, 16, 26
+ vcipher 17, 17, 26
+ vcipher 18, 18, 26
+ vcipher 19, 19, 26
+ vcipher 20, 20, 26
+ vcipher 21, 21, 26
+ vcipher 22, 22, 26
+
+ xxlor 23+32, 9, 9
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+.endm
+
+.macro Loop_aes_middle_1x
+ xxlor 19+32, 1, 1
+ xxlor 20+32, 2, 2
+ xxlor 21+32, 3, 3
+ xxlor 22+32, 4, 4
+
+ vcipher 15, 15, 19
+ vcipher 15, 15, 20
+ vcipher 15, 15, 21
+ vcipher 15, 15, 22
+
+ xxlor 19+32, 5, 5
+ xxlor 20+32, 6, 6
+ xxlor 21+32, 7, 7
+ xxlor 22+32, 8, 8
+
+ vcipher 15, 15, 19
+ vcipher 15, 15, 20
+ vcipher 15, 15, 21
+ vcipher 15, 15, 22
+
+ xxlor 19+32, 9, 9
+ vcipher 15, 15, 19
+.endm
+
+ #
+ # Compute 4x hash values based on Karatsuba method.
+ #
+.macro ppc_aes_gcm_ghash
+ vxor 15, 15, 0
+
+ vpmsumd 23, 12, 15 # H4.L * X.L
+ vpmsumd 24, 9, 16
+ vpmsumd 25, 6, 17
+ vpmsumd 26, 3, 18
+
+ vxor 23, 23, 24
+ vxor 23, 23, 25
+ vxor 23, 23, 26 # L
+
+ vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L
+ vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L
+ vpmsumd 26, 7, 17
+ vpmsumd 27, 4, 18
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+ vxor 24, 24, 27 # M
+
+ # sum hash and reduction with H Poly
+ vpmsumd 28, 23, 2 # reduction
+
+ vxor 29, 29, 29
+ vsldoi 26, 24, 29, 8 # mL
+ vsldoi 29, 29, 24, 8 # mH
+ vxor 23, 23, 26 # mL + L
+
+ vsldoi 23, 23, 23, 8 # swap
+ vxor 23, 23, 28
+
+ vpmsumd 24, 14, 15 # H4.H * X.H
+ vpmsumd 25, 11, 16
+ vpmsumd 26, 8, 17
+ vpmsumd 27, 5, 18
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+ vxor 24, 24, 27
+
+ vxor 24, 24, 29
+
+ # sum hash and reduction with H Poly
+ vsldoi 27, 23, 23, 8 # swap
+ vpmsumd 23, 23, 2
+ vxor 27, 27, 24
+ vxor 23, 23, 27
+
+ xxlor 32, 23+32, 23+32 # update hash
+
+.endm
+
+ #
+ # Combine two 4x ghash
+ # v15 - v22 - input blocks
+ #
+.macro ppc_aes_gcm_ghash2_4x
+ # first 4x hash
+ vxor 15, 15, 0 # Xi + X
+
+ vpmsumd 23, 12, 15 # H4.L * X.L
+ vpmsumd 24, 9, 16
+ vpmsumd 25, 6, 17
+ vpmsumd 26, 3, 18
+
+ vxor 23, 23, 24
+ vxor 23, 23, 25
+ vxor 23, 23, 26 # L
+
+ vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L
+ vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L
+ vpmsumd 26, 7, 17
+ vpmsumd 27, 4, 18
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+
+ # sum hash and reduction with H Poly
+ vpmsumd 28, 23, 2 # reduction
+
+ vxor 29, 29, 29
+
+ vxor 24, 24, 27 # M
+ vsldoi 26, 24, 29, 8 # mL
+ vsldoi 29, 29, 24, 8 # mH
+ vxor 23, 23, 26 # mL + L
+
+ vsldoi 23, 23, 23, 8 # swap
+ vxor 23, 23, 28
+
+ vpmsumd 24, 14, 15 # H4.H * X.H
+ vpmsumd 25, 11, 16
+ vpmsumd 26, 8, 17
+ vpmsumd 27, 5, 18
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+ vxor 24, 24, 27 # H
+
+ vxor 24, 24, 29 # H + mH
+
+ # sum hash and reduction with H Poly
+ vsldoi 27, 23, 23, 8 # swap
+ vpmsumd 23, 23, 2
+ vxor 27, 27, 24
+ vxor 27, 23, 27 # 1st Xi
+
+ # 2nd 4x hash
+ vpmsumd 24, 9, 20
+ vpmsumd 25, 6, 21
+ vpmsumd 26, 3, 22
+ vxor 19, 19, 27 # Xi + X
+ vpmsumd 23, 12, 19 # H4.L * X.L
+
+ vxor 23, 23, 24
+ vxor 23, 23, 25
+ vxor 23, 23, 26 # L
+
+ vpmsumd 24, 13, 19 # H4.L * X.H + H4.H * X.L
+ vpmsumd 25, 10, 20 # H3.L * X1.H + H3.H * X1.L
+ vpmsumd 26, 7, 21
+ vpmsumd 27, 4, 22
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+
+ # sum hash and reduction with H Poly
+ vpmsumd 28, 23, 2 # reduction
+
+ vxor 29, 29, 29
+
+ vxor 24, 24, 27 # M
+ vsldoi 26, 24, 29, 8 # mL
+ vsldoi 29, 29, 24, 8 # mH
+ vxor 23, 23, 26 # mL + L
+
+ vsldoi 23, 23, 23, 8 # swap
+ vxor 23, 23, 28
+
+ vpmsumd 24, 14, 19 # H4.H * X.H
+ vpmsumd 25, 11, 20
+ vpmsumd 26, 8, 21
+ vpmsumd 27, 5, 22
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26
+ vxor 24, 24, 27 # H
+
+ vxor 24, 24, 29 # H + mH
+
+ # sum hash and reduction with H Poly
+ vsldoi 27, 23, 23, 8 # swap
+ vpmsumd 23, 23, 2
+ vxor 27, 27, 24
+ vxor 23, 23, 27
+
+ xxlor 32, 23+32, 23+32 # update hash
+
+.endm
+
+ #
+ # Compute update single hash
+ #
+.macro ppc_update_hash_1x
+ vxor 28, 28, 0
+
+ vxor 19, 19, 19
+
+ vpmsumd 22, 3, 28 # L
+ vpmsumd 23, 4, 28 # M
+ vpmsumd 24, 5, 28 # H
+
+ vpmsumd 27, 22, 2 # reduction
+
+ vsldoi 25, 23, 19, 8 # mL
+ vsldoi 26, 19, 23, 8 # mH
+ vxor 22, 22, 25 # LL + LL
+ vxor 24, 24, 26 # HH + HH
+
+ vsldoi 22, 22, 22, 8 # swap
+ vxor 22, 22, 27
+
+ vsldoi 20, 22, 22, 8 # swap
+ vpmsumd 22, 22, 2 # reduction
+ vxor 20, 20, 24
+ vxor 22, 22, 20
+
+ vmr 0, 22 # update hash
+
+.endm
+
+.macro SAVE_REGS
+ stdu 1,-640(1)
+ mflr 0
+
+ std 14,112(1)
+ std 15,120(1)
+ std 16,128(1)
+ std 17,136(1)
+ std 18,144(1)
+ std 19,152(1)
+ std 20,160(1)
+ std 21,168(1)
+ li 9, 256
+ stvx 20, 9, 1
+ addi 9, 9, 16
+ stvx 21, 9, 1
+ addi 9, 9, 16
+ stvx 22, 9, 1
+ addi 9, 9, 16
+ stvx 23, 9, 1
+ addi 9, 9, 16
+ stvx 24, 9, 1
+ addi 9, 9, 16
+ stvx 25, 9, 1
+ addi 9, 9, 16
+ stvx 26, 9, 1
+ addi 9, 9, 16
+ stvx 27, 9, 1
+ addi 9, 9, 16
+ stvx 28, 9, 1
+ addi 9, 9, 16
+ stvx 29, 9, 1
+ addi 9, 9, 16
+ stvx 30, 9, 1
+ addi 9, 9, 16
+ stvx 31, 9, 1
+ stxv 14, 464(1)
+ stxv 15, 480(1)
+ stxv 16, 496(1)
+ stxv 17, 512(1)
+ stxv 18, 528(1)
+ stxv 19, 544(1)
+ stxv 20, 560(1)
+ stxv 21, 576(1)
+ stxv 22, 592(1)
+ std 0, 656(1)
+.endm
+
+.macro RESTORE_REGS
+ lxv 14, 464(1)
+ lxv 15, 480(1)
+ lxv 16, 496(1)
+ lxv 17, 512(1)
+ lxv 18, 528(1)
+ lxv 19, 544(1)
+ lxv 20, 560(1)
+ lxv 21, 576(1)
+ lxv 22, 592(1)
+ li 9, 256
+ lvx 20, 9, 1
+ addi 9, 9, 16
+ lvx 21, 9, 1
+ addi 9, 9, 16
+ lvx 22, 9, 1
+ addi 9, 9, 16
+ lvx 23, 9, 1
+ addi 9, 9, 16
+ lvx 24, 9, 1
+ addi 9, 9, 16
+ lvx 25, 9, 1
+ addi 9, 9, 16
+ lvx 26, 9, 1
+ addi 9, 9, 16
+ lvx 27, 9, 1
+ addi 9, 9, 16
+ lvx 28, 9, 1
+ addi 9, 9, 16
+ lvx 29, 9, 1
+ addi 9, 9, 16
+ lvx 30, 9, 1
+ addi 9, 9, 16
+ lvx 31, 9, 1
+
+ ld 0, 656(1)
+ ld 14,112(1)
+ ld 15,120(1)
+ ld 16,128(1)
+ ld 17,136(1)
+ ld 18,144(1)
+ ld 19,152(1)
+ ld 20,160(1)
+ ld 21,168(1)
+
+ mtlr 0
+ addi 1, 1, 640
+.endm
+
+.macro LOAD_HASH_TABLE
+ # Load Xi
+ lxvb16x 32, 0, 8 # load Xi
+
+ # load Hash - h^4, h^3, h^2, h
+ li 10, 32
+ lxvd2x 2+32, 10, 8 # H Poly
+ li 10, 48
+ lxvd2x 3+32, 10, 8 # Hl
+ li 10, 64
+ lxvd2x 4+32, 10, 8 # H
+ li 10, 80
+ lxvd2x 5+32, 10, 8 # Hh
+
+ li 10, 96
+ lxvd2x 6+32, 10, 8 # H^2l
+ li 10, 112
+ lxvd2x 7+32, 10, 8 # H^2
+ li 10, 128
+ lxvd2x 8+32, 10, 8 # H^2h
+
+ li 10, 144
+ lxvd2x 9+32, 10, 8 # H^3l
+ li 10, 160
+ lxvd2x 10+32, 10, 8 # H^3
+ li 10, 176
+ lxvd2x 11+32, 10, 8 # H^3h
+
+ li 10, 192
+ lxvd2x 12+32, 10, 8 # H^4l
+ li 10, 208
+ lxvd2x 13+32, 10, 8 # H^4
+ li 10, 224
+ lxvd2x 14+32, 10, 8 # H^4h
+.endm
+
+ #
+ # aes_p10_gcm_encrypt (const void *inp, void *out, size_t len,
+ # const char *rk, unsigned char iv[16], void *Xip);
+ #
+ # r3 - inp
+ # r4 - out
+ # r5 - len
+ # r6 - AES round keys
+ # r7 - iv and other data
+ # r8 - Xi, H Poly, hash keys
+ #
+ # rounds is at offset 240 in rk
+ # Xi is at 0 in gcm_table (Xip).
+ #
+_GLOBAL(aes_p10_gcm_encrypt)
+.align 5
+
+ SAVE_REGS
+
+ LOAD_HASH_TABLE
+
+ # initialize ICB: GHASH( IV ), IV - r7
+ lxvb16x 30+32, 0, 7 # load IV - v30
+
+ mr 12, 5 # length
+ li 11, 0 # block index
+
+ # counter 1
+ vxor 31, 31, 31
+ vspltisb 22, 1
+ vsldoi 31, 31, 22, 1 # counter 1
+
+ # load round key to VSR
+ lxv 0, 0(6)
+ lxv 1, 0x10(6)
+ lxv 2, 0x20(6)
+ lxv 3, 0x30(6)
+ lxv 4, 0x40(6)
+ lxv 5, 0x50(6)
+ lxv 6, 0x60(6)
+ lxv 7, 0x70(6)
+ lxv 8, 0x80(6)
+ lxv 9, 0x90(6)
+ lxv 10, 0xa0(6)
+
+ # load rounds - 10 (128), 12 (192), 14 (256)
+ lwz 9,240(6)
+
+ #
+ # vxor state, state, w # addroundkey
+ xxlor 32+29, 0, 0
+ vxor 15, 30, 29 # IV + round key - add round key 0
+
+ cmpdi 9, 10
+ beq Loop_aes_gcm_8x
+
+ # load 2 more round keys (v11, v12)
+ lxv 11, 0xb0(6)
+ lxv 12, 0xc0(6)
+
+ cmpdi 9, 12
+ beq Loop_aes_gcm_8x
+
+ # load 2 more round keys (v13, v14)
+ lxv 13, 0xd0(6)
+ lxv 14, 0xe0(6)
+ cmpdi 9, 14
+ beq Loop_aes_gcm_8x
+
+ b aes_gcm_out
+
+.align 5
+Loop_aes_gcm_8x:
+ mr 14, 3
+ mr 9, 4
+
+ #
+ # check partial block
+ #
+Continue_partial_check:
+ ld 15, 56(7)
+ cmpdi 15, 0
+ beq Continue
+ bgt Final_block
+ cmpdi 15, 16
+ blt Final_block
+
+Continue:
+ # n blocks
+ li 10, 128
+ divdu 10, 12, 10 # n 128 bytes-blocks
+ cmpdi 10, 0
+ beq Loop_last_block
+
+ vaddudm 30, 30, 31 # IV + counter
+ vxor 16, 30, 29
+ vaddudm 30, 30, 31
+ vxor 17, 30, 29
+ vaddudm 30, 30, 31
+ vxor 18, 30, 29
+ vaddudm 30, 30, 31
+ vxor 19, 30, 29
+ vaddudm 30, 30, 31
+ vxor 20, 30, 29
+ vaddudm 30, 30, 31
+ vxor 21, 30, 29
+ vaddudm 30, 30, 31
+ vxor 22, 30, 29
+
+ mtctr 10
+
+ li 15, 16
+ li 16, 32
+ li 17, 48
+ li 18, 64
+ li 19, 80
+ li 20, 96
+ li 21, 112
+
+ lwz 10, 240(6)
+
+Loop_8x_block:
+
+ lxvb16x 15, 0, 14 # load block
+ lxvb16x 16, 15, 14 # load block
+ lxvb16x 17, 16, 14 # load block
+ lxvb16x 18, 17, 14 # load block
+ lxvb16x 19, 18, 14 # load block
+ lxvb16x 20, 19, 14 # load block
+ lxvb16x 21, 20, 14 # load block
+ lxvb16x 22, 21, 14 # load block
+ addi 14, 14, 128
+
+ Loop_aes_middle8x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_next_ghash
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_next_ghash
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_next_ghash
+ b aes_gcm_out
+
+Do_next_ghash:
+
+ #
+ # last round
+ vcipherlast 15, 15, 23
+ vcipherlast 16, 16, 23
+
+ xxlxor 47, 47, 15
+ stxvb16x 47, 0, 9 # store output
+ xxlxor 48, 48, 16
+ stxvb16x 48, 15, 9 # store output
+
+ vcipherlast 17, 17, 23
+ vcipherlast 18, 18, 23
+
+ xxlxor 49, 49, 17
+ stxvb16x 49, 16, 9 # store output
+ xxlxor 50, 50, 18
+ stxvb16x 50, 17, 9 # store output
+
+ vcipherlast 19, 19, 23
+ vcipherlast 20, 20, 23
+
+ xxlxor 51, 51, 19
+ stxvb16x 51, 18, 9 # store output
+ xxlxor 52, 52, 20
+ stxvb16x 52, 19, 9 # store output
+
+ vcipherlast 21, 21, 23
+ vcipherlast 22, 22, 23
+
+ xxlxor 53, 53, 21
+ stxvb16x 53, 20, 9 # store output
+ xxlxor 54, 54, 22
+ stxvb16x 54, 21, 9 # store output
+
+ addi 9, 9, 128
+
+ # ghash here
+ ppc_aes_gcm_ghash2_4x
+
+ xxlor 27+32, 0, 0
+ vaddudm 30, 30, 31 # IV + counter
+ vmr 29, 30
+ vxor 15, 30, 27 # add round key
+ vaddudm 30, 30, 31
+ vxor 16, 30, 27
+ vaddudm 30, 30, 31
+ vxor 17, 30, 27
+ vaddudm 30, 30, 31
+ vxor 18, 30, 27
+ vaddudm 30, 30, 31
+ vxor 19, 30, 27
+ vaddudm 30, 30, 31
+ vxor 20, 30, 27
+ vaddudm 30, 30, 31
+ vxor 21, 30, 27
+ vaddudm 30, 30, 31
+ vxor 22, 30, 27
+
+ addi 12, 12, -128
+ addi 11, 11, 128
+
+ bdnz Loop_8x_block
+
+ vmr 30, 29
+ stxvb16x 30+32, 0, 7 # update IV
+
+Loop_last_block:
+ cmpdi 12, 0
+ beq aes_gcm_out
+
+ # loop last few blocks
+ li 10, 16
+ divdu 10, 12, 10
+
+ mtctr 10
+
+ lwz 10, 240(6)
+
+ cmpdi 12, 16
+ blt Final_block
+
+Next_rem_block:
+ lxvb16x 15, 0, 14 # load block
+
+ Loop_aes_middle_1x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_next_1x
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_next_1x
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_next_1x
+
+Do_next_1x:
+ vcipherlast 15, 15, 23
+
+ xxlxor 47, 47, 15
+ stxvb16x 47, 0, 9 # store output
+ addi 14, 14, 16
+ addi 9, 9, 16
+
+ vmr 28, 15
+ ppc_update_hash_1x
+
+ addi 12, 12, -16
+ addi 11, 11, 16
+ xxlor 19+32, 0, 0
+ vaddudm 30, 30, 31 # IV + counter
+ vxor 15, 30, 19 # add round key
+
+ bdnz Next_rem_block
+
+ li 15, 0
+ std 15, 56(7) # clear partial?
+ stxvb16x 30+32, 0, 7 # update IV
+ cmpdi 12, 0
+ beq aes_gcm_out
+
+Final_block:
+ lwz 10, 240(6)
+ Loop_aes_middle_1x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_final_1x
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_final_1x
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_final_1x
+
+Do_final_1x:
+ vcipherlast 15, 15, 23
+
+ # check partial block
+ li 21, 0 # encrypt
+ ld 15, 56(7) # partial?
+ cmpdi 15, 0
+ beq Normal_block
+ bl Do_partial_block
+
+ cmpdi 12, 0
+ ble aes_gcm_out
+
+ b Continue_partial_check
+
+Normal_block:
+ lxvb16x 15, 0, 14 # load last block
+ xxlxor 47, 47, 15
+
+ # create partial block mask
+ li 15, 16
+ sub 15, 15, 12 # index to the mask
+
+ vspltisb 16, -1 # first 16 bytes - 0xffff...ff
+ vspltisb 17, 0 # second 16 bytes - 0x0000...00
+ li 10, 192
+ stvx 16, 10, 1
+ addi 10, 10, 16
+ stvx 17, 10, 1
+
+ addi 10, 1, 192
+ lxvb16x 16, 15, 10 # load partial block mask
+ xxland 47, 47, 16
+
+ vmr 28, 15
+ ppc_update_hash_1x
+
+ # * should store only the remaining bytes.
+ bl Write_partial_block
+
+ stxvb16x 30+32, 0, 7 # update IV
+ std 12, 56(7) # update partial?
+ li 16, 16
+
+ stxvb16x 32, 0, 8 # write out Xi
+ stxvb16x 32, 16, 8 # write out Xi
+ b aes_gcm_out
+
+ #
+ # Compute data mask
+ #
+.macro GEN_MASK _mask _start _end
+ vspltisb 16, -1 # first 16 bytes - 0xffff...ff
+ vspltisb 17, 0 # second 16 bytes - 0x0000...00
+ li 10, 192
+ stxvb16x 17+32, 10, 1
+ add 10, 10, \_start
+ stxvb16x 16+32, 10, 1
+ add 10, 10, \_end
+ stxvb16x 17+32, 10, 1
+
+ addi 10, 1, 192
+ lxvb16x \_mask, 0, 10 # load partial block mask
+.endm
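+
+	# Illustratively, if the register passed as _start holds 3 and
+	# the one passed as _end holds 5, the mask loaded into _mask has
+	# bytes 0-2 = 0x00, bytes 3-7 = 0xff and bytes 8-15 = 0x00,
+	# i.e. a 5-byte window at offset 3.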
+
+ #
+ # Handle multiple partial blocks for encrypt and decrypt
+ # operations.
+ #
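+	# On entry (as set up by the callers above): r15 holds the number
+	# of bytes already buffered in the partial block (from 56(r7)),
+	# r5 the input length for this call, and r21 is 0 for encrypt
+	# and 1 for decrypt.
+	#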
+SYM_FUNC_START_LOCAL(Do_partial_block)
+ add 17, 15, 5
+ cmpdi 17, 16
+ bgt Big_block
+ GEN_MASK 18, 15, 5
+ b _Partial
+SYM_FUNC_END(Do_partial_block)
+Big_block:
+ li 16, 16
+ GEN_MASK 18, 15, 16
+
+_Partial:
+ lxvb16x 17+32, 0, 14 # load last block
+ sldi 16, 15, 3
+ mtvsrdd 32+16, 0, 16
+ vsro 17, 17, 16
+ xxlxor 47, 47, 17+32
+ xxland 47, 47, 18
+
+ vxor 0, 0, 0 # clear Xi
+ vmr 28, 15
+
+ cmpdi 21, 0 # encrypt/decrypt ops?
+ beq Skip_decrypt
+ xxland 32+28, 32+17, 18
+
+Skip_decrypt:
+
+ ppc_update_hash_1x
+
+ li 16, 16
+ lxvb16x 32+29, 16, 8
+ vxor 0, 0, 29
+ stxvb16x 32, 0, 8 # save Xi
+ stxvb16x 32, 16, 8 # save Xi
+
+ # store partial block
+ # loop the rest of the stream if any
+ sldi 16, 15, 3
+ mtvsrdd 32+16, 0, 16
+ vslo 15, 15, 16
+ #stxvb16x 15+32, 0, 9 # last block
+
+ li 16, 16
+ sub 17, 16, 15 # 16 - partial
+
+ add 16, 15, 5
+ cmpdi 16, 16
+ bgt Larger_16
+ mr 17, 5
+Larger_16:
+
+ # write partial
+ li 10, 192
+ stxvb16x 15+32, 10, 1 # save current block
+
+ addi 10, 9, -1
+ addi 16, 1, 191
+ mtctr 17 # move partial byte count
+
+Write_last_partial:
+ lbzu 18, 1(16)
+ stbu 18, 1(10)
+ bdnz Write_last_partial
+ # Complete loop partial
+
+ add 14, 14, 17
+ add 9, 9, 17
+ sub 12, 12, 17
+ add 11, 11, 17
+
+ add 15, 15, 5
+ cmpdi 15, 16
+ blt Save_partial
+
+ vaddudm 30, 30, 31
+ stxvb16x 30+32, 0, 7 # update IV
+ xxlor 32+29, 0, 0
+ vxor 15, 30, 29 # IV + round key - add round key 0
+ li 15, 0
+ std 15, 56(7) # partial done - clear
+ b Partial_done
+Save_partial:
+ std 15, 56(7) # partial
+
+Partial_done:
+ blr
+
+ #
+ # Write partial block
+ # r9 - output
+ # r12 - remaining bytes
+ # v15 - partial input data
+ #
+SYM_FUNC_START_LOCAL(Write_partial_block)
+ li 10, 192
+ stxvb16x 15+32, 10, 1 # last block
+
+ addi 10, 9, -1
+ addi 16, 1, 191
+
+ mtctr 12 # remaining bytes
+ li 15, 0
+
+Write_last_byte:
+ lbzu 14, 1(16)
+ stbu 14, 1(10)
+ bdnz Write_last_byte
+ blr
+SYM_FUNC_END(Write_partial_block)
+
+aes_gcm_out:
+ # out = state
+ stxvb16x 32, 0, 8 # write out Xi
+ add 3, 11, 12 # return count
+
+ RESTORE_REGS
+ blr
+
+ #
+ # 8x Decrypt
+ #
+_GLOBAL(aes_p10_gcm_decrypt)
+.align 5
+
+ SAVE_REGS
+
+ LOAD_HASH_TABLE
+
+ # initialize ICB: GHASH( IV ), IV - r7
+ lxvb16x 30+32, 0, 7 # load IV - v30
+
+ mr 12, 5 # length
+ li 11, 0 # block index
+
+ # counter 1
+ vxor 31, 31, 31
+ vspltisb 22, 1
+ vsldoi 31, 31, 22,1 # counter 1
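+	# v31 = {0, 1}: a one in the low doubleword, which the vaddudm
+	# instructions below add to the counter block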
+
+ # load round key to VSR
+ lxv 0, 0(6)
+ lxv 1, 0x10(6)
+ lxv 2, 0x20(6)
+ lxv 3, 0x30(6)
+ lxv 4, 0x40(6)
+ lxv 5, 0x50(6)
+ lxv 6, 0x60(6)
+ lxv 7, 0x70(6)
+ lxv 8, 0x80(6)
+ lxv 9, 0x90(6)
+ lxv 10, 0xa0(6)
+
+ # load rounds - 10 (128), 12 (192), 14 (256)
+ lwz 9,240(6)
+
+ #
+ # vxor state, state, w # addroundkey
+ xxlor 32+29, 0, 0
+ vxor 15, 30, 29 # IV + round key - add round key 0
+
+ cmpdi 9, 10
+ beq Loop_aes_gcm_8x_dec
+
+ # load 2 more round keys (v11, v12)
+ lxv 11, 0xb0(6)
+ lxv 12, 0xc0(6)
+
+ cmpdi 9, 12
+ beq Loop_aes_gcm_8x_dec
+
+	# load 2 more round keys (v13, v14)
+ lxv 13, 0xd0(6)
+ lxv 14, 0xe0(6)
+ cmpdi 9, 14
+ beq Loop_aes_gcm_8x_dec
+
+ b aes_gcm_out
+
+.align 5
+Loop_aes_gcm_8x_dec:
+ mr 14, 3
+ mr 9, 4
+
+ #
+ # check partial block
+ #
+Continue_partial_check_dec:
+ ld 15, 56(7)
+ cmpdi 15, 0
+ beq Continue_dec
+ bgt Final_block_dec
+ cmpdi 15, 16
+ blt Final_block_dec
+
+Continue_dec:
+	# n blocks
+	li	10, 128
+	divdu	10, 12, 10	# number of 128-byte blocks
+ cmpdi 10, 0
+ beq Loop_last_block_dec
+
+ vaddudm 30, 30, 31 # IV + counter
+ vxor 16, 30, 29
+ vaddudm 30, 30, 31
+ vxor 17, 30, 29
+ vaddudm 30, 30, 31
+ vxor 18, 30, 29
+ vaddudm 30, 30, 31
+ vxor 19, 30, 29
+ vaddudm 30, 30, 31
+ vxor 20, 30, 29
+ vaddudm 30, 30, 31
+ vxor 21, 30, 29
+ vaddudm 30, 30, 31
+ vxor 22, 30, 29
+
+ mtctr 10
+
+ li 15, 16
+ li 16, 32
+ li 17, 48
+ li 18, 64
+ li 19, 80
+ li 20, 96
+ li 21, 112
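+
+	# r15-r21 hold byte offsets 16, 32, ..., 112 of blocks 1-7 within
+	# each 128-byte chunk, used to index the lxvb16x/stxvb16x below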
+
+ lwz 10, 240(6)
+
+Loop_8x_block_dec:
+
+ lxvb16x 15, 0, 14 # load block
+ lxvb16x 16, 15, 14 # load block
+ lxvb16x 17, 16, 14 # load block
+ lxvb16x 18, 17, 14 # load block
+ lxvb16x 19, 18, 14 # load block
+ lxvb16x 20, 19, 14 # load block
+ lxvb16x 21, 20, 14 # load block
+ lxvb16x 22, 21, 14 # load block
+ addi 14, 14, 128
+
+ Loop_aes_middle8x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_next_ghash_dec
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_next_ghash_dec
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 16, 16, 23
+ vcipher 17, 17, 23
+ vcipher 18, 18, 23
+ vcipher 19, 19, 23
+ vcipher 20, 20, 23
+ vcipher 21, 21, 23
+ vcipher 22, 22, 23
+
+ vcipher 15, 15, 24
+ vcipher 16, 16, 24
+ vcipher 17, 17, 24
+ vcipher 18, 18, 24
+ vcipher 19, 19, 24
+ vcipher 20, 20, 24
+ vcipher 21, 21, 24
+ vcipher 22, 22, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_next_ghash_dec
+ b aes_gcm_out
+
+Do_next_ghash_dec:
+
+ #
+ # last round
+ vcipherlast 15, 15, 23
+ vcipherlast 16, 16, 23
+
+ xxlxor 47, 47, 15
+ stxvb16x 47, 0, 9 # store output
+ xxlxor 48, 48, 16
+ stxvb16x 48, 15, 9 # store output
+
+ vcipherlast 17, 17, 23
+ vcipherlast 18, 18, 23
+
+ xxlxor 49, 49, 17
+ stxvb16x 49, 16, 9 # store output
+ xxlxor 50, 50, 18
+ stxvb16x 50, 17, 9 # store output
+
+ vcipherlast 19, 19, 23
+ vcipherlast 20, 20, 23
+
+ xxlxor 51, 51, 19
+ stxvb16x 51, 18, 9 # store output
+ xxlxor 52, 52, 20
+ stxvb16x 52, 19, 9 # store output
+
+ vcipherlast 21, 21, 23
+ vcipherlast 22, 22, 23
+
+ xxlxor 53, 53, 21
+ stxvb16x 53, 20, 9 # store output
+ xxlxor 54, 54, 22
+ stxvb16x 54, 21, 9 # store output
+
+ addi 9, 9, 128
+
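+	# copy the ciphertext just loaded (VSRs 15-22) back into v15-v22
+	# so that GHASH below is computed over the ciphertext rather
+	# than the decrypted plaintext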
+ xxlor 15+32, 15, 15
+ xxlor 16+32, 16, 16
+ xxlor 17+32, 17, 17
+ xxlor 18+32, 18, 18
+ xxlor 19+32, 19, 19
+ xxlor 20+32, 20, 20
+ xxlor 21+32, 21, 21
+ xxlor 22+32, 22, 22
+
+ # ghash here
+ ppc_aes_gcm_ghash2_4x
+
+ xxlor 27+32, 0, 0
+ vaddudm 30, 30, 31 # IV + counter
+ vmr 29, 30
+ vxor 15, 30, 27 # add round key
+ vaddudm 30, 30, 31
+ vxor 16, 30, 27
+ vaddudm 30, 30, 31
+ vxor 17, 30, 27
+ vaddudm 30, 30, 31
+ vxor 18, 30, 27
+ vaddudm 30, 30, 31
+ vxor 19, 30, 27
+ vaddudm 30, 30, 31
+ vxor 20, 30, 27
+ vaddudm 30, 30, 31
+ vxor 21, 30, 27
+ vaddudm 30, 30, 31
+ vxor 22, 30, 27
+
+ addi 12, 12, -128
+ addi 11, 11, 128
+
+ bdnz Loop_8x_block_dec
+
+ vmr 30, 29
+ stxvb16x 30+32, 0, 7 # update IV
+
+Loop_last_block_dec:
+ cmpdi 12, 0
+ beq aes_gcm_out
+
+ # loop last few blocks
+ li 10, 16
+ divdu 10, 12, 10
+
+ mtctr 10
+
+	lwz	10, 240(6)	# load rounds - 10 (128), 12 (192), 14 (256)
+
+ cmpdi 12, 16
+ blt Final_block_dec
+
+Next_rem_block_dec:
+ lxvb16x 15, 0, 14 # load block
+
+ Loop_aes_middle_1x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_next_1x_dec
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_next_1x_dec
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_next_1x_dec
+
+Do_next_1x_dec:
+ vcipherlast 15, 15, 23
+
+ xxlxor 47, 47, 15
+ stxvb16x 47, 0, 9 # store output
+ addi 14, 14, 16
+ addi 9, 9, 16
+
+ xxlor 28+32, 15, 15
+ #vmr 28, 15
+ ppc_update_hash_1x
+
+ addi 12, 12, -16
+ addi 11, 11, 16
+ xxlor 19+32, 0, 0
+ vaddudm 30, 30, 31 # IV + counter
+ vxor 15, 30, 19 # add round key
+
+ bdnz Next_rem_block_dec
+
+ li 15, 0
+	std	15, 56(7)		# clear partial block count
+ stxvb16x 30+32, 0, 7 # update IV
+ cmpdi 12, 0
+ beq aes_gcm_out
+
+Final_block_dec:
+ lwz 10, 240(6)
+ Loop_aes_middle_1x
+
+ xxlor 23+32, 10, 10
+
+ cmpdi 10, 10
+ beq Do_final_1x_dec
+
+ # 192 bits
+ xxlor 24+32, 11, 11
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 12, 12
+
+ cmpdi 10, 12
+ beq Do_final_1x_dec
+
+ # 256 bits
+ xxlor 24+32, 13, 13
+
+ vcipher 15, 15, 23
+ vcipher 15, 15, 24
+
+ xxlor 23+32, 14, 14
+
+ cmpdi 10, 14
+ beq Do_final_1x_dec
+
+Do_final_1x_dec:
+ vcipherlast 15, 15, 23
+
+ # check partial block
+ li 21, 1 # decrypt
+	ld	15, 56(7)		# partial block count
+ cmpdi 15, 0
+ beq Normal_block_dec
+ bl Do_partial_block
+ cmpdi 12, 0
+ ble aes_gcm_out
+
+ b Continue_partial_check_dec
+
+Normal_block_dec:
+ lxvb16x 15, 0, 14 # load last block
+ xxlxor 47, 47, 15
+
+ # create partial block mask
+ li 15, 16
+ sub 15, 15, 12 # index to the mask
+
+ vspltisb 16, -1 # first 16 bytes - 0xffff...ff
+ vspltisb 17, 0 # second 16 bytes - 0x0000...00
+ li 10, 192
+ stvx 16, 10, 1
+ addi 10, 10, 16
+ stvx 17, 10, 1
+
+ addi 10, 1, 192
+ lxvb16x 16, 15, 10 # load partial block mask
+ xxland 47, 47, 16
+
+ xxland 32+28, 15, 16
+ #vmr 28, 15
+ ppc_update_hash_1x
+
+	# store only the remaining bytes
+ bl Write_partial_block
+
+ stxvb16x 30+32, 0, 7 # update IV
+	std	12, 56(7)		# update partial block count
+ li 16, 16
+
+ stxvb16x 32, 0, 8 # write out Xi
+ stxvb16x 32, 16, 8 # write out Xi
+ b aes_gcm_out
diff --git a/arch/powerpc/crypto/aesp8-ppc.pl b/arch/powerpc/crypto/aesp8-ppc.pl
new file mode 100644
index 000000000000..1f22aec27d79
--- /dev/null
+++ b/arch/powerpc/crypto/aesp8-ppc.pl
@@ -0,0 +1,585 @@
+#! /usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from CRYPTOGAMs[1] and is included here using the option
+# in the license to distribute the code under the GPL. Therefore this program
+# is free software; you can redistribute it and/or modify it under the terms of
+# the GNU General Public License version 2 as published by the Free Software
+# Foundation.
+#
+# [1] https://www.openssl.org/~appro/cryptogams/
+
+# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain copyright notices,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# * Neither the name of the CRYPTOGAMS nor the names of its
+# copyright holder and contributors may be used to endorse or
+# promote products derived from this software without specific
+# prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by POWER8 processor.
+# The module is endian-agnostic in sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies MSR.VSX flag being
+# set. It should also be noted that ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in a pure AltiVec/VMX way [with data
+# aligned programmatically, which in turn guarantees exception-
+# free execution], but that turned out to hamper performance when
+# vcipher instructions are interleaved. It's reckoned that eventual
+# misalignment penalties at page boundaries are on average lower
+# than the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Added an XTS subroutine; a 9x improvement on little-endian and a 12x
+# improvement on big-endian systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+# CBC en-/decrypt CTR XTS
+# POWER8[le] 3.96/0.72 0.74 1.1
+# POWER8[be] 3.75/0.65 0.66 1.0
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T =8;
+ $LRSAVE =2*$SIZE_T;
+ $STU ="stdu";
+ $POP ="ld";
+ $PUSH ="std";
+ $UCMP ="cmpld";
+ $SHL ="sldi";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T =4;
+ $LRSAVE =$SIZE_T;
+ $STU ="stwu";
+ $POP ="lwz";
+ $PUSH ="stw";
+ $UCMP ="cmplw";
+ $SHL ="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{ # Key setup procedures #
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine "any"
+
+.text
+
+.align 7
+rcon:
+.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev
+.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
+.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
+.long 0,0,0,0 ?asis
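+# (The trailing "?rev"/"?asis" tags are consumed by the constant-
+# conversion loop at the bottom of this file: "?rev" entries are
+# byte-reversed for little-endian flavours, "?asis" entries are
+# emitted unchanged.)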
+Lconsts:
+ mflr r0
+ bcl 20,31,\$+4
+	mflr	$ptr		# $ptr = distance between . and rcon
+ addi $ptr,$ptr,-0x48
+ mtlr r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl .${prefix}_set_encrypt_key
+Lset_encrypt_key:
+ mflr r11
+ $PUSH r11,$LRSAVE($sp)
+
+ li $ptr,-1
+ ${UCMP}i $inp,0
+ beq- Lenc_key_abort # if ($inp==0) return -1;
+ ${UCMP}i $out,0
+ beq- Lenc_key_abort # if ($out==0) return -1;
+ li $ptr,-2
+ cmpwi $bits,128
+ blt- Lenc_key_abort
+ cmpwi $bits,256
+ bgt- Lenc_key_abort
+ andi. r0,$bits,0x3f
+ bne- Lenc_key_abort
+
+ lis r0,0xfff0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ bl Lconsts
+ mtlr r11
+
+ neg r9,$inp
+ lvx $in0,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ lvsr $key,0,r9 # borrow $key
+ li r8,0x20
+ cmpwi $bits,192
+ lvx $in1,0,$inp
+ le?vspltisb $mask,0x0f # borrow $mask
+ lvx $rcon,0,$ptr
+ le?vxor $key,$key,$mask # adjust for byte swap
+ lvx $mask,r8,$ptr
+ addi $ptr,$ptr,0x10
+ vperm $in0,$in0,$in1,$key # align [and byte swap in LE]
+ li $cnt,8
+ vxor $zero,$zero,$zero
+ mtctr $cnt
+
+ ?lvsr $outperm,0,$out
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$zero,$outmask,$outperm
+
+ blt Loop128
+ addi $inp,$inp,8
+ beq L192
+ addi $inp,$inp,8
+ b L256
+
+.align 4
+Loop128:
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ bdnz Loop128
+
+ lvx $rcon,0,$ptr # last two round keys
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,0x50
+
+ li $rounds,10
+ b Ldone
+
+.align 4
+L192:
+ lvx $tmp,0,$inp
+ li $cnt,4
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ vspltisb $key,8 # borrow $key
+ mtctr $cnt
+ vsububm $mask,$mask,$key # adjust the mask
+
+Loop192:
+	vperm	$key,$in1,$in1,$mask	# rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vcipherlast $key,$key,$rcon
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+
+ vsldoi $stage,$zero,$in1,8
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vsldoi $stage,$stage,$in0,8
+
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vsldoi $stage,$in0,$in1,8
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdnz Loop192
+
+ li $rounds,12
+ addi $out,$out,0x20
+ b Ldone
+
+.align 4
+L256:
+ lvx $tmp,0,$inp
+ li $cnt,7
+ li $rounds,14
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ mtctr $cnt
+
+Loop256:
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in1,$in1,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdz Ldone
+
+ vspltw $key,$in0,3 # just splat
+ vsldoi $tmp,$zero,$in1,12 # >>32
+ vsbox $key,$key
+
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+
+ vxor $in1,$in1,$key
+ b Loop256
+
+.align 4
+Ldone:
+ lvx $in1,0,$inp # redundant in aligned case
+ vsel $in1,$outhead,$in1,$outmask
+ stvx $in1,0,$inp
+ li $ptr,0
+ mtspr 256,$vrsave
+ stw $rounds,0($out)
+
+Lenc_key_abort:
+ mr r3,$ptr
+ blr
+ .long 0
+ .byte 0,12,0x14,1,0,0,3,0
+ .long 0
+.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl .${prefix}_set_decrypt_key
+ $STU $sp,-$FRAME($sp)
+ mflr r10
+ $PUSH r10,$FRAME+$LRSAVE($sp)
+ bl Lset_encrypt_key
+ mtlr r10
+
+ cmpwi r3,0
+ bne- Ldec_key_abort
+
+ slwi $cnt,$rounds,4
+ subi $inp,$out,240 # first round key
+ srwi $rounds,$rounds,1
+ add $out,$inp,$cnt # last round key
+ mtctr $rounds
+
+Ldeckey:
+ lwz r0, 0($inp)
+ lwz r6, 4($inp)
+ lwz r7, 8($inp)
+ lwz r8, 12($inp)
+ addi $inp,$inp,16
+ lwz r9, 0($out)
+ lwz r10,4($out)
+ lwz r11,8($out)
+ lwz r12,12($out)
+ stw r0, 0($out)
+ stw r6, 4($out)
+ stw r7, 8($out)
+ stw r8, 12($out)
+ subi $out,$out,16
+ stw r9, -16($inp)
+ stw r10,-12($inp)
+ stw r11,-8($inp)
+ stw r12,-4($inp)
+ bdnz Ldeckey
+
+ xor r3,r3,r3 # return value
+Ldec_key_abort:
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,1,0x80,0,3,0
+ .long 0
+.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{ # Single block en- and decrypt procedures #
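+# gen_block() emits one routine per direction: for "de" the "$n"
+# prefix selects the vncipher/vncipherlast forms instead of
+# vcipher/vcipherlast.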
+sub gen_block () {
+my $dir = shift;
+my $n = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl .${prefix}_${dir}crypt
+ lwz $rounds,240($key)
+ lis r0,0xfc00
+ mfspr $vrsave,256
+ li $idx,15 # 15 is not typo
+ mtspr 256,r0
+
+ lvx v0,0,$inp
+ neg r11,$out
+ lvx v1,$idx,$inp
+ lvsl v2,0,$inp # inpperm
+ le?vspltisb v4,0x0f
+ ?lvsl v3,0,r11 # outperm
+ le?vxor v2,v2,v4
+ li $idx,16
+ vperm v0,v0,v1,v2 # align [and byte swap in LE]
+ lvx v1,0,$key
+ ?lvsl v5,0,$key # keyperm
+ srwi $rounds,$rounds,1
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ subi $rounds,$rounds,1
+ ?vperm v1,v1,v2,v5 # align round key
+
+ vxor v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Loop_${dir}c:
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ ?vperm v1,v1,v2,v5
+ v${n}cipher v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_${dir}c
+
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ ?vperm v1,v1,v2,v5
+ v${n}cipherlast v0,v0,v1
+
+ vspltisb v2,-1
+ vxor v1,v1,v1
+ li $idx,15 # 15 is not typo
+ ?vperm v2,v1,v2,v3 # outmask
+ le?vxor v3,v3,v4
+ lvx v1,0,$out # outhead
+ vperm v0,v0,v0,v3 # rotate [and byte swap in LE]
+ vsel v1,v1,v0,v2
+ lvx v4,$idx,$out
+ stvx v1,0,$out
+ vsel v0,v0,v4,v2
+ stvx v0,$idx,$out
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ # constants table endian-specific conversion
+ if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+ my $conv=$3;
+ my @bytes=();
+
+ # convert to endian-agnostic format
+ if ($1 eq "long") {
+ foreach (split(/,\s*/,$2)) {
+ my $l = /^0/?oct:int;
+ push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+ }
+ } else {
+ @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+ }
+
+ # little-endian conversion
+ if ($flavour =~ /le$/o) {
+ SWITCH: for($conv) {
+ /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
+ /\?rev/ && do { @bytes=reverse(@bytes); last; };
+ }
+ }
+
+ #emit
+ print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+ next;
+ }
+ $consts=0 if (m/Lconsts:/o); # end of table
+
+ # instructions prefixed with '?' are endian-specific and need
+ # to be adjusted accordingly...
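+	# (e.g. on little-endian, "?vperm vD,vA,vB,vC" is rewritten as
+	# "vperm vD,vB,vA,vC", swapping the two source operands)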
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o or
+ s/\?lvsr/lvsl/o or
+ s/\?lvsl/lvsr/o or
+ s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+ s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+ s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+ } else { # big-endian
+ s/le\?/#le#/o or
+ s/be\?//o or
+ s/\?([a-z]+)/$1/o;
+ }
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/arch/powerpc/crypto/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp8-ppc.pl
new file mode 100644
index 000000000000..b56603b4a893
--- /dev/null
+++ b/arch/powerpc/crypto/ghashp8-ppc.pl
@@ -0,0 +1,370 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from the OpenSSL project but the author (Andy Polyakov)
+# has relicensed it under the GPLv2. Therefore this program is free software;
+# you can redistribute it and/or modify it under the terms of the GNU General
+# Public License version 2 as published by the Free Software Foundation.
+#
+# The original headers, including the original license headers, are
+# included below for completeness.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# GHASH for PowerISA v2.07.
+#
+# July 2014
+#
+# Accurate performance measurements are problematic, because it's
+# always a virtualized setup with a possibly throttled processor.
+# Relative comparison is therefore more informative. This initial
+# version is ~2.1x slower than hardware-assisted AES-128-CTR and ~12x
+# faster than "4-bit" integer-only compiler-generated 64-bit code.
+# "Initial version" means that there is room for further improvement.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $LRSAVE=2*$SIZE_T;
+ $STU="stdu";
+ $POP="ld";
+ $PUSH="std";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $LRSAVE=$SIZE_T;
+ $STU="stwu";
+ $POP="lwz";
+ $PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
+
+my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
+
+my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
+my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
+my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
+my $vrsave="r12";
+my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
+
+$code=<<___;
+.machine "any"
+
+.text
+
+.globl .gcm_init_p8
+ lis r0,0xfff0
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $H,0,r4 # load H
+ le?xor r7,r7,r7
+	le?addi		r7,r7,0x8	# need a vperm mask starting at byte 0x8
+ le?lvsr 5,0,r7
+ le?vspltisb 6,0x0f
+ le?vxor 5,5,6 # set a b-endian mask
+ le?vperm $H,$H,$H,5
+
+ vspltisb $xC2,-16 # 0xf0
+ vspltisb $t0,1 # one
+ vaddubm $xC2,$xC2,$xC2 # 0xe0
+ vxor $zero,$zero,$zero
+ vor $xC2,$xC2,$t0 # 0xe1
+ vsldoi $xC2,$xC2,$zero,15 # 0xe1...
+ vsldoi $t1,$zero,$t0,1 # ...1
+ vaddubm $xC2,$xC2,$xC2 # 0xc2...
+ vspltisb $t2,7
+ vor $xC2,$xC2,$t1 # 0xc2....01
+ vspltb $t1,$H,0 # most significant byte
+ vsl $H,$H,$t0 # H<<=1
+ vsrab $t1,$t1,$t2 # broadcast carry bit
+ vand $t1,$t1,$xC2
+ vxor $H,$H,$t1 # twisted H
+
+ vsldoi $H,$H,$H,8 # twist even more ...
+ vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
+ vsldoi $Hl,$zero,$H,8 # ... and split
+ vsldoi $Hh,$H,$zero,8
+
+ stvx_u $xC2,0,r3 # save pre-computed table
+ stvx_u $Hl,r8,r3
+ stvx_u $H, r9,r3
+ stvx_u $Hh,r10,r3
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_init_p8,.-.gcm_init_p8
+
+.globl .gcm_init_htable
+ lis r0,0xfff0
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $H,0,r4 # load H
+
+ vspltisb $xC2,-16 # 0xf0
+ vspltisb $t0,1 # one
+ vaddubm $xC2,$xC2,$xC2 # 0xe0
+ vxor $zero,$zero,$zero
+ vor $xC2,$xC2,$t0 # 0xe1
+ vsldoi $xC2,$xC2,$zero,15 # 0xe1...
+ vsldoi $t1,$zero,$t0,1 # ...1
+ vaddubm $xC2,$xC2,$xC2 # 0xc2...
+ vspltisb $t2,7
+ vor $xC2,$xC2,$t1 # 0xc2....01
+ vspltb $t1,$H,0 # most significant byte
+ vsl $H,$H,$t0 # H<<=1
+ vsrab $t1,$t1,$t2 # broadcast carry bit
+ vand $t1,$t1,$xC2
+ vxor $IN,$H,$t1 # twisted H
+
+ vsldoi $H,$IN,$IN,8 # twist even more ...
+ vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
+ vsldoi $Hl,$zero,$H,8 # ... and split
+ vsldoi $Hh,$H,$zero,8
+
+ stvx_u $xC2,0,r3 # save pre-computed table
+ stvx_u $Hl,r8,r3
+ li r8,0x40
+ stvx_u $H, r9,r3
+ li r9,0x50
+ stvx_u $Hh,r10,r3
+ li r10,0x60
+
+ vpmsumd $Xl,$IN,$Hl # H.lo·H.lo
+ vpmsumd $Xm,$IN,$H # H.hi·H.lo+H.lo·H.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·H.hi
+
+ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
+ vpmsumd $Xl,$Xl,$xC2
+ vxor $t1,$t1,$Xh
+ vxor $IN1,$Xl,$t1
+
+ vsldoi $H2,$IN1,$IN1,8
+ vsldoi $H2l,$zero,$H2,8
+ vsldoi $H2h,$H2,$zero,8
+
+ stvx_u $H2l,r8,r3 # save H^2
+ li r8,0x70
+ stvx_u $H2,r9,r3
+ li r9,0x80
+ stvx_u $H2h,r10,r3
+ li r10,0x90
+
+ vpmsumd $Xl,$IN,$H2l # H.lo·H^2.lo
+ vpmsumd $Xl1,$IN1,$H2l # H^2.lo·H^2.lo
+ vpmsumd $Xm,$IN,$H2 # H.hi·H^2.lo+H.lo·H^2.hi
+ vpmsumd $Xm1,$IN1,$H2 # H^2.hi·H^2.lo+H^2.lo·H^2.hi
+ vpmsumd $Xh,$IN,$H2h # H.hi·H^2.hi
+ vpmsumd $Xh1,$IN1,$H2h # H^2.hi·H^2.hi
+
+ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
+ vpmsumd $t6,$Xl1,$xC2 # 1st reduction phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vsldoi $t4,$Xm1,$zero,8
+ vsldoi $t5,$zero,$Xm1,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+ vxor $Xl1,$Xl1,$t4
+ vxor $Xh1,$Xh1,$t5
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vsldoi $Xl1,$Xl1,$Xl1,8
+ vxor $Xl,$Xl,$t2
+ vxor $Xl1,$Xl1,$t6
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
+ vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
+ vpmsumd $Xl,$Xl,$xC2
+ vpmsumd $Xl1,$Xl1,$xC2
+ vxor $t1,$t1,$Xh
+ vxor $t5,$t5,$Xh1
+ vxor $Xl,$Xl,$t1
+ vxor $Xl1,$Xl1,$t5
+
+ vsldoi $H,$Xl,$Xl,8
+ vsldoi $H2,$Xl1,$Xl1,8
+ vsldoi $Hl,$zero,$H,8
+ vsldoi $Hh,$H,$zero,8
+ vsldoi $H2l,$zero,$H2,8
+ vsldoi $H2h,$H2,$zero,8
+
+ stvx_u $Hl,r8,r3 # save H^3
+ li r8,0xa0
+ stvx_u $H,r9,r3
+ li r9,0xb0
+ stvx_u $Hh,r10,r3
+ li r10,0xc0
+ stvx_u $H2l,r8,r3 # save H^4
+ stvx_u $H2,r9,r3
+ stvx_u $H2h,r10,r3
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_init_htable,.-.gcm_init_htable
+
+.globl .gcm_gmult_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $IN,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $zero,$zero,$zero
+
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ vxor $t1,$t1,$Xh
+ vxor $Xl,$Xl,$t1
+
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_gmult_p8,.-.gcm_gmult_p8
+
+.globl .gcm_ghash_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $Xl,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ vxor $zero,$zero,$zero
+
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+ subi $len,$len,16
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $IN,$IN,$Xl
+ b Loop
+
+.align 5
+Loop:
+ subic $len,$len,16
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ subfe. r0,r0,r0 # borrow?-1:0
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ and r0,r0,$len
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ add $inp,$inp,r0
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $t1,$t1,$Xh
+ vxor $IN,$IN,$t1
+ vxor $IN,$IN,$Xl
+ beq Loop # did $len-=16 borrow?
+
+ vxor $Xl,$Xl,$t1
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .gcm_ghash_p8,.-.gcm_ghash_p8
+
+.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+foreach (split("\n",$code)) {
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o;
+ } else {
+ s/le\?/#le#/o or
+ s/be\?//o;
+ }
+ print $_,"\n";
+}
+
+close STDOUT; # enforce flush
diff --git a/arch/powerpc/crypto/ppc-xlate.pl b/arch/powerpc/crypto/ppc-xlate.pl
new file mode 100644
index 000000000000..23cca703ce29
--- /dev/null
+++ b/arch/powerpc/crypto/ppc-xlate.pl
@@ -0,0 +1,229 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# PowerPC assembler distiller by <appro>.
+
+my $flavour = shift;
+my $output = shift;
+open STDOUT,">$output" || die "can't open $output: $!";
+
+my %GLOBALS;
+my $dotinlocallabels=($flavour=~/linux/)?1:0;
+
+################################################################
+# directives which need special treatment on different platforms
+################################################################
+my $globl = sub {
+ my $junk = shift;
+ my $name = shift;
+ my $global = \$GLOBALS{$name};
+ my $ret;
+
+ $name =~ s|^[\.\_]||;
+
+ SWITCH: for ($flavour) {
+ /aix/ && do { $name = ".$name";
+ last;
+ };
+ /osx/ && do { $name = "_$name";
+ last;
+ };
+ /linux/
+ && do { $ret = "_GLOBAL($name)";
+ last;
+ };
+ }
+
+ $ret = ".globl $name\nalign 5\n$name:" if (!$ret);
+ $$global = $name;
+ $ret;
+};
+my $text = sub {
+ my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
+ $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/);
+ $ret;
+};
+my $machine = sub {
+ my $junk = shift;
+ my $arch = shift;
+ if ($flavour =~ /osx/)
+ { $arch =~ s/\"//g;
+ $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
+ }
+ ".machine $arch";
+};
+my $size = sub {
+ if ($flavour =~ /linux/)
+ { shift;
+ my $name = shift; $name =~ s|^[\.\_]||;
+ my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name;
+ $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/);
+ $ret;
+ }
+ else
+ { ""; }
+};
+my $asciz = sub {
+ shift;
+ my $line = join(",",@_);
+ if ($line =~ /^"(.*)"$/)
+ { ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; }
+ else
+ { ""; }
+};
+my $quad = sub {
+ shift;
+ my @ret;
+ my ($hi,$lo);
+ for (@_) {
+ if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
+ { $hi=$1?"0x$1":"0"; $lo="0x$2"; }
+ elsif (/^([0-9]+)$/o)
+ { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl
+ else
+ { $hi=undef; $lo=$_; }
+
+ if (defined($hi))
+ { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); }
+ else
+ { push(@ret,".quad $lo"); }
+ }
+ join("\n",@ret);
+};
+
+################################################################
+# simplified mnemonics not handled by at least one assembler
+################################################################
+my $cmplw = sub {
+ my $f = shift;
+ my $cr = 0; $cr = shift if ($#_>1);
+ # Some out-of-date 32-bit GNU assembler just can't handle cmplw...
+ ($flavour =~ /linux.*32/) ?
+ " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
+ " cmplw ".join(',',$cr,@_);
+};
+my $bdnz = sub {
+ my $f = shift;
+ my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
+ " bc $bo,0,".shift;
+} if ($flavour!~/linux/);
+my $bltlr = sub {
+ my $f = shift;
+ my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
+ " bclr $bo,0";
+};
+my $bnelr = sub {
+ my $f = shift;
+ my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
+ " bclr $bo,2";
+};
+my $beqlr = sub {
+ my $f = shift;
+ my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
+ " bclr $bo,2";
+};
+# GNU assembler can't handle extrdi rA,rS,16,48 (or any case where the
+# sum of the last two arguments is 64); it fails with an "operand out
+# of range" error.
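+# Illustratively, "extrdi r3,r4,16,48" is emitted as
+# "rldicl r3,r4,0,48", since $b = (48+16) & 63 = 0 and $n = 64-16 = 48.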
+my $extrdi = sub {
+ my ($f,$ra,$rs,$n,$b) = @_;
+ $b = ($b+$n)&63; $n = 64-$n;
+ " rldicl $ra,$rs,$b,$n";
+};
+my $vmr = sub {
+ my ($f,$vx,$vy) = @_;
+ " vor $vx,$vy,$vy";
+};
+
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
+# PowerISA 2.06 stuff
+sub vsxmem_op {
+ my ($f, $vrt, $ra, $rb, $op) = @_;
+ " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
+}
+# made-up unaligned memory reference AltiVec/VMX instructions
+my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x
+my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x
+my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx
+my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx
+my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x
+my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x
+
+# PowerISA 2.07 stuff
+sub vcrypto_op {
+ my ($f, $vrt, $vra, $vrb, $op) = @_;
+ " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
+}
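+# (Illustratively, "vpmsumd v0,v1,v2" maps to vcrypto_op with
+# $op=1224 and is emitted as ".long 0x100114C8", i.e.
+# (4<<26)|(0<<21)|(1<<16)|(2<<11)|1224.)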
+my $vcipher = sub { vcrypto_op(@_, 1288); };
+my $vcipherlast = sub { vcrypto_op(@_, 1289); };
+my $vncipher = sub { vcrypto_op(@_, 1352); };
+my $vncipherlast= sub { vcrypto_op(@_, 1353); };
+my $vsbox = sub { vcrypto_op(@_, 0, 1480); };
+my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
+my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
+my $vpmsumb = sub { vcrypto_op(@_, 1032); };
+my $vpmsumd = sub { vcrypto_op(@_, 1224); };
+my $vpmsumh	= sub { vcrypto_op(@_, 1096); };
+my $vpmsumw = sub { vcrypto_op(@_, 1160); };
+my $vaddudm = sub { vcrypto_op(@_, 192); };
+my $vadduqm = sub { vcrypto_op(@_, 256); };
+
+my $mtsle = sub {
+ my ($f, $arg) = @_;
+ " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
+};
+
+print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
+
+while($line=<>) {
+
+ $line =~ s|[#!;].*$||; # get rid of asm-style comments...
+ $line =~ s|/\*.*\*/||; # ... and C-style comments...
+ $line =~ s|^\s+||; # ... and skip white spaces in beginning...
+ $line =~ s|\s+$||; # ... and at the end
+
+ {
+ $line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel
+ $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels);
+ }
+
+ {
+ $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
+ my $c = $1; $c = "\t" if ($c eq "");
+ my $mnemonic = $2;
+ my $f = $3;
+ my $opcode = eval("\$$mnemonic");
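+	# strip register-name prefixes (r3 -> 3, v5 -> 5, vs0 -> 0) for
+	# non-OSX flavours, except on "." directives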
+ $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
+ if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
+ elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; }
+ }
+
+ print $line if ($line);
+ print "\n";
+}
+
+close STDOUT;
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
index f6f790a90367..2dcc66225e7f 100644
--- a/arch/powerpc/include/asm/cpufeature.h
+++ b/arch/powerpc/include/asm/cpufeature.h
@@ -22,6 +22,7 @@
*/
#define PPC_MODULE_FEATURE_VEC_CRYPTO (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
+#define PPC_MODULE_FEATURE_P10 (32 + ilog2(PPC_FEATURE2_ARCH_3_1))
#define cpu_feature(x) (x)
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index cdf3215ec272..ad7f4c891625 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -201,8 +201,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqa KEY, STATE4
/* load the constants: */
- movdqa .Laegis128_const_0, STATE2
- movdqa .Laegis128_const_1, STATE1
+ movdqa .Laegis128_const_0(%rip), STATE2
+ movdqa .Laegis128_const_1(%rip), STATE1
pxor STATE2, STATE3
pxor STATE1, STATE4
@@ -682,7 +682,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
punpcklbw T0, T0
punpcklbw T0, T0
punpcklbw T0, T0
- movdqa .Laegis128_counter, T1
+ movdqa .Laegis128_counter(%rip), T1
pcmpgtb T1, T0
pand T0, MSG
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 837c1e0aa021..3ac7487ecad2 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -288,53 +288,53 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
# Encrypt/Decrypt first few blocks
and $(3<<4), %r12
- jz _initial_num_blocks_is_0_\@
+ jz .L_initial_num_blocks_is_0_\@
cmp $(2<<4), %r12
- jb _initial_num_blocks_is_1_\@
- je _initial_num_blocks_is_2_\@
-_initial_num_blocks_is_3_\@:
+ jb .L_initial_num_blocks_is_1_\@
+ je .L_initial_num_blocks_is_2_\@
+.L_initial_num_blocks_is_3_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
sub $48, %r13
- jmp _initial_blocks_\@
-_initial_num_blocks_is_2_\@:
+ jmp .L_initial_blocks_\@
+.L_initial_num_blocks_is_2_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
sub $32, %r13
- jmp _initial_blocks_\@
-_initial_num_blocks_is_1_\@:
+ jmp .L_initial_blocks_\@
+.L_initial_num_blocks_is_1_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
sub $16, %r13
- jmp _initial_blocks_\@
-_initial_num_blocks_is_0_\@:
+ jmp .L_initial_blocks_\@
+.L_initial_num_blocks_is_0_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
-_initial_blocks_\@:
+.L_initial_blocks_\@:
# Main loop - Encrypt/Decrypt remaining blocks
test %r13, %r13
- je _zero_cipher_left_\@
+ je .L_zero_cipher_left_\@
sub $64, %r13
- je _four_cipher_left_\@
-_crypt_by_4_\@:
+ je .L_four_cipher_left_\@
+.L_crypt_by_4_\@:
GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \
%xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
%xmm7, %xmm8, enc
add $64, %r11
sub $64, %r13
- jne _crypt_by_4_\@
-_four_cipher_left_\@:
+ jne .L_crypt_by_4_\@
+.L_four_cipher_left_\@:
GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_\@:
+.L_zero_cipher_left_\@:
movdqu %xmm8, AadHash(%arg2)
movdqu %xmm0, CurCount(%arg2)
mov %arg5, %r13
and $15, %r13 # %r13 = arg5 (mod 16)
- je _multiple_of_16_bytes_\@
+ je .L_multiple_of_16_bytes_\@
mov %r13, PBlockLen(%arg2)
@@ -348,14 +348,14 @@ _zero_cipher_left_\@:
movdqu %xmm0, PBlockEncKey(%arg2)
cmp $16, %arg5
- jge _large_enough_update_\@
+ jge .L_large_enough_update_\@
lea (%arg4,%r11,1), %r10
mov %r13, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
- jmp _data_read_\@
+ jmp .L_data_read_\@
-_large_enough_update_\@:
+.L_large_enough_update_\@:
sub $16, %r11
add %r13, %r11
@@ -374,7 +374,7 @@ _large_enough_update_\@:
# shift right 16-r13 bytes
pshufb %xmm2, %xmm1
-_data_read_\@:
+.L_data_read_\@:
lea ALL_F+16(%rip), %r12
sub %r13, %r12
@@ -409,19 +409,19 @@ _data_read_\@:
# Output %r13 bytes
movq %xmm0, %rax
cmp $8, %r13
- jle _less_than_8_bytes_left_\@
+ jle .L_less_than_8_bytes_left_\@
mov %rax, (%arg3 , %r11, 1)
add $8, %r11
psrldq $8, %xmm0
movq %xmm0, %rax
sub $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
mov %al, (%arg3, %r11, 1)
add $1, %r11
shr $8, %rax
sub $1, %r13
- jne _less_than_8_bytes_left_\@
-_multiple_of_16_bytes_\@:
+ jne .L_less_than_8_bytes_left_\@
+.L_multiple_of_16_bytes_\@:
.endm
# GCM_COMPLETE Finishes update of tag of last partial block
@@ -434,11 +434,11 @@ _multiple_of_16_bytes_\@:
mov PBlockLen(%arg2), %r12
test %r12, %r12
- je _partial_done\@
+ je .L_partial_done\@
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
-_partial_done\@:
+.L_partial_done\@:
 	mov AadLen(%arg2), %r12 # %r12 = aadLen (number of bytes)
shl $3, %r12 # convert into number of bits
movd %r12d, %xmm15 # len(A) in %xmm15
@@ -457,44 +457,44 @@ _partial_done\@:
movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
pxor %xmm8, %xmm0
-_return_T_\@:
+.L_return_T_\@:
mov \AUTHTAG, %r10 # %r10 = authTag
mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len
cmp $16, %r11
- je _T_16_\@
+ je .L_T_16_\@
cmp $8, %r11
- jl _T_4_\@
-_T_8_\@:
+ jl .L_T_4_\@
+.L_T_8_\@:
movq %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
test %r11, %r11
- je _return_T_done_\@
-_T_4_\@:
+ je .L_return_T_done_\@
+.L_T_4_\@:
movd %xmm0, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
test %r11, %r11
- je _return_T_done_\@
-_T_123_\@:
+ je .L_return_T_done_\@
+.L_T_123_\@:
movd %xmm0, %eax
cmp $2, %r11
- jl _T_1_\@
+ jl .L_T_1_\@
mov %ax, (%r10)
cmp $2, %r11
- je _return_T_done_\@
+ je .L_return_T_done_\@
add $2, %r10
sar $16, %eax
-_T_1_\@:
+.L_T_1_\@:
mov %al, (%r10)
- jmp _return_T_done_\@
-_T_16_\@:
+ jmp .L_return_T_done_\@
+.L_T_16_\@:
movdqu %xmm0, (%r10)
-_return_T_done_\@:
+.L_return_T_done_\@:
.endm
#ifdef __x86_64__
@@ -563,30 +563,30 @@ _return_T_done_\@:
# Clobbers %rax, DLEN and XMM1
.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
cmp $8, \DLEN
- jl _read_lt8_\@
+ jl .L_read_lt8_\@
mov (\DPTR), %rax
movq %rax, \XMMDst
sub $8, \DLEN
- jz _done_read_partial_block_\@
+ jz .L_done_read_partial_block_\@
xor %eax, %eax
-_read_next_byte_\@:
+.L_read_next_byte_\@:
shl $8, %rax
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
- jnz _read_next_byte_\@
+ jnz .L_read_next_byte_\@
movq %rax, \XMM1
pslldq $8, \XMM1
por \XMM1, \XMMDst
- jmp _done_read_partial_block_\@
-_read_lt8_\@:
+ jmp .L_done_read_partial_block_\@
+.L_read_lt8_\@:
xor %eax, %eax
-_read_next_byte_lt8_\@:
+.L_read_next_byte_lt8_\@:
shl $8, %rax
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
- jnz _read_next_byte_lt8_\@
+ jnz .L_read_next_byte_lt8_\@
movq %rax, \XMMDst
-_done_read_partial_block_\@:
+.L_done_read_partial_block_\@:
.endm
# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
@@ -600,8 +600,8 @@ _done_read_partial_block_\@:
pxor \TMP6, \TMP6
cmp $16, %r11
- jl _get_AAD_rest\@
-_get_AAD_blocks\@:
+ jl .L_get_AAD_rest\@
+.L_get_AAD_blocks\@:
movdqu (%r10), \TMP7
pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP7, \TMP6
@@ -609,14 +609,14 @@ _get_AAD_blocks\@:
add $16, %r10
sub $16, %r11
cmp $16, %r11
- jge _get_AAD_blocks\@
+ jge .L_get_AAD_blocks\@
movdqu \TMP6, \TMP7
/* read the last <16B of AAD */
-_get_AAD_rest\@:
+.L_get_AAD_rest\@:
test %r11, %r11
- je _get_AAD_done\@
+ je .L_get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
pshufb %xmm14, \TMP7 # byte-reflect the AAD data
@@ -624,7 +624,7 @@ _get_AAD_rest\@:
GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
movdqu \TMP7, \TMP6
-_get_AAD_done\@:
+.L_get_AAD_done\@:
movdqu \TMP6, AadHash(%arg2)
.endm
@@ -637,21 +637,21 @@ _get_AAD_done\@:
AAD_HASH operation
mov PBlockLen(%arg2), %r13
test %r13, %r13
- je _partial_block_done_\@ # Leave Macro if no partial blocks
+ je .L_partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
- jl _fewer_than_16_bytes_\@
+ jl .L_fewer_than_16_bytes_\@
movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
- jmp _data_read_\@
+ jmp .L_data_read_\@
-_fewer_than_16_bytes_\@:
+.L_fewer_than_16_bytes_\@:
lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
mov \PLAIN_CYPH_LEN, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
mov PBlockLen(%arg2), %r13
-_data_read_\@: # Finished reading in data
+.L_data_read_\@: # Finished reading in data
movdqu PBlockEncKey(%arg2), %xmm9
movdqu HashKey(%arg2), %xmm13
@@ -674,9 +674,9 @@ _data_read_\@: # Finished reading in data
sub $16, %r10
 	# Determine if partial block is not being filled and
# shift mask accordingly
- jge _no_extra_mask_1_\@
+ jge .L_no_extra_mask_1_\@
sub %r10, %r12
-_no_extra_mask_1_\@:
+.L_no_extra_mask_1_\@:
movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -689,17 +689,17 @@ _no_extra_mask_1_\@:
pxor %xmm3, \AAD_HASH
test %r10, %r10
- jl _partial_incomplete_1_\@
+ jl .L_partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax, %eax
mov %rax, PBlockLen(%arg2)
- jmp _dec_done_\@
-_partial_incomplete_1_\@:
+ jmp .L_dec_done_\@
+.L_partial_incomplete_1_\@:
add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
-_dec_done_\@:
+.L_dec_done_\@:
movdqu \AAD_HASH, AadHash(%arg2)
.else
pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn)
@@ -710,9 +710,9 @@ _dec_done_\@:
sub $16, %r10
 	# Determine if partial block is not being filled and
# shift mask accordingly
- jge _no_extra_mask_2_\@
+ jge .L_no_extra_mask_2_\@
sub %r10, %r12
-_no_extra_mask_2_\@:
+.L_no_extra_mask_2_\@:
movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -724,17 +724,17 @@ _no_extra_mask_2_\@:
pxor %xmm9, \AAD_HASH
test %r10, %r10
- jl _partial_incomplete_2_\@
+ jl .L_partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax, %eax
mov %rax, PBlockLen(%arg2)
- jmp _encode_done_\@
-_partial_incomplete_2_\@:
+ jmp .L_encode_done_\@
+.L_partial_incomplete_2_\@:
add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
-_encode_done_\@:
+.L_encode_done_\@:
movdqu \AAD_HASH, AadHash(%arg2)
movdqa SHUF_MASK(%rip), %xmm10
@@ -744,32 +744,32 @@ _encode_done_\@:
.endif
# output encrypted Bytes
test %r10, %r10
- jl _partial_fill_\@
+ jl .L_partial_fill_\@
mov %r13, %r12
mov $16, %r13
# Set r13 to be the number of bytes to write out
sub %r12, %r13
- jmp _count_set_\@
-_partial_fill_\@:
+ jmp .L_count_set_\@
+.L_partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
-_count_set_\@:
+.L_count_set_\@:
movdqa %xmm9, %xmm0
movq %xmm0, %rax
cmp $8, %r13
- jle _less_than_8_bytes_left_\@
+ jle .L_less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
movq %xmm0, %rax
sub $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $1, \DATA_OFFSET
shr $8, %rax
sub $1, %r13
- jne _less_than_8_bytes_left_\@
-_partial_block_done_\@:
+ jne .L_less_than_8_bytes_left_\@
+.L_partial_block_done_\@:
.endm # PARTIAL_BLOCK
/*
@@ -813,14 +813,14 @@ _partial_block_done_\@:
shr $2,%eax # 128->4, 192->6, 256->8
add $5,%eax # 128->9, 192->11, 256->13
-aes_loop_initial_\@:
+.Laes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
aesenc \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_initial_\@
+ jnz .Laes_loop_initial_\@
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
@@ -861,7 +861,7 @@ aes_loop_initial_\@:
GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.endif
cmp $64, %r13
- jl _initial_blocks_done\@
+ jl .L_initial_blocks_done\@
# no need for precomputed values
/*
*
@@ -908,18 +908,18 @@ aes_loop_initial_\@:
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_pre_done\@
+ jz .Laes_loop_pre_done\@
-aes_loop_pre_\@:
+.Laes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
aesenc \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_pre_\@
+ jnz .Laes_loop_pre_\@
-aes_loop_pre_done\@:
+.Laes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
aesenclast \TMP2, \XMM1
aesenclast \TMP2, \XMM2
@@ -963,7 +963,7 @@ aes_loop_pre_done\@:
pshufb %xmm14, \XMM3 # perform a 16 byte swap
pshufb %xmm14, \XMM4 # perform a 16 byte swap
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
.endm
@@ -1095,18 +1095,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_enc_done\@
+ jz .Laes_loop_par_enc_done\@
-aes_loop_par_enc\@:
+.Laes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_enc\@
+ jnz .Laes_loop_par_enc\@
-aes_loop_par_enc_done\@:
+.Laes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
aesenclast \TMP3, \XMM1 # Round 10
aesenclast \TMP3, \XMM2
@@ -1303,18 +1303,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_dec_done\@
+ jz .Laes_loop_par_dec_done\@
-aes_loop_par_dec\@:
+.Laes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_dec\@
+ jnz .Laes_loop_par_dec\@
-aes_loop_par_dec_done\@:
+.Laes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
aesenclast \TMP3, \XMM1 # last round
aesenclast \TMP3, \XMM2
@@ -2717,7 +2717,7 @@ SYM_FUNC_END(aesni_cts_cbc_dec)
* BSWAP_MASK == endian swapping mask
*/
SYM_FUNC_START_LOCAL(_aesni_inc_init)
- movaps .Lbswap_mask, BSWAP_MASK
+ movaps .Lbswap_mask(%rip), BSWAP_MASK
movaps IV, CTR
pshufb BSWAP_MASK, CTR
mov $1, TCTR_LOW
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 0852ab573fd3..46cddd78857b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -154,30 +154,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
- .octa 0xffffffffffffffffffffffffffffffff
- .octa 0xffffffffffffffffffffffffffffff0C
- .octa 0xffffffffffffffffffffffffffff0D0C
- .octa 0xffffffffffffffffffffffffff0E0D0C
- .octa 0xffffffffffffffffffffffff0F0E0D0C
- .octa 0xffffffffffffffffffffff0C0B0A0908
- .octa 0xffffffffffffffffffff0D0C0B0A0908
- .octa 0xffffffffffffffffff0E0D0C0B0A0908
- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
- .octa 0xffffffffffffff0C0B0A090807060504
- .octa 0xffffffffffff0D0C0B0A090807060504
- .octa 0xffffffffff0E0D0C0B0A090807060504
- .octa 0xffffffff0F0E0D0C0B0A090807060504
- .octa 0xffffff0C0B0A09080706050403020100
- .octa 0xffff0D0C0B0A09080706050403020100
- .octa 0xff0E0D0C0B0A09080706050403020100
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-
.text
@@ -302,68 +278,68 @@ VARIABLE_OFFSET = 16*8
mov %r13, %r12
shr $4, %r12
and $7, %r12
- jz _initial_num_blocks_is_0\@
+ jz .L_initial_num_blocks_is_0\@
cmp $7, %r12
- je _initial_num_blocks_is_7\@
+ je .L_initial_num_blocks_is_7\@
cmp $6, %r12
- je _initial_num_blocks_is_6\@
+ je .L_initial_num_blocks_is_6\@
cmp $5, %r12
- je _initial_num_blocks_is_5\@
+ je .L_initial_num_blocks_is_5\@
cmp $4, %r12
- je _initial_num_blocks_is_4\@
+ je .L_initial_num_blocks_is_4\@
cmp $3, %r12
- je _initial_num_blocks_is_3\@
+ je .L_initial_num_blocks_is_3\@
cmp $2, %r12
- je _initial_num_blocks_is_2\@
+ je .L_initial_num_blocks_is_2\@
- jmp _initial_num_blocks_is_1\@
+ jmp .L_initial_num_blocks_is_1\@
-_initial_num_blocks_is_7\@:
+.L_initial_num_blocks_is_7\@:
\INITIAL_BLOCKS \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*7, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_6\@:
+.L_initial_num_blocks_is_6\@:
\INITIAL_BLOCKS \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*6, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_5\@:
+.L_initial_num_blocks_is_5\@:
\INITIAL_BLOCKS \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*5, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_4\@:
+.L_initial_num_blocks_is_4\@:
\INITIAL_BLOCKS \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*4, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_3\@:
+.L_initial_num_blocks_is_3\@:
\INITIAL_BLOCKS \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*3, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_2\@:
+.L_initial_num_blocks_is_2\@:
\INITIAL_BLOCKS \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*2, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_1\@:
+.L_initial_num_blocks_is_1\@:
\INITIAL_BLOCKS \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*1, %r13
- jmp _initial_blocks_encrypted\@
+ jmp .L_initial_blocks_encrypted\@
-_initial_num_blocks_is_0\@:
+.L_initial_num_blocks_is_0\@:
\INITIAL_BLOCKS \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
-_initial_blocks_encrypted\@:
+.L_initial_blocks_encrypted\@:
test %r13, %r13
- je _zero_cipher_left\@
+ je .L_zero_cipher_left\@
sub $128, %r13
- je _eight_cipher_left\@
+ je .L_eight_cipher_left\@
@@ -373,9 +349,9 @@ _initial_blocks_encrypted\@:
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-_encrypt_by_8_new\@:
+.L_encrypt_by_8_new\@:
cmp $(255-8), %r15d
- jg _encrypt_by_8\@
+ jg .L_encrypt_by_8\@
@@ -383,30 +359,30 @@ _encrypt_by_8_new\@:
\GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
add $128, %r11
sub $128, %r13
- jne _encrypt_by_8_new\@
+ jne .L_encrypt_by_8_new\@
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- jmp _eight_cipher_left\@
+ jmp .L_eight_cipher_left\@
-_encrypt_by_8\@:
+.L_encrypt_by_8\@:
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
add $8, %r15b
\GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
add $128, %r11
sub $128, %r13
- jne _encrypt_by_8_new\@
+ jne .L_encrypt_by_8_new\@
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-_eight_cipher_left\@:
+.L_eight_cipher_left\@:
\GHASH_LAST_8 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
-_zero_cipher_left\@:
+.L_zero_cipher_left\@:
vmovdqu %xmm14, AadHash(arg2)
vmovdqu %xmm9, CurCount(arg2)
@@ -414,7 +390,7 @@ _zero_cipher_left\@:
mov arg5, %r13
and $15, %r13 # r13 = (arg5 mod 16)
- je _multiple_of_16_bytes\@
+ je .L_multiple_of_16_bytes\@
# handle the last <16 Byte block separately
@@ -428,7 +404,7 @@ _zero_cipher_left\@:
vmovdqu %xmm9, PBlockEncKey(arg2)
cmp $16, arg5
- jge _large_enough_update\@
+ jge .L_large_enough_update\@
lea (arg4,%r11,1), %r10
mov %r13, %r12
@@ -440,9 +416,9 @@ _zero_cipher_left\@:
# able to shift 16-r13 bytes (r13 is the
# number of bytes in plaintext mod 16)
- jmp _final_ghash_mul\@
+ jmp .L_final_ghash_mul\@
-_large_enough_update\@:
+.L_large_enough_update\@:
sub $16, %r11
add %r13, %r11
@@ -461,7 +437,7 @@ _large_enough_update\@:
# shift right 16-r13 bytes
vpshufb %xmm2, %xmm1, %xmm1
-_final_ghash_mul\@:
+.L_final_ghash_mul\@:
.if \ENC_DEC == DEC
vmovdqa %xmm1, %xmm2
vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
@@ -490,7 +466,7 @@ _final_ghash_mul\@:
# output r13 Bytes
vmovq %xmm9, %rax
cmp $8, %r13
- jle _less_than_8_bytes_left\@
+ jle .L_less_than_8_bytes_left\@
mov %rax, (arg3 , %r11)
add $8, %r11
@@ -498,15 +474,15 @@ _final_ghash_mul\@:
vmovq %xmm9, %rax
sub $8, %r13
-_less_than_8_bytes_left\@:
+.L_less_than_8_bytes_left\@:
movb %al, (arg3 , %r11)
add $1, %r11
shr $8, %rax
sub $1, %r13
- jne _less_than_8_bytes_left\@
+ jne .L_less_than_8_bytes_left\@
#############################
-_multiple_of_16_bytes\@:
+.L_multiple_of_16_bytes\@:
.endm
@@ -519,12 +495,12 @@ _multiple_of_16_bytes\@:
mov PBlockLen(arg2), %r12
test %r12, %r12
- je _partial_done\@
+ je .L_partial_done\@
#GHASH computation for the last <16 Byte block
\GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
-_partial_done\@:
+.L_partial_done\@:
mov AadLen(arg2), %r12 # r12 = aadLen (number of bytes)
shl $3, %r12 # convert into number of bits
vmovd %r12d, %xmm15 # len(A) in xmm15
@@ -547,49 +523,49 @@ _partial_done\@:
-_return_T\@:
+.L_return_T\@:
mov \AUTH_TAG, %r10 # r10 = authTag
mov \AUTH_TAG_LEN, %r11 # r11 = auth_tag_len
cmp $16, %r11
- je _T_16\@
+ je .L_T_16\@
cmp $8, %r11
- jl _T_4\@
+ jl .L_T_4\@
-_T_8\@:
+.L_T_8\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
test %r11, %r11
- je _return_T_done\@
-_T_4\@:
+ je .L_return_T_done\@
+.L_T_4\@:
vmovd %xmm9, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
test %r11, %r11
- je _return_T_done\@
-_T_123\@:
+ je .L_return_T_done\@
+.L_T_123\@:
vmovd %xmm9, %eax
cmp $2, %r11
- jl _T_1\@
+ jl .L_T_1\@
mov %ax, (%r10)
cmp $2, %r11
- je _return_T_done\@
+ je .L_return_T_done\@
add $2, %r10
sar $16, %eax
-_T_1\@:
+.L_T_1\@:
mov %al, (%r10)
- jmp _return_T_done\@
+ jmp .L_return_T_done\@
-_T_16\@:
+.L_T_16\@:
vmovdqu %xmm9, (%r10)
-_return_T_done\@:
+.L_return_T_done\@:
.endm
.macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
@@ -603,8 +579,8 @@ _return_T_done\@:
vpxor \T8, \T8, \T8
vpxor \T7, \T7, \T7
cmp $16, %r11
- jl _get_AAD_rest8\@
-_get_AAD_blocks\@:
+ jl .L_get_AAD_rest8\@
+.L_get_AAD_blocks\@:
vmovdqu (%r10), \T7
vpshufb SHUF_MASK(%rip), \T7, \T7
vpxor \T7, \T8, \T8
@@ -613,29 +589,29 @@ _get_AAD_blocks\@:
sub $16, %r12
sub $16, %r11
cmp $16, %r11
- jge _get_AAD_blocks\@
+ jge .L_get_AAD_blocks\@
vmovdqu \T8, \T7
test %r11, %r11
- je _get_AAD_done\@
+ je .L_get_AAD_done\@
vpxor \T7, \T7, \T7
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some CT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
+.L_get_AAD_rest8\@:
cmp $4, %r11
- jle _get_AAD_rest4\@
+ jle .L_get_AAD_rest4\@
movq (%r10), \T1
add $8, %r10
sub $8, %r11
vpslldq $8, \T1, \T1
vpsrldq $8, \T7, \T7
vpxor \T1, \T7, \T7
- jmp _get_AAD_rest8\@
-_get_AAD_rest4\@:
+ jmp .L_get_AAD_rest8\@
+.L_get_AAD_rest4\@:
test %r11, %r11
- jle _get_AAD_rest0\@
+ jle .L_get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
add $4, %r10
@@ -643,20 +619,22 @@ _get_AAD_rest4\@:
vpslldq $12, \T1, \T1
vpsrldq $4, \T7, \T7
vpxor \T1, \T7, \T7
-_get_AAD_rest0\@:
+.L_get_AAD_rest0\@:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- vmovdqu aad_shift_arr(%r11), \T1
- vpshufb \T1, \T7, \T7
-_get_AAD_rest_final\@:
+ vpshufb and a pair of shuffle masks */
+ leaq ALL_F(%rip), %r11
+ subq %r12, %r11
+ vmovdqu 16(%r11), \T1
+ andq $~3, %r11
+ vpshufb (%r11), \T7, \T7
+ vpand \T1, \T7, \T7
+.L_get_AAD_rest_final\@:
vpshufb SHUF_MASK(%rip), \T7, \T7
vpxor \T8, \T7, \T7
\GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
-_get_AAD_done\@:
+.L_get_AAD_done\@:
vmovdqu \T7, AadHash(arg2)
.endm
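
The deleted 272-byte aad_shift_arr table cannot simply gain a (%rip) suffix, because `aad_shift_arr(%r11)` needs both a RIP base and a register offset, and an x86-64 address encodes only one of the two. The rewrite instead derives both masks from the adjacent SHIFT_MASK/ALL_F constants: 16 bytes loaded at ALL_F+16-r12 give r12 ones followed by zeroes, and the 4-byte-aligned address ALL_F-r12 lands inside SHIFT_MASK, yielding the byte shuffle. A sketch, assuming SHIFT_MASK, ALL_F and 16 zero bytes are laid out consecutively as in this file (xmm registers illustrative):

	leaq	ALL_F(%rip), %r11	# one leaq, then plain register addressing
	subq	%r12, %r11		# %r12 = number of valid AAD bytes
	vmovdqu	16(%r11), %xmm1		# r12 x 0xff, then (16-r12) x 0x00
	andq	$~3, %r11		# shuffle in 4-byte steps...
	vpshufb	(%r11), %xmm2, %xmm2	# ...taken from inside SHIFT_MASK
	vpand	%xmm1, %xmm2, %xmm2	# ...and vpand trims the last 0-3 bytes
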
@@ -707,28 +685,28 @@ _get_AAD_done\@:
vpxor \XMMDst, \XMMDst, \XMMDst
cmp $8, \DLEN
- jl _read_lt8_\@
+ jl .L_read_lt8_\@
mov (\DPTR), %rax
vpinsrq $0, %rax, \XMMDst, \XMMDst
sub $8, \DLEN
- jz _done_read_partial_block_\@
+ jz .L_done_read_partial_block_\@
xor %eax, %eax
-_read_next_byte_\@:
+.L_read_next_byte_\@:
shl $8, %rax
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
- jnz _read_next_byte_\@
+ jnz .L_read_next_byte_\@
vpinsrq $1, %rax, \XMMDst, \XMMDst
- jmp _done_read_partial_block_\@
-_read_lt8_\@:
+ jmp .L_done_read_partial_block_\@
+.L_read_lt8_\@:
xor %eax, %eax
-_read_next_byte_lt8_\@:
+.L_read_next_byte_lt8_\@:
shl $8, %rax
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
- jnz _read_next_byte_lt8_\@
+ jnz .L_read_next_byte_lt8_\@
vpinsrq $0, %rax, \XMMDst, \XMMDst
-_done_read_partial_block_\@:
+.L_done_read_partial_block_\@:
.endm
# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
@@ -740,21 +718,21 @@ _done_read_partial_block_\@:
AAD_HASH ENC_DEC
mov PBlockLen(arg2), %r13
test %r13, %r13
- je _partial_block_done_\@ # Leave Macro if no partial blocks
+ je .L_partial_block_done_\@ # Leave Macro if no partial blocks
	# Read in input data without over-reading
cmp $16, \PLAIN_CYPH_LEN
- jl _fewer_than_16_bytes_\@
+ jl .L_fewer_than_16_bytes_\@
vmovdqu (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
- jmp _data_read_\@
+ jmp .L_data_read_\@
-_fewer_than_16_bytes_\@:
+.L_fewer_than_16_bytes_\@:
lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
mov \PLAIN_CYPH_LEN, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm1
mov PBlockLen(arg2), %r13
-_data_read_\@: # Finished reading in data
+.L_data_read_\@: # Finished reading in data
vmovdqu PBlockEncKey(arg2), %xmm9
vmovdqu HashKey(arg2), %xmm13
@@ -777,9 +755,9 @@ _data_read_\@: # Finished reading in data
sub $16, %r10
	# Determine if partial block is not being filled and
# shift mask accordingly
- jge _no_extra_mask_1_\@
+ jge .L_no_extra_mask_1_\@
sub %r10, %r12
-_no_extra_mask_1_\@:
+.L_no_extra_mask_1_\@:
vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -792,17 +770,17 @@ _no_extra_mask_1_\@:
vpxor %xmm3, \AAD_HASH, \AAD_HASH
test %r10, %r10
- jl _partial_incomplete_1_\@
+ jl .L_partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
\GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax,%eax
mov %rax, PBlockLen(arg2)
- jmp _dec_done_\@
-_partial_incomplete_1_\@:
+ jmp .L_dec_done_\@
+.L_partial_incomplete_1_\@:
add \PLAIN_CYPH_LEN, PBlockLen(arg2)
-_dec_done_\@:
+.L_dec_done_\@:
vmovdqu \AAD_HASH, AadHash(arg2)
.else
vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
@@ -813,9 +791,9 @@ _dec_done_\@:
sub $16, %r10
	# Determine if partial block is not being filled and
# shift mask accordingly
- jge _no_extra_mask_2_\@
+ jge .L_no_extra_mask_2_\@
sub %r10, %r12
-_no_extra_mask_2_\@:
+.L_no_extra_mask_2_\@:
vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -827,17 +805,17 @@ _no_extra_mask_2_\@:
vpxor %xmm9, \AAD_HASH, \AAD_HASH
test %r10, %r10
- jl _partial_incomplete_2_\@
+ jl .L_partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
\GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax,%eax
mov %rax, PBlockLen(arg2)
- jmp _encode_done_\@
-_partial_incomplete_2_\@:
+ jmp .L_encode_done_\@
+.L_partial_incomplete_2_\@:
add \PLAIN_CYPH_LEN, PBlockLen(arg2)
-_encode_done_\@:
+.L_encode_done_\@:
vmovdqu \AAD_HASH, AadHash(arg2)
vmovdqa SHUF_MASK(%rip), %xmm10
@@ -847,32 +825,32 @@ _encode_done_\@:
.endif
# output encrypted Bytes
test %r10, %r10
- jl _partial_fill_\@
+ jl .L_partial_fill_\@
mov %r13, %r12
mov $16, %r13
# Set r13 to be the number of bytes to write out
sub %r12, %r13
- jmp _count_set_\@
-_partial_fill_\@:
+ jmp .L_count_set_\@
+.L_partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
-_count_set_\@:
+.L_count_set_\@:
vmovdqa %xmm9, %xmm0
vmovq %xmm0, %rax
cmp $8, %r13
- jle _less_than_8_bytes_left_\@
+ jle .L_less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
vmovq %xmm0, %rax
sub $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $1, \DATA_OFFSET
shr $8, %rax
sub $1, %r13
- jne _less_than_8_bytes_left_\@
-_partial_block_done_\@:
+ jne .L_less_than_8_bytes_left_\@
+.L_partial_block_done_\@:
.endm # PARTIAL_BLOCK
###############################################################################
@@ -1073,7 +1051,7 @@ _partial_block_done_\@:
vmovdqa \XMM8, \T3
cmp $128, %r13
- jl _initial_blocks_done\@ # no need for precomputed constants
+ jl .L_initial_blocks_done\@ # no need for precomputed constants
###############################################################################
# Hashkey_i_k holds XORed values of the low and high parts of the Hashkey_i
@@ -1215,7 +1193,7 @@ _partial_block_done_\@:
###############################################################################
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
.endm
@@ -2023,7 +2001,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
vmovdqa \XMM8, \T3
cmp $128, %r13
- jl _initial_blocks_done\@ # no need for precomputed constants
+ jl .L_initial_blocks_done\@ # no need for precomputed constants
###############################################################################
# Hashkey_i_k holds XORed values of the low and high parts of the Hashkey_i
@@ -2167,7 +2145,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
###############################################################################
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
.endm
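
All of the label churn in this file follows one rule: a label spelled `.L...` is a GAS local symbol that never reaches the object file's symbol table, so the dozens of macro-expanded labels stop polluting /proc/kallsyms and confusing tools such as objtool. The trailing `\@` matters because these labels live inside `.macro` bodies that are instantiated more than once (the avx_gen2 and avx_gen4 variants share them); it expands to a counter unique to each expansion, roughly:

	.macro AES_LOOP
	.Laes_loop\@:			# expands to .Laes_loop0, .Laes_loop1, ...
		sub	$1, %eax
		jnz	.Laes_loop\@	# each expansion branches to its own copy
	.endm
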
diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
index 9243f6289d34..7c1abc513f34 100644
--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
@@ -80,7 +80,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -132,7 +132,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -300,11 +300,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vmovdqa .Ltf_s2_bitmatrix, t0; \
- vmovdqa .Ltf_inv_bitmatrix, t1; \
- vmovdqa .Ltf_id_bitmatrix, t2; \
- vmovdqa .Ltf_aff_bitmatrix, t3; \
- vmovdqa .Ltf_x2_bitmatrix, t4; \
+ vmovdqa .Ltf_s2_bitmatrix(%rip), t0; \
+ vmovdqa .Ltf_inv_bitmatrix(%rip), t1; \
+ vmovdqa .Ltf_id_bitmatrix(%rip), t2; \
+ vmovdqa .Ltf_aff_bitmatrix(%rip), t3; \
+ vmovdqa .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -324,13 +324,13 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vmovdqa .Linv_shift_row, t0; \
- vmovdqa .Lshift_row, t1; \
- vbroadcastss .L0f0f0f0f, t6; \
- vmovdqa .Ltf_lo__inv_aff__and__s2, t2; \
- vmovdqa .Ltf_hi__inv_aff__and__s2, t3; \
- vmovdqa .Ltf_lo__x2__and__fwd_aff, t4; \
- vmovdqa .Ltf_hi__x2__and__fwd_aff, t5; \
+ vmovdqa .Linv_shift_row(%rip), t0; \
+ vmovdqa .Lshift_row(%rip), t1; \
+ vbroadcastss .L0f0f0f0f(%rip), t6; \
+ vmovdqa .Ltf_lo__inv_aff__and__s2(%rip), t2; \
+ vmovdqa .Ltf_hi__inv_aff__and__s2(%rip), t3; \
+ vmovdqa .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
+ vmovdqa .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
\
vaesenclast t7, x0, x0; \
vaesenclast t7, x4, x4; \
diff --git a/arch/x86/crypto/aria-aesni-avx2-asm_64.S b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
index 82a14b4ad920..c60fa2980630 100644
--- a/arch/x86/crypto/aria-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
@@ -96,7 +96,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti128 .Lshufb_16x16b, a0; \
+ vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -148,7 +148,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti128 .Lshufb_16x16b, a0; \
+ vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -307,11 +307,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vpbroadcastq .Ltf_s2_bitmatrix, t0; \
- vpbroadcastq .Ltf_inv_bitmatrix, t1; \
- vpbroadcastq .Ltf_id_bitmatrix, t2; \
- vpbroadcastq .Ltf_aff_bitmatrix, t3; \
- vpbroadcastq .Ltf_x2_bitmatrix, t4; \
+ vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
+ vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
+ vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
+ vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
+ vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -332,12 +332,12 @@
t4, t5, t6, t7) \
vpxor t7, t7, t7; \
vpxor t6, t6, t6; \
- vbroadcasti128 .Linv_shift_row, t0; \
- vbroadcasti128 .Lshift_row, t1; \
- vbroadcasti128 .Ltf_lo__inv_aff__and__s2, t2; \
- vbroadcasti128 .Ltf_hi__inv_aff__and__s2, t3; \
- vbroadcasti128 .Ltf_lo__x2__and__fwd_aff, t4; \
- vbroadcasti128 .Ltf_hi__x2__and__fwd_aff, t5; \
+ vbroadcasti128 .Linv_shift_row(%rip), t0; \
+ vbroadcasti128 .Lshift_row(%rip), t1; \
+ vbroadcasti128 .Ltf_lo__inv_aff__and__s2(%rip), t2; \
+ vbroadcasti128 .Ltf_hi__inv_aff__and__s2(%rip), t3; \
+ vbroadcasti128 .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
+ vbroadcasti128 .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
\
vextracti128 $1, x0, t6##_x; \
vaesenclast t7##_x, x0##_x, x0##_x; \
@@ -369,7 +369,7 @@
vaesdeclast t7##_x, t6##_x, t6##_x; \
vinserti128 $1, t6##_x, x6, x6; \
\
- vpbroadcastd .L0f0f0f0f, t6; \
+ vpbroadcastd .L0f0f0f0f(%rip), t6; \
\
/* AES inverse shift rows */ \
vpshufb t0, x0, x0; \
diff --git a/arch/x86/crypto/aria-gfni-avx512-asm_64.S b/arch/x86/crypto/aria-gfni-avx512-asm_64.S
index 3193f0701450..860887e5d02e 100644
--- a/arch/x86/crypto/aria-gfni-avx512-asm_64.S
+++ b/arch/x86/crypto/aria-gfni-avx512-asm_64.S
@@ -80,7 +80,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti64x2 .Lshufb_16x16b, a0; \
+ vbroadcasti64x2 .Lshufb_16x16b(%rip), a0; \
vmovdqu64 st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -132,7 +132,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti64x2 .Lshufb_16x16b, a0; \
+ vbroadcasti64x2 .Lshufb_16x16b(%rip), a0; \
vmovdqu64 st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -308,11 +308,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vpbroadcastq .Ltf_s2_bitmatrix, t0; \
- vpbroadcastq .Ltf_inv_bitmatrix, t1; \
- vpbroadcastq .Ltf_id_bitmatrix, t2; \
- vpbroadcastq .Ltf_aff_bitmatrix, t3; \
- vpbroadcastq .Ltf_x2_bitmatrix, t4; \
+ vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
+ vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
+ vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
+ vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
+ vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -332,11 +332,11 @@
y4, y5, y6, y7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vpbroadcastq .Ltf_s2_bitmatrix, t0; \
- vpbroadcastq .Ltf_inv_bitmatrix, t1; \
- vpbroadcastq .Ltf_id_bitmatrix, t2; \
- vpbroadcastq .Ltf_aff_bitmatrix, t3; \
- vpbroadcastq .Ltf_x2_bitmatrix, t4; \
+ vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
+ vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
+ vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
+ vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
+ vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index 4a30618281ec..646477a13e11 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -52,10 +52,10 @@
/* \
* S-function with AES subbytes \
*/ \
- vmovdqa .Linv_shift_row, t4; \
- vbroadcastss .L0f0f0f0f, t7; \
- vmovdqa .Lpre_tf_lo_s1, t0; \
- vmovdqa .Lpre_tf_hi_s1, t1; \
+ vmovdqa .Linv_shift_row(%rip), t4; \
+ vbroadcastss .L0f0f0f0f(%rip), t7; \
+ vmovdqa .Lpre_tf_lo_s1(%rip), t0; \
+ vmovdqa .Lpre_tf_hi_s1(%rip), t1; \
\
/* AES inverse shift rows */ \
vpshufb t4, x0, x0; \
@@ -68,8 +68,8 @@
vpshufb t4, x6, x6; \
\
/* prefilter sboxes 1, 2 and 3 */ \
- vmovdqa .Lpre_tf_lo_s4, t2; \
- vmovdqa .Lpre_tf_hi_s4, t3; \
+ vmovdqa .Lpre_tf_lo_s4(%rip), t2; \
+ vmovdqa .Lpre_tf_hi_s4(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x1, t0, t1, t7, t6); \
@@ -83,8 +83,8 @@
filter_8bit(x6, t2, t3, t7, t6); \
\
/* AES subbytes + AES shift rows */ \
- vmovdqa .Lpost_tf_lo_s1, t0; \
- vmovdqa .Lpost_tf_hi_s1, t1; \
+ vmovdqa .Lpost_tf_lo_s1(%rip), t0; \
+ vmovdqa .Lpost_tf_hi_s1(%rip), t1; \
vaesenclast t4, x0, x0; \
vaesenclast t4, x7, x7; \
vaesenclast t4, x1, x1; \
@@ -95,16 +95,16 @@
vaesenclast t4, x6, x6; \
\
/* postfilter sboxes 1 and 4 */ \
- vmovdqa .Lpost_tf_lo_s3, t2; \
- vmovdqa .Lpost_tf_hi_s3, t3; \
+ vmovdqa .Lpost_tf_lo_s3(%rip), t2; \
+ vmovdqa .Lpost_tf_hi_s3(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x3, t0, t1, t7, t6); \
filter_8bit(x6, t0, t1, t7, t6); \
\
/* postfilter sbox 3 */ \
- vmovdqa .Lpost_tf_lo_s2, t4; \
- vmovdqa .Lpost_tf_hi_s2, t5; \
+ vmovdqa .Lpost_tf_lo_s2(%rip), t4; \
+ vmovdqa .Lpost_tf_hi_s2(%rip), t5; \
filter_8bit(x2, t2, t3, t7, t6); \
filter_8bit(x5, t2, t3, t7, t6); \
\
@@ -443,7 +443,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -482,7 +482,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
#define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
y6, y7, rio, key) \
vmovq key, x0; \
- vpshufb .Lpack_bswap, x0, x0; \
+ vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor 0 * 16(rio), x0, y7; \
vpxor 1 * 16(rio), x0, y6; \
@@ -533,7 +533,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu x0, stack_tmp0; \
\
vmovq key, x0; \
- vpshufb .Lpack_bswap, x0, x0; \
+ vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor x0, y7, y7; \
vpxor x0, y6, y6; \
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index deaf62aa73a6..a0eb94e53b1b 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -64,12 +64,12 @@
/* \
* S-function with AES subbytes \
*/ \
- vbroadcasti128 .Linv_shift_row, t4; \
- vpbroadcastd .L0f0f0f0f, t7; \
- vbroadcasti128 .Lpre_tf_lo_s1, t5; \
- vbroadcasti128 .Lpre_tf_hi_s1, t6; \
- vbroadcasti128 .Lpre_tf_lo_s4, t2; \
- vbroadcasti128 .Lpre_tf_hi_s4, t3; \
+ vbroadcasti128 .Linv_shift_row(%rip), t4; \
+ vpbroadcastd .L0f0f0f0f(%rip), t7; \
+ vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
+ vbroadcasti128 .Lpre_tf_hi_s1(%rip), t6; \
+ vbroadcasti128 .Lpre_tf_lo_s4(%rip), t2; \
+ vbroadcasti128 .Lpre_tf_hi_s4(%rip), t3; \
\
/* AES inverse shift rows */ \
vpshufb t4, x0, x0; \
@@ -115,8 +115,8 @@
vinserti128 $1, t2##_x, x6, x6; \
vextracti128 $1, x1, t3##_x; \
vextracti128 $1, x4, t2##_x; \
- vbroadcasti128 .Lpost_tf_lo_s1, t0; \
- vbroadcasti128 .Lpost_tf_hi_s1, t1; \
+ vbroadcasti128 .Lpost_tf_lo_s1(%rip), t0; \
+ vbroadcasti128 .Lpost_tf_hi_s1(%rip), t1; \
vaesenclast t4##_x, x2##_x, x2##_x; \
vaesenclast t4##_x, t6##_x, t6##_x; \
vinserti128 $1, t6##_x, x2, x2; \
@@ -131,16 +131,16 @@
vinserti128 $1, t2##_x, x4, x4; \
\
/* postfilter sboxes 1 and 4 */ \
- vbroadcasti128 .Lpost_tf_lo_s3, t2; \
- vbroadcasti128 .Lpost_tf_hi_s3, t3; \
+ vbroadcasti128 .Lpost_tf_lo_s3(%rip), t2; \
+ vbroadcasti128 .Lpost_tf_hi_s3(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x3, t0, t1, t7, t6); \
filter_8bit(x6, t0, t1, t7, t6); \
\
/* postfilter sbox 3 */ \
- vbroadcasti128 .Lpost_tf_lo_s2, t4; \
- vbroadcasti128 .Lpost_tf_hi_s2, t5; \
+ vbroadcasti128 .Lpost_tf_lo_s2(%rip), t4; \
+ vbroadcasti128 .Lpost_tf_hi_s2(%rip), t5; \
filter_8bit(x2, t2, t3, t7, t6); \
filter_8bit(x5, t2, t3, t7, t6); \
\
@@ -475,7 +475,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti128 .Lshufb_16x16b, a0; \
+ vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -514,7 +514,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
#define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
y6, y7, rio, key) \
vpbroadcastq key, x0; \
- vpshufb .Lpack_bswap, x0, x0; \
+ vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor 0 * 32(rio), x0, y7; \
vpxor 1 * 32(rio), x0, y6; \
@@ -565,7 +565,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu x0, stack_tmp0; \
\
vpbroadcastq key, x0; \
- vpshufb .Lpack_bswap, x0, x0; \
+ vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor x0, y7, y7; \
vpxor x0, y6, y6; \
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 347c059f5940..816b6bb8bded 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -77,11 +77,13 @@
#define RXORbl %r9b
#define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \
+ leaq T0(%rip), tmp1; \
movzbl ab ## bl, tmp2 ## d; \
+ xorq (tmp1, tmp2, 8), dst; \
+ leaq T1(%rip), tmp2; \
movzbl ab ## bh, tmp1 ## d; \
rorq $16, ab; \
- xorq T0(, tmp2, 8), dst; \
- xorq T1(, tmp1, 8), dst;
+ xorq (tmp2, tmp1, 8), dst;
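
Scaled-index table lookups such as `xorq s8(, RT0, 8), dst` are the awkward case: an x86-64 address can use either a %rip base or an index register, not both, so every lookup splits into a `leaq` of the table base plus an indexed access. The rewritten xor2ror16 above also reorders the loads so the macro's two temporaries alternate between index and table base and no extra scratch register is needed; cast5, cast6 and des3_ede below get the same treatment. In isolation (registers illustrative):

	leaq	s8(%rip), %r11		# table base, position-independent
	xorq	(%r11, %rax, 8), %rdx	# was: xorq s8(, %rax, 8), %rdx
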
/**********************************************************************
1-way camellia
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index 0326a01503c3..b4e460a87f18 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -84,15 +84,19 @@
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
+ leaq s1(%rip), RID2; \
+ movl (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
+ leaq s2(%rip), RID1; \
+ op1 (RID1,RID2,4), dst ## d; \
shrq $16, src; \
- movl s1(, RID1, 4), dst ## d; \
- op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
+ leaq s3(%rip), RID2; \
+ op2 (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
- op2 s3(, RID1, 4), dst ## d; \
- op3 s4(, RID2, 4), dst ## d;
+ leaq s4(%rip), RID1; \
+ op3 (RID1,RID2,4), dst ## d;
#define dummy(d) /* do nothing */
@@ -151,15 +155,15 @@
subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
#define enc_preload_rkr() \
- vbroadcastss .L16_mask, RKR; \
+ vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR;
#define dec_preload_rkr() \
- vbroadcastss .L16_mask, RKR; \
+ vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR; \
- vpshufb .Lbswap128_mask, RKR, RKR;
+ vpshufb .Lbswap128_mask(%rip), RKR, RKR;
#define transpose_2x4(x0, x1, t0, t1) \
vpunpckldq x1, x0, t0; \
@@ -235,9 +239,9 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
movq %rdi, CTX;
- vmovdqa .Lbswap_mask, RKM;
- vmovd .Lfirst_mask, R1ST;
- vmovd .L32_mask, R32;
+ vmovdqa .Lbswap_mask(%rip), RKM;
+ vmovd .Lfirst_mask(%rip), R1ST;
+ vmovd .L32_mask(%rip), R32;
enc_preload_rkr();
inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -271,7 +275,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
popq %rbx;
popq %r15;
- vmovdqa .Lbswap_mask, RKM;
+ vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
@@ -308,9 +312,9 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
movq %rdi, CTX;
- vmovdqa .Lbswap_mask, RKM;
- vmovd .Lfirst_mask, R1ST;
- vmovd .L32_mask, R32;
+ vmovdqa .Lbswap_mask(%rip), RKM;
+ vmovd .Lfirst_mask(%rip), R1ST;
+ vmovd .L32_mask(%rip), R32;
dec_preload_rkr();
inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -341,7 +345,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
round(RL, RR, 1, 2);
round(RR, RL, 0, 1);
- vmovdqa .Lbswap_mask, RKM;
+ vmovdqa .Lbswap_mask(%rip), RKM;
popq %rbx;
popq %r15;
@@ -504,8 +508,8 @@ SYM_FUNC_START(cast5_ctr_16way)
vpcmpeqd RKR, RKR, RKR;
vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
- vmovdqa .Lbswap_iv_mask, R1ST;
- vmovdqa .Lbswap128_mask, RKM;
+ vmovdqa .Lbswap_iv_mask(%rip), R1ST;
+ vmovdqa .Lbswap128_mask(%rip), RKM;
/* load IV and byteswap */
vmovq (%rcx), RX;
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 82b716fd5dba..9e86d460b409 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -84,15 +84,19 @@
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
+ leaq s1(%rip), RID2; \
+ movl (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
+ leaq s2(%rip), RID1; \
+ op1 (RID1,RID2,4), dst ## d; \
shrq $16, src; \
- movl s1(, RID1, 4), dst ## d; \
- op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
+ leaq s3(%rip), RID2; \
+ op2 (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
- op2 s3(, RID1, 4), dst ## d; \
- op3 s4(, RID2, 4), dst ## d;
+ leaq s4(%rip), RID1; \
+ op3 (RID1,RID2,4), dst ## d;
#define dummy(d) /* do nothing */
@@ -175,10 +179,10 @@
qop(RD, RC, 1);
#define shuffle(mask) \
- vpshufb mask, RKR, RKR;
+ vpshufb mask(%rip), RKR, RKR;
#define preload_rkr(n, do_mask, mask) \
- vbroadcastss .L16_mask, RKR; \
+ vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor (kr+n*16)(CTX), RKR, RKR; \
do_mask(mask);
@@ -258,9 +262,9 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
movq %rdi, CTX;
- vmovdqa .Lbswap_mask, RKM;
- vmovd .Lfirst_mask, R1ST;
- vmovd .L32_mask, R32;
+ vmovdqa .Lbswap_mask(%rip), RKM;
+ vmovd .Lfirst_mask(%rip), R1ST;
+ vmovd .L32_mask(%rip), R32;
inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -284,7 +288,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
popq %rbx;
popq %r15;
- vmovdqa .Lbswap_mask, RKM;
+ vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -306,9 +310,9 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
movq %rdi, CTX;
- vmovdqa .Lbswap_mask, RKM;
- vmovd .Lfirst_mask, R1ST;
- vmovd .L32_mask, R32;
+ vmovdqa .Lbswap_mask(%rip), RKM;
+ vmovd .Lfirst_mask(%rip), R1ST;
+ vmovd .L32_mask(%rip), R32;
inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -332,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
popq %rbx;
popq %r15;
- vmovdqa .Lbswap_mask, RKM;
+ vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
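
Note the small asymmetry in cast6: `shuffle(mask)` appends the (%rip) suffix inside the macro body, so the preload_rkr call sites (not in this hunk) keep passing the bare `.L` symbol and only one line changes:

	#define shuffle(mask) \
		vpshufb mask(%rip), RKR, RKR;	/* caller still writes shuffle(.Lbswap128_mask) */
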
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index ca53e96996ac..5d31137e2c7d 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -90,7 +90,7 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
sub $0x40, LEN
add $0x40, BUF
cmp $0x40, LEN
- jb less_64
+ jb .Lless_64
#ifdef __x86_64__
movdqa .Lconstant_R2R1(%rip), CONSTANT
@@ -98,7 +98,7 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
movdqa .Lconstant_R2R1, CONSTANT
#endif
-loop_64:/* 64 bytes Full cache line folding */
+.Lloop_64:/* 64 bytes Full cache line folding */
prefetchnta 0x40(BUF)
movdqa %xmm1, %xmm5
movdqa %xmm2, %xmm6
@@ -139,8 +139,8 @@ loop_64:/* 64 bytes Full cache line folding */
sub $0x40, LEN
add $0x40, BUF
cmp $0x40, LEN
- jge loop_64
-less_64:/* Folding cache line into 128bit */
+ jge .Lloop_64
+.Lless_64:/* Folding cache line into 128bit */
#ifdef __x86_64__
movdqa .Lconstant_R4R3(%rip), CONSTANT
#else
@@ -167,8 +167,8 @@ less_64:/* Folding cache line into 128bit */
pxor %xmm4, %xmm1
cmp $0x10, LEN
- jb fold_64
-loop_16:/* Folding rest buffer into 128bit */
+ jb .Lfold_64
+.Lloop_16:/* Folding rest buffer into 128bit */
movdqa %xmm1, %xmm5
pclmulqdq $0x00, CONSTANT, %xmm1
pclmulqdq $0x11, CONSTANT, %xmm5
@@ -177,9 +177,9 @@ loop_16:/* Folding rest buffer into 128bit */
sub $0x10, LEN
add $0x10, BUF
cmp $0x10, LEN
- jge loop_16
+ jge .Lloop_16
-fold_64:
+.Lfold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
* to the input stream */
pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index ec35915f0901..81ce0f4db555 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -49,15 +49,15 @@
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
.macro LABEL prefix n
-\prefix\n\():
+.L\prefix\n\():
.endm
.macro JMPTBL_ENTRY i
-.quad crc_\i
+.quad .Lcrc_\i
.endm
.macro JNC_LESS_THAN j
- jnc less_than_\j
+ jnc .Lless_than_\j
.endm
# Define threshold where buffers are considered "small" and routed to more
@@ -108,30 +108,30 @@ SYM_FUNC_START(crc_pcl)
neg %bufp
and $7, %bufp # calculate the unalignment amount of
# the address
- je proc_block # Skip if aligned
+ je .Lproc_block # Skip if aligned
## If len is less than 8 and we're unaligned, we need to jump
## to special code to avoid reading beyond the end of the buffer
cmp $8, len
- jae do_align
+ jae .Ldo_align
# less_than_8 expects length in upper 3 bits of len_dw
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
shl $32-3+1, len_dw
- jmp less_than_8_post_shl1
+ jmp .Lless_than_8_post_shl1
-do_align:
+.Ldo_align:
#### Calculate CRC of unaligned bytes of the buffer (if any)
	movq	(bufptmp), tmp		# load a quadword from the buffer
add %bufp, bufptmp # align buffer pointer for quadword
# processing
sub %bufp, len # update buffer length
-align_loop:
+.Lalign_loop:
crc32b %bl, crc_init_dw # compute crc32 of 1-byte
shr $8, tmp # get next byte
dec %bufp
- jne align_loop
+ jne .Lalign_loop
-proc_block:
+.Lproc_block:
################################################################
## 2) PROCESS BLOCKS:
@@ -141,11 +141,11 @@ proc_block:
movq len, tmp # save num bytes in tmp
cmpq $128*24, len
- jae full_block
+ jae .Lfull_block
-continue_block:
+.Lcontinue_block:
cmpq $SMALL_SIZE, len
- jb small
+ jb .Lsmall
## len < 128*24
movq $2731, %rax # 2731 = ceil(2^16 / 24)
@@ -168,13 +168,14 @@ continue_block:
xor crc2, crc2
## branch into array
- mov jump_table(,%rax,8), %bufp
+ leaq jump_table(%rip), %bufp
+ mov (%bufp,%rax,8), %bufp
JMP_NOSPEC bufp
################################################################
## 2a) PROCESS FULL BLOCKS:
################################################################
-full_block:
+.Lfull_block:
movl $128,%eax
lea 128*8*2(block_0), block_1
lea 128*8*3(block_0), block_2
@@ -189,7 +190,6 @@ full_block:
## 3) CRC Array:
################################################################
-crc_array:
i=128
.rept 128-1
.altmacro
@@ -242,28 +242,28 @@ LABEL crc_ 0
ENDBR
mov tmp, len
cmp $128*24, tmp
- jae full_block
+ jae .Lfull_block
cmp $24, tmp
- jae continue_block
+ jae .Lcontinue_block
-less_than_24:
+.Lless_than_24:
shl $32-4, len_dw # less_than_16 expects length
# in upper 4 bits of len_dw
- jnc less_than_16
+ jnc .Lless_than_16
crc32q (bufptmp), crc_init
crc32q 8(bufptmp), crc_init
- jz do_return
+ jz .Ldo_return
add $16, bufptmp
# len is less than 8 if we got here
# less_than_8 expects length in upper 3 bits of len_dw
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
shl $2, len_dw
- jmp less_than_8_post_shl1
+ jmp .Lless_than_8_post_shl1
#######################################################################
## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full)
#######################################################################
-small:
+.Lsmall:
shl $32-8, len_dw # Prepare len_dw for less_than_256
j=256
.rept 5 # j = {256, 128, 64, 32, 16}
@@ -279,32 +279,32 @@ LABEL less_than_ %j # less_than_j: Length should be in
crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data
i=i+8
.endr
- jz do_return # Return if remaining length is zero
+ jz .Ldo_return # Return if remaining length is zero
add $j, bufptmp # Advance buf
.endr
-less_than_8: # Length should be stored in
+.Lless_than_8: # Length should be stored in
# upper 3 bits of len_dw
shl $1, len_dw
-less_than_8_post_shl1:
- jnc less_than_4
+.Lless_than_8_post_shl1:
+ jnc .Lless_than_4
crc32l (bufptmp), crc_init_dw # CRC of 4 bytes
- jz do_return # return if remaining data is zero
+ jz .Ldo_return # return if remaining data is zero
add $4, bufptmp
-less_than_4: # Length should be stored in
+.Lless_than_4: # Length should be stored in
# upper 2 bits of len_dw
shl $1, len_dw
- jnc less_than_2
+ jnc .Lless_than_2
crc32w (bufptmp), crc_init_dw # CRC of 2 bytes
- jz do_return # return if remaining data is zero
+ jz .Ldo_return # return if remaining data is zero
add $2, bufptmp
-less_than_2: # Length should be stored in the MSB
+.Lless_than_2: # Length should be stored in the MSB
# of len_dw
shl $1, len_dw
- jnc less_than_1
+ jnc .Lless_than_1
crc32b (bufptmp), crc_init_dw # CRC of 1 byte
-less_than_1: # Length should be zero
-do_return:
+.Lless_than_1: # Length should be zero
+.Ldo_return:
movq crc_init, %rax
popq %rsi
popq %rdi
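
The crc32c dispatcher needs the same two-step form for its computed goto: the table of `.quad .Lcrc_<i>` entries is addressed RIP-relatively once, then indexed through a register, and the retpoline-safe JMP_NOSPEC macro performs the indirect jump. Schematically, using a bare register in place of the file's bufp alias:

	leaq	jump_table(%rip), %r9	# base of the .quad entry table
	mov	(%r9, %rax, 8), %r9	# fetch the .Lcrc_<i> entry
	JMP_NOSPEC r9			# spectre-hardened indirect jump
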
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
index f4c760f4cade..cf21b998e77c 100644
--- a/arch/x86/crypto/des3_ede-asm_64.S
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -129,21 +129,29 @@
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
shrq $16, RW0; \
- movq s8(, RT0, 8), RT0; \
- xorq s6(, RT1, 8), to; \
+ leaq s8(%rip), RW1; \
+ movq (RW1, RT0, 8), RT0; \
+ leaq s6(%rip), RW1; \
+ xorq (RW1, RT1, 8), to; \
movzbl RW0bl, RL1d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
- xorq s4(, RT2, 8), RT0; \
- xorq s2(, RT3, 8), to; \
+ leaq s4(%rip), RW1; \
+ xorq (RW1, RT2, 8), RT0; \
+ leaq s2(%rip), RW1; \
+ xorq (RW1, RT3, 8), to; \
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
- xorq s7(, RL1, 8), RT0; \
- xorq s5(, RT1, 8), to; \
- xorq s3(, RT2, 8), RT0; \
+ leaq s7(%rip), RW1; \
+ xorq (RW1, RL1, 8), RT0; \
+ leaq s5(%rip), RW1; \
+ xorq (RW1, RT1, 8), to; \
+ leaq s3(%rip), RW1; \
+ xorq (RW1, RT2, 8), RT0; \
load_next_key(n, RW0); \
xorq RT0, to; \
- xorq s1(, RT3, 8), to; \
+ leaq s1(%rip), RW1; \
+ xorq (RW1, RT3, 8), to; \
#define load_next_key(n, RWx) \
movq (((n) + 1) * 8)(CTX), RWx;
@@ -355,65 +363,89 @@ SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
- xorq s8(, RT3, 8), to##0; \
- xorq s6(, RT1, 8), to##0; \
+ leaq s8(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##0; \
+ leaq s6(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
- xorq s4(, RT3, 8), to##0; \
- xorq s2(, RT1, 8), to##0; \
+ leaq s4(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##0; \
+ leaq s2(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
- xorq s7(, RT3, 8), to##0; \
- xorq s5(, RT1, 8), to##0; \
+ leaq s7(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##0; \
+ leaq s5(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
load_next_key(n, RW0); \
- xorq s3(, RT3, 8), to##0; \
- xorq s1(, RT1, 8), to##0; \
+ leaq s3(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##0; \
+ leaq s1(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##0; \
xorq from##1, RW1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
- xorq s8(, RT3, 8), to##1; \
- xorq s6(, RT1, 8), to##1; \
+ leaq s8(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##1; \
+ leaq s6(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
- xorq s4(, RT3, 8), to##1; \
- xorq s2(, RT1, 8), to##1; \
+ leaq s4(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##1; \
+ leaq s2(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrl $16, RW1d; \
- xorq s7(, RT3, 8), to##1; \
- xorq s5(, RT1, 8), to##1; \
+ leaq s7(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##1; \
+ leaq s5(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
do_movq(RW0, RW1); \
- xorq s3(, RT3, 8), to##1; \
- xorq s1(, RT1, 8), to##1; \
+ leaq s3(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##1; \
+ leaq s1(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##1; \
xorq from##2, RW2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
- xorq s8(, RT3, 8), to##2; \
- xorq s6(, RT1, 8), to##2; \
+ leaq s8(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##2; \
+ leaq s6(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
- xorq s4(, RT3, 8), to##2; \
- xorq s2(, RT1, 8), to##2; \
+ leaq s4(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##2; \
+ leaq s2(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrl $16, RW2d; \
- xorq s7(, RT3, 8), to##2; \
- xorq s5(, RT1, 8), to##2; \
+ leaq s7(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##2; \
+ leaq s5(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
do_movq(RW0, RW2); \
- xorq s3(, RT3, 8), to##2; \
- xorq s1(, RT1, 8), to##2;
+ leaq s3(%rip), RT2; \
+ xorq (RT2, RT3, 8), to##2; \
+ leaq s1(%rip), RT2; \
+ xorq (RT2, RT1, 8), to##2;
#define __movq(src, dst) \
movq src, dst;
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 257ed9446f3e..99cb983ded9e 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -93,7 +93,7 @@ SYM_FUNC_START(clmul_ghash_mul)
FRAME_BEGIN
movups (%rdi), DATA
movups (%rsi), SHASH
- movaps .Lbswap_mask, BSWAP
+ movaps .Lbswap_mask(%rip), BSWAP
pshufb BSWAP, DATA
call __clmul_gf128mul_ble
pshufb BSWAP, DATA
@@ -110,7 +110,7 @@ SYM_FUNC_START(clmul_ghash_update)
FRAME_BEGIN
cmp $16, %rdx
jb .Lupdate_just_ret # check length
- movaps .Lbswap_mask, BSWAP
+ movaps .Lbswap_mask(%rip), BSWAP
movups (%rdi), DATA
movups (%rcx), SHASH
pshufb BSWAP, DATA
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index a96b2fd26dab..4b49bdc95265 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -485,18 +485,18 @@
xchg WK_BUF, PRECALC_BUF
.align 32
-_loop:
+.L_loop:
/*
* code loops through more than one block
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
test BLOCKS_CTR, BLOCKS_CTR
- jnz _begin
+ jnz .L_begin
.align 32
- jmp _end
+ jmp .L_end
.align 32
-_begin:
+.L_begin:
/*
* Do first block
@@ -508,9 +508,6 @@ _begin:
.set j, j+2
.endr
- jmp _loop0
-_loop0:
-
/*
* rounds:
* 10,12,14,16,18
@@ -545,7 +542,7 @@ _loop0:
UPDATE_HASH 16(HASH_PTR), E
test BLOCKS_CTR, BLOCKS_CTR
- jz _loop
+ jz .L_loop
mov TB, B
@@ -562,8 +559,6 @@ _loop0:
.set j, j+2
.endr
- jmp _loop1
-_loop1:
/*
* rounds
* 20+80,22+80,24+80,26+80,28+80
@@ -574,9 +569,6 @@ _loop1:
.set j, j+2
.endr
- jmp _loop2
-_loop2:
-
/*
* rounds
* 40+80,42+80,44+80,46+80,48+80
@@ -592,9 +584,6 @@ _loop2:
/* Move to the next block only if needed*/
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
- jmp _loop3
-_loop3:
-
/*
* rounds
* 60+80,62+80,64+80,66+80,68+80
@@ -623,10 +612,10 @@ _loop3:
xchg WK_BUF, PRECALC_BUF
- jmp _loop
+ jmp .L_loop
.align 32
- _end:
+.L_end:
.endm
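
Beyond the renames, the sha1_avx2 hunks also delete several jump-to-next-instruction pairs that were pure padding between round groups; there is no reason to keep the no-op branches. The deleted idiom looked like:

	jmp	_loop0		# falls through to the next line anyway...
_loop0:				# ...so both the jmp and the label are dropped
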
/*
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 5555b5d5215a..53de72bdd851 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -360,7 +360,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
and $~15, %rsp # align stack pointer
shl $6, NUM_BLKS # convert to bytes
- jz done_hash
+ jz .Ldone_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, _INP_END(%rsp)
@@ -377,7 +377,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
-loop0:
+.Lloop0:
lea K256(%rip), TBL
## byte swap first 16 dwords
@@ -391,7 +391,7 @@ loop0:
## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
-loop1:
+.Lloop1:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED
@@ -410,10 +410,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
sub $1, SRND
- jne loop1
+ jne .Lloop1
mov $2, SRND
-loop2:
+.Lloop2:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
DO_ROUND 0
@@ -433,7 +433,7 @@ loop2:
vmovdqa X3, X1
sub $1, SRND
- jne loop2
+ jne .Lloop2
addm (4*0)(CTX),a
addm (4*1)(CTX),b
@@ -447,9 +447,9 @@ loop2:
mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
- jne loop0
+ jne .Lloop0
-done_hash:
+.Ldone_hash:
mov %rbp, %rsp
popq %rbp
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 3eada9416852..9918212faf91 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -538,12 +538,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
and $-32, %rsp # align rsp to 32 byte boundary
shl $6, NUM_BLKS # convert to bytes
- jz done_hash
+ jz .Ldone_hash
lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
mov NUM_BLKS, _INP_END(%rsp)
cmp NUM_BLKS, INP
- je only_one_block
+ je .Lonly_one_block
## load initial digest
mov (CTX), a
@@ -561,7 +561,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
mov CTX, _CTX(%rsp)
-loop0:
+.Lloop0:
## Load first 16 dwords from two blocks
VMOVDQ 0*32(INP),XTMP0
VMOVDQ 1*32(INP),XTMP1
@@ -580,7 +580,7 @@ loop0:
vperm2i128 $0x20, XTMP3, XTMP1, X2
vperm2i128 $0x31, XTMP3, XTMP1, X3
-last_block_enter:
+.Llast_block_enter:
add $64, INP
mov INP, _INP(%rsp)
@@ -588,34 +588,40 @@ last_block_enter:
xor SRND, SRND
.align 16
-loop1:
- vpaddd K256+0*32(SRND), X0, XFER
+.Lloop1:
+ leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
+ vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
- vpaddd K256+1*32(SRND), X0, XFER
+ leaq K256+1*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
- vpaddd K256+2*32(SRND), X0, XFER
+ leaq K256+2*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
- vpaddd K256+3*32(SRND), X0, XFER
+ leaq K256+3*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
add $4*32, SRND
cmp $3*4*32, SRND
- jb loop1
+ jb .Lloop1
-loop2:
+.Lloop2:
## Do last 16 rounds with no scheduling
- vpaddd K256+0*32(SRND), X0, XFER
+ leaq K256+0*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 0*32
- vpaddd K256+1*32(SRND), X1, XFER
+ leaq K256+1*32(%rip), INP
+ vpaddd (INP, SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 1*32
add $2*32, SRND
@@ -624,7 +630,7 @@ loop2:
vmovdqa X3, X1
cmp $4*4*32, SRND
- jb loop2
+ jb .Lloop2
mov _CTX(%rsp), CTX
mov _INP(%rsp), INP
@@ -639,17 +645,17 @@ loop2:
addm (4*7)(CTX),h
cmp _INP_END(%rsp), INP
- ja done_hash
+ ja .Ldone_hash
#### Do second block using previously scheduled results
xor SRND, SRND
.align 16
-loop3:
+.Lloop3:
DO_4ROUNDS _XFER + 0*32 + 16
DO_4ROUNDS _XFER + 1*32 + 16
add $2*32, SRND
cmp $4*4*32, SRND
- jb loop3
+ jb .Lloop3
mov _CTX(%rsp), CTX
mov _INP(%rsp), INP
@@ -665,10 +671,10 @@ loop3:
addm (4*7)(CTX),h
cmp _INP_END(%rsp), INP
- jb loop0
- ja done_hash
+ jb .Lloop0
+ ja .Ldone_hash
-do_last_block:
+.Ldo_last_block:
VMOVDQ 0*16(INP),XWORD0
VMOVDQ 1*16(INP),XWORD1
VMOVDQ 2*16(INP),XWORD2
@@ -679,9 +685,9 @@ do_last_block:
vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2
vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3
- jmp last_block_enter
+ jmp .Llast_block_enter
-only_one_block:
+.Lonly_one_block:
## load initial digest
mov (4*0)(CTX),a
@@ -698,9 +704,9 @@ only_one_block:
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
mov CTX, _CTX(%rsp)
- jmp do_last_block
+ jmp .Ldo_last_block
-done_hash:
+.Ldone_hash:
mov %rbp, %rsp
pop %rbp
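
sha256-avx2 is the one routine where the constant table is indexed by a live register (SRND counts rounds), so a scratch register is unavoidable; INP can serve because the input pointer was already saved to _INP(%rsp) before the round loops, which is what the "reuse INP as scratch reg" comment records. The transformation, as applied above:

	leaq	K256+1*32(%rip), INP	# INP's real value lives in _INP(%rsp)
	vpaddd	(INP, SRND), X1, XFER	# was: vpaddd K256+1*32(SRND), X1, XFER
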
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index 959288eecc68..93264ee44543 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -369,7 +369,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
and $~15, %rsp
shl $6, NUM_BLKS # convert to bytes
- jz done_hash
+ jz .Ldone_hash
add INP, NUM_BLKS
mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data
@@ -387,7 +387,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
movdqa _SHUF_00BA(%rip), SHUF_00BA
movdqa _SHUF_DC00(%rip), SHUF_DC00
-loop0:
+.Lloop0:
lea K256(%rip), TBL
## byte swap first 16 dwords
@@ -401,7 +401,7 @@ loop0:
## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
-loop1:
+.Lloop1:
movdqa (TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
@@ -424,10 +424,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
sub $1, SRND
- jne loop1
+ jne .Lloop1
mov $2, SRND
-loop2:
+.Lloop2:
paddd (TBL), X0
movdqa X0, _XFER(%rsp)
DO_ROUND 0
@@ -446,7 +446,7 @@ loop2:
movdqa X3, X1
sub $1, SRND
- jne loop2
+ jne .Lloop2
addm (4*0)(CTX),a
addm (4*1)(CTX),b
@@ -460,9 +460,9 @@ loop2:
mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
- jne loop0
+ jne .Lloop0
-done_hash:
+.Ldone_hash:
mov %rbp, %rsp
popq %rbp
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index b0984f19fdb4..d902b8ea0721 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -276,7 +276,7 @@ frame_size = frame_WK + WK_SIZE
########################################################################
SYM_TYPED_FUNC_START(sha512_transform_avx)
test msglen, msglen
- je nowork
+ je .Lnowork
# Save GPRs
push %rbx
@@ -291,7 +291,7 @@ SYM_TYPED_FUNC_START(sha512_transform_avx)
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
-updateblock:
+.Lupdateblock:
# Load state variables
mov DIGEST(0), a_64
@@ -348,7 +348,7 @@ updateblock:
# Advance to next message block
add $16*8, msg
dec msglen
- jnz updateblock
+ jnz .Lupdateblock
# Restore Stack Pointer
mov %rbp, %rsp
@@ -361,7 +361,7 @@ updateblock:
pop %r12
pop %rbx
-nowork:
+.Lnowork:
RET
SYM_FUNC_END(sha512_transform_avx)
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index b1ca99055ef9..f08496cd6870 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -581,7 +581,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
and $~(0x20 - 1), %rsp
shl $7, NUM_BLKS # convert to bytes
- jz done_hash
+ jz .Ldone_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, frame_INPEND(%rsp)
@@ -600,7 +600,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
-loop0:
+.Lloop0:
lea K512(%rip), TBL
## byte swap first 16 dwords
@@ -615,7 +615,7 @@ loop0:
movq $4, frame_SRND(%rsp)
.align 16
-loop1:
+.Lloop1:
vpaddq (TBL), Y_0, XFER
vmovdqa XFER, frame_XFER(%rsp)
FOUR_ROUNDS_AND_SCHED
@@ -634,10 +634,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
subq $1, frame_SRND(%rsp)
- jne loop1
+ jne .Lloop1
movq $2, frame_SRND(%rsp)
-loop2:
+.Lloop2:
vpaddq (TBL), Y_0, XFER
vmovdqa XFER, frame_XFER(%rsp)
DO_4ROUNDS
@@ -650,7 +650,7 @@ loop2:
vmovdqa Y_3, Y_1
subq $1, frame_SRND(%rsp)
- jne loop2
+ jne .Lloop2
mov frame_CTX(%rsp), CTX2
addm 8*0(CTX2), a
@@ -665,9 +665,9 @@ loop2:
mov frame_INP(%rsp), INP
add $128, INP
cmp frame_INPEND(%rsp), INP
- jne loop0
+ jne .Lloop0
-done_hash:
+.Ldone_hash:
# Restore Stack Pointer
mov %rbp, %rsp
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index c06afb5270e5..65be30156816 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -278,7 +278,7 @@ frame_size = frame_WK + WK_SIZE
SYM_TYPED_FUNC_START(sha512_transform_ssse3)
test msglen, msglen
- je nowork
+ je .Lnowork
# Save GPRs
push %rbx
@@ -293,7 +293,7 @@ SYM_TYPED_FUNC_START(sha512_transform_ssse3)
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
-updateblock:
+.Lupdateblock:
# Load state variables
mov DIGEST(0), a_64
@@ -350,7 +350,7 @@ updateblock:
# Advance to next message block
add $16*8, msg
dec msglen
- jnz updateblock
+ jnz .Lupdateblock
# Restore Stack Pointer
mov %rbp, %rsp
@@ -363,7 +363,7 @@ updateblock:
pop %r12
pop %rbx
-nowork:
+.Lnowork:
RET
SYM_FUNC_END(sha512_transform_ssse3)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 52398d49bc2f..69ae5e1b3120 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -12,6 +12,7 @@
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
+#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
diff --git a/crypto/acompress.c b/crypto/acompress.c
index c32c72048a1c..82a290df2822 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -6,25 +6,35 @@
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
+
+#include <crypto/internal/acompress.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
+
+struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
-#ifdef CONFIG_NET
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, calg.base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static int __maybe_unused crypto_acomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
@@ -34,12 +44,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
-#else
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -89,13 +93,44 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+ struct crypto_istat_compress *istat = comp_get_stat(calg);
+ struct crypto_stat_compress racomp;
+
+ memset(&racomp, 0, sizeof(racomp));
+
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
+ racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+ racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+ racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
+ racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+ racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_acomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -147,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
}
EXPORT_SYMBOL_GPL(acomp_request_free);
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
{
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
- base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
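
The acompress conversion leans on the kernel's IS_ENABLED() idiom: the statistics code is always compiled and type-checked, but the compiler discards it entirely when CONFIG_CRYPTO_STATS is off, so the call sites need no #ifdef. A minimal userspace approximation of the pattern follows; CONFIG_STATS, struct istat and the function names are illustrative stand-ins, not kernel API.

#include <stdatomic.h>
#include <string.h>

#define CONFIG_STATS 0          /* hypothetical flag; flip to 1 to count */
#define IS_ENABLED(cfg) (cfg)   /* simplified stand-in for the kernel macro */

struct istat {
	atomic_ullong compress_cnt;
	atomic_ullong compress_tlen;
};

struct alg {
	struct istat stat;
};

/* mirrors comp_prepare_alg(): zero the counters only when stats exist
 * (memset is fine here because the object is not yet shared) */
static void prepare_alg(struct alg *alg)
{
	if (IS_ENABLED(CONFIG_STATS))
		memset(&alg->stat, 0, sizeof(alg->stat));
}

/* the branch always type-checks but is dead code when the flag is 0 */
static void count_compress(struct alg *alg, unsigned long slen)
{
	if (IS_ENABLED(CONFIG_STATS)) {
		atomic_fetch_add(&alg->stat.compress_cnt, 1);
		atomic_fetch_add(&alg->stat.compress_tlen, slen);
	}
}
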
diff --git a/crypto/aead.c b/crypto/aead.c
index 16991095270d..ffc48a7dfb34 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -8,17 +8,27 @@
*/
#include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -80,39 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stats_aead_encrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
- ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stats_aead_decrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -142,8 +175,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-#ifdef CONFIG_NET
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_aead_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@@ -159,12 +192,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
-#else
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -188,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
+static int __maybe_unused crypto_aead_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+ struct crypto_istat_aead *istat = aead_get_stat(aead);
+ struct crypto_stat_aead raead;
+
+ memset(&raead, 0, sizeof(raead));
+
+ strscpy(raead.type, "aead", sizeof(raead.type));
+
+ raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@@ -195,7 +242,12 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_aead_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_aead_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@@ -219,6 +271,7 @@ EXPORT_SYMBOL_GPL(crypto_alloc_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
+ struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -232,6 +285,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
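
The new crypto_aead_errstat() helper encodes one rule worth making explicit: -EINPROGRESS and -EBUSY mean an asynchronous request is still in flight, not that it failed, so they must not bump err_cnt. A standalone sketch of that rule (struct and function names are illustrative):

#include <errno.h>
#include <stdatomic.h>

struct istat { atomic_ullong err_cnt; };

/* count only real failures; in-flight indicators pass through */
static int errstat(struct istat *istat, int err)
{
	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic_fetch_add(&istat->err_cnt, 1);
	return err;     /* hand the original code back unchanged */
}
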
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ff8c79d975c1..b8a607928e72 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -8,19 +8,18 @@
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include "internal.h"
+#include "hash.h"
static const struct crypto_type crypto_ahash_type;
@@ -296,55 +295,60 @@ static int crypto_ahash_op(struct ahash_request *req,
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op, has_state);
+ err = ahash_op_unaligned(req, op, has_state);
+ else
+ err = op(req);
- return op(req);
+ return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&hash_get_stat(alg)->hash_cnt);
+
+ return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
+
+ return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
- crypto_stats_get(alg);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_ahash_op(req, tfm->digest, false);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ return crypto_hash_errstat(alg, -ENOKEY);
+
+ return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -465,8 +469,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
ahash->free(ahash);
}
-#ifdef CONFIG_NET
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_ahash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
@@ -479,12 +483,6 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -498,6 +496,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
+static int __maybe_unused crypto_ahash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@@ -505,7 +509,12 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_ahash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_ahash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
@@ -534,17 +543,70 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
+{
+ struct hash_alg_common *halg = crypto_hash_alg_common(hash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *nhash;
+ struct ahash_alg *alg;
+ int err;
+
+ if (!crypto_hash_alg_has_setkey(halg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
+
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->init = hash->init;
+ nhash->update = hash->update;
+ nhash->final = hash->final;
+ nhash->finup = hash->finup;
+ nhash->digest = hash->digest;
+ nhash->export = hash->export;
+ nhash->import = hash->import;
+ nhash->setkey = hash->setkey;
+ nhash->reqsize = hash->reqsize;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_clone_shash_ops_async(nhash, hash);
+
+ err = -ENOSYS;
+ alg = crypto_ahash_alg(hash);
+ if (!alg->clone_tfm)
+ goto out_free_nhash;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err)
+ goto out_free_nhash;
+
+ return nhash;
+
+out_free_nhash:
+ crypto_free_ahash(nhash);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
+ int err;
- if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
- alg->halg.statesize > HASH_MAX_STATESIZE ||
- alg->halg.statesize == 0)
+ if (alg->halg.statesize == 0)
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_ahash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
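
crypto_clone_ahash() splits into two cases: a transform without setkey holds no per-key state, so taking a reference on the existing object is enough, while a keyed transform needs a fresh allocation plus the algorithm's clone hook to deep-copy key material, and cloning fails with -ENOSYS when no such hook exists. A simplified userspace model of that decision (all names here are illustrative):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct tfm {
	bool keyed;     /* has per-key state that must be copied */
	int refcnt;
	int (*clone)(struct tfm *dst, const struct tfm *src);
};

static struct tfm *clone_tfm(struct tfm *t)
{
	struct tfm *n;

	if (!t->keyed) {        /* stateless: share the existing object */
		t->refcnt++;
		return t;
	}

	n = malloc(sizeof(*n));
	if (!n)
		return NULL;
	memcpy(n, t, sizeof(*n));       /* copy ops and flags */
	n->refcnt = 1;

	/* no clone hook means the algorithm cannot be cloned (-ENOSYS) */
	if (!t->clone || t->clone(n, t)) {
		free(n);
		return NULL;
	}
	return n;
}
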
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index ab975a420e1e..186e762b509a 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -5,23 +5,20 @@
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
+#include <crypto/internal/akcipher.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <crypto/akcipher.h>
-#include <crypto/internal/akcipher.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_akcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -32,12 +29,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
-#else
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -76,6 +67,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
+static int __maybe_unused crypto_akcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+ struct crypto_istat_akcipher *istat;
+ struct crypto_stat_akcipher rakcipher;
+
+ istat = akcipher_get_stat(akcipher);
+
+ memset(&rakcipher, 0, sizeof(rakcipher));
+
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+ rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@@ -83,7 +98,12 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_akcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_akcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -108,11 +128,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d08f864f08be..d7eb8f9e9883 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -339,8 +339,6 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&alg->cra_list, &crypto_alg_list);
- crypto_stats_init(alg);
-
if (larval) {
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
@@ -493,7 +491,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+ return;
+
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -1038,219 +1038,6 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg)
-{
- memset(&alg->stats, 0, sizeof(alg->stats));
-}
-EXPORT_SYMBOL_GPL(crypto_stats_init);
-
-void crypto_stats_get(struct crypto_alg *alg)
-{
- crypto_alg_get(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_get);
-
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
-
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
-
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.sign_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
-
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.verify_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
-
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.compress_cnt);
- atomic64_add(slen, &alg->stats.compress.compress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_compress);
-
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.decompress_cnt);
- atomic64_add(slen, &alg->stats.compress.decompress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_decompress);
-
-void crypto_stats_ahash_update(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.hash.err_cnt);
- else
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
-
-void crypto_stats_ahash_final(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.hash.err_cnt);
- } else {
- atomic64_inc(&alg->stats.hash.hash_cnt);
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
-
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.setsecret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
-
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
-
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
-
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.rng.err_cnt);
- else
- atomic64_inc(&alg->stats.rng.seed_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
-
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.rng.err_cnt);
- } else {
- atomic64_inc(&alg->stats.rng.generate_cnt);
- atomic64_add(dlen, &alg->stats.rng.generate_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
-
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
-
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
-#endif
-
static void __init crypto_start_tests(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
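
The refcount check in crypto_unregister_alg() is deliberately downgraded from BUG_ON() to WARN_ON(): unregistering an algorithm that still has users now logs a warning and leaks the object instead of halting the machine. Roughly, in userspace terms:

#include <stdio.h>

struct alg { int refcnt; void (*destroy)(struct alg *); };

static void unregister_alg(struct alg *alg)
{
	if (alg->refcnt != 1) {
		fprintf(stderr, "WARN: alg still in use (refcnt %d)\n",
			alg->refcnt);
		return;         /* deliberately leak rather than crash */
	}
	if (alg->destroy)
		alg->destroy(alg);
}
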
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 1d017ec5c63c..63af72e19fa8 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -235,24 +235,31 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[HASH_MAX_STATESIZE];
+ struct crypto_ahash *tfm;
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
+ char *state;
bool more;
int err;
+ tfm = crypto_ahash_reqtfm(req);
+ state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!state)
+ goto out;
+
lock_sock(sk);
more = ctx->more;
err = more ? crypto_ahash_export(req, state) : 0;
release_sock(sk);
if (err)
- return err;
+ goto out_free_state;
err = af_alg_accept(ask->parent, newsock, kern);
if (err)
- return err;
+ goto out_free_state;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
@@ -260,7 +267,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
ctx2->more = more;
if (!more)
- return err;
+ goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
@@ -268,6 +275,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
sock_put(sk2);
}
+out_free_state:
+ kfree_sensitive(state);
+
+out:
return err;
}
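
With ahash_prepare_alg() no longer bounding statesize by HASH_MAX_STATESIZE, hash_accept() cannot keep the exported state in a fixed stack array any more; it sizes the buffer from crypto_ahash_statesize() at run time and scrubs it with kfree_sensitive() on every exit path. A userspace approximation (names are illustrative):

#include <stdlib.h>
#include <string.h>

struct hash {
	size_t statesize;
	int (*export)(struct hash *h, void *out);
};

static int accept_state(struct hash *h)
{
	char *state;
	int err;

	state = malloc(h->statesize);   /* sized per transform */
	if (!state)
		return -1;

	err = h->export(h, state);

	/* kfree_sensitive() equivalent; production code would use
	 * explicit_bzero() so the scrub cannot be optimized away */
	memset(state, 0, h->statesize);
	free(state);
	return err;
}
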
diff --git a/crypto/api.c b/crypto/api.c
index e67cc63368ed..d375e8cd770d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -408,6 +408,7 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
goto out_err;
tfm->__crt_alg = alg;
+ refcount_set(&tfm->refcnt, 1);
err = crypto_init_ops(tfm, type, mask);
if (err)
@@ -487,26 +488,43 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-void *crypto_create_tfm_node(struct crypto_alg *alg,
- const struct crypto_type *frontend,
- int node)
+static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
+ const struct crypto_type *frontend, int node,
+ gfp_t gfp)
{
- char *mem;
- struct crypto_tfm *tfm = NULL;
+ struct crypto_tfm *tfm;
unsigned int tfmsize;
unsigned int total;
- int err = -ENOMEM;
+ char *mem;
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
- mem = kzalloc_node(total, GFP_KERNEL, node);
+ mem = kzalloc_node(total, gfp, node);
if (mem == NULL)
- goto out_err;
+ return ERR_PTR(-ENOMEM);
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
tfm->node = node;
+ refcount_set(&tfm->refcnt, 1);
+
+ return mem;
+}
+
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend,
+ int node)
+{
+ struct crypto_tfm *tfm;
+ char *mem;
+ int err;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
+ if (IS_ERR(mem))
+ goto out;
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
err = frontend->init_tfm(tfm);
if (err)
@@ -523,13 +541,38 @@ out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(mem);
-out_err:
mem = ERR_PTR(err);
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm)
+{
+ struct crypto_alg *alg = otfm->__crt_alg;
+ struct crypto_tfm *tfm;
+ char *mem;
+
+ mem = ERR_PTR(-ESTALE);
+ if (unlikely(!crypto_mod_get(alg)))
+ goto out;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
+ if (IS_ERR(mem)) {
+ crypto_mod_put(alg);
+ goto out;
+ }
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->crt_flags = otfm->crt_flags;
+ tfm->exit = otfm->exit;
+
+out:
+ return mem;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_tfm);
+
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
@@ -619,6 +662,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
if (IS_ERR_OR_NULL(mem))
return;
+ if (!refcount_dec_and_test(&tfm->refcnt))
+ return;
alg = tfm->__crt_alg;
if (!tfm->exit && alg->cra_exit)
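
Setting refcnt to 1 at allocation and adding the early return in crypto_destroy_tfm() turns a tfm into a shared object: clones take references and only the final put runs the exit handler and frees the memory. A sketch of that lifetime rule with C11 atomics; like refcount_inc_not_zero(), the get path refuses to revive an object that already reached zero:

#include <stdatomic.h>
#include <stdlib.h>

struct tfm {
	atomic_int refcnt;
	void (*exit)(struct tfm *);
};

static struct tfm *tfm_get(struct tfm *t)
{
	int old = atomic_load(&t->refcnt);

	do {
		if (old == 0)
			return NULL;    /* already dying; do not resurrect */
	} while (!atomic_compare_exchange_weak(&t->refcnt, &old, old + 1));

	return t;
}

static void tfm_put(struct tfm *t)
{
	if (atomic_fetch_sub(&t->refcnt, 1) != 1)
		return;                 /* other users remain */
	if (t->exit)
		t->exit(t);
	free(t);
}
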
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f9cdc5e91664..5e2b2680d7db 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -11,8 +11,8 @@
#include <linux/async_tx.h>
#include <linux/gfp.h>
-/**
- * pq_scribble_page - space to hold throwaway P or Q buffer for
+/*
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
@@ -28,7 +28,7 @@ static struct page *pq_scribble_page;
#define MAX_DISKS 255
-/**
+/*
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
@@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
return tx;
}
-/**
+/*
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
@@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9256934312d7..ad72057a5e0d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -124,7 +124,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
- * submit_disposition - flags for routing an incoming operation
+ * enum submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
+ * @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
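
The async_tx hunks are kernel-doc hygiene: '/**' may only introduce documentation for a symbol kernel-doc can parse (plain explanatory comments get '/*'), the name line needs the 'enum' or 'struct' keyword when documenting one, and each parameter line must name the real parameter and use '@name:' with a colon. The corrected async_tx_quiesce() header shows the target form:

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
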
diff --git a/crypto/compress.h b/crypto/compress.h
new file mode 100644
index 000000000000..19f65516d699
--- /dev/null
+++ b/crypto/compress.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_COMPRESS_H
+#define _LOCAL_CRYPTO_COMPRESS_H
+
+#include "internal.h"
+
+struct acomp_req;
+struct comp_alg_common;
+struct sk_buff;
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 37365ed30b38..bbcc368b6a55 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -427,12 +427,12 @@ err_free_inst:
return err;
}
-static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct ahash_instance *inst = ahash_alg_instance(tfm);
+ struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
struct crypto_shash_spawn *spawn = &ictx->spawn;
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_spawn_shash(spawn);
@@ -440,15 +440,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
return PTR_ERR(hash);
ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0;
}
-static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
+ struct crypto_ahash *tfm)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_shash *hash;
+
+ hash = crypto_clone_shash(ctx->child);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ nctx->child = hash;
+ return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
crypto_free_shash(ctx->child);
}
@@ -677,8 +692,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
- inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.init_tfm = cryptd_hash_init_tfm;
+ inst->alg.clone_tfm = cryptd_hash_clone_tfm;
+ inst->alg.exit_tfm = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 154884bf9275..d4f3d39b5137 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -6,18 +6,14 @@
*
*/
-#include <linux/crypto.h>
-#include <linux/cryptouser.h>
-#include <linux/sched.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/cryptouser.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/rng.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
-
-#include "internal.h"
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
@@ -28,23 +24,6 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_aead raead;
-
- memset(&raead, 0, sizeof(raead));
-
- strscpy(raead.type, "aead", sizeof(raead.type));
-
- raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
- raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
- raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
- raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
- raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_cipher rcipher;
@@ -53,12 +32,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
- rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
-
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
@@ -69,112 +42,10 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
memset(&rcomp, 0, sizeof(rcomp));
strscpy(rcomp.type, "compression", sizeof(rcomp.type));
- rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
-static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_compress racomp;
-
- memset(&racomp, 0, sizeof(racomp));
-
- strscpy(racomp.type, "acomp", sizeof(racomp.type));
- racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_akcipher rakcipher;
-
- memset(&rakcipher, 0, sizeof(rakcipher));
-
- strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
- rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
- rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
- rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
- rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
- rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
- rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
- rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
- sizeof(rakcipher), &rakcipher);
-}
-
-static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_kpp rkpp;
-
- memset(&rkpp, 0, sizeof(rkpp));
-
- strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
- rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
- rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
- rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
-static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "ahash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "shash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_rng rrng;
-
- memset(&rrng, 0, sizeof(rrng));
-
- strscpy(rrng.type, "rng", sizeof(rrng.type));
-
- rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
- rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
- rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
- rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
static int crypto_reportstat_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg,
struct sk_buff *skb)
@@ -204,15 +75,13 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
goto out;
}
- switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
- case CRYPTO_ALG_TYPE_AEAD:
- if (crypto_report_aead(skb, alg))
+ if (alg->cra_type && alg->cra_type->report_stat) {
+ if (alg->cra_type->report_stat(skb, alg))
goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SKCIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
+ goto out;
+ }
+
+ switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
@@ -221,34 +90,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_ACOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SCOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AKCIPHER:
- if (crypto_report_akcipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_KPP:
- if (crypto_report_kpp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- if (crypto_report_ahash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_HASH:
- if (crypto_report_shash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_RNG:
- if (crypto_report_rng(skb, alg))
- goto nla_put_failure;
- break;
default:
pr_err("ERROR: Unhandled alg %d in %s\n",
alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
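
The rewrite of crypto_reportstat_one() replaces most of the per-type switch with virtual dispatch: when an algorithm's cra_type supplies report_stat, that hook wins, and the old flag switch survives only for types that have not been converted. The shape of the dispatch, with illustrative names:

#define TYPE_MASK   0x0000000f
#define TYPE_CIPHER 0x00000001

struct alg;
struct alg_type { int (*report_stat)(void *skb, struct alg *alg); };
struct alg { const struct alg_type *type; unsigned int flags; };

static int report_cipher(void *skb, struct alg *alg) { return 0; }

static int reportstat_one(void *skb, struct alg *alg)
{
	/* converted types carry their own hook on the type object */
	if (alg->type && alg->type->report_stat)
		return alg->type->report_stat(skb, alg);

	/* the legacy switch remains only for unconverted types */
	switch (alg->flags & TYPE_MASK) {
	case TYPE_CIPHER:
		return report_cipher(skb, alg);
	default:
		return -1;
	}
}
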
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 982d4ca4526d..ff4ebbc68efa 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
- if (fips_enabled || err != -ENOENT)
+ if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
diff --git a/crypto/fips.c b/crypto/fips.c
index b05d3c7b3ca5..92fd506abb21 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -66,20 +66,11 @@ static struct ctl_table crypto_sysctl_table[] = {
{}
};
-static struct ctl_table crypto_dir_table[] = {
- {
- .procname = "crypto",
- .mode = 0555,
- .child = crypto_sysctl_table
- },
- {}
-};
-
static struct ctl_table_header *crypto_sysctls;
static void crypto_proc_fips_init(void)
{
- crypto_sysctls = register_sysctl_table(crypto_dir_table);
+ crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
}
static void crypto_proc_fips_exit(void)
diff --git a/crypto/hash.h b/crypto/hash.h
new file mode 100644
index 000000000000..7e6c1a948692
--- /dev/null
+++ b/crypto/hash.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_HASH_H
+#define _LOCAL_CRYPTO_HASH_H
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
+
+#include "internal.h"
+
+static inline int crypto_hash_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg,
+ const char *type)
+{
+ struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
+ struct crypto_istat_hash *istat = hash_get_stat(halg);
+ struct crypto_stat_hash rhash;
+
+ memset(&rhash, 0, sizeof(rhash));
+
+ strscpy(rhash.type, type, sizeof(rhash.type));
+
+ rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
+ rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
+ rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
+}
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+ struct crypto_ahash *hash);
+
+int hash_prepare_alg(struct hash_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_HASH_H */
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 3610ff0b6739..09a7872b4060 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -160,6 +160,20 @@ static int hmac_init_tfm(struct crypto_shash *parent)
return 0;
}
+static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
+{
+ struct hmac_ctx *sctx = hmac_ctx(src);
+ struct hmac_ctx *dctx = hmac_ctx(dst);
+ struct crypto_shash *hash;
+
+ hash = crypto_clone_shash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
static void hmac_exit_tfm(struct crypto_shash *parent)
{
struct hmac_ctx *ctx = hmac_ctx(parent);
@@ -227,6 +241,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
+ inst->alg.clone_tfm = hmac_clone_tfm;
inst->alg.exit_tfm = hmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
diff --git a/crypto/internal.h b/crypto/internal.h
index 932f0aafddc3..8dd746b1130b 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <linux/completion.h>
+#include <linux/err.h>
#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -47,6 +48,8 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
static inline bool crypto_boot_test_finished(void)
{
@@ -103,6 +106,8 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -184,5 +189,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval)
return larval->alg.cra_driver_name[0];
}
+static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm)
+{
+ return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW);
+}
+
#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 2d115bec15ae..b9edfaa51b27 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -37,6 +37,7 @@
* DAMAGE.
*/
+#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -59,11 +60,6 @@ void jent_zfree(void *ptr)
kfree_sensitive(ptr);
}
-void jent_panic(char *s)
-{
- panic("%s", s);
-}
-
void jent_memcpy(void *dest, const void *src, unsigned int n)
{
memcpy(dest, src, n);
@@ -102,7 +98,6 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
- unsigned int reset_cnt;
};
static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -138,32 +133,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
spin_lock(&rng->jent_lock);
- /* Return a permanent error in case we had too many resets in a row. */
- if (rng->reset_cnt > (1<<10)) {
- ret = -EFAULT;
- goto out;
- }
-
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
- /* Reset RNG in case of health failures */
- if (ret < -1) {
- pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
- (ret == -2) ? "Repetition Count Test" :
- "Adaptive Proportion Test");
-
- rng->reset_cnt++;
-
+ if (ret == -3) {
+ /*
+ * Handle a permanent health test error. If the kernel
+ * was booted with fips=1, it implies that
+ * the entire kernel acts as a FIPS 140 module. In this case
+ * an SP800-90B permanent health test error is treated as
+ * a FIPS module error.
+ */
+ if (fips_enabled)
+ panic("Jitter RNG permanent health test failure\n");
+
+ pr_err("Jitter RNG permanent health test failure\n");
+ ret = -EFAULT;
+ } else if (ret == -2) {
+ /* Handle intermittent health test error */
+ pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
- } else {
- rng->reset_cnt = 0;
-
- /* Convert the Jitter RNG error into a usable error code */
- if (ret == -1)
- ret = -EINVAL;
+ } else if (ret == -1) {
+ /* Handle other errors */
+ ret = -EINVAL;
}
-out:
spin_unlock(&rng->jent_lock);
return ret;
@@ -197,6 +190,10 @@ static int __init jent_mod_init(void)
ret = jent_entropy_init();
if (ret) {
+ /* Handle permanent health test error */
+ if (fips_enabled)
+ panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
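
The reworked jent_kcapi_random() drops the reset counter and maps the collector's internal codes directly: -3 (permanent failure) becomes -EFAULT, or a panic when the kernel runs as a FIPS module; -2 (intermittent failure) becomes -EAGAIN so callers retry; -1 stays a plain -EINVAL. A userspace sketch of that mapping, with abort() standing in for panic():

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool fips_enabled;       /* stands in for the kernel's fips flag */

static int map_jent_error(int ret)
{
	switch (ret) {
	case -3:                    /* permanent health failure */
		if (fips_enabled)
			abort();    /* FIPS module error: stop hard */
		return -EFAULT;
	case -2:                    /* intermittent health failure */
		return -EAGAIN;
	case -1:                    /* usage error */
		return -EINVAL;
	default:
		return ret;         /* success path */
	}
}
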
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 93bff3213823..22f48bf4c6f5 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -85,10 +85,14 @@ struct rand_data {
* bit generation */
/* Repetition Count Test */
- int rct_count; /* Number of stuck values */
+ unsigned int rct_count; /* Number of stuck values */
- /* Adaptive Proportion Test for a significance level of 2^-30 */
+ /* Intermittent health test failure threshold of 2^-30 */
+#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+ /* Permanent health test failure threshold of 2^-60 */
+#define JENT_RCT_CUTOFF_PERMANENT 60
+#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
@@ -97,8 +101,6 @@ struct rand_data {
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int apt_base_set:1; /* APT base reference set? */
-
- unsigned int health_failure:1; /* Permanent health failure */
};
/* Flags that can be used to initialize the RNG */
@@ -169,19 +171,26 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
return;
}
- if (delta_masked == ec->apt_base) {
+ if (delta_masked == ec->apt_base)
ec->apt_count++;
- if (ec->apt_count >= JENT_APT_CUTOFF)
- ec->health_failure = 1;
- }
-
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
+/* APT health test failure detection */
+static int jent_apt_permanent_failure(struct rand_data *ec)
+{
+ return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_apt_failure(struct rand_data *ec)
+{
+ return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
+}
+
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
@@ -206,55 +215,14 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
- /*
- * If we have a count less than zero, a previous RCT round identified
- * a failure. We will not overwrite it.
- */
- if (ec->rct_count < 0)
- return;
-
if (stuck) {
ec->rct_count++;
-
- /*
- * The cutoff value is based on the following consideration:
- * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
- * In addition, we require an entropy value H of 1/OSR as this
- * is the minimum entropy required to provide full entropy.
- * Note, we collect 64 * OSR deltas for inserting them into
- * the entropy pool which should then have (close to) 64 bits
- * of entropy.
- *
- * Note, ec->rct_count (which equals to value B in the pseudo
- * code of SP800-90B section 4.4.1) starts with zero. Hence
- * we need to subtract one from the cutoff value as calculated
- * following SP800-90B.
- */
- if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
- ec->rct_count = -1;
- ec->health_failure = 1;
- }
} else {
+ /* Reset RCT */
ec->rct_count = 0;
}
}
-/*
- * Is there an RCT health test failure?
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
- */
-static int jent_rct_failure(struct rand_data *ec)
-{
- if (ec->rct_count < 0)
- return 1;
- return 0;
-}
-
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
@@ -303,18 +271,26 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
return 0;
}
-/*
- * Report any health test failures
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
- */
+/* RCT health test failure detection */
+static int jent_rct_permanent_failure(struct rand_data *ec)
+{
+ return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_rct_failure(struct rand_data *ec)
+{
+ return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
+}
+
+/* Report of health test failures */
static int jent_health_failure(struct rand_data *ec)
{
- return ec->health_failure;
+ return jent_rct_failure(ec) | jent_apt_failure(ec);
+}
+
+static int jent_permanent_health_failure(struct rand_data *ec)
+{
+ return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
}
/***************************************************************************
@@ -600,8 +576,8 @@ static void jent_gen_entropy(struct rand_data *ec)
*
* The following error codes can occur:
* -1 entropy_collector is NULL
- * -2 RCT failed
- * -3 APT test failed
+ * -2 Intermittent health failure
+ * -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@@ -616,39 +592,23 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
jent_gen_entropy(ec);
- if (jent_health_failure(ec)) {
- int ret;
-
- if (jent_rct_failure(ec))
- ret = -2;
- else
- ret = -3;
-
+ if (jent_permanent_health_failure(ec)) {
/*
- * Re-initialize the noise source
- *
- * If the health test fails, the Jitter RNG remains
- * in failure state and will return a health failure
- * during next invocation.
+ * At this point, the Jitter RNG instance is considered
+ * failed. The startup test is not rerun, because the
+ * caller is assumed to stop using this instance after
+ * a permanent failure.
*/
- if (jent_entropy_init())
- return ret;
-
- /* Set APT to initial state */
- jent_apt_reset(ec, 0);
- ec->apt_base_set = 0;
-
- /* Set RCT to initial state */
- ec->rct_count = 0;
-
- /* Re-enable Jitter RNG */
- ec->health_failure = 0;
-
+ return -3;
+ } else if (jent_health_failure(ec)) {
/*
- * Return the health test failure status to the
- * caller as the generated value is not appropriate.
+ * Perform the startup health tests and return a
+ * permanent error if they fail.
*/
- return ret;
+ if (jent_entropy_init())
+ return -3;
+
+ return -2;
}
if ((DATA_SIZE_BITS / 8) < len)
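
The restructured health tests keep one running counter per test and compare it against two cutoffs: crossing the 2^-30 bound (RCT 30, APT 325) is an intermittent failure that forces the startup tests to be re-run, while the 2^-60 bound (RCT 60, APT 355) is permanent and unrecoverable. The decision order in jent_read_entropy(), permanent first, can be modeled compactly:

#include <stdbool.h>

#define RCT_CUTOFF            30    /* SP800-90B sec 4.4.1 */
#define RCT_CUTOFF_PERMANENT  60
#define APT_CUTOFF            325   /* SP800-90B sec 4.4.2 */
#define APT_CUTOFF_PERMANENT  355

struct ec { unsigned int rct_count, apt_count; };

static bool health_failure(const struct ec *ec)
{
	return ec->rct_count >= RCT_CUTOFF || ec->apt_count >= APT_CUTOFF;
}

static bool permanent_health_failure(const struct ec *ec)
{
	return ec->rct_count >= RCT_CUTOFF_PERMANENT ||
	       ec->apt_count >= APT_CUTOFF_PERMANENT;
}

/* -3: permanent, -2: intermittent (retry after startup tests), 0: ok */
static int health_check(const struct ec *ec)
{
	if (permanent_health_failure(ec))
		return -3;
	if (health_failure(ec))
		return -2;
	return 0;
}
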
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index b7397b617ef0..5cc583f6bc6b 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -2,7 +2,6 @@
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
-extern void jent_panic(char *s);
extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 678e871ce418..74f2e8e918fa 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -5,23 +5,20 @@
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*/
+
+#include <crypto/internal/kpp.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_kpp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
@@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
-#else
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -75,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
kpp->free(kpp);
}
+static int __maybe_unused crypto_kpp_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct kpp_alg *kpp = __crypto_kpp_alg(alg);
+ struct crypto_istat_kpp *istat;
+ struct crypto_stat_kpp rkpp;
+
+ istat = kpp_get_stat(kpp);
+
+ memset(&rkpp, 0, sizeof(rkpp));
+
+ strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
+ rkpp.stat_generate_public_key_cnt =
+ atomic64_read(&istat->generate_public_key_cnt);
+ rkpp.stat_compute_shared_secret_cnt =
+ atomic64_read(&istat->compute_shared_secret_cnt);
+ rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
+}
+
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
@@ -82,7 +96,12 @@ static const struct crypto_type crypto_kpp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_kpp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_kpp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
@@ -112,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
static void kpp_prepare_alg(struct kpp_alg *alg)
{
+ struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)
diff --git a/crypto/rng.c b/crypto/rng.c
index fea082b25fe4..ffde0f64fb25 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -8,17 +8,17 @@
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <linux/atomic.h>
#include <crypto/internal/rng.h>
+#include <linux/atomic.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
#include "internal.h"
@@ -30,27 +30,30 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&rng_get_stat(alg)->seed_cnt);
+
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
+ err = -ENOMEM;
if (!buf)
- return -ENOMEM;
+ goto out;
err = get_random_bytes_wait(buf, slen);
if (err)
- goto out;
+ goto free_buf;
seed = buf;
}
- crypto_stats_get(alg);
- err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- crypto_stats_rng_seed(alg, err);
-out:
+ err = alg->seed(tfm, seed, slen);
+free_buf:
kfree_sensitive(buf);
- return err;
+out:
+ return crypto_rng_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
@@ -66,8 +69,8 @@ static unsigned int seedsize(struct crypto_alg *alg)
return ralg->seedsize;
}
-#ifdef CONFIG_NET
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_rng_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
@@ -79,12 +82,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
-#else
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -94,13 +91,39 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
+static int __maybe_unused crypto_rng_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct rng_alg *rng = __crypto_rng_alg(alg);
+ struct crypto_istat_rng *istat;
+ struct crypto_stat_rng rrng;
+
+ istat = rng_get_stat(rng);
+
+ memset(&rrng, 0, sizeof(rrng));
+
+ strscpy(rrng.type, "rng", sizeof(rrng.type));
+
+ rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
+ rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
+ rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
+ rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
+}
+
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_rng_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_rng_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
@@ -176,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
int crypto_register_rng(struct rng_alg *alg)
{
+ struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
@@ -185,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
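crypto_rng_reset() now routes every exit through crypto_rng_errstat(), so a failure is counted exactly once no matter which path is taken. The discipline is to set err before each check so the bare goto stays safe. A compilable userspace analogue of the control flow (stand-ins noted inline):

	#include <stdlib.h>
	#include <string.h>

	static long err_cnt;

	static int errstat(int err)
	{
		if (err)
			err_cnt++;		/* one bump per failed call */
		return err;
	}

	static int reset(const unsigned char *seed, unsigned int slen)
	{
		unsigned char *buf = NULL;
		int err;

		if (!seed && slen) {
			err = -12;		/* stand-in for -ENOMEM */
			buf = malloc(slen);
			if (!buf)
				goto out;	/* err already set */
			memset(buf, 0xaa, slen); /* get_random_bytes_wait() stand-in */
			seed = buf;
		}

		err = 0;			/* alg->seed() stand-in */
		free(buf);			/* kfree_sensitive() in the kernel */
	out:
		return errstat(err);
	}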
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 738f4f8f0f41..24138b42a648 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -6,23 +6,22 @@
* Copyright (c) 2016, Intel Corporation
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <linux/errno.h>
+
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
#include <linux/vmalloc.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
@@ -38,8 +37,8 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
-#ifdef CONFIG_NET
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_scomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
@@ -50,12 +49,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(rscomp), &rscomp);
}
-#else
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -247,7 +240,12 @@ static const struct crypto_type crypto_scomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_scomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
@@ -256,10 +254,11 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
- struct crypto_alg *base = &alg->base;
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_scomp_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);
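crypto_scomp_type can reuse crypto_acomp_report_stat() because both compression types now register through the same embedded common struct: the scomp path goes via alg->calg, and comp_prepare_alg() initialises the shared part (including clearing the cra_flags type bits, which is why the explicit mask disappears above). Roughly, under the layout this patch implies (a sketch, not the verbatim header):

	struct comp_alg_common {
		struct crypto_istat_compress stat;	/* assumed location of
							 * the shared counters */
		struct crypto_alg base;
	};

	struct scomp_alg {
		/* scomp-specific ops ... */
		struct comp_alg_common calg;	/* the "alg->calg.base" above */
	};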
diff --git a/crypto/shash.c b/crypto/shash.c
index 58b46f198449..5845b7d59b2f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -6,22 +6,31 @@
*/
#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include <linux/compiler.h>
-#include "internal.h"
+#include "hash.h"
#define MAX_SHASH_ALIGNMASK 63
static const struct crypto_type crypto_shash_type;
+static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
+{
+ return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
+{
+ return crypto_hash_errstat(&alg->halg, err);
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -114,11 +123,17 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
if ((unsigned long)data & alignmask)
- return shash_update_unaligned(desc, data, len);
+ err = shash_update_unaligned(desc, data, len);
+ else
+ err = shash->update(desc, data, len);
- return shash->update(desc, data, len);
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
@@ -155,19 +170,25 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out)
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&shash_get_stat(shash)->hash_cnt);
if ((unsigned long)out & alignmask)
- return shash_final_unaligned(desc, out);
+ err = shash_final_unaligned(desc, out);
+ else
+ err = shash->final(desc, out);
- return shash->final(desc, out);
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
+ return shash_update_unaligned(desc, data, len) ?:
+ shash_final_unaligned(desc, out);
}
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
@@ -176,11 +197,22 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_finup_unaligned(desc, data, len, out);
+ err = shash_finup_unaligned(desc, data, len, out);
+ else
+ err = shash->finup(desc, data, len, out);
- return shash->finup(desc, data, len, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
@@ -188,7 +220,8 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
- crypto_shash_finup(desc, data, len, out);
+ shash_update_unaligned(desc, data, len) ?:
+ shash_final_unaligned(desc, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -197,14 +230,23 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
- if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_digest_unaligned(desc, data, len, out);
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
- return shash->digest(desc, data, len, out);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ err = -ENOKEY;
+ else if (((unsigned long)data | (unsigned long)out) & alignmask)
+ err = shash_digest_unaligned(desc, data, len, out);
+ else
+ err = shash->digest(desc, data, len, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -403,6 +445,24 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
return 0;
}
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+ struct crypto_ahash *hash)
+{
+ struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
+ struct crypto_shash **ctx = crypto_ahash_ctx(hash);
+ struct crypto_shash *shash;
+
+ shash = crypto_clone_shash(*ctx);
+ if (IS_ERR(shash)) {
+ crypto_free_ahash(nhash);
+ return ERR_CAST(shash);
+ }
+
+ *nctx = shash;
+
+ return nhash;
+}
+
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -448,8 +508,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst)
shash->free(shash);
}
-#ifdef CONFIG_NET
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_shash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
@@ -463,12 +523,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -481,6 +535,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
+static int __maybe_unused crypto_shash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "shash");
+}
+
static const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
@@ -488,7 +548,12 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_shash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_shash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
@@ -517,13 +582,62 @@ int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_shash);
-static int shash_prepare_alg(struct shash_alg *alg)
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
+{
+ struct crypto_tfm *tfm = crypto_shash_tfm(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *nhash;
+ int err;
+
+ if (!crypto_shash_alg_has_setkey(alg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ if (!alg->clone_tfm)
+ return ERR_PTR(-ENOSYS);
+
+ nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->descsize = hash->descsize;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err) {
+ crypto_free_shash(nhash);
+ return ERR_PTR(err);
+ }
+
+ return nhash;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_shash);
+
+int hash_prepare_alg(struct hash_alg_common *alg)
{
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
- if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
- alg->descsize > HASH_MAX_DESCSIZE ||
- alg->statesize > HASH_MAX_STATESIZE)
+ if (alg->digestsize > HASH_MAX_DIGESTSIZE)
+ return -EINVAL;
+
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
+ return 0;
+}
+
+static int shash_prepare_alg(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
@@ -532,8 +646,11 @@ static int shash_prepare_alg(struct shash_alg *alg)
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_shash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
if (!alg->finup)
@@ -543,7 +660,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
if (!alg->export) {
alg->export = shash_default_export;
alg->import = shash_default_import;
- alg->statesize = alg->descsize;
+ alg->halg.statesize = alg->descsize;
}
if (!alg->setkey)
alg->setkey = shash_no_setkey;
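The stats hooks throughout shash rely on IS_ENABLED() rather than #ifdef: the condition is a compile-time constant, so the atomic64 updates are deleted entirely when CONFIG_CRYPTO_STATS is off, yet the code is still parsed and type-checked in both configurations. A userspace model of the effect (the kernel macro is more elaborate so it also copes with undefined symbols, but the folding is the same):

	#define CONFIG_DEMO_STATS 0
	#define IS_ENABLED(opt) (opt)

	static long hash_cnt;

	static int demo_final(void)
	{
		if (IS_ENABLED(CONFIG_DEMO_STATS))
			hash_cnt++;	/* folded away when the constant is 0 */
		return 0;		/* stand-in for shash->final() */
	}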
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7bf4871fec80..6caca02d7e55 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -15,11 +15,14 @@
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
@@ -77,6 +80,35 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct skcipher_alg, base);
+}
+
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+ struct skcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -605,34 +637,44 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->encrypt(req);
- crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->decrypt(req);
- crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
@@ -672,8 +714,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
@@ -686,12 +727,11 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "walksize : %u\n", skcipher->walksize);
}
-#ifdef CONFIG_NET
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_skcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
@@ -706,12 +746,28 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
-#else
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+
+static int __maybe_unused crypto_skcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
- return -ENOSYS;
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = skcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
-#endif
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_alg_extsize,
@@ -720,7 +776,12 @@ static const struct crypto_type crypto_skcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_skcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_skcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -775,6 +836,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -790,6 +852,9 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
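Two details of these helpers are easy to miss. skcipher_get_stat() returns NULL when CONFIG_CRYPTO_STATS is off, which is safe only because crypto_skcipher_errstat() bails out on !IS_ENABLED() before dereferencing it; and -EINPROGRESS/-EBUSY are deliberately not counted as errors, since async backends return them for requests that are merely still in flight. The contract, condensed:

	static inline int errstat(struct crypto_istat_cipher *istat, int err)
	{
		if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
			return err;	/* istat may be NULL on this path */

		if (err && err != -EINPROGRESS && err != -EBUSY)
			atomic64_inc(&istat->err_cnt);	/* real failures only */

		return err;
	}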
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 6521feec7756..202ca1a3105d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -25,14 +25,17 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
-#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/moduleparam.h>
-#include <linux/jiffies.h>
#include <linux/timex.h>
-#include <linux/interrupt.h>
+
+#include "internal.h"
#include "tcrypt.h"
/*
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c91e93ece20b..216878c8bc3d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -860,12 +860,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
+ * need cryptographically secure random numbers. This greatly improves the
+ * performance of these tests, especially if they are run before the Linux RNG
+ * has been initialized or if they are run on a lockdep-enabled kernel.
+ */
+
+static inline void init_rnd_state(struct rnd_state *rng)
+{
+ prandom_seed_state(rng, get_random_u64());
+}
+
+static inline u8 prandom_u8(struct rnd_state *rng)
+{
+ return prandom_u32_state(rng);
+}
+
+static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
+{
+ /*
+ * This is slightly biased for non-power-of-2 values of 'ceil', but this
+ * isn't important here.
+ */
+ return prandom_u32_state(rng) % ceil;
+}
+
+static inline bool prandom_bool(struct rnd_state *rng)
+{
+ return prandom_u32_below(rng, 2);
+}
+
+static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
+ u32 floor, u32 ceil)
+{
+ return floor + prandom_u32_below(rng, ceil - floor + 1);
+}
+
/* Generate a random length in range [0, max_len], but prefer smaller values */
-static unsigned int generate_random_length(unsigned int max_len)
+static unsigned int generate_random_length(struct rnd_state *rng,
+ unsigned int max_len)
{
- unsigned int len = get_random_u32_below(max_len + 1);
+ unsigned int len = prandom_u32_below(rng, max_len + 1);
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
return len % 64;
case 1:
@@ -878,43 +916,44 @@ static unsigned int generate_random_length(unsigned int max_len)
}
/* Flip a random bit in the given nonempty data buffer */
-static void flip_random_bit(u8 *buf, size_t size)
+static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
- bitpos = get_random_u32_below(size * 8);
+ bitpos = prandom_u32_below(rng, size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
-static void flip_random_byte(u8 *buf, size_t size)
+static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
- buf[get_random_u32_below(size)] ^= 0xff;
+ buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
-static void mutate_buffer(u8 *buf, size_t size)
+static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
/* Sometimes flip some bits */
- if (get_random_u32_below(4) == 0) {
- num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
+ size * 8);
for (i = 0; i < num_flips; i++)
- flip_random_bit(buf, size);
+ flip_random_bit(rng, buf, size);
}
/* Sometimes flip some bytes */
- if (get_random_u32_below(4) == 0) {
- num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
for (i = 0; i < num_flips; i++)
- flip_random_byte(buf, size);
+ flip_random_byte(rng, buf, size);
}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
-static void generate_random_bytes(u8 *buf, size_t count)
+static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
u8 b;
u8 increment;
@@ -923,11 +962,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0)
return;
- switch (get_random_u32_below(8)) { /* Choose a generation strategy */
+ switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
b = 0x00;
break;
@@ -935,28 +974,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff;
break;
default:
- b = get_random_u8();
+ b = prandom_u8(rng);
break;
}
memset(buf, b, count);
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
- increment = get_random_u8();
- b = get_random_u8();
+ increment = prandom_u8(rng);
+ b = prandom_u8(rng);
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
default:
/* Fully random bytes */
- for (i = 0; i < count; i++)
- buf[i] = get_random_u8();
+ prandom_bytes_state(rng, buf, count);
}
}
-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+static char *generate_random_sgl_divisions(struct rnd_state *rng,
+ struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
bool gen_flushes, u32 req_flags)
{
@@ -967,24 +1006,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len;
const char *flushtype_str;
- if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
+ if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
else
- this_len = get_random_u32_inclusive(1, remaining);
+ this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
- if (get_random_u32_below(4) == 0)
- div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
- else if (get_random_u32_below(2) == 0)
- div->offset = get_random_u32_below(32);
+ if (prandom_u32_below(rng, 4) == 0)
+ div->offset = prandom_u32_inclusive(rng,
+ PAGE_SIZE - 128,
+ PAGE_SIZE - 1);
+ else if (prandom_bool(rng))
+ div->offset = prandom_u32_below(rng, 32);
else
- div->offset = get_random_u32_below(PAGE_SIZE);
- if (get_random_u32_below(8) == 0)
+ div->offset = prandom_u32_below(rng, PAGE_SIZE);
+ if (prandom_u32_below(rng, 8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
@@ -996,7 +1037,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- get_random_u32_below(2) == 0)
+ prandom_bool(rng))
div->nosimd = true;
switch (div->flush_type) {
@@ -1031,7 +1072,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
/* Generate a random testvec_config for fuzz testing */
-static void generate_random_testvec_config(struct testvec_config *cfg,
+static void generate_random_testvec_config(struct rnd_state *rng,
+ struct testvec_config *cfg,
char *name, size_t max_namelen)
{
char *p = name;
@@ -1043,7 +1085,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
@@ -1058,12 +1100,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (get_random_u32_below(2) == 0) {
+ if (prandom_bool(rng)) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
@@ -1078,36 +1120,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- get_random_u32_below(2) == 0) {
+ if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
p += scnprintf(p, end - p, " src_divs=[");
- p = generate_random_sgl_divisions(cfg->src_divs,
+ p = generate_random_sgl_divisions(rng, cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
FINALIZATION_TYPE_DIGEST),
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
p += scnprintf(p, end - p, " dst_divs=[");
- p = generate_random_sgl_divisions(cfg->dst_divs,
+ p = generate_random_sgl_divisions(rng, cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p, end, false,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
- if (get_random_u32_below(2) == 0) {
- cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->iv_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
- if (get_random_u32_below(2) == 0) {
- cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->key_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
@@ -1620,11 +1663,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
@@ -1642,15 +1688,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_hash_testvec(struct shash_desc *desc,
+static void generate_random_hash_testvec(struct rnd_state *rng,
+ struct shash_desc *desc,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
/* Data */
- vec->psize = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+ vec->psize = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
/*
* Key: length in range [1, maxkeysize], but usually choose maxkeysize.
@@ -1660,9 +1707,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->ksize = get_random_u32_inclusive(1, maxkeysize);
- generate_random_bytes((u8 *)vec->key, vec->ksize);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
vec->ksize);
@@ -1696,6 +1743,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_shash *generic_tfm = NULL;
struct shash_desc *generic_desc = NULL;
@@ -1709,6 +1757,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
+ init_rnd_state(&rng);
+
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -1777,10 +1827,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_desc, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
@@ -2182,11 +2233,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2202,6 +2256,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
struct aead_extra_tests_ctx {
+ struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
const struct alg_test_desc *test_desc;
@@ -2220,24 +2275,26 @@ struct aead_extra_tests_ctx {
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+static void mutate_aead_message(struct rnd_state *rng,
+ struct aead_testvec *vec, bool aad_iv,
unsigned int ivsize)
{
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
- if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
+ if (prandom_bool(rng) && vec->alen > aad_tail_size) {
/* Mutate the AAD */
- flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
- if (get_random_u32_below(2) == 0)
+ flip_random_bit(rng, (u8 *)vec->assoc,
+ vec->alen - aad_tail_size);
+ if (prandom_bool(rng))
return;
}
- if (get_random_u32_below(2) == 0) {
+ if (prandom_bool(rng)) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
- flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
+ flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
} else {
/* Mutate any part of the ciphertext */
- flip_random_bit((u8 *)vec->ctext, vec->clen);
+ flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
}
}
@@ -2248,7 +2305,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
*/
#define MIN_COLLISION_FREE_AUTHSIZE 8
-static void generate_aead_message(struct aead_request *req,
+static void generate_aead_message(struct rnd_state *rng,
+ struct aead_request *req,
const struct aead_test_suite *suite,
struct aead_testvec *vec,
bool prefer_inauthentic)
@@ -2257,17 +2315,18 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
- (prefer_inauthentic || get_random_u32_below(4) == 0);
+ (prefer_inauthentic ||
+ prandom_u32_below(rng, 4) == 0);
/* Generate the AAD. */
- generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
if (suite->aad_iv && vec->alen >= ivsize)
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
- if (inauthentic && get_random_u32_below(2) == 0) {
+ if (inauthentic && prandom_bool(rng)) {
/* Generate a random ciphertext. */
- generate_random_bytes((u8 *)vec->ctext, vec->clen);
+ generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
} else {
int i = 0;
struct scatterlist src[2], dst;
@@ -2279,7 +2338,7 @@ static void generate_aead_message(struct aead_request *req,
if (vec->alen)
sg_set_buf(&src[i++], vec->assoc, vec->alen);
if (vec->plen) {
- generate_random_bytes((u8 *)vec->ptext, vec->plen);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
sg_set_buf(&src[i++], vec->ptext, vec->plen);
}
sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
@@ -2299,7 +2358,7 @@ static void generate_aead_message(struct aead_request *req,
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
- mutate_aead_message(vec, suite->aad_iv, ivsize);
+ mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
}
vec->novrfy = 1;
if (suite->einval_allowed)
@@ -2313,7 +2372,8 @@ static void generate_aead_message(struct aead_request *req,
* If 'prefer_inauthentic' is true, then this function will generate inauthentic
* test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
-static void generate_random_aead_testvec(struct aead_request *req,
+static void generate_random_aead_testvec(struct rnd_state *rng,
+ struct aead_request *req,
struct aead_testvec *vec,
const struct aead_test_suite *suite,
unsigned int maxkeysize,
@@ -2329,18 +2389,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->klen = get_random_u32_below(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
- if (get_random_u32_below(4) == 0)
- authsize = get_random_u32_below(maxauthsize + 1);
+ if (prandom_u32_below(rng, 4) == 0)
+ authsize = prandom_u32_below(rng, maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
@@ -2349,11 +2409,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
/* AAD, plaintext, and ciphertext lengths */
- total_len = generate_random_length(maxdatasize);
- if (get_random_u32_below(4) == 0)
+ total_len = generate_random_length(rng, maxdatasize);
+ if (prandom_u32_below(rng, 4) == 0)
vec->alen = 0;
else
- vec->alen = generate_random_length(total_len);
+ vec->alen = generate_random_length(rng, total_len);
vec->plen = total_len - vec->alen;
vec->clen = vec->plen + authsize;
@@ -2364,7 +2424,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->novrfy = 0;
vec->crypt_error = 0;
if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
- generate_aead_message(req, suite, vec, prefer_inauthentic);
+ generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
"\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
@@ -2376,7 +2436,7 @@ static void try_to_generate_inauthentic_testvec(
int i;
for (i = 0; i < 10; i++) {
- generate_random_aead_testvec(ctx->req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
@@ -2407,7 +2467,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
*/
try_to_generate_inauthentic_testvec(ctx);
if (ctx->vec.novrfy) {
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
@@ -2497,12 +2558,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
* the other implementation against them.
*/
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_aead_testvec(generic_req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), false);
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
@@ -2541,6 +2603,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ init_rnd_state(&ctx->rng);
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->test_desc = test_desc;
@@ -2930,11 +2993,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2952,7 +3018,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_cipher_testvec(struct skcipher_request *req,
+static void generate_random_cipher_testvec(struct rnd_state *rng,
+ struct skcipher_request *req,
struct cipher_testvec *vec,
unsigned int maxdatasize,
char *name, size_t max_namelen)
@@ -2966,17 +3033,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->klen = get_random_u32_below(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Plaintext */
- vec->len = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->ptext, vec->len);
+ vec->len = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
/* If the key couldn't be set, no need to continue to encrypt. */
if (vec->setkey_error)
@@ -3018,6 +3085,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
@@ -3035,6 +3103,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (strncmp(algname, "kw(", 3) == 0)
return 0;
+ init_rnd_state(&rng);
+
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -3119,9 +3189,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+ generate_random_cipher_testvec(&rng, generic_req, &vec,
+ maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
@@ -4573,6 +4645,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(aes_cmac128_tv_template)
}
}, {
+ .alg = "cmac(camellia)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(camellia_cmac128_tv_template)
+ }
+ }, {
.alg = "cmac(des3_ede)",
.test = alg_test_hash,
.suite = {
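Every fuzz generator now threads an explicit struct rnd_state, seeded once per test from the system RNG. The stream stays fast and avoids entropy-pool (and lockdep) interaction while still differing between runs. Basic use of the lib/random32 primitives the new wrappers sit on (a sketch; the real code seeds once and passes the state down rather than reseeding per call):

	#include <linux/prandom.h>
	#include <linux/random.h>

	static u32 demo_pick(struct rnd_state *rng, u32 ceil)
	{
		/* non-cryptographic stream; '%' is slightly biased for
		 * non-power-of-2 ceilings, which fuzzing tolerates */
		return prandom_u32_state(rng) % ceil;
	}

	static void demo(void)
	{
		struct rnd_state rng;

		prandom_seed_state(&rng, get_random_u64()); /* one secure seed */
		(void)demo_pick(&rng, 100);
	}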
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index f10bfb9d9973..5ca7a412508f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25665,6 +25665,53 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
/*
* CAMELLIA test vectors.
*/
+static const struct hash_testvec camellia_cmac128_tv_template[] = {
+ { /* From draft-kato-ipsec-camellia-cmac96and128-01 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = zeroed_string,
+ .digest = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9"
+ "\xa0\x0f\x89\x64\x80\x94\xfc\x71",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+ .digest = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5"
+ "\x6d\x7d\x45\xa9\x5e\xe1\x79\x93",
+ .psize = 16,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
+ .digest = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61"
+ "\x44\xac\x18\x66\x13\x1d\x9f\x22",
+ .psize = 40,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .digest = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d"
+ "\x93\x9a\x8a\x4e\x19\x46\x6e\xe9",
+ .psize = 64,
+ .ksize = 16,
+ }
+};
static const struct cipher_testvec camellia_tv_template[] = {
{
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 8bb30282ca46..a4eb8e35f13d 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -18,9 +18,7 @@
struct meson_rng_data {
void __iomem *base;
- struct platform_device *pdev;
struct hwrng rng;
- struct clk *core_clk;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -33,47 +31,28 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
-static void meson_rng_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
- int ret;
+ struct clk *core_clk;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->pdev = pdev;
-
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->core_clk = devm_clk_get_optional(dev, "core");
- if (IS_ERR(data->core_clk))
- return dev_err_probe(dev, PTR_ERR(data->core_clk),
+ core_clk = devm_clk_get_optional_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk),
"Failed to get core clock\n");
- if (data->core_clk) {
- ret = clk_prepare_enable(data->core_clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
- data->core_clk);
- if (ret)
- return ret;
- }
-
data->rng.name = pdev->name;
data->rng.read = meson_rng_read;
- platform_set_drvdata(pdev, data);
-
return devm_hwrng_register(dev, &data->rng);
}
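devm_clk_get_optional_enabled() folds get, prepare_enable and the cleanup action into one call, which is what lets the driver drop both the stored clk pointer and meson_rng_clk_disable(). The semantics worth spelling out:

	core_clk = devm_clk_get_optional_enabled(dev, "core");
	if (IS_ERR(core_clk))
		return dev_err_probe(dev, PTR_ERR(core_clk),
				     "Failed to get core clock\n");
	/* NULL (clock not described) is a valid optional result: nothing
	 * was enabled, nothing needs undoing. Otherwise the clock is now
	 * enabled and devres disables/unprepares it on driver detach. */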
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 008e6db9ce01..7c8f3cb7c6af 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -84,7 +84,6 @@ struct xgene_rng_dev {
unsigned long failure_ts;/* First failure timestamp */
struct timer_list failure_timer;
struct device *dev;
- struct clk *clk;
};
static void xgene_rng_expired_timer(struct timer_list *t)
@@ -200,7 +199,7 @@ static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
{
- struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+ struct xgene_rng_dev *ctx = id;
/* RNG Alarm Counter overflow */
xgene_rng_chk_overflow(ctx);
@@ -314,6 +313,7 @@ static struct hwrng xgene_rng_func = {
static int xgene_rng_probe(struct platform_device *pdev)
{
struct xgene_rng_dev *ctx;
+ struct clk *clk;
int rc = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -337,58 +337,36 @@ static int xgene_rng_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
dev_name(&pdev->dev), ctx);
- if (rc) {
- dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n");
/* Enable IP clock */
- ctx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ctx->clk)) {
- dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
- } else {
- rc = clk_prepare_enable(ctx->clk);
- if (rc) {
- dev_warn(&pdev->dev,
- "clock prepare enable failed for RNG");
- return rc;
- }
- }
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n");
xgene_rng_func.priv = (unsigned long) ctx;
rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
- if (rc) {
- dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n");
rc = device_init_wakeup(&pdev->dev, 1);
- if (rc) {
- dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
- rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n");
return 0;
}
static int xgene_rng_remove(struct platform_device *pdev)
{
- struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
int rc;
rc = device_init_wakeup(&pdev->dev, 0);
if (rc)
dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
+ return 0;
}
static const struct of_device_id xgene_rng_of_match[] = {
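The repeated log-then-cleanup-then-return triples collapse because dev_err_probe() both logs and returns the error, and because devres now owns the clock there is no manual unwinding left to do. Roughly what the helper does internally (paraphrased, not the verbatim implementation):

	rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
			      dev_name(&pdev->dev), ctx);
	if (rc)
		return dev_err_probe(&pdev->dev, rc,
				     "Could not request RNG alarm IRQ\n");

	/* dev_err_probe(dev, err, ...) is approximately:
	 *	if (err != -EPROBE_DEFER)
	 *		dev_err(dev, ...);	// message plus errno, once
	 *	else
	 *		device_set_deferred_probe_reason(dev, ...);
	 *	return err;
	 */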
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3b2516d1433f..9c440cd0fed0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -240,21 +240,6 @@ config CRYPTO_DEV_TALITOS2
Say 'Y' here to use the Freescale Security Engine (SEC)
version 2 and following as found on MPC83xx, MPC85xx, etc ...
-config CRYPTO_DEV_IXP4XX
- tristate "Driver for IXP4xx crypto hardware acceleration"
- depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
- select CRYPTO_AES
- select CRYPTO_DES
- select CRYPTO_ECB
- select CRYPTO_CBC
- select CRYPTO_CTR
- select CRYPTO_LIB_DES
- select CRYPTO_AEAD
- select CRYPTO_AUTHENC
- select CRYPTO_SKCIPHER
- help
- Driver for the IXP4xx NPE crypto engine.
-
config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
@@ -502,10 +487,10 @@ config CRYPTO_DEV_MXS_DCP
To compile this driver as a module, choose M here: the module
will be called mxs-dcp.
-source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
+source "drivers/crypto/intel/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -774,7 +759,7 @@ config CRYPTO_DEV_ARTPEC6
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell family of security processors"
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
- default n
+ depends on HAS_IOMEM
select CRYPTO_HASH
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
@@ -810,6 +795,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
+ select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -820,7 +806,6 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
-source "drivers/crypto/keembay/Kconfig"
source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 476f1a25ca32..51d36701e785 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
-obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -33,7 +32,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
@@ -51,4 +49,4 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
-obj-y += keembay/
+obj-y += intel/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 50dc783821b6..d553f3f1efbe 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1101,7 +1101,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
u32 clr_val)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 1f77ebd73489..470122c87fea 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -289,7 +289,7 @@ static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
if (mode == ASPEED_RSA_EXP_MODE)
idx = acry_dev->exp_dw_mapping[j - 1];
- else if (mode == ASPEED_RSA_MOD_MODE)
+ else /* mode == ASPEED_RSA_MOD_MODE */
idx = acry_dev->mod_dw_mapping[j - 1];
dw_buf[idx] = cpu_to_le32(data);
@@ -712,7 +712,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
{
struct aspeed_acry_dev *acry_dev;
struct device *dev = &pdev->dev;
- struct resource *res;
int rc;
acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
@@ -724,13 +723,11 @@ static int aspeed_acry_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acry_dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- acry_dev->regs = devm_ioremap_resource(dev, res);
+ acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(acry_dev->regs))
return PTR_ERR(acry_dev->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- acry_dev->acry_sram = devm_ioremap_resource(dev, res);
+ acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(acry_dev->acry_sram))
return PTR_ERR(acry_dev->acry_sram);
@@ -782,7 +779,10 @@ static int aspeed_acry_probe(struct platform_device *pdev)
acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
&acry_dev->buf_dma_addr,
GFP_KERNEL);
- memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+ if (!acry_dev->buf_addr) {
+ rc = -ENOMEM;
+ goto err_engine_rsa_start;
+ }
aspeed_acry_register(acry_dev);
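
The aspeed-acry hunks above fold the platform_get_resource()/devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call and add the missing NULL check on dmam_alloc_coherent(). A minimal probe sketch of the same pattern, using a hypothetical "demo" device rather than the driver's real structures:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Sketch only: the "demo" names are illustrative, not from this patch. */
static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *regs;
	dma_addr_t dma;
	void *buf;

	/* Request and map MMIO region 0 in a single devm call. */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* dmam_alloc_coherent() returns NULL (not ERR_PTR) on failure. */
	buf = dmam_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	return 0;
}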
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index ed10f2ae4523..143d33fbb316 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -493,17 +493,11 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
if (req->cryptlen < ivsize)
return;
- if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ if (rctx->mode & AES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst)
- memcpy(req->iv, rctx->lastc, ivsize);
- else
- scatterwalk_map_and_copy(req->iv, req->src,
- req->cryptlen - ivsize,
- ivsize, 0);
- }
+ else
+ memcpy(req->iv, rctx->lastc, ivsize);
}
static inline struct atmel_aes_ctr_ctx *
@@ -1146,7 +1140,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if (opmode != AES_FLAGS_ECB &&
- !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ !(mode & AES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1341,7 +1335,7 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.init = atmel_aes_init_tfm,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index e7c1db2739ec..6bef634d3c86 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1948,14 +1948,32 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+ struct scatterlist *sgbuf;
size_t hs = ctx->hash_size;
size_t i, num_words = hs / sizeof(u32);
bool use_dma = false;
u32 mr;
/* Special case for empty message. */
- if (!req->nbytes)
- return atmel_sha_complete(dd, -EINVAL); // TODO:
+ if (!req->nbytes) {
+ req->nbytes = 0;
+ ctx->bufcnt = 0;
+ ctx->digcnt[0] = 0;
+ ctx->digcnt[1] = 0;
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ atmel_sha_fill_padding(ctx, 64);
+ break;
+
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
+ atmel_sha_fill_padding(ctx, 128);
+ break;
+ }
+ sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt);
+ }
/* Check DMA threshold and alignment. */
if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
@@ -1985,12 +2003,20 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ /* Special case for empty message. */
+ if (!req->nbytes) {
+ sgbuf = &dd->tmp;
+ req->nbytes = ctx->bufcnt;
+ } else {
+ sgbuf = req->src;
+ }
+
/* Process data. */
if (use_dma)
- return atmel_sha_dma_start(dd, req->src, req->nbytes,
+ return atmel_sha_dma_start(dd, sgbuf, req->nbytes,
atmel_sha_hmac_final_done);
- return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
+ return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true,
atmel_sha_hmac_final_done);
}
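
The empty-message path above reuses the driver's padding machinery: the algorithm-specific padding block is generated into ctx->buffer and then wrapped in a one-entry scatterlist (dd->tmp) so the existing DMA and CPU paths can consume it unchanged. A minimal sketch of that wrapping step, with names standing in for the driver's fields:

#include <linux/scatterlist.h>

/*
 * Sketch: expose a driver-local buffer through a single-entry
 * scatterlist so scatterlist-based helpers can treat it like request
 * data. "pad"/"padlen" stand in for ctx->buffer/ctx->bufcnt above.
 */
static void demo_wrap_padding(struct scatterlist *sg,
			      void *pad, unsigned int padlen)
{
	sg_init_one(sg, pad, padlen);
}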
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 4403dbb0f0b1..44a185a84760 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -126,7 +126,7 @@ static void atmel_sha204a_remove(struct i2c_client *client)
kfree((void *)i2c_priv->hwrng.priv);
}
-static const struct of_device_id atmel_sha204a_dt_ids[] = {
+static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
{ .compatible = "atmel,atsha204", },
{ .compatible = "atmel,atsha204a", },
{ /* sentinel */ }
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index b2d48c1649b9..c9ded8be9c39 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -565,17 +565,12 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
if (req->cryptlen < ivsize)
return;
- if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+ if (rctx->mode & TDES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst)
- memcpy(req->iv, rctx->lastc, ivsize);
- else
- scatterwalk_map_and_copy(req->iv, req->src,
- req->cryptlen - ivsize,
- ivsize, 0);
- }
+ else
+ memcpy(req->iv, rctx->lastc, ivsize);
}
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
@@ -722,7 +717,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
- !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ !(mode & TDES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
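
Both Atmel drivers now capture the last ciphertext block into rctx->lastc before every non-ECB decrypt, not only for in-place requests, which is why the post-completion IV update shrinks to a plain memcpy(). A hedged sketch of the capture step these hunks rely on:

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

/*
 * Sketch: copy the final ciphertext block out of the source
 * scatterlist before decryption overwrites it (the in-place case).
 * "lastc" mirrors the rctx->lastc field used by the drivers.
 */
static void demo_save_last_block(struct skcipher_request *req,
				 u8 *lastc, unsigned int ivsize)
{
	scatterwalk_map_and_copy(lastc, req->src,
				 req->cryptlen - ivsize, ivsize, 0);
}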
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 12b1c8346243..feb86013dbf6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2023 NXP
*
* Based on talitos crypto API driver.
*
@@ -3542,13 +3542,14 @@ int caam_algapi_init(struct device *ctrldev)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
u32 cha_vid, cha_inst, aes_rn;
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_vid = rd_reg32(&perfmon->cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ cha_inst = rd_reg32(&perfmon->cha_num_ls);
des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
@@ -3556,23 +3557,23 @@ int caam_algapi_init(struct device *ctrldev)
ccha_inst = 0;
ptha_inst = 0;
- aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
- CHA_ID_LS_AES_MASK;
+ aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
+ struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
u32 aesa, mdha;
- aesa = rd_reg32(&priv->ctrl->vreg.aesa);
- mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ aesa = rd_reg32(&vreg->aesa);
+ mdha = rd_reg32(&vreg->mdha);
aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
- des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
aes_inst = aesa & CHA_VER_NUM_MASK;
md_inst = mdha & CHA_VER_NUM_MASK;
- ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
- ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 82d3c730a502..80deb003f0a5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -1956,12 +1956,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
* presence and attributes of MD block.
*/
if (priv->era < 10) {
- md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
+
+ md_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ md_inst = (rd_reg32(&perfmon->cha_num_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
} else {
- u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_inst = mdha & CHA_VER_NUM_MASK;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index e40614fef39d..72afc249d42f 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -1168,10 +1168,10 @@ int caam_pkc_init(struct device *ctrldev)
/* Determine public key hardware accelerator presence. */
if (priv->era < 10) {
- pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
} else {
- pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+ pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
pk_inst = pkha & CHA_VER_NUM_MASK;
/*
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 1fd8ff965006..50eb55da45c2 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -227,10 +227,10 @@ int caam_rng_init(struct device *ctrldev)
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
- rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
else
- rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+ rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
if (!rng_inst)
return 0;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 6278afb951c3..bedcc2ab3a00 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,7 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*/
#include <linux/device.h>
@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+ /* Clear the contents before using the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
- /* Clear the contents before recreating the descriptor */
- memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);
@@ -395,7 +397,7 @@ start_rng:
RTMCTL_SAMP_MODE_RAW_ES_SC);
}
-static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
{
static const struct {
u16 ip_id;
@@ -421,12 +423,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
u16 ip_id;
int i;
- ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+ ccbvid = rd_reg32(&perfmon->ccb_id);
era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
if (era) /* This is '0' prior to CAAM ERA-6 */
return era;
- id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+ id_ms = rd_reg32(&perfmon->caam_id_ms);
ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
@@ -444,9 +446,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
*
- * @ctrl: controller region
+ * @perfmon: Performance Monitor Registers
*/
-static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
struct device_node *caam_node;
int ret;
@@ -459,7 +461,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
if (!ret)
return prop;
else
- return caam_get_era_from_hw(ctrl);
+ return caam_get_era_from_hw(perfmon);
}
/*
@@ -626,12 +628,14 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
+ struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
bool pr_support = false;
+ bool reg_access = true;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -645,6 +649,17 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
if (imx_soc_match) {
+ /*
+ * Until Layerscape and i.MX OP-TEE get in sync,
+ * only i.MX OP-TEE use cases disallow access to
+ * caam page 0 (controller) registers.
+ */
+ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+ ctrlpriv->optee_en = !!np;
+ of_node_put(np);
+
+ reg_access = !ctrlpriv->optee_en;
+
if (!imx_soc_match->data) {
dev_err(dev, "No clock data provided for i.MX SoC");
return -EINVAL;
@@ -665,10 +680,38 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ u32 reg;
+
+ if (of_property_read_u32_index(np, "reg", 0, &reg)) {
+ dev_err(dev, "%s read reg property error\n",
+ np->full_name);
+ continue;
+ }
+
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl + reg);
+
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
+ /*
+ * Wherever possible, instead of accessing registers from the global page,
+ * use the alias registers in the page of the first job ring (first in
+ * DT node order).
+ */
+ perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ comp_params = rd_reg32(&perfmon->comp_parms_ms);
+ if (reg_access && comp_params & CTPR_MS_PS &&
+ rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
caam_ptr_sz = sizeof(u64);
else
caam_ptr_sz = sizeof(u32);
@@ -733,6 +776,9 @@ static int caam_probe(struct platform_device *pdev)
}
#endif
+ if (!reg_access)
+ goto set_dma_mask;
+
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
@@ -772,13 +818,14 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
+set_dma_mask:
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
return ret;
}
- ctrlpriv->era = caam_get_era(ctrl);
+ ctrlpriv->era = caam_get_era(perfmon);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
@@ -789,7 +836,7 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_debugfs_init(ctrlpriv, dfs_root);
+ caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -808,26 +855,16 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
return -ENOMEM;
}
- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
+ if (!reg_access)
+ goto report_live;
+
+ comp_params = rd_reg32(&perfmon->comp_parms_ls);
ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
/*
@@ -836,15 +873,21 @@ static int caam_probe(struct platform_device *pdev)
* check both here.
*/
if (ctrlpriv->era < 10) {
- rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
- (rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
+ (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
} else {
- rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ struct version_regs __iomem *vreg;
+
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
CHA_VER_VID_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
- (rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
+ (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
/*
@@ -923,10 +966,11 @@ static int caam_probe(struct platform_device *pdev)
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
+report_live:
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
- (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+ caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
+ (u64)rd_reg32(&perfmon->caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 806bb20d2aa1..6358d3cabf57 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#include <linux/debugfs.h>
#include "compat.h"
@@ -42,16 +42,15 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
}
#endif
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
+ struct dentry *root)
{
- struct caam_perfmon *perfmon;
-
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
- perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
ctrlpriv->ctl = debugfs_create_dir("ctl", root);
@@ -78,6 +77,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
&perfmon->status, &caam_fops_u32_ro);
+ if (ctrlpriv->optee_en)
+ return;
+
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 661d768acdbf..8b5d1acd21a7 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -1,16 +1,19 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#ifndef CAAM_DEBUGFS_H
#define CAAM_DEBUGFS_H
struct dentry;
struct caam_drv_private;
+struct caam_perfmon;
#ifdef CONFIG_DEBUG_FS
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon, struct dentry *root);
#else
static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
struct dentry *root)
{}
#endif
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
index 0eca8c2fd916..020a9d8a8a07 100644
--- a/drivers/crypto/caam/dpseci-debugfs.c
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -8,7 +8,7 @@
static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
{
- struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+ struct dpaa2_caam_priv *priv = file->private;
u32 fqid, fcnt, bcnt;
int i, err;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 572cf66c887a..86ed1b91c22d 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -94,6 +94,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
+ u8 optee_en; /* Nonzero if OP-TEE f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 724fdec18bf9..96dea5304d22 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,7 +4,7 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#include <linux/of_irq.h>
@@ -72,19 +72,27 @@ static void caam_jr_crypto_engine_exit(void *data)
crypto_engine_exit(jrpriv->engine);
}
-static int caam_reset_hw_jr(struct device *dev)
+/*
+ * Put the CAAM job ring in quiesce, i.e. stop it from processing jobs.
+ *
+ * Must be called with the job ring interrupt masked.
+ */
+static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ /* Check the current status */
+ if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
+ goto wait_quiesce_completion;
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ /* Reset the field */
+ clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
+
+ /* initiate flush / park (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
+
+wait_quiesce_completion:
while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
JRINT_ERR_HALT_INPROGRESS) && --timeout)
cpu_relax();
@@ -95,8 +103,35 @@ static int caam_reset_hw_jr(struct device *dev)
return -EIO;
}
+ return 0;
+}
+
+/*
+ * Flush the job ring: running jobs are stopped, queued jobs are
+ * invalidated, and the CAAM no longer fetches from the input ring.
+ *
+ * Must be called with the job ring interrupt masked.
+ */
+static int caam_jr_flush(struct device *dev)
+{
+ return caam_jr_stop_processing(dev, JRCR_RESET);
+}
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+ int err;
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ err = caam_jr_flush(dev);
+ if (err)
+ return err;
+
/* initiate reset */
- timeout = 100000;
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
cpu_relax();
@@ -163,6 +198,11 @@ static int caam_jr_remove(struct platform_device *pdev)
return ret;
}
+static void caam_jr_platform_shutdown(struct platform_device *pdev)
+{
+ caam_jr_remove(pdev);
+}
+
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -618,6 +658,7 @@ static struct platform_driver caam_jr_driver = {
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
+ .shutdown = caam_jr_platform_shutdown,
};
static int __init jr_driver_init(void)
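
The reset path is now two explicit phases: caam_jr_flush() parks the ring and waits for the halt to complete, then caam_reset_hw_jr() issues the actual reset. The driver polls with an open-coded cpu_relax() loop; for comparison, the same wait expressed with the generic iopoll helper might look like the sketch below. This is illustrative only: it ignores the driver's endian-aware rd_reg32() accessor, the JRINT_* constants come from the driver's register header, and the 100ms budget is an arbitrary stand-in for the loop count above.

#include <linux/iopoll.h>

static int demo_wait_halt(void __iomem *jrintstatus)
{
	u32 st;

	/* Poll every 10us, give up after 100ms. */
	return readl_poll_timeout(jrintstatus, st,
				  (st & JRINT_ERR_HALT_MASK) !=
				  JRINT_ERR_HALT_INPROGRESS,
				  10, 100 * 1000);
}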
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 432a61aca0c5..65114f766e7d 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index db362fe472ea..f6196495e862 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -10,7 +10,8 @@ ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
- tee-dev.o
+ tee-dev.o \
+ platform-access.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
new file mode 100644
index 000000000000..939c924fc383
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ *
+ * Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c
+ * developed by Jan Dabros <jsd@semihalf.com> and Copyright (C) 2022 Google Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "platform-access.h"
+
+#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define DOORBELL_CMDRESP_STS GENMASK(7, 0)
+
+/* The recovery field must be equal to 0 before commands can be sent */
+static int check_recovery(u32 __iomem *cmd)
+{
+ return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd));
+}
+
+static int wait_cmd(u32 __iomem *cmd)
+{
+ u32 tmp, expected;
+
+ /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
+ expected = FIELD_PREP(PSP_CMDRESP_RESP, 1);
+
+ /*
+ * Poll the PSP mailbox for readiness in a tight loop so that
+ * processing can continue as soon as the command has been consumed.
+ */
+ return readl_poll_timeout(cmd, tmp, (tmp & expected), 0,
+ PSP_CMD_TIMEOUT_US);
+}
+
+int psp_check_platform_access_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_platform_access_status);
+
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
+ struct psp_request *req)
+{
+ struct psp_device *psp = psp_get_master_device();
+ u32 __iomem *cmd, *lo, *hi;
+ struct psp_platform_access_device *pa_dev;
+ phys_addr_t req_addr;
+ u32 cmd_reg;
+ int ret;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+ cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
+ lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
+ hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
+
+ mutex_lock(&pa_dev->mailbox_mutex);
+
+ if (check_recovery(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is in recovery\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (wait_cmd(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is not done processing command\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * Fill the mailbox with the address of the command-response buffer, which
+ * is used both for sending requests and for reading back the status
+ * returned by the PSP. Use the buffer's physical address, since the PSP
+ * maps this region itself.
+ */
+ req_addr = __psp_pa(req);
+ iowrite32(lower_32_bits(req_addr), lo);
+ iowrite32(upper_32_bits(req_addr), hi);
+
+ print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ /* Write command register to trigger processing */
+ cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg);
+ iowrite32(cmd_reg, cmd);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* Ensure it was triggered by this driver */
+ if (ioread32(lo) != lower_32_bits(req_addr) ||
+ ioread32(hi) != upper_32_bits(req_addr)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Store the status in request header for caller to investigate */
+ cmd_reg = ioread32(cmd);
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (req->header.status) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&pa_dev->mailbox_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_send_platform_access_msg);
+
+int psp_ring_platform_doorbell(int msg, u32 *result)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_platform_access_device *pa_dev;
+ u32 __iomem *button, *cmd;
+ int ret, val;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+ button = psp->io_regs + pa_dev->vdata->doorbell_button_reg;
+ cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg;
+
+ mutex_lock(&pa_dev->doorbell_mutex);
+
+ if (wait_cmd(cmd)) {
+ dev_err(psp->dev, "doorbell command not done processing\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd);
+ iowrite32(PSP_DRBL_RING, button);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd));
+ if (val) {
+ if (result)
+ *result = val;
+ ret = -EIO;
+ goto unlock;
+ }
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pa_dev->doorbell_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell);
+
+void platform_access_dev_destroy(struct psp_device *psp)
+{
+ struct psp_platform_access_device *pa_dev = psp->platform_access_data;
+
+ if (!pa_dev)
+ return;
+
+ mutex_destroy(&pa_dev->mailbox_mutex);
+ mutex_destroy(&pa_dev->doorbell_mutex);
+ psp->platform_access_data = NULL;
+}
+
+int platform_access_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_platform_access_device *pa_dev;
+
+ pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL);
+ if (!pa_dev)
+ return -ENOMEM;
+
+ psp->platform_access_data = pa_dev;
+ pa_dev->psp = psp;
+ pa_dev->dev = dev;
+
+ pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access;
+
+ mutex_init(&pa_dev->mailbox_mutex);
+ mutex_init(&pa_dev->doorbell_mutex);
+
+ dev_dbg(dev, "platform access enabled\n");
+
+ return 0;
+}
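
This file, together with the sev-dev.c and tee-dev.c hunks below, standardizes on the bitfield helpers from <linux/bitfield.h>: each field is declared once as a GENMASK() constant, and FIELD_PREP()/FIELD_GET() derive the shift from the mask, replacing hand-rolled shift-and-mask code. A small self-contained sketch of the idiom (the DEMO_* masks are illustrative; they mimic but are not the PSP definitions):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CMD	GENMASK(26, 16)	/* command field, bits 26:16 */
#define DEMO_STS	GENMASK(15, 0)	/* status field, bits 15:0 */

static u32 demo_pack_cmd(u32 cmd)
{
	/* Shifts cmd into bits 26:16; the shift is derived from the mask. */
	return FIELD_PREP(DEMO_CMD, cmd);
}

static u32 demo_unpack_sts(u32 reg)
{
	/* Extracts bits 15:0 and right-justifies the value. */
	return FIELD_GET(DEMO_STS, reg);
}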
diff --git a/drivers/crypto/ccp/platform-access.h b/drivers/crypto/ccp/platform-access.h
new file mode 100644
index 000000000000..a83f03beb869
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_PLATFORM_ACCESS_H__
+#define __PSP_PLATFORM_ACCESS_H__
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_platform_access_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct platform_access_vdata *vdata;
+
+ struct mutex mailbox_mutex;
+ struct mutex doorbell_mutex;
+
+ void *platform_access_data;
+};
+
+void platform_access_dev_destroy(struct psp_device *psp);
+int platform_access_dev_init(struct psp_device *psp);
+
+#endif /* __PSP_PLATFORM_ACCESS_H__ */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index c9c741ac8442..e3d6955d3265 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -14,6 +14,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "platform-access.h"
struct psp_device *psp_master;
@@ -42,18 +43,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+ /* Clear the interrupt status by writing the same value we read. */
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+
/* invoke subdevice interrupt handlers */
if (status) {
if (psp->sev_irq_handler)
psp->sev_irq_handler(irq, psp->sev_irq_data, status);
-
- if (psp->tee_irq_handler)
- psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
- /* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
-
return IRQ_HANDLED;
}
@@ -105,6 +103,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static void psp_init_platform_access(struct psp_device *psp)
+{
+ int ret;
+
+ ret = platform_access_dev_init(psp);
+ if (ret) {
+ dev_warn(psp->dev, "platform access init failed: %d\n", ret);
+ return;
+ }
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -121,6 +130,9 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (psp->vdata->platform_access)
+ psp_init_platform_access(psp);
+
return 0;
}
@@ -201,6 +213,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ platform_access_dev_destroy(psp);
+
sp_free_psp_irq(sp, psp);
if (sp->clear_psp_master_device)
@@ -219,18 +233,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp)
psp_set_sev_irq_handler(psp, NULL, NULL);
}
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data)
-{
- psp->tee_irq_data = data;
- psp->tee_irq_handler = handler;
-}
-
-void psp_clear_tee_irq_handler(struct psp_device *psp)
-{
- psp_set_tee_irq_handler(psp, NULL, NULL);
-}
-
struct psp_device *psp_get_master_device(void)
{
struct sp_device *sp = sp_get_psp_master_device();
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index d528eb04c3ef..505e4bdeaca8 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -17,9 +17,6 @@
#include "sp-dev.h"
-#define PSP_CMDRESP_RESP BIT(31)
-#define PSP_CMDRESP_ERR_MASK 0xffff
-
#define MAX_PSP_NAME_LEN 16
extern struct psp_device *psp_master;
@@ -40,11 +37,9 @@ struct psp_device {
psp_irq_handler_t sev_irq_handler;
void *sev_irq_data;
- psp_irq_handler_t tee_irq_handler;
- void *tee_irq_data;
-
void *sev_data;
void *tee_data;
+ void *platform_access_data;
unsigned int capability;
};
@@ -53,10 +48,6 @@ void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data);
void psp_clear_sev_irq_handler(struct psp_device *psp);
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data);
-void psp_clear_tee_irq_handler(struct psp_device *psp);
-
struct psp_device *psp_get_master_device(void);
#define PSP_CAPABILITY_SEV BIT(0)
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e346c00b132a..b1f3327f65e0 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -7,6 +7,7 @@
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -24,6 +25,7 @@
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
+#include <linux/psp.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -102,7 +104,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
/* Check if it is SEV command completion: */
reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
+ if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
sev->int_rcvd = 1;
wake_up(&sev->int_queue);
}
@@ -346,9 +348,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
- reg = cmd;
- reg <<= SEV_CMDRESP_CMD_SHIFT;
- reg |= SEV_CMDRESP_IOC;
+ reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@@ -366,11 +366,11 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
psp_timeout = psp_cmd_timeout;
if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+ *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
+ cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 666c21eb81ab..778c95155e74 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -25,8 +25,8 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
+#define SEV_CMDRESP_CMD GENMASK(26, 16)
#define SEV_CMD_COMPLETE BIT(1)
-#define SEV_CMDRESP_CMD_SHIFT 16
#define SEV_CMDRESP_IOC BIT(0)
struct sev_misc_dev {
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 20377e67f65d..1253a0217985 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -53,9 +53,19 @@ struct tee_vdata {
const unsigned int ring_rptr_reg;
};
+struct platform_access_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int doorbell_button_reg;
+ const unsigned int doorbell_cmd_reg;
+
+};
+
struct psp_vdata {
const struct sev_vdata *sev;
const struct tee_vdata *tee;
+ const struct platform_access_vdata *platform_access;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index cde33b2ac71b..aa15bc4cac2b 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -361,6 +361,14 @@ static const struct tee_vdata teev1 = {
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
};
+static const struct platform_access_vdata pa_v1 = {
+ .cmdresp_reg = 0x10570, /* C2PMSG_28 */
+ .cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
+ .cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
+};
+
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
.feature_reg = 0x105fc, /* C2PMSG_63 */
@@ -377,6 +385,7 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
+ .platform_access = &pa_v1,
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
@@ -451,9 +460,9 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
- { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5c9d47f3be37..5560bf8329a1 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -8,12 +8,13 @@
* Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gfp.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include <linux/psp-tee.h>
#include "psp-dev.h"
@@ -69,7 +70,7 @@ static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
while (--nloop) {
*reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
- if (*reg & PSP_CMDRESP_RESP)
+ if (FIELD_GET(PSP_CMDRESP_RESP, *reg))
return 0;
usleep_range(10000, 10100);
@@ -149,9 +150,9 @@ static int tee_init_ring(struct psp_tee_device *tee)
goto free_buf;
}
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
tee_free_ring(tee);
ret = -EIO;
}
@@ -179,9 +180,9 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
if (ret) {
dev_err(tee->dev, "tee: ring destroy command timed out\n");
- } else if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
}
free_ring:
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index d489c6f80892..c57f929805d5 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -350,9 +350,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Get device resources */
/* First CC registers space */
- req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
- new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev,
+ 0, &req_mem_cc_regs);
if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 5a7f6611803c..8e4a49b7ab4f 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -879,7 +879,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
static void hifn_init_dma(struct hifn_device *dev)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
u32 dptr = dev->desc_dma;
int i;
@@ -1072,7 +1072,7 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
u8 *buf, unsigned dlen, unsigned slen,
u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
struct hifn_crypt_command *cry_cmd;
u8 *buf_pos = buf;
u16 cmd_len;
@@ -1113,7 +1113,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, struct hifn_request_context *rctx,
void *priv, unsigned int nbytes)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
@@ -1231,7 +1231,7 @@ err_out:
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size, int last)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1264,7 +1264,7 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
static void hifn_setup_res_desc(struct hifn_device *dev)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
@@ -1290,7 +1290,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size, int last)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1710,7 +1710,7 @@ static void hifn_process_ready(struct skcipher_request *req, int error)
static void hifn_clear_rings(struct hifn_device *dev, int error)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int i, u;
dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
@@ -1784,7 +1784,7 @@ static void hifn_work(struct work_struct *work)
spin_lock_irqsave(&dev->lock, flags);
if (dev->active == 0) {
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
dev->flags &= ~HIFN_FLAG_CMD_BUSY;
@@ -1815,7 +1815,7 @@ static void hifn_work(struct work_struct *work)
if (reset) {
if (++dev->reset >= 5) {
int i;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
dev_info(&dev->pdev->dev,
"r: %08x, active: %d, started: %d, "
@@ -1848,8 +1848,8 @@ static void hifn_work(struct work_struct *work)
static irqreturn_t hifn_interrupt(int irq, void *data)
{
- struct hifn_device *dev = (struct hifn_device *)data;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_device *dev = data;
+ struct hifn_dma *dma = dev->desc_virt;
u32 dmacsr, restart;
dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
@@ -1914,7 +1914,7 @@ static void hifn_flush(struct hifn_device *dev)
unsigned long flags;
struct crypto_async_request *async_req;
struct skcipher_request *req;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int i;
for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..e8690c223584 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -82,3 +82,10 @@ config CRYPTO_DEV_HISI_TRNG
select CRYPTO_RNG
help
Support for HiSilicon TRNG Driver.
+
+config CRYPTO_DEV_HISTB_TRNG
+ tristate "Support for HiSTB TRNG Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select HW_RANDOM
+ help
+ Support for HiSTB TRNG Driver.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 8595a5a5d228..fc51e0edec69 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
-obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
+obj-y += trng/
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 923f9c279265..5d0adfb54a34 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index e4c84433a88a..8b563ab47484 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 93572c0d4faa..77f9f131b850 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/hisilicon/trng/Makefile b/drivers/crypto/hisilicon/trng/Makefile
index d909079f351c..cf20b057c66b 100644
--- a/drivers/crypto/hisilicon/trng/Makefile
+++ b/drivers/crypto/hisilicon/trng/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o
hisi-trng-v2-objs = trng.o
+
+obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o
+histb-trng-objs += trng-stb.o
diff --git a/drivers/crypto/hisilicon/trng/trng-stb.c b/drivers/crypto/hisilicon/trng/trng-stb.c
new file mode 100644
index 000000000000..29200a7d3d81
--- /dev/null
+++ b/drivers/crypto/hisilicon/trng/trng-stb.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Device driver for True RNG in HiSTB SoCs
+ *
+ * Copyright (c) 2023 David Yang
+ */
+
+#include <crypto/internal/rng.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#define HISTB_TRNG_CTRL 0x0
+#define RNG_SOURCE GENMASK(1, 0)
+#define DROP_ENABLE BIT(5)
+#define POST_PROCESS_ENABLE BIT(7)
+#define POST_PROCESS_DEPTH GENMASK(15, 8)
+#define HISTB_TRNG_NUMBER 0x4
+#define HISTB_TRNG_STAT 0x8
+#define DATA_COUNT GENMASK(2, 0) /* max 4 */
+
+struct histb_trng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+};
+
+/*
+ * Observed:
+ * depth = 1 -> ~1ms
+ * depth = 255 -> ~16ms
+ */
+static int histb_trng_wait(void __iomem *base)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val,
+ val & DATA_COUNT, 1000, 30 * 1000);
+}
+
+static void histb_trng_init(void __iomem *base, unsigned int depth)
+{
+ u32 val;
+
+ val = readl_relaxed(base + HISTB_TRNG_CTRL);
+
+ val &= ~RNG_SOURCE;
+ val |= 2;
+
+ val &= ~POST_PROCESS_DEPTH;
+ val |= min(depth, 0xffu) << 8;
+
+ val |= POST_PROCESS_ENABLE;
+ val |= DROP_ENABLE;
+
+ writel_relaxed(val, base + HISTB_TRNG_CTRL);
+}
+
+static int histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng);
+ void __iomem *base = priv->base;
+
+ for (int i = 0; i < max; i += sizeof(u32)) {
+ if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) {
+ if (!wait)
+ return i;
+ if (histb_trng_wait(base)) {
+ pr_err("failed to generate random number, generated %d\n",
+ i);
+ return i ? i : -ETIMEDOUT;
+ }
+ }
+ *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER);
+ }
+
+ return max;
+}
+
+static unsigned int histb_trng_get_depth(void __iomem *base)
+{
+ return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+}
+
+static ssize_t
+depth_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+
+ return sprintf(buf, "%d\n", histb_trng_get_depth(base));
+}
+
+static ssize_t
+depth_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+ unsigned int depth;
+
+ if (kstrtouint(buf, 0, &depth))
+ return -ERANGE;
+
+ histb_trng_init(base, depth);
+ return count;
+}
+
+static DEVICE_ATTR_RW(depth);
+
+static struct attribute *histb_trng_attrs[] = {
+ &dev_attr_depth.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(histb_trng);
+
+static int histb_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct histb_trng_priv *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return -ENOMEM;
+
+ histb_trng_init(base, 144);
+ if (histb_trng_wait(base)) {
+ dev_err(dev, "cannot bring up device\n");
+ return -ENODEV;
+ }
+
+ priv->base = base;
+ priv->rng.name = pdev->name;
+ priv->rng.read = histb_trng_read;
+ ret = devm_hwrng_register(dev, &priv->rng);
+ if (ret) {
+ dev_err(dev, "failed to register hwrng: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_set_drvdata(dev, priv);
+ return 0;
+}
+
+static const struct of_device_id histb_trng_of_match[] = {
+ { .compatible = "hisilicon,histb-trng", },
+ { }
+};
+
+static struct platform_driver histb_trng_driver = {
+ .probe = histb_trng_probe,
+ .driver = {
+ .name = "histb-trng",
+ .of_match_table = histb_trng_of_match,
+ .dev_groups = histb_trng_groups,
+ },
+};
+
+module_platform_driver(histb_trng_driver);
+
+MODULE_DESCRIPTION("HiSTB True RNG");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
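
For context, histb_trng_read() above follows the hwrng core's read contract: return the number of bytes produced, return 0 when nothing is available and wait is false, and return a negative errno only on a hard failure. A minimal conforming callback, with a fake entropy word purely for illustration:

#include <linux/hw_random.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 sample = 0xdeadbeef;	/* placeholder, not real entropy */

	if (max < sizeof(sample))
		return 0;

	memcpy(data, &sample, sizeof(sample));
	return sizeof(sample);
}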
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 1549bec3aea5..f3ce34198775 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe93d19e3044..359aa2b41016 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -209,7 +209,7 @@ static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
static void img_hash_dma_callback(void *data)
{
- struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+ struct img_hash_dev *hdev = data;
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->bufcnt) {
@@ -927,7 +927,7 @@ finish:
img_hash_finish_req(hdev->req, err);
}
-static const struct of_device_id img_hash_match[] = {
+static const struct of_device_id img_hash_match[] __maybe_unused = {
{ .compatible = "img,hash-accelerator" },
{}
};
@@ -966,8 +966,7 @@ static int img_hash_probe(struct platform_device *pdev)
}
/* Write port (DMA or CPU) */
- hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+ hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
if (IS_ERR(hdev->cpu_addr)) {
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 6858753af6b3..9ff02b5abc4a 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -474,7 +474,7 @@ release_fw:
goto retry_fw;
}
- dev_dbg(priv->dev, "Firmware load failed.\n");
+ dev_err(priv->dev, "Firmware load failed.\n");
return ret;
}
@@ -1628,19 +1628,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1654,7 +1658,8 @@ static int safexcel_probe_generic(void *pdev,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}
priv->ring[i].irq = irq;
@@ -1666,8 +1671,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1684,16 +1691,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
new file mode 100644
index 000000000000..3d90c87d4094
--- /dev/null
+++ b/drivers/crypto/intel/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "drivers/crypto/intel/keembay/Kconfig"
+source "drivers/crypto/intel/ixp4xx/Kconfig"
+source "drivers/crypto/intel/qat/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
new file mode 100644
index 000000000000..b3d0352ae188
--- /dev/null
+++ b/drivers/crypto/intel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += keembay/
+obj-y += ixp4xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/intel/ixp4xx/Kconfig b/drivers/crypto/intel/ixp4xx/Kconfig
new file mode 100644
index 000000000000..af3cc5688328
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Kconfig
@@ -0,0 +1,14 @@
+config CRYPTO_DEV_IXP4XX
+ tristate "Driver for IXP4xx crypto hardware acceleration"
+ depends on (ARCH_IXP4XX || COMPILE_TEST) && IXP4XX_QMGR && IXP4XX_NPE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
+ select CRYPTO_LIB_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ help
+ Driver for the IXP4xx NPE crypto engine.
diff --git a/drivers/crypto/intel/ixp4xx/Makefile b/drivers/crypto/intel/ixp4xx/Makefile
new file mode 100644
index 000000000000..74ebefd93046
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index b63e2359a133..ed15379a9818 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -118,9 +118,9 @@ struct crypt_ctl {
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
- dma_addr_t icv_rev_aes; /* icv or rev aes */
- dma_addr_t src_buf;
- dma_addr_t dst_buf;
+ u32 icv_rev_aes; /* icv or rev aes */
+ u32 src_buf;
+ u32 dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
@@ -263,7 +263,9 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
- BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
+ BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
+ IS_ENABLED(CONFIG_64BIT)) &&
+ sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
&crypt_phys, GFP_ATOMIC);
@@ -1169,10 +1171,11 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
if (unlikely(lastlen < authsize)) {
+ dma_addr_t dma;
/* The 12 hmac bytes are scattered,
* so we need to copy them into a safe buffer */
- req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
- &crypt->icv_rev_aes);
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
+ crypt->icv_rev_aes = dma;
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
if (!encrypt) {
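
struct crypt_ctl is a fixed 64-byte descriptor parsed by the 32-bit NPE, so its address fields must stay u32 even when the kernel's dma_addr_t is 64 bits wide (the COMPILE_TEST build). dma_pool_alloc() still needs a full-width dma_addr_t to write through, hence the local temporary. A stripped-down sketch of the shape (names here are illustrative):

#include <linux/dmapool.h>
#include <linux/types.h>

struct example_desc {
	u32 icv_rev_aes;	/* hardware field: 32-bit bus address */
};

static void *example_alloc(struct dma_pool *pool, gfp_t flags,
			   struct example_desc *desc)
{
	dma_addr_t dma;		/* full-width handle the DMA API writes */
	void *virt;

	virt = dma_pool_alloc(pool, flags, &dma);
	if (virt)
		desc->icv_rev_aes = dma; /* fits: IXP4xx bus addresses are 32-bit */
	return virt;
}
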
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/intel/keembay/Kconfig
index 1cd62f9c3e3a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/intel/keembay/Kconfig
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/intel/keembay/Makefile
index 7c12c3c138bd..7c12c3c138bd 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/intel/keembay/Makefile
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9953f5590ac4..ae31be00357a 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1580,8 +1580,6 @@ static int kmb_ocs_aes_remove(struct platform_device *pdev)
struct ocs_aes_dev *aes_dev;
aes_dev = platform_get_drvdata(pdev);
- if (!aes_dev)
- return -ENODEV;
unregister_aes_algs(aes_dev);
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 2269df17514c..2269df17514c 100644
--- a/drivers/crypto/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index d4bcbed1f546..d4bcbed1f546 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
diff --git a/drivers/crypto/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..be9f32fc8f42 100644
--- a/drivers/crypto/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
diff --git a/drivers/crypto/keembay/ocs-aes.h b/drivers/crypto/intel/keembay/ocs-aes.h
index c035fc48b7ed..c035fc48b7ed 100644
--- a/drivers/crypto/keembay/ocs-aes.h
+++ b/drivers/crypto/intel/keembay/ocs-aes.h
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/intel/keembay/ocs-hcu.c
index deb9bd460ee6..deb9bd460ee6 100644
--- a/drivers/crypto/keembay/ocs-hcu.c
+++ b/drivers/crypto/intel/keembay/ocs-hcu.c
diff --git a/drivers/crypto/keembay/ocs-hcu.h b/drivers/crypto/intel/keembay/ocs-hcu.h
index fbbbb92a0592..fbbbb92a0592 100644
--- a/drivers/crypto/keembay/ocs-hcu.h
+++ b/drivers/crypto/intel/keembay/ocs-hcu.h
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 1220cc86f910..1220cc86f910 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 258c8a626ce0..258c8a626ce0 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
diff --git a/drivers/crypto/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index ff9c8b5897ea..ff9c8b5897ea 100644
--- a/drivers/crypto/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 834a705180c0..7324b86a4f40 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -28,13 +28,31 @@ static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
{0x100, ADF_4XXX_ADMIN_OBJ},
};
+static struct adf_fw_config adf_402xx_fw_cy_config[] = {
+ {0xF0, ADF_402XX_SYM_OBJ},
+ {0xF, ADF_402XX_ASYM_OBJ},
+ {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_dc_config[] = {
+ {0xF0, ADF_402XX_DC_OBJ},
+ {0xF, ADF_402XX_DC_OBJ},
+ {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
+static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
};
+static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x0
+};
+
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
@@ -206,9 +224,16 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_1;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- return thrd_to_arb_map;
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return thrd_to_arb_map_cy;
+ case SVC_DC:
+ return thrd_to_arb_map_dc;
+ }
+
+ return NULL;
}
static void get_arb_info(struct arb_info *arb_info)
@@ -286,7 +311,7 @@ static u32 uof_get_num_objs(void)
return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
-static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
@@ -298,6 +323,18 @@ static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
return NULL;
}
+static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_402xx_fw_cy_config[obj_num].obj_name;
+ case SVC_DC:
+ return adf_402xx_fw_dc_config[obj_num].obj_name;
+ }
+
+ return NULL;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
@@ -310,7 +347,7 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
return 0;
}
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
@@ -337,8 +374,6 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->get_admin_info = get_admin_info;
hw_data->get_accel_cap = get_accel_cap;
hw_data->get_sku = get_sku;
- hw_data->fw_name = ADF_4XXX_FW;
- hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_send_admin_init;
@@ -349,8 +384,19 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ switch (dev_id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->uof_get_name = uof_get_name_402xx;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ }
hw_data->uof_get_num_objs = uof_get_num_objs;
- hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
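
adf_init_hw_data_4xxx() now receives the PCI device ID from probe and keys the firmware set off it, so 402xx parts load their own images while 4xxx/401xx keep the defaults. A compressed sketch of the dispatch (0x4944 and the 402xx file names come from hunks elsewhere in this series; the 4xxx default names are assumed):

#include <linux/types.h>

struct example_hw_data {
	const char *fw_name;
	const char *fw_mmp_name;
};

static void example_init_hw_data(struct example_hw_data *hw, u32 dev_id)
{
	switch (dev_id) {
	case 0x4944:			/* ADF_402XX_PCI_DEVICE_ID */
		hw->fw_name = "qat_402xx.bin";
		hw->fw_mmp_name = "qat_402xx_mmp.bin";
		break;
	default:			/* 4xxx and 401xx share one set */
		hw->fw_name = "qat_4xxx.bin";
		hw->fw_mmp_name = "qat_4xxx_mmp.bin";
	}
}
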
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index e98428ba78e2..085e259c245a 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -56,6 +56,13 @@
#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+/* Firmware for 402XX */
+#define ADF_402XX_FW "qat_402xx.bin"
+#define ADF_402XX_MMP "qat_402xx_mmp.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
@@ -68,7 +75,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
};
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index b3a4c7b23864..ceb87327a5fe 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -16,6 +16,7 @@
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -330,7 +331,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
accel_dev->hw_device = hw_data;
- adf_init_hw_data_4xxx(accel_dev->hw_device);
+ adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
@@ -403,38 +404,24 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state.\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err;
}
- ret = adf_sysfs_init(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = hw_data->dev_config(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
+ goto out_err_dev_stop;
- ret = adf_dev_start(accel_dev);
+ ret = adf_sysfs_init(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@@ -448,9 +435,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
}
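
With adf_dev_up()/adf_dev_down() in place, each driver's probe collapses the dev_config + init + start ladder, and every error path and remove() tears down with a single call. A minimal sketch of the resulting shape (the surrounding allocation and PCI setup are elided):

#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int example_probe_tail(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* config + init + start as one guarded state transition */
	ret = adf_dev_up(accel_dev, true);
	if (ret)
		adf_dev_down(accel_dev, false);	/* single-call unwind */
	return ret;
}
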
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 92ef416ccc78..92ef416ccc78 100644
--- a/drivers/crypto/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index c55c51a07677..475643654e64 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -75,7 +75,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 336a06f11dbd..336a06f11dbd 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 1f4fbf4562b2..bb4dca735ab5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index b6d76825a92c..b6d76825a92c 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04de..84d9486e04de 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 6b4bf181d15b..6b4bf181d15b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index cf4ef83e186f..e8cc10f64134 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index d581f7c87d6c..d581f7c87d6c 100644
--- a/drivers/crypto/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index b7aa19d2fa80..e14270703670 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -77,7 +77,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
index 008c0a3a9769..008c0a3a9769 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 4ccaf298250c..ca18ae14c099 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 446c3d638605..446c3d638605 100644
--- a/drivers/crypto/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57fc7..751d7aa57fc7 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a1a62c003ebf..a1a62c003ebf 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 0e642c94b929..37566309df94 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 1fb8d50f509f..1fb8d50f509f 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 284f5aad3ee0..bd19e6460899 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -21,6 +21,8 @@
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -188,7 +190,7 @@ struct adf_hw_device_data {
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
- const u32 *(*get_arb_mapping)(void);
+ const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
int (*init_device)(struct adf_accel_dev *accel_dev);
int (*enable_pm)(struct adf_accel_dev *accel_dev);
bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
@@ -310,6 +312,7 @@ struct adf_accel_dev {
u8 pf_compat_ver;
} vf;
};
+ struct mutex state_lock; /* protect state of the device */
bool is_vf;
u32 accel_id;
};
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 4ce2b666929e..4ce2b666929e 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 3b6184c35081..3b6184c35081 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index fe9bb2f3536a..04af32a2811c 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
@@ -90,9 +89,7 @@ static void adf_device_reset_worker(struct work_struct *work)
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ if (adf_dev_restart(accel_dev)) {
/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
@@ -173,40 +170,6 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-/**
- * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
- * @accel_dev: Pointer to acceleration device.
- *
- * Function enables PCI Advanced Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- */
-void adf_enable_aer(struct adf_accel_dev *accel_dev)
-{
- struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
- pci_enable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_enable_aer);
-
-/**
- * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
- * @accel_dev: Pointer to acceleration device.
- *
- * Function disables PCI Advanced Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_disable_aer(struct adf_accel_dev *accel_dev)
-{
- struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
- pci_disable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_disable_aer);
-
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
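
Two things happen in adf_aer.c: the reset worker now rides the consolidated adf_dev_restart(), and the adf_enable_aer()/adf_disable_aer() wrappers around pci_{enable,disable}_pcie_error_reporting() are deleted outright, since the PCI core enables AER reporting on its own and the wrappers had become redundant. A sketch of the slimmed-down worker (struct adf_reset_dev_data's field names are assumed from the surrounding context):

#include <linux/container_of.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static void example_reset_worker(struct work_struct *work)
{
	struct adf_reset_dev_data *reset_data =
		container_of(work, struct adf_reset_dev_data, reset_work);
	struct adf_accel_dev *accel_dev = reset_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	/* one call now covers stop + shutdown + init + start */
	if (adf_dev_restart(accel_dev))
		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
}
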
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 1931e5b37f2b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
index 376cde61a60e..376cde61a60e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 6e5de1dab97b..6e5de1dab97b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 5d8c3bdb258c..5d8c3bdb258c 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
index 421f4fb8b4dd..421f4fb8b4dd 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 7189265573c0..db79759bee61 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -52,11 +52,9 @@ struct service_hndl {
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
-int adf_dev_init(struct adf_accel_dev *accel_dev);
-int adf_dev_start(struct adf_accel_dev *accel_dev);
-void adf_dev_stop(struct adf_accel_dev *accel_dev);
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -88,8 +86,6 @@ int adf_ae_start(struct adf_accel_dev *accel_dev);
int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
-void adf_enable_aer(struct adf_accel_dev *accel_dev);
-void adf_disable_aer(struct adf_accel_dev *accel_dev);
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 9190532b27eb..88c41d6fbb7c 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -8,7 +10,6 @@
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
-#include <linux/crypto.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -243,8 +244,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!accel_dev->is_vf)
continue;
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
}
@@ -253,8 +253,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!adf_dev_started(accel_dev))
continue;
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
}
}
@@ -308,23 +307,16 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (!accel_dev)
goto out;
- if (!adf_dev_started(accel_dev)) {
- dev_info(&GET_DEV(accel_dev),
- "Starting acceleration device qat_dev%d.\n",
- ctl_data->device_id);
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
- } else {
- dev_info(&GET_DEV(accel_dev),
- "Acceleration device qat_dev%d already started.\n",
- ctl_data->device_id);
- }
+ dev_info(&GET_DEV(accel_dev),
+ "Starting acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+
+ ret = adf_dev_up(accel_dev, false);
+
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
ctl_data->device_id);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
out:
kfree(ctl_data);
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 4c752eed10fe..86ee36feefad 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
+ mutex_init(&accel_dev->state_lock);
unlock:
mutex_unlock(&table_lock);
return ret;
@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
}
}
unlock:
+ mutex_destroy(&accel_dev->state_lock);
list_del(&accel_dev->list);
mutex_unlock(&table_lock);
}
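
The new state_lock is initialized when the device registers with the device manager and destroyed when it is removed, so the mutex is valid for exactly the window in which adf_dev_up()/adf_dev_down() can be called. A sketch of the pairing (function names are illustrative):

#include <linux/mutex.h>
#include "adf_accel_devices.h"

static void example_register(struct adf_accel_dev *accel_dev)
{
	/* ready before any state transition can race */
	mutex_init(&accel_dev->state_lock);
}

static void example_unregister(struct adf_accel_dev *accel_dev)
{
	/* only after the last adf_dev_up()/adf_dev_down() caller is gone */
	mutex_destroy(&accel_dev->state_lock);
}
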
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
index eeb30da7587a..eeb30da7587a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
index 4bf9da2de68a..4bf9da2de68a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
index 47261b1c1da6..47261b1c1da6 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
index 6eae023354d7..6eae023354d7 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_dc.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5a1..d1884547b5a1 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index e4bc07529be4..e4bc07529be4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
index 70ef11963938..70ef11963938 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..a716545a764c 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
index 5859238e37de..5859238e37de 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
index 0b1a6774412e..0b1a6774412e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_dc.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a62938fd..3148a62938fd 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 4fb4b3df5a18..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3ee..8e8efe93f3ee 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..17d1b774d4a8 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 7037c0892a8a..7037c0892a8a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index f8f8a9ee29e5..f8f8a9ee29e5 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index 64e4596a24f4..da6956699246 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -36,7 +36,7 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
/* Map worker threads to service arbiters */
- thd_2_arb_cfg = hw_data->get_arb_mapping();
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
for_each_set_bit(i, &ae_mask, hw_data->num_engines)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index cef7bb8ec007..0985f64ab11a 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -56,7 +56,7 @@ int adf_service_unregister(struct service_hndl *service)
*
* Return: 0 on success, error code otherwise.
*/
-int adf_dev_init(struct adf_accel_dev *accel_dev)
+static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -146,7 +146,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(adf_dev_init);
/**
* adf_dev_start() - Start acceleration service for the given accel device
@@ -158,7 +157,7 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
*
* Return: 0 on success, error code otherwise.
*/
-int adf_dev_start(struct adf_accel_dev *accel_dev)
+static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
@@ -219,7 +218,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
}
return 0;
}
-EXPORT_SYMBOL_GPL(adf_dev_start);
/**
* adf_dev_stop() - Stop acceleration service for the given accel device
@@ -231,7 +229,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
*
* Return: void
*/
-void adf_dev_stop(struct adf_accel_dev *accel_dev)
+static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -276,7 +274,6 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
}
-EXPORT_SYMBOL_GPL(adf_dev_stop);
/**
* adf_dev_shutdown() - shut down acceleration services and data structures
@@ -285,7 +282,7 @@ EXPORT_SYMBOL_GPL(adf_dev_stop);
* Clean up the ring data structures and the admin comms and arbitration
* services.
*/
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
@@ -343,7 +340,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
-EXPORT_SYMBOL_GPL(adf_dev_shutdown);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
@@ -375,7 +371,7 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
return 0;
}
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
@@ -400,3 +396,85 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
return 0;
}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+ accel_dev->accel_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
+ }
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+ accel_dev->accel_id);
+ ret = -EALREADY;
+ goto out;
+ }
+
+ if (config && GET_HW_DATA(accel_dev)->dev_config) {
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = adf_dev_init(accel_dev);
+ if (unlikely(ret))
+ goto out;
+
+ ret = adf_dev_start(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
+
+int adf_dev_restart(struct adf_accel_dev *accel_dev)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EFAULT;
+
+ adf_dev_down(accel_dev, false);
+
+ ret = adf_dev_up(accel_dev, false);
+ /* if the device is already up, return success */
+ if (ret == -EALREADY)
+ return 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_restart);
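
Taken together: adf_dev_up() refuses a second start with -EALREADY, adf_dev_down() refuses an already-stopped device with -EINVAL, and adf_dev_restart() chains the two while deliberately treating -EALREADY as success. A caller's-eye sketch of the contract (error handling trimmed):

#include <linux/errno.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int example_cycle(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_up(accel_dev, true);	/* dev_config + init + start */
	if (ret && ret != -EALREADY)		/* already running is benign here */
		return ret;

	ret = adf_dev_down(accel_dev, false);	/* full stop + shutdown */
	if (ret)
		return ret;

	return adf_dev_restart(accel_dev);	/* down, then up, no reconfig */
}
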
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index ad9e135b8560..ad9e135b8560 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a42438992..204a42438992 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d71a..14c069f0d71a 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac896..e8982d1ac896 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbcaf..388e58bcbcaf 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
index 165d266d023d..165d266d023d 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
index c5f6d77d4bb8..c5f6d77d4bb8 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
index 2be048e2287b..2be048e2287b 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
index 1141258db4b6..1141258db4b6 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
index 71bc0e3f1d93..71bc0e3f1d93 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b6374..1015155b6374 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
index f6ee9b38c0e1..f6ee9b38c0e1 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index d85a90cc387b..f44025bb6f99 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -159,7 +159,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret)
return ret;
}
@@ -184,13 +184,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (!accel_dev->pf.vf_info)
return -ENOMEM;
- if (adf_dev_init(accel_dev)) {
- dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
- if (adf_dev_start(accel_dev)) {
+ if (adf_dev_up(accel_dev, false)) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
accel_dev->accel_id);
return -EFAULT;
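
For SR-IOV the PF must bounce through a reconfiguration: adf_dev_down(dev, true) keeps a cached copy of the configuration while tearing the services down, and the later adf_dev_up(dev, false) brings the PF back without re-running dev_config(). A sketch of the sequence (surrounding VF bookkeeping elided):

#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int example_sriov_reconfigure(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* true: shut down but preserve the cached configuration */
	ret = adf_dev_down(accel_dev, true);
	if (ret)
		return ret;

	/* false: restart from the cached configuration, no dev_config() */
	return adf_dev_up(accel_dev, false);
}
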
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index e8b078e719c2..3eb6611ab1b1 100644
--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
switch (ret) {
case DEV_DOWN:
- if (!adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already down\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret < 0)
return -EINVAL;
break;
case DEV_UP:
- if (adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already up\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Starting device qat_dev%d\n", accel_id);
- ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
- if (!ret)
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
-
+ ret = adf_dev_up(accel_dev, true);
if (ret < 0) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
- adf_dev_shutdown_cache_cfg(accel_dev);
+ adf_dev_down(accel_dev, true);
return ret;
}
break;
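
On the sysfs side the handler shrinks to two calls: DEV_DOWN maps to adf_dev_down(dev, true) and DEV_UP to adf_dev_up(dev, true), with the duplicate-transition checks now made inside those helpers under state_lock rather than in the attribute code. A userspace sketch of driving the attribute (the path follows the QAT sysfs layout; substitute the device's real PCI address):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:6b:00.0/qat/state";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* writing "down" stops the device and caches its config */
	if (write(fd, "up", strlen("up")) < 0)
		perror("write");
	close(fd);
	return 0;
}
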
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4e0..630d0483c4e0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/intel/qat/qat_common/adf_transport.h
index e6ef6f9b7691..e6ef6f9b7691 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.h
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
index d3667dbd9826..d3667dbd9826 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index 08bca1c506c0..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
index 8b2c92ba7ca1..8b2c92ba7ca1 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index 8c95fcd8e64b..b05c3957a160 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -71,8 +71,7 @@ static void adf_dev_stop_async(struct work_struct *work)
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
index c141160421e1..c141160421e1 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index a03d43fef2b3..a03d43fef2b3 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 56cb827f93ea..56cb827f93ea 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
index 28fa17f14be4..28fa17f14be4 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..7eb5daef4f88 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
index 9dddae0009fc..9dddae0009fc 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
index 20b2ee1fc65a..20b2ee1fc65a 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hal.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index 4042739bb6fa..4042739bb6fa 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
index 7ea8962272f2..7ea8962272f2 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
index 208d4554283b..208d4554283b 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 69482abdb8b9..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 538dcbfbcd26..538dcbfbcd26 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
index bb80455b3e81..bb80455b3e81 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
index 0baca16e1eff..0baca16e1eff 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 935a7e012946..935a7e012946 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 76baed0a76c0..76baed0a76c0 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index d87e4f35ac39..d87e4f35ac39 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index b533984906ec..b533984906ec 100644
--- a/drivers/crypto/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 404e32c5e778..404e32c5e778 100644
--- a/drivers/crypto/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 3f1f35283266..3f1f35283266 100644
--- a/drivers/crypto/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..aebac2302dcf 100644
--- a/drivers/crypto/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
index 40c8e74d1cf9..40c8e74d1cf9 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
index 6a0e961bb9dc..6a0e961bb9dc 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 7bba35280dac..cbb946a80076 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -696,6 +696,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index b7f7869ef8b2..3ba8ca20b3d7 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -732,6 +732,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C3XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 38d6f8e1624a..38d6f8e1624a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index bc80bb475118..1ebe0b351fae 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -106,7 +106,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 7b674bbe4192..7b674bbe4192 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index ebeb17b67fcd..e18860ab5c8e 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
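
This probe conversion repeats across every QAT endpoint driver in the series: the separate adf_dev_init()/adf_dev_start() steps and their cascade of error labels collapse into one adf_dev_up()/adf_dev_down() pair, leaving a single unwind label. A minimal sketch of the resulting shape, using hypothetical demo_* names rather than the real QAT symbols:

    #include <linux/errno.h>

    struct pci_dev;
    struct demo_dev;

    /* Hypothetical stand-ins for the driver's own helpers. */
    struct demo_dev *demo_alloc(struct pci_dev *pdev);
    void demo_free(struct demo_dev *ddev);
    int demo_dev_up(struct demo_dev *ddev);    /* init + start in one call */
    void demo_dev_down(struct demo_dev *ddev); /* safe on a partially-up device */

    static int demo_probe(struct pci_dev *pdev)
    {
            struct demo_dev *ddev;
            int ret;

            ddev = demo_alloc(pdev);
            if (!ddev)
                    return -ENOMEM;

            ret = demo_dev_up(ddev);
            if (ret)
                    goto out_err_down;

            return 0;

    out_err_down:
            demo_dev_down(ddev);
            demo_free(ddev);
            return ret;
    }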
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 0153c85ce743..0153c85ce743 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16ece..70e56cc16ece 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 6973fa967bc8..6973fa967bc8 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index c1485e702b3e..96854a1cd87e 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 1c11946a4f0b..f6b7bce0e656 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -1022,21 +1022,15 @@ static int mxs_dcp_probe(struct platform_device *pdev)
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
/* DCP clock is optional, only used on some SOCs */
- sdcp->dcp_clk = devm_clk_get(dev, "dcp");
- if (IS_ERR(sdcp->dcp_clk)) {
- if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
- return PTR_ERR(sdcp->dcp_clk);
- sdcp->dcp_clk = NULL;
- }
- ret = clk_prepare_enable(sdcp->dcp_clk);
- if (ret)
- return ret;
+ sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
+ if (IS_ERR(sdcp->dcp_clk))
+ return PTR_ERR(sdcp->dcp_clk);
/* Restart the DCP block. */
ret = stmp_reset_block(sdcp->base);
if (ret) {
dev_err(dev, "Failed reset\n");
- goto err_disable_unprepare_clk;
+ return ret;
}
/* Initialize control register. */
@@ -1076,7 +1070,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
- goto err_disable_unprepare_clk;
+ return ret;
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1134,9 +1128,6 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
-err_disable_unprepare_clk:
- clk_disable_unprepare(sdcp->dcp_clk);
-
return ret;
}
@@ -1156,8 +1147,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
- clk_disable_unprepare(sdcp->dcp_clk);
-
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
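
For reference, the devres idiom adopted here: devm_clk_get_optional_enabled() returns NULL rather than an error when the clock is not described, enables the clock when it is present, and schedules clk_disable_unprepare() automatically on driver detach, which is why the explicit error-path and remove() cleanup above can be deleted. A minimal sketch (the probe helper name is hypothetical):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int demo_probe_clk(struct device *dev)   /* hypothetical helper */
    {
            struct clk *clk;

            /* Optional clock: NULL when absent, already prepared and enabled
             * when present, disabled/unprepared automatically via devres. */
            clk = devm_clk_get_optional_enabled(dev, "dcp");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* clk may be NULL here; the clk API treats NULL as a no-op. */
            return 0;
    }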
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 74deca4f96e0..fce49c0dee3e 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -22,6 +23,8 @@
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
+#define QCE_DEFAULT_MEM_BANDWIDTH 393600
+
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
@@ -206,22 +209,30 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get(qce->dev, "core");
+ qce->core = devm_clk_get_optional(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
- ret = clk_prepare_enable(qce->core);
+ qce->mem_path = devm_of_icc_get(qce->dev, "memory");
+ if (IS_ERR(qce->mem_path))
+ return PTR_ERR(qce->mem_path);
+
+ ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
if (ret)
return ret;
+ ret = clk_prepare_enable(qce->core);
+ if (ret)
+ goto err_mem_path_disable;
+
ret = clk_prepare_enable(qce->iface);
if (ret)
goto err_clks_core;
@@ -260,6 +271,9 @@ err_clks_iface:
clk_disable_unprepare(qce->iface);
err_clks_core:
clk_disable_unprepare(qce->core);
+err_mem_path_disable:
+ icc_set_bw(qce->mem_path, 0, 0);
+
return ret;
}
@@ -279,6 +293,7 @@ static int qce_crypto_remove(struct platform_device *pdev)
static const struct of_device_id qce_crypto_of_match[] = {
{ .compatible = "qcom,crypto-v5.1", },
{ .compatible = "qcom,crypto-v5.4", },
+ { .compatible = "qcom,qce", },
{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 085774cdf641..228fcd69ec51 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -35,6 +35,7 @@ struct qce_device {
void __iomem *base;
struct device *dev;
struct clk *core, *iface, *bus;
+ struct icc_path *mem_path;
struct qce_dma_data dma;
int burst_size;
unsigned int pipe_pair_id;
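
The qce change pairs the now-optional clocks with an interconnect vote, so the engine's path to memory is guaranteed before DMA starts. The pattern, sketched with the icc API and an illustrative bandwidth value (the helper name is hypothetical):

    #include <linux/device.h>
    #include <linux/interconnect.h>

    static int demo_vote_mem_path(struct device *dev)   /* hypothetical helper */
    {
            struct icc_path *path;
            int ret;

            path = devm_of_icc_get(dev, "memory"); /* from "interconnects" in DT */
            if (IS_ERR(path))
                    return PTR_ERR(path);

            /* Request average/peak bandwidth before touching the hardware... */
            ret = icc_set_bw(path, 393600, 393600);
            if (ret)
                    return ret;

            /* ...and drop the vote again with icc_set_bw(path, 0, 0) on
             * errors or removal, as the qce error path above does. */
            return 0;
    }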
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f4bc06c24ad8..df5f9d675c57 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1037,7 +1037,7 @@ static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
static void sa_aes_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct skcipher_request *req;
u32 *result;
__be32 *mdptr;
@@ -1351,7 +1351,7 @@ static int sa_decrypt(struct skcipher_request *req)
static void sa_sha_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct ahash_request *req;
struct crypto_ahash *tfm;
unsigned int authsize;
@@ -1689,7 +1689,7 @@ static void sa_sha_cra_exit(struct crypto_tfm *tfm)
static void sa_aead_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct aead_request *req;
struct crypto_aead *tfm;
unsigned int start;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index dd4c703cd855..4c799df3e883 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1035,7 +1035,7 @@ static int sahara_sha_process(struct ahash_request *req)
static int sahara_queue_manage(void *data)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct sahara_dev *dev = data;
struct crypto_async_request *async_req;
struct crypto_async_request *backlog;
int ret = 0;
@@ -1270,7 +1270,7 @@ static struct ahash_alg sha_v4_algs[] = {
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct sahara_dev *dev = data;
unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
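
The sa2ul and sahara hunks are pure cleanups: in C, a void * converts implicitly to any object pointer type, so the cast on a callback's first line is redundant. For illustration:

    struct demo_ctx { int id; };

    static void demo_callback(void *data)
    {
            /* Implicit conversion: identical in meaning to
             * (struct demo_ctx *)data, just less noise. */
            struct demo_ctx *ctx = data;

            (void)ctx->id;
    }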
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 7bf805563ac2..f0df32382719 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -7,7 +7,6 @@
*/
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -68,7 +67,7 @@
#define HASH_MASK_DATA_INPUT BIT(1)
/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER 53
+#define HASH_CSR_REGISTER_NUMBER 54
/* Status Flags */
#define HASH_SR_DATA_INPUT_READY BIT(0)
@@ -96,7 +95,7 @@
#define HASH_FLAGS_SHA1 BIT(19)
#define HASH_FLAGS_SHA224 BIT(20)
#define HASH_FLAGS_SHA256 BIT(21)
-#define HASH_FLAGS_ERRORS BIT(22)
+#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
#define HASH_OP_UPDATE 1
@@ -127,15 +126,24 @@ struct stm32_hash_ctx {
int keylen;
};
+struct stm32_hash_state {
+ u32 flags;
+
+ u16 bufcnt;
+ u16 buflen;
+
+ u8 buffer[HASH_BUFLEN] __aligned(4);
+
+ /* hash state */
+ u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+};
+
struct stm32_hash_request_ctx {
struct stm32_hash_dev *hdev;
- unsigned long flags;
unsigned long op;
u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
- size_t bufcnt;
- size_t buflen;
/* DMA */
struct scatterlist *sg;
@@ -149,10 +157,7 @@ struct stm32_hash_request_ctx {
u8 data_type;
- u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
-
- /* Export Context */
- u32 *hw_context;
+ struct stm32_hash_state state;
};
struct stm32_hash_algs_info {
@@ -183,7 +188,6 @@ struct stm32_hash_dev {
struct ahash_request *req;
struct crypto_engine *engine;
- int err;
unsigned long flags;
struct dma_chan *dma_lch;
@@ -270,11 +274,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_state *state = &rctx->state;
u32 reg = HASH_CR_INIT;
if (!(hdev->flags & HASH_FLAGS_INIT)) {
- switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ switch (state->flags & HASH_FLAGS_ALGO_MASK) {
case HASH_FLAGS_MD5:
reg |= HASH_CR_ALGO_MD5;
break;
@@ -299,20 +304,13 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
- if (rctx->flags & HASH_FLAGS_HMAC) {
+ if (state->flags & HASH_FLAGS_HMAC) {
hdev->flags |= HASH_FLAGS_HMAC;
reg |= HASH_CR_MODE;
if (ctx->keylen > HASH_LONG_KEY)
reg |= HASH_CR_LKEY;
}
- /*
- * On the Ux500 we need to set a special flag to indicate that
- * the message is zero length.
- */
- if (hdev->pdata->ux500 && bufcnt == 0)
- reg |= HASH_CR_UX500_EMPTYMSG;
-
if (!hdev->polled)
stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
@@ -326,11 +324,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
+ struct stm32_hash_state *state = &rctx->state;
size_t count;
- while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+ while ((state->bufcnt < state->buflen) && rctx->total) {
count = min(rctx->sg->length - rctx->offset, rctx->total);
- count = min(count, rctx->buflen - rctx->bufcnt);
+ count = min_t(size_t, count, state->buflen - state->bufcnt);
if (count <= 0) {
if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -341,10 +340,10 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
}
}
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
- rctx->offset, count, 0);
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt,
+ rctx->sg, rctx->offset, count, 0);
- rctx->bufcnt += count;
+ state->bufcnt += count;
rctx->offset += count;
rctx->total -= count;
@@ -361,13 +360,23 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
const u8 *buf, size_t length, int final)
{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
unsigned int count, len32;
const u32 *buffer = (const u32 *)buf;
u32 reg;
- if (final)
+ if (final) {
hdev->flags |= HASH_FLAGS_FINAL;
+ /* Do not process empty messages if the hardware is buggy. */
+ if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
+ hdev->pdata->broken_emptymsg) {
+ state->flags |= HASH_FLAGS_EMPTY;
+ return 0;
+ }
+ }
+
len32 = DIV_ROUND_UP(length, sizeof(u32));
dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
@@ -413,36 +422,48 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
+ u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
+ int i;
- dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+ dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
- final = (rctx->flags & HASH_FLAGS_FINUP);
+ final = state->flags & HASH_FLAGS_FINAL;
- while ((rctx->total >= rctx->buflen) ||
- (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+ while ((rctx->total >= state->buflen) ||
+ (state->bufcnt + rctx->total >= state->buflen)) {
stm32_hash_append_sg(rctx);
- bufcnt = rctx->bufcnt;
- rctx->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+ bufcnt = state->bufcnt;
+ state->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
+ if (err)
+ return err;
}
stm32_hash_append_sg(rctx);
if (final) {
- bufcnt = rctx->bufcnt;
- rctx->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);
-
- /* If we have an IRQ, wait for that, else poll for completion */
- if (hdev->polled) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- hdev->flags |= HASH_FLAGS_OUTPUT_READY;
- err = 0;
- }
+ bufcnt = state->bufcnt;
+ state->bufcnt = 0;
+ return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
}
+ if (!(hdev->flags & HASH_FLAGS_INIT))
+ return 0;
+
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ state->flags |= HASH_FLAGS_INIT;
+
return err;
}
@@ -584,10 +605,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ u32 *buffer = (void *)rctx->state.buffer;
struct scatterlist sg[1], *tsg;
int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
- u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@@ -615,7 +636,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
ncp = sg_pcopy_to_buffer(
rctx->sg, rctx->nents,
- rctx->buffer, sg->length - len,
+ rctx->state.buffer, sg->length - len,
rctx->total - sg->length + len);
sg->length = len;
@@ -726,47 +747,52 @@ static int stm32_hash_init(struct ahash_request *req)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_state *state = &rctx->state;
rctx->hdev = hdev;
- rctx->flags = HASH_FLAGS_CPU;
+ state->flags = HASH_FLAGS_CPU;
rctx->digcnt = crypto_ahash_digestsize(tfm);
switch (rctx->digcnt) {
case MD5_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_MD5;
+ state->flags |= HASH_FLAGS_MD5;
break;
case SHA1_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA1;
+ state->flags |= HASH_FLAGS_SHA1;
break;
case SHA224_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA224;
+ state->flags |= HASH_FLAGS_SHA224;
break;
case SHA256_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA256;
+ state->flags |= HASH_FLAGS_SHA256;
break;
default:
return -EINVAL;
}
- rctx->bufcnt = 0;
- rctx->buflen = HASH_BUFLEN;
+ rctx->state.bufcnt = 0;
+ rctx->state.buflen = HASH_BUFLEN;
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
- memset(rctx->buffer, 0, HASH_BUFLEN);
-
if (ctx->flags & HASH_FLAGS_HMAC)
- rctx->flags |= HASH_FLAGS_HMAC;
+ state->flags |= HASH_FLAGS_HMAC;
- dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+ dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
return 0;
}
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
+
+ if (!(state->flags & HASH_FLAGS_CPU))
+ return stm32_hash_dma_send(hdev);
+
return stm32_hash_update_cpu(hdev);
}
@@ -774,26 +800,15 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
struct ahash_request *req = hdev->req;
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- int err;
- int buflen = rctx->bufcnt;
+ struct stm32_hash_state *state = &rctx->state;
+ int buflen = state->bufcnt;
- rctx->bufcnt = 0;
+ if (state->flags & HASH_FLAGS_FINUP)
+ return stm32_hash_update_req(hdev);
- if (!(rctx->flags & HASH_FLAGS_CPU))
- err = stm32_hash_dma_send(hdev);
- else
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+ state->bufcnt = 0;
- /* If we have an IRQ, wait for that, else poll for completion */
- if (hdev->polled) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- hdev->flags |= HASH_FLAGS_OUTPUT_READY;
- /* Caller will call stm32_hash_finish_req() */
- err = 0;
- }
-
- return err;
+ return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
}
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
@@ -828,14 +843,15 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
static void stm32_hash_copy_hash(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
struct stm32_hash_dev *hdev = rctx->hdev;
__be32 *hash = (void *)rctx->digest;
unsigned int i, hashsize;
- if (hdev->pdata->broken_emptymsg && !req->nbytes)
+ if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
return stm32_hash_emptymsg_fallback(req);
- switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ switch (state->flags & HASH_FLAGS_ALGO_MASK) {
case HASH_FLAGS_MD5:
hashsize = MD5_DIGEST_SIZE;
break;
@@ -882,13 +898,6 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
stm32_hash_copy_hash(req);
err = stm32_hash_finish(req);
- hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
- HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
- HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
- HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
- HASH_FLAGS_HMAC_KEY);
- } else {
- rctx->flags |= HASH_FLAGS_ERRORS;
}
pm_runtime_mark_last_busy(hdev->dev);
@@ -897,73 +906,70 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
crypto_finalize_hash_request(hdev->engine, req, err);
}
-static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
- struct stm32_hash_request_ctx *rctx)
-{
- pm_runtime_get_sync(hdev->dev);
-
- if (!(HASH_FLAGS_INIT & hdev->flags)) {
- stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
- stm32_hash_write(hdev, HASH_STR, 0);
- stm32_hash_write(hdev, HASH_DIN, 0);
- stm32_hash_write(hdev, HASH_IMR, 0);
- hdev->err = 0;
- }
-
- return 0;
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
-
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
struct ahash_request *req)
{
return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
+ struct stm32_hash_state *state = &rctx->state;
+ int err = 0;
if (!hdev)
return -ENODEV;
- hdev->req = req;
-
- rctx = ahash_request_ctx(req);
-
dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
rctx->op, req->nbytes);
- return stm32_hash_hw_init(hdev, rctx);
-}
+ pm_runtime_get_sync(hdev->dev);
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
- int err = 0;
+ hdev->req = req;
+ hdev->flags = 0;
+
+ if (state->flags & HASH_FLAGS_INIT) {
+ u32 *preg = rctx->state.hw_context;
+ u32 reg;
+ int i;
+
+ if (!hdev->pdata->ux500)
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
- if (!hdev)
- return -ENODEV;
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
- hdev->req = req;
+ hdev->flags |= HASH_FLAGS_INIT;
- rctx = ahash_request_ctx(req);
+ if (state->flags & HASH_FLAGS_HMAC)
+ hdev->flags |= HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_KEY;
+ }
if (rctx->op == HASH_OP_UPDATE)
err = stm32_hash_update_req(hdev);
else if (rctx->op == HASH_OP_FINAL)
err = stm32_hash_final_req(hdev);
+ /* If we have an IRQ, wait for that, else poll for completion */
+ if (err == -EINPROGRESS && hdev->polled) {
+ if (stm32_hash_wait_busy(hdev))
+ err = -ETIMEDOUT;
+ else {
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ err = 0;
+ }
+ }
+
if (err != -EINPROGRESS)
/* done task will not finish it, so do it here */
stm32_hash_finish_req(req, err);
@@ -985,15 +991,16 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
static int stm32_hash_update(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
- if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+ if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
return 0;
rctx->total = req->nbytes;
rctx->sg = req->src;
rctx->offset = 0;
- if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+ if ((state->bufcnt + rctx->total < state->buflen)) {
stm32_hash_append_sg(rctx);
return 0;
}
@@ -1004,8 +1011,9 @@ static int stm32_hash_update(struct ahash_request *req)
static int stm32_hash_final(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
- rctx->flags |= HASH_FLAGS_FINUP;
+ state->flags |= HASH_FLAGS_FINAL;
return stm32_hash_enqueue(req, HASH_OP_FINAL);
}
@@ -1015,25 +1023,21 @@ static int stm32_hash_finup(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- int err1, err2;
+ struct stm32_hash_state *state = &rctx->state;
- rctx->flags |= HASH_FLAGS_FINUP;
+ if (!req->nbytes)
+ goto out;
- if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
- rctx->flags &= ~HASH_FLAGS_CPU;
-
- err1 = stm32_hash_update(req);
-
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
- return err1;
+ state->flags |= HASH_FLAGS_FINUP;
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- err2 = stm32_hash_final(req);
+ if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+ state->flags &= ~HASH_FLAGS_CPU;
- return err1 ?: err2;
+out:
+ return stm32_hash_final(req);
}
static int stm32_hash_digest(struct ahash_request *req)
@@ -1044,35 +1048,8 @@ static int stm32_hash_digest(struct ahash_request *req)
static int stm32_hash_export(struct ahash_request *req, void *out)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- u32 *preg;
- unsigned int i;
- int ret;
- pm_runtime_get_sync(hdev->dev);
-
- ret = stm32_hash_wait_busy(hdev);
- if (ret)
- return ret;
-
- rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
- sizeof(u32),
- GFP_KERNEL);
-
- preg = rctx->hw_context;
-
- if (!hdev->pdata->ux500)
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
- *preg++ = stm32_hash_read(hdev, HASH_STR);
- *preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
-
- memcpy(out, rctx, sizeof(*rctx));
+ memcpy(out, &rctx->state, sizeof(rctx->state));
return 0;
}
@@ -1080,32 +1057,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- const u32 *preg = in;
- u32 reg;
- unsigned int i;
-
- memcpy(rctx, in, sizeof(*rctx));
-
- preg = rctx->hw_context;
-
- pm_runtime_get_sync(hdev->dev);
-
- if (!hdev->pdata->ux500)
- stm32_hash_write(hdev, HASH_IMR, *preg++);
- stm32_hash_write(hdev, HASH_STR, *preg++);
- stm32_hash_write(hdev, HASH_CR, *preg);
- reg = *preg++ | HASH_CR_INIT;
- stm32_hash_write(hdev, HASH_CR, reg);
-
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- stm32_hash_write(hdev, HASH_CSR(i), *preg++);
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
- kfree(rctx->hw_context);
+ stm32_hash_init(req);
+ memcpy(&rctx->state, in, sizeof(rctx->state));
return 0;
}
@@ -1162,8 +1116,6 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
ctx->flags |= HASH_FLAGS_HMAC;
ctx->enginectx.op.do_one_request = stm32_hash_one_request;
- ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
return stm32_hash_init_fallback(tfm);
}
@@ -1255,7 +1207,7 @@ static struct ahash_alg algs_md5[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
@@ -1282,7 +1234,7 @@ static struct ahash_alg algs_md5[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
@@ -1311,7 +1263,7 @@ static struct ahash_alg algs_sha1[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
@@ -1338,7 +1290,7 @@ static struct ahash_alg algs_sha1[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
@@ -1367,7 +1319,7 @@ static struct ahash_alg algs_sha224[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
@@ -1394,7 +1346,7 @@ static struct ahash_alg algs_sha224[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
@@ -1423,7 +1375,7 @@ static struct ahash_alg algs_sha256[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
@@ -1450,7 +1402,7 @@ static struct ahash_alg algs_sha256[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
@@ -1616,8 +1568,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (!hdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdev->io_base = devm_ioremap_resource(dev, res);
+ hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hdev->io_base))
return PTR_ERR(hdev->io_base);
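
The net effect of the stm32-hash rework is that everything export()/import() must carry now lives in one flat struct stm32_hash_state, including the saved CSR block, so .statesize is exact and no kmalloc'd hw_context pointer can leak or dangle. Callers see this through the generic ahash API; a minimal sketch using the standard crypto_ahash_export()/crypto_ahash_import() calls (demo_migrate() is illustrative):

    #include <crypto/hash.h>
    #include <linux/slab.h>

    /* Move a partial hash from one request to another via the exported
     * state; the buffer must be at least crypto_ahash_statesize(tfm)
     * bytes, which for stm32-hash is now sizeof(struct stm32_hash_state). */
    static int demo_migrate(struct crypto_ahash *tfm,
                            struct ahash_request *src,
                            struct ahash_request *dst)
    {
            void *state;
            int ret;

            state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
            if (!state)
                    return -ENOMEM;

            ret = crypto_ahash_export(src, state);
            if (!ret)
                    ret = crypto_ahash_import(dst, state);

            kfree(state);
            return ret;
    }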
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 25eb4e8fd22f..4b4323bbf268 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -566,9 +566,12 @@ config I2C_DESIGNWARE_PLATFORM
config I2C_DESIGNWARE_AMDPSP
bool "AMD PSP I2C semaphore support"
- depends on X86_MSR
depends on ACPI
+ depends on CRYPTO_DEV_SP_PSP
+ depends on PCI
depends on I2C_DESIGNWARE_PLATFORM
+ depends on (I2C_DESIGNWARE_PLATFORM=y && CRYPTO_DEV_CCP_DD=y) || \
+ (I2C_DESIGNWARE_PLATFORM=m && CRYPTO_DEV_CCP_DD)
help
This driver enables managed host access to the selected I2C bus shared
between AMD CPU and AMD PSP.
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index 8f36167bce62..63454b06e5da 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -1,41 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitfield.h>
-#include <linux/bits.h>
#include <linux/i2c.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/psp-sev.h>
-#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/psp-platform-access.h>
+#include <linux/psp.h>
#include <linux/workqueue.h>
-#include <asm/msr.h>
-
#include "i2c-designware-core.h"
-#define MSR_AMD_PSP_ADDR 0xc00110a2
-#define PSP_MBOX_OFFSET 0x10570
-#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
-
#define PSP_I2C_RESERVATION_TIME_MS 100
-#define PSP_I2C_REQ_BUS_CMD 0x64
#define PSP_I2C_REQ_RETRY_CNT 400
#define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC)
#define PSP_I2C_REQ_STS_OK 0x0
#define PSP_I2C_REQ_STS_BUS_BUSY 0x1
#define PSP_I2C_REQ_STS_INV_PARAM 0x3
-#define PSP_MBOX_FIELDS_STS GENMASK(15, 0)
-#define PSP_MBOX_FIELDS_CMD GENMASK(23, 16)
-#define PSP_MBOX_FIELDS_RESERVED GENMASK(29, 24)
-#define PSP_MBOX_FIELDS_RECOVERY BIT(30)
-#define PSP_MBOX_FIELDS_READY BIT(31)
-
-struct psp_req_buffer_hdr {
- u32 total_size;
- u32 status;
-};
-
enum psp_i2c_req_type {
PSP_I2C_REQ_ACQUIRE,
PSP_I2C_REQ_RELEASE,
@@ -47,118 +27,13 @@ struct psp_i2c_req {
enum psp_i2c_req_type type;
};
-struct psp_mbox {
- u32 cmd_fields;
- u64 i2c_req_addr;
-} __packed;
-
static DEFINE_MUTEX(psp_i2c_access_mutex);
static unsigned long psp_i2c_sem_acquired;
-static void __iomem *mbox_iomem;
static u32 psp_i2c_access_count;
static bool psp_i2c_mbox_fail;
static struct device *psp_i2c_dev;
-/*
- * Implementation of PSP-x86 i2c-arbitration mailbox introduced for AMD Cezanne
- * family of SoCs.
- */
-
-static int psp_get_mbox_addr(unsigned long *mbox_addr)
-{
- unsigned long long psp_mmio;
-
- if (rdmsrl_safe(MSR_AMD_PSP_ADDR, &psp_mmio))
- return -EIO;
-
- *mbox_addr = (unsigned long)(psp_mmio + PSP_MBOX_OFFSET);
-
- return 0;
-}
-
-static int psp_mbox_probe(void)
-{
- unsigned long mbox_addr;
- int ret;
-
- ret = psp_get_mbox_addr(&mbox_addr);
- if (ret)
- return ret;
-
- mbox_iomem = ioremap(mbox_addr, sizeof(struct psp_mbox));
- if (!mbox_iomem)
- return -ENOMEM;
-
- return 0;
-}
-
-/* Recovery field should be equal 0 to start sending commands */
-static int psp_check_mbox_recovery(struct psp_mbox __iomem *mbox)
-{
- u32 tmp;
-
- tmp = readl(&mbox->cmd_fields);
-
- return FIELD_GET(PSP_MBOX_FIELDS_RECOVERY, tmp);
-}
-
-static int psp_wait_cmd(struct psp_mbox __iomem *mbox)
-{
- u32 tmp, expected;
-
- /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
- expected = FIELD_PREP(PSP_MBOX_FIELDS_READY, 1);
-
- /*
- * Check for readiness of PSP mailbox in a tight loop in order to
- * process further as soon as command was consumed.
- */
- return readl_poll_timeout(&mbox->cmd_fields, tmp, (tmp == expected),
- 0, PSP_CMD_TIMEOUT_US);
-}
-
-/* Status equal to 0 means that PSP succeed processing command */
-static u32 psp_check_mbox_sts(struct psp_mbox __iomem *mbox)
-{
- u32 cmd_reg;
-
- cmd_reg = readl(&mbox->cmd_fields);
-
- return FIELD_GET(PSP_MBOX_FIELDS_STS, cmd_reg);
-}
-
-static int psp_send_cmd(struct psp_i2c_req *req)
-{
- struct psp_mbox __iomem *mbox = mbox_iomem;
- phys_addr_t req_addr;
- u32 cmd_reg;
-
- if (psp_check_mbox_recovery(mbox))
- return -EIO;
-
- if (psp_wait_cmd(mbox))
- return -EBUSY;
-
- /*
- * Fill mailbox with address of command-response buffer, which will be
- * used for sending i2c requests as well as reading status returned by
- * PSP. Use physical address of buffer, since PSP will map this region.
- */
- req_addr = __psp_pa((void *)req);
- writeq(req_addr, &mbox->i2c_req_addr);
-
- /* Write command register to trigger processing */
- cmd_reg = FIELD_PREP(PSP_MBOX_FIELDS_CMD, PSP_I2C_REQ_BUS_CMD);
- writel(cmd_reg, &mbox->cmd_fields);
-
- if (psp_wait_cmd(mbox))
- return -ETIMEDOUT;
-
- if (psp_check_mbox_sts(mbox))
- return -EIO;
-
- return 0;
-}
+static int (*_psp_send_i2c_req)(struct psp_i2c_req *req);
/* Helper to verify status returned by PSP */
static int check_i2c_req_sts(struct psp_i2c_req *req)
@@ -179,22 +54,36 @@ static int check_i2c_req_sts(struct psp_i2c_req *req)
}
}
-static int psp_send_check_i2c_req(struct psp_i2c_req *req)
+/*
+ * Errors in x86-PSP i2c-arbitration protocol may occur at two levels:
+ * 1. mailbox communication - PSP is not operational, or basic IO errors
+ * have occurred.
+ * 2. i2c-requests - PSP refuses to grant i2c arbitration to x86 for too long.
+ *
+ * In order to distinguish between these in the error handling code, all
+ * mailbox communication errors on the first level (from CCP symbols) are
+ * passed up, and if -EIO is returned the second level is checked.
+ */
+static int psp_send_i2c_req_cezanne(struct psp_i2c_req *req)
{
- /*
- * Errors in x86-PSP i2c-arbitration protocol may occur at two levels:
- * 1. mailbox communication - PSP is not operational or some IO errors
- * with basic communication had happened;
- * 2. i2c-requests - PSP refuses to grant i2c arbitration to x86 for too
- * long.
- * In order to distinguish between these two in error handling code, all
- * errors on the first level (returned by psp_send_cmd) are shadowed by
- * -EIO.
- */
- if (psp_send_cmd(req))
- return -EIO;
+ int ret;
+
+ ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD, (struct psp_request *)req);
+ if (ret == -EIO)
+ return check_i2c_req_sts(req);
- return check_i2c_req_sts(req);
+ return ret;
+}
+
+static int psp_send_i2c_req_doorbell(struct psp_i2c_req *req)
+{
+ int ret;
+
+ ret = psp_ring_platform_doorbell(req->type, &req->hdr.status);
+ if (ret == -EIO)
+ return check_i2c_req_sts(req);
+
+ return ret;
}
static int psp_send_i2c_req(enum psp_i2c_req_type i2c_req_type)
@@ -208,11 +97,11 @@ static int psp_send_i2c_req(enum psp_i2c_req_type i2c_req_type)
if (!req)
return -ENOMEM;
- req->hdr.total_size = sizeof(*req);
+ req->hdr.payload_size = sizeof(*req);
req->type = i2c_req_type;
start = jiffies;
- ret = read_poll_timeout(psp_send_check_i2c_req, status,
+ ret = read_poll_timeout(_psp_send_i2c_req, status,
(status != -EBUSY),
PSP_I2C_REQ_RETRY_DELAY_US,
PSP_I2C_REQ_RETRY_CNT * PSP_I2C_REQ_RETRY_DELAY_US,
@@ -387,7 +276,10 @@ static const struct i2c_lock_operations i2c_dw_psp_lock_ops = {
int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
{
- int ret;
+ struct pci_dev *rdev;
+
+ if (!IS_REACHABLE(CONFIG_CRYPTO_DEV_CCP_DD))
+ return -ENODEV;
if (!dev)
return -ENODEV;
@@ -399,11 +291,18 @@ int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
if (psp_i2c_dev)
return -EEXIST;
- psp_i2c_dev = dev->dev;
+ /* Cezanne uses platform mailbox, Mendocino and later use doorbell */
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (rdev->device == 0x1630)
+ _psp_send_i2c_req = psp_send_i2c_req_cezanne;
+ else
+ _psp_send_i2c_req = psp_send_i2c_req_doorbell;
+ pci_dev_put(rdev);
- ret = psp_mbox_probe();
- if (ret)
- return ret;
+ if (psp_check_platform_access_status())
+ return -EPROBE_DEFER;
+
+ psp_i2c_dev = dev->dev;
dev_info(psp_i2c_dev, "I2C bus managed by AMD PSP\n");
@@ -417,9 +316,3 @@ int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-
-/* Unmap area used as a mailbox with PSP */
-void i2c_dw_amdpsp_remove_lock_support(struct dw_i2c_dev *dev)
-{
- iounmap(mbox_iomem);
-}
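
The retry loop above survives the rework unchanged because read_poll_timeout() abstracts over any callable, not just register reads: it keeps invoking the function, stores the return value, and gives up after the timeout. A standalone sketch of the same shape (demo_try_acquire() is hypothetical):

    #include <linux/iopoll.h>

    static int demo_try_acquire(void *arg)
    {
            /* Hypothetical operation that may transiently return -EBUSY. */
            return 0;
    }

    static int demo_acquire_with_retry(void *arg)
    {
            int status, ret;

            /* Re-invoke demo_try_acquire(arg) every 25 ms until it stops
             * returning -EBUSY or ~10 s elapse; read_poll_timeout() returns
             * -ETIMEDOUT on expiry, 0 otherwise. */
            ret = read_poll_timeout(demo_try_acquire, status, status != -EBUSY,
                                    25 * USEC_PER_MSEC,
                                    400 * 25 * USEC_PER_MSEC, 0, arg);

            return ret ?: status;
    }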
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 050d8c63ad3c..c5d87aae39c6 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -383,7 +383,6 @@ int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_AMDPSP)
int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev);
-void i2c_dw_amdpsp_remove_lock_support(struct dw_i2c_dev *dev);
#endif
int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 74182db03a88..89ad88c54754 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -214,7 +214,6 @@ static const struct i2c_dw_semaphore_callbacks i2c_dw_semaphore_cb_table[] = {
#ifdef CONFIG_I2C_DESIGNWARE_AMDPSP
{
.probe = i2c_dw_amdpsp_probe_lock_support,
- .remove = i2c_dw_amdpsp_remove_lock_support,
},
#endif
{}
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index cec6e70f0ac9..e8cd9aaa3467 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -8,7 +8,7 @@
#include <linux/tee_drv.h>
#include <linux/psp-tee.h>
#include <linux/slab.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include "amdtee_if.h"
#include "amdtee_private.h"
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
index f87f96a291c9..f0303126f199 100644
--- a/drivers/tee/amdtee/shm_pool.c
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -5,7 +5,7 @@
#include <linux/slab.h>
#include <linux/tee_drv.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include "amdtee_private.h"
static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e4bc96528902..574cffc90730 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -8,6 +8,9 @@
*/
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
@@ -53,37 +56,35 @@ struct crypto_acomp {
struct crypto_tfm base;
};
-/**
- * struct acomp_alg - asynchronous compression algorithm
- *
- * @compress: Function performs a compress operation
- * @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
- *
- * @reqsize: Context size for (de)compression requests
- * @base: Common crypto API algorithm data structure
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
*/
-struct acomp_alg {
- int (*compress)(struct acomp_req *req);
- int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
- int (*init)(struct crypto_acomp *tfm);
- void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
- struct crypto_alg base;
+struct crypto_istat_compress {
+ atomic64_t compress_cnt;
+ atomic64_t compress_tlen;
+ atomic64_t decompress_cnt;
+ atomic64_t decompress_tlen;
+ atomic64_t err_cnt;
};
+#ifdef CONFIG_CRYPTO_STATS
+#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
+#else
+#define COMP_ALG_COMMON_STATS
+#endif
+
+#define COMP_ALG_COMMON { \
+ COMP_ALG_COMMON_STATS \
+ \
+ struct crypto_alg base; \
+}
+struct comp_alg_common COMP_ALG_COMMON;
+
/**
* DOC: Asynchronous Compression API
*
@@ -131,9 +132,10 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
return &tfm->base;
}
-static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+ struct crypto_alg *alg)
{
- return container_of(alg, struct acomp_alg, base);
+ return container_of(alg, struct comp_alg_common, base);
}
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
@@ -141,9 +143,10 @@ static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_acomp, base);
}
-static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+static inline struct comp_alg_common *crypto_comp_alg_common(
+ struct crypto_acomp *tfm)
{
- return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+ return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
@@ -219,7 +222,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
{
req->base.complete = cmpl;
req->base.data = data;
- req->base.flags = flgs;
+ req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
}
/**
@@ -246,10 +250,32 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
+ req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
if (!req->dst)
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
+static inline struct crypto_istat_compress *comp_get_stat(
+ struct comp_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&comp_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -262,14 +288,18 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->compress(req);
- crypto_stats_compress(slen, ret, alg);
- return ret;
+ struct comp_alg_common *alg;
+
+ alg = crypto_comp_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+ atomic64_inc(&istat->compress_cnt);
+ atomic64_add(req->slen, &istat->compress_tlen);
+ }
+
+ return crypto_comp_errstat(alg, tfm->compress(req));
}
/**
@@ -284,14 +314,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->decompress(req);
- crypto_stats_decompress(slen, ret, alg);
- return ret;
+ struct comp_alg_common *alg;
+
+ alg = crypto_comp_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+ atomic64_inc(&istat->decompress_cnt);
+ atomic64_add(req->slen, &istat->decompress_tlen);
+ }
+
+ return crypto_comp_errstat(alg, tfm->decompress(req));
}
#endif
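
The same accounting pattern repeats in every transform type converted by this series: the counters are embedded in the algorithm object only under CONFIG_CRYPTO_STATS, and IS_ENABLED() keeps the fast path free of the bookkeeping when the option is off while still compile-checking it. Distilled to a sketch with illustrative demo_* names:

    #include <linux/atomic.h>
    #include <linux/kconfig.h>

    struct demo_istat {
            atomic64_t op_cnt;
            atomic64_t err_cnt;
    };

    struct demo_alg {
    #ifdef CONFIG_CRYPTO_STATS
            struct demo_istat stat;
    #endif
            int (*op)(void);
    };

    static inline struct demo_istat *demo_get_stat(struct demo_alg *alg)
    {
    #ifdef CONFIG_CRYPTO_STATS
            return &alg->stat;
    #else
            return NULL;
    #endif
    }

    static inline int demo_do_op(struct demo_alg *alg)
    {
            /* IS_ENABLED() folds to 0 when stats are off, so the compiler
             * drops the accounting entirely while still type-checking it. */
            if (IS_ENABLED(CONFIG_CRYPTO_STATS))
                    atomic64_inc(&demo_get_stat(alg)->op_cnt);

            return alg->op();
    }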
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 4a2b7e6e0c1f..35e45b854a6f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,6 +8,7 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
+#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/slab.h>
@@ -100,6 +101,22 @@ struct aead_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
@@ -118,6 +135,7 @@ struct aead_request {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -144,6 +162,10 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_aead stat;
+#endif
+
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 734c213918bd..f35fd653e4e5 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -7,6 +7,8 @@
*/
#ifndef _CRYPTO_AKCIPHER_H
#define _CRYPTO_AKCIPHER_H
+
+#include <linux/atomic.h>
#include <linux/crypto.h>
/**
@@ -52,6 +54,26 @@ struct crypto_akcipher {
struct crypto_tfm base;
};
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t verify_cnt;
+ atomic64_t sign_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct akcipher_alg - generic public key algorithm
*
@@ -88,6 +110,7 @@ struct crypto_akcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
+ * @stat: Statistics for akcipher algorithm
*
* @base: Common crypto API algorithm data structure
*/
@@ -104,6 +127,10 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_akcipher stat;
+#endif
+
struct crypto_alg base;
};
@@ -275,6 +302,27 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
+static inline struct crypto_istat_akcipher *akcipher_get_stat(
+ struct akcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -289,14 +337,15 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->encrypt(req);
- crypto_stats_akcipher_encrypt(src_len, ret, calg);
- return ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->src_len, &istat->encrypt_tlen);
+ }
+
+ return crypto_akcipher_errstat(alg, alg->encrypt(req));
}
/**
@@ -313,14 +362,15 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->decrypt(req);
- crypto_stats_akcipher_decrypt(src_len, ret, calg);
- return ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->src_len, &istat->decrypt_tlen);
+ }
+
+ return crypto_akcipher_errstat(alg, alg->decrypt(req));
}
/**
@@ -337,13 +387,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->sign(req);
- crypto_stats_akcipher_sign(ret, calg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
+
+ return crypto_akcipher_errstat(alg, alg->sign(req));
}
/**
@@ -364,13 +412,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->verify(req);
- crypto_stats_akcipher_verify(ret, calg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
+
+ return crypto_akcipher_errstat(alg, alg->verify(req));
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index fede394ae2ab..016d5a302b84 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,15 +7,12 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
+#include <crypto/utils.h>
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
-#include <linux/kconfig.h>
-#include <linux/list.h>
#include <linux/types.h>
-#include <asm/unaligned.h>
-
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
@@ -34,11 +31,25 @@
#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
+struct scatterlist;
struct seq_file;
struct sk_buff;
@@ -50,6 +61,9 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
void (*free)(struct crypto_instance *inst);
+#ifdef CONFIG_CRYPTO_STATS
+ int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
+#endif
unsigned int type;
unsigned int maskclear;
@@ -119,6 +133,14 @@ struct crypto_attr_type {
u32 mask;
};
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -156,47 +178,6 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
}
void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
-
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s = (unsigned long *)src;
- unsigned long l;
-
- while (size > 0) {
- l = get_unaligned(d) ^ get_unaligned(s++);
- put_unaligned(l, d++);
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, dst, src, size);
- }
-}
-
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
- unsigned int size)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s1 = (unsigned long *)src1;
- unsigned long *s2 = (unsigned long *)src2;
- unsigned long l;
-
- while (size > 0) {
- l = get_unaligned(s1++) ^ get_unaligned(s2++);
- put_unaligned(l, d++);
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, src1, src2, size);
- }
-}
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
@@ -275,23 +256,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- * timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
- return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
@@ -308,4 +272,9 @@ static inline void crypto_request_complete(struct crypto_async_request *req,
req->complete(req->data, err);
}
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
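
crypto_xor(), crypto_xor_cpy() and crypto_memneq() are not gone; they moved behind the new <crypto/utils.h> include pulled in at the top of this header. The typical crypto_memneq() use case is a constant-time tag comparison, where a plain memcmp() could leak how many leading bytes matched. A brief sketch:

    #include <crypto/utils.h>
    #include <linux/types.h>

    /* Constant-time authentication-tag check; returns true when equal. */
    static bool demo_tag_ok(const u8 *calc, const u8 *recv, size_t len)
    {
            return crypto_memneq(calc, recv, len) == 0;
    }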
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index f5841992dc9b..e69542d86a2b 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,6 +8,7 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
+#include <linux/atomic.h>
#include <linux/crypto.h>
#include <linux/string.h>
@@ -22,8 +23,27 @@ struct crypto_ahash;
* crypto_unregister_shash().
*/
-/**
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of errors for hash requests
+ */
+struct crypto_istat_hash {
+ atomic64_t hash_cnt;
+ atomic64_t hash_tlen;
+ atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
+#else
+#define HASH_ALG_COMMON_STAT
+#endif
+
+/*
* struct hash_alg_common - define properties of message digest
+ * @stat: Statistics for hash algorithm.
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
* store the resulting hash into it. For various predefined sizes,
@@ -39,12 +59,15 @@ struct crypto_ahash;
* The hash_alg_common data structure now adds the hash-specific
* information.
*/
-struct hash_alg_common {
- unsigned int digestsize;
- unsigned int statesize;
-
- struct crypto_alg base;
-};
+#define HASH_ALG_COMMON { \
+ HASH_ALG_COMMON_STAT \
+ \
+ unsigned int digestsize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct hash_alg_common HASH_ALG_COMMON;
struct ahash_request {
struct crypto_async_request base;
@@ -129,6 +152,7 @@ struct ahash_request {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
+ * @clone_tfm: Copy the transform into a new object; may allocate memory.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -143,6 +167,7 @@ struct ahash_alg {
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
void (*exit_tfm)(struct crypto_ahash *tfm);
+ int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
struct hash_alg_common halg;
};
@@ -160,8 +185,6 @@ struct shash_desc {
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
-#define HASH_MAX_STATESIZE 512
-
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
@@ -188,12 +211,16 @@ struct shash_desc {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
+ * @clone_tfm: Copy the transform into a new object; may allocate memory.
* @digestsize: see struct ahash_alg
* @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
+ * @stat: Statistics for hash algorithm.
* @base: internally used
+ * @halg: see struct hash_alg_common
+ * @HASH_ALG_COMMON: see struct hash_alg_common
*/
struct shash_alg {
int (*init)(struct shash_desc *desc);
@@ -210,16 +237,17 @@ struct shash_alg {
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
void (*exit_tfm)(struct crypto_shash *tfm);
+ int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
unsigned int descsize;
- /* These fields must match hash_alg_common. */
- unsigned int digestsize
- __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
- unsigned int statesize;
-
- struct crypto_alg base;
+ union {
+ struct HASH_ALG_COMMON;
+ struct hash_alg_common halg;
+ };
};
+#undef HASH_ALG_COMMON
+#undef HASH_ALG_COMMON_STAT
struct crypto_ahash {
int (*init)(struct ahash_request *req);
@@ -273,6 +301,8 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
+
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
@@ -535,6 +565,27 @@ static inline int crypto_ahash_init(struct ahash_request *req)
return tfm->init(req);
}
+static inline struct crypto_istat_hash *hash_get_stat(
+ struct hash_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&hash_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_ahash_update() - add data to message digest for processing
* @req: ahash_request handle that was previously initialized with the
@@ -549,14 +600,12 @@ static inline int crypto_ahash_init(struct ahash_request *req)
static inline int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen);
- crypto_stats_get(alg);
- ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stats_ahash_update(nbytes, ret, alg);
- return ret;
+ return crypto_hash_errstat(alg, tfm->update(req));
}
/**
@@ -718,6 +767,8 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
+
int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
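
The new crypto_clone_shash()/crypto_clone_ahash() entry points copy an existing transform, including any key already set, which callers previously had to approximate by allocating a fresh tfm and re-keying it. A hedged usage sketch (the helper name and trimmed error handling are illustrative):

#include <crypto/hash.h>

static int digest_with_clone(struct crypto_shash *parent,
			     const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, parent);
	struct crypto_shash *clone;
	int err;

	clone = crypto_clone_shash(parent);	/* may allocate memory */
	if (IS_ERR(clone))
		return PTR_ERR(clone);

	desc->tfm = clone;
	err = crypto_shash_digest(desc, data, len, out);

	shash_desc_zero(desc);
	crypto_free_shash(clone);
	return err;
}
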
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 978b57a3f4f0..4ac46bafba9d 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -12,6 +12,44 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @stat: Statistics for compress algorithm
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+
+ unsigned int reqsize;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
+
/*
* Transform internal helpers.
*/
@@ -31,11 +69,6 @@ static inline void acomp_request_complete(struct acomp_req *req,
crypto_request_complete(&req->base, err);
}
-static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
-{
- return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
-}
-
static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
{
struct acomp_req *req;
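
With struct acomp_alg now defined in this internal header, an implementation fills in the callbacks and the shared base inside the new union. A hedged skeleton (all names and the placeholder bodies are illustrative; registration goes through the existing crypto_register_acomp() declared elsewhere in this header):

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int example_compress(struct acomp_req *req)
{
	return -EOPNOTSUPP;	/* placeholder body */
}

static int example_decompress(struct acomp_req *req)
{
	return -EOPNOTSUPP;	/* placeholder body */
}

static struct acomp_alg example_acomp = {
	.compress	= example_compress,
	.decompress	= example_decompress,
	.base		= {	/* reaches through the anonymous COMP_ALG_COMMON struct */
		.cra_name	 = "example",
		.cra_driver_name = "example-acomp",
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered with crypto_register_acomp(&example_acomp) at module init. */
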
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0b259dbb97af..37edf3f4e8af 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -133,8 +133,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 252cc949d4ee..858fe3965ae3 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,10 +9,13 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
+#include <crypto/acompress.h>
#include <crypto/algapi.h>
#define SCOMP_SCRATCH_SIZE 131072
+struct acomp_req;
+
struct crypto_scomp {
struct crypto_tfm base;
};
@@ -24,7 +27,9 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
+ * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -35,7 +40,11 @@ struct scomp_alg {
int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- struct crypto_alg base;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
};
static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
@@ -90,10 +99,6 @@ static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
ctx);
}
-int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-
/**
* crypto_register_scomp() -- Register synchronous compression algorithm
*
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 33ff32878802..1988e24a0d1d 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -8,7 +8,11 @@
#ifndef _CRYPTO_KPP_
#define _CRYPTO_KPP_
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/slab.h>
/**
* struct kpp_request
@@ -47,6 +51,20 @@ struct crypto_kpp {
struct crypto_tfm base;
};
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+ atomic64_t setsecret_cnt;
+ atomic64_t generate_public_key_cnt;
+ atomic64_t compute_shared_secret_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct kpp_alg - generic key-agreement protocol primitives
*
@@ -69,6 +87,7 @@ struct crypto_kpp {
* @exit: Undo everything @init did.
*
* @base: Common crypto API algorithm data structure
+ * @stat: Statistics for KPP algorithm
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -81,6 +100,10 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_kpp stat;
+#endif
+
struct crypto_alg base;
};
@@ -268,6 +291,26 @@ struct kpp_secret {
unsigned short len;
};
+static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&kpp_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -287,13 +330,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->set_secret(tfm, buffer, len);
- crypto_stats_kpp_set_secret(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
+
+ return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
}
/**
@@ -313,13 +354,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->generate_public_key(req);
- crypto_stats_kpp_generate_public_key(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
+
+ return crypto_kpp_errstat(alg, alg->generate_public_key(req));
}
/**
@@ -336,13 +375,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->compute_shared_secret(req);
- crypto_stats_kpp_compute_shared_secret(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
+
+ return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
}
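
With the wrappers above, every KPP call site now charges the per-algorithm counters inline instead of going through the removed crypto_stats_*() helpers. A hedged sketch of driving one of them through the request API (the helper name and trimmed error paths are illustrative):

#include <crypto/kpp.h>

static int kpp_make_public(struct crypto_kpp *tfm,
			   struct scatterlist *dst, unsigned int dlen)
{
	struct kpp_request *req = kpp_request_alloc(tfm, GFP_KERNEL);
	int err;

	if (!req)
		return -ENOMEM;

	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, dst, dlen);

	/* Bumps generate_public_key_cnt when CONFIG_CRYPTO_STATS=y. */
	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
	return err;
}
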
/**
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 17bb3673d3c1..6abe5102e5fb 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -9,10 +9,26 @@
#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
struct crypto_rng;
+/*
+ * struct crypto_istat_rng - statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total size of the data generated by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+ atomic64_t generate_cnt;
+ atomic64_t generate_tlen;
+ atomic64_t seed_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct rng_alg - random number generator definition
*
@@ -30,6 +46,7 @@ struct crypto_rng;
* size of the seed is defined with @seedsize .
* @set_ent: Set entropy that would otherwise be obtained from
* entropy source. Internal use only.
+ * @stat: Statistics for rng algorithm
* @seedsize: The seed size required for a random number generator
* initialization defined with this variable. Some
* random number generators does not require a seed
@@ -46,6 +63,10 @@ struct rng_alg {
void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
unsigned int len);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_rng stat;
+#endif
+
unsigned int seedsize;
struct crypto_alg base;
@@ -94,6 +115,11 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct rng_alg, base);
+}
+
/**
* crypto_rng_alg - obtain name of RNG
* @tfm: cipher handle
@@ -104,8 +130,7 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return container_of(crypto_rng_tfm(tfm)->__crt_alg,
- struct rng_alg, base);
+ return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
}
/**
@@ -119,6 +144,26 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
+static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&rng_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -137,13 +182,17 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
- int ret;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_rng *istat = rng_get_stat(alg);
+
+ atomic64_inc(&istat->generate_cnt);
+ atomic64_add(dlen, &istat->generate_tlen);
+ }
- crypto_stats_get(alg);
- ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stats_rng_generate(alg, dlen, ret);
- return ret;
+ return crypto_rng_errstat(alg,
+ alg->generate(tfm, src, slen, dst, dlen));
}
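
crypto_rng_generate() now accounts generate_cnt and generate_tlen inline before dispatching to the algorithm. A hedged consumer-side sketch (the helper name is illustrative; "stdrng" is the conventional default RNG name):

#include <crypto/rng.h>

static int get_random_block(u8 *buf, unsigned int len,
			    const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
	int err;

	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_reset(rng, seed, slen);
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len); /* wraps crypto_rng_generate() */

	crypto_free_rng(rng);
	return err;
}
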
/**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 39f5b67c3069..080d1ba3611d 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -8,6 +8,7 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/slab.h>
@@ -48,6 +49,22 @@ struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
@@ -101,6 +118,7 @@ struct crypto_sync_skcipher {
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
+ * @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*
* All fields except @ivsize are mandatory and must be filled.
@@ -119,6 +137,10 @@ struct skcipher_alg {
unsigned int chunksize;
unsigned int walksize;
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_cipher stat;
+#endif
+
struct crypto_alg base;
};
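
The new @stat block is maintained entirely by the core wrappers, so neither skcipher implementations nor their users change. For context, a hedged caller-side sketch (helper name illustrative; async tfms may instead complete with -EINPROGRESS):

#include <crypto/skcipher.h>

static int encrypt_in_place(struct crypto_skcipher *tfm,
			    struct scatterlist *sg, unsigned int len, u8 *iv)
{
	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
	int err;

	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	return err;
}
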
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
new file mode 100644
index 000000000000..acbb917a00c6
--- /dev/null
+++ b/include/crypto/utils.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic utilities
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_UTILS_H
+#define _CRYPTO_UTILS_H
+
+#include <asm/unaligned.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s = (unsigned long *)src;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, dst, src, size);
+ }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, src1, src2, size);
+ }
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+#endif /* _CRYPTO_UTILS_H */
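
The helpers carried over into <crypto/utils.h> keep their semantics: crypto_xor() XORs in place with a word-at-a-time fast path for constant, word-multiple sizes, and crypto_memneq() compares buffers without data-dependent timing. A hedged pair of usage sketches (function names are illustrative):

#include <crypto/utils.h>

/* block ^= iv, e.g. CBC pre-whitening; bs is the cipher block size. */
static void xor_iv(u8 *block, const u8 *iv, unsigned int bs)
{
	crypto_xor(block, iv, bs);
}

/* Constant-time authentication tag check: true when the tags match. */
static bool tag_matches(const u8 *calc, const u8 *recv, unsigned int len)
{
	return crypto_memneq(calc, recv, len) == 0;
}
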
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index bb1d9b0e1647..fa310ac1db59 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -12,26 +12,10 @@
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
+#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/completion.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/types.h>
/*
* Algorithm masks and types.
@@ -171,10 +155,9 @@
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
-struct scatterlist;
-struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;
+struct module;
typedef void (*crypto_completion_t)(void *req, int err);
@@ -275,116 +258,6 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-#ifdef CONFIG_CRYPTO_STATS
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for AEAD requests
- */
-struct crypto_istat_aead {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @verify_cnt: number of verify operation
- * @sign_cnt: number of sign requests
- * @err_cnt: number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t verify_cnt;
- atomic64_t sign_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt: number of compress requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_cnt: number of decompress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @err_cnt: number of error for compress requests
- */
-struct crypto_istat_compress {
- atomic64_t compress_cnt;
- atomic64_t compress_tlen;
- atomic64_t decompress_cnt;
- atomic64_t decompress_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt: number of hash requests
- * @hash_tlen: total data size hashed
- * @err_cnt: number of error for hash requests
- */
-struct crypto_istat_hash {
- atomic64_t hash_cnt;
- atomic64_t hash_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt: number of error for KPP requests
- */
-struct crypto_istat_kpp {
- atomic64_t setsecret_cnt;
- atomic64_t generate_public_key_cnt;
- atomic64_t compute_shared_secret_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt: number of RNG generate requests
- * @generate_tlen: total data size of generated data by the RNG
- * @seed_cnt: number of times the RNG was seeded
- * @err_cnt: number of error for RNG requests
- */
-struct crypto_istat_rng {
- atomic64_t generate_cnt;
- atomic64_t generate_tlen;
- atomic64_t seed_cnt;
- atomic64_t err_cnt;
-};
-#endif /* CONFIG_CRYPTO_STATS */
-
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -462,15 +335,6 @@ struct crypto_istat_rng {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
- * @stats: union of all possible crypto_istat_xxx structures
- * @stats.aead: statistics for AEAD algorithm
- * @stats.akcipher: statistics for akcipher algorithm
- * @stats.cipher: statistics for cipher algorithm
- * @stats.compress: statistics for compress algorithm
- * @stats.hash: statistics for hash algorithm
- * @stats.rng: statistics for rng algorithm
- * @stats.kpp: statistics for KPP algorithm
- *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -502,81 +366,8 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-
-#ifdef CONFIG_CRYPTO_STATS
- union {
- struct crypto_istat_aead aead;
- struct crypto_istat_akcipher akcipher;
- struct crypto_istat_cipher cipher;
- struct crypto_istat_compress compress;
- struct crypto_istat_hash hash;
- struct crypto_istat_rng rng;
- struct crypto_istat_kpp kpp;
- } stats;
-#endif /* CONFIG_CRYPTO_STATS */
-
} CRYPTO_MINALIGN_ATTR;
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg);
-void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-#else
-static inline void crypto_stats_init(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_get(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
-{}
-static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -617,14 +408,6 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
}
/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-void crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-void crypto_unregister_algs(struct crypto_alg *algs, int count);
-
-/*
* Algorithm query interface.
*/
int crypto_has_alg(const char *name, u32 type, u32 mask);
@@ -636,6 +419,7 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
*/
struct crypto_tfm {
+ refcount_t refcnt;
u32 crt_flags;
@@ -664,8 +448,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
/*
* Transform helpers which query the underlying algorithm.
*/
@@ -679,16 +461,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_driver_name;
}
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
new file mode 100644
index 000000000000..75da8f5f7ad8
--- /dev/null
+++ b/include/linux/psp-platform-access.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PLATFORM_ACCESS_H
+#define __PSP_PLATFORM_ACCESS_H
+
+#include <linux/psp.h>
+
+enum psp_platform_access_msg {
+ PSP_CMD_NONE = 0x0,
+ PSP_I2C_REQ_BUS_CMD = 0x64,
+};
+
+struct psp_req_buffer_hdr {
+ u32 payload_size;
+ u32 status;
+} __packed;
+
+struct psp_request {
+ struct psp_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * psp_send_platform_access_msg() - Send a message to control platform features
+ *
+ * This function is intended to be used by drivers outside of ccp to communicate
+ * with the platform.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: unknown error (see kernel log)
+ */
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg, struct psp_request *req);
+
+/**
+ * psp_ring_platform_doorbell() - Ring platform doorbell
+ *
+ * This function is intended to be used by drivers outside of ccp to ring the
+ * platform doorbell with a message.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: error will be stored in result argument
+ */
+int psp_ring_platform_doorbell(int msg, u32 *result);
+
+/**
+ * psp_check_platform_access_status() - Checks whether platform features are ready
+ *
+ * This function is intended to be used by drivers outside of ccp to determine
+ * if platform features have initialized.
+ *
+ * Returns:
+ * 0 platform features are ready
+ * -%ENODEV platform features are not ready or present
+ */
+int psp_check_platform_access_status(void);
+
+#endif /* __PSP_PLATFORM_ACCESS_H */
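
Callers are expected to lay out their own request with struct psp_req_buffer_hdr first and pass it down as a struct psp_request. A hedged sketch following that convention (the payload struct and its size are hypothetical):

#include <linux/psp-platform-access.h>
#include <linux/slab.h>

struct example_req {
	struct psp_req_buffer_hdr hdr;	/* must come first */
	u8 payload[16];			/* hypothetical command payload */
} __packed;

static int send_example_req(void)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
	int err;

	if (!req)
		return -ENOMEM;

	req->hdr.payload_size = sizeof(*req);
	err = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD,
					   (struct psp_request *)req);
	if (err == -EIO)
		pr_err("platform access failed, status 0x%x\n",
		       req->hdr.status);

	kfree(req);
	return err;
}
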
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 1595088c428b..7fd17e82bab4 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -14,14 +14,6 @@
#include <uapi/linux/psp-sev.h>
-#ifdef CONFIG_X86
-#include <linux/mem_encrypt.h>
-
-#define __psp_pa(x) __sme_pa(x)
-#else
-#define __psp_pa(x) __pa(x)
-#endif
-
#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
/**
diff --git a/include/linux/psp.h b/include/linux/psp.h
new file mode 100644
index 000000000000..92e60aeef21e
--- /dev/null
+++ b/include/linux/psp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_H
+#define __PSP_H
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+/*
+ * Fields and bits used by most PSP mailboxes
+ *
+ * Note: Some mailboxes (such as SEV) have extra bits or different meanings
+ * and should include an appropriate local definition in their source file.
+ */
+#define PSP_CMDRESP_STS GENMASK(15, 0)
+#define PSP_CMDRESP_CMD GENMASK(23, 16)
+#define PSP_CMDRESP_RESERVED GENMASK(29, 24)
+#define PSP_CMDRESP_RECOVERY BIT(30)
+#define PSP_CMDRESP_RESP BIT(31)
+
+#define PSP_DRBL_MSG PSP_CMDRESP_CMD
+#define PSP_DRBL_RING BIT(0)
+
+#endif /* __PSP_H */
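
The GENMASK()/BIT() definitions compose with the usual bitfield helpers. A hedged sketch of decoding a command/response word (the accessor names are illustrative):

#include <linux/bitfield.h>
#include <linux/psp.h>

static inline bool psp_cmd_done(u32 reg)
{
	return reg & PSP_CMDRESP_RESP;
}

static inline u32 psp_cmd_status(u32 reg)
{
	return FIELD_GET(PSP_CMDRESP_STS, reg);
}

static inline bool psp_in_recovery(u32 reg)
{
	return reg & PSP_CMDRESP_RECOVERY;
}
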
diff --git a/kernel/padata.c b/kernel/padata.c
index e007b8a4b738..222d60195de6 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -491,7 +491,7 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
return;
/* Ensure at least one thread when size < min_chunk. */
- nworks = max(job->size / job->min_chunk, 1ul);
+ nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
nworks = min(nworks, job->max_threads);
if (nworks == 1) {
@@ -967,7 +967,7 @@ static const struct sysfs_ops padata_sysfs_ops = {
.store = padata_sysfs_store,
};
-static struct kobj_type padata_attr_type = {
+static const struct kobj_type padata_attr_type = {
.sysfs_ops = &padata_sysfs_ops,
.default_groups = padata_default_groups,
.release = padata_sysfs_release,
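
The nworks clamp above matters when align exceeds min_chunk: with size = 1024, min_chunk = 16 and align = 64, the old expression asked for 64 works even though only 16 align-sized chunks exist; the new one yields 1024 / max(16, 64) = 16, which max_threads then caps. A hedged sketch of a job exercising it (the thread function body and the numbers are illustrative):

#include <linux/padata.h>

static void example_thread_fn(unsigned long start, unsigned long end, void *arg)
{
	/* process units in [start, end) */
}

static void run_example_job(void)
{
	struct padata_mt_job job = {
		.thread_fn	= example_thread_fn,
		.fn_arg		= NULL,
		.start		= 0,
		.size		= 1024,
		.align		= 64,	/* chunk boundaries rounded to this */
		.min_chunk	= 16,
		.max_threads	= 8,
	};

	padata_do_multithreaded(&job);
}
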
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
index 53230ab1b195..c852c7151b0a 100644
--- a/lib/crypto/utils.c
+++ b/lib/crypto/utils.c
@@ -6,7 +6,7 @@
*/
#include <asm/unaligned.h>
-#include <crypto/algapi.h>
+#include <crypto/utils.h>
#include <linux/module.h>
/*