Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                                        11
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts       12
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts       12
-rw-r--r--  arch/riscv/crypto/Kconfig                                 11
-rw-r--r--  arch/riscv/crypto/Makefile                                 3
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-glue.c                  124
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S           203
-rw-r--r--  arch/riscv/include/asm/kvm_aia.h                           4
-rw-r--r--  arch/riscv/include/asm/kvm_host.h                          3
-rw-r--r--  arch/riscv/include/asm/pgtable.h                           1
-rw-r--r--  arch/riscv/include/asm/runtime-const.h                     2
-rw-r--r--  arch/riscv/include/asm/uaccess.h                           7
-rw-r--r--  arch/riscv/include/asm/vdso/getrandom.h                    2
-rw-r--r--  arch/riscv/include/asm/vector.h                           12
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c                            6
-rw-r--r--  arch/riscv/kernel/entry.S                                  2
-rw-r--r--  arch/riscv/kernel/ftrace.c                                18
-rw-r--r--  arch/riscv/kernel/pi/Makefile                              2
-rw-r--r--  arch/riscv/kernel/ptrace.c                                12
-rw-r--r--  arch/riscv/kernel/setup.c                                  1
-rw-r--r--  arch/riscv/kernel/traps.c                                 10
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c                       6
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S                          2
-rw-r--r--  arch/riscv/kernel/vendor_extensions/sifive.c               2
-rw-r--r--  arch/riscv/kvm/aia.c                                      51
-rw-r--r--  arch/riscv/kvm/aia_imsic.c                                45
-rw-r--r--  arch/riscv/kvm/vcpu.c                                     10
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c                          8
-rw-r--r--  arch/riscv/kvm/vcpu_timer.c                               16
-rw-r--r--  arch/riscv/lib/Makefile                                    7
-rw-r--r--  arch/riscv/lib/crc-clmul-consts.h                        122
-rw-r--r--  arch/riscv/lib/crc-clmul-template.h                      265
-rw-r--r--  arch/riscv/lib/crc-clmul.h                                23
-rw-r--r--  arch/riscv/lib/crc-t10dif.c                               24
-rw-r--r--  arch/riscv/lib/crc16_msb.c                                18
-rw-r--r--  arch/riscv/lib/crc32.c                                    53
-rw-r--r--  arch/riscv/lib/crc32_lsb.c                                18
-rw-r--r--  arch/riscv/lib/crc32_msb.c                                18
-rw-r--r--  arch/riscv/lib/crc64.c                                    34
-rw-r--r--  arch/riscv/lib/crc64_lsb.c                                18
-rw-r--r--  arch/riscv/lib/crc64_msb.c                                18
-rw-r--r--  arch/riscv/lib/crypto/Kconfig                             16
-rw-r--r--  arch/riscv/lib/crypto/Makefile                             7
-rw-r--r--  arch/riscv/lib/crypto/chacha-riscv64-glue.c               75
-rw-r--r--  arch/riscv/lib/crypto/chacha-riscv64-zvkb.S              297
-rw-r--r--  arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S  225
-rw-r--r--  arch/riscv/lib/crypto/sha256.c                            67
-rw-r--r--  arch/riscv/purgatory/Makefile                              2
-rw-r--r--  arch/riscv/purgatory/purgatory.c                           8
-rwxr-xr-x  arch/riscv/tools/relocs_check.sh                           4
50 files changed, 148 insertions(+), 1769 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 36061f4732b7..5352932badd8 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -24,9 +24,6 @@ config RISCV
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_BINFMT_FLAT
- select ARCH_HAS_CRC32 if RISCV_ISA_ZBC
- select ARCH_HAS_CRC64 if 64BIT && RISCV_ISA_ZBC
- select ARCH_HAS_CRC_T10DIF if RISCV_ISA_ZBC
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -63,7 +60,8 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_CFI_CLANG
+ # clang >= 17: https://github.com/llvm/llvm-project/commit/62fa708ceb027713b386c7e0efda994f8bdc27e2
+ select ARCH_SUPPORTS_CFI_CLANG if CLANG_VERSION >= 170000
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS if MMU
@@ -97,6 +95,7 @@ config RISCV
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
+ select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
@@ -135,13 +134,13 @@ config RISCV
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
select HAVE_ARCH_KFENCE if MMU && 64BIT
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
@@ -161,7 +160,7 @@ config RISCV
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && HAVE_DYNAMIC_FTRACE
select HAVE_EBPF_JIT if MMU
select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 900a50526d77..06731b8c7bc3 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -49,32 +49,28 @@
compatible = "pwm-leds";
led-d1 {
- pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 0 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
max-brightness = <255>;
label = "d1";
};
led-d2 {
- pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 1 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
max-brightness = <255>;
label = "d2";
};
led-d3 {
- pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 2 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
max-brightness = <255>;
label = "d3";
};
led-d4 {
- pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 3 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
max-brightness = <255>;
label = "d4";
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 72b87b08ab44..03ce2cee4e97 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -51,8 +51,7 @@
compatible = "pwm-leds";
led-d12 {
- pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 0 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
max-brightness = <255>;
label = "d12";
@@ -68,20 +67,17 @@
label = "d2";
led-red {
- pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 2 7812500 0>;
color = <LED_COLOR_ID_RED>;
};
led-green {
- pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 1 7812500 0>;
color = <LED_COLOR_ID_GREEN>;
};
led-blue {
- pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>;
- active-low;
+ pwms = <&pwm0 3 7812500 0>;
color = <LED_COLOR_ID_BLUE>;
};
};
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index cd9b776602f8..a75d6325607b 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -28,17 +28,6 @@ config CRYPTO_GHASH_RISCV64
Architecture: riscv64 using:
- Zvkg vector crypto extension
-config CRYPTO_SHA512_RISCV64
- tristate "Hash functions: SHA-384 and SHA-512"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SHA512
- help
- SHA-384 and SHA-512 secure hash algorithm (FIPS 180)
-
- Architecture: riscv64 using:
- - Zvknhb vector crypto extension
- - Zvkb vector crypto extension
-
config CRYPTO_SM3_RISCV64
tristate "Hash functions: SM3 (ShangMi 3)"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile
index e10e8257734e..183495a95cc0 100644
--- a/arch/riscv/crypto/Makefile
+++ b/arch/riscv/crypto/Makefile
@@ -7,9 +7,6 @@ aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
-obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
-sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
-
obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o
diff --git a/arch/riscv/crypto/sha512-riscv64-glue.c b/arch/riscv/crypto/sha512-riscv64-glue.c
deleted file mode 100644
index 4634fca78ae2..000000000000
--- a/arch/riscv/crypto/sha512-riscv64-glue.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-512 and SHA-384 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2023 VRULL GmbH
- * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha512_base.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/*
- * Note: the asm function only uses the 'state' field of struct sha512_state.
- * It is assumed to be the first field.
- */
-asmlinkage void sha512_transform_zvknhb_zvkb(
- struct sha512_state *state, const u8 *data, int num_blocks);
-
-static void sha512_block(struct sha512_state *state, const u8 *data,
- int num_blocks)
-{
- /*
- * Ensure struct sha512_state begins directly with the SHA-512
- * 512-bit internal state, as this is what the asm function expects.
- */
- BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
-
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- sha512_transform_zvknhb_zvkb(state, data, num_blocks);
- kernel_vector_end();
- } else {
- sha512_generic_block_fn(state, data, num_blocks);
- }
-}
-
-static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return sha512_base_do_update_blocks(desc, data, len, sha512_block);
-}
-
-static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha512_base_do_finup(desc, data, len, sha512_block);
- return sha512_base_finish(desc, out);
-}
-
-static int riscv64_sha512_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha512_base_init(desc) ?:
- riscv64_sha512_finup(desc, data, len, out);
-}
-
-static struct shash_alg riscv64_sha512_algs[] = {
- {
- .init = sha512_base_init,
- .update = riscv64_sha512_update,
- .finup = riscv64_sha512_finup,
- .digest = riscv64_sha512_digest,
- .descsize = SHA512_STATE_SIZE,
- .digestsize = SHA512_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
- CRYPTO_AHASH_ALG_FINUP_MAX,
- .cra_name = "sha512",
- .cra_driver_name = "sha512-riscv64-zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- }, {
- .init = sha384_base_init,
- .update = riscv64_sha512_update,
- .finup = riscv64_sha512_finup,
- .descsize = SHA512_STATE_SIZE,
- .digestsize = SHA384_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
- CRYPTO_AHASH_ALG_FINUP_MAX,
- .cra_name = "sha384",
- .cra_driver_name = "sha384-riscv64-zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int __init riscv64_sha512_mod_init(void)
-{
- if (riscv_isa_extension_available(NULL, ZVKNHB) &&
- riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- return crypto_register_shashes(riscv64_sha512_algs,
- ARRAY_SIZE(riscv64_sha512_algs));
-
- return -ENODEV;
-}
-
-static void __exit riscv64_sha512_mod_exit(void)
-{
- crypto_unregister_shashes(riscv64_sha512_algs,
- ARRAY_SIZE(riscv64_sha512_algs));
-}
-
-module_init(riscv64_sha512_mod_init);
-module_exit(riscv64_sha512_mod_exit);
-
-MODULE_DESCRIPTION("SHA-512 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("sha512");
-MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
deleted file mode 100644
index 89f4a10d12dd..000000000000
--- a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
-//
-// This file is dual-licensed, meaning that you can use it under your
-// choice of either of the following two licenses:
-//
-// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the Apache License 2.0 (the "License"). You can obtain
-// a copy in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-//
-// or
-//
-// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
-// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
-// Copyright 2024 Google LLC
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The generated code of this file depends on the following RISC-V extensions:
-// - RV64I
-// - RISC-V Vector ('V') with VLEN >= 128
-// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb')
-// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-
-#include <linux/linkage.h>
-
-.text
-.option arch, +zvknhb, +zvkb
-
-#define STATEP a0
-#define DATA a1
-#define NUM_BLOCKS a2
-
-#define STATEP_C a3
-#define K a4
-
-#define MASK v0
-#define INDICES v1
-#define W0 v10 // LMUL=2
-#define W1 v12 // LMUL=2
-#define W2 v14 // LMUL=2
-#define W3 v16 // LMUL=2
-#define VTMP v20 // LMUL=2
-#define FEBA v22 // LMUL=2
-#define HGDC v24 // LMUL=2
-#define PREV_FEBA v26 // LMUL=2
-#define PREV_HGDC v28 // LMUL=2
-
-// Do 4 rounds of SHA-512. w0 contains the current 4 message schedule words.
-//
-// If not all the message schedule words have been computed yet, then this also
-// computes 4 more message schedule words. w1-w3 contain the next 3 groups of 4
-// message schedule words; this macro computes the group after w3 and writes it
-// to w0. This means that the next (w0, w1, w2, w3) is the current (w1, w2, w3,
-// w0), so the caller must cycle through the registers accordingly.
-.macro sha512_4rounds last, w0, w1, w2, w3
- vle64.v VTMP, (K)
- addi K, K, 32
- vadd.vv VTMP, VTMP, \w0
- vsha2cl.vv HGDC, FEBA, VTMP
- vsha2ch.vv FEBA, HGDC, VTMP
-.if !\last
- vmerge.vvm VTMP, \w2, \w1, MASK
- vsha2ms.vv \w0, VTMP, \w3
-.endif
-.endm
-
-.macro sha512_16rounds last
- sha512_4rounds \last, W0, W1, W2, W3
- sha512_4rounds \last, W1, W2, W3, W0
- sha512_4rounds \last, W2, W3, W0, W1
- sha512_4rounds \last, W3, W0, W1, W2
-.endm
-
-// void sha512_transform_zvknhb_zvkb(u64 state[8], const u8 *data,
-// int num_blocks);
-SYM_FUNC_START(sha512_transform_zvknhb_zvkb)
-
- // Setup mask for the vmerge to replace the first word (idx==0) in
- // message scheduling. There are 4 words, so an 8-bit mask suffices.
- vsetivli zero, 1, e8, m1, ta, ma
- vmv.v.i MASK, 0x01
-
- // Load the state. The state is stored as {a,b,c,d,e,f,g,h}, but we
- // need {f,e,b,a},{h,g,d,c}. The dst vtype is e64m2 and the index vtype
- // is e8mf4. We use index-load with the i8 indices {40, 32, 8, 0},
- // loaded using the 32-bit little endian value 0x00082028.
- li t0, 0x00082028
- vsetivli zero, 1, e32, m1, ta, ma
- vmv.v.x INDICES, t0
- addi STATEP_C, STATEP, 16
- vsetivli zero, 4, e64, m2, ta, ma
- vluxei8.v FEBA, (STATEP), INDICES
- vluxei8.v HGDC, (STATEP_C), INDICES
-
-.Lnext_block:
- la K, K512
- addi NUM_BLOCKS, NUM_BLOCKS, -1
-
- // Save the previous state, as it's needed later.
- vmv.v.v PREV_FEBA, FEBA
- vmv.v.v PREV_HGDC, HGDC
-
- // Load the next 1024-bit message block and endian-swap each 64-bit word
- vle64.v W0, (DATA)
- vrev8.v W0, W0
- addi DATA, DATA, 32
- vle64.v W1, (DATA)
- vrev8.v W1, W1
- addi DATA, DATA, 32
- vle64.v W2, (DATA)
- vrev8.v W2, W2
- addi DATA, DATA, 32
- vle64.v W3, (DATA)
- vrev8.v W3, W3
- addi DATA, DATA, 32
-
- // Do the 80 rounds of SHA-512.
- sha512_16rounds 0
- sha512_16rounds 0
- sha512_16rounds 0
- sha512_16rounds 0
- sha512_16rounds 1
-
- // Add the previous state.
- vadd.vv FEBA, FEBA, PREV_FEBA
- vadd.vv HGDC, HGDC, PREV_HGDC
-
- // Repeat if more blocks remain.
- bnez NUM_BLOCKS, .Lnext_block
-
- // Store the new state and return.
- vsuxei8.v FEBA, (STATEP), INDICES
- vsuxei8.v HGDC, (STATEP_C), INDICES
- ret
-SYM_FUNC_END(sha512_transform_zvknhb_zvkb)
-
-.section ".rodata"
-.p2align 3
-.type K512, @object
-K512:
- .dword 0x428a2f98d728ae22, 0x7137449123ef65cd
- .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc
- .dword 0x3956c25bf348b538, 0x59f111f1b605d019
- .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118
- .dword 0xd807aa98a3030242, 0x12835b0145706fbe
- .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2
- .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1
- .dword 0x9bdc06a725c71235, 0xc19bf174cf692694
- .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3
- .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65
- .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483
- .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5
- .dword 0x983e5152ee66dfab, 0xa831c66d2db43210
- .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4
- .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725
- .dword 0x06ca6351e003826f, 0x142929670a0e6e70
- .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926
- .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df
- .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8
- .dword 0x81c2c92e47edaee6, 0x92722c851482353b
- .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001
- .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30
- .dword 0xd192e819d6ef5218, 0xd69906245565a910
- .dword 0xf40e35855771202a, 0x106aa07032bbd1b8
- .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53
- .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8
- .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb
- .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3
- .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60
- .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec
- .dword 0x90befffa23631e28, 0xa4506cebde82bde9
- .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b
- .dword 0xca273eceea26619c, 0xd186b8c721c0c207
- .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178
- .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6
- .dword 0x113f9804bef90dae, 0x1b710b35131c471b
- .dword 0x28db77f523047d84, 0x32caab7b40c72493
- .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c
- .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a
- .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
-.size K512, . - K512
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 3b643b9efc07..5acce285e56e 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -87,6 +87,9 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
extern struct kvm_device_ops kvm_riscv_aia_device_ops;
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
@@ -161,7 +164,6 @@ void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 85cfebc32e4c..bcbf8b1ec115 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -306,6 +306,9 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 438ce7df24c3..5bd5aae60d53 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -1075,7 +1075,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
-#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
diff --git a/arch/riscv/include/asm/runtime-const.h b/arch/riscv/include/asm/runtime-const.h
index 451fd76b8811..d766e2b9e6df 100644
--- a/arch/riscv/include/asm/runtime-const.h
+++ b/arch/riscv/include/asm/runtime-const.h
@@ -206,7 +206,7 @@ static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, u
addi_insn_mask &= 0x07fff;
}
- if (lower_immediate & 0x00000fff) {
+ if (lower_immediate & 0x00000fff || lui_insn == RISCV_INSN_NOP4) {
/* replace upper 12 bits of addi with lower 12 bits of val */
addi_insn &= addi_insn_mask;
addi_insn |= (lower_immediate & 0x00000fff) << 20;
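
The fixup above exists because RISC-V materializes a 32-bit constant as a
lui/addi pair, and addi sign-extends its 12-bit immediate. A minimal, hedged
standalone C sketch (hypothetical names, not the kernel helper itself) of the
split, showing why the addi parcel must still be patched even when the lui
parcel collapsed to a NOP because the upper bits were zero:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x12345fff;
	/* addi sign-extends bits [11:0], so the upper part is biased by
	 * 0x800 to compensate when bit 11 of the value is set. */
	uint32_t upper = (val + 0x800) & 0xfffff000;	/* lui immediate */
	int32_t lower = (int32_t)(val << 20) >> 20;	/* addi immediate */

	printf("%x\n", upper + (uint32_t)lower);	/* prints 12345fff */
	return 0;
}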
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index d472da4450e6..b88a6218b7f2 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,6 +127,7 @@ do { \
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_user_8(x, ptr, label) \
+do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
asm_goto_output( \
@@ -141,7 +142,7 @@ do { \
: : label); \
(x) = (__typeof__(x))((__typeof__((x) - (x)))( \
(((u64)__hi << 32) | __lo))); \
-
+} while (0)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define __get_user_8(x, ptr, label) \
do { \
@@ -310,8 +311,8 @@ do { \
do { \
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
- __inttype(x) val = (__inttype(x))x; \
- if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+ __inttype(x) ___val = (__inttype(x))x; \
+ if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \
goto label; \
break; \
} \
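
The hunk above wraps the asm-goto variant of __get_user_8() in
do { ... } while (0), restoring standard multi-statement macro hygiene. A
hedged standalone sketch of what goes wrong without the wrapper:

/* With a bare { ... } block, the ';' the caller writes after the macro
 * becomes an empty statement that terminates the if, so the else below
 * would fail to parse. do { ... } while (0) makes the expansion a single
 * statement that absorbs the trailing ';'. */
static void record(int x) { (void)x; }

#define INC_AND_RECORD(x) do { (x)++; record(x); } while (0)

static void demo(int cond, int v)
{
	if (cond)
		INC_AND_RECORD(v);	/* trailing ';' is part of the statement */
	else
		record(v);
}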
diff --git a/arch/riscv/include/asm/vdso/getrandom.h b/arch/riscv/include/asm/vdso/getrandom.h
index 8dc92441702a..c6d66895c1f5 100644
--- a/arch/riscv/include/asm/vdso/getrandom.h
+++ b/arch/riscv/include/asm/vdso/getrandom.h
@@ -18,7 +18,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
register unsigned int flags asm("a2") = _flags;
asm volatile ("ecall\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (buffer), "r" (len), "r" (flags)
: "memory");
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 45c9b426fcc5..b61786d43c20 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -205,11 +205,11 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
THEAD_VSETVLI_T4X0E8M8D1
THEAD_VSB_V_V0T0
"add t0, t0, t4\n\t"
- THEAD_VSB_V_V0T0
+ THEAD_VSB_V_V8T0
"add t0, t0, t4\n\t"
- THEAD_VSB_V_V0T0
+ THEAD_VSB_V_V16T0
"add t0, t0, t4\n\t"
- THEAD_VSB_V_V0T0
+ THEAD_VSB_V_V24T0
: : "r" (datap) : "memory", "t0", "t4");
} else {
asm volatile (
@@ -241,11 +241,11 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
THEAD_VSETVLI_T4X0E8M8D1
THEAD_VLB_V_V0T0
"add t0, t0, t4\n\t"
- THEAD_VLB_V_V0T0
+ THEAD_VLB_V_V8T0
"add t0, t0, t4\n\t"
- THEAD_VLB_V_V0T0
+ THEAD_VLB_V_V16T0
"add t0, t0, t4\n\t"
- THEAD_VLB_V_V0T0
+ THEAD_VLB_V_V24T0
: : "r" (datap) : "memory", "t0", "t4");
} else {
asm volatile (
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index e6fbaaf54956..87d655944803 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi;
/*
* Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
- * be invoked from multiple threads in parallel. Define a per cpu data
+ * be invoked from multiple threads in parallel. Define an array of boot data
* to handle that.
*/
-static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static struct sbi_hart_boot_data boot_data[NR_CPUS];
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv)
@@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
unsigned long hartid = cpuid_to_hartid_map(cpuid);
unsigned long hsm_data;
- struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
+ struct sbi_hart_boot_data *bdata = &boot_data[cpuid];
/* Make sure tidle is updated */
smp_mb();
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 75656afa2d6b..3a0ec6fd5956 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -220,7 +220,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
#endif
bnez s0, 1f
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
call stackleak_erase_on_task_stack
#endif
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 4c6c24380cfd..8d18d6727f0f 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -14,6 +14,18 @@
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
+void ftrace_arch_code_modify_prepare(void)
+ __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+}
+
+void ftrace_arch_code_modify_post_process(void)
+ __releases(&text_mutex)
+{
+ mutex_unlock(&text_mutex);
+}
+
unsigned long ftrace_call_adjust(unsigned long addr)
{
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
@@ -29,10 +41,8 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
void arch_ftrace_update_code(int command)
{
- mutex_lock(&text_mutex);
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
- mutex_unlock(&text_mutex);
flush_icache_all();
}
@@ -149,6 +159,8 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
unsigned int nops[2], offset;
int ret;
+ guard(mutex)(&text_mutex);
+
ret = ftrace_rec_set_nop_ops(rec);
if (ret)
return ret;
@@ -157,9 +169,7 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
nops[0] = to_auipc_t0(offset);
nops[1] = RISCV_INSN_NOP4;
- mutex_lock(&text_mutex);
ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
- mutex_unlock(&text_mutex);
return ret;
}
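
The guard(mutex)(&text_mutex) added above is the scope-based lock helper from
linux/cleanup.h: the mutex is released automatically on every exit from the
scope, including the early error return, which is what lets the explicit
lock/unlock pair around patch_insn_write() go away. A hedged minimal sketch of
the pattern (demo names are hypothetical):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_lock);

static int demo_update(bool bad)
{
	guard(mutex)(&demo_lock);	/* unlocked at every return below */

	if (bad)
		return -EINVAL;		/* no explicit mutex_unlock() needed */
	return 0;
}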
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 81d69d45c06c..7dd15be69c90 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -2,7 +2,7 @@
# This file was copied from arm64/kernel/pi/Makefile.
KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
- -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+ -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_KSTACK_ERASE) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-include $(srctree)/include/linux/hidden.h \
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index ea67e9fb7a58..8e86305831ea 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -186,7 +186,7 @@ static int tagged_addr_ctrl_set(struct task_struct *target,
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
.size = sizeof(elf_greg_t),
.align = sizeof(elf_greg_t),
@@ -195,7 +195,7 @@ static const struct user_regset riscv_user_regset[] = {
},
#ifdef CONFIG_FPU
[REGSET_F] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
@@ -205,7 +205,7 @@ static const struct user_regset riscv_user_regset[] = {
#endif
#ifdef CONFIG_RISCV_ISA_V
[REGSET_V] = {
- .core_note_type = NT_RISCV_VECTOR,
+ USER_REGSET_NOTE_TYPE(RISCV_VECTOR),
.align = 16,
.n = ((32 * RISCV_MAX_VLENB) +
sizeof(struct __riscv_v_regset_state)) / sizeof(__u32),
@@ -216,7 +216,7 @@ static const struct user_regset riscv_user_regset[] = {
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
[REGSET_TAGGED_ADDR_CTRL] = {
- .core_note_type = NT_RISCV_TAGGED_ADDR_CTRL,
+ USER_REGSET_NOTE_TYPE(RISCV_TAGGED_ADDR_CTRL),
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
@@ -380,7 +380,7 @@ static int compat_riscv_gpr_set(struct task_struct *target,
static const struct user_regset compat_riscv_user_regset[] = {
[REGSET_X] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
.size = sizeof(compat_elf_greg_t),
.align = sizeof(compat_elf_greg_t),
@@ -389,7 +389,7 @@ static const struct user_regset compat_riscv_user_regset[] = {
},
#ifdef CONFIG_FPU
[REGSET_F] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f7c9a1caa83e..14888e5ea19a 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -50,6 +50,7 @@ atomic_t hart_lottery __section(".sdata")
#endif
;
unsigned long boot_cpu_hartid;
+EXPORT_SYMBOL_GPL(boot_cpu_hartid);
/*
* Place kernel memory regions on the resource tree so that
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 9c83848797a7..80230de167de 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqflags.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
if (user_mode(regs)) { \
irqentry_enter_from_user_mode(regs); \
+ local_irq_enable(); \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
+ local_irq_disable(); \
irqentry_exit_to_user_mode(regs); \
} else { \
irqentry_state_t state = irqentry_nmi_enter(regs); \
@@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
-
local_irq_enable();
handled = riscv_v_first_use_handler(regs);
-
- local_irq_disable();
-
if (!handled)
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
"Oops - illegal instruction");
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
@@ -308,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
handle_break(regs);
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index dd8e4af6583f..f760e4fcc052 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -454,14 +454,14 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
val.data_u64 = 0;
if (user_mode(regs)) {
- if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
+ if (copy_from_user(&val, (u8 __user *)addr, len))
return -1;
} else {
memcpy(&val, (u8 *)addr, len);
}
if (!fp)
- SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
else if (len == 8)
set_f64_rd(insn, regs, val.data_u64);
else
@@ -555,7 +555,7 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
return -EOPNOTSUPP;
if (user_mode(regs)) {
- if (copy_to_user_nofault((u8 __user *)addr, &val, len))
+ if (copy_to_user((u8 __user *)addr, &val, len))
return -1;
} else {
memcpy((u8 *)addr, &val, len);
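
The SET_RD change above fixes sign extension for misaligned scalar loads:
right-shifting an unsigned value zero-fills, so a negative byte/half/word
would be zero-extended into the destination register. Casting to long first
makes the right shift arithmetic. A hedged standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x80;			/* 1-byte load of -128 */
	int shift = 8 * (8 - 1);		/* 8 * (sizeof(long) - len) */

	uint64_t zext = val << shift >> shift;	/* 0x80: sign lost */
	uint64_t sext = (uint64_t)((int64_t)(val << shift) >> shift);

	printf("%llx %llx\n", (unsigned long long)zext,
	       (unsigned long long)sext);	/* 80 ffffffffffffff80 */
	return 0;
}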
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index 7c15b0f4ee3b..c29ef12a63bb 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -30,7 +30,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
- }
+ } :text
.note : { *(.note.*) } :text :note
diff --git a/arch/riscv/kernel/vendor_extensions/sifive.c b/arch/riscv/kernel/vendor_extensions/sifive.c
index 1411337dc1e6..8fcf67e8c07f 100644
--- a/arch/riscv/kernel/vendor_extensions/sifive.c
+++ b/arch/riscv/kernel/vendor_extensions/sifive.c
@@ -8,7 +8,7 @@
#include <linux/types.h>
/* All SiFive vendor extensions supported in Linux */
-const struct riscv_isa_ext_data riscv_isa_vendor_ext_sifive[] = {
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_sifive[] = {
__RISCV_ISA_EXT_DATA(xsfvfnrclipxfqf, RISCV_ISA_VENDOR_EXT_XSFVFNRCLIPXFQF),
__RISCV_ISA_EXT_DATA(xsfvfwmaccqqq, RISCV_ISA_VENDOR_EXT_XSFVFWMACCQQQ),
__RISCV_ISA_EXT_DATA(xsfvqmaccdod, RISCV_ISA_VENDOR_EXT_XSFVQMACCDOD),
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 19afd1f23537..dad318185660 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -30,28 +30,6 @@ unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
-static int aia_find_hgei(struct kvm_vcpu *owner)
-{
- int i, hgei;
- unsigned long flags;
- struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
-
- raw_spin_lock_irqsave(&hgctrl->lock, flags);
-
- hgei = -1;
- for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
- if (hgctrl->owners[i] == owner) {
- hgei = i;
- break;
- }
- }
-
- raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
-
- put_cpu_ptr(&aia_hgei);
- return hgei;
-}
-
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -95,7 +73,6 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
@@ -114,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
- hgei = aia_find_hgei(vcpu);
- if (hgei > 0)
- return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
-
- return false;
+ return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@@ -164,6 +137,9 @@ void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}
+
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
@@ -174,6 +150,9 @@ void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
if (!kvm_riscv_aia_available())
return;
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_put(vcpu);
+
if (kvm_riscv_nacl_available()) {
nsh = nacl_shmem();
csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
@@ -472,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei)
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
-{
- int hgei;
-
- if (!kvm_riscv_aia_available())
- return;
-
- hgei = aia_find_hgei(owner);
- if (hgei > 0) {
- if (enable)
- csr_set(CSR_HGEIE, BIT(hgei));
- else
- csr_clear(CSR_HGEIE, BIT(hgei));
- }
-}
-
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 29ef9c2133a9..2ff865943ebb 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -676,6 +676,48 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu,
imsic_swfile_extirq_update(vcpu);
}
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * The IMSIC SW-file injects interrupts directly via hvip, so only
+ * check for an interrupt when an IMSIC VS-file is in use.
+ */
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+ if (imsic->vsfile_cpu > -1)
+ ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ return ret;
+}
+
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ /*
+ * No need to explicitly clear HGEIE CSR bits because the
+ * hgei interrupt handler (aka hgei_interrupt()) always
+ * clears them for us.
+ */
+}
+
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ unsigned long flags;
+
+ if (!kvm_vcpu_is_blocking(vcpu))
+ return;
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+ if (imsic->vsfile_cpu > -1)
+ csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+}
+
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
unsigned long flags;
@@ -781,6 +823,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
* producers to the new IMSIC VS-file.
*/
+ /* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */
+ csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));
+
/* Zero-out new IMSIC VS-file */
imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);

diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e0a01af426ff..0462863206ca 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -207,16 +207,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_riscv_vcpu_timer_pending(vcpu);
}
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
- kvm_riscv_aia_wakeon_hgei(vcpu, true);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
- kvm_riscv_aia_wakeon_hgei(vcpu, false);
-}
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 5fbf3f94f1e8..b17fad091bab 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -103,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
- if (cp->a2 == 0 && cp->a3 == 0)
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
@@ -111,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
- if (cp->a2 == 0 && cp->a3 == 0)
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
hbase, hmask, cp->a4);
else
@@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
/*
* Until nested virtualization is implemented, the
- * SBI HFENCE calls should be treated as NOPs
+ * SBI HFENCE calls should return SBI_ERR_NOT_SUPPORTED,
+ * hence the fallthrough to the default case.
*/
- break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
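
The rfence change above recognizes size == -1UL as a request to flush the
entire address space, matching how SBI callers express a full-range fence. A
hedged guest-side sketch (sbi_ecall() and the SBI_EXT_RFENCE_* IDs are the
kernel's existing asm/sbi.h interface; the function name is hypothetical):

#include <asm/sbi.h>

/* Ask the harts in the mask for a full sfence.vma: start (a2) = 0 and
 * size (a3) = -1UL, which the fixed handler above maps to
 * kvm_riscv_hfence_vvma_all(). */
static void demo_remote_fence_all(unsigned long hart_mask,
				  unsigned long hart_mask_base)
{
	sbi_ecall(SBI_EXT_RFENCE, SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
		  hart_mask, hart_mask_base, 0, -1UL, 0, 0);
}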
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index ff672fa71fcc..85a7262115e1 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -345,8 +345,24 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
/*
* The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
* upon every VM exit so no need to save here.
+ *
+ * If the VS-timer expires while no VCPU is running on a host CPU,
+ * then a WFI executed by that host CPU is an effective NOP and
+ * yields no power savings. This is because, per the RISC-V
+ * Privileged specification: "WFI is also required to resume
+ * execution for locally enabled interrupts pending at any privilege
+ * level, regardless of the global interrupt enable at each privilege
+ * level."
+ *
+ * To address this, the vstimecmp CSR must be set to -1UL here when
+ * the VCPU is scheduled out or exits to user space.
*/
+ csr_write(CSR_VSTIMECMP, -1UL);
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_VSTIMECMPH, -1UL);
+#endif
+
/* timer should be enabled for the remaining operations */
if (unlikely(!t->init_done))
return;
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 0baec92d2f55..bbc031124974 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += crypto/
lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
@@ -16,12 +15,6 @@ endif
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
-obj-$(CONFIG_CRC32_ARCH) += crc32-riscv.o
-crc32-riscv-y := crc32.o crc32_msb.o crc32_lsb.o
-obj-$(CONFIG_CRC64_ARCH) += crc64-riscv.o
-crc64-riscv-y := crc64.o crc64_msb.o crc64_lsb.o
-obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-riscv.o
-crc-t10dif-riscv-y := crc-t10dif.o crc16_msb.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
diff --git a/arch/riscv/lib/crc-clmul-consts.h b/arch/riscv/lib/crc-clmul-consts.h
deleted file mode 100644
index 8d73449235ef..000000000000
--- a/arch/riscv/lib/crc-clmul-consts.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CRC constants generated by:
- *
- * ./scripts/gen-crc-consts.py riscv_clmul crc16_msb_0x8bb7,crc32_msb_0x04c11db7,crc32_lsb_0xedb88320,crc32_lsb_0x82f63b78,crc64_msb_0x42f0e1eba9ea3693,crc64_lsb_0x9a6c9329ac4bc9b5
- *
- * Do not edit manually.
- */
-
-struct crc_clmul_consts {
- unsigned long fold_across_2_longs_const_hi;
- unsigned long fold_across_2_longs_const_lo;
- unsigned long barrett_reduction_const_1;
- unsigned long barrett_reduction_const_2;
-};
-
-/*
- * Constants generated for most-significant-bit-first CRC-16 using
- * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
- */
-static const struct crc_clmul_consts crc16_msb_0x8bb7_consts __maybe_unused = {
-#ifdef CONFIG_64BIT
- .fold_across_2_longs_const_hi = 0x0000000000001faa, /* x^192 mod G */
- .fold_across_2_longs_const_lo = 0x000000000000a010, /* x^128 mod G */
- .barrett_reduction_const_1 = 0xfb2d2bfc0e99d245, /* floor(x^79 / G) */
- .barrett_reduction_const_2 = 0x0000000000008bb7, /* G - x^16 */
-#else
- .fold_across_2_longs_const_hi = 0x00005890, /* x^96 mod G */
- .fold_across_2_longs_const_lo = 0x0000f249, /* x^64 mod G */
- .barrett_reduction_const_1 = 0xfb2d2bfc, /* floor(x^47 / G) */
- .barrett_reduction_const_2 = 0x00008bb7, /* G - x^16 */
-#endif
-};
-
-/*
- * Constants generated for most-significant-bit-first CRC-32 using
- * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
- * x^5 + x^4 + x^2 + x^1 + x^0
- */
-static const struct crc_clmul_consts crc32_msb_0x04c11db7_consts __maybe_unused = {
-#ifdef CONFIG_64BIT
- .fold_across_2_longs_const_hi = 0x00000000c5b9cd4c, /* x^192 mod G */
- .fold_across_2_longs_const_lo = 0x00000000e8a45605, /* x^128 mod G */
- .barrett_reduction_const_1 = 0x826880efa40da72d, /* floor(x^95 / G) */
- .barrett_reduction_const_2 = 0x0000000004c11db7, /* G - x^32 */
-#else
- .fold_across_2_longs_const_hi = 0xf200aa66, /* x^96 mod G */
- .fold_across_2_longs_const_lo = 0x490d678d, /* x^64 mod G */
- .barrett_reduction_const_1 = 0x826880ef, /* floor(x^63 / G) */
- .barrett_reduction_const_2 = 0x04c11db7, /* G - x^32 */
-#endif
-};
-
-/*
- * Constants generated for least-significant-bit-first CRC-32 using
- * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
- * x^5 + x^4 + x^2 + x^1 + x^0
- */
-static const struct crc_clmul_consts crc32_lsb_0xedb88320_consts __maybe_unused = {
-#ifdef CONFIG_64BIT
- .fold_across_2_longs_const_hi = 0x65673b4600000000, /* x^191 mod G */
- .fold_across_2_longs_const_lo = 0x9ba54c6f00000000, /* x^127 mod G */
- .barrett_reduction_const_1 = 0xb4e5b025f7011641, /* floor(x^95 / G) */
- .barrett_reduction_const_2 = 0x00000000edb88320, /* (G - x^32) * x^32 */
-#else
- .fold_across_2_longs_const_hi = 0xccaa009e, /* x^95 mod G */
- .fold_across_2_longs_const_lo = 0xb8bc6765, /* x^63 mod G */
- .barrett_reduction_const_1 = 0xf7011641, /* floor(x^63 / G) */
- .barrett_reduction_const_2 = 0xedb88320, /* (G - x^32) * x^0 */
-#endif
-};
-
-/*
- * Constants generated for least-significant-bit-first CRC-32 using
- * G(x) = x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 + x^18 +
- * x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
- */
-static const struct crc_clmul_consts crc32_lsb_0x82f63b78_consts __maybe_unused = {
-#ifdef CONFIG_64BIT
- .fold_across_2_longs_const_hi = 0x3743f7bd00000000, /* x^191 mod G */
- .fold_across_2_longs_const_lo = 0x3171d43000000000, /* x^127 mod G */
- .barrett_reduction_const_1 = 0x4869ec38dea713f1, /* floor(x^95 / G) */
- .barrett_reduction_const_2 = 0x0000000082f63b78, /* (G - x^32) * x^32 */
-#else
- .fold_across_2_longs_const_hi = 0x493c7d27, /* x^95 mod G */
- .fold_across_2_longs_const_lo = 0xdd45aab8, /* x^63 mod G */
- .barrett_reduction_const_1 = 0xdea713f1, /* floor(x^63 / G) */
- .barrett_reduction_const_2 = 0x82f63b78, /* (G - x^32) * x^0 */
-#endif
-};
-
-/*
- * Constants generated for most-significant-bit-first CRC-64 using
- * G(x) = x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
- * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
- * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
- * x^7 + x^4 + x^1 + x^0
- */
-#ifdef CONFIG_64BIT
-static const struct crc_clmul_consts crc64_msb_0x42f0e1eba9ea3693_consts __maybe_unused = {
- .fold_across_2_longs_const_hi = 0x4eb938a7d257740e, /* x^192 mod G */
- .fold_across_2_longs_const_lo = 0x05f5c3c7eb52fab6, /* x^128 mod G */
- .barrett_reduction_const_1 = 0xabc694e836627c39, /* floor(x^127 / G) */
- .barrett_reduction_const_2 = 0x42f0e1eba9ea3693, /* G - x^64 */
-};
-#endif
-
-/*
- * Constants generated for least-significant-bit-first CRC-64 using
- * G(x) = x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 +
- * x^47 + x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 +
- * x^26 + x^23 + x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 +
- * x^4 + x^3 + x^0
- */
-#ifdef CONFIG_64BIT
-static const struct crc_clmul_consts crc64_lsb_0x9a6c9329ac4bc9b5_consts __maybe_unused = {
- .fold_across_2_longs_const_hi = 0xeadc41fd2ba3d420, /* x^191 mod G */
- .fold_across_2_longs_const_lo = 0x21e9761e252621ac, /* x^127 mod G */
- .barrett_reduction_const_1 = 0x27ecfa329aef9f77, /* floor(x^127 / G) */
- .barrett_reduction_const_2 = 0x9a6c9329ac4bc9b5, /* (G - x^64) * x^0 */
-};
-#endif
diff --git a/arch/riscv/lib/crc-clmul-template.h b/arch/riscv/lib/crc-clmul-template.h
deleted file mode 100644
index 77187e7f1762..000000000000
--- a/arch/riscv/lib/crc-clmul-template.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Copyright 2025 Google LLC */
-
-/*
- * This file is a "template" that generates a CRC function optimized using the
- * RISC-V Zbc (scalar carryless multiplication) extension. The includer of this
- * file must define the following parameters to specify the type of CRC:
- *
- * crc_t: the data type of the CRC, e.g. u32 for a 32-bit CRC
- * LSB_CRC: 0 for a msb (most-significant-bit) first CRC, i.e. natural
- * mapping between bits and polynomial coefficients
- * 1 for a lsb (least-significant-bit) first CRC, i.e. reflected
- * mapping between bits and polynomial coefficients
- */
-
-#include <asm/byteorder.h>
-#include <linux/minmax.h>
-
-#define CRC_BITS (8 * sizeof(crc_t)) /* a.k.a. 'n' */
-
-static inline unsigned long clmul(unsigned long a, unsigned long b)
-{
- unsigned long res;
-
- asm(".option push\n"
- ".option arch,+zbc\n"
- "clmul %0, %1, %2\n"
- ".option pop\n"
- : "=r" (res) : "r" (a), "r" (b));
- return res;
-}
-
-static inline unsigned long clmulh(unsigned long a, unsigned long b)
-{
- unsigned long res;
-
- asm(".option push\n"
- ".option arch,+zbc\n"
- "clmulh %0, %1, %2\n"
- ".option pop\n"
- : "=r" (res) : "r" (a), "r" (b));
- return res;
-}
-
-static inline unsigned long clmulr(unsigned long a, unsigned long b)
-{
- unsigned long res;
-
- asm(".option push\n"
- ".option arch,+zbc\n"
- "clmulr %0, %1, %2\n"
- ".option pop\n"
- : "=r" (res) : "r" (a), "r" (b));
- return res;
-}
-
-/*
- * crc_load_long() loads one "unsigned long" of aligned data bytes, producing a
- * polynomial whose bit order matches the CRC's bit order.
- */
-#ifdef CONFIG_64BIT
-# if LSB_CRC
-# define crc_load_long(x) le64_to_cpup(x)
-# else
-# define crc_load_long(x) be64_to_cpup(x)
-# endif
-#else
-# if LSB_CRC
-# define crc_load_long(x) le32_to_cpup(x)
-# else
-# define crc_load_long(x) be32_to_cpup(x)
-# endif
-#endif
-
-/* XOR @crc into the end of @msgpoly that represents the high-order terms. */
-static inline unsigned long
-crc_clmul_prep(crc_t crc, unsigned long msgpoly)
-{
-#if LSB_CRC
- return msgpoly ^ crc;
-#else
- return msgpoly ^ ((unsigned long)crc << (BITS_PER_LONG - CRC_BITS));
-#endif
-}
-
-/*
- * Multiply the long-sized @msgpoly by x^n (a.k.a. x^CRC_BITS) and reduce it
- * modulo the generator polynomial G. This gives the CRC of @msgpoly.
- */
-static inline crc_t
-crc_clmul_long(unsigned long msgpoly, const struct crc_clmul_consts *consts)
-{
- unsigned long tmp;
-
- /*
- * First step of Barrett reduction with integrated multiplication by
- * x^n: calculate floor((msgpoly * x^n) / G). This is the value by
- * which G needs to be multiplied to cancel out the x^n and higher terms
- * of msgpoly * x^n. Do it using the following formula:
- *
- * msb-first:
- * floor((msgpoly * floor(x^(BITS_PER_LONG-1+n) / G)) / x^(BITS_PER_LONG-1))
- * lsb-first:
- * floor((msgpoly * floor(x^(BITS_PER_LONG-1+n) / G) * x) / x^BITS_PER_LONG)
- *
- * barrett_reduction_const_1 contains floor(x^(BITS_PER_LONG-1+n) / G),
- * which fits a long exactly. Using any lower power of x there would
- * not carry enough precision through the calculation, while using any
- * higher power of x would require extra instructions to handle a wider
- * multiplication. In the msb-first case, using this power of x results
- * in needing a floored division by x^(BITS_PER_LONG-1), which matches
- * what clmulr produces. In the lsb-first case, a factor of x gets
- * implicitly introduced by each carryless multiplication (shown as
- * '* x' above), and the floored division instead needs to be by
- * x^BITS_PER_LONG which matches what clmul produces.
- */
-#if LSB_CRC
- tmp = clmul(msgpoly, consts->barrett_reduction_const_1);
-#else
- tmp = clmulr(msgpoly, consts->barrett_reduction_const_1);
-#endif
-
- /*
- * Second step of Barrett reduction:
- *
- * crc := (msgpoly * x^n) + (G * floor((msgpoly * x^n) / G))
- *
- * This reduces (msgpoly * x^n) modulo G by adding the appropriate
- * multiple of G to it. The result uses only the x^0..x^(n-1) terms.
- * HOWEVER, since the unreduced value (msgpoly * x^n) is zero in those
- * terms in the first place, it is more efficient to do the equivalent:
- *
- * crc := ((G - x^n) * floor((msgpoly * x^n) / G)) mod x^n
- *
- * In the lsb-first case further modify it to the following which avoids
- * a shift, as the crc ends up in the physically low n bits from clmulr:
- *
- * product := ((G - x^n) * x^(BITS_PER_LONG - n)) * floor((msgpoly * x^n) / G) * x
- * crc := floor(product / x^(BITS_PER_LONG + 1 - n)) mod x^n
- *
- * barrett_reduction_const_2 contains the constant multiplier (G - x^n)
- * or (G - x^n) * x^(BITS_PER_LONG - n) from the formulas above. The
- * cast of the result to crc_t is essential, as it applies the mod x^n!
- */
-#if LSB_CRC
- return clmulr(tmp, consts->barrett_reduction_const_2);
-#else
- return clmul(tmp, consts->barrett_reduction_const_2);
-#endif
-}
-
-/* Update @crc with the data from @msgpoly. */
-static inline crc_t
-crc_clmul_update_long(crc_t crc, unsigned long msgpoly,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul_long(crc_clmul_prep(crc, msgpoly), consts);
-}
-
-/* Update @crc with 1 <= @len < sizeof(unsigned long) bytes of data. */
-static inline crc_t
-crc_clmul_update_partial(crc_t crc, const u8 *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- unsigned long msgpoly;
- size_t i;
-
-#if LSB_CRC
- msgpoly = (unsigned long)p[0] << (BITS_PER_LONG - 8);
- for (i = 1; i < len; i++)
- msgpoly = (msgpoly >> 8) ^ ((unsigned long)p[i] << (BITS_PER_LONG - 8));
-#else
- msgpoly = p[0];
- for (i = 1; i < len; i++)
- msgpoly = (msgpoly << 8) ^ p[i];
-#endif
-
- if (len >= sizeof(crc_t)) {
- #if LSB_CRC
- msgpoly ^= (unsigned long)crc << (BITS_PER_LONG - 8*len);
- #else
- msgpoly ^= (unsigned long)crc << (8*len - CRC_BITS);
- #endif
- return crc_clmul_long(msgpoly, consts);
- }
-#if LSB_CRC
- msgpoly ^= (unsigned long)crc << (BITS_PER_LONG - 8*len);
- return crc_clmul_long(msgpoly, consts) ^ (crc >> (8*len));
-#else
- msgpoly ^= crc >> (CRC_BITS - 8*len);
- return crc_clmul_long(msgpoly, consts) ^ (crc << (8*len));
-#endif
-}
-
-static inline crc_t
-crc_clmul(crc_t crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- size_t align;
-
- /* This implementation assumes that the CRC fits in an unsigned long. */
- BUILD_BUG_ON(sizeof(crc_t) > sizeof(unsigned long));
-
- /* If the buffer is not long-aligned, align it. */
- align = (unsigned long)p % sizeof(unsigned long);
- if (align && len) {
- align = min(sizeof(unsigned long) - align, len);
- crc = crc_clmul_update_partial(crc, p, align, consts);
- p += align;
- len -= align;
- }
-
- if (len >= 4 * sizeof(unsigned long)) {
- unsigned long m0, m1;
-
- m0 = crc_clmul_prep(crc, crc_load_long(p));
- m1 = crc_load_long(p + sizeof(unsigned long));
- p += 2 * sizeof(unsigned long);
- len -= 2 * sizeof(unsigned long);
- /*
- * Main loop. Each iteration starts with a message polynomial
- * (x^BITS_PER_LONG)*m0 + m1, then logically extends it by two
- * more longs of data to form x^(3*BITS_PER_LONG)*m0 +
- * x^(2*BITS_PER_LONG)*m1 + x^BITS_PER_LONG*m2 + m3, then
- * "folds" that back into a congruent (modulo G) value that uses
- * just m0 and m1 again. This is done by multiplying m0 by the
- * precomputed constant (x^(3*BITS_PER_LONG) mod G) and m1 by
- * the precomputed constant (x^(2*BITS_PER_LONG) mod G), then
- * adding the results to m2 and m3 as appropriate. Each such
- * multiplication produces a result twice the length of a long,
- * which in RISC-V is two instructions clmul and clmulh.
- *
- * This could be changed to fold across more than 2 longs at a
- * time if there is a CPU that can take advantage of it.
- */
- do {
- unsigned long p0, p1, p2, p3;
-
- p0 = clmulh(m0, consts->fold_across_2_longs_const_hi);
- p1 = clmul(m0, consts->fold_across_2_longs_const_hi);
- p2 = clmulh(m1, consts->fold_across_2_longs_const_lo);
- p3 = clmul(m1, consts->fold_across_2_longs_const_lo);
- m0 = (LSB_CRC ? p1 ^ p3 : p0 ^ p2) ^ crc_load_long(p);
- m1 = (LSB_CRC ? p0 ^ p2 : p1 ^ p3) ^
- crc_load_long(p + sizeof(unsigned long));
-
- p += 2 * sizeof(unsigned long);
- len -= 2 * sizeof(unsigned long);
- } while (len >= 2 * sizeof(unsigned long));
-
- crc = crc_clmul_long(m0, consts);
- crc = crc_clmul_update_long(crc, m1, consts);
- }
-
- while (len >= sizeof(unsigned long)) {
- crc = crc_clmul_update_long(crc, crc_load_long(p), consts);
- p += sizeof(unsigned long);
- len -= sizeof(unsigned long);
- }
-
- if (len)
- crc = crc_clmul_update_partial(crc, p, len, consts);
-
- return crc;
-}
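
The deleted template above leaned on the Zbc scalar carryless-multiply
instructions. For readers without the ISA manual at hand, a hedged portable-C
model of what clmul computes (the low XLEN bits of the GF(2) product; clmulh
keeps the high half instead):

#include <stdint.h>

static unsigned long clmul_model(unsigned long a, unsigned long b)
{
	unsigned long res = 0;
	unsigned int i;

	for (i = 0; i < 8 * sizeof(unsigned long); i++)
		if ((b >> i) & 1)
			res ^= a << i;	/* XOR instead of add: no carries */
	return res;
}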
diff --git a/arch/riscv/lib/crc-clmul.h b/arch/riscv/lib/crc-clmul.h
deleted file mode 100644
index dd1736245815..000000000000
--- a/arch/riscv/lib/crc-clmul.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Copyright 2025 Google LLC */
-
-#ifndef _RISCV_CRC_CLMUL_H
-#define _RISCV_CRC_CLMUL_H
-
-#include <linux/types.h>
-#include "crc-clmul-consts.h"
-
-u16 crc16_msb_clmul(u16 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts);
-u32 crc32_msb_clmul(u32 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts);
-u32 crc32_lsb_clmul(u32 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts);
-#ifdef CONFIG_64BIT
-u64 crc64_msb_clmul(u64 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts);
-u64 crc64_lsb_clmul(u64 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts);
-#endif
-
-#endif /* _RISCV_CRC_CLMUL_H */
diff --git a/arch/riscv/lib/crc-t10dif.c b/arch/riscv/lib/crc-t10dif.c
deleted file mode 100644
index e6b0051ccd86..000000000000
--- a/arch/riscv/lib/crc-t10dif.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized CRC-T10DIF function
- *
- * Copyright 2025 Google LLC
- */
-
-#include <asm/hwcap.h>
-#include <asm/alternative-macros.h>
-#include <linux/crc-t10dif.h>
-#include <linux/module.h>
-
-#include "crc-clmul.h"
-
-u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc16_msb_clmul(crc, p, len, &crc16_msb_0x8bb7_consts);
- return crc_t10dif_generic(crc, p, len);
-}
-EXPORT_SYMBOL(crc_t10dif_arch);
-
-MODULE_DESCRIPTION("RISC-V optimized CRC-T10DIF function");
-MODULE_LICENSE("GPL");
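
Callers never invoke crc_t10dif_arch() directly; they use the generic entry points from linux/crc-t10dif.h, which route to the arch implementation when one is built in. A minimal usage sketch (buffer names are placeholders):

    #include <linux/types.h>
    #include <linux/crc-t10dif.h>

    /* One-shot CRC of a single buffer. */
    u16 checksum_one(const u8 *buf, size_t len)
    {
    	return crc_t10dif(buf, len);
    }

    /* Incremental CRC across two scattered segments. */
    u16 checksum_two(const u8 *a, size_t alen, const u8 *b, size_t blen)
    {
    	return crc_t10dif_update(crc_t10dif(a, alen), b, blen);
    }
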
diff --git a/arch/riscv/lib/crc16_msb.c b/arch/riscv/lib/crc16_msb.c
deleted file mode 100644
index 554d295e95f5..000000000000
--- a/arch/riscv/lib/crc16_msb.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized most-significant-bit-first CRC16
- *
- * Copyright 2025 Google LLC
- */
-
-#include "crc-clmul.h"
-
-typedef u16 crc_t;
-#define LSB_CRC 0
-#include "crc-clmul-template.h"
-
-u16 crc16_msb_clmul(u16 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul(crc, p, len, consts);
-}
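
crc16_msb.c and the sibling files below instantiate crc-clmul-template.h as a "template header": the includer defines crc_t and LSB_CRC, then includes the header, which expands crc_clmul() for that width and bit order. A hypothetical two-file sketch of the same pattern:

    /* sum-template.h -- expects the includer to define elem_t first. */
    static inline elem_t sum_elems(const elem_t *p, unsigned long n)
    {
    	elem_t s = 0;

    	while (n--)
    		s += *p++;
    	return s;
    }

    /* sum_u16.c -- one instantiation of the template. */
    typedef unsigned short elem_t;
    #include "sum-template.h"
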
diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32.c
deleted file mode 100644
index a3188b7d9c40..000000000000
--- a/arch/riscv/lib/crc32.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized CRC32 functions
- *
- * Copyright 2025 Google LLC
- */
-
-#include <asm/hwcap.h>
-#include <asm/alternative-macros.h>
-#include <linux/crc32.h>
-#include <linux/module.h>
-
-#include "crc-clmul.h"
-
-u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc32_lsb_clmul(crc, p, len,
- &crc32_lsb_0xedb88320_consts);
- return crc32_le_base(crc, p, len);
-}
-EXPORT_SYMBOL(crc32_le_arch);
-
-u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc32_msb_clmul(crc, p, len,
- &crc32_msb_0x04c11db7_consts);
- return crc32_be_base(crc, p, len);
-}
-EXPORT_SYMBOL(crc32_be_arch);
-
-u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc32_lsb_clmul(crc, p, len,
- &crc32_lsb_0x82f63b78_consts);
- return crc32c_base(crc, p, len);
-}
-EXPORT_SYMBOL(crc32c_arch);
-
-u32 crc32_optimizations(void)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return CRC32_LE_OPTIMIZATION |
- CRC32_BE_OPTIMIZATION |
- CRC32C_OPTIMIZATION;
- return 0;
-}
-EXPORT_SYMBOL(crc32_optimizations);
-
-MODULE_DESCRIPTION("RISC-V optimized CRC32 functions");
-MODULE_LICENSE("GPL");
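
The crc32_optimizations() hook above lets a consumer probe which CRC32 variants are actually hardware-accelerated before committing to an algorithm. A hedged consumer-side sketch, using the CRC32*_OPTIMIZATION flags from linux/crc32.h:

    #include <linux/crc32.h>

    /* Prefer a CRC32C-based layout only when the arch accelerates it. */
    static bool crc32c_is_fast(void)
    {
    	return crc32_optimizations() & CRC32C_OPTIMIZATION;
    }
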
diff --git a/arch/riscv/lib/crc32_lsb.c b/arch/riscv/lib/crc32_lsb.c
deleted file mode 100644
index 72fd67e7470c..000000000000
--- a/arch/riscv/lib/crc32_lsb.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized least-significant-bit-first CRC32
- *
- * Copyright 2025 Google LLC
- */
-
-#include "crc-clmul.h"
-
-typedef u32 crc_t;
-#define LSB_CRC 1
-#include "crc-clmul-template.h"
-
-u32 crc32_lsb_clmul(u32 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul(crc, p, len, consts);
-}
diff --git a/arch/riscv/lib/crc32_msb.c b/arch/riscv/lib/crc32_msb.c
deleted file mode 100644
index fdbeaccc369f..000000000000
--- a/arch/riscv/lib/crc32_msb.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized most-significant-bit-first CRC32
- *
- * Copyright 2025 Google LLC
- */
-
-#include "crc-clmul.h"
-
-typedef u32 crc_t;
-#define LSB_CRC 0
-#include "crc-clmul-template.h"
-
-u32 crc32_msb_clmul(u32 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul(crc, p, len, consts);
-}
diff --git a/arch/riscv/lib/crc64.c b/arch/riscv/lib/crc64.c
deleted file mode 100644
index f0015a27836a..000000000000
--- a/arch/riscv/lib/crc64.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized CRC64 functions
- *
- * Copyright 2025 Google LLC
- */
-
-#include <asm/hwcap.h>
-#include <asm/alternative-macros.h>
-#include <linux/crc64.h>
-#include <linux/module.h>
-
-#include "crc-clmul.h"
-
-u64 crc64_be_arch(u64 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc64_msb_clmul(crc, p, len,
- &crc64_msb_0x42f0e1eba9ea3693_consts);
- return crc64_be_generic(crc, p, len);
-}
-EXPORT_SYMBOL(crc64_be_arch);
-
-u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return crc64_lsb_clmul(crc, p, len,
- &crc64_lsb_0x9a6c9329ac4bc9b5_consts);
- return crc64_nvme_generic(crc, p, len);
-}
-EXPORT_SYMBOL(crc64_nvme_arch);
-
-MODULE_DESCRIPTION("RISC-V optimized CRC64 functions");
-MODULE_LICENSE("GPL");
diff --git a/arch/riscv/lib/crc64_lsb.c b/arch/riscv/lib/crc64_lsb.c
deleted file mode 100644
index c5371bb85d90..000000000000
--- a/arch/riscv/lib/crc64_lsb.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized least-significant-bit-first CRC64
- *
- * Copyright 2025 Google LLC
- */
-
-#include "crc-clmul.h"
-
-typedef u64 crc_t;
-#define LSB_CRC 1
-#include "crc-clmul-template.h"
-
-u64 crc64_lsb_clmul(u64 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul(crc, p, len, consts);
-}
diff --git a/arch/riscv/lib/crc64_msb.c b/arch/riscv/lib/crc64_msb.c
deleted file mode 100644
index 1925d1dbe225..000000000000
--- a/arch/riscv/lib/crc64_msb.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RISC-V optimized most-significant-bit-first CRC64
- *
- * Copyright 2025 Google LLC
- */
-
-#include "crc-clmul.h"
-
-typedef u64 crc_t;
-#define LSB_CRC 0
-#include "crc-clmul-template.h"
-
-u64 crc64_msb_clmul(u64 crc, const void *p, size_t len,
- const struct crc_clmul_consts *consts)
-{
- return crc_clmul(crc, p, len, consts);
-}
diff --git a/arch/riscv/lib/crypto/Kconfig b/arch/riscv/lib/crypto/Kconfig
deleted file mode 100644
index 47c99ea97ce2..000000000000
--- a/arch/riscv/lib/crypto/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config CRYPTO_CHACHA_RISCV64
- tristate
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- default CRYPTO_LIB_CHACHA
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- select CRYPTO_LIB_CHACHA_GENERIC
-
-config CRYPTO_SHA256_RISCV64
- tristate
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- default CRYPTO_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
- select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/riscv/lib/crypto/Makefile b/arch/riscv/lib/crypto/Makefile
deleted file mode 100644
index b7cb877a2c07..000000000000
--- a/arch/riscv/lib/crypto/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
-chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
-
-obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
-sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
diff --git a/arch/riscv/lib/crypto/chacha-riscv64-glue.c b/arch/riscv/lib/crypto/chacha-riscv64-glue.c
deleted file mode 100644
index 8c3f11d79be3..000000000000
--- a/arch/riscv/lib/crypto/chacha-riscv64-glue.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ChaCha stream cipher (RISC-V optimized)
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/chacha.h>
-#include <crypto/internal/simd.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_zvkb);
-
-asmlinkage void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out,
- size_t nblocks, int nrounds);
-
-void hchacha_block_arch(const struct chacha_state *state,
- u32 out[HCHACHA_OUT_WORDS], int nrounds)
-{
- hchacha_block_generic(state, out, nrounds);
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- u8 block_buffer[CHACHA_BLOCK_SIZE];
- unsigned int full_blocks = bytes / CHACHA_BLOCK_SIZE;
- unsigned int tail_bytes = bytes % CHACHA_BLOCK_SIZE;
-
- if (!static_branch_likely(&use_zvkb) || !crypto_simd_usable())
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
- kernel_vector_begin();
- if (full_blocks) {
- chacha_zvkb(state, src, dst, full_blocks, nrounds);
- src += full_blocks * CHACHA_BLOCK_SIZE;
- dst += full_blocks * CHACHA_BLOCK_SIZE;
- }
- if (tail_bytes) {
- memcpy(block_buffer, src, tail_bytes);
- chacha_zvkb(state, block_buffer, block_buffer, 1, nrounds);
- memcpy(dst, block_buffer, tail_bytes);
- }
- kernel_vector_end();
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-bool chacha_is_arch_optimized(void)
-{
- return static_key_enabled(&use_zvkb);
-}
-EXPORT_SYMBOL(chacha_is_arch_optimized);
-
-static int __init riscv64_chacha_mod_init(void)
-{
- if (riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- static_branch_enable(&use_zvkb);
- return 0;
-}
-subsys_initcall(riscv64_chacha_mod_init);
-
-static void __exit riscv64_chacha_mod_exit(void)
-{
-}
-module_exit(riscv64_chacha_mod_exit);
-
-MODULE_DESCRIPTION("ChaCha stream cipher (RISC-V optimized)");
-MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
-MODULE_LICENSE("GPL");
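
The tail handling in chacha_crypt_arch() above is a common pattern for block-only SIMD routines: whole blocks are processed directly, and a partial final block is bounced through a stack buffer so the vector code only ever sees full blocks. A generic standalone sketch of the pattern (process_blocks() is a hypothetical stand-in for the accelerated routine):

    #include <stdint.h>
    #include <string.h>

    #define BLOCK_SIZE 64

    /* Stand-in whole-block primitive; here it just inverts each byte. */
    static void process_blocks(const uint8_t *in, uint8_t *out,
    			   size_t nblocks)
    {
    	for (size_t i = 0; i < nblocks * BLOCK_SIZE; i++)
    		out[i] = ~in[i];
    }

    static void process_bytes(const uint8_t *in, uint8_t *out, size_t len)
    {
    	uint8_t bounce[BLOCK_SIZE];
    	size_t full = len / BLOCK_SIZE;
    	size_t tail = len % BLOCK_SIZE;

    	if (full) {
    		process_blocks(in, out, full);
    		in += full * BLOCK_SIZE;
    		out += full * BLOCK_SIZE;
    	}
    	if (tail) {
    		/* Pad the tail to a full block; copy back only the
    		 * bytes the caller asked for. */
    		memcpy(bounce, in, tail);
    		process_blocks(bounce, bounce, 1);
    		memcpy(out, bounce, tail);
    	}
    }
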
diff --git a/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S b/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S
deleted file mode 100644
index b777d0b4e379..000000000000
--- a/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
-//
-// This file is dual-licensed, meaning that you can use it under your
-// choice of either of the following two licenses:
-//
-// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the Apache License 2.0 (the "License"). You can obtain
-// a copy in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-//
-// or
-//
-// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
-// Copyright 2024 Google LLC
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The generated code of this file depends on the following RISC-V extensions:
-// - RV64I
-// - RISC-V Vector ('V') with VLEN >= 128
-// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-
-#include <linux/linkage.h>
-
-.text
-.option arch, +zvkb
-
-#define STATEP a0
-#define INP a1
-#define OUTP a2
-#define NBLOCKS a3
-#define NROUNDS a4
-
-#define CONSTS0 a5
-#define CONSTS1 a6
-#define CONSTS2 a7
-#define CONSTS3 t0
-#define TMP t1
-#define VL t2
-#define STRIDE t3
-#define ROUND_CTR t4
-#define KEY0 s0
-#define KEY1 s1
-#define KEY2 s2
-#define KEY3 s3
-#define KEY4 s4
-#define KEY5 s5
-#define KEY6 s6
-#define KEY7 s7
-#define COUNTER s8
-#define NONCE0 s9
-#define NONCE1 s10
-#define NONCE2 s11
-
-.macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
- a2, b2, c2, d2, a3, b3, c3, d3
- // a += b; d ^= a; d = rol(d, 16);
- vadd.vv \a0, \a0, \b0
- vadd.vv \a1, \a1, \b1
- vadd.vv \a2, \a2, \b2
- vadd.vv \a3, \a3, \b3
- vxor.vv \d0, \d0, \a0
- vxor.vv \d1, \d1, \a1
- vxor.vv \d2, \d2, \a2
- vxor.vv \d3, \d3, \a3
- vror.vi \d0, \d0, 32 - 16
- vror.vi \d1, \d1, 32 - 16
- vror.vi \d2, \d2, 32 - 16
- vror.vi \d3, \d3, 32 - 16
-
- // c += d; b ^= c; b = rol(b, 12);
- vadd.vv \c0, \c0, \d0
- vadd.vv \c1, \c1, \d1
- vadd.vv \c2, \c2, \d2
- vadd.vv \c3, \c3, \d3
- vxor.vv \b0, \b0, \c0
- vxor.vv \b1, \b1, \c1
- vxor.vv \b2, \b2, \c2
- vxor.vv \b3, \b3, \c3
- vror.vi \b0, \b0, 32 - 12
- vror.vi \b1, \b1, 32 - 12
- vror.vi \b2, \b2, 32 - 12
- vror.vi \b3, \b3, 32 - 12
-
- // a += b; d ^= a; d = rol(d, 8);
- vadd.vv \a0, \a0, \b0
- vadd.vv \a1, \a1, \b1
- vadd.vv \a2, \a2, \b2
- vadd.vv \a3, \a3, \b3
- vxor.vv \d0, \d0, \a0
- vxor.vv \d1, \d1, \a1
- vxor.vv \d2, \d2, \a2
- vxor.vv \d3, \d3, \a3
- vror.vi \d0, \d0, 32 - 8
- vror.vi \d1, \d1, 32 - 8
- vror.vi \d2, \d2, 32 - 8
- vror.vi \d3, \d3, 32 - 8
-
- // c += d; b ^= c; b = rol(b, 7);
- vadd.vv \c0, \c0, \d0
- vadd.vv \c1, \c1, \d1
- vadd.vv \c2, \c2, \d2
- vadd.vv \c3, \c3, \d3
- vxor.vv \b0, \b0, \c0
- vxor.vv \b1, \b1, \c1
- vxor.vv \b2, \b2, \c2
- vxor.vv \b3, \b3, \c3
- vror.vi \b0, \b0, 32 - 7
- vror.vi \b1, \b1, 32 - 7
- vror.vi \b2, \b2, 32 - 7
- vror.vi \b3, \b3, 32 - 7
-.endm
-
-// void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out,
-// size_t nblocks, int nrounds);
-//
-// |nblocks| is the number of 64-byte blocks to process, and must be nonzero.
-//
-// |state| gives the ChaCha state matrix, including the 32-bit counter in
-// state->x[12] following the RFC7539 convention; note that this differs from
-// the original ChaCha specification, which uses a 64-bit counter in state->x[12..13].
-// The updated 32-bit counter is written back to state->x[12] before returning.
-SYM_FUNC_START(chacha_zvkb)
- addi sp, sp, -96
- sd s0, 0(sp)
- sd s1, 8(sp)
- sd s2, 16(sp)
- sd s3, 24(sp)
- sd s4, 32(sp)
- sd s5, 40(sp)
- sd s6, 48(sp)
- sd s7, 56(sp)
- sd s8, 64(sp)
- sd s9, 72(sp)
- sd s10, 80(sp)
- sd s11, 88(sp)
-
- li STRIDE, 64
-
- // Set up the initial state matrix in scalar registers.
- lw CONSTS0, 0(STATEP)
- lw CONSTS1, 4(STATEP)
- lw CONSTS2, 8(STATEP)
- lw CONSTS3, 12(STATEP)
- lw KEY0, 16(STATEP)
- lw KEY1, 20(STATEP)
- lw KEY2, 24(STATEP)
- lw KEY3, 28(STATEP)
- lw KEY4, 32(STATEP)
- lw KEY5, 36(STATEP)
- lw KEY6, 40(STATEP)
- lw KEY7, 44(STATEP)
- lw COUNTER, 48(STATEP)
- lw NONCE0, 52(STATEP)
- lw NONCE1, 56(STATEP)
- lw NONCE2, 60(STATEP)
-
-.Lblock_loop:
- // Set vl to the number of blocks to process in this iteration.
- vsetvli VL, NBLOCKS, e32, m1, ta, ma
-
- // Set up the initial state matrix for the next VL blocks in v0-v15.
- // v{i} holds the i'th 32-bit word of the state matrix for all blocks.
- // Note that only the counter word, at index 12, differs across blocks.
- vmv.v.x v0, CONSTS0
- vmv.v.x v1, CONSTS1
- vmv.v.x v2, CONSTS2
- vmv.v.x v3, CONSTS3
- vmv.v.x v4, KEY0
- vmv.v.x v5, KEY1
- vmv.v.x v6, KEY2
- vmv.v.x v7, KEY3
- vmv.v.x v8, KEY4
- vmv.v.x v9, KEY5
- vmv.v.x v10, KEY6
- vmv.v.x v11, KEY7
- vid.v v12
- vadd.vx v12, v12, COUNTER
- vmv.v.x v13, NONCE0
- vmv.v.x v14, NONCE1
- vmv.v.x v15, NONCE2
-
- // Load the first half of the input data for each block into v16-v23.
- // v{16+i} holds the i'th 32-bit word for all blocks.
- vlsseg8e32.v v16, (INP), STRIDE
-
- mv ROUND_CTR, NROUNDS
-.Lnext_doubleround:
- addi ROUND_CTR, ROUND_CTR, -2
- // column round
- chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
- v2, v6, v10, v14, v3, v7, v11, v15
- // diagonal round
- chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
- v2, v7, v8, v13, v3, v4, v9, v14
- bnez ROUND_CTR, .Lnext_doubleround
-
- // Load the second half of the input data for each block into v24-v31.
- // v{24+i} holds the {8+i}'th 32-bit word for all blocks.
- addi TMP, INP, 32
- vlsseg8e32.v v24, (TMP), STRIDE
-
- // Finalize the first half of the keystream for each block.
- vadd.vx v0, v0, CONSTS0
- vadd.vx v1, v1, CONSTS1
- vadd.vx v2, v2, CONSTS2
- vadd.vx v3, v3, CONSTS3
- vadd.vx v4, v4, KEY0
- vadd.vx v5, v5, KEY1
- vadd.vx v6, v6, KEY2
- vadd.vx v7, v7, KEY3
-
- // Encrypt/decrypt the first half of the data for each block.
- vxor.vv v16, v16, v0
- vxor.vv v17, v17, v1
- vxor.vv v18, v18, v2
- vxor.vv v19, v19, v3
- vxor.vv v20, v20, v4
- vxor.vv v21, v21, v5
- vxor.vv v22, v22, v6
- vxor.vv v23, v23, v7
-
- // Store the first half of the output data for each block.
- vssseg8e32.v v16, (OUTP), STRIDE
-
- // Finalize the second half of the keystream for each block.
- vadd.vx v8, v8, KEY4
- vadd.vx v9, v9, KEY5
- vadd.vx v10, v10, KEY6
- vadd.vx v11, v11, KEY7
- vid.v v0
- vadd.vx v12, v12, COUNTER
- vadd.vx v13, v13, NONCE0
- vadd.vx v14, v14, NONCE1
- vadd.vx v15, v15, NONCE2
- vadd.vv v12, v12, v0
-
- // Encrypt/decrypt the second half of the data for each block.
- vxor.vv v24, v24, v8
- vxor.vv v25, v25, v9
- vxor.vv v26, v26, v10
- vxor.vv v27, v27, v11
-	vxor.vv		v28, v28, v12
-	vxor.vv		v29, v29, v13
- vxor.vv v30, v30, v14
- vxor.vv v31, v31, v15
-
- // Store the second half of the output data for each block.
- addi TMP, OUTP, 32
- vssseg8e32.v v24, (TMP), STRIDE
-
- // Update the counter, the remaining number of blocks, and the input and
- // output pointers according to the number of blocks processed (VL).
- add COUNTER, COUNTER, VL
- sub NBLOCKS, NBLOCKS, VL
- slli TMP, VL, 6
- add OUTP, OUTP, TMP
- add INP, INP, TMP
- bnez NBLOCKS, .Lblock_loop
-
- sw COUNTER, 48(STATEP)
- ld s0, 0(sp)
- ld s1, 8(sp)
- ld s2, 16(sp)
- ld s3, 24(sp)
- ld s4, 32(sp)
- ld s5, 40(sp)
- ld s6, 48(sp)
- ld s7, 56(sp)
- ld s8, 64(sp)
- ld s9, 72(sp)
- ld s10, 80(sp)
- ld s11, 88(sp)
- addi sp, sp, 96
- ret
-SYM_FUNC_END(chacha_zvkb)
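
Each chacha_round invocation above runs the textbook ChaCha quarter round in parallel across blocks; since the vector ISA only has a rotate-right (vror), a left-rotate by n is encoded as vror.vi by 32 - n. For comparison, a scalar C version of the quarter round the macro vectorizes (a reference sketch, not the kernel's code):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n)
    {
    	return (x << n) | (x >> (32 - n));
    }

    /* One ChaCha quarter round on four state words. */
    static void quarter_round(uint32_t *a, uint32_t *b,
    			  uint32_t *c, uint32_t *d)
    {
    	*a += *b; *d ^= *a; *d = rol32(*d, 16);
    	*c += *d; *b ^= *c; *b = rol32(*b, 12);
    	*a += *b; *d ^= *a; *d = rol32(*d, 8);
    	*c += *d; *b ^= *c; *b = rol32(*b, 7);
    }
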
diff --git a/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
deleted file mode 100644
index fad501ad0617..000000000000
--- a/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
+++ /dev/null
@@ -1,225 +0,0 @@
-/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
-//
-// This file is dual-licensed, meaning that you can use it under your
-// choice of either of the following two licenses:
-//
-// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the Apache License 2.0 (the "License"). You can obtain
-// a copy in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-//
-// or
-//
-// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
-// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
-// Copyright 2024 Google LLC
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The generated code of this file depends on the following RISC-V extensions:
-// - RV64I
-// - RISC-V Vector ('V') with VLEN >= 128
-// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb')
-// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-
-#include <linux/linkage.h>
-
-.text
-.option arch, +zvknha, +zvkb
-
-#define STATEP a0
-#define DATA a1
-#define NUM_BLOCKS a2
-
-#define STATEP_C a3
-
-#define MASK v0
-#define INDICES v1
-#define W0 v2
-#define W1 v3
-#define W2 v4
-#define W3 v5
-#define VTMP v6
-#define FEBA v7
-#define HGDC v8
-#define K0 v10
-#define K1 v11
-#define K2 v12
-#define K3 v13
-#define K4 v14
-#define K5 v15
-#define K6 v16
-#define K7 v17
-#define K8 v18
-#define K9 v19
-#define K10 v20
-#define K11 v21
-#define K12 v22
-#define K13 v23
-#define K14 v24
-#define K15 v25
-#define PREV_FEBA v26
-#define PREV_HGDC v27
-
-// Do 4 rounds of SHA-256. w0 contains the current 4 message schedule words.
-//
-// If not all the message schedule words have been computed yet, then this also
-// computes 4 more message schedule words. w1-w3 contain the next 3 groups of 4
-// message schedule words; this macro computes the group after w3 and writes it
-// to w0. This means that the next (w0, w1, w2, w3) is the current (w1, w2, w3,
-// w0), so the caller must cycle through the registers accordingly.
-.macro sha256_4rounds last, k, w0, w1, w2, w3
- vadd.vv VTMP, \k, \w0
- vsha2cl.vv HGDC, FEBA, VTMP
- vsha2ch.vv FEBA, HGDC, VTMP
-.if !\last
- vmerge.vvm VTMP, \w2, \w1, MASK
- vsha2ms.vv \w0, VTMP, \w3
-.endif
-.endm
-
-.macro sha256_16rounds last, k0, k1, k2, k3
- sha256_4rounds \last, \k0, W0, W1, W2, W3
- sha256_4rounds \last, \k1, W1, W2, W3, W0
- sha256_4rounds \last, \k2, W2, W3, W0, W1
- sha256_4rounds \last, \k3, W3, W0, W1, W2
-.endm
-
-// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[SHA256_STATE_WORDS],
-// const u8 *data, size_t nblocks);
-SYM_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
-
- // Load the round constants into K0-K15.
- vsetivli zero, 4, e32, m1, ta, ma
- la t0, K256
- vle32.v K0, (t0)
- addi t0, t0, 16
- vle32.v K1, (t0)
- addi t0, t0, 16
- vle32.v K2, (t0)
- addi t0, t0, 16
- vle32.v K3, (t0)
- addi t0, t0, 16
- vle32.v K4, (t0)
- addi t0, t0, 16
- vle32.v K5, (t0)
- addi t0, t0, 16
- vle32.v K6, (t0)
- addi t0, t0, 16
- vle32.v K7, (t0)
- addi t0, t0, 16
- vle32.v K8, (t0)
- addi t0, t0, 16
- vle32.v K9, (t0)
- addi t0, t0, 16
- vle32.v K10, (t0)
- addi t0, t0, 16
- vle32.v K11, (t0)
- addi t0, t0, 16
- vle32.v K12, (t0)
- addi t0, t0, 16
- vle32.v K13, (t0)
- addi t0, t0, 16
- vle32.v K14, (t0)
- addi t0, t0, 16
- vle32.v K15, (t0)
-
-	// Set up the mask for the vmerge to replace the first word (idx==0)
-	// in message scheduling.  There are 4 words, so an 8-bit mask suffices.
- vsetivli zero, 1, e8, m1, ta, ma
- vmv.v.i MASK, 0x01
-
- // Load the state. The state is stored as {a,b,c,d,e,f,g,h}, but we
- // need {f,e,b,a},{h,g,d,c}. The dst vtype is e32m1 and the index vtype
- // is e8mf4. We use index-load with the i8 indices {20, 16, 4, 0},
- // loaded using the 32-bit little endian value 0x00041014.
- li t0, 0x00041014
- vsetivli zero, 1, e32, m1, ta, ma
- vmv.v.x INDICES, t0
- addi STATEP_C, STATEP, 8
- vsetivli zero, 4, e32, m1, ta, ma
- vluxei8.v FEBA, (STATEP), INDICES
- vluxei8.v HGDC, (STATEP_C), INDICES
-
-.Lnext_block:
- addi NUM_BLOCKS, NUM_BLOCKS, -1
-
- // Save the previous state, as it's needed later.
- vmv.v.v PREV_FEBA, FEBA
- vmv.v.v PREV_HGDC, HGDC
-
- // Load the next 512-bit message block and endian-swap each 32-bit word.
- vle32.v W0, (DATA)
- vrev8.v W0, W0
- addi DATA, DATA, 16
- vle32.v W1, (DATA)
- vrev8.v W1, W1
- addi DATA, DATA, 16
- vle32.v W2, (DATA)
- vrev8.v W2, W2
- addi DATA, DATA, 16
- vle32.v W3, (DATA)
- vrev8.v W3, W3
- addi DATA, DATA, 16
-
- // Do the 64 rounds of SHA-256.
- sha256_16rounds 0, K0, K1, K2, K3
- sha256_16rounds 0, K4, K5, K6, K7
- sha256_16rounds 0, K8, K9, K10, K11
- sha256_16rounds 1, K12, K13, K14, K15
-
- // Add the previous state.
- vadd.vv FEBA, FEBA, PREV_FEBA
- vadd.vv HGDC, HGDC, PREV_HGDC
-
- // Repeat if more blocks remain.
- bnez NUM_BLOCKS, .Lnext_block
-
- // Store the new state and return.
- vsuxei8.v FEBA, (STATEP), INDICES
- vsuxei8.v HGDC, (STATEP_C), INDICES
- ret
-SYM_FUNC_END(sha256_transform_zvknha_or_zvknhb_zvkb)
-
-.section ".rodata"
-.p2align 2
-.type K256, @object
-K256:
- .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
- .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
- .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
- .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
- .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
- .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
- .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
- .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
- .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
- .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
- .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
- .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
- .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
- .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
- .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
- .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
-.size K256, . - K256
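
One subtle trick in the deleted SHA-256 code is the state permutation: the four e8 load indices {20, 16, 4, 0} are materialized from the single little-endian constant 0x00041014, and the same indices reused at base STATEP+8 pick out {h,g,d,c}. A small host-side C check of that encoding (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Four e8 indices packed into one little-endian 32-bit word. */
    	uint32_t packed = 0x00041014;

    	for (int i = 0; i < 4; i++)
    		printf("%u ", (packed >> (8 * i)) & 0xff);
    	/* Prints "20 16 4 0": the byte offsets of f, e, b, a in the
    	 * state.  Rebased at state + 8, they select h, g, d, c. */
    	printf("\n");
    	return 0;
    }
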
diff --git a/arch/riscv/lib/crypto/sha256.c b/arch/riscv/lib/crypto/sha256.c
deleted file mode 100644
index 71808397dff4..000000000000
--- a/arch/riscv/lib/crypto/sha256.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256 (RISC-V accelerated)
- *
- * Copyright (C) 2022 VRULL GmbH
- * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/vector.h>
-#include <crypto/internal/sha2.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
- u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
-
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks)
-{
- if (static_branch_likely(&have_extensions)) {
- kernel_vector_begin();
- sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
- kernel_vector_end();
- } else {
- sha256_blocks_generic(state, data, nblocks);
- }
-}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
-
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks)
-{
- sha256_blocks_generic(state, data, nblocks);
-}
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
-
-bool sha256_is_arch_optimized(void)
-{
- return static_key_enabled(&have_extensions);
-}
-EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
-
-static int __init riscv64_sha256_mod_init(void)
-{
- /* Both zvknha and zvknhb provide the SHA-256 instructions. */
- if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
- riscv_isa_extension_available(NULL, ZVKNHB)) &&
- riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- static_branch_enable(&have_extensions);
- return 0;
-}
-subsys_initcall(riscv64_sha256_mod_init);
-
-static void __exit riscv64_sha256_mod_exit(void)
-{
-}
-module_exit(riscv64_sha256_mod_exit);
-
-MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
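
Note the split in the glue code above: sha256_blocks_arch() stays generic and only sha256_blocks_simd() touches the vector unit, because kernel_vector_begin() is only legal in SIMD-usable context. The generic lib/crypto layer is what picks between them; a simplified sketch of that dispatch, assuming the crypto_simd_usable() helper:

    #include <crypto/internal/sha2.h>
    #include <crypto/internal/simd.h>

    static void do_sha256_blocks(u32 state[SHA256_STATE_WORDS],
    			     const u8 *data, size_t nblocks)
    {
    	/* Vector registers may only be used where SIMD is usable. */
    	if (crypto_simd_usable())
    		sha256_blocks_simd(state, data, nblocks);
    	else
    		sha256_blocks_arch(state, data, nblocks);
    }
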
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index fb9c917c9b45..240592e3f5c2 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -53,7 +53,7 @@ targets += purgatory.ro purgatory.chk
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=medany -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += $(DISABLE_KSTACK_ERASE) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector -g0
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/arch/riscv/purgatory/purgatory.c b/arch/riscv/purgatory/purgatory.c
index 80596ab5fb62..bbd5cfa4d741 100644
--- a/arch/riscv/purgatory/purgatory.c
+++ b/arch/riscv/purgatory/purgatory.c
@@ -20,14 +20,14 @@ struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kex
static int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
- struct sha256_state ss;
+ struct sha256_ctx sctx;
u8 digest[SHA256_DIGEST_SIZE];
- sha256_init(&ss);
+ sha256_init(&sctx);
end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
for (ptr = purgatory_sha_regions; ptr < end; ptr++)
- sha256_update(&ss, (uint8_t *)(ptr->start), ptr->len);
- sha256_final(&ss, digest);
+ sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
+ sha256_final(&sctx, digest);
if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0)
return 1;
return 0;
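
The purgatory hunk above only tracks the lib/crypto rename from struct sha256_state to struct sha256_ctx; the init/update/final flow is unchanged. For reference, the same digest-and-compare shape over a single region, assuming the sha256_*() helpers from crypto/sha2.h:

    #include <crypto/sha2.h>
    #include <linux/string.h>

    static int digest_matches(const void *buf, size_t len,
    			  const u8 expected[SHA256_DIGEST_SIZE])
    {
    	struct sha256_ctx sctx;
    	u8 digest[SHA256_DIGEST_SIZE];

    	sha256_init(&sctx);
    	sha256_update(&sctx, buf, len);
    	sha256_final(&sctx, digest);
    	return memcmp(digest, expected, sizeof(digest)) == 0;
    }
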
diff --git a/arch/riscv/tools/relocs_check.sh b/arch/riscv/tools/relocs_check.sh
index baeb2e7b2290..742993e6a8cb 100755
--- a/arch/riscv/tools/relocs_check.sh
+++ b/arch/riscv/tools/relocs_check.sh
@@ -14,7 +14,9 @@ bad_relocs=$(
${srctree}/scripts/relocs_check.sh "$@" |
# These relocations are okay
# R_RISCV_RELATIVE
- grep -F -w -v 'R_RISCV_RELATIVE'
+ # R_RISCV_NONE
+ grep -F -w -v 'R_RISCV_RELATIVE
+R_RISCV_NONE'
)
if [ -z "$bad_relocs" ]; then