Diffstat (limited to 'arch/arm64')
42 files changed, 346 insertions, 523 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b8e97331ffb..34f487d5d84e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1,7 +1,7 @@ config ARM64 def_bool y - select ARCH_BINFMT_ELF_RANDOMIZE_PIE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SG_CHAIN select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST @@ -143,6 +143,13 @@ config KERNEL_MODE_NEON config FIX_EARLYCON_MEM def_bool y +config PGTABLE_LEVELS + int + default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 + default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 + default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 + default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48 + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -413,13 +420,6 @@ config ARM64_VA_BITS default 42 if ARM64_VA_BITS_42 default 48 if ARM64_VA_BITS_48 -config ARM64_PGTABLE_LEVELS - int - default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 - default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 - default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 - default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48 - config CPU_BIG_ENDIAN bool "Build big-endian kernel" help diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts index 2e25de0800b9..83578e766b94 100644 --- a/arch/arm64/boot/dts/apm/apm-mustang.dts +++ b/arch/arm64/boot/dts/apm/apm-mustang.dts @@ -45,6 +45,10 @@ status = "ok"; }; +&sgenet1 { + status = "ok"; +}; + &xgenet { status = "ok"; }; diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index a857794432d6..e74f6e0a208c 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -186,6 +186,16 @@ clock-output-names = "sge0clk"; }; + sge1clk: sge1clk@1f21c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f21c000 0x0 0x1000>; + reg-names = "csr-reg"; + csr-mask = <0xc>; + clock-output-names = "sge1clk"; + }; + xge0clk: xge0clk@1f61c000 { compatible = "apm,xgene-device-clock"; #clock-cells = <1>; @@ -628,13 +638,30 @@ <0x0 0x1f200000 0x0 0Xc300>, <0x0 0x1B000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; - interrupts = <0x0 0xA0 0x4>; + interrupts = <0x0 0xA0 0x4>, + <0x0 0xA1 0x4>; dma-coherent; clocks = <&sge0clk 0>; local-mac-address = [00 00 00 00 00 00]; phy-connection-type = "sgmii"; }; + sgenet1: ethernet@1f210030 { + compatible = "apm,xgene1-sgenet"; + status = "disabled"; + reg = <0x0 0x1f210030 0x0 0xd100>, + <0x0 0x1f200000 0x0 0Xc300>, + <0x0 0x1B000000 0x0 0X8000>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0xAC 0x4>, + <0x0 0xAD 0x4>; + port-id = <1>; + dma-coherent; + clocks = <&sge1clk 0>; + local-mac-address = [00 00 00 00 00 00]; + phy-connection-type = "sgmii"; + }; + xgenet: ethernet@1f610000 { compatible = "apm,xgene1-xgenet"; status = "disabled"; @@ -642,7 +669,8 @@ <0x0 0x1f600000 0x0 0Xc300>, <0x0 0x18000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; - interrupts = <0x0 0x60 0x4>; + interrupts = <0x0 0x60 0x4>, + <0x0 0x61 0x4>; dma-coherent; clocks = <&xge0clk 0>; /* mac address will be overwritten by the bootloader */ diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index ea2b5666a16f..c9b89efe0f56 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi @@ -8,7 +8,7 @@ */ /* SoC fixed clocks */ - soc_uartclk: refclk72738khz 
{ + soc_uartclk: refclk7273800hz { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <7273800>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index be1f12a5a5f0..af6a452b1aac 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -48,7 +48,7 @@ CONFIG_CMDLINE="console=ttyAMA0" # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_CPU_IDLE=y -CONFIG_ARM64_CPUIDLE=y +CONFIG_ARM_CPUIDLE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index b1b5b893eb20..05d9e16c0dfd 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -284,7 +284,8 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "__ecb-aes-" MODE, .cra_driver_name = "__driver-ecb-aes-" MODE, .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_alignmask = 7, @@ -302,7 +303,8 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "__cbc-aes-" MODE, .cra_driver_name = "__driver-cbc-aes-" MODE, .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_alignmask = 7, @@ -320,7 +322,8 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "__ctr-aes-" MODE, .cra_driver_name = "__driver-ctr-aes-" MODE, .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_alignmask = 7, @@ -338,7 +341,8 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "__xts-aes-" MODE, .cra_driver_name = "__driver-xts-aes-" MODE, .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), .cra_alignmask = 7, diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 09d57d98609c..033aae6d732a 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S @@ -66,8 +66,8 @@ .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 /* - * void sha1_ce_transform(int blocks, u8 const *src, u32 *state, - * u8 *head, long bytes) + * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + * int blocks) */ ENTRY(sha1_ce_transform) /* load round constants */ @@ -78,25 +78,22 @@ ENTRY(sha1_ce_transform) ld1r {k3.4s}, [x6] /* load state */ - ldr dga, [x2] - ldr dgb, [x2, #16] + ldr dga, [x0] + ldr dgb, [x0, #16] - /* load partial state (if supplied) */ - cbz x3, 0f - ld1 {v8.4s-v11.4s}, [x3] - b 1f + /* load sha1_ce_state::finalize */ + ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize] /* load input */ 0: ld1 {v8.4s-v11.4s}, [x1], #64 - sub w0, w0, #1 + sub w2, w2, #1 -1: CPU_LE( rev32 v8.16b, v8.16b ) CPU_LE( rev32 v9.16b, v9.16b ) CPU_LE( rev32 v10.16b, v10.16b ) CPU_LE( rev32 v11.16b, v11.16b ) -2: add t0.4s, v8.4s, k0.4s +1: add t0.4s, v8.4s, k0.4s mov dg0v.16b, dgav.16b add_update c, ev, k0, 8, 9, 10, 11, dgb @@ -127,15 +124,15 @@ CPU_LE( rev32 v11.16b, v11.16b ) add dgbv.2s, dgbv.2s, dg1v.2s add dgav.4s, dgav.4s, dg0v.4s - cbnz w0, 0b + cbnz w2, 0b /* * Final block: add padding and total bit count. 
- * Skip if we have no total byte count in x4. In that case, the input - * size was not a round multiple of the block size, and the padding is - * handled by the C code. + * Skip if the input size was not a round multiple of the block size, + * the padding is handled by the C code in that case. */ cbz x4, 3f + ldr x4, [x0, #:lo12:sha1_ce_offsetof_count] movi v9.2d, #0 mov x8, #0x80000000 movi v10.2d, #0 @@ -144,10 +141,10 @@ CPU_LE( rev32 v11.16b, v11.16b ) mov x4, #0 mov v11.d[0], xzr mov v11.d[1], x7 - b 2b + b 1b /* store new state */ -3: str dga, [x2] - str dgb, [x2, #16] +3: str dga, [x0] + str dgb, [x0, #16] ret ENDPROC(sha1_ce_transform) diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 6fe83f37a750..114e7cc5de8c 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -12,144 +12,81 @@ #include <asm/unaligned.h> #include <crypto/internal/hash.h> #include <crypto/sha.h> +#include <crypto/sha1_base.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <linux/module.h> +#define ASM_EXPORT(sym, val) \ + asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val)); + MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); -asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state, - u8 *head, long bytes); +struct sha1_ce_state { + struct sha1_state sst; + u32 finalize; +}; -static int sha1_init(struct shash_desc *desc) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); +asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + int blocks); - *sctx = (struct sha1_state){ - .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, - }; - return 0; -} - -static int sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int sha1_ce_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; - - sctx->count += len; - - if ((partial + len) >= SHA1_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SHA1_BLOCK_SIZE - partial; + struct sha1_ce_state *sctx = shash_desc_ctx(desc); - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - } - - blocks = len / SHA1_BLOCK_SIZE; - len %= SHA1_BLOCK_SIZE; - - kernel_neon_begin_partial(16); - sha1_ce_transform(blocks, data, sctx->state, - partial ? 
sctx->buffer : NULL, 0); - kernel_neon_end(); + sctx->finalize = 0; + kernel_neon_begin_partial(16); + sha1_base_do_update(desc, data, len, + (sha1_block_fn *)sha1_ce_transform); + kernel_neon_end(); - data += blocks * SHA1_BLOCK_SIZE; - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); return 0; } -static int sha1_final(struct shash_desc *desc, u8 *out) +static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; + struct sha1_ce_state *sctx = shash_desc_ctx(desc); + bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); - struct sha1_state *sctx = shash_desc_ctx(desc); - __be64 bits = cpu_to_be64(sctx->count << 3); - __be32 *dst = (__be32 *)out; - int i; - - u32 padlen = SHA1_BLOCK_SIZE - - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE); - - sha1_update(desc, padding, padlen); - sha1_update(desc, (const u8 *)&bits, sizeof(bits)); - - for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); - - *sctx = (struct sha1_state){}; - return 0; -} - -static int sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - int blocks; - int i; - - if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) { - sha1_update(desc, data, len); - return sha1_final(desc, out); - } + ASM_EXPORT(sha1_ce_offsetof_count, + offsetof(struct sha1_ce_state, sst.count)); + ASM_EXPORT(sha1_ce_offsetof_finalize, + offsetof(struct sha1_ce_state, finalize)); /* - * Use a fast path if the input is a multiple of 64 bytes. In - * this case, there is no need to copy data around, and we can - * perform the entire digest calculation in a single invocation - * of sha1_ce_transform() + * Allow the asm code to perform the finalization if there is no + * partial data and the input is a round multiple of the block size. 
*/ - blocks = len / SHA1_BLOCK_SIZE; + sctx->finalize = finalize; kernel_neon_begin_partial(16); - sha1_ce_transform(blocks, data, sctx->state, NULL, len); + sha1_base_do_update(desc, data, len, + (sha1_block_fn *)sha1_ce_transform); + if (!finalize) + sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); kernel_neon_end(); - - for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); - - *sctx = (struct sha1_state){}; - return 0; + return sha1_base_finish(desc, out); } -static int sha1_export(struct shash_desc *desc, void *out) +static int sha1_ce_final(struct shash_desc *desc, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - struct sha1_state *dst = out; - - *dst = *sctx; - return 0; -} - -static int sha1_import(struct shash_desc *desc, const void *in) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - struct sha1_state const *src = in; - - *sctx = *src; - return 0; + kernel_neon_begin_partial(16); + sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); + kernel_neon_end(); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { - .init = sha1_init, - .update = sha1_update, - .final = sha1_final, - .finup = sha1_finup, - .export = sha1_export, - .import = sha1_import, - .descsize = sizeof(struct sha1_state), + .init = sha1_base_init, + .update = sha1_ce_update, + .final = sha1_ce_final, + .finup = sha1_ce_finup, + .descsize = sizeof(struct sha1_ce_state), .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-ce", diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 7f29fc031ea8..5df9d9d470ad 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S @@ -73,8 +73,8 @@ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* - * void sha2_ce_transform(int blocks, u8 const *src, u32 *state, - * u8 *head, long bytes) + * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, + * int blocks) */ ENTRY(sha2_ce_transform) /* load round constants */ @@ -85,24 +85,21 @@ ENTRY(sha2_ce_transform) ld1 {v12.4s-v15.4s}, [x8] /* load state */ - ldp dga, dgb, [x2] + ldp dga, dgb, [x0] - /* load partial input (if supplied) */ - cbz x3, 0f - ld1 {v16.4s-v19.4s}, [x3] - b 1f + /* load sha256_ce_state::finalize */ + ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize] /* load input */ 0: ld1 {v16.4s-v19.4s}, [x1], #64 - sub w0, w0, #1 + sub w2, w2, #1 -1: CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) -2: add t0.4s, v16.4s, v0.4s +1: add t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b @@ -131,15 +128,15 @@ CPU_LE( rev32 v19.16b, v19.16b ) add dgbv.4s, dgbv.4s, dg1v.4s /* handled all input blocks? */ - cbnz w0, 0b + cbnz w2, 0b /* * Final block: add padding and total bit count. - * Skip if we have no total byte count in x4. In that case, the input - * size was not a round multiple of the block size, and the padding is - * handled by the C code. + * Skip if the input size was not a round multiple of the block size, + * the padding is handled by the C code in that case. 
*/ cbz x4, 3f + ldr x4, [x0, #:lo12:sha256_ce_offsetof_count] movi v17.2d, #0 mov x8, #0x80000000 movi v18.2d, #0 @@ -148,9 +145,9 @@ CPU_LE( rev32 v19.16b, v19.16b ) mov x4, #0 mov v19.d[0], xzr mov v19.d[1], x7 - b 2b + b 1b /* store new state */ -3: stp dga, dgb, [x2] +3: stp dga, dgb, [x0] ret ENDPROC(sha2_ce_transform) diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index ae67e88c28b9..1340e44c048b 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -12,206 +12,82 @@ #include <asm/unaligned.h> #include <crypto/internal/hash.h> #include <crypto/sha.h> +#include <crypto/sha256_base.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <linux/module.h> +#define ASM_EXPORT(sym, val) \ + asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val)); + MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); -asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state, - u8 *head, long bytes); - -static int sha224_init(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha256_state){ - .state = { - SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, - SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, - } - }; - return 0; -} - -static int sha256_init(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha256_state){ - .state = { - SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, - } - }; - return 0; -} - -static int sha2_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; - - sctx->count += len; - - if ((partial + len) >= SHA256_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SHA256_BLOCK_SIZE - partial; - - memcpy(sctx->buf + partial, data, p); - data += p; - len -= p; - } +struct sha256_ce_state { + struct sha256_state sst; + u32 finalize; +}; - blocks = len / SHA256_BLOCK_SIZE; - len %= SHA256_BLOCK_SIZE; +asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, + int blocks); - kernel_neon_begin_partial(28); - sha2_ce_transform(blocks, data, sctx->state, - partial ? 
sctx->buf : NULL, 0); - kernel_neon_end(); - - data += blocks * SHA256_BLOCK_SIZE; - partial = 0; - } - if (len) - memcpy(sctx->buf + partial, data, len); - return 0; -} - -static void sha2_final(struct shash_desc *desc) +static int sha256_ce_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; - - struct sha256_state *sctx = shash_desc_ctx(desc); - __be64 bits = cpu_to_be64(sctx->count << 3); - u32 padlen = SHA256_BLOCK_SIZE - - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE); - - sha2_update(desc, padding, padlen); - sha2_update(desc, (const u8 *)&bits, sizeof(bits)); -} - -static int sha224_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - int i; - - sha2_final(desc); - - for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); - - *sctx = (struct sha256_state){}; - return 0; -} + struct sha256_ce_state *sctx = shash_desc_ctx(desc); -static int sha256_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - int i; - - sha2_final(desc); - - for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); + sctx->finalize = 0; + kernel_neon_begin_partial(28); + sha256_base_do_update(desc, data, len, + (sha256_block_fn *)sha2_ce_transform); + kernel_neon_end(); - *sctx = (struct sha256_state){}; return 0; } -static void sha2_finup(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - struct sha256_state *sctx = shash_desc_ctx(desc); - int blocks; + struct sha256_ce_state *sctx = shash_desc_ctx(desc); + bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); - if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) { - sha2_update(desc, data, len); - sha2_final(desc); - return; - } + ASM_EXPORT(sha256_ce_offsetof_count, + offsetof(struct sha256_ce_state, sst.count)); + ASM_EXPORT(sha256_ce_offsetof_finalize, + offsetof(struct sha256_ce_state, finalize)); /* - * Use a fast path if the input is a multiple of 64 bytes. In - * this case, there is no need to copy data around, and we can - * perform the entire digest calculation in a single invocation - * of sha2_ce_transform() + * Allow the asm code to perform the finalization if there is no + * partial data and the input is a round multiple of the block size. 
*/ - blocks = len / SHA256_BLOCK_SIZE; + sctx->finalize = finalize; kernel_neon_begin_partial(28); - sha2_ce_transform(blocks, data, sctx->state, NULL, len); + sha256_base_do_update(desc, data, len, + (sha256_block_fn *)sha2_ce_transform); + if (!finalize) + sha256_base_do_finalize(desc, + (sha256_block_fn *)sha2_ce_transform); kernel_neon_end(); + return sha256_base_finish(desc, out); } -static int sha224_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int sha256_ce_final(struct shash_desc *desc, u8 *out) { - struct sha256_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - int i; - - sha2_finup(desc, data, len); - - for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); - - *sctx = (struct sha256_state){}; - return 0; -} - -static int sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - int i; - - sha2_finup(desc, data, len); - - for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++) - put_unaligned_be32(sctx->state[i], dst++); - - *sctx = (struct sha256_state){}; - return 0; -} - -static int sha2_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - struct sha256_state *dst = out; - - *dst = *sctx; - return 0; -} - -static int sha2_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - struct sha256_state const *src = in; - - *sctx = *src; - return 0; + kernel_neon_begin_partial(28); + sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); + kernel_neon_end(); + return sha256_base_finish(desc, out); } static struct shash_alg algs[] = { { - .init = sha224_init, - .update = sha2_update, - .final = sha224_final, - .finup = sha224_finup, - .export = sha2_export, - .import = sha2_import, - .descsize = sizeof(struct sha256_state), + .init = sha224_base_init, + .update = sha256_ce_update, + .final = sha256_ce_final, + .finup = sha256_ce_finup, + .descsize = sizeof(struct sha256_ce_state), .digestsize = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-ce", @@ -221,15 +97,12 @@ static struct shash_alg algs[] = { { .cra_module = THIS_MODULE, } }, { - .init = sha256_init, - .update = sha2_update, - .final = sha256_final, - .finup = sha256_finup, - .export = sha2_export, - .import = sha2_import, - .descsize = sizeof(struct sha256_state), + .init = sha256_base_init, + .update = sha256_ce_update, + .final = sha256_ce_final, + .finup = sha256_ce_finup, + .descsize = sizeof(struct sha256_ce_state), .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-ce", diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index cb9593079f29..d8c25b7b18fb 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, __ret; \ }) -#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) - -#define this_cpu_cmpxchg_double_8(ptr1, 
ptr2, o1, o2, n1, n2) \ - cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ - o1, o2, n1, n2) +#define _protect_cmpxchg_local(pcp, o, n) \ +({ \ + typeof(*raw_cpu_ptr(&(pcp))) __ret; \ + preempt_disable(); \ + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ + preempt_enable(); \ + __ret; \ +}) + +#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) + +#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + preempt_disable(); \ + __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ + raw_cpu_ptr(&(ptr2)), \ + o1, o2, n1, n2); \ + preempt_enable(); \ + __ret; \ +}) #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index c60643f14cda..141b2fcabaa6 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -4,10 +4,10 @@ #include <asm/proc-fns.h> #ifdef CONFIG_CPU_IDLE -extern int cpu_init_idle(unsigned int cpu); +extern int arm_cpuidle_init(unsigned int cpu); extern int cpu_suspend(unsigned long arg); #else -static inline int cpu_init_idle(unsigned int cpu) +static inline int arm_cpuidle_init(unsigned int cpu) { return -EOPNOTSUPP; } @@ -17,5 +17,8 @@ static inline int cpu_suspend(unsigned long arg) return -EOPNOTSUPP; } #endif - +static inline int arm_cpuidle_suspend(int index) +{ + return cpu_suspend(index); +} #endif diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 1f65be393139..faad6df49e5b 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -125,7 +125,6 @@ typedef struct user_fpsimd_state elf_fpregset_t; * the loader. We need to make sure that it is out of the way of the program * that it will "exec", and that there is sufficient room for the brk. 
*/ -extern unsigned long randomize_et_dyn(unsigned long base); #define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) /* @@ -157,10 +156,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) #endif -struct mm_struct; -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #ifdef CONFIG_COMPAT #ifdef __AARCH64EB__ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 92bbae381598..70522450ca23 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -90,6 +90,7 @@ #define ESR_ELx_FSC (0x3F) #define ESR_ELx_FSC_TYPE (0x3C) #define ESR_ELx_FSC_EXTABT (0x10) +#define ESR_ELx_FSC_ACCESS (0x08) #define ESR_ELx_FSC_FAULT (0x04) #define ESR_ELx_FSC_PERM (0x0C) #define ESR_ELx_CV (UL(1) << 24) diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index 076a1c714049..c0e5165c2f76 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -18,11 +18,12 @@ */ #ifndef __ASM_JUMP_LABEL_H #define __ASM_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + #include <linux/types.h> #include <asm/insn.h> -#ifdef __KERNEL__ - #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE static __always_inline bool arch_static_branch(struct static_key *key) @@ -39,8 +40,6 @@ l_yes: return true; } -#endif /* __KERNEL__ */ - typedef u64 jump_label_t; struct jump_entry { @@ -49,4 +48,5 @@ struct jump_entry { jump_label_t key; }; +#endif /* __ASSEMBLY__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 94674eb7e7bb..ac6fafb95fe7 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -129,6 +129,9 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * + * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time + * (see hyp-init.S). + * * Note that when using 4K pages, we concatenate two first level page tables * together. 
* @@ -138,7 +141,6 @@ #ifdef CONFIG_ARM64_64K_PAGES /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 64kB pages (TG0 = 1) * 2 level page tables (SL = 1) @@ -150,7 +152,6 @@ #else /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 4kB pages (TG0 = 0) * 3 level page tables (SL = 1) @@ -187,6 +188,7 @@ /* For compatibility with fault code shared with 32-bit */ #define FSC_FAULT ESR_ELx_FSC_FAULT +#define FSC_ACCESS ESR_ELx_FSC_ACCESS #define FSC_PERM ESR_ELx_FSC_PERM /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8ac3c70fe3c6..f0f58c9beec0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -28,6 +28,8 @@ #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> +#define __KVM_HAVE_ARCH_INTC_INITIALIZED + #if defined(CONFIG_KVM_ARM_MAX_VCPUS) #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS #else @@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); /* We do not have shadow page tables, hence the empty hooks */ -static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, - unsigned long end) -{ - return 0; -} - -static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - return 0; -} - static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, unsigned long address) { diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h index 9f52beb7cb13..889c908ee631 100644 --- a/arch/arm64/include/asm/kvm_mmio.h +++ b/arch/arm64/include/asm/kvm_mmio.h @@ -31,28 +31,6 @@ struct kvm_decode { bool sign_extend; }; -/* - * The in-kernel MMIO emulation code wants to use a copy of run->mmio, - * which is an anonymous type. Use our own type instead. - */ -struct kvm_exit_mmio { - phys_addr_t phys_addr; - u8 data[8]; - u32 len; - bool is_write; - void *private; -}; - -static inline void kvm_prepare_mmio(struct kvm_run *run, - struct kvm_exit_mmio *mmio) -{ - run->mmio.phys_addr = mmio->phys_addr; - run->mmio.len = mmio->len; - run->mmio.is_write = mmio->is_write; - memcpy(run->mmio.data, mmio->data, mmio->len); - run->exit_reason = KVM_EXIT_MMIO; -} - int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, phys_addr_t fault_ipa); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..36250705dc4c 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -158,56 +158,21 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + /* * If we are concatenating first level stage-2 page tables, we would have less * than or equal to 16 pointers in the fake PGD, because that's what the - * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) + * architecture allows. 
In this case, (4 - CONFIG_PGTABLE_LEVELS) * represents the first level for the host, and we add 1 to go to the next * level (which uses contatenation) for the stage-2 tables. */ #if PTRS_PER_S2_PGD <= 16 -#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1) +#define KVM_PREALLOC_LEVEL (4 - CONFIG_PGTABLE_LEVELS + 1) #else #define KVM_PREALLOC_LEVEL (0) #endif -/** - * kvm_prealloc_hwpgd - allocate inital table for VTTBR - * @kvm: The KVM struct pointer for the VM. - * @pgd: The kernel pseudo pgd - * - * When the kernel uses more levels of page tables than the guest, we allocate - * a fake PGD and pre-populate it to point to the next-level page table, which - * will be the real initial page table pointed to by the VTTBR. - * - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we - * allocate 2 consecutive PUD pages. - */ -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) -{ - unsigned int i; - unsigned long hwpgd; - - if (KVM_PREALLOC_LEVEL == 0) - return 0; - - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); - if (!hwpgd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_S2_PGD; i++) { - if (KVM_PREALLOC_LEVEL == 1) - pgd_populate(NULL, pgd + i, - (pud_t *)hwpgd + i * PTRS_PER_PUD); - else if (KVM_PREALLOC_LEVEL == 2) - pud_populate(NULL, pud_offset(pgd, 0) + i, - (pmd_t *)hwpgd + i * PTRS_PER_PMD); - } - - return 0; -} - static inline void *kvm_get_hwpgd(struct kvm *kvm) { pgd_t *pgd = kvm->arch.pgd; @@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) return pmd_offset(pud, 0); } -static inline void kvm_free_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - if (KVM_PREALLOC_LEVEL > 0) { - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); - } + if (KVM_PREALLOC_LEVEL > 0) + return PTRS_PER_S2_PGD * PAGE_SIZE; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } static inline bool kvm_page_empty(void *ptr) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..101a42bde728 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, { unsigned int cpu = smp_processor_id(); + /* + * init_mm.pgd does not contain any user mappings and it is always + * active for kernel addresses in TTBR1. Just set the reserved TTBR0. + */ + if (next == &init_mm) { + cpu_set_reserved_ttbr0(); + return; + } + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) check_and_switch_context(next, tsk); } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 22b16232bd60..8fc8fa280e92 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,9 +36,9 @@ * for more information). 
*/ #ifdef CONFIG_ARM64_64K_PAGES -#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) +#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS) #else -#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1) +#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) #endif #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 09da25bc596f..4fde8c1df97f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, return ret; } +#define _percpu_read(pcp) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable(); \ + __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ + sizeof(pcp)); \ + preempt_enable(); \ + __retval; \ +}) + +#define _percpu_write(pcp, val) \ +do { \ + preempt_disable(); \ + __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ + sizeof(pcp)); \ + preempt_enable(); \ +} while(0) \ + +#define _pcp_protect(operation, pcp, val) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable(); \ + __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ + (val), sizeof(pcp)); \ + preempt_enable(); \ + __retval; \ +}) + #define _percpu_add(pcp, val) \ - __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + _pcp_protect(__percpu_add, pcp, val) -#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) +#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) #define _percpu_and(pcp, val) \ - __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + _pcp_protect(__percpu_and, pcp, val) #define _percpu_or(pcp, val) \ - __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) - -#define _percpu_read(pcp) (typeof(pcp)) \ - (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) - -#define _percpu_write(pcp, val) \ - __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) + _pcp_protect(__percpu_or, pcp, val) #define _percpu_xchg(pcp, val) (typeof(pcp)) \ - (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index e20df38a8ff3..76420568d66a 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -28,7 +28,7 @@ #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -46,9 +46,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); } -#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ +#endif /* CONFIG_PGTABLE_LEVELS > 2 */ -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -66,7 +66,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE)); } -#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */ +#endif /* CONFIG_PGTABLE_LEVELS > 3 */ extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 
5f930cc9ea83..80f3d241cff8 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -21,7 +21,7 @@ /* * PMD_SHIFT determines the size a level 2 page table entry can map. */ -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 #define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3) #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) @@ -31,7 +31,7 @@ /* * PUD_SHIFT determines the size a level 1 page table entry can map. */ -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 #define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3) #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) @@ -42,7 +42,7 @@ * PGDIR_SHIFT determines the size a top-level page table entry can map * (depending on the configuration, this level can be 0, 1 or 2). */ -#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3) +#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3) #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h index ca9df80af896..2b1bd7e52c3b 100644 --- a/arch/arm64/include/asm/pgtable-types.h +++ b/arch/arm64/include/asm/pgtable-types.h @@ -38,13 +38,13 @@ typedef struct { pteval_t pte; } pte_t; #define pte_val(x) ((x).pte) #define __pte(x) ((pte_t) { (x) } ) -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 typedef struct { pmdval_t pmd; } pmd_t; #define pmd_val(x) ((x).pmd) #define __pmd(x) ((pmd_t) { (x) } ) #endif -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; #define pud_val(x) ((x).pud) #define __pud(x) ((pud_t) { (x) } ) @@ -64,13 +64,13 @@ typedef pteval_t pte_t; #define pte_val(x) (x) #define __pte(x) (x) -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 typedef pmdval_t pmd_t; #define pmd_val(x) (x) #define __pmd(x) (x) #endif -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 typedef pudval_t pud_t; #define pud_val(x) (x) #define __pud(x) (x) @@ -86,9 +86,9 @@ typedef pteval_t pgprot_t; #endif /* STRICT_MM_TYPECHECKS */ -#if CONFIG_ARM64_PGTABLE_LEVELS == 2 +#if CONFIG_PGTABLE_LEVELS == 2 #include <asm-generic/pgtable-nopmd.h> -#elif CONFIG_ARM64_PGTABLE_LEVELS == 3 +#elif CONFIG_PGTABLE_LEVELS == 3 #include <asm-generic/pgtable-nopud.h> #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 800ec0e87ed9..56283f8a675c 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -374,7 +374,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) */ #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) @@ -409,9 +409,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK)) -#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ +#endif /* CONFIG_PGTABLE_LEVELS > 2 */ -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 #define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud)) @@ -445,7 +445,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK)) -#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */ 
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..941c375616e2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> -#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) +#define cpu_switch_mm(pgd,mm) \ +do { \ + BUG_ON(pgd == swapper_pg_dir); \ + cpu_do_switch_mm(virt_to_phys(pgd),mm); \ +} while (0) #define cpu_get_pgd() \ ({ \ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 702e1e6a0d80..dcd06d18a42a 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -33,7 +33,6 @@ #ifndef __ASSEMBLY__ struct task_struct; -struct exec_domain; #include <asm/types.h> @@ -47,7 +46,6 @@ struct thread_info { unsigned long flags; /* low level flags */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ int preempt_count; /* 0 => preemptable, <0 => bug */ int cpu; /* cpu */ }; @@ -55,7 +53,6 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ - .exec_domain = &default_exec_domain, \ .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 53d9c354219f..3a0242c7eb8d 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -53,7 +53,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, tlb_remove_entry(tlb, pte); } -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { @@ -62,7 +62,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, } #endif -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 3ef77a466018..c154c0b7eb60 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -191,6 +191,9 @@ struct kvm_arch_memory_slot { /* Highest supported SPI, from VGIC_NR_IRQS */ #define KVM_ARM_IRQ_GIC_MAX 127 +/* One single KVM irqchip, ie. 
the VGIC */ +#define KVM_NR_IRQCHIPS 1 + /* PSCI interface */ #define KVM_PSCI_FN_BASE 0x95c1ba5e #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index f7fa65d4c352..56cadd3606bf 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -38,7 +38,6 @@ int main(void) DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 5c0896647fd1..a78143a5c99f 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -15,7 +15,7 @@ #include <asm/cpuidle.h> #include <asm/cpu_ops.h> -int cpu_init_idle(unsigned int cpu) +int arm_cpuidle_init(unsigned int cpu) { int ret = -EOPNOTSUPP; struct device_node *cpu_node = of_cpu_device_node_get(cpu); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 2b8d70164428..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - cpu_switch_mm(mm->pgd, mm); + if (mm == &init_mm) + cpu_set_reserved_ttbr0(); + else + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); if (icache_is_aivivt()) __flush_icache_all(); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 98bbe06e469c..e7d934d3afe0 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * Disallow per-task kernel breakpoints since these would * complicate the stepping code. 
*/ - if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target) + if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) return -EINVAL; return 0; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 660ccf9f7524..e18c48cb6db1 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -287,19 +287,12 @@ static void setup_restart_syscall(struct pt_regs *regs) */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; /* - * translate the signal - */ - if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) - usig = thread->exec_domain->signal_invmap[usig]; - - /* * Set up the stack frame */ if (is_compat_task()) { diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 32aeea083d93..ec37ab3f524f 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -200,7 +200,7 @@ up_fail: void update_vsyscall(struct timekeeper *tk) { struct timespec xtime_coarse; - u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter"); + u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); @@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { - vdso_data->cs_cycle_last = tk->tkr.cycle_last; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; - vdso_data->cs_mult = tk->tkr.mult; - vdso_data->cs_shift = tk->tkr.shift; + vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_shift = tk->tkr_mono.shift; } smp_wmb(); diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index f5590c81d95f..5105e297ed5f 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -18,6 +18,7 @@ if VIRTUALIZATION config KVM bool "Kernel-based Virtual Machine (KVM) support" + depends on OF select MMU_NOTIFIER select PREEMPT_NOTIFIERS select ANON_INODES @@ -25,10 +26,10 @@ config KVM select HAVE_KVM_ARCH_TLB_FLUSH_ALL select KVM_MMIO select KVM_ARM_HOST - select KVM_ARM_VGIC - select KVM_ARM_TIMER select KVM_GENERIC_DIRTYLOG_READ_PROTECT select SRCU + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQFD ---help--- Support hosting virtualized guest machines. @@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS large, so only choose a reasonable number that you expect to actually use. -config KVM_ARM_VGIC - bool - depends on KVM_ARM_HOST && OF - select HAVE_KVM_IRQCHIP - ---help--- - Adds support for a hardware assisted, in-kernel GIC emulation. - -config KVM_ARM_TIMER - bool - depends on KVM_ARM_VGIC - ---help--- - Adds support for the Architected Timers in virtual machines. - endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 4e6e09ee4033..d5904f876cdb 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -2,7 +2,7 @@ # Makefile for Kernel-based Virtual Machine module # -ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm +ccflags-y += -Iarch/arm64/kvm CFLAGS_arm.o := -I. CFLAGS_mmu.o := -I. 
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm obj-$(CONFIG_KVM_ARM_HOST) += kvm.o -kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o @@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o -kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o -kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o -kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 58e0c2bdde04..ef7d112f5ce0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); -static void *__alloc_from_pool(size_t size, struct page **ret_page) +static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; @@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) *ret_page = phys_to_page(phys); ptr = (void *)val; + if (flags & __GFP_ZERO) + memset(ptr, 0, size); } return ptr; @@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, flags |= GFP_DMA; if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { struct page *page; + void *addr; size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, @@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, return NULL; *dma_handle = phys_to_dma(dev, page_to_phys(page)); - return page_address(page); + addr = page_address(page); + if (flags & __GFP_ZERO) + memset(addr, 0, size); + return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } @@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size, if (!coherent && !(flags & __GFP_WAIT)) { struct page *page = NULL; - void *addr = __alloc_from_pool(size, &page); + void *addr = __alloc_from_pool(size, &page, flags); if (addr) *dma_handle = phys_to_dma(dev, page_to_phys(page)); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index ae85da6307bb..597831bdddf3 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -190,6 +190,8 @@ void __init bootmem_init(void) min = PFN_UP(memblock_start_of_DRAM()); max = PFN_DOWN(memblock_end_of_DRAM()); + early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); + /* * Sparsemem tries to allocate bootmem in memory_present(), so must be * done after the fixed reservations. 
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 54922d1275b8..ed177475dd8c 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -47,17 +47,16 @@ static int mmap_is_legacy(void) return sysctl_legacy_va_layout; } -static unsigned long mmap_rnd(void) +unsigned long arch_mmap_rnd(void) { - unsigned long rnd = 0; + unsigned long rnd; - if (current->flags & PF_RANDOMIZE) - rnd = (long)get_random_int() & STACK_RND_MASK; + rnd = (unsigned long)get_random_int() & STACK_RND_MASK; return rnd << PAGE_SHIFT; } -static unsigned long mmap_base(void) +static unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); @@ -66,7 +65,7 @@ static unsigned long mmap_base(void) else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd()); + return PAGE_ALIGN(STACK_TOP - gap - rnd); } /* @@ -75,15 +74,20 @@ static unsigned long mmap_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + /* * Fall back to the standard layout if the personality bit is set, or * if the expected stack growth is unlimited: */ if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; } else { - mm->mmap_base = mmap_base(); + mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c6daaf6c6f97..79e01163a981 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -550,10 +550,10 @@ void vmemmap_free(unsigned long start, unsigned long end) #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; -#if CONFIG_ARM64_PGTABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; #endif -#if CONFIG_ARM64_PGTABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; #endif |
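For reference, here is a minimal user-space sketch of the mmap layout calculation that the arch/arm64/mm/mmap.c hunk above switches to (a caller-supplied random factor fed into mmap_base() instead of an internal mmap_rnd() call). It is not kernel code: STACK_TOP, MIN_GAP, MAX_GAP, the stack limit passed to mmap_base() and the use of rand() in place of get_random_int() are illustrative stand-ins, and MIN_GAP is simplified relative to the kernel definition.

```c
/*
 * Standalone sketch of the reworked arm64 mmap layout: a page-shifted
 * random factor (arch_mmap_rnd) combined with a clamped stack gap
 * (mmap_base).  All constants below are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_TOP	0x0000800000000000UL	/* assumed 48-bit VA top */
#define STACK_RND_MASK	(0x3ffffUL >> (PAGE_SHIFT - 12))
#define MIN_GAP		(128UL * 1024 * 1024)	/* simplified vs. kernel */
#define MAX_GAP		(STACK_TOP / 6 * 5)

/* Mirrors arch_mmap_rnd(): random bits masked and shifted to pages. */
static unsigned long arch_mmap_rnd(void)
{
	/* rand() stands in for the kernel's get_random_int() */
	return ((unsigned long)rand() & STACK_RND_MASK) << PAGE_SHIFT;
}

/* Mirrors mmap_base(rnd): clamp the stack gap, then subtract the factor. */
static unsigned long mmap_base(unsigned long rnd, unsigned long stack_limit)
{
	unsigned long gap = stack_limit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
	unsigned long rnd = arch_mmap_rnd();	/* 0 when PF_RANDOMIZE is clear */

	printf("random factor:      %#lx\n", rnd);
	printf("top-down mmap_base: %#lx\n", mmap_base(rnd, 8UL << 20));
	return 0;
}
```

In the legacy (bottom-up) layout the same factor is simply added to TASK_UNMAPPED_BASE, which is why arch_pick_mmap_layout() now computes it once up front and passes it to mmap_base() rather than having mmap_base() roll its own randomness.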