Diffstat (limited to 'arch/Kconfig')
 arch/Kconfig | 835
 1 file changed, 698 insertions(+), 137 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 48b5e103bdb0..c91917b50873 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -11,59 +11,51 @@ source "arch/$(SRCARCH)/Kconfig"
menu "General architecture-dependent options"
-config CRASH_CORE
+config ARCH_HAS_SUBPAGE_FAULTS
bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
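
(A minimal sketch of what such a sub-page probe could look like; the 16-byte granule and the helper name below are invented for illustration and are not arm64's actual MTE implementation.)

    /*
     * Illustrative sketch only: probe a user range for writeability at
     * sub-page (tag-granule) granularity. Returns the number of bytes
     * that could NOT be verified.
     */
    #include <linux/uaccess.h>

    #define SUBPAGE_GRANULE 16 /* assumed tag granule size */

    static size_t probe_subpage_writeable_sketch(char __user *uaddr, size_t size)
    {
        size_t checked;

        for (checked = 0; checked < size; checked += SUBPAGE_GRANULE) {
            char c;

            /* get_user()/put_user() fault with sub-page precision here */
            if (get_user(c, uaddr + checked) ||
                put_user(c, uaddr + checked))
                return size - checked; /* bytes left unverified */
        }
        return 0; /* whole range is writeable */
    }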
-config KEXEC_CORE
- select CRASH_CORE
+config HOTPLUG_SMT
bool
-config KEXEC_ELF
+config SMT_NUM_THREADS_DYNAMIC
bool
-config HAVE_IMA_KEXEC
+# Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
+config HOTPLUG_CORE_SYNC
bool
-config HOTPLUG_SMT
+# Basic CPU dead synchronization selected by architecture
+config HOTPLUG_CORE_SYNC_DEAD
bool
+ select HOTPLUG_CORE_SYNC
-config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config OPROFILE_EVENT_MULTIPLEX
- bool "OProfile multiplexing support (EXPERIMENTAL)"
- default n
- depends on OPROFILE && X86
- help
- The number of hardware counters is limited. The multiplexing
- feature enables OProfile to gather more events than counters
- are provided by the hardware. This is realized by switching
- between events at a user specified time interval.
+# Full CPU synchronization with alive state selected by architecture
+config HOTPLUG_CORE_SYNC_FULL
+ bool
+ select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select HOTPLUG_CORE_SYNC
- If unsure, say N.
+config HOTPLUG_SPLIT_STARTUP
+ bool
+ select HOTPLUG_CORE_SYNC_FULL
-config HAVE_OPROFILE
+config HOTPLUG_PARALLEL
bool
+ select HOTPLUG_SPLIT_STARTUP
-config OPROFILE_NMI_TIMER
- def_bool y
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64
+config GENERIC_ENTRY
+ bool
config KPROBES
bool "Kprobes"
depends on MODULES
depends on HAVE_KPROBES
select KALLSYMS
+ select TASKS_RCU if PREEMPTION
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
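
(For reference, the register_kprobe() flow this help text describes, as a minimal module sketch; the probed symbol kernel_clone is an arbitrary example target.)

    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Illustrative only: probe an arbitrary kernel symbol. */
    static struct kprobe kp = {
        .symbol_name = "kernel_clone", /* assumed probe target */
    };

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
        return 0; /* continue with the probed instruction */
    }

    static int __init kp_init(void)
    {
        kp.pre_handler = handler_pre;
        return register_kprobe(&kp); /* breakpoint is armed here */
    }

    static void __exit kp_exit(void)
    {
        unregister_kprobe(&kp);
    }

    module_init(kp_init);
    module_exit(kp_exit);
    MODULE_LICENSE("GPL");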
@@ -74,28 +66,28 @@ config KPROBES
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
- depends on CC_HAS_ASM_GOTO
+ select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
- This option enables a transparent branch optimization that
- makes certain almost-always-true or almost-always-false branch
- conditions even cheaper to execute within the kernel.
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+ conditions even cheaper to execute within the kernel.
- Certain performance-sensitive kernel code, such as trace points,
- scheduler functionality, networking code and KVM have such
- branches and include support for this optimization technique.
+ Certain performance-sensitive kernel code, such as trace points,
+ scheduler functionality, networking code and KVM have such
+ branches and include support for this optimization technique.
- If it is detected that the compiler has support for "asm goto",
- the kernel will compile such branches with just a nop
- instruction. When the condition flag is toggled to true, the
- nop will be converted to a jump instruction to execute the
- conditional block of instructions.
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile such branches with just a nop
+ instruction. When the condition flag is toggled to true, the
+ nop will be converted to a jump instruction to execute the
+ conditional block of instructions.
- This technique lowers overhead and stress on the branch prediction
- of the processor and generally makes the kernel faster. The update
- of the condition is slower, but those are always very rare.
+ This technique lowers overhead and stress on the branch prediction
+ of the processor and generally makes the kernel faster. The update
+ of the condition is slower, but those are always very rare.
- ( On 32-bit x86, the necessary options added to the compiler
- flags may increase the size of the kernel slightly. )
+ ( On 32-bit x86, the necessary options added to the compiler
+ flags may increase the size of the kernel slightly. )
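
(The static-key API built on this option looks roughly like the sketch below; DEFINE_STATIC_KEY_FALSE() and static_branch_unlikely() are the real interfaces from <linux/jump_label.h>, while the feature flag and slow-path helper are invented examples.)

    #include <linux/jump_label.h>

    extern void do_rare_feature_work(void); /* hypothetical slow path */

    /* Example flag; the branch compiles to a nop until enabled. */
    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    void hot_path(void)
    {
        /* With asm goto this is a single nop when the key is false. */
        if (static_branch_unlikely(&my_feature_key))
            do_rare_feature_work();
    }

    void enable_feature(void)
    {
        /* Patches the nop into a jump at runtime (may sleep). */
        static_branch_enable(&my_feature_key);
    }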
config STATIC_KEYS_SELFTEST
bool "Static key selftest"
@@ -103,6 +95,12 @@ config STATIC_KEYS_SELFTEST
help
Boot time self-test of the branch patching code.
+config STATIC_CALL_SELFTEST
+ bool "Static call selftest"
+ depends on HAVE_STATIC_CALL
+ help
+ Boot time self-test of the call patching code.
+
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
@@ -113,9 +111,9 @@ config KPROBES_ON_FTRACE
depends on KPROBES && HAVE_KPROBES_ON_FTRACE
depends on DYNAMIC_FTRACE_WITH_REGS
help
- If function tracer is enabled and the arch supports full
- passing of pt_regs to function tracing, then kprobes can
- optimize on top of function tracing.
+ If function tracer is enabled and the arch supports full
+ passing of pt_regs to function tracing, then kprobes can
+ optimize on top of function tracing.
config UPROBES
def_bool n
@@ -131,6 +129,22 @@ config UPROBES
managed by the kernel and kept transparent to the probed
application. )
+config HAVE_64BIT_ALIGNED_ACCESS
+ def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ Some architectures require 64 bit accesses to be 64 bit
+ aligned, which also requires structs containing 64 bit values
+ to be 64 bit aligned too. This includes some 32 bit
+ architectures which can do 64 bit accesses, as well as 64 bit
+ architectures without unaligned access.
+
+ This symbol should be selected by an architecture if 64 bit
+ accesses are required to be 64 bit aligned in this way even
+ though it is not a 64 bit architecture.
+
+ See Documentation/core-api/unaligned-memory-access.rst for
+ more information on the topic of unaligned memory accesses.
+
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
help
@@ -147,31 +161,37 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
problems with received packets if doing so would not help
much.
- See Documentation/unaligned-memory-access.txt for more
+ See Documentation/core-api/unaligned-memory-access.rst for more
information on the topic of unaligned memory accesses.
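
(Portable code sidesteps the distinction with the get_unaligned()/put_unaligned() helpers; a short sketch, where the packet layout is an invented example.)

    #include <asm/unaligned.h>

    /* Read a 32-bit field at an arbitrary (possibly unaligned) offset. */
    u32 read_len_field(const u8 *pkt)
    {
        /*
         * On HAVE_EFFICIENT_UNALIGNED_ACCESS architectures this is a
         * plain load; elsewhere it expands to safe byte-wise accesses.
         */
        return get_unaligned((const u32 *)(pkt + 1));
    }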
config ARCH_USE_BUILTIN_BSWAP
bool
help
- Modern versions of GCC (since 4.4) have builtin functions
- for handling byte-swapping. Using these, instead of the old
- inline assembler that the architecture code provides in the
- __arch_bswapXX() macros, allows the compiler to see what's
- happening and offers more opportunity for optimisation. In
- particular, the compiler will be able to combine the byteswap
- with a nearby load or store and use load-and-swap or
- store-and-swap instructions if the architecture has them. It
- should almost *never* result in code which is worse than the
- hand-coded assembler in <asm/swab.h>. But just in case it
- does, the use of the builtins is optional.
+ Modern versions of GCC (since 4.4) have builtin functions
+ for handling byte-swapping. Using these, instead of the old
+ inline assembler that the architecture code provides in the
+ __arch_bswapXX() macros, allows the compiler to see what's
+ happening and offers more opportunity for optimisation. In
+ particular, the compiler will be able to combine the byteswap
+ with a nearby load or store and use load-and-swap or
+ store-and-swap instructions if the architecture has them. It
+ should almost *never* result in code which is worse than the
+ hand-coded assembler in <asm/swab.h>. But just in case it
+ does, the use of the builtins is optional.
- Any architecture with load-and-swap or store-and-swap
- instructions should set this. And it shouldn't hurt to set it
- on architectures that don't have such instructions.
+ Any architecture with load-and-swap or store-and-swap
+ instructions should set this. And it shouldn't hurt to set it
+ on architectures that don't have such instructions.
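
(A sketch of the difference in question: the open-coded fallback mirrors what a generic __arch_swab32() might do, while __builtin_bswap32() lets the compiler see the intent.)

    #include <linux/types.h>

    /* Open-coded swap: the compiler sees only shifts and masks. */
    static inline u32 swab32_manual(u32 x)
    {
        return (x << 24) | ((x & 0xff00) << 8) |
               ((x >> 8) & 0xff00) | (x >> 24);
    }

    /*
     * Builtin swap: the compiler may fuse this with an adjacent load
     * or store into a single load-and-swap instruction (e.g. movbe).
     */
    static inline u32 swab32_builtin(u32 x)
    {
        return __builtin_bswap32(x);
    }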
config KRETPROBES
def_bool y
- depends on KPROBES && HAVE_KRETPROBES
+ depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK)
+
+config KRETPROBE_ON_RETHOOK
+ def_bool y
+ depends on HAVE_RETHOOK
+ depends on KRETPROBES
+ select RETHOOK
config USER_RETURN_NOTIFIER
bool
@@ -195,12 +215,29 @@ config HAVE_OPTPROBES
config HAVE_KPROBES_ON_FTRACE
bool
+config ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ bool
+ help
+ Since kretprobes modifies the return address on the stack, a
+ stacktrace may see the kretprobe trampoline address instead
+ of the correct one. If the architecture's stacktrace code and
+ unwinder can adjust such entries, select this option.
+
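(The trampoline substitution mentioned above comes from the kretprobe mechanism; a minimal module sketch, with kernel_clone as an arbitrary example target.)

    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Illustrative kretprobe: log the return value of an example symbol. */
    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        pr_info("returned %lu\n", regs_return_value(regs));
        return 0;
    }

    static struct kretprobe rp = {
        .kp.symbol_name = "kernel_clone", /* assumed target */
        .handler        = ret_handler,
        .maxactive      = 20, /* concurrent instances to track */
    };

    static int __init rp_init(void)
    {
        /*
         * The return address is swapped for a trampoline while the
         * probed function runs; ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
         * is what lets unwinders hide that substitution.
         */
        return register_kretprobe(&rp);
    }

    static void __exit rp_exit(void)
    {
        unregister_kretprobe(&rp);
    }

    module_init(rp_init);
    module_exit(rp_exit);
    MODULE_LICENSE("GPL");
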
config HAVE_FUNCTION_ERROR_INJECTION
bool
config HAVE_NMI
bool
+config HAVE_FUNCTION_DESCRIPTORS
+ bool
+
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+
+config TRACE_IRQFLAGS_NMI_SUPPORT
+ bool
+
#
# An arch should select this if it provides all these things:
#
@@ -210,9 +247,8 @@ config HAVE_NMI
# asm/syscall.h supplying asm-generic/syscall.h interface
# linux/regset.h user_regset interfaces
# CORE_DUMP_USE_REGSET #define'd in linux/elf.h
-# TIF_SYSCALL_TRACE calls tracehook_report_syscall_{entry,exit}
-# TIF_NOTIFY_RESUME calls tracehook_notify_resume()
-# signal delivery calls tracehook_signal_handler()
+# TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit}
+# TIF_NOTIFY_RESUME calls resume_user_mode_work()
#
config HAVE_ARCH_TRACEHOOK
bool
@@ -248,24 +284,30 @@ config ARCH_HAS_SET_DIRECT_MAP
bool
#
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address / cached_kernel_address symbols to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
#
-config ARCH_HAS_UNCACHED_SEGMENT
- select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
bool
-# Select if arch init_task must go in the __init_task_data section
-config ARCH_TASK_STRUCT_ON_STACK
+#
+# Select if the architectures provides the arch_dma_clear_uncached symbol
+# to undo an in-place page table remap for uncached access.
+#
+config ARCH_HAS_DMA_CLEAR_UNCACHED
+ bool
+
+config ARCH_HAS_CPU_FINALIZE_INIT
bool
-# Select if arch has its private alloc_task_struct() function
-config ARCH_TASK_STRUCT_ALLOCATOR
+# The architecture has a per-task state that includes the mm's PASID
+config ARCH_HAS_CPU_PASID
bool
+ select IOMMU_MM_DATA
config HAVE_ARCH_THREAD_STRUCT_WHITELIST
bool
- depends on !ARCH_TASK_STRUCT_ALLOCATOR
help
An architecture should select this to provide hardened usercopy
knowledge about what region of the thread_struct should be
@@ -274,14 +316,17 @@ config HAVE_ARCH_THREAD_STRUCT_WHITELIST
should be implemented. Without this, the entire thread_struct
field in task_struct will be left whitelisted.
-# Select if arch has its private alloc_thread_stack() function
-config ARCH_THREAD_STACK_ALLOCATOR
- bool
-
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
+config ARCH_WANTS_NO_INSTR
+ bool
+ help
+ An architecture should select this if it uses the noinstr macro on
+ functions to denote that the toolchain must avoid instrumenting
+ them; avoiding instrumentation there is required for correctness.
+
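(The noinstr annotation is used roughly like the sketch below; the function is an invented example, while noinstr and instrumentation_begin()/instrumentation_end() are the real kernel interfaces.)

    #include <linux/compiler.h>
    #include <linux/instrumentation.h>

    /*
     * Hypothetical early-entry helper: noinstr keeps tracers, KASAN,
     * KCOV etc. out of code that runs before entry state is set up.
     */
    noinstr void arch_enter_from_user_sketch(void)
    {
        /* no instrumentable calls allowed here ... */

        instrumentation_begin(); /* instrumentation is fine from here */
        /* ... normal C code ... */
        instrumentation_end();
    }
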
config ARCH_32BIT_OFF_T
bool
depends on !64BIT
@@ -292,17 +337,21 @@ config ARCH_32BIT_OFF_T
still support 32-bit off_t. This option is enabled for all such
architectures explicitly.
+# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat
+config ARCH_32BIT_USTAT_F_TINODE
+ bool
+
config HAVE_ASM_MODVERSIONS
bool
help
- This symbol should be selected by an architecure if it provides
+ This symbol should be selected by an architecture if it provides
<asm/asm-prototypes.h> to support the module versioning for symbols
exported from assembly code.
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
- This symbol should be selected by an architecure if it supports
+ This symbol should be selected by an architecture if it supports
the API needed to access registers and stack entries from pt_regs,
declared in asm/ptrace.h
For example the kprobes-based event tracer needs this API.
@@ -314,18 +363,18 @@ config HAVE_RSEQ
This symbol should be selected by an architecture if it
supports an implementation of restartable sequences.
-config HAVE_FUNCTION_ARG_ACCESS_API
+config HAVE_RUST
bool
help
- This symbol should be selected by an architecure if it supports
- the API needed to access function arguments from pt_regs,
- declared in asm/ptrace.h
+ This symbol should be selected by an architecture if it
+ supports Rust.
-config HAVE_CLK
+config HAVE_FUNCTION_ARG_ACCESS_API
bool
help
- The <linux/clk.h> calls support software clock gating and
- thus are a key power management tool on many systems.
+ This symbol should be selected by an architecture if it supports
+ the API needed to access function arguments from pt_regs,
+ declared in asm/ptrace.h
config HAVE_HW_BREAKPOINT
bool
@@ -359,20 +408,14 @@ config HAVE_HARDLOCKUP_DETECTOR_PERF
The arch chooses to use the generic perf-NMI-based hardlockup
detector. Must define HAVE_PERF_EVENTS_NMI.
-config HAVE_NMI_WATCHDOG
- depends on HAVE_NMI
- bool
- help
- The arch provides a low level NMI watchdog. It provides
- asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
-
config HAVE_HARDLOCKUP_DETECTOR_ARCH
bool
- select HAVE_NMI_WATCHDOG
help
- The arch chooses to provide its own hardlockup detector, which is
- a superset of the HAVE_NMI_WATCHDOG. It also conforms to config
- interfaces and parameters provided by hardlockup detector subsystem.
+ The arch provides its own hardlockup detector implementation instead
+ of the generic ones.
+
+ It uses the same command line parameters, and sysctl interface,
+ as the generic hardlockup detectors.
config HAVE_PERF_REGS
bool
@@ -393,21 +436,75 @@ config HAVE_ARCH_JUMP_LABEL
config HAVE_ARCH_JUMP_LABEL_RELATIVE
bool
-config HAVE_RCU_TABLE_FREE
+config MMU_GATHER_TABLE_FREE
+ bool
+
+config MMU_GATHER_RCU_TABLE_FREE
bool
+ select MMU_GATHER_TABLE_FREE
-config HAVE_RCU_TABLE_NO_INVALIDATE
+config MMU_GATHER_PAGE_SIZE
bool
-config HAVE_MMU_GATHER_PAGE_SIZE
+config MMU_GATHER_NO_RANGE
bool
+ select MMU_GATHER_MERGE_VMAS
-config HAVE_MMU_GATHER_NO_GATHER
+config MMU_GATHER_NO_FLUSH_CACHE
+ bool
+
+config MMU_GATHER_MERGE_VMAS
+ bool
+
+config MMU_GATHER_NO_GATHER
+ bool
+ depends on MMU_GATHER_TABLE_FREE
+
+config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ bool
+ help
+ Temporary select until all architectures can be converted to have
+ irqs disabled over activate_mm. Architectures that do IPI based TLB
+ shootdowns should enable this.
+
+# Use normal mm refcounting for MMU_LAZY_TLB kernel thread references.
+# MMU_LAZY_TLB_REFCOUNT=n can improve the scalability of context switching
+# to/from kernel threads when the same mm is running on a lot of CPUs (a large
+# multi-threaded application), by reducing contention on the mm refcount.
+#
+# This can be disabled if the architecture ensures no CPUs are using an mm as a
+# "lazy tlb" beyond its final refcount (i.e., by the time __mmdrop frees the mm
+# or its kernel page tables). This could be arranged by arch_exit_mmap(), or
+# final exit(2) TLB flush, for example.
+#
+# To implement this, an arch *must*:
+# Ensure the _lazy_tlb variants of mmgrab/mmdrop are used when manipulating
+# the lazy tlb reference of a kthread's ->active_mm (non-arch code has been
+# converted already).
+config MMU_LAZY_TLB_REFCOUNT
+ def_bool y
+ depends on !MMU_LAZY_TLB_SHOOTDOWN
+
+# This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
+# mm as a lazy tlb beyond its last reference count, by shooting down these
+# users before the mm is deallocated. __mmdrop() first IPIs all CPUs that may
+# be using the mm as a lazy tlb, so that they may switch themselves to using
+# init_mm for their active mm. mm_cpumask(mm) is used to determine which CPUs
+# may be using mm as a lazy tlb mm.
+#
+# To implement this, an arch *must*:
+# - At the time of the final mmdrop of the mm, ensure mm_cpumask(mm) contains
+# at least all possible CPUs in which the mm is lazy.
+# - It must meet the requirements for MMU_LAZY_TLB_REFCOUNT=n (see above).
+config MMU_LAZY_TLB_SHOOTDOWN
bool
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
+config ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
+ bool
+
config HAVE_ALIGNED_STRUCT_PAGE
bool
help
@@ -435,10 +532,23 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
+config HAVE_ARCH_SECCOMP
+ bool
+ help
+ An arch should select this symbol to support seccomp mode 1 (the fixed
+ syscall policy), and must provide an override for __NR_seccomp_sigreturn,
+ and for the compat syscalls if the asm-generic/seccomp.h defaults need adjustment:
+ - __NR_seccomp_read_32
+ - __NR_seccomp_write_32
+ - __NR_seccomp_exit_32
+ - __NR_seccomp_sigreturn_32
+
config HAVE_ARCH_SECCOMP_FILTER
bool
+ select HAVE_ARCH_SECCOMP
help
An arch should select this symbol if it provides all of these things:
+ - all the requirements for HAVE_ARCH_SECCOMP
- syscall_get_arch()
- syscall_get_arguments()
- syscall_rollback()
@@ -448,6 +558,26 @@ config HAVE_ARCH_SECCOMP_FILTER
- secure_computing return value is checked and a return value of -1
results in the system call being skipped immediately.
- seccomp syscall wired up
+ - if !HAVE_SPARSE_SYSCALL_NR, have SECCOMP_ARCH_NATIVE,
+ SECCOMP_ARCH_NATIVE_NR, SECCOMP_ARCH_NATIVE_NAME defined. If
+ COMPAT is supported, have the SECCOMP_ARCH_COMPAT* defines too.
+
+config SECCOMP
+ prompt "Enable seccomp to safely execute untrusted bytecode"
+ def_bool y
+ depends on HAVE_ARCH_SECCOMP
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to handle untrusted bytecode during their
+ execution. By using pipes or other transports made available
+ to the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in their
+ own address space using seccomp. Once seccomp is enabled via
+ prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be
+ disabled and the task is only allowed to execute a few safe
+ syscalls defined by each seccomp mode.
+
+ If unsure, say Y.
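
(From userspace, mode 1 looks like this minimal sketch; after the prctl() only read(), write(), _exit() and sigreturn() are permitted.)

    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    int main(void)
    {
        /* Enter strict seccomp. */
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
            return 1;

        write(1, "sandboxed\n", 10);

        /*
         * Any other syscall (e.g. open()) would now kill the task
         * with SIGKILL instead of returning an error.
         */
        _exit(0);
    }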
config SECCOMP_FILTER
def_bool y
@@ -459,6 +589,20 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
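
(A minimal classic-BPF filter for mode 2, sketched; it allows everything except one syscall, which is an arbitrary choice for illustration.)

    #include <errno.h>
    #include <linux/audit.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <stddef.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int install_filter(void)
    {
        struct sock_filter filter[] = {
            /* Load the syscall number from seccomp_data. */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)),
            /* Deny getpid() with EPERM, allow everything else. */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
            .len = sizeof(filter) / sizeof(filter[0]),
            .filter = filter,
        };

        /* Required unless the caller has CAP_SYS_ADMIN. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }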
+config SECCOMP_CACHE_DEBUG
+ bool "Show seccomp filter cache status in /proc/pid/seccomp_cache"
+ depends on SECCOMP_FILTER && !HAVE_SPARSE_SYSCALL_NR
+ depends on PROC_FS
+ help
+ This enables the /proc/pid/seccomp_cache interface to monitor
+ seccomp cache data. The file format is subject to change. Reading
+ the file requires CAP_SYS_ADMIN.
+
+ This option is for debugging only. Enabling presents the risk that
+ an adversary may be able to infer the seccomp filter logic.
+
+ If unsure, say N.
+
config HAVE_ARCH_STACKLEAK
bool
help
@@ -472,9 +616,6 @@ config HAVE_STACKPROTECTOR
An arch should select this symbol if:
- it has implemented a stack canary (e.g. __stack_chk_guard)
-config CC_HAS_STACKPROTECTOR_NONE
- def_bool $(cc-option,-fno-stack-protector)
-
config STACKPROTECTOR
bool "Stack Protector buffer overflow detection"
depends on HAVE_STACKPROTECTOR
@@ -521,6 +662,162 @@ config STACKPROTECTOR_STRONG
about 20% of all kernel functions, which increases the kernel code
size by about 2%.
+config ARCH_SUPPORTS_SHADOW_CALL_STACK
+ bool
+ help
+ An architecture should select this if it supports the compiler's
+ Shadow Call Stack and implements runtime support for shadow stack
+ switching.
+
+config SHADOW_CALL_STACK
+ bool "Shadow Call Stack"
+ depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+ depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ help
+ This option enables the compiler's Shadow Call Stack, which
+ uses a shadow stack to protect function return addresses from
+ being overwritten by an attacker. More information can be found
+ in the compiler's documentation:
+
+ - Clang: https://clang.llvm.org/docs/ShadowCallStack.html
+ - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
+
+ Note that security guarantees in the kernel differ from the
+ ones documented for user space. The kernel must store addresses
+ of shadow stacks in memory, which means an attacker capable of
+ reading and writing arbitrary memory may be able to locate them
+ and hijack control flow by modifying the stacks.
+
+config DYNAMIC_SCS
+ bool
+ help
+ Set by the arch code if it relies on code patching to insert the
+ shadow call stack push and pop instructions rather than on the
+ compiler.
+
+config LTO
+ bool
+ help
+ Selected if the kernel will be built using the compiler's LTO feature.
+
+config LTO_CLANG
+ bool
+ select LTO
+ help
+ Selected if the kernel will be built using Clang's LTO feature.
+
+config ARCH_SUPPORTS_LTO_CLANG
+ bool
+ help
+ An architecture should select this option if it supports:
+ - compiling with Clang,
+ - compiling inline assembly with Clang's integrated assembler,
+ - and linking with LLD.
+
+config ARCH_SUPPORTS_LTO_CLANG_THIN
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ ThinLTO mode.
+
+config HAS_LTO_CLANG
+ def_bool y
+ depends on CC_IS_CLANG && LD_IS_LLD && AS_IS_LLVM
+ depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
+ depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
+ depends on ARCH_SUPPORTS_LTO_CLANG
+ depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
+ # https://github.com/ClangBuiltLinux/linux/issues/1721
+ depends on (!KASAN || KASAN_HW_TAGS || CLANG_VERSION >= 170000) || !DEBUG_INFO
+ depends on (!KCOV || CLANG_VERSION >= 170000) || !DEBUG_INFO
+ depends on !GCOV_KERNEL
+ help
+ The compiler and Kconfig options support building with Clang's
+ LTO.
+
+choice
+ prompt "Link Time Optimization (LTO)"
+ default LTO_NONE
+ help
+ This option enables Link Time Optimization (LTO), which allows the
+ compiler to optimize binaries globally.
+
+ If unsure, select LTO_NONE. Note that LTO is very resource-intensive
+ so it's disabled by default.
+
+config LTO_NONE
+ bool "None"
+ help
+ Build the kernel normally, without Link Time Optimization (LTO).
+
+config LTO_CLANG_FULL
+ bool "Clang Full LTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG
+ depends on !COMPILE_TEST
+ select LTO_CLANG
+ help
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
+
+ https://llvm.org/docs/LinkTimeOptimization.html
+
+ During link time, this option can use a large amount of RAM, and
+ may take much longer than the ThinLTO option.
+
+config LTO_CLANG_THIN
+ bool "Clang ThinLTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
+ select LTO_CLANG
+ help
+ This option enables Clang's ThinLTO, which allows for parallel
+ optimization and faster incremental compiles compared to the
+ CONFIG_LTO_CLANG_FULL option. More information can be found
+ from Clang's documentation:
+
+ https://clang.llvm.org/docs/ThinLTO.html
+
+ If unsure, say Y.
+endchoice
+
+config ARCH_SUPPORTS_CFI_CLANG
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ Control-Flow Integrity (CFI) checking.
+
+config ARCH_USES_CFI_TRAPS
+ bool
+
+config CFI_CLANG
+ bool "Use Clang's Control Flow Integrity (CFI)"
+ depends on ARCH_SUPPORTS_CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi)
+ help
+ This option enables Clang's forward-edge Control Flow Integrity
+ (CFI) checking, where the compiler injects a runtime check to each
+ indirect function call to ensure the target is a valid function with
+ the correct static type. This restricts possible call targets and
+ makes it more difficult for an attacker to exploit bugs that allow
+ the modification of stored function pointers. More information can be
+ found from Clang's documentation:
+
+ https://clang.llvm.org/docs/ControlFlowIntegrity.html
+
+config CFI_PERMISSIVE
+ bool "Use CFI in permissive mode"
+ depends on CFI_CLANG
+ help
+ When selected, Control Flow Integrity (CFI) violations result in a
+ warning instead of a kernel panic. This option should only be used
+ for finding indirect call type mismatches during development.
+
+ If unsure, say N.
+
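(The class of bug kCFI catches, sketched: calling through a pointer whose static type disagrees with the target's type traps at runtime.)

    /* Sketch of the mismatch kCFI detects at indirect call sites. */
    typedef int (*int_fn)(int);

    static long bad_target(void *p) /* different signature */
    {
        return 0;
    }

    int indirect_call(void)
    {
        /* The cast silences the compiler but not the runtime check: */
        int_fn fn = (int_fn)bad_target;

        /*
         * With CONFIG_CFI_CLANG the caller verifies the target's type
         * hash before the call; this mismatched call traps (or warns,
         * under CFI_PERMISSIVE) instead of executing bad_target().
         */
        return fn(42);
    }
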
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
@@ -530,20 +827,49 @@ config HAVE_ARCH_WITHIN_STACK_FRAMES
and similar) by implementing an inline arch_within_stack_frames(),
which is used by CONFIG_HARDENED_USERCOPY.
-config HAVE_CONTEXT_TRACKING
+config HAVE_CONTEXT_TRACKING_USER
bool
help
Provide kernel/user boundaries probes necessary for subsystems
that need it, such as userspace RCU extended quiescent state.
- Syscalls need to be wrapped inside user_exit()-user_enter() through
- the slow path using TIF_NOHZ flag. Exceptions handlers must be
- wrapped as well. Irqs are already protected inside
- rcu_irq_enter/rcu_irq_exit() but preemption or signal handling on
- irq exit still need to be protected.
+ Syscalls need to be wrapped inside user_exit()-user_enter(), either
+ optimized behind a static key or through the slow path using the
+ TIF_NOHZ flag. Exception handlers must be wrapped as well. Irqs are
+ already protected inside ct_irq_enter()/ct_irq_exit() but preemption
+ or signal handling on irq exit still needs to be protected.
+
+config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
+ bool
+ help
+ Architecture neither relies on exception_enter()/exception_exit()
+ nor on schedule_user(). Also preempt_schedule_notrace() and
+ preempt_schedule_irq() can't be called in a preemptible section
+ while context tracking is CONTEXT_USER. This feature reflects a sane
+ entry implementation where the following requirements are met on
+ critical entry code, ie: before user_exit() or after user_enter():
+
+ - Critical entry code isn't preemptible (or better yet:
+ not interruptible).
+ - No use of RCU read side critical sections, unless ct_nmi_enter()
+ got called.
+ - No use of instrumentation, unless instrumentation_begin() got
+ called.
+
+config HAVE_TIF_NOHZ
+ bool
+ help
+ Arch relies on TIF_NOHZ and syscall slow path to implement context
+ tracking calls to user_enter()/user_exit().
config HAVE_VIRT_CPU_ACCOUNTING
bool
+config HAVE_VIRT_CPU_ACCOUNTING_IDLE
+ bool
+ help
+ Architecture has its own way to account idle CPU time and therefore
+ doesn't implement vtime_account_idle().
+
config ARCH_HAS_SCALED_CPUTIME
bool
@@ -558,13 +884,19 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN
some 32-bit arches may require multiple accesses, so proper
locking is needed to protect against concurrent accesses.
-
config HAVE_IRQ_TIME_ACCOUNTING
bool
help
Archs need to ensure they use a high enough resolution clock to
support irq time accounting and then call enable_sched_clock_irqtime().
+config HAVE_MOVE_PUD
+ bool
+ help
+ Architectures that select this are able to move page tables at the
+ PUD level. If there are only 3 page table levels, the move effectively
+ happens at the PGD level.
+
config HAVE_MOVE_PMD
bool
help
@@ -579,9 +911,26 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
config HAVE_ARCH_HUGE_VMAP
bool
+#
+# Archs that select this would be capable of PMD-sized vmaps (i.e.,
+# arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
+# must be used to enable allocations to use hugepages.
+#
+config HAVE_ARCH_HUGE_VMALLOC
+ depends on HAVE_ARCH_HUGE_VMAP
+ bool
+
config ARCH_WANT_HUGE_PMD_SHARE
bool
+# Archs that want to use pmd_mkwrite on kernel memory need it defined even
+# if there are no userspace memory management features that use it
+config ARCH_WANT_KERNEL_PMD_MKWRITE
+ bool
+
+config ARCH_WANT_PMD_MKWRITE
+ def_bool TRANSPARENT_HUGEPAGE || ARCH_WANT_KERNEL_PMD_MKWRITE
+
config HAVE_ARCH_SOFT_DIRTY
bool
@@ -604,6 +953,12 @@ config MODULES_USE_ELF_REL
Modules only use ELF REL relocations. Modules with ELF RELA
relocations will give an error.
+config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ bool
+ help
+ For architectures like powerpc/32 which have constraints on module
+ allocation and need to allocate module data outside of the module area.
+
config HAVE_IRQ_EXIT_ON_IRQ_STACK
bool
help
@@ -614,6 +969,22 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
This spares a stack switch and improves cache usage on softirq
processing.
+config HAVE_SOFTIRQ_ON_OWN_STACK
+ bool
+ help
+ Architecture provides a function to run __do_softirq() on a
+ separate stack.
+
+config SOFTIRQ_ON_OWN_STACK
+ def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
+config ALTERNATE_USER_ADDRESS_SPACE
+ bool
+ help
+ Architectures set this when the CPU uses separate address
+ spaces for kernel and user space pointers. In this case, the
+ access_ok() check on a __user pointer is skipped.
+
config PGTABLE_LEVELS
int
default 2
@@ -706,6 +1077,17 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.
+config PAGE_SIZE_LESS_THAN_64KB
+ def_bool y
+ depends on !ARM64_64K_PAGES
+ depends on !PAGE_SIZE_64KB
+ depends on !PARISC_PAGE_SIZE_64KB
+ depends on PAGE_SIZE_LESS_THAN_256KB
+
+config PAGE_SIZE_LESS_THAN_256KB
+ def_bool y
+ depends on !PAGE_SIZE_256KB
+
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
@@ -717,24 +1099,34 @@ config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
depends on MMU
select ARCH_HAS_ELF_RANDOMIZE
-config HAVE_COPY_THREAD_TLS
+config HAVE_OBJTOOL
+ bool
+
+config HAVE_JUMP_LABEL_HACK
+ bool
+
+config HAVE_NOINSTR_HACK
bool
- help
- Architecture provides copy_thread_tls to accept tls argument via
- normal C parameter passing, rather than extracting the syscall
- argument from pt_regs.
+
+config HAVE_NOINSTR_VALIDATION
+ bool
+
+config HAVE_UACCESS_VALIDATION
+ bool
+ select OBJTOOL
config HAVE_STACK_VALIDATION
bool
help
- Architecture supports the 'objtool check' host tool command, which
- performs compile-time stack metadata validation.
+ Architecture supports objtool compile-time frame pointer rule
+ validation.
config HAVE_RELIABLE_STACKTRACE
bool
help
- Architecture has a save_stack_trace_tsk_reliable() function which
- only returns a stack trace if it can guarantee the trace is reliable.
+ Architecture has either save_stack_trace_tsk_reliable() or
+ arch_stack_walk_reliable() function which only returns a stack trace
+ if it can guarantee the trace is reliable.
config HAVE_ARCH_HASH
bool
@@ -837,16 +1229,53 @@ config VMAP_STACK
default y
bool "Use a virtually-mapped stack"
depends on HAVE_ARCH_VMAP_STACK
- depends on !KASAN || KASAN_VMALLOC
- ---help---
+ depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
+ help
Enable this if you want to use virtually-mapped kernel stacks
with guard pages. This causes kernel stack overflows to be
caught immediately rather than causing difficult-to-diagnose
corruption.
- To use this with KASAN, the architecture must support backing
- virtual mappings with real shadow memory, and KASAN_VMALLOC must
- be enabled.
+ To use this with software KASAN modes, the architecture must support
+ backing virtual mappings with real shadow memory, and KASAN_VMALLOC
+ must be enabled.
+
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ def_bool n
+ help
+ An arch should select this symbol if it can support kernel stack
+ offset randomization with calls to add_random_kstack_offset()
+ during syscall entry and choose_random_kstack_offset() during
+ syscall exit. Careful removal of -fstack-protector-strong and
+ -fstack-protector should also be applied to the entry code and
+ closely examined, as the artificial stack bump looks like an array
+ to the compiler, so it will attempt to add canary checks regardless
+ of the static branch state.
+
+config RANDOMIZE_KSTACK_OFFSET
+ bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT
+ default y
+ depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
+ help
+ The kernel stack offset can be randomized (after pt_regs) by
+ roughly 5 bits of entropy, frustrating memory corruption
+ attacks that depend on stack address determinism or
+ cross-syscall address exposures.
+
+ The feature is controlled via the "randomize_kstack_offset=on/off"
+ kernel boot param, and if turned off has zero overhead due to its use
+ of static branches (see JUMP_LABEL).
+
+ If unsure, say Y.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+ bool "Default state of kernel stack offset randomization"
+ depends on RANDOMIZE_KSTACK_OFFSET
+ help
+ Kernel stack offset randomization is controlled by kernel boot param
+ "randomize_kstack_offset=on/off", and this config chooses the default
+ boot state.
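
(On the arch side, the hooks named above are used roughly like this sketch of a syscall path; the surrounding function names are illustrative, not any architecture's real entry code.)

    #include <linux/ptrace.h>
    #include <linux/randomize_kstack.h>
    #include <asm/timex.h>

    extern void invoke_syscall(struct pt_regs *regs, unsigned long nr);
    /* ^ hypothetical dispatcher, for illustration */

    static void do_syscall_sketch(struct pt_regs *regs, unsigned long nr)
    {
        /*
         * Consume the per-CPU offset: bumps the stack by a few random
         * bytes before any syscall work touches it.
         */
        add_random_kstack_offset();

        invoke_syscall(regs, nr);

        /*
         * Pick the offset the *next* syscall will use, from a cheap
         * entropy source such as a cycle counter.
         */
        choose_random_kstack_offset(get_cycles());
    }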
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
@@ -910,7 +1339,7 @@ config ARCH_USE_MEMREMAP_PROT
config LOCK_EVENT_COUNTS
bool "Locking event counts collection"
depends on DEBUG_FS
- ---help---
+ help
Enable light-weight counting of various locking related events
in the system with minimal performance impact. This reduces
the chance of application behavior change because of timing
@@ -933,16 +1362,148 @@ config RELR
config ARCH_HAS_MEM_ENCRYPT
bool
+config ARCH_HAS_CC_PLATFORM
+ bool
+
config HAVE_SPARSE_SYSCALL_NR
- bool
- help
- An architecture should select this if its syscall numbering is sparse
+ bool
+ help
+ An architecture should select this if its syscall numbering is sparse
to save space. For example, MIPS architecture has a syscall array with
entries at 4000, 5000 and 6000 locations. This option turns on syscall
related optimizations for a given architecture.
+config ARCH_HAS_VDSO_DATA
+ bool
+
+config HAVE_STATIC_CALL
+ bool
+
+config HAVE_STATIC_CALL_INLINE
+ bool
+ depends on HAVE_STATIC_CALL
+ select OBJTOOL
+
+config HAVE_PREEMPT_DYNAMIC
+ bool
+
+config HAVE_PREEMPT_DYNAMIC_CALL
+ bool
+ depends on HAVE_STATIC_CALL
+ select HAVE_PREEMPT_DYNAMIC
+ help
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static calls.
+
+ Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+ preemption function will be patched directly.
+
+ Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+ call to a preemption function will go through a trampoline, and the
+ trampoline will be patched.
+
+ It is strongly advised to support inline static calls to avoid any
+ overhead.
+
+config HAVE_PREEMPT_DYNAMIC_KEY
+ bool
+ depends on HAVE_ARCH_JUMP_LABEL
+ select HAVE_PREEMPT_DYNAMIC
+ help
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static keys.
+
+ Each preemption function will be given an early return based on a
+ static key. This should have slightly lower overhead than non-inline
+ static calls, as this effectively inlines each trampoline into the
+ start of its callee. This may avoid redundant work, and may
+ integrate better with CFI schemes.
+
+ This will have greater overhead than using inline static calls as
+ the call to the preemption function cannot be entirely elided.
+
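(The static-call machinery these options build on has a small API, sketched here with an invented callback; DEFINE_STATIC_CALL(), static_call() and static_call_update() are the real interfaces from <linux/static_call.h>.)

    #include <linux/static_call.h>

    static int default_handler(int x)
    {
        return x;
    }

    /* Key + trampoline named "my_hook", initially targeting default_handler. */
    DEFINE_STATIC_CALL(my_hook, default_handler);

    int call_site(int x)
    {
        /*
         * With HAVE_STATIC_CALL_INLINE this compiles to a direct call
         * that is re-patched in place; otherwise it calls a patched
         * trampoline. Either way, no indirect branch at runtime.
         */
        return static_call(my_hook)(x);
    }

    static int faster_handler(int x)
    {
        return x + 1;
    }

    void switch_handler(void)
    {
        /* Re-patches every call site (or the trampoline) to the new target. */
        static_call_update(my_hook, faster_handler);
    }
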
+config ARCH_WANT_LD_ORPHAN_WARN
+ bool
+ help
+ An arch should select this symbol once all linker sections are explicitly
+ included, size-asserted, or discarded in the linker scripts. This is
+ important because we never want expected sections to be placed heuristically
+ by the linker, since the locations of such sections can change between linker
+ versions.
+
+config HAVE_ARCH_PFN_VALID
+ bool
+
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ bool
+
+config ARCH_SUPPORTS_PAGE_TABLE_CHECK
+ bool
+
+config ARCH_SPLIT_ARG64
+ bool
+ help
+ If a 32-bit architecture requires 64-bit arguments to be split into
+ pairs of 32-bit arguments, select this option.
+
+config ARCH_HAS_ELFCORE_COMPAT
+ bool
+
+config ARCH_HAS_PARANOID_L1D_FLUSH
+ bool
+
+config ARCH_HAVE_TRACE_MMIO_ACCESS
+ bool
+
+config DYNAMIC_SIGFRAME
+ bool
+
+# Select if the arch has a named attribute group bound to NUMA device nodes.
+config HAVE_ARCH_NODE_DEV_GROUP
+ bool
+
+config ARCH_HAS_HW_PTE_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in PTE entries when using them as part of linear address
+ translations. Architectures that require a runtime check should
+ select this option and override arch_has_hw_pte_young().
+
+config ARCH_HAS_NONLEAF_PMD_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in non-leaf PMD entries when using them as part of linear
+ address translations. Page table walkers that clear the accessed bit
+ may use this capability to reduce their search space.
+
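(For an architecture where hardware Accessed-bit setting depends on a runtime-detectable feature, the override named above might look like this sketch; the feature probe is a hypothetical placeholder.)

    #include <linux/types.h>

    extern bool cpu_has_hw_af_sketch(void); /* hypothetical detection helper */

    /* Sketch of an arch override, following the pgtable.h define pattern. */
    #define arch_has_hw_pte_young arch_has_hw_pte_young
    static inline bool arch_has_hw_pte_young(void)
    {
        /* e.g. only some CPU revisions set the Accessed bit in HW */
        return cpu_has_hw_af_sketch();
    }
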
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
+config FUNCTION_ALIGNMENT_4B
+ bool
+
+config FUNCTION_ALIGNMENT_8B
+ bool
+
+config FUNCTION_ALIGNMENT_16B
+ bool
+
+config FUNCTION_ALIGNMENT_32B
+ bool
+
+config FUNCTION_ALIGNMENT_64B
+ bool
+
+config FUNCTION_ALIGNMENT
+ int
+ default 64 if FUNCTION_ALIGNMENT_64B
+ default 32 if FUNCTION_ALIGNMENT_32B
+ default 16 if FUNCTION_ALIGNMENT_16B
+ default 8 if FUNCTION_ALIGNMENT_8B
+ default 4 if FUNCTION_ALIGNMENT_4B
+ default 0
+
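(In effect this cascaded default becomes the compiler's function-alignment request; a sketch of the equivalence, where the macro name is illustrative and the attribute is standard GCC/Clang. The build-level form would be -falign-functions=N for N > 0.)

    /*
     * What CONFIG_FUNCTION_ALIGNMENT boils down to per function: the
     * same request as this per-function attribute (illustrative macro,
     * valid only when CONFIG_FUNCTION_ALIGNMENT > 0).
     */
    #define __func_aligned __attribute__((aligned(CONFIG_FUNCTION_ALIGNMENT)))

    void __func_aligned hot_function(void)
    {
        /* starts on a CONFIG_FUNCTION_ALIGNMENT-byte boundary */
    }
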
endmenu