Diffstat (limited to 'scripts')

 -rw-r--r--  scripts/Makefile.build                  |   3
 -rw-r--r--  scripts/Makefile.gcc-plugins            |  16
 -rw-r--r--  scripts/Makefile.kstack_erase           |  21
 -rwxr-xr-x  scripts/check-sysctl-docs              | 184
 -rw-r--r--  scripts/const_structs.checkpatch        |   1
 -rwxr-xr-x  scripts/crypto/gen-hash-testvecs.py     | 147
 -rw-r--r--  scripts/gcc-plugins/stackleak_plugin.c  |  52
 -rw-r--r--  scripts/gdb/linux/constants.py.in       |   7
 -rw-r--r--  scripts/gdb/linux/interrupts.py         |  16
 -rw-r--r--  scripts/gdb/linux/mapletree.py          | 252
 -rw-r--r--  scripts/gdb/linux/symbols.py            |  26
 -rw-r--r--  scripts/gdb/linux/vfs.py                |   2
 -rw-r--r--  scripts/gdb/linux/xarray.py             |  28
 -rw-r--r--  scripts/syscall.tbl                     |   2
 14 files changed, 625 insertions, 132 deletions
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a6461ea411f7..ba71b27aa363 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -312,10 +312,11 @@ $(obj)/%.lst: $(obj)/%.c FORCE
# - Stable since Rust 1.82.0: `feature(asm_const)`, `feature(raw_ref_op)`.
# - Stable since Rust 1.87.0: `feature(asm_goto)`.
# - Expected to become stable: `feature(arbitrary_self_types)`.
+# - To be determined: `feature(used_with_arg)`.
#
# Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
# the unstable features in use.
-rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op
+rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op,used_with_arg
# `--out-dir` is required to avoid temporaries being created by `rustc` in the
# current working directory, which may be not accessible in the out-of-tree
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 435ab3f0ec44..b0e1423b09c2 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -8,20 +8,6 @@ ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
endif
export DISABLE_LATENT_ENTROPY_PLUGIN
-gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
- += -DSTACKLEAK_PLUGIN
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
- += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
- += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH)
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK_VERBOSE) \
- += -fplugin-arg-stackleak_plugin-verbose
-ifdef CONFIG_GCC_PLUGIN_STACKLEAK
- DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
-endif
-export DISABLE_STACKLEAK_PLUGIN
-
# All the plugin CFLAGS are collected here in case a build target needs to
# filter them out of the KBUILD_CFLAGS.
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) -DGCC_PLUGINS
@@ -34,6 +20,8 @@ KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
# be included in GCC_PLUGIN so they can get built.
gcc-plugin-external-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \
+= randomize_layout_plugin.so
+gcc-plugin-external-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+ += stackleak_plugin.so
# All enabled GCC plugins are collected here for building in
# scripts/gcc-plugins/Makefile.
diff --git a/scripts/Makefile.kstack_erase b/scripts/Makefile.kstack_erase
new file mode 100644
index 000000000000..ee7e4ef7b892
--- /dev/null
+++ b/scripts/Makefile.kstack_erase
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+kstack-erase-cflags-y += -fplugin=$(objtree)/scripts/gcc-plugins/stackleak_plugin.so
+kstack-erase-cflags-y += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE)
+kstack-erase-cflags-y += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH)
+kstack-erase-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK_VERBOSE) += -fplugin-arg-stackleak_plugin-verbose
+DISABLE_KSTACK_ERASE := -fplugin-arg-stackleak_plugin-disable
+endif
+
+ifdef CONFIG_CC_IS_CLANG
+kstack-erase-cflags-y += -fsanitize-coverage=stack-depth
+kstack-erase-cflags-y += -fsanitize-coverage-stack-depth-callback-min=$(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE)
+DISABLE_KSTACK_ERASE := -fno-sanitize-coverage=stack-depth
+endif
+
+KSTACK_ERASE_CFLAGS := $(kstack-erase-cflags-y)
+
+export KSTACK_ERASE_CFLAGS DISABLE_KSTACK_ERASE
+
+KBUILD_CFLAGS += $(KSTACK_ERASE_CFLAGS)
diff --git a/scripts/check-sysctl-docs b/scripts/check-sysctl-docs
index 20274c63e745..910fd8a9a268 100755
--- a/scripts/check-sysctl-docs
+++ b/scripts/check-sysctl-docs
@@ -1,4 +1,4 @@
-#!/usr/bin/gawk -f
+#!/usr/bin/env -S gawk -f
# SPDX-License-Identifier: GPL-2.0
# Script to check sysctl documentation against source files
@@ -13,10 +13,22 @@
# Specify -vdebug=1 to see debugging information
BEGIN {
- if (!table) {
+ if (!table) {
print "Please specify the table to look for using the table variable" > "/dev/stderr"
exit 1
- }
+ }
+
+ # Documentation title skiplist
+ skiplist[0] = "^Documentation for"
+ skiplist[1] = "Network core options$"
+ skiplist[2] = "POSIX message queues filesystem$"
+ skiplist[3] = "Configuration options"
+ skiplist[4] = ". /proc/sys/fs"
+ skiplist[5] = "^Introduction$"
+ skiplist[6] = "^seccomp$"
+ skiplist[7] = "^pty$"
+ skiplist[8] = "^firmware_config$"
+ skiplist[9] = "^random$"
}
# The following globals are used:
@@ -31,124 +43,132 @@ BEGIN {
# Remove punctuation from the given value
function trimpunct(value) {
- while (value ~ /^["&]/) {
- value = substr(value, 2)
- }
- while (value ~ /[]["&,}]$/) {
- value = substr(value, 1, length(value) - 1)
- }
- return value
+ while (value ~ /^["&]/) {
+ value = substr(value, 2)
+ }
+ while (value ~ /[]["&,}]$/) {
+ value = substr(value, 1, length(value) - 1)
+ }
+ return value
}
# Print the information for the given entry
function printentry(entry) {
- seen[entry]++
- printf "* %s from %s", entry, file[entry]
- if (documented[entry]) {
- printf " (documented)"
- }
- print ""
+ seen[entry]++
+ printf "* %s from %s", entry, file[entry]
+ if (documented[entry]) {
+ printf " (documented)"
+ }
+ print ""
}
# Stage 1: build the list of documented entries
FNR == NR && /^=+$/ {
- if (prevline ~ /Documentation for/) {
- # This is the main title
- next
- }
-
- # The previous line is a section title, parse it
- $0 = prevline
- if (debug) print "Parsing " $0
- inbrackets = 0
- for (i = 1; i <= NF; i++) {
- if (length($i) == 0) {
- continue
- }
- if (!inbrackets && substr($i, 1, 1) == "(") {
- inbrackets = 1
- }
- if (!inbrackets) {
- token = trimpunct($i)
- if (length(token) > 0 && token != "and") {
- if (debug) print trimpunct($i)
- documented[trimpunct($i)]++
- }
+ for (i in skiplist) {
+ if (prevline ~ skiplist[i]) {
+ next
+ }
}
- if (inbrackets && substr($i, length($i), 1) == ")") {
- inbrackets = 0
+
+ # The previous line is a section title, parse it
+ $0 = prevline
+ if (debug) print "Parsing " $0
+ inbrackets = 0
+ for (i = 1; i <= NF; i++) {
+ if (length($i) == 0) {
+ continue
+ }
+ if (!inbrackets && substr($i, 1, 1) == "(") {
+ inbrackets = 1
+ }
+ if (!inbrackets) {
+ token = trimpunct($i)
+ if (length(token) > 0 && token != "and") {
+ if (debug) print trimpunct($i)
+ documented[trimpunct($i)]++
+ }
+ }
+ if (inbrackets && substr($i, length($i), 1) == ")") {
+ inbrackets = 0
+ }
}
- }
}
FNR == NR {
- prevline = $0
- next
+ prevline = $0
+ next
}
# Stage 2: process each file and find all sysctl tables
BEGINFILE {
- delete entries
- curtable = ""
- curentry = ""
- delete vars
- if (debug) print "Processing file " FILENAME
+ delete entries
+ curtable = ""
+ curentry = ""
+ delete vars
+ if (debug) print "Processing file " FILENAME
}
/^static( const)? struct ctl_table/ {
- match($0, /static( const)? struct ctl_table ([^][]+)/, tables)
- curtable = tables[2]
- if (debug) print "Processing table " curtable
+ match($0, /static( const)? struct ctl_table ([^][]+)/, tables)
+ curtable = tables[2]
+ if (debug) print "Processing table " curtable
}
/^};$/ {
- curtable = ""
- curentry = ""
- delete vars
+ curtable = ""
+ curentry = ""
+ delete vars
}
curtable && /\.procname[\t ]*=[\t ]*".+"/ {
- match($0, /.procname[\t ]*=[\t ]*"([^"]+)"/, names)
- curentry = names[1]
- if (debug) print "Adding entry " curentry " to table " curtable
- entries[curtable][curentry]++
- file[curentry] = FILENAME
+ match($0, /.procname[\t ]*=[\t ]*"([^"]+)"/, names)
+ curentry = names[1]
+ if (debug) print "Adding entry " curentry " to table " curtable
+ entries[curtable][curentry]++
+ file[curentry] = FILENAME
+}
+
+curtable && /UCOUNT_ENTRY.*/ {
+ match($0, /UCOUNT_ENTRY\("([^"]+)"\)/, names)
+ curentry = names[1]
+ if (debug) print "Adding entry " curentry " to table " curtable
+ entries[curtable][curentry]++
+ file[curentry] = FILENAME
}
/register_sysctl.*/ {
- match($0, /register_sysctl(|_init|_sz)\("([^"]+)" *, *([^,)]+)/, tables)
- if (debug) print "Registering table " tables[3] " at " tables[2]
- if (tables[2] == table) {
- for (entry in entries[tables[3]]) {
- printentry(entry)
- }
- }
+ match($0, /register_sysctl(|_init|_sz)\("([^"]+)" *, *([^,)]+)/, tables)
+ if (debug) print "Registering table " tables[3] " at " tables[2]
+ if (tables[2] == table) {
+ for (entry in entries[tables[3]]) {
+ printentry(entry)
+ }
+ }
}
/kmemdup.*/ {
- match($0, /([^ \t]+) *= *kmemdup\(([^,]+) *,/, names)
- if (debug) print "Found variable " names[1] " for table " names[2]
- if (names[2] in entries) {
- vars[names[1]] = names[2]
- }
+ match($0, /([^ \t]+) *= *kmemdup\(([^,]+) *,/, names)
+ if (debug) print "Found variable " names[1] " for table " names[2]
+ if (names[2] in entries) {
+ vars[names[1]] = names[2]
+ }
}
/__register_sysctl_table.*/ {
- match($0, /__register_sysctl_table\([^,]+, *"([^"]+)" *, *([^,]+)/, tables)
- if (debug) print "Registering variable table " tables[2] " at " tables[1]
- if (tables[1] == table && tables[2] in vars) {
- for (entry in entries[vars[tables[2]]]) {
- printentry(entry)
- }
- }
+ match($0, /__register_sysctl_table\([^,]+, *"([^"]+)" *, *([^,]+)/, tables)
+ if (debug) print "Registering variable table " tables[2] " at " tables[1]
+ if (tables[1] == table && tables[2] in vars) {
+ for (entry in entries[vars[tables[2]]]) {
+ printentry(entry)
+ }
+ }
}
END {
- for (entry in documented) {
- if (!seen[entry]) {
- print "No implementation for " entry
+ for (entry in documented) {
+ if (!seen[entry])
+ print "No implementation for " entry
}
- }
}
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
index e8609a03c3d8..6eb94fddc338 100644
--- a/scripts/const_structs.checkpatch
+++ b/scripts/const_structs.checkpatch
@@ -1,6 +1,7 @@
acpi_dock_ops
address_space_operations
backlight_ops
+bin_attribute
block_device_operations
bus_type
clk_ops
diff --git a/scripts/crypto/gen-hash-testvecs.py b/scripts/crypto/gen-hash-testvecs.py
new file mode 100755
index 000000000000..4ac927d40cf5
--- /dev/null
+++ b/scripts/crypto/gen-hash-testvecs.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Script that generates test vectors for the given cryptographic hash function.
+#
+# Copyright 2025 Google LLC
+
+import hashlib
+import hmac
+import sys
+
+DATA_LENS = [0, 1, 2, 3, 16, 32, 48, 49, 63, 64, 65, 127, 128, 129, 256, 511,
+ 513, 1000, 3333, 4096, 4128, 4160, 4224, 16384]
+
+# Generate the given number of random bytes, using the length itself as the seed
+# for a simple linear congruential generator (LCG). The C test code uses the
+# same LCG with the same seeding strategy to reconstruct the data, ensuring
+# reproducibility without explicitly storing the data in the test vectors.
+def rand_bytes(length):
+ seed = length
+ out = []
+ for _ in range(length):
+ seed = (seed * 25214903917 + 11) % 2**48
+ out.append((seed >> 16) % 256)
+ return bytes(out)
+
+POLY1305_KEY_SIZE = 32
+
+# A straightforward, unoptimized implementation of Poly1305.
+# Reference: https://cr.yp.to/mac/poly1305-20050329.pdf
+class Poly1305:
+ def __init__(self, key):
+ assert len(key) == POLY1305_KEY_SIZE
+ self.h = 0
+ rclamp = 0x0ffffffc0ffffffc0ffffffc0fffffff
+ self.r = int.from_bytes(key[:16], byteorder='little') & rclamp
+ self.s = int.from_bytes(key[16:], byteorder='little')
+
+ # Note: this supports partial blocks only at the end.
+ def update(self, data):
+ for i in range(0, len(data), 16):
+ chunk = data[i:i+16]
+ c = int.from_bytes(chunk, byteorder='little') + 2**(8 * len(chunk))
+ self.h = ((self.h + c) * self.r) % (2**130 - 5)
+ return self
+
+ # Note: gen_additional_poly1305_testvecs() relies on this being
+ # nondestructive, i.e. not changing any field of self.
+ def digest(self):
+ m = (self.h + self.s) % 2**128
+ return m.to_bytes(16, byteorder='little')
+
+def hash_init(alg):
+ if alg == 'poly1305':
+ # Use a fixed random key here, to present Poly1305 as an unkeyed hash.
+ # This allows all the test cases for unkeyed hashes to work on Poly1305.
+ return Poly1305(rand_bytes(POLY1305_KEY_SIZE))
+ return hashlib.new(alg)
+
+def hash_update(ctx, data):
+ ctx.update(data)
+
+def hash_final(ctx):
+ return ctx.digest()
+
+def compute_hash(alg, data):
+ ctx = hash_init(alg)
+ hash_update(ctx, data)
+ return hash_final(ctx)
+
+def print_bytes(prefix, value, bytes_per_line):
+ for i in range(0, len(value), bytes_per_line):
+ line = prefix + ''.join(f'0x{b:02x}, ' for b in value[i:i+bytes_per_line])
+ print(f'{line.rstrip()}')
+
+def print_static_u8_array_definition(name, value):
+ print('')
+ print(f'static const u8 {name} = {{')
+ print_bytes('\t', value, 8)
+ print('};')
+
+def print_c_struct_u8_array_field(name, value):
+ print(f'\t\t.{name} = {{')
+ print_bytes('\t\t\t', value, 8)
+ print('\t\t},')
+
+def gen_unkeyed_testvecs(alg):
+ print('')
+ print('static const struct {')
+ print('\tsize_t data_len;')
+ print(f'\tu8 digest[{alg.upper()}_DIGEST_SIZE];')
+ print('} hash_testvecs[] = {')
+ for data_len in DATA_LENS:
+ data = rand_bytes(data_len)
+ print('\t{')
+ print(f'\t\t.data_len = {data_len},')
+ print_c_struct_u8_array_field('digest', compute_hash(alg, data))
+ print('\t},')
+ print('};')
+
+ data = rand_bytes(4096)
+ ctx = hash_init(alg)
+ for data_len in range(len(data) + 1):
+ hash_update(ctx, compute_hash(alg, data[:data_len]))
+ print_static_u8_array_definition(
+ f'hash_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
+ hash_final(ctx))
+
+def gen_hmac_testvecs(alg):
+ ctx = hmac.new(rand_bytes(32), digestmod=alg)
+ data = rand_bytes(4096)
+ for data_len in range(len(data) + 1):
+ ctx.update(data[:data_len])
+ key_len = data_len % 293
+ key = rand_bytes(key_len)
+ mac = hmac.digest(key, data[:data_len], alg)
+ ctx.update(mac)
+ print_static_u8_array_definition(
+ f'hmac_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
+ ctx.digest())
+
+def gen_additional_poly1305_testvecs():
+ key = b'\xff' * POLY1305_KEY_SIZE
+ data = b''
+ ctx = Poly1305(key)
+ for _ in range(32):
+ for j in range(0, 4097, 16):
+ ctx.update(b'\xff' * j)
+ data += ctx.digest()
+ print_static_u8_array_definition(
+ 'poly1305_allones_macofmacs[POLY1305_DIGEST_SIZE]',
+ Poly1305(key).update(data).digest())
+
+if len(sys.argv) != 2:
+ sys.stderr.write('Usage: gen-hash-testvecs.py ALGORITHM\n')
+ sys.stderr.write('ALGORITHM may be any supported by Python hashlib, or poly1305.\n')
+ sys.stderr.write('Example: gen-hash-testvecs.py sha512\n')
+ sys.exit(1)
+
+alg = sys.argv[1]
+print('/* SPDX-License-Identifier: GPL-2.0-or-later */')
+print(f'/* This file was generated by: {sys.argv[0]} {" ".join(sys.argv[1:])} */')
+gen_unkeyed_testvecs(alg)
+if alg == 'poly1305':
+ gen_additional_poly1305_testvecs()
+else:
+ gen_hmac_testvecs(alg)
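As a quick sanity check, the Poly1305 class added above can be compared against the published test vector in RFC 8439, section 2.5.2. A minimal sketch, assuming the class and rand_bytes() from gen-hash-testvecs.py have been loaded into a Python 3 session (for example by pasting them into a REPL; the dash in the file name prevents a plain import):

    key = bytes.fromhex('85d6be7857556d337f4452fe42d506a8'
                        '0103808afb0db2fd4abff6af4149f51b')
    msg = b'Cryptographic Forum Research Group'
    # Expected one-time authenticator for this key/message (RFC 8439, 2.5.2).
    assert Poly1305(key).update(msg).digest().hex() == \
        'a8061dc1305136c6c22b8baf0c0127a9'
    # rand_bytes() seeds its LCG with the requested length, so equal lengths
    # always reproduce the same data; this is what lets the C test code
    # regenerate the inputs instead of storing them in the vectors.
    assert rand_bytes(32) == rand_bytes(32)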
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
index d20c47d21ad8..e486488c867d 100644
--- a/scripts/gcc-plugins/stackleak_plugin.c
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -9,7 +9,7 @@
* any of the gcc libraries
*
* This gcc plugin is needed for tracking the lowest border of the kernel stack.
- * It instruments the kernel code inserting stackleak_track_stack() calls:
+ * It instruments the kernel code inserting __sanitizer_cov_stack_depth() calls:
* - after alloca();
* - for the functions with a stack frame size greater than or equal
* to the "track-min-size" plugin parameter.
@@ -33,7 +33,7 @@ __visible int plugin_is_GPL_compatible;
static int track_frame_size = -1;
static bool build_for_x86 = false;
-static const char track_function[] = "stackleak_track_stack";
+static const char track_function[] = "__sanitizer_cov_stack_depth";
static bool disable = false;
static bool verbose = false;
@@ -58,7 +58,7 @@ static void add_stack_tracking_gcall(gimple_stmt_iterator *gsi, bool after)
cgraph_node_ptr node;
basic_block bb;
- /* Insert calling stackleak_track_stack() */
+ /* Insert calling __sanitizer_cov_stack_depth() */
stmt = gimple_build_call(track_function_decl, 0);
gimple_call = as_a_gcall(stmt);
if (after)
@@ -120,12 +120,12 @@ static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after)
gcc_assert(build_for_x86);
/*
- * Insert calling stackleak_track_stack() in asm:
- * asm volatile("call stackleak_track_stack"
+ * Insert calling __sanitizer_cov_stack_depth() in asm:
+ * asm volatile("call __sanitizer_cov_stack_depth"
* :: "r" (current_stack_pointer))
* Use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h.
* This constraint is taken into account during gcc shrink-wrapping
- * optimization. It is needed to be sure that stackleak_track_stack()
+ * optimization. It is needed to be sure that __sanitizer_cov_stack_depth()
* call is inserted after the prologue of the containing function,
* when the stack frame is prepared.
*/
@@ -137,7 +137,7 @@ static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after)
input = build_tree_list(NULL_TREE, build_const_char_string(2, "r"));
input = chainon(NULL_TREE, build_tree_list(input, sp_decl));
vec_safe_push(inputs, input);
- asm_call = gimple_build_asm_vec("call stackleak_track_stack",
+ asm_call = gimple_build_asm_vec("call __sanitizer_cov_stack_depth",
inputs, NULL, NULL, NULL);
gimple_asm_set_volatile(asm_call, true);
if (after)
@@ -151,11 +151,11 @@ static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after)
{
/*
* The 'no_caller_saved_registers' attribute is used for
- * stackleak_track_stack(). If the compiler supports this attribute for
- * the target arch, we can add calling stackleak_track_stack() in asm.
+ * __sanitizer_cov_stack_depth(). If the compiler supports this attribute for
+ * the target arch, we can add calling __sanitizer_cov_stack_depth() in asm.
* That improves performance: we avoid useless operations with the
* caller-saved registers in the functions from which we will remove
- * stackleak_track_stack() call during the stackleak_cleanup pass.
+ * __sanitizer_cov_stack_depth() call during the stackleak_cleanup pass.
*/
if (lookup_attribute_spec(get_identifier("no_caller_saved_registers")))
add_stack_tracking_gasm(gsi, after);
@@ -165,7 +165,7 @@ static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after)
/*
* Work with the GIMPLE representation of the code. Insert the
- * stackleak_track_stack() call after alloca() and into the beginning
+ * __sanitizer_cov_stack_depth() call after alloca() and into the beginning
* of the function if it is not instrumented.
*/
static unsigned int stackleak_instrument_execute(void)
@@ -205,7 +205,7 @@ static unsigned int stackleak_instrument_execute(void)
DECL_NAME_POINTER(current_function_decl));
}
- /* Insert stackleak_track_stack() call after alloca() */
+ /* Insert __sanitizer_cov_stack_depth() call after alloca() */
add_stack_tracking(&gsi, true);
if (bb == entry_bb)
prologue_instrumented = true;
@@ -241,7 +241,7 @@ static unsigned int stackleak_instrument_execute(void)
return 0;
}
- /* Insert stackleak_track_stack() call at the function beginning */
+ /* Insert __sanitizer_cov_stack_depth() call at the function beginning */
bb = entry_bb;
if (!single_pred_p(bb)) {
/* gcc_assert(bb_loop_depth(bb) ||
@@ -270,15 +270,15 @@ static void remove_stack_tracking_gcall(void)
rtx_insn *insn, *next;
/*
- * Find stackleak_track_stack() calls. Loop through the chain of insns,
+ * Find __sanitizer_cov_stack_depth() calls. Loop through the chain of insns,
* which is an RTL representation of the code for a function.
*
* The example of a matching insn:
- * (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack")
- * [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>)
- * [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list
- * (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl
- * 0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil))
+ * (call_insn 8 4 10 2 (call (mem (symbol_ref ("__sanitizer_cov_stack_depth")
+ * [flags 0x41] <function_decl 0x7f7cd3302a80 __sanitizer_cov_stack_depth>)
+ * [0 __sanitizer_cov_stack_depth S1 A8]) (0)) 675 {*call} (expr_list
+ * (symbol_ref ("__sanitizer_cov_stack_depth") [flags 0x41] <function_decl
+ * 0x7f7cd3302a80 __sanitizer_cov_stack_depth>) (expr_list (0) (nil))) (nil))
*/
for (insn = get_insns(); insn; insn = next) {
rtx body;
@@ -318,7 +318,7 @@ static void remove_stack_tracking_gcall(void)
if (SYMBOL_REF_DECL(body) != track_function_decl)
continue;
- /* Delete the stackleak_track_stack() call */
+ /* Delete the __sanitizer_cov_stack_depth() call */
delete_insn_and_edges(insn);
#if BUILDING_GCC_VERSION < 8000
if (GET_CODE(next) == NOTE &&
@@ -340,12 +340,12 @@ static bool remove_stack_tracking_gasm(void)
gcc_assert(build_for_x86);
/*
- * Find stackleak_track_stack() asm calls. Loop through the chain of
+ * Find __sanitizer_cov_stack_depth() asm calls. Loop through the chain of
* insns, which is an RTL representation of the code for a function.
*
* The example of a matching insn:
* (insn 11 5 12 2 (parallel [ (asm_operands/v
- * ("call stackleak_track_stack") ("") 0
+ * ("call __sanitizer_cov_stack_depth") ("") 0
* [ (reg/v:DI 7 sp [ current_stack_pointer ]) ]
* [ (asm_input:DI ("r")) ] [])
* (clobber (reg:CC 17 flags)) ]) -1 (nil))
@@ -375,7 +375,7 @@ static bool remove_stack_tracking_gasm(void)
continue;
if (strcmp(ASM_OPERANDS_TEMPLATE(body),
- "call stackleak_track_stack")) {
+ "call __sanitizer_cov_stack_depth")) {
continue;
}
@@ -389,7 +389,7 @@ static bool remove_stack_tracking_gasm(void)
/*
* Work with the RTL representation of the code.
- * Remove the unneeded stackleak_track_stack() calls from the functions
+ * Remove the unneeded __sanitizer_cov_stack_depth() calls from the functions
* which don't call alloca() and don't have a large enough stack frame size.
*/
static unsigned int stackleak_cleanup_execute(void)
@@ -474,13 +474,13 @@ static bool stackleak_gate(void)
return track_frame_size >= 0;
}
-/* Build the function declaration for stackleak_track_stack() */
+/* Build the function declaration for __sanitizer_cov_stack_depth() */
static void stackleak_start_unit(void *gcc_data __unused,
void *user_data __unused)
{
tree fntype;
- /* void stackleak_track_stack(void) */
+ /* void __sanitizer_cov_stack_depth(void) */
fntype = build_function_type_list(void_type_node, NULL_TREE);
track_function_decl = build_fn_decl(track_function, fntype);
DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index fd6bd69c5096..f795302ddfa8 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -20,6 +20,7 @@
#include <linux/of_fdt.h>
#include <linux/page_ext.h>
#include <linux/radix-tree.h>
+#include <linux/maple_tree.h>
#include <linux/slab.h>
#include <linux/threads.h>
#include <linux/vmalloc.h>
@@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE)
LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
LX_GDBPARSED(RADIX_TREE_MAP_MASK)
+/* linux/maple_tree.h */
+LX_VALUE(MAPLE_NODE_SLOTS)
+LX_VALUE(MAPLE_RANGE64_SLOTS)
+LX_VALUE(MAPLE_ARANGE64_SLOTS)
+LX_GDBPARSED(MAPLE_NODE_MASK)
+
/* linux/vmalloc.h */
LX_VALUE(VM_IOREMAP)
LX_VALUE(VM_ALLOC)
diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py
index 616a5f26377a..f4f715a8f0e3 100644
--- a/scripts/gdb/linux/interrupts.py
+++ b/scripts/gdb/linux/interrupts.py
@@ -7,7 +7,7 @@ import gdb
from linux import constants
from linux import cpus
from linux import utils
-from linux import radixtree
+from linux import mapletree
irq_desc_type = utils.CachedType("struct irq_desc")
@@ -23,12 +23,12 @@ def irqd_is_level(desc):
def show_irq_desc(prec, irq):
text = ""
- desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq)
+ desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq)
if desc is None:
return text
- desc = desc.cast(irq_desc_type.get_type())
- if desc is None:
+ desc = desc.cast(irq_desc_type.get_type().pointer())
+ if desc == 0:
return text
if irq_settings_is_hidden(desc):
@@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc):
pvar = gdb.parse_and_eval(var)
text = "%*s: " % (prec, pfx)
for cpu in cpus.each_online_cpu():
- text += "%10u " % (cpus.per_cpu(pvar, cpu))
+ text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference())
text += " %s\n" % (desc)
return text
@@ -142,7 +142,7 @@ def x86_show_interupts(prec):
if constants.LX_CONFIG_X86_MCE:
text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions")
- text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
+ text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
text += show_irq_err_count(prec)
@@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command):
gdb.write("CPU%-8d" % cpu)
gdb.write("\n")
- if utils.gdb_eval_or_none("&irq_desc_tree") is None:
- return
+ if utils.gdb_eval_or_none("&sparse_irqs") is None:
+ raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?")
for irq in range(nr_irqs):
gdb.write(show_irq_desc(prec, irq))
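For context on the hunk above: IRQ descriptors are now looked up in the sparse_irqs maple tree instead of the old irq_desc_tree radix tree. A minimal interactive sketch of that path, assuming a gdb session attached to a kernel built with CONFIG_SPARSE_IRQ and with vmlinux-gdb.py sourced (IRQ 1 is only an arbitrary example index):

    import gdb
    from linux import mapletree, utils

    irq_desc_type = utils.CachedType("struct irq_desc")

    # mtree_load() returns the raw slot value; cast it to a struct irq_desc
    # pointer, exactly as show_irq_desc() does above.
    desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), 1)
    if desc is not None:
        desc = desc.cast(irq_desc_type.get_type().pointer())
        if desc != 0:
            gdb.write("irq 1 -> irq_desc at %s\n" % desc)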
diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py
new file mode 100644
index 000000000000..d52d51c0a03f
--- /dev/null
+++ b/scripts/gdb/linux/mapletree.py
@@ -0,0 +1,252 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Maple tree helpers
+#
+# Copyright (c) 2025 Broadcom
+#
+# Authors:
+# Florian Fainelli <florian.fainelli@broadcom.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+from linux import xarray
+
+maple_tree_root_type = utils.CachedType("struct maple_tree")
+maple_node_type = utils.CachedType("struct maple_node")
+maple_enode_type = utils.CachedType("void")
+
+maple_dense = 0
+maple_leaf_64 = 1
+maple_range_64 = 2
+maple_arange_64 = 3
+
+class Mas(object):
+ ma_active = 0
+ ma_start = 1
+ ma_root = 2
+ ma_none = 3
+ ma_pause = 4
+ ma_overflow = 5
+ ma_underflow = 6
+ ma_error = 7
+
+ def __init__(self, mt, first, end):
+ if mt.type == maple_tree_root_type.get_type().pointer():
+ self.tree = mt.dereference()
+ elif mt.type != maple_tree_root_type.get_type():
+ raise gdb.GdbError("must be {} not {}"
+ .format(maple_tree_root_type.get_type().pointer(), mt.type))
+ self.tree = mt
+ self.index = first
+ self.last = end
+ self.node = None
+ self.status = self.ma_start
+ self.min = 0
+ self.max = -1
+
+ def is_start(self):
+ # mas_is_start()
+ return self.status == self.ma_start
+
+ def is_ptr(self):
+ # mas_is_ptr()
+ return self.status == self.ma_root
+
+ def is_none(self):
+ # mas_is_none()
+ return self.status == self.ma_none
+
+ def root(self):
+ # mas_root()
+ return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer())
+
+ def start(self):
+ # mas_start()
+ if self.is_start() is False:
+ return None
+
+ self.min = 0
+ self.max = ~0
+
+ while True:
+ self.depth = 0
+ root = self.root()
+ if xarray.xa_is_node(root):
+ self.depth = 0
+ self.status = self.ma_active
+ self.node = mte_safe_root(root)
+ self.offset = 0
+ if mte_dead_node(self.node) is True:
+ continue
+
+ return None
+
+ self.node = None
+ # Empty tree
+ if root is None:
+ self.status = self.ma_none
+ self.offset = constants.LX_MAPLE_NODE_SLOTS
+ return None
+
+ # Single entry tree
+ self.status = self.ma_root
+ self.offset = constants.LX_MAPLE_NODE_SLOTS
+
+ if self.index != 0:
+ return None
+
+ return root
+
+ return None
+
+ def reset(self):
+ # mas_reset()
+ self.status = self.ma_start
+ self.node = None
+
+def mte_safe_root(node):
+ if node.type != maple_enode_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type))
+ ulong_type = utils.get_ulong_type()
+ indirect_ptr = node.cast(ulong_type) & ~0x2
+ val = indirect_ptr.cast(maple_enode_type.get_type().pointer())
+ return val
+
+def mte_node_type(entry):
+ ulong_type = utils.get_ulong_type()
+ val = None
+ if entry.type == maple_enode_type.get_type().pointer():
+ val = entry.cast(ulong_type)
+ elif entry.type == ulong_type:
+ val = entry
+ else:
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type))
+ return (val >> 0x3) & 0xf
+
+def ma_dead_node(node):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type))
+ ulong_type = utils.get_ulong_type()
+ parent = node['parent']
+ indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK
+ return indirect_ptr == node
+
+def mte_to_node(enode):
+ ulong_type = utils.get_ulong_type()
+ if enode.type == maple_enode_type.get_type().pointer():
+ indirect_ptr = enode.cast(ulong_type)
+ elif enode.type == ulong_type:
+ indirect_ptr = enode
+ else:
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
+ indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK
+ return indirect_ptr.cast(maple_node_type.get_type().pointer())
+
+def mte_dead_node(enode):
+ if enode.type != maple_enode_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
+ node = mte_to_node(enode)
+ return ma_dead_node(node)
+
+def ma_is_leaf(tp):
+ result = tp < maple_range_64
+ return tp < maple_range_64
+
+def mt_pivots(t):
+ if t == maple_dense:
+ return 0
+ elif t == maple_leaf_64 or t == maple_range_64:
+ return constants.LX_MAPLE_RANGE64_SLOTS - 1
+ elif t == maple_arange_64:
+ return constants.LX_MAPLE_ARANGE64_SLOTS - 1
+
+def ma_pivots(node, t):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{}: must be {} not {}"
+ .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type))
+ if t == maple_arange_64:
+ return node['ma64']['pivot']
+ elif t == maple_leaf_64 or t == maple_range_64:
+ return node['mr64']['pivot']
+ else:
+ return None
+
+def ma_slots(node, tp):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{}: must be {} not {}"
+ .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type))
+ if tp == maple_arange_64:
+ return node['ma64']['slot']
+ elif tp == maple_range_64 or tp == maple_leaf_64:
+ return node['mr64']['slot']
+ elif tp == maple_dense:
+ return node['slot']
+ else:
+ return None
+
+def mt_slot(mt, slots, offset):
+ ulong_type = utils.get_ulong_type()
+ return slots[offset].cast(ulong_type)
+
+def mtree_lookup_walk(mas):
+ ulong_type = utils.get_ulong_type()
+ n = mas.node
+
+ while True:
+ node = mte_to_node(n)
+ tp = mte_node_type(n)
+ pivots = ma_pivots(node, tp)
+ end = mt_pivots(tp)
+ offset = 0
+ while True:
+ if pivots[offset] >= mas.index:
+ break
+ if offset >= end:
+ break
+ offset += 1
+
+ slots = ma_slots(node, tp)
+ n = mt_slot(mas.tree, slots, offset)
+ if ma_dead_node(node) is True:
+ mas.reset()
+ return None
+ break
+
+ if ma_is_leaf(tp) is True:
+ break
+
+ return n
+
+def mtree_load(mt, index):
+ ulong_type = utils.get_ulong_type()
+ # MT_STATE(...)
+ mas = Mas(mt, index, index)
+ entry = None
+
+ while True:
+ entry = mas.start()
+ if mas.is_none():
+ return None
+
+ if mas.is_ptr():
+ if index != 0:
+ entry = None
+ return entry
+
+ entry = mtree_lookup_walk(mas)
+ if entry is None and mas.is_start():
+ continue
+ else:
+ break
+
+ if xarray.xa_is_zero(entry):
+ return None
+
+ return entry
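The encoded-node helpers above work on tagged pointers: bit 1 marks the tree root as containing a node, bits 3-6 carry the node type, and the low bits are masked off to recover the struct maple_node address. A standalone arithmetic sketch of what mte_node_type() and mte_to_node() compute, using a hypothetical node address and assuming MAPLE_NODE_MASK is 255:

    node_addr = 0xffff888003aa4c00           # hypothetical, 256-byte aligned
    enode = node_addr | (2 << 3) | 0x02      # type 2 == maple_range_64, plus root bit

    assert (enode & 3) == 2                  # what xa_is_internal()/xa_is_node() see
    assert (enode >> 3) & 0xf == 2           # mte_node_type()
    assert enode & ~0xff == node_addr        # mte_to_node(), with mask == 255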
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 2332bd8eddf1..6edb99221675 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -84,6 +84,30 @@ def get_kerneloffset():
return None
+def is_in_s390_decompressor():
+ # DAT is always off in decompressor. Use this as an indicator.
+ # Note that in the kernel, DAT can be off during kexec() or restart.
+ # Accept this imprecision in order to avoid complicating things.
+ # It is unlikely that someone will run lx-symbols at these points.
+ pswm = int(gdb.parse_and_eval("$pswm"))
+ return (pswm & 0x0400000000000000) == 0
+
+
+def skip_decompressor():
+ if utils.is_target_arch("s390"):
+ if is_in_s390_decompressor():
+ # The address of the jump_to_kernel function is statically placed
+ # into svc_old_psw.addr (see ipl_data.c); read it from there. DAT
+ # is off, so we do not need to care about lowcore relocation.
+ svc_old_pswa = 0x148
+ jump_to_kernel = int(gdb.parse_and_eval("*(unsigned long long *)" +
+ hex(svc_old_pswa)))
+ gdb.execute("tbreak *" + hex(jump_to_kernel))
+ gdb.execute("continue")
+ while is_in_s390_decompressor():
+ gdb.execute("stepi")
+
+
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
@@ -204,6 +228,8 @@ lx-symbols command."""
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
+ skip_decompressor()
+
self.module_paths = [os.path.abspath(os.path.expanduser(p))
for p in arg.split()]
self.module_paths.append(os.getcwd())
diff --git a/scripts/gdb/linux/vfs.py b/scripts/gdb/linux/vfs.py
index c77b9ce75f6d..9e921b645a68 100644
--- a/scripts/gdb/linux/vfs.py
+++ b/scripts/gdb/linux/vfs.py
@@ -22,7 +22,7 @@ def dentry_name(d):
if parent == d or parent == 0:
return ""
p = dentry_name(d['d_parent']) + "/"
- return p + d['d_iname'].string()
+ return p + d['d_name']['name'].string()
class DentryName(gdb.Function):
"""Return string of the full path of a dentry.
diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py
new file mode 100644
index 000000000000..f4477b5def75
--- /dev/null
+++ b/scripts/gdb/linux/xarray.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Xarray helpers
+#
+# Copyright (c) 2025 Broadcom
+#
+# Authors:
+# Florian Fainelli <florian.fainelli@broadcom.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+
+def xa_is_internal(entry):
+ ulong_type = utils.get_ulong_type()
+ return ((entry.cast(ulong_type) & 3) == 2)
+
+def xa_mk_internal(v):
+ return ((v << 2) | 2)
+
+def xa_is_zero(entry):
+ ulong_type = utils.get_ulong_type()
+ return entry.cast(ulong_type) == xa_mk_internal(257)
+
+def xa_is_node(entry):
+ ulong_type = utils.get_ulong_type()
+ return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
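For reference, a standalone arithmetic sketch of the tagging convention these helpers test for (xa_mk_internal() is duplicated here only so the snippet runs outside of a gdb session):

    def xa_mk_internal(v):                   # same arithmetic as the helper above
        return (v << 2) | 2

    zero_entry = xa_mk_internal(257)
    assert zero_entry == 0x406               # the xarray "zero entry"
    assert (zero_entry & 3) == 2             # it is an internal entry...
    assert not zero_entry > 4096             # ...but too small to be a node pointer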
diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl
index 580b4e246aec..d1ae5e92c615 100644
--- a/scripts/syscall.tbl
+++ b/scripts/syscall.tbl
@@ -408,3 +408,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr