Diffstat (limited to 'scripts'): 28 files changed, 1635 insertions, 700 deletions
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a6461ea411f7..ba71b27aa363 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -312,10 +312,11 @@ $(obj)/%.lst: $(obj)/%.c FORCE
 # - Stable since Rust 1.82.0: `feature(asm_const)`, `feature(raw_ref_op)`.
 # - Stable since Rust 1.87.0: `feature(asm_goto)`.
 # - Expected to become stable: `feature(arbitrary_self_types)`.
+# - To be determined: `feature(used_with_arg)`.
 #
 # Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
 # the unstable features in use.
-rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op
+rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op,used_with_arg
 
 # `--out-dir` is required to avoid temporaries being created by `rustc` in the
 # current working directory, which may be not accessible in the out-of-tree
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 435ab3f0ec44..b0e1423b09c2 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -8,20 +8,6 @@ ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 endif
 export DISABLE_LATENT_ENTROPY_PLUGIN
 
-gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
-    += -DSTACKLEAK_PLUGIN
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
-    += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
-    += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH)
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK_VERBOSE) \
-    += -fplugin-arg-stackleak_plugin-verbose
-ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-    DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
-endif
-export DISABLE_STACKLEAK_PLUGIN
-
 # All the plugin CFLAGS are collected here in case a build target needs to
 # filter them out of the KBUILD_CFLAGS.
 GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) -DGCC_PLUGINS
@@ -34,6 +20,8 @@ KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
 # be included in GCC_PLUGIN so they can get built.
 gcc-plugin-external-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \
     += randomize_layout_plugin.so
+gcc-plugin-external-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+    += stackleak_plugin.so
 
 # All enabled GCC plugins are collected here for building in
 # scripts/gcc-plugins/Makefile.
diff --git a/scripts/Makefile.kstack_erase b/scripts/Makefile.kstack_erase
new file mode 100644
index 000000000000..ee7e4ef7b892
--- /dev/null
+++ b/scripts/Makefile.kstack_erase
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+kstack-erase-cflags-y += -fplugin=$(objtree)/scripts/gcc-plugins/stackleak_plugin.so
+kstack-erase-cflags-y += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE)
+kstack-erase-cflags-y += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH)
+kstack-erase-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK_VERBOSE) += -fplugin-arg-stackleak_plugin-verbose
+DISABLE_KSTACK_ERASE := -fplugin-arg-stackleak_plugin-disable
+endif
+
+ifdef CONFIG_CC_IS_CLANG
+kstack-erase-cflags-y += -fsanitize-coverage=stack-depth
+kstack-erase-cflags-y += -fsanitize-coverage-stack-depth-callback-min=$(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE)
+DISABLE_KSTACK_ERASE := -fno-sanitize-coverage=stack-depth
+endif
+
+KSTACK_ERASE_CFLAGS := $(kstack-erase-cflags-y)
+
+export KSTACK_ERASE_CFLAGS DISABLE_KSTACK_ERASE
+
+KBUILD_CFLAGS += $(KSTACK_ERASE_CFLAGS)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 664f7b7a622c..22a6de59b77b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -857,8 +857,6 @@ our %deprecated_apis = (
 	"kunmap"		=> "kunmap_local",
 	"kmap_atomic"		=> "kmap_local_page",
 	"kunmap_atomic"		=> "kunmap_local",
-	"srcu_read_lock_lite"	=> "srcu_read_lock_fast",
-	"srcu_read_unlock_lite"	=> "srcu_read_unlock_fast",
 );
 
 #Create a search pattern for all these strings to speed up a loop below
@@ -3741,6 +3739,18 @@ sub process {
 		}
 	}
 
+# Check for RGMII phy-mode with delay on PCB
+		if ($realfile =~ /\.(dts|dtsi|dtso)$/ &&
+		    $line =~ /^\+\s*(phy-mode|phy-connection-type)\s*=\s*"/ &&
+		    !ctx_has_comment($first_line, $linenr)) {
+			my $prop = $1;
+			my $mode = get_quoted_string($line, $rawline);
+			if ($mode =~ /^"rgmii(?:|-rxid|-txid)"$/) {
+				WARN("UNCOMMENTED_RGMII_MODE",
+				     "$prop $mode without comment -- delays on the PCB should be described, otherwise use \"rgmii-id\"\n" . $herecurr);
+			}
+		}
+
 # check for using SPDX license tag at beginning of files
 	if ($realline == $checklicenseline) {
 		if ($rawline =~ /^[ \+]\s*\#\!\s*\//) {
diff --git a/scripts/checktransupdate.py b/scripts/checktransupdate.py
index 578c3fecfdfd..e39529e46c3d 100755
--- a/scripts/checktransupdate.py
+++ b/scripts/checktransupdate.py
@@ -24,6 +24,7 @@ commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
 """
 
 import os
+import re
 import time
 import logging
 from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction
@@ -69,6 +70,38 @@ def get_origin_from_trans(origin_path, t_from_head):
     return o_from_t
 
 
+def get_origin_from_trans_smartly(origin_path, t_from_head):
+    """Get the latest origin commit from the formatted translation commit:
+    (1) update to commit HASH (TITLE)
+    (2) Update the translation through commit HASH (TITLE)
+    """
+    # capture group for a 12-hex-digit commit hash
+    HASH = r'([0-9a-f]{12})'
+    # pattern 1: contains "update to commit HASH"
+    pat_update_to = re.compile(rf'update to commit {HASH}')
+    # pattern 2: contains "Update the translation through commit HASH"
+    pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')
+
+    origin_commit_hash = None
+    for line in t_from_head["message"]:
+        # check if the line matches the first pattern
+        match = pat_update_to.search(line)
+        if match:
+            origin_commit_hash = match.group(1)
+            break
+        # check if the line matches the second pattern
+        match = pat_update_translation.search(line)
+        if match:
+            origin_commit_hash = match.group(1)
+            break
+    if origin_commit_hash is None:
+        return None
+    o_from_t = get_latest_commit_from(origin_path, origin_commit_hash)
+    if o_from_t is not None:
+        logging.debug("tracked origin commit id: %s", o_from_t["hash"])
+    return o_from_t
+
+
 def get_commits_count_between(opath, commit1, commit2):
     """Get the commits count between two commits for the specified file"""
     command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}"
@@ -108,7 +141,10 @@ def check_per_file(file_path):
         logging.error("Cannot find the latest commit for %s", file_path)
         return
 
-    o_from_t = get_origin_from_trans(opath, t_from_head)
+    o_from_t = get_origin_from_trans_smartly(opath, t_from_head)
+    # notice, o_from_t from get_*_smartly() is always more accurate than from get_*()
+    if o_from_t is None:
+        o_from_t = get_origin_from_trans(opath, t_from_head)
 
     if o_from_t is None:
         logging.error("Error: Cannot find the latest origin commit for %s", file_path)
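[Editor's note: a minimal sketch of how the two patterns above behave on sample commit-message lines. The first line is taken from the script's own docstring; the second hash is made up purely for illustration.]

    import re

    # Same patterns as get_origin_from_trans_smartly(); the 12 hex digits of
    # the referenced origin commit are the only capture group.
    HASH = r'([0-9a-f]{12})'
    pat_update_to = re.compile(rf'update to commit {HASH}')
    pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')

    lines = [
        'update to commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")',
        'Update the translation through commit 8debd4a3767b ("docs: hypothetical example")',
    ]
    for line in lines:
        m = pat_update_to.search(line) or pat_update_translation.search(line)
        print(m.group(1) if m else None)
    # -> 42fb9cfd5b18, then 8debd4a3767b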
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
index e8609a03c3d8..6eb94fddc338 100644
--- a/scripts/const_structs.checkpatch
+++ b/scripts/const_structs.checkpatch
@@ -1,6 +1,7 @@
 acpi_dock_ops
 address_space_operations
 backlight_ops
+bin_attribute
 block_device_operations
 bus_type
 clk_ops
diff --git a/scripts/crypto/gen-hash-testvecs.py b/scripts/crypto/gen-hash-testvecs.py
new file mode 100755
index 000000000000..4ac927d40cf5
--- /dev/null
+++ b/scripts/crypto/gen-hash-testvecs.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Script that generates test vectors for the given cryptographic hash function.
+#
+# Copyright 2025 Google LLC
+
+import hashlib
+import hmac
+import sys
+
+DATA_LENS = [0, 1, 2, 3, 16, 32, 48, 49, 63, 64, 65, 127, 128, 129, 256, 511,
+             513, 1000, 3333, 4096, 4128, 4160, 4224, 16384]
+
+# Generate the given number of random bytes, using the length itself as the
+# seed for a simple linear congruential generator (LCG).  The C test code uses
+# the same LCG with the same seeding strategy to reconstruct the data,
+# ensuring reproducibility without explicitly storing the data in the test
+# vectors.
+def rand_bytes(length):
+    seed = length
+    out = []
+    for _ in range(length):
+        seed = (seed * 25214903917 + 11) % 2**48
+        out.append((seed >> 16) % 256)
+    return bytes(out)
+
+POLY1305_KEY_SIZE = 32
+
+# A straightforward, unoptimized implementation of Poly1305.
+# Reference: https://cr.yp.to/mac/poly1305-20050329.pdf
+class Poly1305:
+    def __init__(self, key):
+        assert len(key) == POLY1305_KEY_SIZE
+        self.h = 0
+        rclamp = 0x0ffffffc0ffffffc0ffffffc0fffffff
+        self.r = int.from_bytes(key[:16], byteorder='little') & rclamp
+        self.s = int.from_bytes(key[16:], byteorder='little')
+
+    # Note: this supports partial blocks only at the end.
+    def update(self, data):
+        for i in range(0, len(data), 16):
+            chunk = data[i:i+16]
+            c = int.from_bytes(chunk, byteorder='little') + 2**(8 * len(chunk))
+            self.h = ((self.h + c) * self.r) % (2**130 - 5)
+        return self
+
+    # Note: gen_additional_poly1305_testvecs() relies on this being
+    # nondestructive, i.e. not changing any field of self.
+    def digest(self):
+        m = (self.h + self.s) % 2**128
+        return m.to_bytes(16, byteorder='little')
+
+def hash_init(alg):
+    if alg == 'poly1305':
+        # Use a fixed random key here, to present Poly1305 as an unkeyed hash.
+        # This allows all the test cases for unkeyed hashes to work on Poly1305.
+        return Poly1305(rand_bytes(POLY1305_KEY_SIZE))
+    return hashlib.new(alg)
+
+def hash_update(ctx, data):
+    ctx.update(data)
+
+def hash_final(ctx):
+    return ctx.digest()
+
+def compute_hash(alg, data):
+    ctx = hash_init(alg)
+    hash_update(ctx, data)
+    return hash_final(ctx)
+
+def print_bytes(prefix, value, bytes_per_line):
+    for i in range(0, len(value), bytes_per_line):
+        line = prefix + ''.join(f'0x{b:02x}, ' for b in value[i:i+bytes_per_line])
+        print(f'{line.rstrip()}')
+
+def print_static_u8_array_definition(name, value):
+    print('')
+    print(f'static const u8 {name} = {{')
+    print_bytes('\t', value, 8)
+    print('};')
+
+def print_c_struct_u8_array_field(name, value):
+    print(f'\t\t.{name} = {{')
+    print_bytes('\t\t\t', value, 8)
+    print('\t\t},')
+
+def gen_unkeyed_testvecs(alg):
+    print('')
+    print('static const struct {')
+    print('\tsize_t data_len;')
+    print(f'\tu8 digest[{alg.upper()}_DIGEST_SIZE];')
+    print('} hash_testvecs[] = {')
+    for data_len in DATA_LENS:
+        data = rand_bytes(data_len)
+        print('\t{')
+        print(f'\t\t.data_len = {data_len},')
+        print_c_struct_u8_array_field('digest', compute_hash(alg, data))
+        print('\t},')
+    print('};')
+
+    data = rand_bytes(4096)
+    ctx = hash_init(alg)
+    for data_len in range(len(data) + 1):
+        hash_update(ctx, compute_hash(alg, data[:data_len]))
+    print_static_u8_array_definition(
+        f'hash_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
+        hash_final(ctx))
+
+def gen_hmac_testvecs(alg):
+    ctx = hmac.new(rand_bytes(32), digestmod=alg)
+    data = rand_bytes(4096)
+    for data_len in range(len(data) + 1):
+        ctx.update(data[:data_len])
+        key_len = data_len % 293
+        key = rand_bytes(key_len)
+        mac = hmac.digest(key, data[:data_len], alg)
+        ctx.update(mac)
+    print_static_u8_array_definition(
+        f'hmac_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
+        ctx.digest())
+
+def gen_additional_poly1305_testvecs():
+    key = b'\xff' * POLY1305_KEY_SIZE
+    data = b''
+    ctx = Poly1305(key)
+    for _ in range(32):
+        for j in range(0, 4097, 16):
+            ctx.update(b'\xff' * j)
+        data += ctx.digest()
+    print_static_u8_array_definition(
+        'poly1305_allones_macofmacs[POLY1305_DIGEST_SIZE]',
+        Poly1305(key).update(data).digest())
+
+if len(sys.argv) != 2:
+    sys.stderr.write('Usage: gen-hash-testvecs.py ALGORITHM\n')
+    sys.stderr.write('ALGORITHM may be any supported by Python hashlib, or poly1305.\n')
+    sys.stderr.write('Example: gen-hash-testvecs.py sha512\n')
+    sys.exit(1)
+
+alg = sys.argv[1]
+print('/* SPDX-License-Identifier: GPL-2.0-or-later */')
+print(f'/* This file was generated by: {sys.argv[0]} {" ".join(sys.argv[1:])} */')
+gen_unkeyed_testvecs(alg)
+if alg == 'poly1305':
+    gen_additional_poly1305_testvecs()
+else:
+    gen_hmac_testvecs(alg)
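[Editor's note: two properties of the script above, as a quick standalone sketch rather than part of the patch. rand_bytes() is seeded by the requested length alone, so both the Python (generation) and C (verification) sides regenerate byte-identical data; and a Poly1305 context that has absorbed no data returns just the s half of its key.]

    # Determinism: same length -> same seed -> same byte stream, which is how
    # the C test code reconstructs the inputs without storing them.
    assert rand_bytes(4096) == rand_bytes(4096)

    # With no update() calls, h stays 0, so digest() reduces to
    # (0 + s) % 2**128, i.e. the second half of the key.
    key = bytes(range(POLY1305_KEY_SIZE))
    assert Poly1305(key).digest() == key[16:]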
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
index d20c47d21ad8..e486488c867d 100644
--- a/scripts/gcc-plugins/stackleak_plugin.c
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -9,7 +9,7 @@
  * any of the gcc libraries
  *
  * This gcc plugin is needed for tracking the lowest border of the kernel stack.
- * It instruments the kernel code inserting stackleak_track_stack() calls:
+ * It instruments the kernel code inserting __sanitizer_cov_stack_depth() calls:
  *  - after alloca();
  *  - for the functions with a stack frame size greater than or equal
  *    to the "track-min-size" plugin parameter.
@@ -33,7 +33,7 @@ __visible int plugin_is_GPL_compatible;
 
 static int track_frame_size = -1;
 static bool build_for_x86 = false;
-static const char track_function[] = "stackleak_track_stack";
+static const char track_function[] = "__sanitizer_cov_stack_depth";
 static bool disable = false;
 static bool verbose = false;
 
@@ -58,7 +58,7 @@ static void add_stack_tracking_gcall(gimple_stmt_iterator *gsi, bool after)
 	cgraph_node_ptr node;
 	basic_block bb;
 
-	/* Insert calling stackleak_track_stack() */
+	/* Insert calling __sanitizer_cov_stack_depth() */
 	stmt = gimple_build_call(track_function_decl, 0);
 	gimple_call = as_a_gcall(stmt);
 	if (after)
@@ -120,12 +120,12 @@ static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after)
 	gcc_assert(build_for_x86);
 
 	/*
-	 * Insert calling stackleak_track_stack() in asm:
-	 *   asm volatile("call stackleak_track_stack"
+	 * Insert calling __sanitizer_cov_stack_depth() in asm:
+	 *   asm volatile("call __sanitizer_cov_stack_depth"
 	 *		:: "r" (current_stack_pointer))
 	 * Use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h.
 	 * This constraint is taken into account during gcc shrink-wrapping
-	 * optimization. It is needed to be sure that stackleak_track_stack()
+	 * optimization. It is needed to be sure that __sanitizer_cov_stack_depth()
 	 * call is inserted after the prologue of the containing function,
 	 * when the stack frame is prepared.
 	 */
@@ -137,7 +137,7 @@ static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after)
 	input = build_tree_list(NULL_TREE, build_const_char_string(2, "r"));
 	input = chainon(NULL_TREE, build_tree_list(input, sp_decl));
 	vec_safe_push(inputs, input);
-	asm_call = gimple_build_asm_vec("call stackleak_track_stack",
+	asm_call = gimple_build_asm_vec("call __sanitizer_cov_stack_depth",
 					inputs, NULL, NULL, NULL);
 	gimple_asm_set_volatile(asm_call, true);
 	if (after)
@@ -151,11 +151,11 @@ static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after)
 {
 	/*
 	 * The 'no_caller_saved_registers' attribute is used for
-	 * stackleak_track_stack(). If the compiler supports this attribute for
-	 * the target arch, we can add calling stackleak_track_stack() in asm.
+	 * __sanitizer_cov_stack_depth(). If the compiler supports this attribute for
+	 * the target arch, we can add calling __sanitizer_cov_stack_depth() in asm.
 	 * That improves performance: we avoid useless operations with the
 	 * caller-saved registers in the functions from which we will remove
-	 * stackleak_track_stack() call during the stackleak_cleanup pass.
+	 * __sanitizer_cov_stack_depth() call during the stackleak_cleanup pass.
 	 */
 	if (lookup_attribute_spec(get_identifier("no_caller_saved_registers")))
 		add_stack_tracking_gasm(gsi, after);
@@ -165,7 +165,7 @@ static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after)
 
 /*
  * Work with the GIMPLE representation of the code. Insert the
- * stackleak_track_stack() call after alloca() and into the beginning
+ * __sanitizer_cov_stack_depth() call after alloca() and into the beginning
  * of the function if it is not instrumented.
  */
static unsigned int stackleak_instrument_execute(void)
@@ -205,7 +205,7 @@ static unsigned int stackleak_instrument_execute(void)
 					DECL_NAME_POINTER(current_function_decl));
 			}
 
-			/* Insert stackleak_track_stack() call after alloca() */
+			/* Insert __sanitizer_cov_stack_depth() call after alloca() */
 			add_stack_tracking(&gsi, true);
 			if (bb == entry_bb)
 				prologue_instrumented = true;
@@ -241,7 +241,7 @@ static unsigned int stackleak_instrument_execute(void)
 		return 0;
 	}
 
-	/* Insert stackleak_track_stack() call at the function beginning */
+	/* Insert __sanitizer_cov_stack_depth() call at the function beginning */
 	bb = entry_bb;
 	if (!single_pred_p(bb)) {
 		/* gcc_assert(bb_loop_depth(bb) ||
@@ -270,15 +270,15 @@ static void remove_stack_tracking_gcall(void)
 	rtx_insn *insn, *next;
 
 	/*
-	 * Find stackleak_track_stack() calls. Loop through the chain of insns,
+	 * Find __sanitizer_cov_stack_depth() calls. Loop through the chain of insns,
 	 * which is an RTL representation of the code for a function.
 	 *
 	 * The example of a matching insn:
-	 *   (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack")
-	 *   [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>)
-	 *   [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list
-	 *   (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl
-	 *   0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil))
+	 *   (call_insn 8 4 10 2 (call (mem (symbol_ref ("__sanitizer_cov_stack_depth")
+	 *   [flags 0x41] <function_decl 0x7f7cd3302a80 __sanitizer_cov_stack_depth>)
+	 *   [0 __sanitizer_cov_stack_depth S1 A8]) (0)) 675 {*call} (expr_list
+	 *   (symbol_ref ("__sanitizer_cov_stack_depth") [flags 0x41] <function_decl
+	 *   0x7f7cd3302a80 __sanitizer_cov_stack_depth>) (expr_list (0) (nil))) (nil))
 	 */
 	for (insn = get_insns(); insn; insn = next) {
 		rtx body;
@@ -318,7 +318,7 @@ static void remove_stack_tracking_gcall(void)
 		if (SYMBOL_REF_DECL(body) != track_function_decl)
 			continue;
 
-		/* Delete the stackleak_track_stack() call */
+		/* Delete the __sanitizer_cov_stack_depth() call */
 		delete_insn_and_edges(insn);
 #if BUILDING_GCC_VERSION < 8000
 		if (GET_CODE(next) == NOTE &&
@@ -340,12 +340,12 @@ static bool remove_stack_tracking_gasm(void)
 	gcc_assert(build_for_x86);
 
 	/*
-	 * Find stackleak_track_stack() asm calls. Loop through the chain of
+	 * Find __sanitizer_cov_stack_depth() asm calls. Loop through the chain of
 	 * insns, which is an RTL representation of the code for a function.
 	 *
 	 * The example of a matching insn:
 	 *   (insn 11 5 12 2 (parallel [ (asm_operands/v
-	 *   ("call stackleak_track_stack") ("") 0
+	 *   ("call __sanitizer_cov_stack_depth") ("") 0
 	 *   [ (reg/v:DI 7 sp [ current_stack_pointer ]) ]
 	 *   [ (asm_input:DI ("r")) ] [])
 	 *   (clobber (reg:CC 17 flags)) ]) -1 (nil))
@@ -375,7 +375,7 @@ static bool remove_stack_tracking_gasm(void)
 			continue;
 
 		if (strcmp(ASM_OPERANDS_TEMPLATE(body),
-						"call stackleak_track_stack")) {
+						"call __sanitizer_cov_stack_depth")) {
 			continue;
 		}
 
@@ -389,7 +389,7 @@ static bool remove_stack_tracking_gasm(void)
 
 /*
  * Work with the RTL representation of the code.
- * Remove the unneeded stackleak_track_stack() calls from the functions
+ * Remove the unneeded __sanitizer_cov_stack_depth() calls from the functions
 * which don't call alloca() and don't have a large enough stack frame size.
 */
static unsigned int stackleak_cleanup_execute(void)
@@ -474,13 +474,13 @@ static bool stackleak_gate(void)
 	return track_frame_size >= 0;
 }
 
-/* Build the function declaration for stackleak_track_stack() */
+/* Build the function declaration for __sanitizer_cov_stack_depth() */
 static void stackleak_start_unit(void *gcc_data __unused,
 				 void *user_data __unused)
 {
 	tree fntype;
 
-	/* void stackleak_track_stack(void) */
+	/* void __sanitizer_cov_stack_depth(void) */
 	fntype = build_function_type_list(void_type_node, NULL_TREE);
 	track_function_decl = build_fn_decl(track_function, fntype);
 	DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index fd6bd69c5096..f795302ddfa8 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -20,6 +20,7 @@
 #include <linux/of_fdt.h>
 #include <linux/page_ext.h>
 #include <linux/radix-tree.h>
+#include <linux/maple_tree.h>
 #include <linux/slab.h>
 #include <linux/threads.h>
 #include <linux/vmalloc.h>
@@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE)
 LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
 LX_GDBPARSED(RADIX_TREE_MAP_MASK)
 
+/* linux/maple_tree.h */
+LX_VALUE(MAPLE_NODE_SLOTS)
+LX_VALUE(MAPLE_RANGE64_SLOTS)
+LX_VALUE(MAPLE_ARANGE64_SLOTS)
+LX_GDBPARSED(MAPLE_NODE_MASK)
+
 /* linux/vmalloc.h */
 LX_VALUE(VM_IOREMAP)
 LX_VALUE(VM_ALLOC)
diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py
index 616a5f26377a..f4f715a8f0e3 100644
--- a/scripts/gdb/linux/interrupts.py
+++ b/scripts/gdb/linux/interrupts.py
@@ -7,7 +7,7 @@ import gdb
 from linux import constants
 from linux import cpus
 from linux import utils
-from linux import radixtree
+from linux import mapletree
 
 irq_desc_type = utils.CachedType("struct irq_desc")
 
@@ -23,12 +23,12 @@ def irqd_is_level(desc):
 def show_irq_desc(prec, irq):
     text = ""
 
-    desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq)
+    desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq)
     if desc is None:
         return text
 
-    desc = desc.cast(irq_desc_type.get_type())
-    if desc is None:
+    desc = desc.cast(irq_desc_type.get_type().pointer())
+    if desc == 0:
         return text
 
     if irq_settings_is_hidden(desc):
@@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc):
     pvar = gdb.parse_and_eval(var)
     text = "%*s: " % (prec, pfx)
     for cpu in cpus.each_online_cpu():
-        text += "%10u " % (cpus.per_cpu(pvar, cpu))
+        text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference())
     text += "  %s\n" % (desc)
     return text
 
@@ -142,7 +142,7 @@ def x86_show_interupts(prec):
 
     if constants.LX_CONFIG_X86_MCE:
         text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions")
-        text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
+        text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
 
     text += show_irq_err_count(prec)
 
@@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command):
             gdb.write("CPU%-8d" % cpu)
         gdb.write("\n")
 
-        if utils.gdb_eval_or_none("&irq_desc_tree") is None:
-            return
+        if utils.gdb_eval_or_none("&sparse_irqs") is None:
+            raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?")
 
         for irq in range(nr_irqs):
             gdb.write(show_irq_desc(prec, irq))
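[Editor's note: a sketch of how the new helper is driven from a GDB session against a live kernel, mirroring show_irq_desc() above. Assumes the lx scripts are loaded, CONFIG_SPARSE_IRQ=y, and that IRQ number 1 exists; both are illustrative choices, not part of the patch.]

    import gdb
    from linux import mapletree, utils

    irq_desc_type = utils.CachedType("struct irq_desc")

    # Look up IRQ 1 in the sparse_irqs maple tree, as show_irq_desc() does.
    entry = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), 1)
    if entry is not None:
        desc = entry.cast(irq_desc_type.get_type().pointer())
        gdb.write("irq 1 -> action %s\n" % desc['action'])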
must be {} not {}" + .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type)) + return (val >> 0x3) & 0xf + +def ma_dead_node(node): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type)) + ulong_type = utils.get_ulong_type() + parent = node['parent'] + indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr == node + +def mte_to_node(enode): + ulong_type = utils.get_ulong_type() + if enode.type == maple_enode_type.get_type().pointer(): + indirect_ptr = enode.cast(ulong_type) + elif enode.type == ulong_type: + indirect_ptr = enode + else: + raise gdb.GdbError("{} must be {} not {}" + .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr.cast(maple_node_type.get_type().pointer()) + +def mte_dead_node(enode): + if enode.type != maple_enode_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + node = mte_to_node(enode) + return ma_dead_node(node) + +def ma_is_leaf(tp): + result = tp < maple_range_64 + return tp < maple_range_64 + +def mt_pivots(t): + if t == maple_dense: + return 0 + elif t == maple_leaf_64 or t == maple_range_64: + return constants.LX_MAPLE_RANGE64_SLOTS - 1 + elif t == maple_arange_64: + return constants.LX_MAPLE_ARANGE64_SLOTS - 1 + +def ma_pivots(node, t): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type)) + if t == maple_arange_64: + return node['ma64']['pivot'] + elif t == maple_leaf_64 or t == maple_range_64: + return node['mr64']['pivot'] + else: + return None + +def ma_slots(node, tp): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type)) + if tp == maple_arange_64: + return node['ma64']['slot'] + elif tp == maple_range_64 or tp == maple_leaf_64: + return node['mr64']['slot'] + elif tp == maple_dense: + return node['slot'] + else: + return None + +def mt_slot(mt, slots, offset): + ulong_type = utils.get_ulong_type() + return slots[offset].cast(ulong_type) + +def mtree_lookup_walk(mas): + ulong_type = utils.get_ulong_type() + n = mas.node + + while True: + node = mte_to_node(n) + tp = mte_node_type(n) + pivots = ma_pivots(node, tp) + end = mt_pivots(tp) + offset = 0 + while True: + if pivots[offset] >= mas.index: + break + if offset >= end: + break + offset += 1 + + slots = ma_slots(node, tp) + n = mt_slot(mas.tree, slots, offset) + if ma_dead_node(node) is True: + mas.reset() + return None + break + + if ma_is_leaf(tp) is True: + break + + return n + +def mtree_load(mt, index): + ulong_type = utils.get_ulong_type() + # MT_STATE(...) 
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 2332bd8eddf1..6edb99221675 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -84,6 +84,30 @@ def get_kerneloffset():
     return None
 
 
+def is_in_s390_decompressor():
+    # DAT is always off in decompressor. Use this as an indicator.
+    # Note that in the kernel, DAT can be off during kexec() or restart.
+    # Accept this imprecision in order to avoid complicating things.
+    # It is unlikely that someone will run lx-symbols at these points.
+    pswm = int(gdb.parse_and_eval("$pswm"))
+    return (pswm & 0x0400000000000000) == 0
+
+
+def skip_decompressor():
+    if utils.is_target_arch("s390"):
+        if is_in_s390_decompressor():
+            # The address of the jump_to_kernel function is statically placed
+            # into svc_old_psw.addr (see ipl_data.c); read it from there. DAT
+            # is off, so we do not need to care about lowcore relocation.
+            svc_old_pswa = 0x148
+            jump_to_kernel = int(gdb.parse_and_eval("*(unsigned long long *)" +
+                                                    hex(svc_old_pswa)))
+            gdb.execute("tbreak *" + hex(jump_to_kernel))
+            gdb.execute("continue")
+            while is_in_s390_decompressor():
+                gdb.execute("stepi")
+
+
 class LxSymbols(gdb.Command):
     """(Re-)load symbols of Linux kernel and currently loaded modules.
 
@@ -204,6 +228,8 @@ lx-symbols command."""
                 saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
+        skip_decompressor()
+
         self.module_paths = [os.path.abspath(os.path.expanduser(p))
                              for p in arg.split()]
         self.module_paths.append(os.getcwd())
diff --git a/scripts/gdb/linux/vfs.py b/scripts/gdb/linux/vfs.py
index c77b9ce75f6d..9e921b645a68 100644
--- a/scripts/gdb/linux/vfs.py
+++ b/scripts/gdb/linux/vfs.py
@@ -22,7 +22,7 @@ def dentry_name(d):
     if parent == d or parent == 0:
         return ""
     p = dentry_name(d['d_parent']) + "/"
-    return p + d['d_iname'].string()
+    return p + d['d_name']['name'].string()
 
 class DentryName(gdb.Function):
     """Return string of the full path of a dentry.
diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py
new file mode 100644
index 000000000000..f4477b5def75
--- /dev/null
+++ b/scripts/gdb/linux/xarray.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Xarray helpers
+#
+# Copyright (c) 2025 Broadcom
+#
+# Authors:
+#  Florian Fainelli <florian.fainelli@broadcom.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+
+def xa_is_internal(entry):
+    ulong_type = utils.get_ulong_type()
+    return ((entry.cast(ulong_type) & 3) == 2)
+
+def xa_mk_internal(v):
+    return ((v << 2) | 2)
+
+def xa_is_zero(entry):
+    ulong_type = utils.get_ulong_type()
+    return entry.cast(ulong_type) == xa_mk_internal(257)
+
+def xa_is_node(entry):
+    ulong_type = utils.get_ulong_type()
+    return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
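[Editor's note: for orientation, a standalone sketch of the tagged-pointer scheme the helpers above decode. XArray entries with low bits 0b10 are internal; an internal value v is stored shifted left by two, and node pointers are internal entries whose value exceeds 4096.]

    def xa_mk_internal(v):
        # Internal entries carry the value in the high bits, tag 0b10 low.
        return (v << 2) | 2

    zero_entry = xa_mk_internal(257)
    assert zero_entry == 0x406        # the zero-entry encoding
    assert zero_entry & 3 == 2        # xa_is_internal() is True for it
    assert not zero_entry > 4096      # too small to be a node pointer, so
                                      # xa_is_node() is False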
diff --git a/scripts/gendwarfksyms/gendwarfksyms.h b/scripts/gendwarfksyms/gendwarfksyms.h
index 7dd03ffe0c5c..d9c06d2cb1df 100644
--- a/scripts/gendwarfksyms/gendwarfksyms.h
+++ b/scripts/gendwarfksyms/gendwarfksyms.h
@@ -216,24 +216,14 @@ int cache_get(struct cache *cache, unsigned long key);
 void cache_init(struct cache *cache);
 void cache_free(struct cache *cache);
 
-static inline void __cache_mark_expanded(struct cache *cache, uintptr_t addr)
-{
-	cache_set(cache, addr, 1);
-}
-
-static inline bool __cache_was_expanded(struct cache *cache, uintptr_t addr)
-{
-	return cache_get(cache, addr) == 1;
-}
-
 static inline void cache_mark_expanded(struct cache *cache, void *addr)
 {
-	__cache_mark_expanded(cache, (uintptr_t)addr);
+	cache_set(cache, (unsigned long)addr, 1);
 }
 
 static inline bool cache_was_expanded(struct cache *cache, void *addr)
 {
-	return __cache_was_expanded(cache, (uintptr_t)addr);
+	return cache_get(cache, (unsigned long)addr) == 1;
 }
 
 /*
diff --git a/scripts/gendwarfksyms/types.c b/scripts/gendwarfksyms/types.c
index 39ce1770e463..7bd459ea6c59 100644
--- a/scripts/gendwarfksyms/types.c
+++ b/scripts/gendwarfksyms/types.c
@@ -333,37 +333,11 @@ static void calculate_version(struct version *version,
 	cache_free(&expansion_cache);
 }
 
-static void __type_expand(struct die *cache, struct type_expansion *type,
-			  bool recursive);
-
-static void type_expand_child(struct die *cache, struct type_expansion *type,
-			      bool recursive)
-{
-	struct type_expansion child;
-	char *name;
-
-	name = get_type_name(cache);
-	if (!name) {
-		__type_expand(cache, type, recursive);
-		return;
-	}
-
-	if (recursive && !__cache_was_expanded(&expansion_cache, cache->addr)) {
-		__cache_mark_expanded(&expansion_cache, cache->addr);
-		type_expansion_init(&child);
-		__type_expand(cache, &child, true);
-		type_map_add(name, &child);
-		type_expansion_free(&child);
-	}
-
-	type_expansion_append(type, name, name);
-}
-
-static void __type_expand(struct die *cache, struct type_expansion *type,
-			  bool recursive)
+static void __type_expand(struct die *cache, struct type_expansion *type)
 {
 	struct die_fragment *df;
 	struct die *child;
+	char *name;
 
 	list_for_each_entry(df, &cache->fragments, list) {
 		switch (df->type) {
@@ -379,7 +353,12 @@ static void __type_expand(struct die *cache, struct type_expansion *type)
 				error("unknown child: %" PRIxPTR,
 				      df->data.addr);
 
-			type_expand_child(child, type, recursive);
+			name = get_type_name(child);
+			if (name)
+				type_expansion_append(type, name, name);
+			else
+				__type_expand(child, type);
+
 			break;
 		case FRAGMENT_LINEBREAK:
 			/*
@@ -397,12 +376,17 @@ static void __type_expand(struct die *cache, struct type_expansion *type)
 	}
 }
 
-static void type_expand(struct die *cache, struct type_expansion *type,
-			bool recursive)
+static void type_expand(const char *name, struct die *cache,
+			struct type_expansion *type)
 {
+	const char *override;
+
 	type_expansion_init(type);
-	__type_expand(cache, type, recursive);
-	cache_free(&expansion_cache);
+
+	if (stable && kabi_get_type_string(name, &override))
+		type_parse(name, override, type);
+	else
+		__type_expand(cache, type);
 }
 
 static void type_parse(const char *name, const char *str,
@@ -416,8 +400,6 @@ static void type_parse(const char *name, const char *str,
 	if (!*str)
 		error("empty type string override for '%s'", name);
 
-	type_expansion_init(type);
-
 	for (pos = 0; str[pos]; ++pos) {
 		bool empty;
 		char marker = ' ';
@@ -478,7 +460,6 @@ static void type_parse(const char *name, const char *str,
 static void expand_type(struct die *cache, void *arg)
 {
 	struct type_expansion type;
-	const char *override;
 	char *name;
 
 	if (cache->mapped)
@@ -504,11 +485,7 @@ static void expand_type(struct die *cache, void *arg)
 
 	debug("%s", name);
 
-	if (stable && kabi_get_type_string(name, &override))
-		type_parse(name, override, &type);
-	else
-		type_expand(cache, &type, true);
-
+	type_expand(name, cache, &type);
 	type_map_add(name, &type);
 	type_expansion_free(&type);
 	free(name);
@@ -518,7 +495,6 @@ static void expand_symbol(struct symbol *sym, void *arg)
 {
 	struct type_expansion type;
 	struct version version;
-	const char *override;
 	struct die *cache;
 
 	/*
@@ -532,10 +508,7 @@ static void expand_symbol(struct symbol *sym, void *arg)
 	if (__die_map_get(sym->die_addr, DIE_SYMBOL, &cache))
 		return; /* We'll warn about missing CRCs later. */
 
-	if (stable && kabi_get_type_string(sym->name, &override))
-		type_parse(sym->name, override, &type);
-	else
-		type_expand(cache, &type, false);
+	type_expand(sym->name, cache, &type);
 
 	/* If the symbol already has a version, don't calculate it again. */
 	if (sym->state != SYMBOL_PROCESSED) {
diff --git a/scripts/kernel-doc.py b/scripts/kernel-doc.py
index 12ae66f40bd7..fc3d46ef519f 100755
--- a/scripts/kernel-doc.py
+++ b/scripts/kernel-doc.py
@@ -271,6 +271,16 @@ def main():
 
         logger.addHandler(handler)
 
+    python_ver = sys.version_info[:2]
+    if python_ver < (3,6):
+        logger.warning("Python 3.6 or later is required by kernel-doc")
+
+        # Return 0 here to avoid breaking compilation
+        sys.exit(0)
+
+    if python_ver < (3,7):
+        logger.warning("Python 3.7 or later is required for correct results")
+
     if args.man:
         out_style = ManFormat(modulename=args.modulename)
     elif args.none:
diff --git a/scripts/lib/kdoc/kdoc_files.py b/scripts/lib/kdoc/kdoc_files.py
index 9be4a64df71d..9e09b45b02fa 100644
--- a/scripts/lib/kdoc/kdoc_files.py
+++ b/scripts/lib/kdoc/kdoc_files.py
@@ -275,8 +275,8 @@ class KernelFiles():
                 self.config.log.warning("No kernel-doc for file %s", fname)
                 continue
 
-            for name, arg in self.results[fname]:
-                m = self.out_msg(fname, name, arg)
+            for arg in self.results[fname]:
+                m = self.out_msg(fname, arg.name, arg)
 
                 if m is None:
                     ln = arg.get("ln", 0)
diff --git a/scripts/lib/kdoc/kdoc_item.py b/scripts/lib/kdoc/kdoc_item.py
new file mode 100644
index 000000000000..b3b225764550
--- /dev/null
+++ b/scripts/lib/kdoc/kdoc_item.py
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# A class that will, eventually, encapsulate all of the parsed data that we
+# then pass into the output modules.
+#
+
+class KdocItem:
+    def __init__(self, name, type, start_line, **other_stuff):
+        self.name = name
+        self.type = type
+        self.declaration_start_line = start_line
+        self.sections = {}
+        self.section_start_lines = {}
+        self.parameterlist = []
+        self.parameterdesc_start_lines = []
+        self.parameterdescs = {}
+        self.parametertypes = {}
+        #
+        # Just save everything else into our own dict so that the output
+        # side can grab it directly as before.  As we move things into more
+        # structured data, this will, hopefully, fade away.
+        #
+        self.other_stuff = other_stuff
+
+    def get(self, key, default = None):
+        return self.other_stuff.get(key, default)
+
+    def __getitem__(self, key):
+        return self.get(key)
+
+    #
+    # Tracking of section and parameter information.
+    #
+    def set_sections(self, sections, start_lines):
+        self.sections = sections
+        self.section_start_lines = start_lines
+
+    def set_params(self, names, descs, types, starts):
+        self.parameterlist = names
+        self.parameterdescs = descs
+        self.parametertypes = types
+        self.parameterdesc_start_lines = starts
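[Editor's note: a minimal sketch of how output code consumes a KdocItem as defined above. Structured fields are plain attributes, while anything else still flows through the dict-style fallback; the names and values are illustrative only.]

    item = KdocItem('my_func', 'function', 42, purpose='do things')

    assert item.name == 'my_func'
    assert item.type == 'function'
    assert item.declaration_start_line == 42
    # Unstructured extras fall back to other_stuff via get() / []:
    assert item['purpose'] == 'do things'
    assert item.get('functiontype', '') == ''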
""" - - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - section_start_lines = args.get('section_start_lines', {}) - - for section in sectionlist: + for section, text in args.sections.items(): # Skip sections that are in the nosymbol_table if section in self.nosymbol: continue @@ -355,8 +348,8 @@ class RestFormat(OutputFormat): else: self.data += f'{self.lineprefix}**{section}**\n\n' - self.print_lineno(section_start_lines.get(section, 0)) - self.output_highlight(sections[section]) + self.print_lineno(args.section_start_lines.get(section, 0)) + self.output_highlight(text) self.data += "\n" self.data += "\n" @@ -372,24 +365,19 @@ class RestFormat(OutputFormat): func_macro = args.get('func_macro', False) if func_macro: - signature = args['function'] + signature = name else: if args.get('functiontype'): signature = args['functiontype'] + " " - signature += args['function'] + " (" - - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - parameterdesc_start_lines = args.get('parameterdesc_start_lines', {}) - - ln = args.get('declaration_start_line', 0) + signature += name + " (" + ln = args.declaration_start_line count = 0 - for parameter in parameterlist: + for parameter in args.parameterlist: if count != 0: signature += ", " count += 1 - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if function_pointer.search(dtype): signature += function_pointer.group(1) + parameter + function_pointer.group(3) @@ -401,7 +389,7 @@ class RestFormat(OutputFormat): self.print_lineno(ln) if args.get('typedef') or not args.get('functiontype'): - self.data += f".. c:macro:: {args['function']}\n\n" + self.data += f".. c:macro:: {name}\n\n" if args.get('typedef'): self.data += " **Typedef**: " @@ -424,26 +412,26 @@ class RestFormat(OutputFormat): # function prototypes apart self.lineprefix = " " - if parameterlist: + if args.parameterlist: self.data += ".. container:: kernelindent\n\n" self.data += f"{self.lineprefix}**Parameters**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = KernRe(r'\[.*').sub('', parameter) - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if dtype: self.data += f"{self.lineprefix}``{dtype}``\n" else: self.data += f"{self.lineprefix}``{parameter}``\n" - self.print_lineno(parameterdesc_start_lines.get(parameter_name, 0)) + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) self.lineprefix = " " - if parameter_name in parameterdescs and \ - parameterdescs[parameter_name] != KernelDoc.undescribed: + if parameter_name in args.parameterdescs and \ + args.parameterdescs[parameter_name] != KernelDoc.undescribed: - self.output_highlight(parameterdescs[parameter_name]) + self.output_highlight(args.parameterdescs[parameter_name]) self.data += "\n" else: self.data += f"{self.lineprefix}*undescribed*\n\n" @@ -455,10 +443,7 @@ class RestFormat(OutputFormat): def out_enum(self, fname, name, args): oldprefix = self.lineprefix - name = args.get('enum', '') - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - ln = args.get('declaration_start_line', 0) + ln = args.declaration_start_line self.data += f"\n\n.. 
c:enum:: {name}\n\n" @@ -472,11 +457,11 @@ class RestFormat(OutputFormat): self.lineprefix = outer + " " self.data += f"{outer}**Constants**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: self.data += f"{outer}``{parameter}``\n" - if parameterdescs.get(parameter, '') != KernelDoc.undescribed: - self.output_highlight(parameterdescs[parameter]) + if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed: + self.output_highlight(args.parameterdescs[parameter]) else: self.data += f"{self.lineprefix}*undescribed*\n\n" self.data += "\n" @@ -487,8 +472,7 @@ class RestFormat(OutputFormat): def out_typedef(self, fname, name, args): oldprefix = self.lineprefix - name = args.get('typedef', '') - ln = args.get('declaration_start_line', 0) + ln = args.declaration_start_line self.data += f"\n\n.. c:type:: {name}\n\n" @@ -504,15 +488,10 @@ class RestFormat(OutputFormat): def out_struct(self, fname, name, args): - name = args.get('struct', "") purpose = args.get('purpose', "") declaration = args.get('definition', "") - dtype = args.get('type', "struct") - ln = args.get('declaration_start_line', 0) - - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - parameterdesc_start_lines = args.get('parameterdesc_start_lines', {}) + dtype = args.type + ln = args.declaration_start_line self.data += f"\n\n.. c:{dtype}:: {name}\n\n" @@ -536,21 +515,21 @@ class RestFormat(OutputFormat): self.lineprefix = " " self.data += f"{self.lineprefix}**Members**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: if not parameter or parameter.startswith("#"): continue parameter_name = parameter.split("[", maxsplit=1)[0] - if parameterdescs.get(parameter_name) == KernelDoc.undescribed: + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: continue - self.print_lineno(parameterdesc_start_lines.get(parameter_name, 0)) + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) self.data += f"{self.lineprefix}``{parameter}``\n" self.lineprefix = " " - self.output_highlight(parameterdescs[parameter_name]) + self.output_highlight(args.parameterdescs[parameter_name]) self.lineprefix = " " self.data += "\n" @@ -636,46 +615,38 @@ class ManFormat(OutputFormat): self.data += line + "\n" def out_doc(self, fname, name, args): - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - if not self.check_doc(name, args): return self.data += f'.TH "{self.modulename}" 9 "{self.modulename}" "{self.man_date}" "API Manual" LINUX' + "\n" - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) def out_function(self, fname, name, args): """output function in man""" - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - - self.data += f'.TH "{args["function"]}" 9 "{args["function"]}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n" + self.data += f'.TH "{name}" 9 "{name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"{args['function']} \\- {args['purpose']}\n" + self.data += f"{name} \\- {args['purpose']}\n" self.data += ".SH SYNOPSIS\n" if args.get('functiontype', ''): - self.data += f'.B "{args["functiontype"]}" {args["function"]}' + "\n" + self.data += f'.B 
"{args["functiontype"]}" {name}' + "\n" else: - self.data += f'.B "{args["function"]}' + "\n" + self.data += f'.B "{name}' + "\n" count = 0 parenth = "(" post = "," - for parameter in parameterlist: - if count == len(parameterlist) - 1: + for parameter in args.parameterlist: + if count == len(args.parameterlist) - 1: post = ");" - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if function_pointer.match(dtype): # Pointer-to-function self.data += f'".BI "{parenth}{function_pointer.group(1)}" " ") ({function_pointer.group(2)}){post}"' + "\n" @@ -686,38 +657,32 @@ class ManFormat(OutputFormat): count += 1 parenth = "" - if parameterlist: + if args.parameterlist: self.data += ".SH ARGUMENTS\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = re.sub(r'\[.*', '', parameter) self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(parameterdescs.get(parameter_name, "")) + self.output_highlight(args.parameterdescs.get(parameter_name, "")) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section.upper()}"' + "\n" - self.output_highlight(sections[section]) + self.output_highlight(text) def out_enum(self, fname, name, args): - - name = args.get('enum', '') - parameterlist = args.get('parameterlist', []) - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - - self.data += f'.TH "{self.modulename}" 9 "enum {args["enum"]}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{self.modulename}" 9 "enum {name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"enum {args['enum']} \\- {args['purpose']}\n" + self.data += f"enum {name} \\- {args['purpose']}\n" self.data += ".SH SYNOPSIS\n" - self.data += f"enum {args['enum']}" + " {\n" + self.data += f"enum {name}" + " {\n" count = 0 - for parameter in parameterlist: + for parameter in args.parameterlist: self.data += f'.br\n.BI " {parameter}"' + "\n" - if count == len(parameterlist) - 1: + if count == len(args.parameterlist) - 1: self.data += "\n};\n" else: self.data += ", \n.br\n" @@ -726,68 +691,59 @@ class ManFormat(OutputFormat): self.data += ".SH Constants\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = KernRe(r'\[.*').sub('', parameter) self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(args['parameterdescs'].get(parameter_name, "")) + self.output_highlight(args.parameterdescs.get(parameter_name, "")) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections[section]) + self.output_highlight(text) def out_typedef(self, fname, name, args): module = self.modulename - typedef = args.get('typedef') purpose = args.get('purpose') - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - self.data += f'.TH "{module}" 9 "{typedef}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{module}" 9 "{name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"typedef {typedef} \\- {purpose}\n" + self.data += f"typedef {name} \\- {purpose}\n" - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) def out_struct(self, fname, name, args): module = self.modulename - struct_type = 
args.get('type') - struct_name = args.get('struct') purpose = args.get('purpose') definition = args.get('definition') - sectionlist = args.get('sectionlist', []) - parameterlist = args.get('parameterlist', []) - sections = args.get('sections', {}) - parameterdescs = args.get('parameterdescs', {}) - self.data += f'.TH "{module}" 9 "{struct_type} {struct_name}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{module}" 9 "{args.type} {name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"{struct_type} {struct_name} \\- {purpose}\n" + self.data += f"{args.type} {name} \\- {purpose}\n" # Replace tabs with two spaces and handle newlines declaration = definition.replace("\t", " ") declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration) self.data += ".SH SYNOPSIS\n" - self.data += f"{struct_type} {struct_name} " + "{" + "\n.br\n" + self.data += f"{args.type} {name} " + "{" + "\n.br\n" self.data += f'.BI "{declaration}\n' + "};\n.br\n\n" self.data += ".SH Members\n" - for parameter in parameterlist: + for parameter in args.parameterlist: if parameter.startswith("#"): continue parameter_name = re.sub(r"\[.*", "", parameter) - if parameterdescs.get(parameter_name) == KernelDoc.undescribed: + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: continue self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(parameterdescs.get(parameter_name)) + self.output_highlight(args.parameterdescs.get(parameter_name)) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) diff --git a/scripts/lib/kdoc/kdoc_parser.py b/scripts/lib/kdoc/kdoc_parser.py index 062453eefc7a..fe730099eca8 100644 --- a/scripts/lib/kdoc/kdoc_parser.py +++ b/scripts/lib/kdoc/kdoc_parser.py @@ -12,11 +12,12 @@ Read a C language source or header FILE and extract embedded documentation comments """ +import sys import re from pprint import pformat from kdoc_re import NestedMatch, KernRe - +from kdoc_item import KdocItem # # Regular expressions used to parse kernel-doc markups at KernelDoc class. @@ -42,12 +43,13 @@ doc_decl = doc_com + KernRe(r'(\w+)', cache=False) # @{section-name}: # while trying to not match literal block starts like "example::" # +known_section_names = 'description|context|returns?|notes?|examples?' +known_sections = KernRe(known_section_names, flags = re.I) doc_sect = doc_com + \ - KernRe(r'\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:([^:].*)?$', - flags=re.I, cache=False) + KernRe(r'\s*(\@[.\w]+|\@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$', + flags=re.I, cache=False) doc_content = doc_com_body + KernRe(r'(.*)', cache=False) -doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False) doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False) doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False) doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False) @@ -60,6 +62,25 @@ export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+" type_param = KernRe(r"\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False) +# +# Tests for the beginning of a kerneldoc block in its various forms. +# +doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False) +doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False) +doc_begin_func = KernRe(str(doc_com) + # initial " * ' + r"(?:\w+\s*\*\s*)?" 
+ # type (not captured) + r'(?:define\s+)?' + # possible "define" (not captured) + r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)" + r'(?:[-:].*)?$', # description (not captured) + cache = False) + +# +# A little helper to get rid of excess white space +# +multi_space = KernRe(r'\s\s+') +def trim_whitespace(s): + return multi_space.sub(' ', s.strip()) + class state: """ State machine enums @@ -68,40 +89,26 @@ class state: # Parser states NORMAL = 0 # normal code NAME = 1 # looking for function name - BODY_MAYBE = 2 # body - or maybe more description + DECLARATION = 2 # We have seen a declaration which might not be done BODY = 3 # the body of the comment - BODY_WITH_BLANK_LINE = 4 # the body which has a blank line + SPECIAL_SECTION = 4 # doc section ending with a blank line PROTO = 5 # scanning prototype DOCBLOCK = 6 # documentation block - INLINE = 7 # gathering doc outside main block + INLINE_NAME = 7 # gathering doc outside main block + INLINE_TEXT = 8 # reading the body of inline docs name = [ "NORMAL", "NAME", - "BODY_MAYBE", + "DECLARATION", "BODY", - "BODY_WITH_BLANK_LINE", + "SPECIAL_SECTION", "PROTO", "DOCBLOCK", - "INLINE", + "INLINE_NAME", + "INLINE_TEXT", ] - # Inline documentation state - INLINE_NA = 0 # not applicable ($state != INLINE) - INLINE_NAME = 1 # looking for member name (@foo:) - INLINE_TEXT = 2 # looking for member documentation - INLINE_END = 3 # done - INLINE_ERROR = 4 # error - Comment without header was found. - # Spit a warning as it's not - # proper kernel-doc and ignore the rest. - - inline_name = [ - "", - "_NAME", - "_TEXT", - "_END", - "_ERROR", - ] SECTION_DEFAULT = "Description" # default section @@ -110,10 +117,7 @@ class KernelEntry: def __init__(self, config, ln): self.config = config - self.contents = "" - self.function = "" - self.sectcheck = "" - self.struct_actual = "" + self._contents = [] self.prototype = "" self.warnings = [] @@ -124,7 +128,6 @@ class KernelEntry: self.parameterdesc_start_lines = {} self.section_start_lines = {} - self.sectionlist = [] self.sections = {} self.anon_struct_union = False @@ -133,10 +136,17 @@ class KernelEntry: # State flags self.brcount = 0 - - self.in_doc_sect = False self.declaration_start_line = ln + 1 + # + # Management of section contents + # + def add_text(self, text): + self._contents.append(text) + + def contents(self): + return '\n'.join(self._contents) + '\n' + # TODO: rename to emit_message after removal of kernel-doc.pl def emit_msg(self, log_msg, warning=True): """Emit a message""" @@ -151,13 +161,27 @@ class KernelEntry: self.warnings.append(log_msg) return + # + # Begin a new section. + # + def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False): + if dump: + self.dump_section(start_new = True) + self.section = title + self.new_start_line = line_no + def dump_section(self, start_new=True): """ Dumps section contents to arrays/hashes intended for that purpose. """ - + # + # If we have accumulated no contents in the default ("description") + # section, don't bother. + # + if self.section == SECTION_DEFAULT and not self._contents: + return name = self.section - contents = self.contents + contents = self.contents() if type_param.match(name): name = type_param.group(1) @@ -165,14 +189,6 @@ class KernelEntry: self.parameterdescs[name] = contents self.parameterdesc_start_lines[name] = self.new_start_line - self.sectcheck += name + " " - self.new_start_line = 0 - - elif name == "@...": - name = "..." 
- self.parameterdescs[name] = contents - self.sectcheck += name + " " - self.parameterdesc_start_lines[name] = self.new_start_line self.new_start_line = 0 else: @@ -181,10 +197,10 @@ class KernelEntry: if name != SECTION_DEFAULT: self.emit_msg(self.new_start_line, f"duplicate section name '{name}'\n") - self.sections[name] += contents + # Treat as a new paragraph - add a blank line + self.sections[name] += '\n' + contents else: self.sections[name] = contents - self.sectionlist.append(name) self.section_start_lines[name] = self.new_start_line self.new_start_line = 0 @@ -192,7 +208,7 @@ class KernelEntry: if start_new: self.section = SECTION_DEFAULT - self.contents = "" + self._contents = [] class KernelDoc: @@ -203,7 +219,6 @@ class KernelDoc: # Section names - section_intro = "Introduction" section_context = "Context" section_return = "Return" @@ -217,7 +232,6 @@ class KernelDoc: # Initial state for the state machines self.state = state.NORMAL - self.inline_doc_state = state.INLINE_NA # Store entry currently being processed self.entry = None @@ -225,6 +239,14 @@ class KernelDoc: # Place all potential outputs into an array self.entries = [] + # + # We need Python 3.7 for its "dicts remember the insertion + # order" guarantee + # + if sys.version_info.major == 3 and sys.version_info.minor < 7: + self.emit_msg(0, + 'Python 3.7 or later is required for correct results') + def emit_msg(self, ln, msg, warning=True): """Emit a message""" @@ -255,32 +277,20 @@ class KernelDoc: The actual output and output filters will be handled elsewhere """ - # The implementation here is different than the original kernel-doc: - # instead of checking for output filters or actually output anything, - # it just stores the declaration content at self.entries, as the - # output will happen on a separate class. - # - # For now, we're keeping the same name of the function just to make - # easier to compare the source code of both scripts - - args["declaration_start_line"] = self.entry.declaration_start_line - args["type"] = dtype - args["warnings"] = self.entry.warnings - - # TODO: use colletions.OrderedDict to remove sectionlist - - sections = args.get('sections', {}) - sectionlist = args.get('sectionlist', []) + item = KdocItem(name, dtype, self.entry.declaration_start_line, **args) + item.warnings = self.entry.warnings # Drop empty sections # TODO: improve empty sections logic to emit warnings + sections = self.entry.sections for section in ["Description", "Return"]: - if section in sectionlist: - if not sections[section].rstrip(): - del sections[section] - sectionlist.remove(section) - - self.entries.append((name, args)) + if section in sections and not sections[section].rstrip(): + del sections[section] + item.set_sections(sections, self.entry.section_start_lines) + item.set_params(self.entry.parameterlist, self.entry.parameterdescs, + self.entry.parametertypes, + self.entry.parameterdesc_start_lines) + self.entries.append(item) self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args)) @@ -294,7 +304,6 @@ class KernelDoc: # State flags self.state = state.NORMAL - self.inline_doc_state = state.INLINE_NA def push_parameter(self, ln, decl_type, param, dtype, org_arg, declaration_name): @@ -367,15 +376,6 @@ class KernelDoc: org_arg = KernRe(r'\s\s+').sub(' ', org_arg) self.entry.parametertypes[param] = org_arg - def save_struct_actual(self, actual): - """ - Strip all spaces from the actual param so that it looks like - one string item. 
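[Note: the parallel sectionlist array is dropped throughout this patch
because, as the comment in KernelDoc.__init__ above says, dicts preserve
insertion order from Python 3.7 on, so the sections dict alone reproduces
the original section sequence. A small illustration, with made-up section
names:

    sections = {}
    sections["Description"] = "What the function does."
    sections["Context"] = "May sleep."
    sections["Return"] = "0 on success, negative errno on failure."

    # Iteration follows insertion order on Python >= 3.7, so a separate
    # ordered list of section names is redundant.
    assert list(sections) == ["Description", "Context", "Return"]
]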
- """ - - actual = KernRe(r'\s*').sub("", actual, count=1) - - self.entry.struct_actual += actual + " " def create_parameter_list(self, ln, decl_type, args, splitter, declaration_name): @@ -421,7 +421,6 @@ class KernelDoc: param = arg dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) @@ -438,7 +437,6 @@ class KernelDoc: dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) @@ -471,7 +469,6 @@ class KernelDoc: param = r.group(1) - self.save_struct_actual(r.group(2)) self.push_parameter(ln, decl_type, r.group(2), f"{dtype} {r.group(1)}", arg, declaration_name) @@ -483,52 +480,27 @@ class KernelDoc: continue if dtype != "": # Skip unnamed bit-fields - self.save_struct_actual(r.group(1)) self.push_parameter(ln, decl_type, r.group(1), f"{dtype}:{r.group(2)}", arg, declaration_name) else: - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) - def check_sections(self, ln, decl_name, decl_type, sectcheck, prmscheck): + def check_sections(self, ln, decl_name, decl_type): """ Check for errors inside sections, emitting warnings if not found parameters are described. """ - - sects = sectcheck.split() - prms = prmscheck.split() - err = False - - for sx in range(len(sects)): # pylint: disable=C0200 - err = True - for px in range(len(prms)): # pylint: disable=C0200 - prm_clean = prms[px] - prm_clean = KernRe(r'\[.*\]').sub('', prm_clean) - prm_clean = attribute.sub('', prm_clean) - - # ignore array size in a parameter string; - # however, the original param string may contain - # spaces, e.g.: addr[6 + 2] - # and this appears in @prms as "addr[6" since the - # parameter list is split at spaces; - # hence just ignore "[..." 
for the sections check;
-                prm_clean = KernRe(r'\[.*').sub('', prm_clean)
-
-                if prm_clean == sects[sx]:
-                    err = False
-                    break
-
-            if err:
+        for section in self.entry.sections:
+            if section not in self.entry.parameterlist and \
+               not known_sections.search(section):
                 if decl_type == 'function':
                     dname = f"{decl_type} parameter"
                 else:
                     dname = f"{decl_type} member"
                 self.emit_msg(ln,
-                              f"Excess {dname} '{sects[sx]}' description in '{decl_name}'")
+                              f"Excess {dname} '{section}' description in '{decl_name}'")
 
     def check_return_section(self, ln, declaration_name, return_type):
         """
@@ -666,6 +638,7 @@ class KernelDoc:
             (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\1 \2[]'),
             (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + args_pattern + r'\)', re.S), r'dma_addr_t \1'),
             (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + args_pattern + r'\)', re.S), r'__u32 \1'),
+            (KernRe(r'VIRTIO_DECLARE_FEATURES\s*\(' + args_pattern + r'\)', re.S), r'u64 \1; u64 \1_array[VIRTIO_FEATURES_DWORDS]'),
         ]
 
         # Regexes here are guaranteed to have the end limiter matching
@@ -782,8 +755,7 @@ class KernelDoc:
         self.create_parameter_list(ln, decl_type, members, ';',
                                    declaration_name)
 
-        self.check_sections(ln, declaration_name, decl_type,
-                            self.entry.sectcheck, self.entry.struct_actual)
+        self.check_sections(ln, declaration_name, decl_type)
 
         # Adjust declaration for better display
         declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration)
@@ -819,15 +791,7 @@ class KernelDoc:
                 level += 1
 
         self.output_declaration(decl_type, declaration_name,
-                                struct=declaration_name,
                                 definition=declaration,
-                                parameterlist=self.entry.parameterlist,
-                                parameterdescs=self.entry.parameterdescs,
-                                parametertypes=self.entry.parametertypes,
-                                parameterdesc_start_lines=self.entry.parameterdesc_start_lines,
-                                sectionlist=self.entry.sectionlist,
-                                sections=self.entry.sections,
-                                section_start_lines=self.entry.section_start_lines,
                                 purpose=self.entry.declaration_purpose)
 
     def dump_enum(self, ln, proto):
         """
 
         # Strip #define macros inside enums
         proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto)
 
-        members = None
-        declaration_name = None
-
+        #
+        # Parse out the name and members of the enum. Typedef form first.
+        #
         r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;')
         if r.search(proto):
             declaration_name = r.group(2)
             members = r.group(1).rstrip()
+        #
+        # Failing that, look for a straight enum
+        #
         else:
             r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}')
             if r.match(proto):
                 declaration_name = r.group(1)
                 members = r.group(2).rstrip()
-
-        if not members:
-            self.emit_msg(ln, f"{proto}: error: Cannot parse enum!")
-            return
-
+            #
+            # OK, this isn't going to work.
+            #
+            else:
+                self.emit_msg(ln, f"{proto}: error: Cannot parse enum!")
+                return
+        #
+        # Make sure we found what we were expecting.
+        #
         if self.entry.identifier != declaration_name:
             if self.entry.identifier == "":
                 self.emit_msg(ln,
                               f"{proto}: wrong kernel-doc identifier on prototype")
             else:
                 self.emit_msg(ln,
-                              f"expecting prototype for enum {self.entry.identifier}. Prototype was for enum {declaration_name} instead")
+                              f"expecting prototype for enum {self.entry.identifier}. "
+                              f"Prototype was for enum {declaration_name} instead")
             return
 
         if not declaration_name:
             declaration_name = "(anonymous)"
-
+        #
+        # Parse out the name of each enum member, and verify that we
+        # have a description for it.
+        #
         member_set = set()
-
-        members = KernRe(r'\([^;]*?[\)]').sub('', members)
-
+        members = KernRe(r'\([^;)]*\)').sub('', members)
         for arg in members.split(','):
             if not arg:
                 continue
@@ -888,20 +861,15 @@ class KernelDoc:
                 self.emit_msg(ln,
                               f"Enum value '{arg}' not described in enum '{declaration_name}'")
             member_set.add(arg)
-
+        #
+        # Ensure that every described member actually exists in the enum.
+        #
         for k in self.entry.parameterdescs:
             if k not in member_set:
                 self.emit_msg(ln,
-                              f"Excess enum value '{k}' description in '{declaration_name}'")
+                              f"Excess enum value '{k}' description in '{declaration_name}'")
 
         self.output_declaration('enum', declaration_name,
-                                enum=declaration_name,
-                                parameterlist=self.entry.parameterlist,
-                                parameterdescs=self.entry.parameterdescs,
-                                parameterdesc_start_lines=self.entry.parameterdesc_start_lines,
-                                sectionlist=self.entry.sectionlist,
-                                sections=self.entry.sections,
-                                section_start_lines=self.entry.section_start_lines,
                                 purpose=self.entry.declaration_purpose)
 
     def dump_declaration(self, ln, prototype):
         """
 
         if self.entry.decl_type == "enum":
             self.dump_enum(ln, prototype)
-            return
-
-        if self.entry.decl_type == "typedef":
+        elif self.entry.decl_type == "typedef":
             self.dump_typedef(ln, prototype)
-            return
-
-        if self.entry.decl_type in ["union", "struct"]:
+        elif self.entry.decl_type in ["union", "struct"]:
             self.dump_struct(ln, prototype)
-            return
-
-        self.output_declaration(self.entry.decl_type, prototype,
-                                entry=self.entry)
+        else:
+            # This would be a bug
+            self.emit_msg(ln, f'Unknown declaration type: {self.entry.decl_type}')
 
     def dump_function(self, ln, prototype):
         """
@@ -1056,38 +1019,20 @@ class KernelDoc:
                           f"expecting prototype for {self.entry.identifier}(). Prototype was for {declaration_name}() instead")
             return
 
-        prms = " ".join(self.entry.parameterlist)
-        self.check_sections(ln, declaration_name, "function",
-                            self.entry.sectcheck, prms)
+        self.check_sections(ln, declaration_name, "function")
         self.check_return_section(ln, declaration_name, return_type)
 
         if 'typedef' in return_type:
             self.output_declaration(decl_type, declaration_name,
-                                    function=declaration_name,
                                     typedef=True,
                                     functiontype=return_type,
-                                    parameterlist=self.entry.parameterlist,
-                                    parameterdescs=self.entry.parameterdescs,
-                                    parametertypes=self.entry.parametertypes,
-                                    parameterdesc_start_lines=self.entry.parameterdesc_start_lines,
-                                    sectionlist=self.entry.sectionlist,
-                                    sections=self.entry.sections,
-                                    section_start_lines=self.entry.section_start_lines,
                                     purpose=self.entry.declaration_purpose,
                                     func_macro=func_macro)
         else:
             self.output_declaration(decl_type, declaration_name,
-                                    function=declaration_name,
                                     typedef=False,
                                     functiontype=return_type,
-                                    parameterlist=self.entry.parameterlist,
-                                    parameterdescs=self.entry.parameterdescs,
-                                    parametertypes=self.entry.parametertypes,
-                                    parameterdesc_start_lines=self.entry.parameterdesc_start_lines,
-                                    sectionlist=self.entry.sectionlist,
-                                    sections=self.entry.sections,
-                                    section_start_lines=self.entry.section_start_lines,
                                     purpose=self.entry.declaration_purpose,
                                     func_macro=func_macro)
 
@@ -1124,16 +1069,8 @@ class KernelDoc:
         self.create_parameter_list(ln, decl_type, args, ',', declaration_name)
 
         self.output_declaration(decl_type, declaration_name,
-                                function=declaration_name,
                                 typedef=True,
                                 functiontype=return_type,
-                                parameterlist=self.entry.parameterlist,
-                                parameterdescs=self.entry.parameterdescs,
-                                parametertypes=self.entry.parametertypes,
-                                parameterdesc_start_lines=self.entry.parameterdesc_start_lines,
-                                sectionlist=self.entry.sectionlist,
-                                sections=self.entry.sections,
-
section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) return @@ -1153,10 +1090,6 @@ class KernelDoc: return self.output_declaration('typedef', declaration_name, - typedef=declaration_name, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) return @@ -1171,17 +1104,28 @@ class KernelDoc: with a staticmethod decorator. """ + # We support documenting some exported symbols with different + # names. A horrible hack. + suffixes = [ '_noprof' ] + # Note: it accepts only one EXPORT_SYMBOL* per line, as having # multiple export lines would violate Kernel coding style. if export_symbol.search(line): symbol = export_symbol.group(2) - function_set.add(symbol) - return - - if export_symbol_ns.search(line): + elif export_symbol_ns.search(line): symbol = export_symbol_ns.group(2) - function_set.add(symbol) + else: + return False + # + # Found an export, trim out any special suffixes + # + for suffix in suffixes: + # Be backward compatible with Python < 3.9 + if symbol.endswith(suffix): + symbol = symbol[:-len(suffix)] + function_set.add(symbol) + return True def process_normal(self, ln, line): """ @@ -1193,7 +1137,6 @@ class KernelDoc: # start a new entry self.reset_state(ln) - self.entry.in_doc_sect = False # next line is always the function name self.state = state.NAME @@ -1202,81 +1145,61 @@ class KernelDoc: """ STATE_NAME: Looking for the "name - description" line """ - + # + # Check for a DOC: block and handle them specially. + # if doc_block.search(line): - self.entry.new_start_line = ln if not doc_block.group(1): - self.entry.section = self.section_intro + self.entry.begin_section(ln, "Introduction") else: - self.entry.section = doc_block.group(1) + self.entry.begin_section(ln, doc_block.group(1)) self.entry.identifier = self.entry.section self.state = state.DOCBLOCK - return - - if doc_decl.search(line): + # + # Otherwise we're looking for a normal kerneldoc declaration line. + # + elif doc_decl.search(line): self.entry.identifier = doc_decl.group(1) - self.entry.is_kernel_comment = False - - decl_start = str(doc_com) # comment block asterisk - fn_type = r"(?:\w+\s*\*\s*)?" # type (for non-functions) - parenthesis = r"(?:\(\w*\))?" # optional parenthesis on function - decl_end = r"(?:[-:].*)" # end of the name part - - # test for pointer declaration type, foo * bar() - desc - r = KernRe(fr"^{decl_start}([\w\s]+?){parenthesis}?\s*{decl_end}?$") - if r.search(line): - self.entry.identifier = r.group(1) # Test for data declaration - r = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)") - if r.search(line): - self.entry.decl_type = r.group(1) - self.entry.identifier = r.group(2) - self.entry.is_kernel_comment = True + if doc_begin_data.search(line): + self.entry.decl_type = doc_begin_data.group(1) + self.entry.identifier = doc_begin_data.group(2) + # + # Look for a function description + # + elif doc_begin_func.search(line): + self.entry.identifier = doc_begin_func.group(1) + self.entry.decl_type = "function" + # + # We struck out. 
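[Note: the suffix handling in process_export() above trims variants such as
"_noprof" so that an EXPORT_SYMBOL(kmalloc_noprof) line is recorded under the
documented base name. The endswith()/slice idiom stands in for
str.removesuffix(), which only exists on Python >= 3.9. A standalone sketch
of the same logic:

    suffixes = ['_noprof']

    def trim_suffix(symbol):
        # Equivalent to symbol.removesuffix(suffix) on Python >= 3.9;
        # the endswith()/slice form keeps older interpreters working.
        for suffix in suffixes:
            if symbol.endswith(suffix):
                return symbol[:-len(suffix)]
        return symbol

    assert trim_suffix("kmalloc_noprof") == "kmalloc"
    assert trim_suffix("kfree") == "kfree"
]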
+ # else: - # Look for foo() or static void foo() - description; - # or misspelt identifier - - r1 = KernRe(fr"^{decl_start}{fn_type}(\w+)\s*{parenthesis}\s*{decl_end}?$") - r2 = KernRe(fr"^{decl_start}{fn_type}(\w+[^-:]*){parenthesis}\s*{decl_end}$") - - for r in [r1, r2]: - if r.search(line): - self.entry.identifier = r.group(1) - self.entry.decl_type = "function" - - r = KernRe(r"define\s+") - self.entry.identifier = r.sub("", self.entry.identifier) - self.entry.is_kernel_comment = True - break - - self.entry.identifier = self.entry.identifier.strip(" ") - + self.emit_msg(ln, + f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}") + self.state = state.NORMAL + return + # + # OK, set up for a new kerneldoc entry. + # self.state = state.BODY - + self.entry.identifier = self.entry.identifier.strip(" ") # if there's no @param blocks need to set up default section here - self.entry.section = SECTION_DEFAULT - self.entry.new_start_line = ln + 1 - + self.entry.begin_section(ln + 1) + # + # Find the description portion, which *should* be there but + # isn't always. + # (We should be able to capture this from the previous parsing - someday) + # r = KernRe("[-:](.*)") if r.search(line): - # strip leading/trailing/multiple spaces - self.entry.descr = r.group(1).strip(" ") - - r = KernRe(r"\s+") - self.entry.descr = r.sub(" ", self.entry.descr) - self.entry.declaration_purpose = self.entry.descr - self.state = state.BODY_MAYBE + self.entry.declaration_purpose = trim_whitespace(r.group(1)) + self.state = state.DECLARATION else: self.entry.declaration_purpose = "" - if not self.entry.is_kernel_comment: - self.emit_msg(ln, - f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}") - self.state = state.NORMAL - if not self.entry.declaration_purpose and self.config.wshort_desc: self.emit_msg(ln, f"missing initial short description on line:\n{line}") @@ -1290,60 +1213,51 @@ class KernelDoc: self.emit_msg(ln, f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}", warning=False) - - return - + # # Failed to find an identifier. Emit a warning - self.emit_msg(ln, f"Cannot find identifier on line:\n{line}") - - def process_body(self, ln, line): - """ - STATE_BODY and STATE_BODY_MAYBE: the bulk of a kerneldoc comment. - """ - - if self.state == state.BODY_WITH_BLANK_LINE: - r = KernRe(r"\s*\*\s?\S") - if r.match(line): - self.dump_section() - self.entry.section = SECTION_DEFAULT - self.entry.new_start_line = ln - self.entry.contents = "" + # + else: + self.emit_msg(ln, f"Cannot find identifier on line:\n{line}") + # + # Helper function to determine if a new section is being started. + # + def is_new_section(self, ln, line): if doc_sect.search(line): - self.entry.in_doc_sect = True + self.state = state.BODY + # + # Pick out the name of our new section, tweaking it if need be. 
+ # newsection = doc_sect.group(1) - - if newsection.lower() in ["description", "context"]: - newsection = newsection.title() - - # Special case: @return is a section, not a param description - if newsection.lower() in ["@return", "@returns", - "return", "returns"]: + if newsection.lower() == 'description': + newsection = 'Description' + elif newsection.lower() == 'context': + newsection = 'Context' + self.state = state.SPECIAL_SECTION + elif newsection.lower() in ["@return", "@returns", + "return", "returns"]: newsection = "Return" - - # Perl kernel-doc has a check here for contents before sections. - # the logic there is always false, as in_doc_sect variable is - # always true. So, just don't implement Wcontents_before_sections - - # .title() + self.state = state.SPECIAL_SECTION + elif newsection[0] == '@': + self.state = state.SPECIAL_SECTION + # + # Initialize the contents, and get the new section going. + # newcontents = doc_sect.group(2) if not newcontents: newcontents = "" - - if self.entry.contents.strip("\n"): - self.dump_section() - - self.entry.new_start_line = ln - self.entry.section = newsection + self.dump_section() + self.entry.begin_section(ln, newsection) self.entry.leading_space = None - self.entry.contents = newcontents.lstrip() - if self.entry.contents: - self.entry.contents += "\n" - - self.state = state.BODY - return + self.entry.add_text(newcontents.lstrip()) + return True + return False + # + # Helper function to detect (and effect) the end of a kerneldoc comment. + # + def is_comment_end(self, ln, line): if doc_end.search(line): self.dump_section() @@ -1356,100 +1270,128 @@ class KernelDoc: self.entry.new_start_line = ln + 1 self.state = state.PROTO - return + return True + return False + + def process_decl(self, ln, line): + """ + STATE_DECLARATION: We've seen the beginning of a declaration + """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + # + # Look for anything with the " * " line beginning. + # if doc_content.search(line): cont = doc_content.group(1) - + # + # A blank line means that we have moved out of the declaration + # part of the comment (without any "special section" parameter + # descriptions). + # if cont == "": - if self.entry.section == self.section_context: - self.dump_section() - - self.entry.new_start_line = ln - self.state = state.BODY - else: - if self.entry.section != SECTION_DEFAULT: - self.state = state.BODY_WITH_BLANK_LINE - else: - self.state = state.BODY - - self.entry.contents += "\n" - - elif self.state == state.BODY_MAYBE: - - # Continued declaration purpose - self.entry.declaration_purpose = self.entry.declaration_purpose.rstrip() - self.entry.declaration_purpose += " " + cont - - r = KernRe(r"\s+") - self.entry.declaration_purpose = r.sub(' ', - self.entry.declaration_purpose) - + self.state = state.BODY + # + # Otherwise we have more of the declaration section to soak up. 
+ # else: - if self.entry.section.startswith('@') or \ - self.entry.section == self.section_context: - if self.entry.leading_space is None: - r = KernRe(r'^(\s+)') - if r.match(cont): - self.entry.leading_space = len(r.group(1)) - else: - self.entry.leading_space = 0 - - # Double-check if leading space are realy spaces - pos = 0 - for i in range(0, self.entry.leading_space): - if cont[i] != " ": - break - pos += 1 - - cont = cont[pos:] + self.entry.declaration_purpose = \ + trim_whitespace(self.entry.declaration_purpose + ' ' + cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") - # NEW LOGIC: - # In case it is different, update it - if self.entry.leading_space != pos: - self.entry.leading_space = pos - self.entry.contents += cont + "\n" + def process_special(self, ln, line): + """ + STATE_SPECIAL_SECTION: a section ending with a blank line + """ + # + # If we have hit a blank line (only the " * " marker), then this + # section is done. + # + if KernRe(r"\s*\*\s*$").match(line): + self.entry.begin_section(ln, dump = True) + self.state = state.BODY + return + # + # Not a blank line, look for the other ways to end the section. + # + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): return + # + # OK, we should have a continuation of the text for this section. + # + if doc_content.search(line): + cont = doc_content.group(1) + # + # If the lines of text after the first in a special section have + # leading white space, we need to trim it out or Sphinx will get + # confused. For the second line (the None case), see what we + # find there and remember it. + # + if self.entry.leading_space is None: + r = KernRe(r'^(\s+)') + if r.match(cont): + self.entry.leading_space = len(r.group(1)) + else: + self.entry.leading_space = 0 + # + # Otherwise, before trimming any leading chars, be *sure* + # that they are white space. We should maybe warn if this + # isn't the case. + # + for i in range(0, self.entry.leading_space): + if cont[i] != " ": + self.entry.leading_space = i + break + # + # Add the trimmed result to the section and we're done. + # + self.entry.add_text(cont[self.entry.leading_space:]) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") - # Unknown line, ignore - self.emit_msg(ln, f"bad line: {line}") + def process_body(self, ln, line): + """ + STATE_BODY: the bulk of a kerneldoc comment. + """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return - def process_inline(self, ln, line): - """STATE_INLINE: docbook comments within a prototype.""" + if doc_content.search(line): + cont = doc_content.group(1) + self.entry.add_text(cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") - if self.inline_doc_state == state.INLINE_NAME and \ - doc_inline_sect.search(line): - self.entry.section = doc_inline_sect.group(1) - self.entry.new_start_line = ln + def process_inline_name(self, ln, line): + """STATE_INLINE_NAME: beginning of docbook comments within a prototype.""" - self.entry.contents = doc_inline_sect.group(2).lstrip() - if self.entry.contents != "": - self.entry.contents += "\n" + if doc_inline_sect.search(line): + self.entry.begin_section(ln, doc_inline_sect.group(1)) + self.entry.add_text(doc_inline_sect.group(2).lstrip()) + self.state = state.INLINE_TEXT + elif doc_inline_end.search(line): + self.dump_section() + self.state = state.PROTO + elif doc_content.search(line): + self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}") + self.state = state.PROTO + # else ... 
?? - self.inline_doc_state = state.INLINE_TEXT - # Documentation block end */ - return + def process_inline_text(self, ln, line): + """STATE_INLINE_TEXT: docbook comments within a prototype.""" if doc_inline_end.search(line): - if self.entry.contents not in ["", "\n"]: - self.dump_section() - + self.dump_section() self.state = state.PROTO - self.inline_doc_state = state.INLINE_NA - return - - if doc_content.search(line): - if self.inline_doc_state == state.INLINE_TEXT: - self.entry.contents += doc_content.group(1) + "\n" - if not self.entry.contents.strip(" ").rstrip("\n"): - self.entry.contents = "" - - elif self.inline_doc_state == state.INLINE_NAME: - self.emit_msg(ln, - f"Incorrect use of kernel-doc format: {line}") - - self.inline_doc_state = state.INLINE_ERROR + elif doc_content.search(line): + self.entry.add_text(doc_content.group(1)) + # else ... ?? def syscall_munge(self, ln, proto): # pylint: disable=W0613 """ @@ -1531,105 +1473,94 @@ class KernelDoc: """Ancillary routine to process a function prototype""" # strip C99-style comments to end of line - r = KernRe(r"\/\/.*$", re.S) - line = r.sub('', line) - + line = KernRe(r"\/\/.*$", re.S).sub('', line) + # + # Soak up the line's worth of prototype text, stopping at { or ; if present. + # if KernRe(r'\s*#\s*define').match(line): self.entry.prototype = line - elif line.startswith('#'): - # Strip other macros like #ifdef/#ifndef/#endif/... - pass - else: + elif not line.startswith('#'): # skip other preprocessor stuff r = KernRe(r'([^\{]*)') if r.match(line): self.entry.prototype += r.group(1) + " " - + # + # If we now have the whole prototype, clean it up and declare victory. + # if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line): - # strip comments - r = KernRe(r'/\*.*?\*/') - self.entry.prototype = r.sub('', self.entry.prototype) - - # strip newlines/cr's - r = KernRe(r'[\r\n]+') - self.entry.prototype = r.sub(' ', self.entry.prototype) - - # strip leading spaces - r = KernRe(r'^\s+') - self.entry.prototype = r.sub('', self.entry.prototype) - + # strip comments and surrounding spaces + self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip() + # # Handle self.entry.prototypes for function pointers like: # int (*pcs_config)(struct foo) - + # by turning it into + # int pcs_config(struct foo) + # r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)') self.entry.prototype = r.sub(r'\1\2', self.entry.prototype) - + # + # Handle special declaration syntaxes + # if 'SYSCALL_DEFINE' in self.entry.prototype: self.entry.prototype = self.syscall_munge(ln, self.entry.prototype) - - r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT') - if r.search(self.entry.prototype): - self.entry.prototype = self.tracepoint_munge(ln, - self.entry.prototype) - + else: + r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT') + if r.search(self.entry.prototype): + self.entry.prototype = self.tracepoint_munge(ln, + self.entry.prototype) + # + # ... and we're done + # self.dump_function(ln, self.entry.prototype) self.reset_state(ln) def process_proto_type(self, ln, line): """Ancillary routine to process a type""" - # Strip newlines/cr's. 
- line = KernRe(r'[\r\n]+', re.S).sub(' ', line) - - # Strip leading spaces - line = KernRe(r'^\s+', re.S).sub('', line) - - # Strip trailing spaces - line = KernRe(r'\s+$', re.S).sub('', line) - - # Strip C99-style comments to the end of the line - line = KernRe(r"\/\/.*$", re.S).sub('', line) + # Strip C99-style comments and surrounding whitespace + line = KernRe(r"//.*$", re.S).sub('', line).strip() + if not line: + return # nothing to see here # To distinguish preprocessor directive from regular declaration later. if line.startswith('#'): line += ";" - - r = KernRe(r'([^\{\};]*)([\{\};])(.*)') - while True: - if r.search(line): - if self.entry.prototype: - self.entry.prototype += " " - self.entry.prototype += r.group(1) + r.group(2) - - self.entry.brcount += r.group(2).count('{') - self.entry.brcount -= r.group(2).count('}') - - self.entry.brcount = max(self.entry.brcount, 0) - - if r.group(2) == ';' and self.entry.brcount == 0: + # + # Split the declaration on any of { } or ;, and accumulate pieces + # until we hit a semicolon while not inside {brackets} + # + r = KernRe(r'(.*?)([{};])') + for chunk in r.split(line): + if chunk: # Ignore empty matches + self.entry.prototype += chunk + # + # This cries out for a match statement ... someday after we can + # drop Python 3.9 ... + # + if chunk == '{': + self.entry.brcount += 1 + elif chunk == '}': + self.entry.brcount -= 1 + elif chunk == ';' and self.entry.brcount <= 0: self.dump_declaration(ln, self.entry.prototype) self.reset_state(ln) - break - - line = r.group(3) - else: - self.entry.prototype += line - break + return + # + # We hit the end of the line while still in the declaration; put + # in a space to represent the newline. + # + self.entry.prototype += ' ' def process_proto(self, ln, line): """STATE_PROTO: reading a function/whatever prototype.""" if doc_inline_oneline.search(line): - self.entry.section = doc_inline_oneline.group(1) - self.entry.contents = doc_inline_oneline.group(2) - - if self.entry.contents != "": - self.entry.contents += "\n" - self.dump_section(start_new=False) + self.entry.begin_section(ln, doc_inline_oneline.group(1)) + self.entry.add_text(doc_inline_oneline.group(2)) + self.dump_section() elif doc_inline_start.search(line): - self.state = state.INLINE - self.inline_doc_state = state.INLINE_NAME + self.state = state.INLINE_NAME elif self.entry.decl_type == 'function': self.process_proto_function(ln, line) @@ -1642,14 +1573,11 @@ class KernelDoc: if doc_end.search(line): self.dump_section() - self.output_declaration("doc", self.entry.identifier, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines) + self.output_declaration("doc", self.entry.identifier) self.reset_state(ln) elif doc_content.search(line): - self.entry.contents += doc_content.group(1) + "\n" + self.entry.add_text(doc_content.group(1)) def parse_export(self): """ @@ -1670,6 +1598,22 @@ class KernelDoc: return export_table + # + # The state/action table telling us which function to invoke in + # each state. + # + state_actions = { + state.NORMAL: process_normal, + state.NAME: process_name, + state.BODY: process_body, + state.DECLARATION: process_decl, + state.SPECIAL_SECTION: process_special, + state.INLINE_NAME: process_inline_name, + state.INLINE_TEXT: process_inline_text, + state.PROTO: process_proto, + state.DOCBLOCK: process_docblock, + } + def parse_kdoc(self): """ Open and process each line of a C source file. 
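[Note: the state_actions table above replaces a long if/elif chain with a
class-level dict mapping each parser state to its handler. Because the
functions are stored in the dict while the class body is still executing,
they are plain functions rather than bound methods, which is why the
dispatch site passes self explicitly. A minimal sketch of the idiom, with
toy states and handlers rather than the parser's own:

    class Machine:
        NORMAL, BODY = 0, 1

        def process_normal(self, ln, line):
            return f"{ln}: NORMAL saw {line!r}"

        def process_body(self, ln, line):
            return f"{ln}: BODY saw {line!r}"

        # Entries here are plain functions, not bound methods, so the
        # dispatch call below must supply the instance by hand.
        state_actions = {
            NORMAL: process_normal,
            BODY: process_body,
        }

        def handle(self, state, ln, line):
            return self.state_actions[state](self, ln, line)

    m = Machine()
    assert m.handle(Machine.NORMAL, 1, "text") == "1: NORMAL saw 'text'"
]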
@@ -1680,7 +1624,6 @@ class KernelDoc: Besides parsing kernel-doc tags, it also parses export symbols. """ - cont = False prev = "" prev_ln = None export_table = set() @@ -1696,23 +1639,18 @@ class KernelDoc: if self.state == state.PROTO: if line.endswith("\\"): prev += line.rstrip("\\") - cont = True - if not prev_ln: prev_ln = ln - continue - if cont: + if prev: ln = prev_ln line = prev + line prev = "" - cont = False prev_ln = None - self.config.log.debug("%d %s%s: %s", + self.config.log.debug("%d %s: %s", ln, state.name[self.state], - state.inline_name[self.inline_doc_state], line) # This is an optimization over the original script. @@ -1720,25 +1658,11 @@ class KernelDoc: # it was read twice. Here, we use the already-existing # loop to parse exported symbols as well. # - # TODO: It should be noticed that not all states are - # needed here. On a future cleanup, process export only - # at the states that aren't handling comment markups. - self.process_export(export_table, line) + if (self.state != state.NORMAL) or \ + not self.process_export(export_table, line): + # Hand this line to the appropriate state handler + self.state_actions[self.state](self, ln, line) - # Hand this line to the appropriate state handler - if self.state == state.NORMAL: - self.process_normal(ln, line) - elif self.state == state.NAME: - self.process_name(ln, line) - elif self.state in [state.BODY, state.BODY_MAYBE, - state.BODY_WITH_BLANK_LINE]: - self.process_body(ln, line) - elif self.state == state.INLINE: # scanning for inline parameters - self.process_inline(ln, line) - elif self.state == state.PROTO: - self.process_proto(ln, line) - elif self.state == state.DOCBLOCK: - self.process_docblock(ln, line) except OSError: self.config.log.error(f"Error: Cannot open file {self.fname}") diff --git a/scripts/lib/kdoc/kdoc_re.py b/scripts/lib/kdoc/kdoc_re.py index e81695b273bf..612223e1e723 100644 --- a/scripts/lib/kdoc/kdoc_re.py +++ b/scripts/lib/kdoc/kdoc_re.py @@ -29,12 +29,9 @@ class KernRe: """ Adds a new regex or re-use it from the cache. 
""" - - if string in re_cache: - self.regex = re_cache[string] - else: + self.regex = re_cache.get(string, None) + if not self.regex: self.regex = re.compile(string, flags=flags) - if self.cache: re_cache[string] = self.regex diff --git a/scripts/misc-check b/scripts/misc-check index a74450e799d1..84f08da17b2c 100755 --- a/scripts/misc-check +++ b/scripts/misc-check @@ -62,6 +62,15 @@ check_unnecessary_include_linux_export_h () { xargs -r printf "%s: warning: EXPORT_SYMBOL() is not used, but #include <linux/export.h> is present\n" >&2 } -check_tracked_ignored_files -check_missing_include_linux_export_h -check_unnecessary_include_linux_export_h +case "${KBUILD_EXTRA_WARN}" in +*1*) + check_tracked_ignored_files + ;; +esac + +case "${KBUILD_EXTRA_WARN}" in +*2*) + check_missing_include_linux_export_h + check_unnecessary_include_linux_export_h + ;; +esac diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 0871b2e92584..861b56dda64e 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -359,7 +359,7 @@ if ($arch eq "x86_64") { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_CKCORE_PCREL_JSR_IMM26BY2\\s+_mcount\$"; $alignment = 2; } else { - die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; + die "Arch $arch is not supported with CONFIG_DYNAMIC_FTRACE"; } my $text_found = 0; diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index ad9945ccb0cf..3f8d6925e896 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -245,6 +245,10 @@ sub check_missing_tex($) sub get_sphinx_fname() { + if ($ENV{'SPHINXBUILD'}) { + return $ENV{'SPHINXBUILD'}; + } + my $fname = "sphinx-build"; return $fname if findprog($fname); @@ -409,7 +413,7 @@ sub give_redhat_hints() my $old = 0; my $rel; my $noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts"; - $rel = $1 if ($system_release =~ /release\s+(\d+)/); + $rel = $1 if ($system_release =~ /(release|Linux)\s+(\d+)/); if (!($system_release =~ /Fedora/)) { $map{"virtualenv"} = "python-virtualenv"; diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl index 580b4e246aec..d1ae5e92c615 100644 --- a/scripts/syscall.tbl +++ b/scripts/syscall.tbl @@ -408,3 +408,5 @@ 465 common listxattrat sys_listxattrat 466 common removexattrat sys_removexattrat 467 common open_tree_attr sys_open_tree_attr +468 common file_getattr sys_file_getattr +469 common file_setattr sys_file_setattr diff --git a/scripts/test_doc_build.py b/scripts/test_doc_build.py new file mode 100755 index 000000000000..47b4606569f9 --- /dev/null +++ b/scripts/test_doc_build.py @@ -0,0 +1,513 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab+huawei@kernel.org> +# +# pylint: disable=R0903,R0912,R0913,R0914,R0917,C0301 + +""" +Install minimal supported requirements for different Sphinx versions +and optionally test the build. +""" + +import argparse +import asyncio +import os.path +import shutil +import sys +import time +import subprocess + +# Minimal python version supported by the building system. + +PYTHON = os.path.basename(sys.executable) + +min_python_bin = None + +for i in range(9, 13): + p = f"python3.{i}" + if shutil.which(p): + min_python_bin = p + break + +if not min_python_bin: + min_python_bin = PYTHON + +# Starting from 8.0, Python 3.9 is not supported anymore. 
+PYTHON_VER_CHANGES = {(8, 0, 0): PYTHON} + +DEFAULT_VERSIONS_TO_TEST = [ + (3, 4, 3), # Minimal supported version + (5, 3, 0), # CentOS Stream 9 / AlmaLinux 9 + (6, 1, 1), # Debian 12 + (7, 2, 1), # openSUSE Leap 15.6 + (7, 2, 6), # Ubuntu 24.04 LTS + (7, 4, 7), # Ubuntu 24.10 + (7, 3, 0), # openSUSE Tumbleweed + (8, 1, 3), # Fedora 42 + (8, 2, 3) # Latest version - covers rolling distros +] + +# Sphinx versions to be installed and their incremental requirements +SPHINX_REQUIREMENTS = { + # Oldest versions we support for each package required by Sphinx 3.4.3 + (3, 4, 3): { + "docutils": "0.16", + "alabaster": "0.7.12", + "babel": "2.8.0", + "certifi": "2020.6.20", + "docutils": "0.16", + "idna": "2.10", + "imagesize": "1.2.0", + "Jinja2": "2.11.2", + "MarkupSafe": "1.1.1", + "packaging": "20.4", + "Pygments": "2.6.1", + "PyYAML": "5.1", + "requests": "2.24.0", + "snowballstemmer": "2.0.0", + "sphinxcontrib-applehelp": "1.0.2", + "sphinxcontrib-devhelp": "1.0.2", + "sphinxcontrib-htmlhelp": "1.0.3", + "sphinxcontrib-jsmath": "1.0.1", + "sphinxcontrib-qthelp": "1.0.3", + "sphinxcontrib-serializinghtml": "1.1.4", + "urllib3": "1.25.9", + }, + + # Update package dependencies to a more modern base. The goal here + # is to avoid to many incremental changes for the next entries + (3, 5, 0): { + "alabaster": "0.7.13", + "babel": "2.17.0", + "certifi": "2025.6.15", + "idna": "3.10", + "imagesize": "1.4.1", + "packaging": "25.0", + "Pygments": "2.8.1", + "requests": "2.32.4", + "snowballstemmer": "3.0.1", + "sphinxcontrib-applehelp": "1.0.4", + "sphinxcontrib-htmlhelp": "2.0.1", + "sphinxcontrib-serializinghtml": "1.1.5", + "urllib3": "2.0.0", + }, + + # Starting from here, ensure all docutils versions are covered with + # supported Sphinx versions. Other packages are upgraded only when + # required by pip + (4, 0, 0): { + "PyYAML": "5.1", + }, + (4, 1, 0): { + "docutils": "0.17", + "Pygments": "2.19.1", + "Jinja2": "3.0.3", + "MarkupSafe": "2.0", + }, + (4, 3, 0): {}, + (4, 4, 0): {}, + (4, 5, 0): { + "docutils": "0.17.1", + }, + (5, 0, 0): {}, + (5, 1, 0): {}, + (5, 2, 0): { + "docutils": "0.18", + "Jinja2": "3.1.2", + "MarkupSafe": "2.0", + "PyYAML": "5.3.1", + }, + (5, 3, 0): { + "docutils": "0.18.1", + }, + (6, 0, 0): {}, + (6, 1, 0): {}, + (6, 2, 0): { + "PyYAML": "5.4.1", + }, + (7, 0, 0): {}, + (7, 1, 0): {}, + (7, 2, 0): { + "docutils": "0.19", + "PyYAML": "6.0.1", + "sphinxcontrib-serializinghtml": "1.1.9", + }, + (7, 2, 6): { + "docutils": "0.20", + }, + (7, 3, 0): { + "alabaster": "0.7.14", + "PyYAML": "6.0.1", + "tomli": "2.0.1", + }, + (7, 4, 0): { + "docutils": "0.20.1", + "PyYAML": "6.0.1", + }, + (8, 0, 0): { + "docutils": "0.21", + }, + (8, 1, 0): { + "docutils": "0.21.1", + "PyYAML": "6.0.1", + "sphinxcontrib-applehelp": "1.0.7", + "sphinxcontrib-devhelp": "1.0.6", + "sphinxcontrib-htmlhelp": "2.0.6", + "sphinxcontrib-qthelp": "1.0.6", + }, + (8, 2, 0): { + "docutils": "0.21.2", + "PyYAML": "6.0.1", + "sphinxcontrib-serializinghtml": "1.1.9", + }, +} + + +class AsyncCommands: + """Excecute command synchronously""" + + def __init__(self, fp=None): + + self.stdout = None + self.stderr = None + self.output = None + self.fp = fp + + def log(self, out, verbose, is_info=True): + out = out.removesuffix('\n') + + if verbose: + if is_info: + print(out) + else: + print(out, file=sys.stderr) + + if self.fp: + self.fp.write(out + "\n") + + async def _read(self, stream, verbose, is_info): + """Ancillary routine to capture while displaying""" + + while stream is not None: + line = await 
stream.readline() + if line: + out = line.decode("utf-8", errors="backslashreplace") + self.log(out, verbose, is_info) + if is_info: + self.stdout += out + else: + self.stderr += out + else: + break + + async def run(self, cmd, capture_output=False, check=False, + env=None, verbose=True): + + """ + Execute an arbitrary command, handling errors. + + Please notice that this class is not thread safe + """ + + self.stdout = "" + self.stderr = "" + + self.log("$ " + " ".join(cmd), verbose) + + proc = await asyncio.create_subprocess_exec(cmd[0], + *cmd[1:], + env=env, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE) + + # Handle input and output in realtime + await asyncio.gather( + self._read(proc.stdout, verbose, True), + self._read(proc.stderr, verbose, False), + ) + + await proc.wait() + + if check and proc.returncode > 0: + raise subprocess.CalledProcessError(returncode=proc.returncode, + cmd=" ".join(cmd), + output=self.stdout, + stderr=self.stderr) + + if capture_output: + if proc.returncode > 0: + self.log(f"Error {proc.returncode}", verbose=True, is_info=False) + return "" + + return self.output + + ret = subprocess.CompletedProcess(args=cmd, + returncode=proc.returncode, + stdout=self.stdout, + stderr=self.stderr) + + return ret + + +class SphinxVenv: + """ + Installs Sphinx on one virtual env per Sphinx version with a minimal + set of dependencies, adjusting them to each specific version. + """ + + def __init__(self): + """Initialize instance variables""" + + self.built_time = {} + self.first_run = True + + async def _handle_version(self, args, fp, + cur_ver, cur_requirements, python_bin): + """Handle a single Sphinx version""" + + cmd = AsyncCommands(fp) + + ver = ".".join(map(str, cur_ver)) + + if not self.first_run and args.wait_input and args.build: + ret = input("Press Enter to continue or 'a' to abort: ").strip().lower() + if ret == "a": + print("Aborted.") + sys.exit() + else: + self.first_run = False + + venv_dir = f"Sphinx_{ver}" + req_file = f"requirements_{ver}.txt" + + cmd.log(f"\nSphinx {ver} with {python_bin}", verbose=True) + + # Create venv + await cmd.run([python_bin, "-m", "venv", venv_dir], + verbose=args.verbose, check=True) + pip = os.path.join(venv_dir, "bin/pip") + + # Create install list + reqs = [] + for pkg, verstr in cur_requirements.items(): + reqs.append(f"{pkg}=={verstr}") + + reqs.append(f"Sphinx=={ver}") + + await cmd.run([pip, "install"] + reqs, check=True, verbose=args.verbose) + + # Freeze environment + result = await cmd.run([pip, "freeze"], verbose=False, check=True) + + # Pip install succeeded. Write requirements file + if args.req_file: + with open(req_file, "w", encoding="utf-8") as fp: + fp.write(result.stdout) + + if args.build: + start_time = time.time() + + # Prepare a venv environment + env = os.environ.copy() + bin_dir = os.path.join(venv_dir, "bin") + env["PATH"] = bin_dir + ":" + env["PATH"] + env["VIRTUAL_ENV"] = venv_dir + if "PYTHONHOME" in env: + del env["PYTHONHOME"] + + # Test doc build + await cmd.run(["make", "cleandocs"], env=env, check=True) + make = ["make"] + + if args.output: + sphinx_build = os.path.realpath(f"{bin_dir}/sphinx-build") + make += [f"O={args.output}", f"SPHINXBUILD={sphinx_build}"] + + if args.make_args: + make += args.make_args + + make += args.targets + + if args.verbose: + cmd.log(f". 
{bin_dir}/activate", verbose=True) + await cmd.run(make, env=env, check=True, verbose=True) + if args.verbose: + cmd.log("deactivate", verbose=True) + + end_time = time.time() + elapsed_time = end_time - start_time + hours, minutes = divmod(elapsed_time, 3600) + minutes, seconds = divmod(minutes, 60) + + hours = int(hours) + minutes = int(minutes) + seconds = int(seconds) + + self.built_time[ver] = f"{hours:02d}:{minutes:02d}:{seconds:02d}" + + cmd.log(f"Finished doc build for Sphinx {ver}. Elapsed time: {self.built_time[ver]}", verbose=True) + + async def run(self, args): + """ + Navigate though multiple Sphinx versions, handling each of them + on a loop. + """ + + if args.log: + fp = open(args.log, "w", encoding="utf-8") + if not args.verbose: + args.verbose = False + else: + fp = None + if not args.verbose: + args.verbose = True + + cur_requirements = {} + python_bin = min_python_bin + + vers = set(SPHINX_REQUIREMENTS.keys()) | set(args.versions) + + for cur_ver in sorted(vers): + if cur_ver in SPHINX_REQUIREMENTS: + new_reqs = SPHINX_REQUIREMENTS[cur_ver] + cur_requirements.update(new_reqs) + + if cur_ver in PYTHON_VER_CHANGES: # pylint: disable=R1715 + python_bin = PYTHON_VER_CHANGES[cur_ver] + + if cur_ver not in args.versions: + continue + + if args.min_version: + if cur_ver < args.min_version: + continue + + if args.max_version: + if cur_ver > args.max_version: + break + + await self._handle_version(args, fp, cur_ver, cur_requirements, + python_bin) + + if args.build: + cmd = AsyncCommands(fp) + cmd.log("\nSummary:", verbose=True) + for ver, elapsed_time in sorted(self.built_time.items()): + cmd.log(f"\tSphinx {ver} elapsed time: {elapsed_time}", + verbose=True) + + if fp: + fp.close() + +def parse_version(ver_str): + """Convert a version string into a tuple.""" + + return tuple(map(int, ver_str.split("."))) + + +DEFAULT_VERS = " - " +DEFAULT_VERS += "\n - ".join(map(lambda v: f"{v[0]}.{v[1]}.{v[2]}", + DEFAULT_VERSIONS_TO_TEST)) + +SCRIPT = os.path.relpath(__file__) + +DESCRIPTION = f""" +This tool allows creating Python virtual environments for different +Sphinx versions that are supported by the Linux Kernel build system. + +Besides creating the virtual environment, it can also test building +the documentation using "make htmldocs" (and/or other doc targets). + +If called without "--versions" argument, it covers the versions shipped +on major distros, plus the lowest supported version: + +{DEFAULT_VERS} + +A typical usage is to run: + + {SCRIPT} -m -l sphinx_builds.log + +This will create one virtual env for the default version set and run +"make htmldocs" for each version, creating a log file with the +excecuted commands on it. + +NOTE: The build time can be very long, specially on old versions. Also, there +is a known bug with Sphinx version 6.0.x: each subprocess uses a lot of +memory. That, together with "-jauto" may cause OOM killer to cause +failures at the doc generation. 
To minimize the risk, you may use the +"-a" command line parameter to constrain the built directories and/or +reduce the number of threads from "-jauto" to, for instance, "-j4": + + {SCRIPT} -m -V 6.0.1 -a "SPHINXDIRS=process" "SPHINXOPTS='-j4'" + +""" + +MAKE_TARGETS = [ + "htmldocs", + "texinfodocs", + "infodocs", + "latexdocs", + "pdfdocs", + "epubdocs", + "xmldocs", +] + +async def main(): + """Main program""" + + parser = argparse.ArgumentParser(description=DESCRIPTION, + formatter_class=argparse.RawDescriptionHelpFormatter) + + ver_group = parser.add_argument_group("Version range options") + + ver_group.add_argument('-V', '--versions', nargs="*", + default=DEFAULT_VERSIONS_TO_TEST,type=parse_version, + help='Sphinx versions to test') + ver_group.add_argument('--min-version', "--min", type=parse_version, + help='Sphinx minimal version') + ver_group.add_argument('--max-version', "--max", type=parse_version, + help='Sphinx maximum version') + ver_group.add_argument('-f', '--full', action='store_true', + help='Add all Sphinx (major,minor) supported versions to the version range') + + build_group = parser.add_argument_group("Build options") + + build_group.add_argument('-b', '--build', action='store_true', + help='Build documentation') + build_group.add_argument('-a', '--make-args', nargs="*", + help='extra arguments for make, like SPHINXDIRS=netlink/specs', + ) + build_group.add_argument('-t', '--targets', nargs="+", choices=MAKE_TARGETS, + default=[MAKE_TARGETS[0]], + help="make build targets. Default: htmldocs.") + build_group.add_argument("-o", '--output', + help="output directory for the make O=OUTPUT") + + other_group = parser.add_argument_group("Other options") + + other_group.add_argument('-r', '--req-file', action='store_true', + help='write a requirements.txt file') + other_group.add_argument('-l', '--log', + help='Log command output on a file') + other_group.add_argument('-v', '--verbose', action='store_true', + help='Verbose all commands') + other_group.add_argument('-i', '--wait-input', action='store_true', + help='Wait for an enter before going to the next version') + + args = parser.parse_args() + + if not args.make_args: + args.make_args = [] + + sphinx_versions = sorted(list(SPHINX_REQUIREMENTS.keys())) + + if args.full: + args.versions += list(SPHINX_REQUIREMENTS.keys()) + + venv = SphinxVenv() + await venv.run(args) + + +# Call main method +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/ver_linux b/scripts/ver_linux index 1a8ee4ff0e32..d6f2362d3792 100755 --- a/scripts/ver_linux +++ b/scripts/ver_linux @@ -25,8 +25,6 @@ BEGIN { printversion("Module-init-tools", version("depmod -V")) printversion("E2fsprogs", version("tune2fs")) printversion("Jfsutils", version("fsck.jfs -V")) - printversion("Reiserfsprogs", version("reiserfsck -V")) - printversion("Reiser4fsprogs", version("fsck.reiser4 -V")) printversion("Xfsprogs", version("xfs_db -V")) printversion("Pcmciautils", version("pccardctl -V")) printversion("Pcmcia-cs", version("cardmgr -V")) |
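[Note: the AsyncCommands class in test_doc_build.py interleaves live display
and capture by reading a subprocess's stdout and stderr concurrently with
asyncio.gather(). A simplified, self-contained sketch of the same pattern,
with the two streams merged into one for brevity (illustrative only, not the
tool's code):

    import asyncio

    async def run_live(cmd):
        """Run cmd, echoing output as it arrives and returning it all."""
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT)
        captured = []
        while True:
            line = await proc.stdout.readline()
            if not line:
                break
            text = line.decode("utf-8", errors="backslashreplace")
            print(text, end="")     # echo in real time ...
            captured.append(text)   # ... while also capturing
        await proc.wait()
        return proc.returncode, "".join(captured)

    rc, out = asyncio.run(run_live(["echo", "hello"]))
    assert rc == 0 and out.strip() == "hello"
]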