From 484e9aa6efaf96a7a5b5fe3216f24973166fbfe3 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:17:59 +0100 Subject: scripts/get_abi.py: add a Python tool to generate ReST output The get_abi.pl script requires some care, though the number of changes it has needed since I originally wrote it has not been high. Maintaining Perl scripts without using classes requires more effort than doing so in Python, due to global variable management. Also, it is easier to find Python developers these days than Perl ones. As a plus, using a Python class to handle ABI allows better integration with Sphinx extensions, allowing, for instance, automarkup to generate cross-references for ABI symbols. With that in mind, rewrite the core of get_abi.pl in Python, using classes, to help produce documentation. This will allow better integration with the Sphinx ABI extension in the future. The algorithms used are the same as the Perl ones, with a couple of cleanups to remove redundant variables and to help with cross-reference generation. While doing that, remove some code that was only important in the past, when ABI files weren't using the ReST format. Some minor improvements were added, like using a fixed seed when generating ABI keys for duplicated names, making the results reproducible. The resulting script is a little faster than the original one (tested on a machine with SSD disks). That's probably because we now use only pre-compiled regular expressions, and string replacement methods are used instead of regexes where possible. The new version is a little more conservative when converting text to cross-references, to avoid adding them inside literal blocks. To ensure that the ReST output parses all variables and files properly, the end result was compared, using diff, with the one produced by the Perl script and showed no regressions. There are minor improvements in the results, as it now properly groups What: entries in some special cases. It also better escapes some XREF names. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/71a894211a8b69664711144d9c4f8a0e73d1ae3c.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 512 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 512 insertions(+) create mode 100644 scripts/lib/abi/abi_parser.py (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py new file mode 100644 index 000000000000..b3fa70eee412 --- /dev/null +++ b/scripts/lib/abi/abi_parser.py @@ -0,0 +1,512 @@ +#!/usr/bin/env python3 +# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302 +# Copyright(c) 2025: Mauro Carvalho Chehab . +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. 
+""" + +from argparse import Namespace +import logging +import os +import re + +from glob import glob +from pprint import pformat +from random import randrange, seed + +# Import Python modules + +from helpers import AbiDebug, ABI_DIR + + +class AbiParser: + """Main class to parse ABI files""" + + TAGS = r"(what|where|date|kernelversion|contact|description|users)" + XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)" + + def __init__(self, directory, logger=None, + enable_lineno=False, show_warnings=True, debug=0): + """Stores arguments for the class and initialize class vars""" + + self.directory = directory + self.enable_lineno = enable_lineno + self.show_warnings = show_warnings + self.debug = debug + + if not logger: + self.log = logging.getLogger("get_abi") + else: + self.log = logger + + self.data = {} + self.what_symbols = {} + self.file_refs = {} + self.what_refs = {} + + # Regular expressions used on parser + self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I) + self.re_valid = re.compile(self.TAGS) + self.re_start_spc = re.compile(r"(\s*)(\S.*)") + self.re_whitespace = re.compile(r"^\s+") + + # Regular used on print + self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})") + self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])") + self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)") + self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n") + self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst") + self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)") + self.re_xref_node = re.compile(self.XREF) + + def warn(self, fdata, msg, extra=None): + """Displays a parse error if warning is enabled""" + + if not self.show_warnings: + return + + msg = f"{fdata.fname}:{fdata.ln}: {msg}" + if extra: + msg += "\n\t\t" + extra + + self.log.warning(msg) + + def add_symbol(self, what, fname, ln=None, xref=None): + """Create a reference table describing where each 'what' is located""" + + if what not in self.what_symbols: + self.what_symbols[what] = {"file": {}} + + if fname not in self.what_symbols[what]["file"]: + self.what_symbols[what]["file"][fname] = [] + + if ln and ln not in self.what_symbols[what]["file"][fname]: + self.what_symbols[what]["file"][fname].append(ln) + + if xref: + self.what_symbols[what]["xref"] = xref + + def _parse_line(self, fdata, line): + """Parse a single line of an ABI file""" + + new_what = False + new_tag = False + content = None + + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + sep = match.group(2) + content = match.group(3) + + match = self.re_valid.search(new) + if match: + new_tag = match.group(1) + else: + if fdata.tag == "description": + # New "tag" is actually part of description. + # Don't consider it a tag + new_tag = False + elif fdata.tag != "": + self.warn(fdata, f"tag '{fdata.tag}' is invalid", line) + + if new_tag: + # "where" is Invalid, but was a common mistake. Warn if found + if new_tag == "where": + self.warn(fdata, "tag 'Where' is invalid. 
Should be 'What:' instead") + new_tag = "what" + + if new_tag == "what": + fdata.space = None + + if content not in self.what_symbols: + self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln) + + if fdata.tag == "what": + fdata.what.append(content.strip("\n")) + else: + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fdata.fname, + ln=fdata.what_ln, xref=fdata.key) + + fdata.label = content + new_what = True + + key = "abi_" + content.lower() + fdata.key = self.re_unprintable.sub("_", key).strip("_") + + # Avoid duplicated keys but using a defined seed, to make + # the namespace identical if there aren't changes at the + # ABI symbols + seed(42) + + while fdata.key in self.data: + char = randrange(0, 51) + ord("A") + if char > ord("Z"): + char += ord("a") - ord("Z") - 1 + + fdata.key += chr(char) + + if fdata.key and fdata.key not in self.data: + self.data[fdata.key] = { + "what": [content], + "file": [fdata.file_ref], + "line_no": fdata.ln, + } + + fdata.what = self.data[fdata.key]["what"] + + self.what_refs[content] = fdata.key + fdata.tag = new_tag + fdata.what_ln = fdata.ln + + if fdata.nametag["what"]: + t = (content, fdata.key) + if t not in fdata.nametag["symbols"]: + fdata.nametag["symbols"].append(t) + + return + + if fdata.tag and new_tag: + fdata.tag = new_tag + + if new_what: + fdata.label = "" + + self.data[fdata.key]["type"] = fdata.ftype + + if "description" in self.data[fdata.key]: + self.data[fdata.key]["description"] += "\n\n" + + if fdata.file_ref not in self.data[fdata.key]["file"]: + self.data[fdata.key]["file"].append(fdata.file_ref) + + if self.debug == AbiDebug.WHAT_PARSING: + self.log.debug("what: %s", fdata.what) + + if not fdata.what: + self.warn(fdata, "'What:' should come first:", line) + return + + if new_tag == "description": + fdata.space = None + + if content: + sep = sep.replace(":", " ") + + c = " " * len(new_tag) + sep + content + c = c.expandtabs() + + match = self.re_start_spc.match(c) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + content = match.group(2) + "\n" + + self.data[fdata.key][fdata.tag] = content + + return + + # Store any contents before tags at the database + if not fdata.tag and "what" in fdata.nametag: + fdata.nametag["description"] += line + return + + if fdata.tag == "description": + content = line.expandtabs() + + if self.re_whitespace.sub("", content) == "": + self.data[fdata.key][fdata.tag] += "\n" + return + + if fdata.space is None: + match = self.re_start_spc.match(content) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + + content = match.group(2) + "\n" + else: + if content.startswith(fdata.space): + content = content[len(fdata.space):] + + else: + fdata.space = "" + + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += content + return + + content = line.strip() + if fdata.tag: + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n") + return + + # Everything else is error + if content: + self.warn(fdata, "Unexpected content", line) + + def parse_file(self, fname, path, basename): + """Parse a single file""" + + ref = f"abi_file_{path}_{basename}" + ref = self.re_unprintable.sub("_", 
ref).strip("_") + + # Store per-file state into a namespace variable. This will be used + # by the per-line parser state machine and by the warning function. + fdata = Namespace + + fdata.fname = fname + fdata.name = basename + + pos = fname.find(ABI_DIR) + if pos > 0: + f = fname[pos:] + else: + f = fname + + fdata.file_ref = (f, ref) + self.file_refs[f] = ref + + fdata.ln = 0 + fdata.what_ln = 0 + fdata.tag = "" + fdata.label = "" + fdata.what = [] + fdata.key = None + fdata.xrefs = None + fdata.space = None + fdata.ftype = path.split("/")[0] + + fdata.nametag = {} + fdata.nametag["what"] = [f"File {path}/{basename}"] + fdata.nametag["type"] = "File" + fdata.nametag["file"] = [fdata.file_ref] + fdata.nametag["line_no"] = 1 + fdata.nametag["description"] = "" + fdata.nametag["symbols"] = [] + + self.data[ref] = fdata.nametag + + if self.debug & AbiDebug.WHAT_OPEN: + self.log.debug("Opening file %s", fname) + + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + for line in fp: + fdata.ln += 1 + + self._parse_line(fdata, line) + + if "description" in fdata.nametag: + fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n") + + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fname, xref=fdata.key) + + def parse_abi(self): + """Parse documentation ABI""" + + ignore_suffixes = ("rej", "org", "orig", "bak", "~") + re_abi = re.compile(r".*" + ABI_DIR) + + for fname in glob(os.path.join(self.directory, "**"), recursive=True): + if os.path.isdir(fname): + continue + + basename = os.path.basename(fname) + + if basename == "README": + continue + if basename.startswith(".") or basename.endswith(ignore_suffixes): + continue + + path = re_abi.sub("", os.path.dirname(fname)) + + self.parse_file(fname, path, basename) + + if self.debug & AbiDebug.DUMP_ABI_STRUCTS: + self.log.debug(pformat(self.data)) + + def print_desc_txt(self, desc): + """Print description as found inside ABI files""" + + desc = desc.strip(" \t\n") + + print(desc + "\n") + + def print_desc_rst(self, desc): + """Enrich ReST output by creating cross-references""" + + # Remove title markups from the description + # Having titles inside ABI files will only work if extra + # care would be taken in order to strictly follow the same + # level order for each markup. + desc = self.re_title_mark.sub("\n\n", "\n" + desc) + desc = desc.rstrip(" \t\n").lstrip("\n") + + # Python's regex performance for non-compiled expressions is a lot + # than Perl, as Perl automatically caches them at their + # first usage. Here, we'll need to do the same, as otherwise the + # performance penalty is be high + + new_desc = "" + for d in desc.split("\n"): + if d == "": + new_desc += "\n" + continue + + # Use cross-references for doc files where needed + d = self.re_doc.sub(r":doc:`/\1`", d) + + # Use cross-references for ABI generated docs where needed + matches = self.re_abi.findall(d) + for m in matches: + abi = m[0] + m[1] + + xref = self.file_refs.get(abi) + if not xref: + # This may happen if ABI is on a separate directory, + # like parsing ABI testing and symbol is at stable. 
+ # The proper solution is to move this part of the code + # for it to be inside sphinx/kernel_abi.py + self.log.info("Didn't find ABI reference for '%s'", abi) + else: + new = self.re_escape.sub(r"\\\1", m[1]) + d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d) + + # Seek for cross reference symbols like /sys/... + # Need to be careful to avoid doing it on a code block + if d[0] not in [" ", "\t"]: + matches = self.re_xref_node.findall(d) + for m in matches: + # Finding ABI here is more complex due to wildcards + xref = self.what_refs.get(m) + if xref: + new = self.re_escape.sub(r"\\\1", m) + d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d) + + new_desc += d + "\n" + + print(new_desc + "\n") + + def print_data(self, enable_lineno, output_in_txt, show_file=False): + """Print ABI at stdout""" + + part = None + for key, v in sorted(self.data.items(), + key=lambda x: (x[1].get("type", ""), + x[1].get("what"))): + + wtype = v.get("type", "Var") + file_ref = v.get("file") + names = v.get("what", [""]) + + if not show_file and wtype == "File": + continue + + if enable_lineno: + ln = v.get("line_no", 1) + print(f".. LINENO {file_ref[0][0]}#{ln}\n") + + if wtype != "File": + cur_part = names[0] + if cur_part.find("/") >= 0: + match = self.re_what.match(cur_part) + if match: + symbol = match.group(1).rstrip("/") + cur_part = "Symbols under " + symbol + + if cur_part and cur_part != part: + part = cur_part + print(f"{part}\n{"-" * len(part)}\n") + + print(f".. _{key}:\n") + + max_len = 0 + for i in range(0, len(names)): # pylint: disable=C0200 + names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**" + + max_len = max(max_len, len(names[i])) + + print("+-" + "-" * max_len + "-+") + for name in names: + print(f"| {name}" + " " * (max_len - len(name)) + " |") + print("+-" + "-" * max_len + "-+") + print() + + for ref in file_ref: + if wtype == "File": + print(f".. 
_{ref[1]}:\n") + else: + base = os.path.basename(ref[0]) + print(f"Defined on file :ref:`{base} <{ref[1]}>`\n") + + if wtype == "File": + print(f"{names[0]}\n{"-" * len(names[0])}\n") + + desc = v.get("description") + if not desc and wtype != "File": + print(f"DESCRIPTION MISSING for {names[0]}\n") + + if desc: + if output_in_txt: + self.print_desc_txt(desc) + else: + self.print_desc_rst(desc) + + symbols = v.get("symbols") + if symbols: + print("Has the following ABI:\n") + + for w, label in symbols: + # Escape special chars from content + content = self.re_escape.sub(r"\\\1", w) + + print(f"- :ref:`{content} <{label}>`\n") + + users = v.get("users") + if users and users.strip(" \t\n"): + print(f"Users:\n\t{users.strip("\n").replace('\n', '\n\t')}\n") + + def check_issues(self): + """Warn about duplicated ABI entries""" + + for what, v in self.what_symbols.items(): + files = v.get("file") + if not files: + # Should never happen if the parser works properly + self.log.warning("%s doesn't have a file associated", what) + continue + + if len(files) == 1: + continue + + f = [] + for fname, lines in sorted(files.items()): + if not lines: + f.append(f"{fname}") + elif len(lines) == 1: + f.append(f"{fname}:{lines[0]}") + else: + f.append(f"{fname} lines {", ".join(str(x) for x in lines)}") + + self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f)) -- cgit From 6b48bea16848dd7c771411db3dcc01b3bc4dd4c2 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:00 +0100 Subject: scripts/get_abi.py: add support for symbol search Add support for searching symbols from Documentation/ABI using regular expressions to match the symbols' names. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/21b2c48657dde112d5417dcd7e0aa7cd383b9a0a.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 52 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index b3fa70eee412..bea7f1a76165 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -510,3 +510,55 @@ class AbiParser: f.append(f"{fname} lines {", ".join(str(x) for x in lines)}") self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f)) + + def search_symbols(self, expr): + """ Searches for ABI symbols """ + + regex = re.compile(expr, re.I) + + found_keys = 0 + for t in sorted(self.data.items(), key=lambda x: [0]): + v = t[1] + + wtype = v.get("type", "") + if wtype == "File": + continue + + for what in v.get("what", [""]): + if regex.search(what): + found_keys += 1 + + kernelversion = v.get("kernelversion", "").strip(" \t\n") + date = v.get("date", "").strip(" \t\n") + contact = v.get("contact", "").strip(" \t\n") + users = v.get("users", "").strip(" \t\n") + desc = v.get("description", "").strip(" \t\n") + + files = [] + for f in v.get("file", ()): + files.append(f[0]) + + what = str(found_keys) + ". 
" + what + title_tag = "-" * len(what) + + print(f"\n{what}\n{title_tag}\n") + + if kernelversion: + print(f"Kernel version:\t\t{kernelversion}") + + if date: + print(f"Date:\t\t\t{date}") + + if contact: + print(f"Contact:\t\t{contact}") + + if users: + print(f"Users:\t\t\t{users}") + + print(f"Defined on file{'s'[:len(files) ^ 1]}:\t{", ".join(files)}") + + if desc: + print(f"\n{desc.strip("\n")}\n") + + if not found_keys: + print(f"Regular expression /{expr}/ not found.") -- cgit From c67c3fbdd917884e38a366c38717c9f769075c15 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:02 +0100 Subject: scripts/lib/abi/abi_parser.py: optimize parse_abi() function Instead of using glob, use a recursive function to parse all files. Such change reduces the total excecution time by 15% with my SSD disks. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/190dd358897017ed82c56f1e263192215ffbae43.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 49 ++++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 15 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index bea7f1a76165..6052a8aec443 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -12,7 +12,6 @@ import logging import os import re -from glob import glob from pprint import pformat from random import randrange, seed @@ -46,7 +45,11 @@ class AbiParser: self.file_refs = {} self.what_refs = {} + # Ignore files that contain such suffixes + self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~") + # Regular expressions used on parser + self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR) self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I) self.re_valid = re.compile(self.TAGS) self.re_start_spc = re.compile(r"(\s*)(\S.*)") @@ -322,26 +325,42 @@ class AbiParser: for w in fdata.what: self.add_symbol(what=w, fname=fname, xref=fdata.key) - def parse_abi(self): - """Parse documentation ABI""" + def _parse_abi(self, root=None): + """Internal function to parse documentation ABI recursively""" - ignore_suffixes = ("rej", "org", "orig", "bak", "~") - re_abi = re.compile(r".*" + ABI_DIR) + if not root: + root = self.directory - for fname in glob(os.path.join(self.directory, "**"), recursive=True): - if os.path.isdir(fname): - continue + with os.scandir(root) as obj: + for entry in obj: + name = os.path.join(root, entry.name) - basename = os.path.basename(fname) + if entry.is_dir(): + self.parse_abi(name) + continue - if basename == "README": - continue - if basename.startswith(".") or basename.endswith(ignore_suffixes): - continue + if not entry.is_file(): + continue + + basename = os.path.basename(name) - path = re_abi.sub("", os.path.dirname(fname)) + if basename == "README": + continue + + if basename.startswith("."): + continue + + if basename.endswith(self.ignore_suffixes): + continue + + path = self.re_abi_dir.sub("", os.path.dirname(name)) + + self.parse_file(name, path, basename) + + def parse_abi(self, root=None): + """Parse documentation ABI""" - self.parse_file(fname, path, basename) + self._parse_abi(root) if self.debug & AbiDebug.DUMP_ABI_STRUCTS: self.log.debug(pformat(self.data)) -- cgit From 9bec7870c64c00983773cfddab8d6a037f7767f3 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:03 +0100 Subject: scripts/lib/abi/abi_parser.py: use an interactor for ReST output Instead of 
printing all results line per line, use an interactor to return each variable as a separate message. This won't change much when using it via command line, but it will help Sphinx integration by providing an interactor that could be used there to handle ABI symbol by symbol. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/e3c94b8cdfd5e955aa19a703921f364a89089634.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 48 +++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 22 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 6052a8aec443..960e27161c26 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -336,7 +336,7 @@ class AbiParser: name = os.path.join(root, entry.name) if entry.is_dir(): - self.parse_abi(name) + self._parse_abi(name) continue if not entry.is_file(): @@ -365,14 +365,14 @@ class AbiParser: if self.debug & AbiDebug.DUMP_ABI_STRUCTS: self.log.debug(pformat(self.data)) - def print_desc_txt(self, desc): + def desc_txt(self, desc): """Print description as found inside ABI files""" desc = desc.strip(" \t\n") - print(desc + "\n") + return desc + "\n\n" - def print_desc_rst(self, desc): + def desc_rst(self, desc): """Enrich ReST output by creating cross-references""" # Remove title markups from the description @@ -425,9 +425,9 @@ class AbiParser: new_desc += d + "\n" - print(new_desc + "\n") + return new_desc + "\n\n" - def print_data(self, enable_lineno, output_in_txt, show_file=False): + def doc(self, enable_lineno, output_in_txt, show_file=False): """Print ABI at stdout""" part = None @@ -442,9 +442,11 @@ class AbiParser: if not show_file and wtype == "File": continue + msg = "" + if enable_lineno: ln = v.get("line_no", 1) - print(f".. LINENO {file_ref[0][0]}#{ln}\n") + msg += f".. LINENO {file_ref[0][0]}#{ln}\n\n" if wtype != "File": cur_part = names[0] @@ -456,9 +458,9 @@ class AbiParser: if cur_part and cur_part != part: part = cur_part - print(f"{part}\n{"-" * len(part)}\n") + msg += f"{part}\n{"-" * len(part)}\n\n" - print(f".. _{key}:\n") + msg += f".. _{key}:\n\n" max_len = 0 for i in range(0, len(names)): # pylint: disable=C0200 @@ -466,45 +468,47 @@ class AbiParser: max_len = max(max_len, len(names[i])) - print("+-" + "-" * max_len + "-+") + msg += "+-" + "-" * max_len + "-+\n" for name in names: - print(f"| {name}" + " " * (max_len - len(name)) + " |") - print("+-" + "-" * max_len + "-+") - print() + msg += f"| {name}" + " " * (max_len - len(name)) + " |\n" + msg += "+-" + "-" * max_len + "-+\n" + msg += "\n" for ref in file_ref: if wtype == "File": - print(f".. _{ref[1]}:\n") + msg += f".. 
_{ref[1]}:\n\n" else: base = os.path.basename(ref[0]) - print(f"Defined on file :ref:`{base} <{ref[1]}>`\n") + msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n" if wtype == "File": - print(f"{names[0]}\n{"-" * len(names[0])}\n") + msg += f"{names[0]}\n{"-" * len(names[0])}\n\n" desc = v.get("description") if not desc and wtype != "File": - print(f"DESCRIPTION MISSING for {names[0]}\n") + msg += f"DESCRIPTION MISSING for {names[0]}\n\n" if desc: if output_in_txt: - self.print_desc_txt(desc) + msg += self.desc_txt(desc) else: - self.print_desc_rst(desc) + msg += self.desc_rst(desc) symbols = v.get("symbols") if symbols: - print("Has the following ABI:\n") + msg += "Has the following ABI:\n\n" for w, label in symbols: # Escape special chars from content content = self.re_escape.sub(r"\\\1", w) - print(f"- :ref:`{content} <{label}>`\n") + msg += f"- :ref:`{content} <{label}>`\n\n" users = v.get("users") if users and users.strip(" \t\n"): - print(f"Users:\n\t{users.strip("\n").replace('\n', '\n\t')}\n") + msg += f"Users:\n\t{users.strip("\n").replace('\n', '\n\t')}\n\n" + + yield msg def check_issues(self): """Warn about duplicated ABI entries""" -- cgit From ee34f8300c8940758dc69f80107d9f5873c08f17 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:04 +0100 Subject: docs: sphinx/kernel_abi: use AbiParser directly Instead of running the get_abi.py script, import the AbiParser class and handle messages directly there using an interactor. This should save some memory, as there's no need to exec Python inside the Sphinx Python extension. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/8dbc244dcda97112c1b694e2512a5d600e62873b.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 960e27161c26..57c125fd40a5 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -427,7 +427,7 @@ class AbiParser: return new_desc + "\n\n" - def doc(self, enable_lineno, output_in_txt, show_file=False): + def doc(self, enable_lineno, output_in_txt=False, show_file=False): """Print ABI at stdout""" part = None -- cgit From aea5e52dce74f679b91c66caad91d587d5504f6c Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:05 +0100 Subject: docs: sphinx/kernel_abi: reduce buffer usage for ABI messages Instead of producing a big message with all ABI contents and then parsing it as a whole, simplify the code by handling each ABI symbol separately. As an additional benefit, there's no need to place file/line numbers inline in the data and use a regex to convert them. 
Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/15be22955e3c6df49d7256c8fd24f62b397ad0ff.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 57c125fd40a5..1db6c54fc65a 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -427,7 +427,7 @@ class AbiParser: return new_desc + "\n\n" - def doc(self, enable_lineno, output_in_txt=False, show_file=False): + def doc(self, output_in_txt=False, show_file=True): """Print ABI at stdout""" part = None @@ -444,10 +444,6 @@ class AbiParser: msg = "" - if enable_lineno: - ln = v.get("line_no", 1) - msg += f".. LINENO {file_ref[0][0]}#{ln}\n\n" - if wtype != "File": cur_part = names[0] if cur_part.find("/") >= 0: @@ -508,7 +504,9 @@ class AbiParser: if users and users.strip(" \t\n"): msg += f"Users:\n\t{users.strip("\n").replace('\n', '\n\t')}\n\n" - yield msg + ln = v.get("line_no", 1) + + yield (msg, file_ref[0][0], ln) def check_issues(self): """Warn about duplicated ABI entries""" -- cgit From 2a21d80dfb4135b4766d8ff3231a3ea1c19bcc83 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:07 +0100 Subject: scripts/get_abi.pl: Add filtering capabilities to rest output This way, Sphinx ABI extension can parse symbols only once, while keep displaying results in separate files. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/41e108e816e46434aa596e5c0d25d227cb9f0fe5.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 1db6c54fc65a..b20d5c9d920e 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -160,6 +160,7 @@ class AbiParser: self.data[fdata.key] = { "what": [content], "file": [fdata.file_ref], + "path": fdata.ftype, "line_no": fdata.ln, } @@ -182,8 +183,6 @@ class AbiParser: if new_what: fdata.label = "" - self.data[fdata.key]["type"] = fdata.ftype - if "description" in self.data[fdata.key]: self.data[fdata.key]["description"] += "\n\n" @@ -299,6 +298,7 @@ class AbiParser: fdata.nametag = {} fdata.nametag["what"] = [f"File {path}/{basename}"] fdata.nametag["type"] = "File" + fdata.nametag["path"] = fdata.ftype fdata.nametag["file"] = [fdata.file_ref] fdata.nametag["line_no"] = 1 fdata.nametag["description"] = "" @@ -427,7 +427,8 @@ class AbiParser: return new_desc + "\n\n" - def doc(self, output_in_txt=False, show_file=True): + def doc(self, output_in_txt=False, show_symbols=True, show_file=True, + filter_path=None): """Print ABI at stdout""" part = None @@ -435,12 +436,20 @@ class AbiParser: key=lambda x: (x[1].get("type", ""), x[1].get("what"))): - wtype = v.get("type", "Var") + wtype = v.get("type", "Symbol") file_ref = v.get("file") names = v.get("what", [""]) - if not show_file and wtype == "File": - continue + if wtype == "File": + if not show_file: + continue + else: + if not show_symbols: + continue + + if filter_path: + if v.get("path") != filter_path: + continue msg = "" -- cgit From 98a4324a8b7bbe433483c90524026be0ccc9ffa8 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:08 +0100 Subject: 
scripts/get_abi.pl: add support to parse ABI README file The Documentation/ABI/README file is currently outside the documentation tree. Yet, it may still provide some useful information. Add it to the documentation parsing. As a plus, this avoids a warning when detecting missing cross-references. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/f1285dedfe4d0eb0f0af34f6a68bee6fde36dd7d.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index b20d5c9d920e..6fac461d794c 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -263,6 +263,16 @@ class AbiParser: if content: self.warn(fdata, "Unexpected content", line) + def parse_readme(self, nametag, fname): + """Parse ABI README file""" + + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + nametag["description"] = "```\n" + for line in fp: + nametag["description"] += " " + line + + nametag["description"] += "```\n" + def parse_file(self, fname, path, basename): """Parse a single file""" @@ -309,6 +319,10 @@ class AbiParser: if self.debug & AbiDebug.WHAT_OPEN: self.log.debug("Opening file %s", fname) + if basename == "README": + self.parse_readme(fdata.nametag, fname) + return + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: for line in fp: fdata.ln += 1 @@ -344,9 +358,6 @@ class AbiParser: basename = os.path.basename(name) - if basename == "README": - continue - if basename.startswith("."): continue @@ -448,8 +459,12 @@ class AbiParser: continue if filter_path: - if v.get("path") != filter_path: - continue + if filter_path == "README": + if not names[0].endswith("README"): + continue + else: + if v.get("path") != filter_path: + continue msg = "" -- cgit From 5d7871d77f6d62406b3d459a58810c1ddb8904c2 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:09 +0100 Subject: docs: sphinx/kernel_abi: parse ABI files only once Right now, the logic parses ABI files on 4 steps, one for each directory. While this is fine in principle, by doing that, not all symbol cross-references will be created. Change the logic to do the parsing only once in order to get a global dictionary to be used when creating ABI cross-references. 
Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/5205c53838b6ea25f4cdd4cc1e3d17c0141e75a6.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 6fac461d794c..87d1b9e14bb3 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -266,12 +266,20 @@ class AbiParser: def parse_readme(self, nametag, fname): """Parse ABI README file""" + nametag["what"] = ["ABI file contents"] + nametag["path"] = "README" with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: - nametag["description"] = "```\n" for line in fp: - nametag["description"] += " " + line + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + + match = self.re_valid.search(new) + if match: + nametag["description"] += "\n:" + line + continue - nametag["description"] += "```\n" + nametag["description"] += line def parse_file(self, fname, path, basename): """Parse a single file""" @@ -459,12 +467,8 @@ class AbiParser: continue if filter_path: - if filter_path == "README": - if not names[0].endswith("README"): - continue - else: - if v.get("path") != filter_path: - continue + if v.get("path") != filter_path: + continue msg = "" -- cgit From c940816968da6ef9a9462b7c070cc333d609a16c Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:11 +0100 Subject: docs: sphinx/automarkup: add cross-references for ABI Now that all ABI files are handled together, we can add a feature at automarkup for it to generate cross-references for ABI symbols. The cross-reference logic can produce references for all existing files, except for README (as this is not parsed). For symbols, they need to be an exact match of what it is described at the docs, which is not always true due to wildcards. If symbols at /sys /proc and /config are identical, a cross-reference will be used. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/0b97a51b68b1c20127ad4a6a55658557fe0848d0.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 87d1b9e14bb3..3b1ab4c0bdd7 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -391,6 +391,17 @@ class AbiParser: return desc + "\n\n" + def xref(self, fname): + """ + Converts a Documentation/ABI + basename into a ReST cross-reference + """ + + xref = self.file_refs.get(fname) + if not xref: + return None + else: + return xref + def desc_rst(self, desc): """Enrich ReST output by creating cross-references""" -- cgit From dc525a7650d70668c4d54cf03b4cba793b72cb5a Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:13 +0100 Subject: scripts/lib/abi/abi_parser.py: Rename title name for ABI files This makes them look better when generating cross-references. 
Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/e44574cb2796861d6acbce839068ed3ef385d16c.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 3b1ab4c0bdd7..0c3837e52afa 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -314,7 +314,7 @@ class AbiParser: fdata.ftype = path.split("/")[0] fdata.nametag = {} - fdata.nametag["what"] = [f"File {path}/{basename}"] + fdata.nametag["what"] = [f"ABI file {path}/{basename}"] fdata.nametag["type"] = "File" fdata.nametag["path"] = fdata.ftype fdata.nametag["file"] = [fdata.file_ref] -- cgit From 6649b4217089c5d17dc210946baf9c9537c7fb5d Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 10 Feb 2025 11:18:14 +0100 Subject: scripts/lib/abi/abi_parser.py: make it backward-compatible with Python 3.6 Despite being introduced on Python 3.6, the original implementation was too limited: it doesn't accept anything but the argument. Even on python 3.10.12, support was still limited, as more complex operations cause SyntaxError: Exception occurred: File ".../linux/Documentation/sphinx/kernel_abi.py", line 48, in from get_abi import AbiParser File ".../linux/scripts/lib/abi/abi_parser.py", line 525 msg += f"{part}\n{"-" * len(part)}\n\n" ^ SyntaxError: f-string: expecting '}' Replace f-strings by normal string concatenation when it doesn't work on Python 3.6. Reported-by: Akira Yokosawa Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/41d2f85df134a46db46fed73a0f9697a3d2ae9ba.1739182025.git.mchehab+huawei@kernel.org --- scripts/lib/abi/abi_parser.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'scripts/lib/abi/abi_parser.py') diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py index 0c3837e52afa..f08de6d3bf7c 100644 --- a/scripts/lib/abi/abi_parser.py +++ b/scripts/lib/abi/abi_parser.py @@ -493,7 +493,7 @@ class AbiParser: if cur_part and cur_part != part: part = cur_part - msg += f"{part}\n{"-" * len(part)}\n\n" + msg += part + "\n"+ "-" * len(part) +"\n\n" msg += f".. 
_{key}:\n\n" @@ -517,7 +517,7 @@ class AbiParser: msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n" if wtype == "File": - msg += f"{names[0]}\n{"-" * len(names[0])}\n\n" + msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n" desc = v.get("description") if not desc and wtype != "File": @@ -541,7 +541,8 @@ class AbiParser: users = v.get("users") if users and users.strip(" \t\n"): - msg += f"Users:\n\t{users.strip("\n").replace('\n', '\n\t')}\n\n" + users = users.strip("\n").replace('\n', '\n\t') + msg += f"Users:\n\t{users}\n\n" ln = v.get("line_no", 1) @@ -567,7 +568,9 @@ class AbiParser: elif len(lines) == 1: f.append(f"{fname}:{lines[0]}") else: - f.append(f"{fname} lines {", ".join(str(x) for x in lines)}") + m = fname + "lines " + m += ", ".join(str(x) for x in lines) + f.append(m) self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f)) @@ -615,10 +618,11 @@ class AbiParser: if users: print(f"Users:\t\t\t{users}") - print(f"Defined on file{'s'[:len(files) ^ 1]}:\t{", ".join(files)}") + print("Defined on file(s):\t" + ", ".join(files)) if desc: - print(f"\n{desc.strip("\n")}\n") + desc = desc.strip("\n") + print(f"\n{desc}\n") if not found_keys: print(f"Regular expression /{expr}/ not found.") -- cgit
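Taken together, the series leaves AbiParser as a small library that both scripts/get_abi.py and the Sphinx kernel_abi extension can drive. The sketch below is a minimal, hypothetical driver (not part of the series) showing one way the class could be exercised from the top of a kernel source tree; the sys.path setup and the LINENO echo are assumptions for illustration only, while the class name, the method names and the (message, file, line) tuples yielded by doc() come from the patches above.

    #!/usr/bin/env python3
    # Minimal driver sketch, assuming it runs from the top of a kernel source tree.

    import os
    import sys

    # abi_parser.py imports its sibling module "helpers" directly, so the
    # directory itself needs to be on sys.path (it is not a package).
    sys.path.insert(0, os.path.join("scripts", "lib", "abi"))

    from abi_parser import AbiParser  # noqa: E402

    parser = AbiParser("Documentation/ABI", show_warnings=True)
    parser.parse_abi()       # walk the ABI tree and build the symbol dictionaries
    parser.check_issues()    # warn about "What:" entries defined more than once

    # After the interactor patches, doc() is a generator that yields one ReST
    # message per entry, together with the originating file and line number,
    # which the Sphinx extension uses for source mapping.
    for msg, fname, ln in parser.doc(output_in_txt=False, show_file=True):
        print(f".. LINENO {fname}#{ln}\n")  # illustrative; mimics the old inline marker
        print(msg)

    # Regex-based lookup added by the "symbol search" patch:
    parser.search_symbols(r"/sys/kernel/.*")

In the Sphinx case, the extension would instead consume each yielded message and attach the file/line pair to the generated nodes, which is why the later patches moved that information out of the text stream and into the tuple.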