Diffstat (limited to 'tools/docs/lib')
-rw-r--r--  tools/docs/lib/__init__.py             |   0
-rw-r--r--  tools/docs/lib/enrich_formatter.py     |  70
-rwxr-xr-x  tools/docs/lib/latex_fonts.py          | 167
-rwxr-xr-x  tools/docs/lib/parse_data_structs.py   | 482
-rw-r--r--  tools/docs/lib/python_version.py       | 178

5 files changed, 0 insertions(+), 897 deletions(-)
diff --git a/tools/docs/lib/__init__.py b/tools/docs/lib/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/docs/lib/__init__.py
+++ /dev/null
diff --git a/tools/docs/lib/enrich_formatter.py b/tools/docs/lib/enrich_formatter.py
deleted file mode 100644
index bb171567a4ca..000000000000
--- a/tools/docs/lib/enrich_formatter.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2025 by Mauro Carvalho Chehab <mchehab@kernel.org>.
-
-"""
-Ancillary argparse HelpFormatter class that works in a similar way to
-argparse.RawDescriptionHelpFormatter, e.g. the description keeps its line
-breaks, but it also implements transformations of the help text. The
-actual transformations are applied by enrich_text() when the output is a tty.
-
-Currently, the following transformations are done:
-
-  - Positional arguments are shown in upper case;
-  - if the output is a TTY, ``var`` markups and positional arguments are
-    shown prefixed by an ANSI SGR code. This is usually rendered as bold.
-    On some terminals, like konsole, this is rendered as colored bold text.
-"""
-
-import argparse
-import re
-import sys
-
-class EnrichFormatter(argparse.HelpFormatter):
-    """
-    Better format the output, making it easier to identify the positional
-    arguments and how they're used in the __doc__ description.
-    """
-    def __init__(self, *args, **kwargs):
-        """Initialize the class and check if the output is a TTY"""
-        super().__init__(*args, **kwargs)
-        self._tty = sys.stdout.isatty()
-
-    def enrich_text(self, text):
-        """Handle ReST markups (currently, only ``foo``)"""
-        if self._tty and text:
-            # Replace ``text`` with ANSI SGR (bold)
-            return re.sub(r'\`\`(.+?)\`\`',
-                          lambda m: f'\033[1m{m.group(1)}\033[0m', text)
-        return text
-
-    def _fill_text(self, text, width, indent):
-        """Enrich descriptions that have markups on them"""
-        enriched = self.enrich_text(text)
-        return "\n".join(indent + line for line in enriched.splitlines())
-
-    def _format_usage(self, usage, actions, groups, prefix):
-        """Enrich positional arguments at the usage: line"""
-
-        prog = self._prog
-        parts = []
-
-        for action in actions:
-            if action.option_strings:
-                opt = action.option_strings[0]
-                if action.nargs != 0:
-                    opt += f" {action.dest.upper()}"
-                parts.append(f"[{opt}]")
-            else:
-                # Positional argument
-                parts.append(self.enrich_text(f"``{action.dest.upper()}``"))
-
-        usage_text = f"{prefix or 'usage: '} {prog} {' '.join(parts)}\n"
-        return usage_text
-
-    def _format_action_invocation(self, action):
-        """Enrich argument names"""
-        if not action.option_strings:
-            return self.enrich_text(f"``{action.dest.upper()}``")
-
-        return ", ".join(action.option_strings)
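For reference, a minimal sketch of how an argparse-based script would opt into this formatter; the import path and the option names are illustrative assumptions, not taken from the patch:

    import argparse

    from enrich_formatter import EnrichFormatter   # assumed import path

    # ``infile`` in the description is rendered in bold when stdout is a TTY.
    parser = argparse.ArgumentParser(
        description="Convert ``infile`` into a cross-referenced ReST page.",
        formatter_class=EnrichFormatter)
    parser.add_argument("infile", help="header or source file to parse")
    parser.add_argument("-d", "--debug", action="store_true",
                        help="enable verbose output")
    args = parser.parse_args()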
diff --git a/tools/docs/lib/latex_fonts.py b/tools/docs/lib/latex_fonts.py
deleted file mode 100755
index 29317f8006ea..000000000000
--- a/tools/docs/lib/latex_fonts.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) Akira Yokosawa, 2024
-#
-# Ported to Python by (c) Mauro Carvalho Chehab, 2025
-
-"""
-Detect problematic Noto CJK variable fonts.
-
-For "make pdfdocs", reports of build errors of translations.pdf started
-arriving early 2024 [1, 2]. It turned out that Fedora and openSUSE
-tumbleweed have started deploying the variable-font [3] format of "Noto CJK"
-fonts [4, 5]. For PDF, a LaTeX package named xeCJK is used for CJK
-(Chinese, Japanese, Korean) pages. xeCJK requires XeLaTeX/XeTeX, which
-does not (and likely never will) understand variable fonts for historical
-reasons.
-
-The build error happens even when both variable- and non-variable-format
-fonts are found on the build system. To make matters worse, Fedora lists
-variable "Noto CJK" fonts in the requirements of langpacks-ja, -ko, -zh_CN,
--zh_TW, etc. Hence developers who have an interest in CJK pages are more
-likely to encounter the build errors.
-
-This script is invoked from the error path of "make pdfdocs" and emits
-suggestions if variable-font files of "Noto CJK" fonts are in the list of
-fonts accessible from XeTeX.
-
-References:
-[1]: https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
-[2]: https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
-[3]: https://en.wikipedia.org/wiki/Variable_font
-[4]: https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
-[5]: https://build.opensuse.org/request/show/1157217
-
-#===========================================================================
-Workarounds for building translations.pdf
-#===========================================================================
-
-* Denylist "variable font" Noto CJK fonts.
-
-  - Create $HOME/deny-vf/fontconfig/fonts.conf from the template below,
-    with tweaks if necessary.
-
-  - The path of fontconfig/fonts.conf can be overridden by setting the env
-    variable FONTS_CONF_DENY_VF.
-
-  * Template:
------------------------------------------------------------------
-<?xml version="1.0"?>
-<!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
-<fontconfig>
-<!--
-  Ignore variable-font glob (not to break xetex)
--->
-    <selectfont>
-        <rejectfont>
-            <!--
-                for Fedora
-            -->
-            <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
-            <!--
-                for openSUSE tumbleweed
-            -->
-            <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
-        </rejectfont>
-    </selectfont>
-</fontconfig>
------------------------------------------------------------------
-
-  The denylisting is activated for "make pdfdocs".
-
-* For skipping CJK pages in PDF
-
-  - Uninstall texlive-xecjk.
-    Denylisting is not needed in this case.
-
-* For printing CJK pages in PDF
-
-  - Need non-variable "Noto CJK" fonts.
-    * Fedora
-      - google-noto-sans-cjk-fonts
-      - google-noto-serif-cjk-fonts
-    * openSUSE tumbleweed
-      - Non-variable "Noto CJK" fonts are not available as distro packages
-        as of April, 2024. Fetch a set of font files from the upstream Noto
-        CJK Font releases at:
-          https://github.com/notofonts/noto-cjk/tree/main/Sans#super-otc
-        and at:
-          https://github.com/notofonts/noto-cjk/tree/main/Serif#super-otc
-        then uncompress and deploy them.
-      - Remember to update the fontconfig cache by running fc-cache.
-
-!!! Caution !!!
-Uninstalling "variable font" packages can be dangerous.
-They might be depended upon by other packages important for your work.
-Denylisting should be less invasive, as it is effective only while
-XeLaTeX runs in "make pdfdocs".
-"""
-
-import os
-import re
-import subprocess
-import textwrap
-import sys
-
-class LatexFontChecker:
-    """
-    Detect problems with CJK variable fonts that affect PDF builds of the
-    translations.
-    """
-
-    def __init__(self, deny_vf=None):
-        if not deny_vf:
-            deny_vf = os.environ.get('FONTS_CONF_DENY_VF', "~/deny-vf")
-
-        self.environ = os.environ.copy()
-        self.environ['XDG_CONFIG_HOME'] = os.path.expanduser(deny_vf)
-
-        self.re_cjk = re.compile(r"([^:]+):\s*Noto\s+(Sans|Sans Mono|Serif) CJK")
-
-    def description(self):
-        return __doc__
-
-    def get_noto_cjk_vf_fonts(self):
-        """Get Noto CJK variable fonts visible to fontconfig"""
-
-        cjk_fonts = set()
-        cmd = ["fc-list", ":", "file", "family", "variable"]
-        try:
-            result = subprocess.run(cmd, stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE,
-                                    universal_newlines=True,
-                                    env=self.environ,
-                                    check=True)
-
-        except subprocess.CalledProcessError as exc:
-            sys.exit(f"Error running fc-list: {repr(exc)}")
-
-        for line in result.stdout.splitlines():
-            if 'variable=True' not in line:
-                continue
-
-            match = self.re_cjk.search(line)
-            if match:
-                cjk_fonts.add(match.group(1))
-
-        return sorted(cjk_fonts)
-
-    def check(self):
-        """Check for problems with CJK fonts"""
-
-        fonts = textwrap.indent("\n".join(self.get_noto_cjk_vf_fonts()), "  ")
-        if not fonts:
-            return None
-
-        rel_file = os.path.relpath(__file__, os.getcwd())
-
-        msg = "=" * 77 + "\n"
-        msg += 'XeTeX is confused by the "variable font" files listed below:\n'
-        msg += fonts + "\n"
-        msg += textwrap.dedent(f"""
-            For CJK pages in PDF, they need to be hidden from XeTeX by denylisting.
-            Or, CJK pages can be skipped by uninstalling texlive-xecjk.
-
-            For more info on denylisting, other options, and variable fonts, run:
-
-                tools/docs/check-variable-fonts.py -h
-            """)
-        msg += "=" * 77
-
-        return msg
diff --git a/tools/docs/lib/parse_data_structs.py b/tools/docs/lib/parse_data_structs.py
deleted file mode 100755
index 25361996cd20..000000000000
--- a/tools/docs/lib/parse_data_structs.py
+++ /dev/null
@@ -1,482 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2016-2025 by Mauro Carvalho Chehab <mchehab@kernel.org>.
-# pylint: disable=R0912,R0915
-
-"""
-Parse a source file or header, creating ReStructured Text cross references.
-
-It accepts an optional file to change the default symbol reference or to
-suppress symbols from the output.
-
-It is capable of identifying defines, functions, structs, typedefs,
-enums and enum symbols and creating cross-references for all of them.
-It is also capable of distinguishing #defines used for specifying a Linux
-ioctl.
-
-The optional rules file contains a set of rules like:
-
-    ignore ioctl VIDIOC_ENUM_FMT
-    replace ioctl VIDIOC_DQBUF vidioc_qbuf
-    replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
-"""
-
-import os
-import re
-import sys
-
-
-class ParseDataStructs:
-    """
-    Creates an enriched version of a Kernel header file with cross-links
-    to each C data structure type.
-
-    It is meant to allow more comprehensive documentation, where uAPI
-    headers gain cross-reference links to the code.
-
-    It is capable of identifying defines, functions, structs, typedefs,
-    enums and enum symbols and creating cross-references for all of them.
-    It is also capable of distinguishing #defines used for specifying a
-    Linux ioctl.
-
-    By default, it creates rules for all symbols and defines, but it also
-    allows parsing an exceptions file. Such a file contains a set of rules
-    using the syntax below:
-
-    1. Ignore rules:
-
-        ignore <type> <symbol>
-
-       Removes the symbol from reference generation.
-
-    2. Replace rules:
-
-        replace <type> <old_symbol> <new_reference>
-
-       Replaces old_symbol with a new reference. The new_reference can be:
-
-       - A simple symbol name;
-       - A full Sphinx reference.
-
-    3. Namespace rules:
-
-        namespace <namespace>
-
-       Sets the C namespace to be used during cross-reference generation.
-       Can be overridden by replace rules.
-
-    On ignore and replace rules, <type> can be:
-        - ioctl: for defines that end with _IO*, e.g. ioctl definitions;
-        - define: for other defines;
-        - symbol: for symbols defined within enums;
-        - typedef: for typedefs;
-        - enum: for the name of a non-anonymous enum;
-        - struct: for structs.
-
-    Examples:
-
-        ignore define __LINUX_MEDIA_H
-        ignore ioctl VIDIOC_ENUM_FMT
-        replace ioctl VIDIOC_DQBUF vidioc_qbuf
-        replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
-
-        namespace MC
-    """
-
-    # Parser regexes with multiple ways to capture enums and structs
-    RE_ENUMS = [
-        re.compile(r"^\s*enum\s+([\w_]+)\s*\{"),
-        re.compile(r"^\s*enum\s+([\w_]+)\s*$"),
-        re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*\{"),
-        re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*$"),
-    ]
-    RE_STRUCTS = [
-        re.compile(r"^\s*struct\s+([_\w][\w\d_]+)\s*\{"),
-        re.compile(r"^\s*struct\s+([_\w][\w\d_]+)$"),
-        re.compile(r"^\s*typedef\s*struct\s+([_\w][\w\d_]+)\s*\{"),
-        re.compile(r"^\s*typedef\s*struct\s+([_\w][\w\d_]+)$"),
-    ]
-
-    # FIXME: the original code was written long before the Sphinx C domain
-    # gained support for multiple namespaces. To avoid too much churn in
-    # the existing hyperlinks, the code kept using "c:type" instead of the
-    # right types. To change that, we need to change the types not only
-    # here, but also in the uAPI media documentation.
-    DEF_SYMBOL_TYPES = {
-        "ioctl": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":ref",
-            "description": "IOCTL Commands",
-        },
-        "define": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":ref",
-            "description": "Macros and Definitions",
-        },
-        # We call each definition inside an enum a "symbol"
-        "symbol": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":ref",
-            "description": "Enumeration values",
-        },
-        "typedef": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":c:type",
-            "description": "Type Definitions",
-        },
-        # This is the description of the enum itself
-        "enum": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":c:type",
-            "description": "Enumerations",
-        },
-        "struct": {
-            "prefix": "\\ ",
-            "suffix": "\\ ",
-            "ref_type": ":c:type",
-            "description": "Structures",
-        },
-    }
-
-    def __init__(self, debug: bool = False):
-        """Initialize internal vars"""
-        self.debug = debug
-        self.data = ""
-
-        self.symbols = {}
-
-        self.namespace = None
-        self.ignore = []
-        self.replace = []
-
-        for symbol_type in self.DEF_SYMBOL_TYPES:
-            self.symbols[symbol_type] = {}
-
-    def read_exceptions(self, fname: str):
-        """Read an exceptions file with ignore/replace/namespace rules"""
-        if not fname:
-            return
-
-        name = os.path.basename(fname)
-
-        with open(fname, "r", encoding="utf-8", errors="backslashreplace") as f:
-            for ln, line in enumerate(f):
-                ln += 1
-                line = line.strip()
-                if not line or line.startswith("#"):
-                    continue
-
-                # ignore rules
-                match = re.match(r"^ignore\s+(\w+)\s+(\S+)", line)
-                if match:
-                    self.ignore.append((ln, match.group(1), match.group(2)))
-                    continue
-
-                # replace rules
-                match = re.match(r"^replace\s+(\S+)\s+(\S+)\s+(\S+)", line)
-                if match:
-                    self.replace.append((ln, match.group(1), match.group(2),
-                                         match.group(3)))
-                    continue
-
-                match = re.match(r"^namespace\s+(\S+)", line)
-                if match:
-                    self.namespace = match.group(1)
-                    continue
-
-                sys.exit(f"{name}:{ln}: invalid line: {line}")
-
-    def apply_exceptions(self):
-        """
-        Process the exceptions file rules to ignore or replace references.
-        """
-
-        # Handle ignore rules
-        for ln, c_type, symbol in self.ignore:
-            if c_type not in self.DEF_SYMBOL_TYPES:
-                sys.exit(f"exceptions file, line {ln}: {c_type} is invalid")
-
-            d = self.symbols[c_type]
-            if symbol in d:
-                del d[symbol]
-
-        # Handle replace rules
-        for ln, c_type, old, new in self.replace:
-            if c_type not in self.DEF_SYMBOL_TYPES:
-                sys.exit(f"exceptions file, line {ln}: {c_type} is invalid")
-
-            reftype = None
-
-            # Parse the reference type when the type is specified
-            match = re.match(r"^\:c\:(\w+)\:\`(.+)\`", new)
-            if match:
-                reftype = f":c:{match.group(1)}"
-                new = match.group(2)
-            else:
-                match = re.search(r"(\:ref)\:\`(.+)\`", new)
-                if match:
-                    reftype = match.group(1)
-                    new = match.group(2)
-
-            # If the replacement rule doesn't have a type, get the default
-            if not reftype:
-                reftype = self.DEF_SYMBOL_TYPES[c_type].get("ref_type")
-            if not reftype:
-                reftype = self.DEF_SYMBOL_TYPES[c_type].get("real_type")
-
-            new_ref = f"{reftype}:`{old} <{new}>`"
-
-            # Change self.symbols to use the replacement rule
-            if old in self.symbols[c_type]:
-                (_, ln) = self.symbols[c_type][old]
-                self.symbols[c_type][old] = (new_ref, ln)
-            else:
-                print(f"exceptions file, line {ln}: Warning: can't find {old} {c_type}")
-
-    def store_type(self, ln, symbol_type: str, symbol: str,
-                   ref_name: str = None, replace_underscores: bool = True):
-        """
-        Stores a new symbol at self.symbols under symbol_type.
-
-        By default, underscores are replaced by "-".
-        """
-        defs = self.DEF_SYMBOL_TYPES[symbol_type]
-
-        prefix = defs.get("prefix", "")
-        suffix = defs.get("suffix", "")
-        ref_type = defs.get("ref_type")
-
-        # Determine ref_link based on the symbol type
-        if ref_type or self.namespace:
-            if not ref_name:
-                ref_name = symbol.lower()
-
-            # c-type references don't support hash
-            if ref_type == ":ref" and replace_underscores:
-                ref_name = ref_name.replace("_", "-")
-
-            # C domain references may have namespaces
-            if ref_type.startswith(":c:"):
-                if self.namespace:
-                    ref_name = f"{self.namespace}.{ref_name}"
-
-            if ref_type:
-                ref_link = f"{ref_type}:`{symbol} <{ref_name}>`"
-            else:
-                ref_link = f"`{symbol} <{ref_name}>`"
-        else:
-            ref_link = symbol
-
-        self.symbols[symbol_type][symbol] = (f"{prefix}{ref_link}{suffix}", ln)
-
-    def store_line(self, line):
-        """Stores a line at self.data, properly indented"""
-        line = "  " + line.expandtabs()
-        self.data += line.rstrip(" ")
-
-    def parse_file(self, file_in: str, exceptions: str = None):
-        """Reads a C source file and gets its identifiers"""
-        self.data = ""
-        is_enum = False
-        is_comment = False
-        multiline = ""
-
-        self.read_exceptions(exceptions)
-
-        with open(file_in, "r",
-                  encoding="utf-8", errors="backslashreplace") as f:
-            for line_no, line in enumerate(f):
-                self.store_line(line)
-                line = line.strip("\n")
-
-                # Handle continuation lines
-                if line.endswith("\\"):
-                    multiline += line[:-1]
-                    continue
-
-                if multiline:
-                    line = multiline + line
-                    multiline = ""
-
-                # Handle comments. They can span multiple lines
-                if not is_comment:
-                    if re.search(r"/\*.*", line):
-                        is_comment = True
-                    else:
-                        # Strip C99-style comments
-                        line = re.sub(r"(//.*)", "", line)
-
-                if is_comment:
-                    if re.search(r".*\*/", line):
-                        is_comment = False
-                    else:
-                        multiline = line
-                        continue
-
-                # At this point, the line variable may hold a multi-line
-                # statement, if lines end with \ or if they have multi-line
-                # comments. With that, the entire comment can safely be
-                # removed, and there's no need to use re.DOTALL below.
-                line = re.sub(r"(/\*.*\*/)", "", line)
-                if not line.strip():
-                    continue
-
-                # It can be useful for debug purposes to print the file after
-                # having comments stripped and multi-lines grouped.
-                if self.debug > 1:
-                    print(f"line {line_no + 1}: {line}")
-
-                # Now the fun begins: parse each type and store it.
-
-                # We opted for a two-step logic here because:
-                # 1. it makes it easier to debug symbols that were not parsed;
-                # 2. we want symbol replacement applied to the entire content,
-                #    not just where the symbol is detected.
-
-                if is_enum:
-                    match = re.match(r"^\s*([_\w][\w\d_]+)\s*[\,=]?", line)
-                    if match:
-                        self.store_type(line_no, "symbol", match.group(1))
-                    if "}" in line:
-                        is_enum = False
-                    continue
-
-                match = re.match(r"^\s*#\s*define\s+([\w_]+)\s+_IO", line)
-                if match:
-                    self.store_type(line_no, "ioctl", match.group(1),
-                                    replace_underscores=False)
-                    continue
-
-                match = re.match(r"^\s*#\s*define\s+([\w_]+)(\s+|$)", line)
-                if match:
-                    self.store_type(line_no, "define", match.group(1))
-                    continue
-
-                match = re.match(r"^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);",
-                                 line)
-                if match:
-                    name = match.group(2).strip()
-                    symbol = match.group(3)
-                    self.store_type(line_no, "typedef", symbol, ref_name=name)
-                    continue
-
-                for re_enum in self.RE_ENUMS:
-                    match = re_enum.match(line)
-                    if match:
-                        self.store_type(line_no, "enum", match.group(1))
-                        is_enum = True
-                        break
-
-                for re_struct in self.RE_STRUCTS:
-                    match = re_struct.match(line)
-                    if match:
-                        self.store_type(line_no, "struct", match.group(1))
-                        break
-
-        self.apply_exceptions()
-
-    def debug_print(self):
-        """
-        Print debug information containing the replacement rules per symbol.
-        To make them easier to check, group them per type.
-        """
-        if not self.debug:
-            return
-
-        for c_type, refs in self.symbols.items():
-            if not refs:  # Skip empty dictionaries
-                continue
-
-            print(f"{c_type}:")
-
-            for symbol, (ref, ln) in sorted(refs.items()):
-                print(f"  #{ln:<5d} {symbol} -> {ref}")
-
-            print()
-
-    def gen_output(self):
-        """Generate the formatted output text."""
-
-        # Avoid extra blank lines
-        text = re.sub(r"\s+$", "", self.data) + "\n"
-        text = re.sub(r"\n\s+\n", "\n\n", text)
-
-        # Escape Sphinx special characters
-        text = re.sub(r"([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^])", r"\\\1", text)
-
-        # Source uAPI files may have special notes. Use bold font for them
-        text = re.sub(r"DEPRECATED", "**DEPRECATED**", text)
-
-        # Delimiters to catch the entire symbol after it was escaped
-        start_delim = r"([ \n\t\(=\*\@])"
-        end_delim = r"(\s|,|\\=|\\:|\;|\)|\}|\{)"
-
-        # Process all reference types
-        for ref_dict in self.symbols.values():
-            for symbol, (replacement, _) in ref_dict.items():
-                symbol = re.escape(re.sub(r"([\_\`\*\<\>\&\\\\:\/])", r"\\\1", symbol))
-                text = re.sub(fr'{start_delim}{symbol}{end_delim}',
-                              fr'\1{replacement}\2', text)
-
-        # Remove "\ " where not needed: before spaces and at the end of lines
-        text = re.sub(r"\\ ([\n ])", r"\1", text)
-        text = re.sub(r" \\ ", " ", text)
-
-        return text
-
-    def gen_toc(self):
-        """
-        Create a list of symbols to be part of a TOC contents table
-        """
-        text = []
-
-        # Sort symbol types per description
-        symbol_descriptions = []
-        for k, v in self.DEF_SYMBOL_TYPES.items():
-            symbol_descriptions.append((v['description'], k))
-
-        symbol_descriptions.sort()
-
-        # Process each category
-        for description, c_type in symbol_descriptions:
-
-            refs = self.symbols[c_type]
-            if not refs:  # Skip empty categories
-                continue
-
-            text.append(f"{description}")
-            text.append("-" * len(description))
-            text.append("")
-
-            # Sort symbols alphabetically
-            for symbol, (ref, ln) in sorted(refs.items()):
-                text.append(f"- LINENO_{ln}: {ref}")
-
-            text.append("")  # Add an empty line between categories
-
-        return "\n".join(text)
-
-    def write_output(self, file_in: str, file_out: str, toc: bool):
-        """Write the generated TOC or cross-referenced body to a file"""
-        title = os.path.basename(file_in)
-
-        if toc:
-            text = self.gen_toc()
-        else:
-            text = self.gen_output()
-
-        with open(file_out, "w", encoding="utf-8", errors="backslashreplace") as f:
-            f.write(".. -*- coding: utf-8; mode: rst -*-\n\n")
-            f.write(f"{title}\n")
-            f.write("=" * len(title) + "\n\n")
-
-            if not toc:
-                f.write(".. parsed-literal::\n\n")
-
-            f.write(text)
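A short driver sketch for the parser above; the file paths, the exceptions file name, and the import path are hypothetical, not taken from the patch:

    from parse_data_structs import ParseDataStructs   # assumed import path

    parser = ParseDataStructs()

    # Parse a uAPI header, applying ignore/replace/namespace rules from an
    # exceptions file, then emit a ReST page wrapped in a parsed-literal block.
    parser.parse_file("include/uapi/linux/media.h",
                      exceptions="media.h.rst.exceptions")
    parser.write_output("include/uapi/linux/media.h", "media.h.rst", toc=False)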
- """ - - def __init__(self, version): - """Ïnitialize self.version tuple from a version string""" - self.version = self.parse_version(version) - - @staticmethod - def parse_version(version): - """Convert a major.minor.patch version into a tuple""" - return tuple(int(x) for x in version.split(".")) - - @staticmethod - def ver_str(version): - """Returns a version tuple as major.minor.patch""" - return ".".join([str(x) for x in version]) - - @staticmethod - def cmd_print(cmd, max_len=80): - cmd_line = [] - - for w in cmd: - w = shlex.quote(w) - - if cmd_line: - if not max_len or len(cmd_line[-1]) + len(w) < max_len: - cmd_line[-1] += " " + w - continue - else: - cmd_line[-1] += " \\" - cmd_line.append(w) - else: - cmd_line.append(w) - - return "\n ".join(cmd_line) - - def __str__(self): - """Returns a version tuple as major.minor.patch from self.version""" - return self.ver_str(self.version) - - @staticmethod - def get_python_version(cmd): - """ - Get python version from a Python binary. As we need to detect if - are out there newer python binaries, we can't rely on sys.release here. - """ - - kwargs = {} - if sys.version_info < (3, 7): - kwargs['universal_newlines'] = True - else: - kwargs['text'] = True - - result = subprocess.run([cmd, "--version"], - stdout = subprocess.PIPE, - stderr = subprocess.PIPE, - **kwargs, check=False) - - version = result.stdout.strip() - - match = re.search(r"(\d+\.\d+\.\d+)", version) - if match: - return PythonVersion.parse_version(match.group(1)) - - print(f"Can't parse version {version}") - return (0, 0, 0) - - @staticmethod - def find_python(min_version): - """ - Detect if are out there any python 3.xy version newer than the - current one. - - Note: this routine is limited to up to 2 digits for python3. We - may need to update it one day, hopefully on a distant future. - """ - patterns = [ - "python3.[0-9][0-9]", - "python3.[0-9]", - ] - - python_cmd = [] - - # Seek for a python binary newer than min_version - for path in os.getenv("PATH", "").split(":"): - for pattern in patterns: - for cmd in glob(os.path.join(path, pattern)): - if os.path.isfile(cmd) and os.access(cmd, os.X_OK): - version = PythonVersion.get_python_version(cmd) - if version >= min_version: - python_cmd.append((version, cmd)) - - return sorted(python_cmd, reverse=True) - - @staticmethod - def check_python(min_version, show_alternatives=False, bail_out=False, - success_on_error=False): - """ - Check if the current python binary satisfies our minimal requirement - for Sphinx build. If not, re-run with a newer version if found. - """ - cur_ver = sys.version_info[:3] - if cur_ver >= min_version: - ver = PythonVersion.ver_str(cur_ver) - return - - python_ver = PythonVersion.ver_str(cur_ver) - - available_versions = PythonVersion.find_python(min_version) - if not available_versions: - print(f"ERROR: Python version {python_ver} is not spported anymore\n") - print(" Can't find a new version. This script may fail") - return - - script_path = os.path.abspath(sys.argv[0]) - - # Check possible alternatives - if available_versions: - new_python_cmd = available_versions[0][1] - else: - new_python_cmd = None - - if show_alternatives and available_versions: - print("You could run, instead:") - for _, cmd in available_versions: - args = [cmd, script_path] + sys.argv[1:] - - cmd_str = indent(PythonVersion.cmd_print(args), " ") - print(f"{cmd_str}\n") - - if bail_out: - msg = f"Python {python_ver} not supported. 
Bailing out" - if success_on_error: - print(msg, file=sys.stderr) - sys.exit(0) - else: - sys.exit(msg) - - print(f"Python {python_ver} not supported. Changing to {new_python_cmd}") - - # Restart script using the newer version - args = [new_python_cmd, script_path] + sys.argv[1:] - - try: - os.execv(new_python_cmd, args) - except OSError as e: - sys.exit(f"Failed to restart with {new_python_cmd}: {e}") |