Diffstat (limited to 'Documentation/sphinx')
-rw-r--r--  Documentation/sphinx/automarkup.py                |   2
-rw-r--r--  Documentation/sphinx/cdomain.py                   | 247
-rw-r--r--  Documentation/sphinx/kernel_feat.py               |   4
-rwxr-xr-x  Documentation/sphinx/kernel_include.py            | 528
-rwxr-xr-x  Documentation/sphinx/maintainers_include.py       |   4
-rwxr-xr-x  Documentation/sphinx/parse-headers.pl             | 404
-rwxr-xr-x  Documentation/sphinx/parser_yaml.py               | 123
-rw-r--r--  Documentation/sphinx/templates/kernel-toc.html    |   3
-rw-r--r--  Documentation/sphinx/templates/translations.html  |   4
9 files changed, 526 insertions, 793 deletions
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index 563033f764bb..1d9dada40a74 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -244,7 +244,7 @@ def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None):
return contnode
#
-# Variant of markup_abi_ref() that warns whan a reference is not found
+# Variant of markup_abi_ref() that warns when a reference is not found
#
def markup_abi_file_ref(docname, app, match):
return markup_abi_ref(docname, app, match, warning=True)
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
deleted file mode 100644
index 3dc285dc70f5..000000000000
--- a/Documentation/sphinx/cdomain.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-# SPDX-License-Identifier: GPL-2.0
-# pylint: disable=W0141,C0113,C0103,C0325
-"""
- cdomain
- ~~~~~~~
-
- Replacement for the sphinx c-domain.
-
- :copyright: Copyright (C) 2016 Markus Heiser
- :license: GPL Version 2, June 1991 see Linux/COPYING for details.
-
- List of customizations:
-
- * Moved the *duplicate C object description* warnings for function
- declarations in the nitpicky mode. See Sphinx documentation for
- the config values for ``nitpick`` and ``nitpick_ignore``.
-
- * Add option 'name' to the "c:function:" directive. With option 'name' the
- ref-name of a function can be modified. E.g.::
-
- .. c:function:: int ioctl( int fd, int request )
- :name: VIDIOC_LOG_STATUS
-
- The func-name (e.g. ioctl) remains in the output but the ref-name changed
- from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
-
- * :c:func:`VIDIOC_LOG_STATUS` or
- * :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
-
- * Handle signatures of function-like macros well. Don't try to deduce
- arguments types of function-like macros.
-
-"""
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-import sphinx
-from sphinx import addnodes
-from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
-from sphinx.domains.c import CObject as Base_CObject
-from sphinx.domains.c import CDomain as Base_CDomain
-from itertools import chain
-import re
-
-__version__ = '1.1'
-
-# Namespace to be prepended to the full name
-namespace = None
-
-#
-# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags
-# - Store the namespace if ".. c:namespace::" tag is found
-#
-RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
-
-def markup_namespace(match):
- global namespace
-
- namespace = match.group(1)
-
- return ""
-
-#
-# Handle c:macro for function-style declaration
-#
-RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
-def markup_macro(match):
- return ".. c:function:: " + match.group(1) + ' ' + match.group(2)
-
-#
-# Handle newer c domain tags that are evaluated as .. c:type: for
-# backward-compatibility with Sphinx < 3.0
-#
-RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')
-
-def markup_ctype(match):
- return ".. c:type:: " + match.group(2)
-
-#
-# Handle newer c domain tags that are evaluated as :c:type: for
-# backward-compatibility with Sphinx < 3.0
-#
-RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')
-def markup_ctype_refs(match):
- return ":c:type:`" + match.group(2) + '`'
-
-#
-# Simply convert :c:expr: and :c:texpr: into a literal block.
-#
-RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
-def markup_c_expr(match):
- return '\\ ``' + match.group(2) + '``\\ '
-
-#
-# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
-#
-def c_markups(app, docname, source):
- result = ""
- markup_func = {
- RE_namespace: markup_namespace,
- RE_expr: markup_c_expr,
- RE_macro: markup_macro,
- RE_ctype: markup_ctype,
- RE_ctype_refs: markup_ctype_refs,
- }
-
- lines = iter(source[0].splitlines(True))
- for n in lines:
- match_iterators = [regex.finditer(n) for regex in markup_func]
- matches = sorted(chain(*match_iterators), key=lambda m: m.start())
- for m in matches:
- n = n[:m.start()] + markup_func[m.re](m) + n[m.end():]
-
- result = result + n
-
- source[0] = result
-
-#
-# Now implements support for the cdomain namespacing logic
-#
-
-def setup(app):
-
- # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
- app.connect('source-read', c_markups)
- app.add_domain(CDomain, override=True)
-
- return dict(
- version = __version__,
- parallel_read_safe = True,
- parallel_write_safe = True
- )
-
-class CObject(Base_CObject):
-
- """
- Description of a C language object.
- """
- option_spec = {
- "name" : directives.unchanged
- }
-
- def handle_func_like_macro(self, sig, signode):
- """Handles signatures of function-like macros.
-
- If the objtype is 'function' and the signature ``sig`` is a
- function-like macro, the name of the macro is returned. Otherwise
- ``False`` is returned. """
-
- global namespace
-
- if not self.objtype == 'function':
- return False
-
- m = c_funcptr_sig_re.match(sig)
- if m is None:
- m = c_sig_re.match(sig)
- if m is None:
- raise ValueError('no match')
-
- rettype, fullname, arglist, _const = m.groups()
- arglist = arglist.strip()
- if rettype or not arglist:
- return False
-
- arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
- arglist = [a.strip() for a in arglist.split(",")]
-
- # has the first argument a type?
- if len(arglist[0].split(" ")) > 1:
- return False
-
- # This is a function-like macro, its arguments are typeless!
- signode += addnodes.desc_name(fullname, fullname)
- paramlist = addnodes.desc_parameterlist()
- signode += paramlist
-
- for argname in arglist:
- param = addnodes.desc_parameter('', '', noemph=True)
- # separate by non-breaking space in the output
- param += nodes.emphasis(argname, argname)
- paramlist += param
-
- if namespace:
- fullname = namespace + "." + fullname
-
- return fullname
-
- def handle_signature(self, sig, signode):
- """Transform a C signature into RST nodes."""
-
- global namespace
-
- fullname = self.handle_func_like_macro(sig, signode)
- if not fullname:
- fullname = super(CObject, self).handle_signature(sig, signode)
-
- if "name" in self.options:
- if self.objtype == 'function':
- fullname = self.options["name"]
- else:
- # FIXME: handle :name: value of other declaration types?
- pass
- else:
- if namespace:
- fullname = namespace + "." + fullname
-
- return fullname
-
- def add_target_and_index(self, name, sig, signode):
- # for C API items we add a prefix since names are usually not qualified
- # by a module name and so easily clash with e.g. section titles
- targetname = 'c.' + name
- if targetname not in self.state.document.ids:
- signode['names'].append(targetname)
- signode['ids'].append(targetname)
- signode['first'] = (not self.names)
- self.state.document.note_explicit_target(signode)
- inv = self.env.domaindata['c']['objects']
- if (name in inv and self.env.config.nitpicky):
- if self.objtype == 'function':
- if ('c:func', name) not in self.env.config.nitpick_ignore:
- self.state_machine.reporter.warning(
- 'duplicate C object description of %s, ' % name +
- 'other instance in ' + self.env.doc2path(inv[name][0]),
- line=self.lineno)
- inv[name] = (self.env.docname, self.objtype)
-
- indextext = self.get_index_text(name)
- if indextext:
- self.indexnode['entries'].append(
- ('single', indextext, targetname, '', None))
-
-class CDomain(Base_CDomain):
-
- """C language domain."""
- name = 'c'
- label = 'C'
- directives = {
- 'function': CObject,
- 'member': CObject,
- 'macro': CObject,
- 'type': CObject,
- 'var': CObject,
- }
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index e3a51867f27b..aaac76892ceb 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -40,9 +40,11 @@ import sys
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
+
__version__ = '1.0'
def setup(app):
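
The ErrorString() helper added above replaces docutils.utils.error_reporting.ErrorString, which is gone from recent docutils releases. A minimal standalone sketch of what the helper produces (the file name below is only illustrative):

    def ErrorString(exc):  # same one-line formatter as in the patch
        return f'{exc.__class__.__name__}: {exc}'

    try:
        open("/no/such/file")
    except OSError as exc:
        # prints e.g.:
        # FileNotFoundError: [Errno 2] No such file or directory: '/no/such/file'
        print(ErrorString(exc))
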
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index 1e566e87ebcd..f94412cd17c9 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -1,31 +1,82 @@
#!/usr/bin/env python3
-# -*- coding: utf-8; mode: python -*-
# SPDX-License-Identifier: GPL-2.0
-# pylint: disable=R0903, C0330, R0914, R0912, E0401
+# pylint: disable=R0903, R0912, R0914, R0915, C0209,W0707
+
"""
- kernel-include
- ~~~~~~~~~~~~~~
+Implementation of the ``kernel-include`` reST-directive.
+
+:copyright: Copyright (C) 2016 Markus Heiser
+:license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+The ``kernel-include`` reST-directive is a replacement for the ``include``
+directive. The ``kernel-include`` directive expands environment variables in
+the path name and allows including files from arbitrary locations.
+
+.. hint::
+
+ Including files from arbitrary locations (e.g. from ``/etc``) is a
+ security risk for builders. This is why the ``include`` directive from
+ docutils *prohibits* pathnames pointing to locations *above* the filesystem
+ tree where the reST document with the include directive is placed.
+
+Substrings of the form $name or ${name} are replaced by the value of
+environment variable name. Malformed variable names and references to
+non-existing variables are left unchanged.
+
+**Supported Sphinx Include Options**:
+
+:param literal:
+ If present, the included file is inserted as a literal block.
+
+:param code:
+ Specify the language for syntax highlighting (e.g., 'c', 'python').
+
+:param encoding:
+ Specify the encoding of the included file (default: 'utf-8').
+
+:param tab-width:
+ Specify the number of spaces that a tab represents.
+
+:param start-line:
+ Line number at which to start including the file (1-based).
+
+:param end-line:
+ Line number at which to stop including the file (inclusive).
+
+:param start-after:
+ Include lines after the first line matching this text.
+
+:param end-before:
+ Include lines before the first line matching this text.
- Implementation of the ``kernel-include`` reST-directive.
+:param number-lines:
+ Number the included lines (integer specifies start number).
+ Only effective with 'literal' or 'code' options.
- :copyright: Copyright (C) 2016 Markus Heiser
- :license: GPL Version 2, June 1991 see linux/COPYING for details.
+:param class:
+ Specify HTML class attribute for the included content.
- The ``kernel-include`` reST-directive is a replacement for the ``include``
- directive. The ``kernel-include`` directive expand environment variables in
- the path name and allows to include files from arbitrary locations.
+**Kernel-specific Extensions**:
- .. hint::
+:param generate-cross-refs:
+ If present, instead of directly including the file, it calls
+ ParseDataStructs() to convert C data structures into cross-references
+ that link to comprehensive documentation in other ReST files.
- Including files from arbitrary locations (e.g. from ``/etc``) is a
- security risk for builders. This is why the ``include`` directive from
- docutils *prohibit* pathnames pointing to locations *above* the filesystem
- tree where the reST document with the include directive is placed.
+:param exception-file:
+ (Used with generate-cross-refs)
- Substrings of the form $name or ${name} are replaced by the value of
- environment variable name. Malformed variable names and references to
- non-existing variables are left unchanged.
+ Path to a file containing rules for handling special cases:
+ - Ignore specific C data structures
+ - Use alternative reference names
+ - Specify different reference types
+
+:param warn-broken:
+ (Used with generate-cross-refs)
+
+ Enables warnings when auto-generated cross-references don't point to
+ existing documentation targets.
"""
# ==============================================================================
@@ -33,161 +84,366 @@
# ==============================================================================
import os.path
+import re
+import sys
from docutils import io, nodes, statemachine
-from docutils.utils.error_reporting import SafeString, ErrorString
-from docutils.parsers.rst import directives
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
-from docutils.parsers.rst.directives.misc import Include
-__version__ = '1.0'
+from sphinx.util import logging
-# ==============================================================================
-def setup(app):
-# ==============================================================================
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/docs/lib"))
- app.add_directive("kernel-include", KernelInclude)
- return dict(
- version = __version__,
- parallel_read_safe = True,
- parallel_write_safe = True
- )
+from parse_data_structs import ParseDataStructs
-# ==============================================================================
-class KernelInclude(Include):
-# ==============================================================================
+__version__ = "1.0"
+logger = logging.getLogger(__name__)
- """KernelInclude (``kernel-include``) directive"""
+RE_DOMAIN_REF = re.compile(r'\\ :(ref|c:type|c:func):`([^<`]+)(?:<([^>]+)>)?`\\')
+RE_SIMPLE_REF = re.compile(r'`([^`]+)`')
- def run(self):
- env = self.state.document.settings.env
- path = os.path.realpath(
- os.path.expandvars(self.arguments[0]))
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
- # to get a bit security back, prohibit /etc:
- if path.startswith(os.sep + "etc"):
- raise self.severe(
- 'Problems with "%s" directive, prohibited path: %s'
- % (self.name, path))
- self.arguments[0] = path
+# ==============================================================================
+class KernelInclude(Directive):
+ """
+ KernelInclude (``kernel-include``) directive
- env.note_dependency(os.path.abspath(path))
+ Most of the code here came from the Include directive defined in
+ docutils/parsers/rst/directives/misc.py.
- #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
- return self._run()
+ Yet, overriding that class doesn't have any benefit: the original class
+ only has run() and an option list. Not all of its options are
+ implemented here, since newer Sphinx versions added more options
+ over time.
- def _run(self):
- """Include a file as part of the content of this reST file."""
+ So, this class keeps its own list of supported options.
+ """
- # HINT: I had to copy&paste the whole Include.run method. I'am not happy
- # with this, but due to security reasons, the Include.run method does
- # not allow absolute or relative pathnames pointing to locations *above*
- # the filesystem tree where the reST document is placed.
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = True
+ option_spec = {
+ 'literal': directives.flag,
+ 'code': directives.unchanged,
+ 'encoding': directives.encoding,
+ 'tab-width': int,
+ 'start-line': int,
+ 'end-line': int,
+ 'start-after': directives.unchanged_required,
+ 'end-before': directives.unchanged_required,
+ # ignored except for 'literal' or 'code':
+ 'number-lines': directives.unchanged, # integer or None
+ 'class': directives.class_option,
- if not self.state.document.settings.file_insertion_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
- source = self.state_machine.input_lines.source(
- self.lineno - self.state_machine.input_offset - 1)
- source_dir = os.path.dirname(os.path.abspath(source))
- path = directives.path(self.arguments[0])
- if path.startswith('<') and path.endswith('>'):
- path = os.path.join(self.standard_include_path, path[1:-1])
- path = os.path.normpath(os.path.join(source_dir, path))
+ # Arguments that aren't from Sphinx Include directive
+ 'generate-cross-refs': directives.flag,
+ 'warn-broken': directives.flag,
+ 'toc': directives.flag,
+ 'exception-file': directives.unchanged,
+ }
- # HINT: this is the only line I had to change / commented out:
- #path = utils.relative_path(None, path)
+ def read_rawtext(self, path, encoding):
+ """Read and process file content with error handling"""
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(source_path=path,
+ encoding=encoding,
+ error_handler=self.state.document.settings.input_encoding_error_handler)
+ except UnicodeEncodeError:
+ raise self.severe('Problems with directive path:\n'
+ 'Cannot encode input file path "%s" '
+ '(wrong locale?).' % path)
+ except IOError as error:
+ raise self.severe('Problems with directive path:\n%s.' % ErrorString(error))
- encoding = self.options.get(
- 'encoding', self.state.document.settings.input_encoding)
- e_handler=self.state.document.settings.input_encoding_error_handler
- tab_width = self.options.get(
- 'tab-width', self.state.document.settings.tab_width)
- try:
- self.state.document.settings.record_dependencies.add(path)
- include_file = io.FileInput(source_path=path,
- encoding=encoding,
- error_handler=e_handler)
- except UnicodeEncodeError as error:
- raise self.severe('Problems with "%s" directive path:\n'
- 'Cannot encode input file path "%s" '
- '(wrong locale?).' %
- (self.name, SafeString(path)))
- except IOError as error:
- raise self.severe('Problems with "%s" directive path:\n%s.' %
- (self.name, ErrorString(error)))
+ try:
+ return include_file.read()
+ except UnicodeError as error:
+ raise self.severe('Problem with directive:\n%s' % ErrorString(error))
+
+ def apply_range(self, rawtext):
+ """
+ Handles start-line, end-line, start-after and end-before parameters
+ """
+
+ # Get to-be-included content
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
- lines = include_file.readlines()
- rawtext = ''.join(lines[startline:endline])
- else:
- rawtext = include_file.read()
+ lines = rawtext.splitlines()
+ rawtext = '\n'.join(lines[startline:endline])
except UnicodeError as error:
- raise self.severe('Problem with "%s" directive:\n%s' %
- (self.name, ErrorString(error)))
+ raise self.severe(f'Problem with "{self.name}" directive:\n'
+ + io.error_string(error))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
- after_text = self.options.get('start-after', None)
+ after_text = self.options.get("start-after", None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
- 'directive:\nText not found.' % self.name)
- rawtext = rawtext[after_index + len(after_text):]
- before_text = self.options.get('end-before', None)
+ "directive:\nText not found." % self.name)
+ rawtext = rawtext[after_index + len(after_text) :]
+ before_text = self.options.get("end-before", None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
- 'directive:\nText not found.' % self.name)
+ "directive:\nText not found." % self.name)
rawtext = rawtext[:before_index]
+ return rawtext
+
+ def xref_text(self, env, path, tab_width):
+ """
+ Read and add contents from a C file parsed to have cross references.
+
+ There are two types of supported output here:
+ - A C source code with cross-references;
+ - a TOC table containing cross references.
+ """
+ parser = ParseDataStructs()
+ parser.parse_file(path)
+
+ if 'exception-file' in self.options:
+ source_dir = os.path.dirname(os.path.abspath(
+ self.state_machine.input_lines.source(
+ self.lineno - self.state_machine.input_offset - 1)))
+ exceptions_file = os.path.join(source_dir, self.options['exception-file'])
+ parser.process_exceptions(exceptions_file)
+
+ # Store references on a symbol dict to be used at check time
+ if 'warn-broken' in self.options:
+ env._xref_files.add(path)
+
+ if "toc" not in self.options:
+
+ rawtext = ".. parsed-literal::\n\n" + parser.gen_output()
+ rawtext = self.apply_range(rawtext)
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+
+ # Sphinx always blames the ".. <directive>" line, so placing
+ # line numbers here won't make any difference
+
+ self.state_machine.insert_input(include_lines, path)
+ return []
+
+ # TOC output is a ReST file, not a literal. So, we can add line
+ # numbers
+
+ rawtext = parser.gen_toc()
+
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
- if 'literal' in self.options:
- # Convert tabs to spaces, if `tab_width` is positive.
- if tab_width >= 0:
- text = rawtext.expandtabs(tab_width)
- else:
- text = rawtext
- literal_block = nodes.literal_block(rawtext, source=path,
- classes=self.options.get('class', []))
- literal_block.line = 1
- self.add_name(literal_block)
- if 'number-lines' in self.options:
- try:
- startline = int(self.options['number-lines'] or 1)
- except ValueError:
- raise self.error(':number-lines: with non-integer '
- 'start value')
- endline = startline + len(include_lines)
- if text.endswith('\n'):
- text = text[:-1]
- tokens = NumberLines([([], text)], startline, endline)
- for classes, value in tokens:
- if classes:
- literal_block += nodes.inline(value, value,
- classes=classes)
- else:
- literal_block += nodes.Text(value, value)
- else:
- literal_block += nodes.Text(text, text)
- return [literal_block]
- if 'code' in self.options:
- self.options['source'] = path
- codeblock = CodeBlock(self.name,
- [self.options.pop('code')], # arguments
- self.options,
- include_lines, # content
- self.lineno,
- self.content_offset,
- self.block_text,
- self.state,
- self.state_machine)
- return codeblock.run()
- self.state_machine.insert_input(include_lines, path)
+
+ # Append line numbers data
+
+ startline = self.options.get('start-line', None)
+
+ result = ViewList()
+ if startline and startline > 0:
+ offset = startline - 1
+ else:
+ offset = 0
+
+ for ln, line in enumerate(include_lines, start=offset):
+ result.append(line, path, ln)
+
+ self.state_machine.insert_input(result, path)
+
return []
+
+ def literal(self, path, tab_width, rawtext):
+ """Output a literal block"""
+
+ # Convert tabs to spaces, if `tab_width` is positive.
+ if tab_width >= 0:
+ text = rawtext.expandtabs(tab_width)
+ else:
+ text = rawtext
+ literal_block = nodes.literal_block(rawtext, source=path,
+ classes=self.options.get("class", []))
+ literal_block.line = 1
+ self.add_name(literal_block)
+ if "number-lines" in self.options:
+ try:
+ startline = int(self.options["number-lines"] or 1)
+ except ValueError:
+ raise self.error(":number-lines: with non-integer start value")
+ endline = startline + len(text.splitlines())
+ if text.endswith("\n"):
+ text = text[:-1]
+ tokens = NumberLines([([], text)], startline, endline)
+ for classes, value in tokens:
+ if classes:
+ literal_block += nodes.inline(value, value,
+ classes=classes)
+ else:
+ literal_block += nodes.Text(value, value)
+ else:
+ literal_block += nodes.Text(text, text)
+ return [literal_block]
+
+ def code(self, path, tab_width, rawtext):
+ """Output a code block"""
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+
+ self.options["source"] = path
+ codeblock = CodeBlock(self.name,
+ [self.options.pop("code")], # arguments
+ self.options,
+ include_lines,
+ self.lineno,
+ self.content_offset,
+ self.block_text,
+ self.state,
+ self.state_machine)
+ return codeblock.run()
+
+ def run(self):
+ """Include a file as part of the content of this reST file."""
+ env = self.state.document.settings.env
+
+ #
+ # The include logic accepts only paths relative to the
+ # Kernel source tree. The logic checks this to prevent
+ # directory traversal issues.
+ #
+
+ srctree = os.path.abspath(os.environ["srctree"])
+
+ path = os.path.expandvars(self.arguments[0])
+ src_path = os.path.join(srctree, path)
+
+ if os.path.isfile(src_path):
+ base = srctree
+ path = src_path
+ else:
+ raise self.warning(f'File "{path}" doesn\'t exist')
+
+ abs_base = os.path.abspath(base)
+ abs_full_path = os.path.abspath(os.path.join(base, path))
+
+ try:
+ if os.path.commonpath([abs_full_path, abs_base]) != abs_base:
+ raise self.severe('Problems with "%s" directive, prohibited path: %s' %
+ (self.name, path))
+ except ValueError:
+ # Paths don't have the same drive (Windows) or other incompatibility
+ raise self.severe('Problems with "%s" directive, invalid path: %s' %
+ (self.name, path))
+
+ self.arguments[0] = path
+
+ #
+ # Add path location to Sphinx dependencies to ensure proper cache
+ # invalidation check.
+ #
+
+ env.note_dependency(os.path.abspath(path))
+
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+ source = self.state_machine.input_lines.source(self.lineno -
+ self.state_machine.input_offset - 1)
+ source_dir = os.path.dirname(os.path.abspath(source))
+ path = directives.path(self.arguments[0])
+ if path.startswith("<") and path.endswith(">"):
+ path = os.path.join(self.standard_include_path, path[1:-1])
+ path = os.path.normpath(os.path.join(source_dir, path))
+
+ # HINT: this is the only line I had to change / commented out:
+ # path = utils.relative_path(None, path)
+
+ encoding = self.options.get("encoding",
+ self.state.document.settings.input_encoding)
+ tab_width = self.options.get("tab-width",
+ self.state.document.settings.tab_width)
+
+ # Handle the optional arguments related to cross-reference generation
+ if "generate-cross-refs" in self.options:
+ return self.xref_text(env, path, tab_width)
+
+ rawtext = self.read_rawtext(path, encoding)
+ rawtext = self.apply_range(rawtext)
+
+ if "code" in self.options:
+ return self.code(path, tab_width, rawtext)
+
+ return self.literal(path, tab_width, rawtext)
+
+# ==============================================================================
+
+reported = set()
+
+def check_missing_refs(app, env, node, contnode):
+ """Check broken refs for the files it creates xrefs"""
+ if not node.source:
+ return None
+
+ try:
+ xref_files = env._xref_files
+ except AttributeError:
+ logger.critical("FATAL: _xref_files not initialized!")
+ raise
+
+ # Only show missing references for kernel-include reference-parsed files
+ if node.source not in xref_files:
+ return None
+
+ target = node.get('reftarget', '')
+ domain = node.get('refdomain', 'std')
+ reftype = node.get('reftype', '')
+
+ msg = f"can't link to: {domain}:{reftype}:: {target}"
+
+ # Don't duplicate warnings
+ data = (node.source, msg)
+ if data in reported:
+ return None
+ reported.add(data)
+
+ logger.warning(msg, location=node, type='ref', subtype='missing')
+
+ return None
+
+def merge_xref_info(app, env, docnames, other):
+ """
+ As each process modifies env._xref_files, we need to merge them back.
+ """
+ if not hasattr(other, "_xref_files"):
+ return
+ env._xref_files.update(getattr(other, "_xref_files", set()))
+
+def init_xref_docs(app, env, docnames):
+ """Initialize a list of files that we're generating cross references¨"""
+ app.env._xref_files = set()
+
+# ==============================================================================
+
+def setup(app):
+ """Setup Sphinx exension"""
+
+ app.connect("env-before-read-docs", init_xref_docs)
+ app.connect("env-merge-info", merge_xref_info)
+ app.add_directive("kernel-include", KernelInclude)
+ app.connect("missing-reference", check_missing_refs)
+
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
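
The new run() confines every kernel-include path to the kernel source tree: the argument is resolved against $srctree and rejected unless os.path.commonpath() shows it stays below that base. A small sketch of that check in isolation (the base directory and candidate paths are made up for illustration):

    import os

    def confined_to(base, candidate):
        """Return True only if candidate resolves to a location inside base."""
        abs_base = os.path.abspath(base)
        abs_path = os.path.abspath(os.path.join(abs_base, candidate))
        try:
            return os.path.commonpath([abs_path, abs_base]) == abs_base
        except ValueError:
            # e.g. paths on different drives on Windows
            return False

    print(confined_to("/home/user/linux", "include/uapi/linux/videodev2.h"))  # True
    print(confined_to("/home/user/linux", "../../etc/passwd"))                # False
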
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
index d31cff867436..519ad18685b2 100755
--- a/Documentation/sphinx/maintainers_include.py
+++ b/Documentation/sphinx/maintainers_include.py
@@ -22,10 +22,12 @@ import re
import os.path
from docutils import statemachine
-from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.misc import Include
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
+
__version__ = '1.0'
def setup(app):
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
deleted file mode 100755
index 7b1458544e2e..000000000000
--- a/Documentation/sphinx/parse-headers.pl
+++ /dev/null
@@ -1,404 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>.
-
-use strict;
-use Text::Tabs;
-use Getopt::Long;
-use Pod::Usage;
-
-my $debug;
-my $help;
-my $man;
-
-GetOptions(
- "debug" => \$debug,
- 'usage|?' => \$help,
- 'help' => \$man
-) or pod2usage(2);
-
-pod2usage(1) if $help;
-pod2usage(-exitstatus => 0, -verbose => 2) if $man;
-pod2usage(2) if (scalar @ARGV < 2 || scalar @ARGV > 3);
-
-my ($file_in, $file_out, $file_exceptions) = @ARGV;
-
-my $data;
-my %ioctls;
-my %defines;
-my %typedefs;
-my %enums;
-my %enum_symbols;
-my %structs;
-
-require Data::Dumper if ($debug);
-
-#
-# read the file and get identifiers
-#
-
-my $is_enum = 0;
-my $is_comment = 0;
-open IN, $file_in or die "Can't open $file_in";
-while (<IN>) {
- $data .= $_;
-
- my $ln = $_;
- if (!$is_comment) {
- $ln =~ s,/\*.*(\*/),,g;
-
- $is_comment = 1 if ($ln =~ s,/\*.*,,);
- } else {
- if ($ln =~ s,^(.*\*/),,) {
- $is_comment = 0;
- } else {
- next;
- }
- }
-
- if ($is_enum && $ln =~ m/^\s*([_\w][\w\d_]+)\s*[\,=]?/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
- $n =~ tr/_/-/;
-
- $enum_symbols{$s} = "\\ :ref:`$s <$n>`\\ ";
-
- $is_enum = 0 if ($is_enum && m/\}/);
- next;
- }
- $is_enum = 0 if ($is_enum && m/\}/);
-
- if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+_IO/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
-
- $ioctls{$s} = "\\ :ref:`$s <$n>`\\ ";
- next;
- }
-
- if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
- $n =~ tr/_/-/;
-
- $defines{$s} = "\\ :ref:`$s <$n>`\\ ";
- next;
- }
-
- if ($ln =~ m/^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);/) {
- my $s = $2;
- my $n = $3;
-
- $typedefs{$n} = "\\ :c:type:`$n <$s>`\\ ";
- next;
- }
- if ($ln =~ m/^\s*enum\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*enum\s+([_\w][\w\d_]+)$/
- || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)$/) {
- my $s = $1;
-
- $enums{$s} = "enum :c:type:`$s`\\ ";
-
- $is_enum = $1;
- next;
- }
- if ($ln =~ m/^\s*struct\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*struct\s+([[_\w][\w\d_]+)$/
- || $ln =~ m/^\s*typedef\s*struct\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*typedef\s*struct\s+([[_\w][\w\d_]+)$/
- ) {
- my $s = $1;
-
- $structs{$s} = "struct $s\\ ";
- next;
- }
-}
-close IN;
-
-#
-# Handle multi-line typedefs
-#
-
-my @matches = ($data =~ m/typedef\s+struct\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,
- $data =~ m/typedef\s+enum\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,);
-foreach my $m (@matches) {
- my $s = $m;
-
- $typedefs{$s} = "\\ :c:type:`$s`\\ ";
- next;
-}
-
-#
-# Handle exceptions, if any
-#
-
-my %def_reftype = (
- "ioctl" => ":ref",
- "define" => ":ref",
- "symbol" => ":ref",
- "typedef" => ":c:type",
- "enum" => ":c:type",
- "struct" => ":c:type",
-);
-
-if ($file_exceptions) {
- open IN, $file_exceptions or die "Can't read $file_exceptions";
- while (<IN>) {
- next if (m/^\s*$/ || m/^\s*#/);
-
- # Parsers to ignore a symbol
-
- if (m/^ignore\s+ioctl\s+(\S+)/) {
- delete $ioctls{$1} if (exists($ioctls{$1}));
- next;
- }
- if (m/^ignore\s+define\s+(\S+)/) {
- delete $defines{$1} if (exists($defines{$1}));
- next;
- }
- if (m/^ignore\s+typedef\s+(\S+)/) {
- delete $typedefs{$1} if (exists($typedefs{$1}));
- next;
- }
- if (m/^ignore\s+enum\s+(\S+)/) {
- delete $enums{$1} if (exists($enums{$1}));
- next;
- }
- if (m/^ignore\s+struct\s+(\S+)/) {
- delete $structs{$1} if (exists($structs{$1}));
- next;
- }
- if (m/^ignore\s+symbol\s+(\S+)/) {
- delete $enum_symbols{$1} if (exists($enum_symbols{$1}));
- next;
- }
-
- # Parsers to replace a symbol
- my ($type, $old, $new, $reftype);
-
- if (m/^replace\s+(\S+)\s+(\S+)\s+(\S+)/) {
- $type = $1;
- $old = $2;
- $new = $3;
- } else {
- die "Can't parse $file_exceptions: $_";
- }
-
- if ($new =~ m/^\:c\:(data|func|macro|type)\:\`(.+)\`/) {
- $reftype = ":c:$1";
- $new = $2;
- } elsif ($new =~ m/\:ref\:\`(.+)\`/) {
- $reftype = ":ref";
- $new = $1;
- } else {
- $reftype = $def_reftype{$type};
- }
- $new = "$reftype:`$old <$new>`";
-
- if ($type eq "ioctl") {
- $ioctls{$old} = $new if (exists($ioctls{$old}));
- next;
- }
- if ($type eq "define") {
- $defines{$old} = $new if (exists($defines{$old}));
- next;
- }
- if ($type eq "symbol") {
- $enum_symbols{$old} = $new if (exists($enum_symbols{$old}));
- next;
- }
- if ($type eq "typedef") {
- $typedefs{$old} = $new if (exists($typedefs{$old}));
- next;
- }
- if ($type eq "enum") {
- $enums{$old} = $new if (exists($enums{$old}));
- next;
- }
- if ($type eq "struct") {
- $structs{$old} = $new if (exists($structs{$old}));
- next;
- }
-
- die "Can't parse $file_exceptions: $_";
- }
-}
-
-if ($debug) {
- print Data::Dumper->Dump([\%ioctls], [qw(*ioctls)]) if (%ioctls);
- print Data::Dumper->Dump([\%typedefs], [qw(*typedefs)]) if (%typedefs);
- print Data::Dumper->Dump([\%enums], [qw(*enums)]) if (%enums);
- print Data::Dumper->Dump([\%structs], [qw(*structs)]) if (%structs);
- print Data::Dumper->Dump([\%defines], [qw(*defines)]) if (%defines);
- print Data::Dumper->Dump([\%enum_symbols], [qw(*enum_symbols)]) if (%enum_symbols);
-}
-
-#
-# Align block
-#
-$data = expand($data);
-$data = " " . $data;
-$data =~ s/\n/\n /g;
-$data =~ s/\n\s+$/\n/g;
-$data =~ s/\n\s+\n/\n\n/g;
-
-#
-# Add escape codes for special characters
-#
-$data =~ s,([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^]),\\$1,g;
-
-$data =~ s,DEPRECATED,**DEPRECATED**,g;
-
-#
-# Add references
-#
-
-my $start_delim = "[ \n\t\(\=\*\@]";
-my $end_delim = "(\\s|,|\\\\=|\\\\:|\\;|\\\)|\\}|\\{)";
-
-foreach my $r (keys %ioctls) {
- my $s = $ioctls{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %defines) {
- my $s = $defines{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %enum_symbols) {
- my $s = $enum_symbols{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %enums) {
- my $s = $enums{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/enum\s+($r)$end_delim/$s$2/g;
-}
-
-foreach my $r (keys %structs) {
- my $s = $structs{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/struct\s+($r)$end_delim/$s$2/g;
-}
-
-foreach my $r (keys %typedefs) {
- my $s = $typedefs{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-$data =~ s/\\ ([\n\s])/\1/g;
-
-#
-# Generate output file
-#
-
-my $title = $file_in;
-$title =~ s,.*/,,;
-
-open OUT, "> $file_out" or die "Can't open $file_out";
-print OUT ".. -*- coding: utf-8; mode: rst -*-\n\n";
-print OUT "$title\n";
-print OUT "=" x length($title);
-print OUT "\n\n.. parsed-literal::\n\n";
-print OUT $data;
-close OUT;
-
-__END__
-
-=head1 NAME
-
-parse_headers.pl - parse a C file, in order to identify functions, structs,
-enums and defines and create cross-references to a Sphinx book.
-
-=head1 SYNOPSIS
-
-B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-
-Where <options> can be: --debug, --help or --usage.
-
-=head1 OPTIONS
-
-=over 8
-
-=item B<--debug>
-
-Put the script in verbose mode, useful for debugging.
-
-=item B<--usage>
-
-Prints a brief help message and exits.
-
-=item B<--help>
-
-Prints a more detailed help message and exits.
-
-=back
-
-=head1 DESCRIPTION
-
-Convert a C header or source file (C_FILE), into a ReStructured Text
-included via ..parsed-literal block with cross-references for the
-documentation files that describe the API. It accepts an optional
-EXCEPTIONS_FILE with describes what elements will be either ignored or
-be pointed to a non-default reference.
-
-The output is written at the (OUT_FILE).
-
-It is capable of identifying defines, functions, structs, typedefs,
-enums and enum symbols and create cross-references for all of them.
-It is also capable of distinguish #define used for specifying a Linux
-ioctl.
-
-The EXCEPTIONS_FILE contain two rules to allow ignoring a symbol or
-to replace the default references by a custom one.
-
-Please read Documentation/doc-guide/parse-headers.rst at the Kernel's
-tree for more details.
-
-=head1 BUGS
-
-Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
-
-=head1 COPYRIGHT
-
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>.
-
-License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
-
-This is free software: you are free to change and redistribute it.
-There is NO WARRANTY, to the extent permitted by law.
-
-=cut
diff --git a/Documentation/sphinx/parser_yaml.py b/Documentation/sphinx/parser_yaml.py
new file mode 100755
index 000000000000..634d84a202fc
--- /dev/null
+++ b/Documentation/sphinx/parser_yaml.py
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+"""
+Sphinx extension for processing YAML files
+"""
+
+import os
+import re
+import sys
+
+from pprint import pformat
+
+from docutils import statemachine
+from docutils.parsers.rst import Parser as RSTParser
+from docutils.parsers.rst import states
+from docutils.statemachine import ViewList
+
+from sphinx.util import logging
+from sphinx.parsers import Parser
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/net/ynl/pyynl/lib"))
+
+from doc_generator import YnlDocGenerator # pylint: disable=C0413
+
+logger = logging.getLogger(__name__)
+
+class YamlParser(Parser):
+ """
+ Kernel parser for YAML files.
+
+ This is a simple sphinx.Parser to handle yaml files inside the
+ Kernel tree that will be part of the built documentation.
+
+ The actual parser function is not contained here: the code was
+ written in a way that parsing yaml for different subsystems
+ can be done from a single dispatcher.
+
+ All it takes to parse a new class of YAML files is to add an import line:
+
+ from some_parser_code import NewYamlGenerator
+
+ to this module. Then add an instance of the parser with:
+
+ new_parser = NewYamlGenerator()
+
+ and add logic inside parse() to handle it based on the path,
+ like this:
+
+ if "/foo" in fname:
+ msg = self.new_parser.parse_yaml_file(fname)
+ """
+
+ supported = ('yaml', )
+
+ netlink_parser = YnlDocGenerator()
+
+ re_lineno = re.compile(r"\.\. LINENO ([0-9]+)$")
+
+ tab_width = 8
+
+ def rst_parse(self, inputstring, document, msg):
+ """
+ Receives ReST content that was previously converted by the
+ YAML parser, adding it to the document tree.
+ """
+
+ self.setup_parse(inputstring, document)
+
+ result = ViewList()
+
+ self.statemachine = states.RSTStateMachine(state_classes=states.state_classes,
+ initial_state='Body',
+ debug=document.reporter.debug_flag)
+
+ try:
+ # Parse message with RSTParser
+ lineoffset = 0
+
+ lines = statemachine.string2lines(msg, self.tab_width,
+ convert_whitespace=True)
+
+ for line in lines:
+ match = self.re_lineno.match(line)
+ if match:
+ lineoffset = int(match.group(1))
+ continue
+
+ result.append(line, document.current_source, lineoffset)
+
+ self.statemachine.run(result, document)
+
+ except Exception as e:
+ document.reporter.error("YAML parsing error: %s" % pformat(e))
+
+ self.finish_parse()
+
+ # Overrides docutils.parsers.Parser. See sphinx.parsers.RSTParser
+ def parse(self, inputstring, document):
+ """Check if a YAML is meant to be parsed."""
+
+ fname = document.current_source
+
+ # Handle netlink yaml specs
+ if "/netlink/specs/" in fname:
+ msg = self.netlink_parser.parse_yaml_file(fname)
+ self.rst_parse(inputstring, document, msg)
+
+ # All other yaml files are ignored
+
+def setup(app):
+ """Setup function for the Sphinx extension."""
+
+ # Add YAML parser
+ app.add_source_parser(YamlParser)
+ app.add_source_suffix('.yaml', 'yaml')
+
+ return {
+ 'version': '1.0',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
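
The YamlParser docstring above describes how further YAML flavors could be hooked into the same dispatcher. A rough sketch of what that would look like; NewYamlGenerator, its module and the "/foo/" path test are hypothetical placeholders, only YnlDocGenerator and the netlink path exist in the patch:

    from sphinx.parsers import Parser
    from doc_generator import YnlDocGenerator
    from some_parser_code import NewYamlGenerator   # hypothetical module/class

    class YamlParser(Parser):
        supported = ('yaml', )

        netlink_parser = YnlDocGenerator()
        new_parser = NewYamlGenerator()              # hypothetical instance

        def parse(self, inputstring, document):
            fname = document.current_source

            # Handle netlink yaml specs (as in the patch)
            if "/netlink/specs/" in fname:
                msg = self.netlink_parser.parse_yaml_file(fname)
                self.rst_parse(inputstring, document, msg)
            # Hypothetical extra dispatcher entry for another subsystem
            elif "/foo/" in fname:
                msg = self.new_parser.parse_yaml_file(fname)
                self.rst_parse(inputstring, document, msg)
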
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html
index 41f1efbe64bb..b84969bd31c4 100644
--- a/Documentation/sphinx/templates/kernel-toc.html
+++ b/Documentation/sphinx/templates/kernel-toc.html
@@ -1,4 +1,5 @@
-<!-- SPDX-License-Identifier: GPL-2.0 -->
+{# SPDX-License-Identifier: GPL-2.0 #}
+
{# Create a local TOC the kernel way #}
<p>
<h3 class="kernel-toc-contents">Contents</h3>
diff --git a/Documentation/sphinx/templates/translations.html b/Documentation/sphinx/templates/translations.html
index 8df5d42d8dcd..351586f41938 100644
--- a/Documentation/sphinx/templates/translations.html
+++ b/Documentation/sphinx/templates/translations.html
@@ -1,5 +1,5 @@
-<!-- SPDX-License-Identifier: GPL-2.0 -->
-<!-- Copyright © 2023, Oracle and/or its affiliates. -->
+{# SPDX-License-Identifier: GPL-2.0 #}
+{# Copyright © 2023, Oracle and/or its affiliates. #}
{# Create a language menu for translations #}
{% if languages|length > 0: %}