Diffstat (limited to 'Documentation/sphinx')
-rw-r--r--  Documentation/sphinx/automarkup.py                306
-rw-r--r--  Documentation/sphinx/kernel_abi.py                173
-rw-r--r--  Documentation/sphinx/kernel_feat.py               137
-rwxr-xr-x  Documentation/sphinx/kernel_include.py            527
-rw-r--r--  Documentation/sphinx/kerneldoc-preamble.sty       234
-rw-r--r--  Documentation/sphinx/kerneldoc.py                 313
-rw-r--r--  Documentation/sphinx/kfigure.py                   655
-rwxr-xr-x  Documentation/sphinx/maintainers_include.py       197
-rw-r--r--  Documentation/sphinx/min_requirements.txt          11
-rwxr-xr-x  Documentation/sphinx/parser_yaml.py               123
-rw-r--r--  Documentation/sphinx/requirements.txt               4
-rwxr-xr-x  Documentation/sphinx/rstFlatTable.py              365
-rw-r--r--  Documentation/sphinx/templates/kernel-toc.html     19
-rw-r--r--  Documentation/sphinx/templates/translations.html   15
-rw-r--r--  Documentation/sphinx/translations.py               99
15 files changed, 3178 insertions, 0 deletions
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
new file mode 100644
index 000000000000..1d9dada40a74
--- /dev/null
+++ b/Documentation/sphinx/automarkup.py
@@ -0,0 +1,306 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2019 Jonathan Corbet <corbet@lwn.net>
+#
+# Apply kernel-specific tweaks after the initial document processing
+# has been done.
+#
+from docutils import nodes
+import sphinx
+from sphinx import addnodes
+from sphinx.errors import NoUri
+import re
+from itertools import chain
+
+from kernel_abi import get_kernel_abi
+
+#
+# Regex nastiness. Of course.
+# Try to identify "function()" that's not already marked up some
+# other way. Sphinx doesn't like a lot of stuff right after a
+# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
+# bit tries to restrict matches to things that won't create trouble.
+#
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII)
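+#
+# For example (illustrative): in the text "call kmalloc() here", group(1)
+# matches "kmalloc()" and group(2) matches "kmalloc".
+#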
+
+#
+# Sphinx 3 uses a different C role for each one of struct, union, enum and
+# typedef
+#
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+
+#
+# Detects a reference to a documentation page of the form Documentation/... with
+# an optional extension
+#
+RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
+RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)')
+RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)')
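+#
+# For example (illustrative): RE_doc matches "Documentation/admin-guide/index.rst",
+# RE_abi_file matches "Documentation/ABI/stable/sysfs-devices-node", and
+# RE_abi_symbol matches "/sys/devices/system/node".
+#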
+
+RE_namespace = re.compile(r'^\s*\.\.\s*c:namespace::\s*(\S+)\s*$')
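+# (Matches namespace declarations such as ".. c:namespace:: MEDIA"; the
+# name "MEDIA" is an illustrative example.)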
+
+#
+# Reserved C words that we should skip when cross-referencing
+#
+Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
+
+
+#
+# Many places in the docs refer to common system calls. It is
+# pointless to try to cross-reference them and, as has been known
+# to happen, somebody defining a function by these names can lead
+# to the creation of incorrect and confusing cross references. So
+# just don't even try with these names.
+#
+Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
+ 'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
+ 'socket' ]
+
+c_namespace = ''
+
+#
+# Detect references to commits.
+#
+RE_git = re.compile(r'commit\s+(?P<rev>[0-9a-f]{12,40})(?:\s+\(".*?"\))?',
+ flags=re.IGNORECASE | re.DOTALL)
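+#
+# For example (illustrative), this matches both
+# 'commit 1da177e4c3f4 ("Linux-2.6.12-rc2")' and a bare "commit 0123456789ab".
+#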
+
+def markup_refs(docname, app, node):
+ t = node.astext()
+ done = 0
+ repl = [ ]
+ #
+ # Associate each regex with the function that will markup its matches
+ #
+
+ markup_func = {RE_doc: markup_doc_ref,
+ RE_abi_file: markup_abi_file_ref,
+ RE_abi_symbol: markup_abi_ref,
+ RE_function: markup_func_ref_sphinx3,
+ RE_struct: markup_c_ref,
+ RE_union: markup_c_ref,
+ RE_enum: markup_c_ref,
+ RE_typedef: markup_c_ref,
+ RE_git: markup_git}
+
+ match_iterators = [regex.finditer(t) for regex in markup_func]
+ #
+ # Sort all references by the starting position in text
+ #
+ sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start())
+ for m in sorted_matches:
+ #
+ # Include any text prior to match as a normal text node.
+ #
+ if m.start() > done:
+ repl.append(nodes.Text(t[done:m.start()]))
+
+ #
+        # Call the function associated with the regex that matched this text
+        # and append its return value to the result
+ #
+ repl.append(markup_func[m.re](docname, app, m))
+
+ done = m.end()
+ if done < len(t):
+ repl.append(nodes.Text(t[done:]))
+ return repl
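+
+#
+# For example (illustrative): for the text "see kmalloc() and struct device",
+# markup_refs() returns a list like
+# [Text("see "), <xref for kmalloc()>, Text(" and "), <xref for struct device>].
+#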
+
+#
+# Keep track of cross-reference lookups that failed so we don't have to
+# do them again.
+#
+failed_lookups = { }
+def failure_seen(target):
+    return target in failed_lookups
+def note_failure(target):
+ failed_lookups[target] = True
+
+#
+# In sphinx3 we can cross-reference C macros and functions, each with its
+# own C role, but both match the same regex, so we try both.
+#
+def markup_func_ref_sphinx3(docname, app, match):
+ base_target = match.group(2)
+ target_text = nodes.Text(match.group(0))
+ possible_targets = [base_target]
+ # Check if this document has a namespace, and if so, try
+ # cross-referencing inside it first.
+ if c_namespace:
+ possible_targets.insert(0, c_namespace + "." + base_target)
+
+ if base_target not in Skipnames:
+ for target in possible_targets:
+ if (target not in Skipfuncs) and not failure_seen(target):
+ lit_text = nodes.literal(classes=['xref', 'c', 'c-func'])
+ lit_text += target_text
+ xref = add_and_resolve_xref(app, docname, 'c', 'function',
+ target, contnode=lit_text)
+ if xref:
+ return xref
+ note_failure(target)
+
+ return target_text
+
+def markup_c_ref(docname, app, match):
+ class_str = {RE_struct: 'c-struct',
+ RE_union: 'c-union',
+ RE_enum: 'c-enum',
+ RE_typedef: 'c-type',
+ }
+ reftype_str = {RE_struct: 'struct',
+ RE_union: 'union',
+ RE_enum: 'enum',
+ RE_typedef: 'type',
+ }
+
+ base_target = match.group(2)
+ target_text = nodes.Text(match.group(0))
+ possible_targets = [base_target]
+ # Check if this document has a namespace, and if so, try
+ # cross-referencing inside it first.
+ if c_namespace:
+ possible_targets.insert(0, c_namespace + "." + base_target)
+
+ if base_target not in Skipnames:
+ for target in possible_targets:
+ if not (match.re == RE_function and target in Skipfuncs):
+ lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
+ lit_text += target_text
+ xref = add_and_resolve_xref(app, docname, 'c',
+ reftype_str[match.re], target,
+ contnode=lit_text)
+ if xref:
+ return xref
+
+ return target_text
+
+#
+# Try to replace a documentation reference of the form Documentation/... with a
+# cross reference to that page
+#
+def markup_doc_ref(docname, app, match):
+ absolute = match.group(1)
+ target = match.group(2)
+ if absolute:
+ target = "/" + target
+
+ xref = add_and_resolve_xref(app, docname, 'std', 'doc', target)
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+#
+# Try to replace a documentation reference for ABI symbols and files
+# with a cross reference to that page
+#
+def markup_abi_ref(docname, app, match, warning=False):
+ kernel_abi = get_kernel_abi()
+
+ fname = match.group(1)
+ target = kernel_abi.xref(fname)
+
+ # Kernel ABI doesn't describe such file or symbol
+ if not target:
+ if warning:
+ kernel_abi.log.warning("%s not found", fname)
+ return nodes.Text(match.group(0))
+
+ xref = add_and_resolve_xref(app, docname, 'std', 'ref', target)
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None):
+ #
+ # Go through the dance of getting an xref out of the corresponding domain
+ #
+ dom_obj = app.env.domains[domain]
+ pxref = addnodes.pending_xref('', refdomain = domain, reftype = reftype,
+ reftarget = target, modname = None,
+ classname = None, refexplicit = False)
+
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = dom_obj.resolve_xref(app.env, docname, app.builder, reftype,
+ target, pxref, contnode)
+ except NoUri:
+ xref = None
+
+ if xref:
+ return xref
+ #
+ # We didn't find the xref; if a container node was supplied,
+ # mark it as a broken xref
+ #
+ if contnode:
+ contnode['classes'].append("broken_xref")
+ return contnode
+
+#
+# Variant of markup_abi_ref() that warns when a reference is not found
+#
+def markup_abi_file_ref(docname, app, match):
+ return markup_abi_ref(docname, app, match, warning=True)
+
+
+def get_c_namespace(app, docname):
+ source = app.env.doc2path(docname)
+ with open(source) as f:
+ for l in f:
+ match = RE_namespace.search(l)
+ if match:
+ return match.group(1)
+ return ''
+
+def markup_git(docname, app, match):
+ # While we could probably assume that we are running in a git
+ # repository, we can't know for sure, so let's just mechanically
+ # turn them into git.kernel.org links without checking their
+ # validity. (Maybe we can do something in the future to warn about
+ # these references if this is explicitly requested.)
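+    #
+    # For example (illustrative): 'commit 1da177e4c3f4 ("Linux-2.6.12-rc2")'
+    # becomes a link to https://git.kernel.org/torvalds/c/1da177e4c3f4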
+ text = match.group(0)
+ rev = match.group('rev')
+ return nodes.reference('', nodes.Text(text),
+ refuri=f'https://git.kernel.org/torvalds/c/{rev}')
+
+def auto_markup(app, doctree, name):
+ global c_namespace
+ c_namespace = get_c_namespace(app, name)
+ def text_but_not_a_reference(node):
+        # The nodes.literal test catches ``literal text``; its purpose is to
+        # avoid adding cross-references to functions that have been explicitly
+        # marked up with :c:func:.
+ if not isinstance(node, nodes.Text) or isinstance(node.parent, nodes.literal):
+ return False
+
+ child_of_reference = False
+ parent = node.parent
+ while parent:
+ if isinstance(parent, nodes.Referential):
+ child_of_reference = True
+ break
+ parent = parent.parent
+ return not child_of_reference
+
+ #
+ # This loop could eventually be improved on. Someday maybe we
+ # want a proper tree traversal with a lot of awareness of which
+ # kinds of nodes to prune. But this works well for now.
+ #
+ for para in doctree.traverse(nodes.paragraph):
+ for node in para.traverse(condition=text_but_not_a_reference):
+ node.parent.replace(node, markup_refs(name, app, node))
+
+def setup(app):
+ app.connect('doctree-resolved', auto_markup)
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
new file mode 100644
index 000000000000..5667f207d175
--- /dev/null
+++ b/Documentation/sphinx/kernel_abi.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8; mode: python -*-
+# coding=utf-8
+# SPDX-License-Identifier: GPL-2.0
+#
+"""
+ kernel-abi
+ ~~~~~~~~~~
+
+ Implementation of the ``kernel-abi`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :copyright: Copyright (C) 2016-2020 Mauro Carvalho Chehab
+ :maintained-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+ The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
+ AbiParser class to parse the Kernel ABI files.
+
+    Overview of the directive's argument and options.
+
+ .. code-block:: rst
+
+ .. kernel-abi:: <ABI directory location>
+ :debug:
+
+ The argument ``<ABI directory location>`` is required. It contains the
+ location of the ABI files to be parsed.
+
+ ``debug``
+ Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
+ what reST is generated.
+
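+    Two more options are accepted (see ``option_spec`` in the code below):
+
+    ``no-symbols``
+      Do not output ABI symbol entries.
+
+    ``no-files``
+      Do not output ABI file references.
+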
+"""
+
+import os
+import re
+import sys
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from abi.abi_parser import AbiParser
+
+__version__ = "1.0"
+
+logger = logging.getLogger('kernel_abi')
+path = os.path.join(srctree, "Documentation/ABI")
+
+_kernel_abi = None
+
+def get_kernel_abi():
+ """
+ Initialize kernel_abi global var, if not initialized yet.
+
+ This is needed to avoid warnings during Sphinx module initialization.
+ """
+ global _kernel_abi
+
+ if not _kernel_abi:
+ # Parse ABI symbols only once
+ _kernel_abi = AbiParser(path, logger=logger)
+ _kernel_abi.parse_abi()
+ _kernel_abi.check_issues()
+
+ return _kernel_abi
+
+def setup(app):
+
+ app.add_directive("kernel-abi", KernelCmd)
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True
+ }
+
+
+class KernelCmd(Directive):
+ """KernelABI (``kernel-abi``) directive"""
+
+ required_arguments = 1
+ optional_arguments = 3
+ has_content = False
+ final_argument_whitespace = True
+ parser = None
+
+ option_spec = {
+ "debug": directives.flag,
+ "no-symbols": directives.flag,
+ "no-files": directives.flag,
+ }
+
+ def run(self):
+ kernel_abi = get_kernel_abi()
+
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+ env = self.state.document.settings.env
+ content = ViewList()
+ node = nodes.section()
+
+ abi_type = self.arguments[0]
+
+ if "no-symbols" in self.options:
+ show_symbols = False
+ else:
+ show_symbols = True
+
+ if "no-files" in self.options:
+ show_file = False
+ else:
+ show_file = True
+
+ tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
+
+ old_f = None
+ n = 0
+ n_sym = 0
+ for msg, f, ln in kernel_abi.doc(show_file=show_file,
+ show_symbols=show_symbols,
+ filter_path=abi_type):
+ n_sym += 1
+ msg_list = statemachine.string2lines(msg, tab_width,
+ convert_whitespace=True)
+ if "debug" in self.options:
+ lines = [
+ "", "", ".. code-block:: rst",
+ " :linenos:", ""
+ ]
+ for m in msg_list:
+ lines.append(" " + m)
+ else:
+ lines = msg_list
+
+ for line in lines:
+ # sphinx counts lines from 0
+ content.append(line, f, ln - 1)
+ n += 1
+
+ if f != old_f:
+ # Add the file to Sphinx build dependencies if the file exists
+ fname = os.path.join(srctree, f)
+ if os.path.isfile(fname):
+ env.note_dependency(fname)
+
+ old_f = f
+
+ # Sphinx doesn't like to parse big messages. So, let's
+ # add content symbol by symbol
+ if content:
+ self.do_parse(content, node)
+ content = ViewList()
+
+ if show_symbols and not show_file:
+ logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n))
+ elif not show_symbols and show_file:
+ logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n))
+ else:
+ logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n))
+
+ return node.children
+
+ def do_parse(self, content, node):
+ with switch_source_input(self.state, content):
+ self.state.nested_parse(content, 0, node, match_titles=1)
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
new file mode 100644
index 000000000000..bdc0fef5c87f
--- /dev/null
+++ b/Documentation/sphinx/kernel_feat.py
@@ -0,0 +1,137 @@
+# coding=utf-8
+# SPDX-License-Identifier: GPL-2.0
+#
+"""
+ kernel-feat
+ ~~~~~~~~~~~
+
+ Implementation of the ``kernel-feat`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :copyright: Copyright (C) 2016-2019 Mauro Carvalho Chehab
+ :maintained-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+    The ``kernel-feat`` (:py:class:`KernelFeat`) directive uses the
+    ``feat.parse_features`` parser to parse the kernel feature files.
+
+    Overview of the directive's argument and options.
+
+    .. code-block:: rst
+
+        .. kernel-feat:: <feature directory location>
+           :debug:
+
+    The argument ``<feature directory location>`` is required. It contains the
+    location of the feature files to be parsed.
+
+ ``debug``
+ Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
+ what reST is generated.
+
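+    An optional second argument selects a single architecture; in that case
+    a table of features for that architecture is generated instead of the
+    full feature matrix.
+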
+"""
+
+import codecs
+import os
+import re
+import sys
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+from sphinx.util.docutils import switch_source_input
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from feat.parse_features import ParseFeature # pylint: disable=C0413
+
+def ErrorString(exc): # Shamelessly stolen from docutils
+    return f'{exc.__class__.__name__}: {exc}'
+
+__version__ = '1.0'
+
+def setup(app):
+
+ app.add_directive("kernel-feat", KernelFeat)
+ return dict(
+        version = __version__,
+        parallel_read_safe = True,
+        parallel_write_safe = True
+ )
+
+class KernelFeat(Directive):
+
+ """KernelFeat (``kernel-feat``) directive"""
+
+ required_arguments = 1
+ optional_arguments = 2
+ has_content = False
+ final_argument_whitespace = True
+
+ option_spec = {
+ "debug" : directives.flag
+ }
+
+ def warn(self, message, **replace):
+ replace["fname"] = self.state.document.current_source
+ replace["line_no"] = replace.get("line_no", self.lineno)
+ message = ("%(fname)s:%(line_no)s: [kernel-feat WARN] : " + message) % replace
+ self.state.document.settings.env.app.warn(message, prefix="")
+
+ def run(self):
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+ env = doc.settings.env
+
+ srctree = os.path.abspath(os.environ["srctree"])
+
+ feature_dir = os.path.join(srctree, 'Documentation', self.arguments[0])
+
+ feat = ParseFeature(feature_dir, False, True)
+ feat.parse()
+
+ if len(self.arguments) > 1:
+ arch = self.arguments[1]
+ lines = feat.output_arch_table(arch)
+ else:
+ lines = feat.output_matrix()
+
+ line_regex = re.compile(r"^\.\. FILE (\S+)$")
+
+ out_lines = ""
+
+ for line in lines.split("\n"):
+ match = line_regex.search(line)
+ if match:
+ fname = match.group(1)
+
+ # Add the file to Sphinx build dependencies
+ env.note_dependency(os.path.abspath(fname))
+ else:
+ out_lines += line + "\n"
+
+ nodeList = self.nestedParse(out_lines, self.arguments[0])
+ return nodeList
+
+ def nestedParse(self, lines, fname):
+ content = ViewList()
+ node = nodes.section()
+
+ if "debug" in self.options:
+ code_block = "\n\n.. code-block:: rst\n :linenos:\n"
+ for l in lines.split("\n"):
+ code_block += "\n " + l
+ lines = code_block + "\n\n"
+
+ for c, l in enumerate(lines.split("\n")):
+ content.append(l, fname, c)
+
+
+ with switch_source_input(self.state, content):
+ self.state.nested_parse(content, 0, node, match_titles=1)
+
+ return node.children
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
new file mode 100755
index 000000000000..626762ff6af3
--- /dev/null
+++ b/Documentation/sphinx/kernel_include.py
@@ -0,0 +1,527 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# pylint: disable=R0903, R0912, R0914, R0915, C0209,W0707
+
+
+"""
+Implementation of the ``kernel-include`` reST-directive.
+
+:copyright: Copyright (C) 2016 Markus Heiser
+:license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+The ``kernel-include`` reST-directive is a replacement for the ``include``
+directive. The ``kernel-include`` directive expands environment variables in
+the path name and allows including files from arbitrary locations.
+
+.. hint::
+
+ Including files from arbitrary locations (e.g. from ``/etc``) is a
+ security risk for builders. This is why the ``include`` directive from
+   docutils *prohibits* pathnames pointing to locations *above* the filesystem
+ tree where the reST document with the include directive is placed.
+
+Substrings of the form $name or ${name} are replaced by the value of the
+environment variable ``name``. Malformed variable names and references to
+non-existing variables are left unchanged.
+
+**Supported Sphinx Include Options**:
+
+:param literal:
+ If present, the included file is inserted as a literal block.
+
+:param code:
+ Specify the language for syntax highlighting (e.g., 'c', 'python').
+
+:param encoding:
+ Specify the encoding of the included file (default: 'utf-8').
+
+:param tab-width:
+ Specify the number of spaces that a tab represents.
+
+:param start-line:
+ Line number at which to start including the file (1-based).
+
+:param end-line:
+ Line number at which to stop including the file (inclusive).
+
+:param start-after:
+ Include lines after the first line matching this text.
+
+:param end-before:
+ Include lines before the first line matching this text.
+
+:param number-lines:
+ Number the included lines (integer specifies start number).
+ Only effective with 'literal' or 'code' options.
+
+:param class:
+ Specify HTML class attribute for the included content.
+
+**Kernel-specific Extensions**:
+
+:param generate-cross-refs:
+ If present, instead of directly including the file, it calls
+ ParseDataStructs() to convert C data structures into cross-references
+ that link to comprehensive documentation in other ReST files.
+
+:param exception-file:
+ (Used with generate-cross-refs)
+
+ Path to a file containing rules for handling special cases:
+ - Ignore specific C data structures
+ - Use alternative reference names
+ - Specify different reference types
+
+:param warn-broken:
+ (Used with generate-cross-refs)
+
+ Enables warnings when auto-generated cross-references don't point to
+ existing documentation targets.
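+
+**Example** (the path shown is hypothetical, for illustration only):
+
+.. code-block:: rst
+
+    .. kernel-include:: $BUILDDIR/media/example.h.rst
+       :literal: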
+"""
+
+# ==============================================================================
+# imports
+# ==============================================================================
+
+import os.path
+import re
+import sys
+
+from difflib import get_close_matches
+
+from docutils import io, nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive, directives
+from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
+
+from sphinx.util import logging
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from kdoc.parse_data_structs import ParseDataStructs
+
+__version__ = "1.0"
+logger = logging.getLogger(__name__)
+
+RE_DOMAIN_REF = re.compile(r'\\ :(ref|c:type|c:func):`([^<`]+)(?:<([^>]+)>)?`\\')
+RE_SIMPLE_REF = re.compile(r'`([^`]+)`')
+RE_LINENO_REF = re.compile(r'^\s*-\s+LINENO_(\d+):\s+(.*)')
+RE_SPLIT_DOMAIN = re.compile(r"(.*)\.(.*)")
+
+def ErrorString(exc): # Shamelessly stolen from docutils
+    return f'{exc.__class__.__name__}: {exc}'
+
+
+# ==============================================================================
+class KernelInclude(Directive):
+ """
+ KernelInclude (``kernel-include``) directive
+
+    Most of the code here came from the Include directive defined in
+    docutils/parsers/rst/directives/misc.py
+
+    Yet, overriding that class doesn't have any benefits: the original class
+    only has run() and an argument list, and not all of its arguments are
+    implemented here, as more were added over time in newer docutils
+    versions.
+
+    So, keep our own list of supported arguments.
+ """
+
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = True
+ option_spec = {
+ 'literal': directives.flag,
+ 'code': directives.unchanged,
+ 'encoding': directives.encoding,
+ 'tab-width': int,
+ 'start-line': int,
+ 'end-line': int,
+ 'start-after': directives.unchanged_required,
+ 'end-before': directives.unchanged_required,
+ # ignored except for 'literal' or 'code':
+ 'number-lines': directives.unchanged, # integer or None
+ 'class': directives.class_option,
+
+ # Arguments that aren't from Sphinx Include directive
+ 'generate-cross-refs': directives.flag,
+ 'warn-broken': directives.flag,
+ 'toc': directives.flag,
+ 'exception-file': directives.unchanged,
+ }
+
+ def read_rawtext(self, path, encoding):
+ """Read and process file content with error handling"""
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(source_path=path,
+ encoding=encoding,
+ error_handler=self.state.document.settings.input_encoding_error_handler)
+ except UnicodeEncodeError:
+ raise self.severe('Problems with directive path:\n'
+ 'Cannot encode input file path "%s" '
+ '(wrong locale?).' % path)
+ except IOError as error:
+ raise self.severe('Problems with directive path:\n%s.' % ErrorString(error))
+
+ try:
+ return include_file.read()
+ except UnicodeError as error:
+ raise self.severe('Problem with directive:\n%s' % ErrorString(error))
+
+ def apply_range(self, rawtext):
+ """
+ Handles start-line, end-line, start-after and end-before parameters
+ """
+
+ # Get to-be-included content
+ startline = self.options.get('start-line', None)
+ endline = self.options.get('end-line', None)
+ try:
+ if startline or (endline is not None):
+ lines = rawtext.splitlines()
+ rawtext = '\n'.join(lines[startline:endline])
+ except UnicodeError as error:
+ raise self.severe(f'Problem with "{self.name}" directive:\n'
+                              + ErrorString(error))
+ # start-after/end-before: no restrictions on newlines in match-text,
+ # and no restrictions on matching inside lines vs. line boundaries
+ after_text = self.options.get("start-after", None)
+ if after_text:
+ # skip content in rawtext before *and incl.* a matching text
+ after_index = rawtext.find(after_text)
+ if after_index < 0:
+ raise self.severe('Problem with "start-after" option of "%s" '
+ "directive:\nText not found." % self.name)
+ rawtext = rawtext[after_index + len(after_text) :]
+ before_text = self.options.get("end-before", None)
+ if before_text:
+ # skip content in rawtext after *and incl.* a matching text
+ before_index = rawtext.find(before_text)
+ if before_index < 0:
+ raise self.severe('Problem with "end-before" option of "%s" '
+ "directive:\nText not found." % self.name)
+ rawtext = rawtext[:before_index]
+
+ return rawtext
+
+ def xref_text(self, env, path, tab_width):
+ """
+ Read and add contents from a C file parsed to have cross references.
+
+ There are two types of supported output here:
+ - A C source code with cross-references;
+ - a TOC table containing cross references.
+ """
+ parser = ParseDataStructs()
+
+ if 'exception-file' in self.options:
+ source_dir = os.path.dirname(os.path.abspath(
+ self.state_machine.input_lines.source(
+ self.lineno - self.state_machine.input_offset - 1)))
+ exceptions_file = os.path.join(source_dir, self.options['exception-file'])
+ else:
+ exceptions_file = None
+
+ parser.parse_file(path, exceptions_file)
+
+ # Store references on a symbol dict to be used at check time
+ if 'warn-broken' in self.options:
+ env._xref_files.add(path)
+
+ if "toc" not in self.options:
+
+ rawtext = ".. parsed-literal::\n\n" + parser.gen_output()
+            rawtext = self.apply_range(rawtext)
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+
+            # Sphinx always blames the ".. <directive>" line, so placing
+ # line numbers here won't make any difference
+
+ self.state_machine.insert_input(include_lines, path)
+ return []
+
+ # TOC output is a ReST file, not a literal. So, we can add line
+ # numbers
+
+ startline = self.options.get('start-line', None)
+ endline = self.options.get('end-line', None)
+
+ relpath = os.path.relpath(path, srctree)
+
+ result = ViewList()
+ for line in parser.gen_toc().split("\n"):
+ match = RE_LINENO_REF.match(line)
+ if not match:
+ result.append(line, path)
+ continue
+
+ ln, ref = match.groups()
+ ln = int(ln)
+
+ # Filter line range if needed
+ if startline and (ln < startline):
+ continue
+
+ if endline and (ln > endline):
+ continue
+
+            # Sphinx numbers lines starting with zero, but text editors
+ # and other tools start from one
+ realln = ln + 1
+ result.append(f"- {ref}: {relpath}#{realln}", path, ln)
+
+ self.state_machine.insert_input(result, path)
+
+ return []
+
+ def literal(self, path, tab_width, rawtext):
+ """Output a literal block"""
+
+ # Convert tabs to spaces, if `tab_width` is positive.
+ if tab_width >= 0:
+ text = rawtext.expandtabs(tab_width)
+ else:
+ text = rawtext
+ literal_block = nodes.literal_block(rawtext, source=path,
+ classes=self.options.get("class", []))
+ literal_block.line = 1
+ self.add_name(literal_block)
+ if "number-lines" in self.options:
+ try:
+ startline = int(self.options["number-lines"] or 1)
+ except ValueError:
+ raise self.error(":number-lines: with non-integer start value")
+            endline = startline + len(text.splitlines())
+ if text.endswith("\n"):
+ text = text[:-1]
+ tokens = NumberLines([([], text)], startline, endline)
+ for classes, value in tokens:
+ if classes:
+ literal_block += nodes.inline(value, value,
+ classes=classes)
+ else:
+ literal_block += nodes.Text(value, value)
+ else:
+ literal_block += nodes.Text(text, text)
+ return [literal_block]
+
+    def code(self, path, tab_width, rawtext):
+ """Output a code block"""
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+
+ self.options["source"] = path
+ codeblock = CodeBlock(self.name,
+ [self.options.pop("code")], # arguments
+ self.options,
+ include_lines,
+ self.lineno,
+ self.content_offset,
+ self.block_text,
+ self.state,
+ self.state_machine)
+ return codeblock.run()
+
+ def run(self):
+ """Include a file as part of the content of this reST file."""
+ env = self.state.document.settings.env
+
+ #
+        # The include logic accepts only paths relative to the
+        # kernel source tree. The logic checks this to prevent
+        # directory traversal issues.
+ #
+
+ srctree = os.path.abspath(os.environ["srctree"])
+
+ path = os.path.expandvars(self.arguments[0])
+ src_path = os.path.join(srctree, path)
+
+ if os.path.isfile(src_path):
+ base = srctree
+ path = src_path
+ else:
+            raise self.warning(f'File "{path}" doesn\'t exist')
+
+ abs_base = os.path.abspath(base)
+ abs_full_path = os.path.abspath(os.path.join(base, path))
+
+ try:
+ if os.path.commonpath([abs_full_path, abs_base]) != abs_base:
+ raise self.severe('Problems with "%s" directive, prohibited path: %s' %
+ (self.name, path))
+ except ValueError:
+ # Paths don't have the same drive (Windows) or other incompatibility
+ raise self.severe('Problems with "%s" directive, invalid path: %s' %
+ (self.name, path))
+
+ self.arguments[0] = path
+
+ #
+ # Add path location to Sphinx dependencies to ensure proper cache
+ # invalidation check.
+ #
+
+ env.note_dependency(os.path.abspath(path))
+
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+ source = self.state_machine.input_lines.source(self.lineno -
+ self.state_machine.input_offset - 1)
+ source_dir = os.path.dirname(os.path.abspath(source))
+ path = directives.path(self.arguments[0])
+ if path.startswith("<") and path.endswith(">"):
+ path = os.path.join(self.standard_include_path, path[1:-1])
+ path = os.path.normpath(os.path.join(source_dir, path))
+
+ # HINT: this is the only line I had to change / commented out:
+ # path = utils.relative_path(None, path)
+
+ encoding = self.options.get("encoding",
+ self.state.document.settings.input_encoding)
+ tab_width = self.options.get("tab-width",
+ self.state.document.settings.tab_width)
+
+        # Get optional arguments related to cross-reference generation
+ if "generate-cross-refs" in self.options:
+ return self.xref_text(env, path, tab_width)
+
+ rawtext = self.read_rawtext(path, encoding)
+ rawtext = self.apply_range(rawtext)
+
+ if "code" in self.options:
+ return self.code(path, tab_width, rawtext)
+
+ return self.literal(path, tab_width, rawtext)
+
+# ==============================================================================
+
+reported = set()
+DOMAIN_INFO = {}
+all_refs = {}
+
+def fill_domain_info(env):
+ """
+ Get supported reference types for each Sphinx domain and C namespaces
+ """
+ if DOMAIN_INFO:
+ return
+
+ for domain_name, domain_instance in env.domains.items():
+ try:
+ object_types = list(domain_instance.object_types.keys())
+ DOMAIN_INFO[domain_name] = object_types
+ except AttributeError:
+            # Ignore domains for which we can't retrieve object types, if any
+ pass
+
+ for domain in DOMAIN_INFO.keys():
+ domain_obj = env.get_domain(domain)
+ for name, dispname, objtype, docname, anchor, priority in domain_obj.get_objects():
+ ref_name = name.lower()
+
+ if domain == "c":
+ if '.' in ref_name:
+ ref_name = ref_name.split(".")[-1]
+
+            if ref_name not in all_refs:
+ all_refs[ref_name] = []
+
+ all_refs[ref_name].append(f"\t{domain}:{objtype}:`{name}` (from {docname})")
+
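+# For illustration (hypothetical entry): after fill_domain_info() runs,
+# all_refs["v4l2_format"] could contain a line such as
+# "\tc:struct:`v4l2_format` (from userspace-api/media/v4l/pixfmt)".
+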
+def get_suggestions(app, env, node,
+ original_target, original_domain, original_reftype):
+ """Check if target exists in the other domain or with different reftypes."""
+ original_target = original_target.lower()
+
+ # Remove namespace if present
+ if original_domain == "c":
+ if '.' in original_target:
+ original_target = original_target.split(".")[-1]
+
+ suggestions = []
+
+ # If name exists, propose exact name match on different domains
+ if original_target in all_refs:
+ return all_refs[original_target]
+
+ # If not found, get a close match, using difflib.
+    # This method is based on the Ratcliff-Obershelp algorithm, which seeks
+    # a close match within a certain distance. We're using the defaults
+    # here, i.e. cutoff=0.6, proposing up to 3 alternatives.
+ matches = get_close_matches(original_target, all_refs.keys())
+ for match in matches:
+ suggestions += all_refs[match]
+
+ return suggestions
+
+def check_missing_refs(app, env, node, contnode):
+ """Check broken refs for the files it creates xrefs"""
+ if not node.source:
+ return None
+
+ try:
+ xref_files = env._xref_files
+ except AttributeError:
+ logger.critical("FATAL: _xref_files not initialized!")
+ raise
+
+ # Only show missing references for kernel-include reference-parsed files
+ if node.source not in xref_files:
+ return None
+
+ fill_domain_info(env)
+
+ target = node.get('reftarget', '')
+ domain = node.get('refdomain', 'std')
+ reftype = node.get('reftype', '')
+
+ msg = f"Invalid xref: {domain}:{reftype}:`{target}`"
+
+ # Don't duplicate warnings
+ data = (node.source, msg)
+ if data in reported:
+ return None
+ reported.add(data)
+
+ suggestions = get_suggestions(app, env, node, target, domain, reftype)
+ if suggestions:
+ msg += ". Possible alternatives:\n" + '\n'.join(suggestions)
+
+ logger.warning(msg, location=node, type='ref', subtype='missing')
+
+ return None
+
+def merge_xref_info(app, env, docnames, other):
+ """
+    As each process modifies env._xref_files, we need to merge them back.
+ """
+ if not hasattr(other, "_xref_files"):
+ return
+ env._xref_files.update(getattr(other, "_xref_files", set()))
+
+def init_xref_docs(app, env, docnames):
+ """Initialize a list of files that we're generating cross references¨"""
+ app.env._xref_files = set()
+
+# ==============================================================================
+
+def setup(app):
+ """Setup Sphinx exension"""
+
+ app.connect("env-before-read-docs", init_xref_docs)
+ app.connect("env-merge-info", merge_xref_info)
+ app.add_directive("kernel-include", KernelInclude)
+ app.connect("missing-reference", check_missing_refs)
+
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
new file mode 100644
index 000000000000..16d9ff46fdf6
--- /dev/null
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -0,0 +1,234 @@
+% -*- coding: utf-8 -*-
+% SPDX-License-Identifier: GPL-2.0
+%
+% LaTeX preamble for "make latexdocs" or "make pdfdocs" including:
+% - TOC width settings
+% - Setting of tabulary (\tymin)
+% - Headheight setting for fancyhdr
+% - Fontfamily settings for CJK (Chinese, Japanese, and Korean) translations
+%
+% Note on the suffix of .sty:
+% This is not implemented as a LaTeX style file, but as a file containing
+% plain LaTeX code to be included into preamble.
+% ".sty" is chosen because ".tex" would cause the build scripts to confuse
+% this file with a LaTeX main file.
+%
+% Copyright (C) 2022 Akira Yokosawa
+
+% Custom width parameters for TOC
+% - Redefine low-level commands defined in report.cls.
+% - Indent of 2 chars is preserved for ease of comparison.
+% Summary of changes from default params:
+% Width of page number (\@pnumwidth): 1.55em -> 2.7em
+% Width of chapter number: 1.5em -> 2.4em
+% Indent of section number: 1.5em -> 2.4em
+% Width of section number: 2.6em -> 3.2em
+% Indent of subsection number: 4.1em -> 5.6em
+% Width of subsection number: 3.5em -> 4.3em
+%
+% These params can have 4 digit page counts, 3 digit chapter counts,
+% section counts of 4 digits + 1 period (e.g., 18.10), and subsection counts
+% of 5 digits + 2 periods (e.g., 18.7.13).
+\makeatletter
+%% Redefine \@pnumwidth (page number width)
+\renewcommand*\@pnumwidth{2.7em}
+%% Redefine \l@chapter (chapter list entry)
+\renewcommand*\l@chapter[2]{%
+ \ifnum \c@tocdepth >\m@ne
+ \addpenalty{-\@highpenalty}%
+ \vskip 1.0em \@plus\p@
+ \setlength\@tempdima{2.4em}%
+ \begingroup
+ \parindent \z@ \rightskip \@pnumwidth
+ \parfillskip -\@pnumwidth
+ \leavevmode \bfseries
+ \advance\leftskip\@tempdima
+ \hskip -\leftskip
+ #1\nobreak\hfil
+ \nobreak\hb@xt@\@pnumwidth{\hss #2%
+ \kern-\p@\kern\p@}\par
+ \penalty\@highpenalty
+ \endgroup
+ \fi}
+%% Redefine \l@section and \l@subsection
+\renewcommand*\l@section{\@dottedtocline{1}{2.4em}{3.2em}}
+\renewcommand*\l@subsection{\@dottedtocline{2}{5.6em}{4.3em}}
+\makeatother
+%% Prevent default \sphinxtableofcontentshook from overwriting above tweaks.
+\renewcommand{\sphinxtableofcontentshook}{} % Empty the hook
+
+% Prevent column squeezing of tabulary. \tymin is set by Sphinx as:
+% \setlength{\tymin}{3\fontcharwd\font`0 }
+% , which is too short.
+\setlength{\tymin}{20em}
+
+% Adjust \headheight for fancyhdr
+\addtolength{\headheight}{1.6pt}
+\addtolength{\topmargin}{-1.6pt}
+
+% Translations have Asian (CJK) characters which are only displayed if
+% xeCJK is used
+\usepackage{ifthen}
+\newboolean{enablecjk}
+\setboolean{enablecjk}{false}
+\IfFontExistsTF{Noto Sans CJK SC}{
+ \IfFileExists{xeCJK.sty}{
+ \setboolean{enablecjk}{true}
+ }{}
+}{}
+\ifthenelse{\boolean{enablecjk}}{
+ % Load xeCJK when both the Noto Sans CJK font and xeCJK.sty are available.
+ \usepackage{xeCJK}
+ % Noto CJK fonts don't provide slant shape. [AutoFakeSlant] permits
+ % its emulation.
+  % Select KR variant at the beginning of each document so that half-width
+  % quotation and apostrophe symbols are used in the TOC of Latin documents.
+ \IfFontExistsTF{Noto Serif CJK KR}{
+ \setCJKmainfont{Noto Serif CJK KR}[AutoFakeSlant]
+ }{
+ \setCJKmainfont{Noto Sans CJK KR}[AutoFakeSlant]
+ }
+ \setCJKsansfont{Noto Sans CJK KR}[AutoFakeSlant]
+ \setCJKmonofont{Noto Sans Mono CJK KR}[AutoFakeSlant]
+ % Teach xeCJK of half-width symbols
+ \xeCJKDeclareCharClass{HalfLeft}{`“,`‘}
+ \xeCJKDeclareCharClass{HalfRight}{`”,`’}
+ % CJK Language-specific font choices
+ %% for Simplified Chinese
+ \IfFontExistsTF{Noto Serif CJK SC}{
+ \newCJKfontfamily[SCmain]\scmain{Noto Serif CJK SC}[AutoFakeSlant]
+ \newCJKfontfamily[SCserif]\scserif{Noto Serif CJK SC}[AutoFakeSlant]
+ }{
+ \newCJKfontfamily[SCmain]\scmain{Noto Sans CJK SC}[AutoFakeSlant]
+ \newCJKfontfamily[SCserif]\scserif{Noto Sans CJK SC}[AutoFakeSlant]
+ }
+ \newCJKfontfamily[SCsans]\scsans{Noto Sans CJK SC}[AutoFakeSlant]
+ \newCJKfontfamily[SCmono]\scmono{Noto Sans Mono CJK SC}[AutoFakeSlant]
+ %% for Traditional Chinese
+ \IfFontExistsTF{Noto Serif CJK TC}{
+ \newCJKfontfamily[TCmain]\tcmain{Noto Serif CJK TC}[AutoFakeSlant]
+ \newCJKfontfamily[TCserif]\tcserif{Noto Serif CJK TC}[AutoFakeSlant]
+ }{
+ \newCJKfontfamily[TCmain]\tcmain{Noto Sans CJK TC}[AutoFakeSlant]
+ \newCJKfontfamily[TCserif]\tcserif{Noto Sans CJK TC}[AutoFakeSlant]
+ }
+ \newCJKfontfamily[TCsans]\tcsans{Noto Sans CJK TC}[AutoFakeSlant]
+ \newCJKfontfamily[TCmono]\tcmono{Noto Sans Mono CJK TC}[AutoFakeSlant]
+ %% for Korean
+ \IfFontExistsTF{Noto Serif CJK KR}{
+ \newCJKfontfamily[KRmain]\krmain{Noto Serif CJK KR}[AutoFakeSlant]
+ \newCJKfontfamily[KRserif]\krserif{Noto Serif CJK KR}[AutoFakeSlant]
+ }{
+ \newCJKfontfamily[KRmain]\krmain{Noto Sans CJK KR}[AutoFakeSlant]
+ \newCJKfontfamily[KRserif]\krserif{Noto Sans CJK KR}[AutoFakeSlant]
+ }
+ \newCJKfontfamily[KRsans]\krsans{Noto Sans CJK KR}[AutoFakeSlant]
+ \newCJKfontfamily[KRmono]\krmono{Noto Sans Mono CJK KR}[AutoFakeSlant]
+ %% for Japanese
+ \IfFontExistsTF{Noto Serif CJK JP}{
+ \newCJKfontfamily[JPmain]\jpmain{Noto Serif CJK JP}[AutoFakeSlant]
+ \newCJKfontfamily[JPserif]\jpserif{Noto Serif CJK JP}[AutoFakeSlant]
+ }{
+ \newCJKfontfamily[JPmain]\jpmain{Noto Sans CJK JP}[AutoFakeSlant]
+ \newCJKfontfamily[JPserif]\jpserif{Noto Sans CJK JP}[AutoFakeSlant]
+ }
+ \newCJKfontfamily[JPsans]\jpsans{Noto Sans CJK JP}[AutoFakeSlant]
+ \newCJKfontfamily[JPmono]\jpmono{Noto Sans Mono CJK JP}[AutoFakeSlant]
+ % Define custom macros to on/off CJK
+ %% One and half spacing for CJK contents
+ \newcommand{\kerneldocCJKon}{\makexeCJKactive\onehalfspacing}
+ \newcommand{\kerneldocCJKoff}{\makexeCJKinactive\singlespacing}
+ % Define custom macros for switching CJK font setting
+ %% for Simplified Chinese
+ \newcommand{\kerneldocBeginSC}{%
+ \begingroup%
+ \scmain%
+ \xeCJKDeclareCharClass{FullLeft}{`“,`‘}% Full-width in SC
+ \xeCJKDeclareCharClass{FullRight}{`”,`’}% Full-width in SC
+ \renewcommand{\CJKrmdefault}{SCserif}%
+ \renewcommand{\CJKsfdefault}{SCsans}%
+ \renewcommand{\CJKttdefault}{SCmono}%
+ \xeCJKsetup{CJKspace = false}% gobble white spaces by ' '
+ % For CJK ascii-art alignment
+ \setmonofont{Noto Sans Mono CJK SC}[AutoFakeSlant]%
+ }
+ \newcommand{\kerneldocEndSC}{\endgroup}
+ %% for Traditional Chinese
+ \newcommand{\kerneldocBeginTC}{%
+ \begingroup%
+ \tcmain%
+ \xeCJKDeclareCharClass{FullLeft}{`“,`‘}% Full-width in TC
+ \xeCJKDeclareCharClass{FullRight}{`”,`’}% Full-width in TC
+ \renewcommand{\CJKrmdefault}{TCserif}%
+ \renewcommand{\CJKsfdefault}{TCsans}%
+ \renewcommand{\CJKttdefault}{TCmono}%
+ \xeCJKsetup{CJKspace = false}% gobble white spaces by ' '
+ % For CJK ascii-art alignment
+ \setmonofont{Noto Sans Mono CJK TC}[AutoFakeSlant]%
+ }
+ \newcommand{\kerneldocEndTC}{\endgroup}
+ %% for Korean
+ \newcommand{\kerneldocBeginKR}{%
+ \begingroup%
+ \krmain%
+ \renewcommand{\CJKrmdefault}{KRserif}%
+ \renewcommand{\CJKsfdefault}{KRsans}%
+ \renewcommand{\CJKttdefault}{KRmono}%
+ % \xeCJKsetup{CJKspace = true} % true by default
+ % For CJK ascii-art alignment (still misaligned for Hangul)
+ \setmonofont{Noto Sans Mono CJK KR}[AutoFakeSlant]%
+ }
+ \newcommand{\kerneldocEndKR}{\endgroup}
+ %% for Japanese
+ \newcommand{\kerneldocBeginJP}{%
+ \begingroup%
+ \jpmain%
+ \renewcommand{\CJKrmdefault}{JPserif}%
+ \renewcommand{\CJKsfdefault}{JPsans}%
+ \renewcommand{\CJKttdefault}{JPmono}%
+ \xeCJKsetup{CJKspace = false}% gobble white space by ' '
+ % For CJK ascii-art alignment
+ \setmonofont{Noto Sans Mono CJK JP}[AutoFakeSlant]%
+ }
+ \newcommand{\kerneldocEndJP}{\endgroup}
+
+ % Single spacing in literal blocks
+ \fvset{baselinestretch=1}
+ % To customize \sphinxtableofcontents
+ \usepackage{etoolbox}
+ % Inactivate CJK after tableofcontents
+ \apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{}
+ \xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC
+ % Suppress extra white space at latin .. non-latin in literal blocks
+ \AtBeginEnvironment{sphinxVerbatim}{\CJKsetecglue{}}
+}{ % Don't enable CJK
+ % Custom macros to on/off CJK and switch CJK fonts (Dummy)
+ \newcommand{\kerneldocCJKon}{}
+ \newcommand{\kerneldocCJKoff}{}
+  %% By defining \kerneldocBegin(SC|TC|KR|JP) as commands with an argument
+  %% and ignoring the argument (#1) in their definitions, the whole contents
+  %% of CJK chapters can be ignored.
+ \newcommand{\kerneldocBeginSC}[1]{%
+ %% Put a note on missing CJK fonts or the xecjk package in place of
+ %% zh_CN translation.
+ \begin{sphinxadmonition}{note}{Note on missing fonts and a package:}
+ Translations of Simplified Chinese (zh\_CN), Traditional Chinese
+ (zh\_TW), Korean (ko\_KR), and Japanese (ja\_JP) were skipped
+ due to the lack of suitable font families and/or the texlive-xecjk
+ package.
+
+ If you want them, please install non-variable ``Noto Sans CJK''
+ font families along with the texlive-xecjk package by following
+ instructions from
+ \sphinxcode{./tools/docs/sphinx-pre-install}.
+ Having optional non-variable ``Noto Serif CJK'' font families will
+ improve the looks of those translations.
+ \end{sphinxadmonition}}
+ \newcommand{\kerneldocEndSC}{}
+ \newcommand{\kerneldocBeginTC}[1]{}
+ \newcommand{\kerneldocEndTC}{}
+ \newcommand{\kerneldocBeginKR}[1]{}
+ \newcommand{\kerneldocEndKR}{}
+ \newcommand{\kerneldocBeginJP}[1]{}
+ \newcommand{\kerneldocEndJP}{}
+}
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
new file mode 100644
index 000000000000..d8cdf068ef35
--- /dev/null
+++ b/Documentation/sphinx/kerneldoc.py
@@ -0,0 +1,313 @@
+# coding=utf-8
+# SPDX-License-Identifier: MIT
+#
+# Copyright © 2016 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Jani Nikula <jani.nikula@intel.com>
+#
+
+import codecs
+import os
+import subprocess
+import sys
+import re
+import glob
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+import sphinx
+from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+from pprint import pformat
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from kdoc.kdoc_files import KernelFiles
+from kdoc.kdoc_output import RestFormat
+
+__version__ = '1.0'
+kfiles = None
+logger = logging.getLogger(__name__)
+
+def cmd_str(cmd):
+ """
+    Helper function to output a command line that can be used to produce
+    the same records from the shell. Helpful for debugging problems with
+    the script.
+ """
+
+ cmd_line = ""
+
+ for w in cmd:
+ if w == "" or " " in w:
+ esc_cmd = "'" + w + "'"
+ else:
+ esc_cmd = w
+
+ if cmd_line:
+ cmd_line += " " + esc_cmd
+ continue
+ else:
+ cmd_line = esc_cmd
+
+ return cmd_line
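+
+# For illustration: cmd_str(["kernel-doc", "-rst", "my file.c"]) returns
+# "kernel-doc -rst 'my file.c'"; arguments containing spaces are quoted.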
+
+class KernelDocDirective(Directive):
+ """Extract kernel-doc comments from the specified file"""
+    required_arguments = 1
+ optional_arguments = 4
+ option_spec = {
+ 'doc': directives.unchanged_required,
+ 'export': directives.unchanged,
+ 'internal': directives.unchanged,
+ 'identifiers': directives.unchanged,
+ 'no-identifiers': directives.unchanged,
+ 'functions': directives.unchanged,
+ }
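+    # Illustrative use of this directive in a .rst document (the file
+    # path below is hypothetical):
+    #
+    #   .. kernel-doc:: drivers/foo/bar.c
+    #      :export:
+    #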
+ has_content = False
+ verbose = 0
+
+ parse_args = {}
+ msg_args = {}
+
+ def handle_args(self):
+
+ env = self.state.document.settings.env
+ cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
+
+ filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
+
+ # Arguments used by KernelFiles.parse() function
+ self.parse_args = {
+ "file_list": [filename],
+ "export_file": []
+ }
+
+ # Arguments used by KernelFiles.msg() function
+ self.msg_args = {
+ "enable_lineno": True,
+ "export": False,
+ "internal": False,
+ "symbol": [],
+ "nosymbol": [],
+ "no_doc_sections": False
+ }
+
+ export_file_patterns = []
+
+ verbose = os.environ.get("V")
+ if verbose:
+ try:
+ self.verbose = int(verbose)
+ except ValueError:
+ pass
+
+ # Tell sphinx of the dependency
+ env.note_dependency(os.path.abspath(filename))
+
+ self.tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
+
+        # 'functions' is an alias of 'identifiers'
+ if 'functions' in self.options:
+ self.options['identifiers'] = self.options.get('functions')
+
+ # FIXME: make this nicer and more robust against errors
+ if 'export' in self.options:
+ cmd += ['-export']
+ self.msg_args["export"] = True
+ export_file_patterns = str(self.options.get('export')).split()
+ elif 'internal' in self.options:
+ cmd += ['-internal']
+ self.msg_args["internal"] = True
+ export_file_patterns = str(self.options.get('internal')).split()
+ elif 'doc' in self.options:
+ func = str(self.options.get('doc'))
+ cmd += ['-function', func]
+ self.msg_args["symbol"].append(func)
+ elif 'identifiers' in self.options:
+ identifiers = self.options.get('identifiers').split()
+ if identifiers:
+ for i in identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
+ cmd += ['-function', i]
+ self.msg_args["symbol"].append(i)
+ else:
+ cmd += ['-no-doc-sections']
+ self.msg_args["no_doc_sections"] = True
+
+ if 'no-identifiers' in self.options:
+ no_identifiers = self.options.get('no-identifiers').split()
+ if no_identifiers:
+ for i in no_identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
+ cmd += ['-nosymbol', i]
+ self.msg_args["nosymbol"].append(i)
+
+ for pattern in export_file_patterns:
+ pattern = pattern.rstrip("\\").strip()
+ if not pattern:
+ continue
+
+ for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
+ env.note_dependency(os.path.abspath(f))
+ cmd += ['-export-file', f]
+ self.parse_args["export_file"].append(f)
+
+ # Export file is needed by both parse and msg, as kernel-doc
+        # caches exports.
+ self.msg_args["export_file"] = self.parse_args["export_file"]
+
+ cmd += [filename]
+
+ return cmd
+
+ def run_cmd(self, cmd):
+ """
+ Execute an external kernel-doc command.
+ """
+
+ env = self.state.document.settings.env
+ node = nodes.section()
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+
+ if p.returncode != 0:
+ sys.stderr.write(err)
+
+ logger.warning("kernel-doc '%s' failed with return code %d"
+ % (" ".join(cmd), p.returncode))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+ elif env.config.kerneldoc_verbosity > 0:
+ sys.stderr.write(err)
+
+ filenames = self.parse_args["file_list"]
+ for filename in filenames:
+ self.parse_msg(filename, node, out, cmd)
+
+ return node.children
+
+ def parse_msg(self, filename, node, out, cmd):
+ """
+ Handles a kernel-doc output for a given file
+ """
+
+ env = self.state.document.settings.env
+
+ lines = statemachine.string2lines(out, self.tab_width,
+ convert_whitespace=True)
+ result = ViewList()
+
+        lineoffset = 0
+ line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
+ for line in lines:
+ match = line_regex.search(line)
+ if match:
+ # sphinx counts lines from 0
+ lineoffset = int(match.group(1)) - 1
+                # we must eat our comments since they upset the markup
+ else:
+ doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno)
+ result.append(line, doc + ": " + filename, lineoffset)
+ lineoffset += 1
+
+ self.do_parse(result, node)
+
+ def run_kdoc(self, cmd, kfiles):
+ """
+ Execute kernel-doc classes directly instead of running as a separate
+ command.
+ """
+
+ env = self.state.document.settings.env
+
+ node = nodes.section()
+
+ kfiles.parse(**self.parse_args)
+ filenames = self.parse_args["file_list"]
+
+ for filename, out in kfiles.msg(**self.msg_args, filenames=filenames):
+ self.parse_msg(filename, node, out, cmd)
+
+ return node.children
+
+ def run(self):
+ global kfiles
+
+ cmd = self.handle_args()
+ if self.verbose >= 1:
+ logger.info(cmd_str(cmd))
+
+ try:
+ if kfiles:
+ return self.run_kdoc(cmd, kfiles)
+ else:
+ return self.run_cmd(cmd)
+
+ except Exception as e: # pylint: disable=W0703
+ logger.warning("kernel-doc '%s' processing failed with: %s" %
+ (cmd_str(cmd), pformat(e)))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+
+ def do_parse(self, result, node):
+ with switch_source_input(self.state, result):
+ self.state.nested_parse(result, 0, node, match_titles=1)
+
+def setup_kfiles(app):
+ global kfiles
+
+ kerneldoc_bin = app.env.config.kerneldoc_bin
+
+ if kerneldoc_bin and kerneldoc_bin.endswith("kernel-doc.py"):
+ print("Using Python kernel-doc")
+ out_style = RestFormat()
+ kfiles = KernelFiles(out_style=out_style, logger=logger)
+ else:
+ print(f"Using {kerneldoc_bin}")
+
+
+def setup(app):
+ app.add_config_value('kerneldoc_bin', None, 'env')
+ app.add_config_value('kerneldoc_srctree', None, 'env')
+ app.add_config_value('kerneldoc_verbosity', 1, 'env')
+
+ app.add_directive('kernel-doc', KernelDocDirective)
+
+ app.connect('builder-inited', setup_kfiles)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
new file mode 100644
index 000000000000..ad495c0da270
--- /dev/null
+++ b/Documentation/sphinx/kfigure.py
@@ -0,0 +1,655 @@
+# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
+# pylint: disable=C0103, R0903, R0912, R0915
+"""
+ scalable figure and image handling
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension which implements scalable image handling.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+    The build of an image format depends on the image's source format and the
+    output's destination format. This extension implements methods to simplify
+    image handling from the author's POV. Directives like ``kernel-figure``
+    implement methods to always get the best output format, even if some tools
+    are not installed. For more details take a look at ``convert_image(...)``,
+    which is the core of all conversions.
+
+ * ``.. kernel-image``: for image handling / a ``.. image::`` replacement
+
+ * ``.. kernel-figure``: for figure handling / a ``.. figure::`` replacement
+
+ * ``.. kernel-render``: for render markup / a concept to embed *render*
+ markups (or languages). Supported markups (see ``RENDER_MARKUP_EXT``)
+
+    - ``DOT``: render embedded Graphviz's **DOT**
+ - ``SVG``: render embedded Scalable Vector Graphics (**SVG**)
+ - ... *developable*
+
+ Used tools:
+
+ * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
+ available, the DOT language is inserted as literal-block.
+ For conversion to PDF, ``rsvg-convert(1)`` of librsvg
+ (https://gitlab.gnome.org/GNOME/librsvg) is used when available.
+
+  * SVG to PDF: To generate PDF, you need at least one of these tools:
+
+ - ``convert(1)``: ImageMagick (https://www.imagemagick.org)
+ - ``inkscape(1)``: Inkscape (https://inkscape.org/)
+
+ List of customizations:
+
+ * generate PDF from SVG / used by PDF (LaTeX) builder
+
+ * generate SVG (html-builder) and PDF (latex-builder) from DOT files.
+ DOT: see https://www.graphviz.org/content/dot-language
+
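+    A minimal usage sketch (the file name is illustrative):
+
+    .. code-block:: rst
+
+        .. kernel-figure:: svg_image.svg
+           :alt: simple SVG image
+
+           SVG image example
+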
+ """
+
+import os
+from os import path
+import subprocess
+from hashlib import sha1
+import re
+from docutils import nodes
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.directives import images
+import sphinx
+from sphinx.util.nodes import clean_astext
+from sphinx.util import logging
+
+Figure = images.Figure
+
+__version__ = '1.0.0'
+
+logger = logging.getLogger('kfigure')
+
+# simple helper
+# -------------
+
+def which(cmd):
+ """Searches the ``cmd`` in the ``PATH`` environment.
+
+ This *which* searches the PATH for executable ``cmd`` . First match is
+ returned, if nothing is found, ``None` is returned.
+ """
+ envpath = os.environ.get('PATH', None) or os.defpath
+ for folder in envpath.split(os.pathsep):
+ fname = folder + os.sep + cmd
+ if path.isfile(fname):
+ return fname
+
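+# For illustration: which("dot") might return "/usr/bin/dot" on a typical
+# system; None is returned when dot(1) is not installed.
+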
+def mkdir(folder, mode=0o775):
+ if not path.isdir(folder):
+ os.makedirs(folder, mode)
+
+def file2literal(fname):
+ with open(fname, "r") as src:
+ data = src.read()
+ node = nodes.literal_block(data, data)
+ return node
+
+def isNewer(path1, path2):
+ """Returns True if ``path1`` is newer than ``path2``
+
+    If ``path1`` exists and is newer than ``path2``, ``True`` is returned,
+    otherwise ``False``.
+ """
+ return (path.exists(path1)
+ and os.stat(path1).st_ctime > os.stat(path2).st_ctime)
+
+def pass_handle(self, node): # pylint: disable=W0613
+ pass
+
+# setup conversion tools and sphinx extension
+# -------------------------------------------
+
+# Graphviz's dot(1) support
+dot_cmd = None
+# dot(1) -Tpdf should be used
+dot_Tpdf = False
+
+# ImageMagick' convert(1) support
+convert_cmd = None
+
+# librsvg's rsvg-convert(1) support
+rsvg_convert_cmd = None
+
+# Inkscape's inkscape(1) support
+inkscape_cmd = None
+# Inkscape prior to 1.0 uses different command options
+inkscape_ver_one = False
+
+
+def setup(app):
+ # check toolchain first
+ app.connect('builder-inited', setupTools)
+
+ # image handling
+ app.add_directive("kernel-image", KernelImage)
+ app.add_node(kernel_image,
+ html = (visit_kernel_image, pass_handle),
+ latex = (visit_kernel_image, pass_handle),
+ texinfo = (visit_kernel_image, pass_handle),
+ text = (visit_kernel_image, pass_handle),
+ man = (visit_kernel_image, pass_handle), )
+
+ # figure handling
+ app.add_directive("kernel-figure", KernelFigure)
+ app.add_node(kernel_figure,
+ html = (visit_kernel_figure, pass_handle),
+ latex = (visit_kernel_figure, pass_handle),
+ texinfo = (visit_kernel_figure, pass_handle),
+ text = (visit_kernel_figure, pass_handle),
+ man = (visit_kernel_figure, pass_handle), )
+
+ # render handling
+ app.add_directive('kernel-render', KernelRender)
+ app.add_node(kernel_render,
+ html = (visit_kernel_render, pass_handle),
+ latex = (visit_kernel_render, pass_handle),
+ texinfo = (visit_kernel_render, pass_handle),
+ text = (visit_kernel_render, pass_handle),
+ man = (visit_kernel_render, pass_handle), )
+
+ app.connect('doctree-read', add_kernel_figure_to_std_domain)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+
+def setupTools(app):
+ """
+ Check available build tools and log some *verbose* messages.
+
+ This function is called once, when the builder is initiated.
+ """
+ global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603
+ global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603
+ logger.verbose("kfigure: check installed tools ...")
+
+ dot_cmd = which('dot')
+ convert_cmd = which('convert')
+ rsvg_convert_cmd = which('rsvg-convert')
+ inkscape_cmd = which('inkscape')
+
+ if dot_cmd:
+ logger.verbose("use dot(1) from: " + dot_cmd)
+
+ try:
+ dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as err:
+ dot_Thelp_list = err.output
+
+ dot_Tpdf_ptn = b'pdf'
+ dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
+ else:
+ logger.warning(
+ "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org"
+ )
+ if inkscape_cmd:
+ logger.verbose("use inkscape(1) from: " + inkscape_cmd)
+ inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
+ stderr=subprocess.DEVNULL)
+ ver_one_ptn = b'Inkscape 1'
+ inkscape_ver_one = re.search(ver_one_ptn, inkscape_ver)
+ convert_cmd = None
+ rsvg_convert_cmd = None
+ dot_Tpdf = False
+
+ else:
+ if convert_cmd:
+ logger.verbose("use convert(1) from: " + convert_cmd)
+ else:
+ logger.verbose(
+ "Neither inkscape(1) nor convert(1) found.\n"
+ "For SVG to PDF conversion, install either Inkscape (https://inkscape.org/) (preferred) or\n"
+ "ImageMagick (https://www.imagemagick.org)"
+ )
+
+ if rsvg_convert_cmd:
+ logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd)
+ logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
+ dot_Tpdf = False
+ else:
+ logger.verbose(
+ "rsvg-convert(1) not found.\n"
+ " SVG rendering of convert(1) is done by ImageMagick-native renderer."
+ )
+ if dot_Tpdf:
+ logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion")
+ else:
+ logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
+
+
+# integrate conversion tools
+# --------------------------
+
+RENDER_MARKUP_EXT = {
+ # The '.ext' must be handled by convert_image(..) function's *in_ext* input.
+ # <name> : <.ext>
+ 'DOT' : '.dot',
+ 'SVG' : '.svg'
+}
+
+def convert_image(img_node, translator, src_fname=None):
+ """Convert a image node for the builder.
+
+ Different builder prefer different image formats, e.g. *latex* builder
+ prefer PDF while *html* builder prefer SVG format for images.
+
+ This function handles output image formats in dependence of source the
+ format (of the image) and the translator's output format.
+ """
+ app = translator.builder.app
+
+ fname, in_ext = path.splitext(path.basename(img_node['uri']))
+ if src_fname is None:
+ src_fname = path.join(translator.builder.srcdir, img_node['uri'])
+ if not path.exists(src_fname):
+ src_fname = path.join(translator.builder.outdir, img_node['uri'])
+
+ dst_fname = None
+
+ # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
+
+ logger.verbose('assert best format for: ' + img_node['uri'])
+
+ if in_ext == '.dot':
+
+ if not dot_cmd:
+ logger.verbose("dot from graphviz not available / include DOT raw.")
+ img_node.replace_self(file2literal(src_fname))
+
+ elif translator.builder.format == 'latex':
+ dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
+ img_node['uri'] = fname + '.pdf'
+ img_node['candidates'] = {'*': fname + '.pdf'}
+
+
+ elif translator.builder.format == 'html':
+ dst_fname = path.join(
+ translator.builder.outdir,
+ translator.builder.imagedir,
+ fname + '.svg')
+ img_node['uri'] = path.join(
+ translator.builder.imgpath, fname + '.svg')
+ img_node['candidates'] = {
+ '*': path.join(translator.builder.imgpath, fname + '.svg')}
+
+ else:
+ # all other builder formats will include DOT as raw
+ img_node.replace_self(file2literal(src_fname))
+
+ elif in_ext == '.svg':
+
+ if translator.builder.format == 'latex':
+ if not inkscape_cmd and convert_cmd is None:
+ logger.warning(
+ "no SVG to PDF conversion available / include SVG raw.\n"
+ "Including large raw SVGs can cause xelatex error.\n"
+ "Install Inkscape (preferred) or ImageMagick."
+ )
+ img_node.replace_self(file2literal(src_fname))
+ else:
+ dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
+ img_node['uri'] = fname + '.pdf'
+ img_node['candidates'] = {'*': fname + '.pdf'}
+
+ if dst_fname:
+        # the builder does not need to copy the image again, so pop it if it exists.
+ translator.builder.images.pop(img_node['uri'], None)
+ _name = dst_fname[len(str(translator.builder.outdir)) + 1:]
+
+ if isNewer(dst_fname, src_fname):
+ logger.verbose("convert: {out}/%s already exists and is newer" % _name)
+
+ else:
+ ok = False
+ mkdir(path.dirname(dst_fname))
+
+ if in_ext == '.dot':
+ logger.verbose('convert DOT to: {out}/' + _name)
+ if translator.builder.format == 'latex' and not dot_Tpdf:
+ svg_fname = path.join(translator.builder.outdir, fname + '.svg')
+ ok1 = dot2format(app, src_fname, svg_fname)
+ ok2 = svg2pdf_by_rsvg(app, svg_fname, dst_fname)
+ ok = ok1 and ok2
+
+ else:
+ ok = dot2format(app, src_fname, dst_fname)
+
+ elif in_ext == '.svg':
+ logger.verbose('convert SVG to: {out}/' + _name)
+ ok = svg2pdf(app, src_fname, dst_fname)
+
+ if not ok:
+ img_node.replace_self(file2literal(src_fname))
+
+
+def dot2format(app, dot_fname, out_fname):
+ """Converts DOT file to ``out_fname`` using ``dot(1)``.
+
+ * ``dot_fname`` pathname of the input DOT file, including extension ``.dot``
+ * ``out_fname`` pathname of the output file, including format extension
+
+ The *format extension* depends on the ``dot`` command (see ``man dot``
+ option ``-Txxx``). Normally you will use one of the following extensions:
+
+ - ``.ps`` for PostScript,
+    - ``.svg`` or ``.svgz`` for Scalable Vector Graphics,
+    - ``.fig`` for XFIG graphics and
+    - ``.png`` or ``.gif`` for common bitmap graphics.
+
+ """
+ out_format = path.splitext(out_fname)[1][1:]
+ cmd = [dot_cmd, '-T%s' % out_format, dot_fname]
+ exit_code = 42
+
+ with open(out_fname, "w") as out:
+ exit_code = subprocess.call(cmd, stdout = out)
+ if exit_code != 0:
+ logger.warning(
+ "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ return bool(exit_code == 0)
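+
+# For example (hypothetical filenames): dot2format(app, "foo.dot", "foo.svg")
+# runs "dot -Tsvg foo.dot" and writes the rendered graph to foo.svg.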
+
+def svg2pdf(app, svg_fname, pdf_fname):
+ """Converts SVG to PDF with ``inkscape(1)`` or ``convert(1)`` command.
+
+ Uses ``inkscape(1)`` from Inkscape (https://inkscape.org/) or ``convert(1)``
+ from ImageMagick (https://www.imagemagick.org) for conversion.
+ Returns ``True`` on success and ``False`` if an error occurred.
+
+ * ``svg_fname`` pathname of the input SVG file with extension (``.svg``)
+    * ``pdf_fname`` pathname of the output PDF file with extension (``.pdf``)
+
+ """
+ cmd = [convert_cmd, svg_fname, pdf_fname]
+ cmd_name = 'convert(1)'
+
+ if inkscape_cmd:
+ cmd_name = 'inkscape(1)'
+ if inkscape_ver_one:
+ cmd = [inkscape_cmd, '-o', pdf_fname, svg_fname]
+ else:
+ cmd = [inkscape_cmd, '-z', '--export-pdf=%s' % pdf_fname, svg_fname]
+
+ try:
+ warning_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ exit_code = 0
+ except subprocess.CalledProcessError as err:
+ warning_msg = err.output
+ exit_code = err.returncode
+
+ if exit_code != 0:
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
+ if warning_msg:
+ logger.warning( "Warning msg from %s: %s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
+ elif warning_msg:
+ logger.verbose("Warning msg from %s (likely harmless):\n%s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
+
+ return bool(exit_code == 0)
+
+def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
+ """Convert SVG to PDF with ``rsvg-convert(1)`` command.
+
+ * ``svg_fname`` pathname of input SVG file, including extension ``.svg``
+ * ``pdf_fname`` pathname of output PDF file, including extension ``.pdf``
+
+ Input SVG file should be the one generated by ``dot2format()``.
+ SVG -> PDF conversion is done by ``rsvg-convert(1)``.
+
+ If ``rsvg-convert(1)`` is unavailable, fall back to ``svg2pdf()``.
+
+ """
+
+ if rsvg_convert_cmd is None:
+ ok = svg2pdf(app, svg_fname, pdf_fname)
+ else:
+ cmd = [rsvg_convert_cmd, '--format=pdf', '-o', pdf_fname, svg_fname]
+ # use stdout and stderr from parent
+ exit_code = subprocess.call(cmd)
+ if exit_code != 0:
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
+ ok = bool(exit_code == 0)
+
+ return ok
+
+
+# image handling
+# ---------------------
+
+def visit_kernel_image(self, node): # pylint: disable=W0613
+ """Visitor of the ``kernel_image`` Node.
+
+    Handles the ``image`` child-node with ``convert_image(...)``.
+ """
+ img_node = node[0]
+ convert_image(img_node, self)
+
+class kernel_image(nodes.image):
+ """Node for ``kernel-image`` directive."""
+ pass
+
+class KernelImage(images.Image):
+ """KernelImage directive
+
+    Inherits everything from the ``.. image::`` directive, except *remote URI*
+    and *glob* pattern. KernelImage wraps an image node into a
+    kernel_image node. See ``visit_kernel_image``.
+ """
+
+ def run(self):
+ uri = self.arguments[0]
+ if uri.endswith('.*') or uri.find('://') != -1:
+ raise self.severe(
+ 'Error in "%s: %s": glob pattern and remote images are not allowed'
+ % (self.name, uri))
+ result = images.Image.run(self)
+ if len(result) == 2 or isinstance(result[0], nodes.system_message):
+ return result
+ (image_node,) = result
+ # wrap image node into a kernel_image node / see visitors
+ node = kernel_image('', image_node)
+ return [node]
+
+# figure handling
+# ---------------------
+
+def visit_kernel_figure(self, node): # pylint: disable=W0613
+ """Visitor of the ``kernel_figure`` Node.
+
+    Handles the ``image`` child-node with ``convert_image(...)``.
+ """
+ img_node = node[0][0]
+ convert_image(img_node, self)
+
+class kernel_figure(nodes.figure):
+ """Node for ``kernel-figure`` directive."""
+
+class KernelFigure(Figure):
+ """KernelImage directive
+
+ Earns everything from ``.. figure::`` directive, except *remote URI* and
+ *glob* pattern. The KernelFigure wraps a figure node into a kernel_figure
+ node. See ``visit_kernel_figure``.
+ """
+
+ def run(self):
+ uri = self.arguments[0]
+ if uri.endswith('.*') or uri.find('://') != -1:
+ raise self.severe(
+ 'Error in "%s: %s":'
+ ' glob pattern and remote images are not allowed'
+ % (self.name, uri))
+ result = Figure.run(self)
+ if len(result) == 2 or isinstance(result[0], nodes.system_message):
+ return result
+ (figure_node,) = result
+ # wrap figure node into a kernel_figure node / see visitors
+ node = kernel_figure('', figure_node)
+ return [node]
+
+
+# render handling
+# ---------------------
+
+def visit_kernel_render(self, node):
+ """Visitor of the ``kernel_render`` Node.
+
+    If rendering tools are available, save the markup of the ``literal_block``
+    child node into a file and replace the ``literal_block`` node with a newly
+    created ``image`` node pointing to the saved markup file. Afterwards,
+    handle the image child-node with ``convert_image(...)``.
+ """
+ app = self.builder.app
+ srclang = node.get('srclang')
+
+ logger.verbose('visit kernel-render node lang: "%s"' % srclang)
+
+ tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
+ if tmp_ext is None:
+ logger.warning( 'kernel-render: "%s" unknown / include raw.' % srclang)
+ return
+
+ if not dot_cmd and tmp_ext == '.dot':
+ logger.verbose("dot from graphviz not available / include raw.")
+ return
+
+ literal_block = node[0]
+
+ code = literal_block.astext()
+ hashobj = code.encode('utf-8') # str(node.attributes)
+    fname = '%s-%s' % (srclang, sha1(hashobj).hexdigest())
+
+ tmp_fname = path.join(
+ self.builder.outdir, self.builder.imagedir, fname + tmp_ext)
+
+ if not path.isfile(tmp_fname):
+ mkdir(path.dirname(tmp_fname))
+ with open(tmp_fname, "w") as out:
+ out.write(code)
+
+ img_node = nodes.image(node.rawsource, **node.attributes)
+ img_node['uri'] = path.join(self.builder.imgpath, fname + tmp_ext)
+ img_node['candidates'] = {
+ '*': path.join(self.builder.imgpath, fname + tmp_ext)}
+
+ literal_block.replace_self(img_node)
+ convert_image(img_node, self, tmp_fname)
+
+
+class kernel_render(nodes.General, nodes.Inline, nodes.Element):
+ """Node for ``kernel-render`` directive."""
+ pass
+
+class KernelRender(Figure):
+ """KernelRender directive
+
+    Render content with an external tool. Has all the options known from the
+    *figure* directive, plus the option ``caption``. If ``caption`` has a
+    value, a figure node with that *caption* is inserted. If not, an image
+    node is inserted.
+
+ The KernelRender directive wraps the text of the directive into a
+ literal_block node and wraps it into a kernel_render node. See
+ ``visit_kernel_render``.
+ """
+ has_content = True
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+
+    # inherit options from 'figure'
+ option_spec = Figure.option_spec.copy()
+ option_spec['caption'] = directives.unchanged
+
+ def run(self):
+ return [self.build_node()]
+
+ def build_node(self):
+
+ srclang = self.arguments[0].strip()
+ if srclang not in RENDER_MARKUP_EXT.keys():
+ return [self.state_machine.reporter.warning(
+ 'Unknown source language "%s", use one of: %s.' % (
+ srclang, ",".join(RENDER_MARKUP_EXT.keys())),
+ line=self.lineno)]
+
+ code = '\n'.join(self.content)
+ if not code.strip():
+ return [self.state_machine.reporter.warning(
+ 'Ignoring "%s" directive without content.' % (
+ self.name),
+ line=self.lineno)]
+
+ node = kernel_render()
+ node['alt'] = self.options.get('alt','')
+ node['srclang'] = srclang
+ literal_node = nodes.literal_block(code, code)
+ node += literal_node
+
+ caption = self.options.get('caption')
+ if caption:
+ # parse caption's content
+ parsed = nodes.Element()
+ self.state.nested_parse(
+ ViewList([caption], source=''), self.content_offset, parsed)
+ caption_node = nodes.caption(
+ parsed[0].rawsource, '', *parsed[0].children)
+ caption_node.source = parsed[0].source
+ caption_node.line = parsed[0].line
+
+ figure_node = nodes.figure('', node)
+ for k,v in self.options.items():
+ figure_node[k] = v
+ figure_node += caption_node
+
+ node = figure_node
+
+ return node
+
+def add_kernel_figure_to_std_domain(app, doctree):
+ """Add kernel-figure anchors to 'std' domain.
+
+    The ``StandardDomain.process_doc(..)`` method does not know how to resolve
+    the caption (label) of the ``kernel-figure`` directive (it only knows about
+    standard nodes, e.g. table, figure etc.). Without any additional handling,
+    this results in an 'undefined label' warning for kernel-figures.
+
+    This handler adds the labels of kernel-figures to the 'std' domain labels.
+ """
+
+ std = app.env.domains["std"]
+ docname = app.env.docname
+ labels = std.data["labels"]
+
+ for name, explicit in doctree.nametypes.items():
+ if not explicit:
+ continue
+ labelid = doctree.nameids[name]
+ if labelid is None:
+ continue
+ node = doctree.ids[labelid]
+
+ if node.tagname == 'kernel_figure':
+ for n in node.next_node():
+ if n.tagname == 'caption':
+ sectname = clean_astext(n)
+ # add label to std domain
+ labels[name] = docname, labelid, sectname
+ break
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
new file mode 100755
index 000000000000..519ad18685b2
--- /dev/null
+++ b/Documentation/sphinx/maintainers_include.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+"""
+ maintainers-include
+ ~~~~~~~~~~~~~~~~~~~
+
+ Implementation of the ``maintainers-include`` reST-directive.
+
+ :copyright: Copyright (C) 2019 Kees Cook <keescook@chromium.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``maintainers-include`` reST-directive performs extensive parsing
+ specific to the Linux kernel's standard "MAINTAINERS" file, in an
+ effort to avoid needing to heavily mark up the original plain text.
+"""
+
+import sys
+import re
+import os.path
+
+from docutils import statemachine
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.directives.misc import Include
+
+def ErrorString(exc): # Shamelessly stolen from docutils
+    return f'{exc.__class__.__name__}: {exc}'
+
+__version__ = '1.0'
+
+def setup(app):
+ app.add_directive("maintainers-include", MaintainersInclude)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+class MaintainersInclude(Include):
+ """MaintainersInclude (``maintainers-include``) directive"""
+ required_arguments = 0
+
+ def parse_maintainers(self, path):
+ """Parse all the MAINTAINERS lines into ReST for human-readability"""
+
+ result = list()
+ result.append(".. _maintainers:")
+ result.append("")
+
+ # Poor man's state machine.
+ descriptions = False
+ maintainers = False
+ subsystems = False
+
+ # Field letter to field name mapping.
+ field_letter = None
+ fields = dict()
+
+ prev = None
+ field_prev = ""
+ field_content = ""
+
+ for line in open(path):
+ # Have we reached the end of the preformatted Descriptions text?
+ if descriptions and line.startswith('Maintainers'):
+ descriptions = False
+ # Ensure a blank line following the last "|"-prefixed line.
+ result.append("")
+
+ # Start subsystem processing? This is to skip processing the text
+ # between the Maintainers heading and the first subsystem name.
+ if maintainers and not subsystems:
+ if re.search('^[A-Z0-9]', line):
+ subsystems = True
+
+ # Drop needless input whitespace.
+ line = line.rstrip()
+
+ # Linkify all non-wildcard refs to ReST files in Documentation/.
+ pat = r'(Documentation/([^\s\?\*]*)\.rst)'
+ m = re.search(pat, line)
+ if m:
+ # maintainers.rst is in a subdirectory, so include "../".
+ line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)
+
+ # Check state machine for output rendering behavior.
+ output = None
+ if descriptions:
+ # Escape the escapes in preformatted text.
+ output = "| %s" % (line.replace("\\", "\\\\"))
+ # Look for and record field letter to field name mappings:
+ # R: Designated *reviewer*: FullName <address@domain>
+ m = re.search(r"\s(\S):\s", line)
+ if m:
+ field_letter = m.group(1)
+                if field_letter and field_letter not in fields:
+ m = re.search(r"\*([^\*]+)\*", line)
+ if m:
+ fields[field_letter] = m.group(1)
+ elif subsystems:
+ # Skip empty lines: subsystem parser adds them as needed.
+ if len(line) == 0:
+ continue
+ # Subsystem fields are batched into "field_content"
+ if line[1] != ':':
+ # Render a subsystem entry as:
+ # SUBSYSTEM NAME
+ # ~~~~~~~~~~~~~~
+
+ # Flush pending field content.
+ output = field_content + "\n\n"
+ field_content = ""
+
+ # Collapse whitespace in subsystem name.
+ heading = re.sub(r"\s+", " ", line)
+ output = output + "%s\n%s" % (heading, "~" * len(heading))
+ field_prev = ""
+ else:
+ # Render a subsystem field as:
+ # :Field: entry
+ # entry...
+ field, details = line.split(':', 1)
+ details = details.strip()
+
+ # Mark paths (and regexes) as literal text for improved
+ # readability and to escape any escapes.
+ if field in ['F', 'N', 'X', 'K']:
+ # But only if not already marked :)
+                        if ':doc:' not in details:
+ details = '``%s``' % (details)
+
+ # Comma separate email field continuations.
+ if field == field_prev and field_prev in ['M', 'R', 'L']:
+ field_content = field_content + ","
+
+ # Do not repeat field names, so that field entries
+ # will be collapsed together.
+ if field != field_prev:
+ output = field_content + "\n"
+ field_content = ":%s:" % (fields.get(field, field))
+ field_content = field_content + "\n\t%s" % (details)
+ field_prev = field
+ else:
+ output = line
+
+ # Re-split on any added newlines in any above parsing.
+            if output is not None:
+ for separated in output.split('\n'):
+ result.append(separated)
+
+ # Update the state machine when we find heading separators.
+ if line.startswith('----------'):
+ if prev.startswith('Descriptions'):
+ descriptions = True
+ if prev.startswith('Maintainers'):
+ maintainers = True
+
+ # Retain previous line for state machine transitions.
+ prev = line
+
+ # Flush pending field contents.
+ if field_content != "":
+ for separated in field_content.split('\n'):
+ result.append(separated)
+
+ output = "\n".join(result)
+ # For debugging the pre-rendered results...
+ #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))
+
+ self.state_machine.insert_input(
+ statemachine.string2lines(output), path)
+
+ def run(self):
+ """Include the MAINTAINERS file as part of this reST file."""
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+
+ # Walk up source path directories to find Documentation/../
+ path = self.state_machine.document.attributes['source']
+ path = os.path.realpath(path)
+ tail = path
+ while tail != "Documentation" and tail != "":
+ (path, tail) = os.path.split(path)
+
+ # Append "MAINTAINERS"
+ path = os.path.join(path, "MAINTAINERS")
+
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+            self.parse_maintainers(path)
+ except IOError as error:
+ raise self.severe('Problems with "%s" directive path:\n%s.' %
+ (self.name, ErrorString(error)))
+
+ return []
diff --git a/Documentation/sphinx/min_requirements.txt b/Documentation/sphinx/min_requirements.txt
new file mode 100644
index 000000000000..96b5e0bfa3d7
--- /dev/null
+++ b/Documentation/sphinx/min_requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+alabaster>=0.7,<0.8
+docutils>=0.15,<0.18
+jinja2>=2.3,<3.1
+PyYAML>=5.1,<6.1
+Sphinx==3.4.3
+sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-devhelp==1.0.1
+sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-qthelp==1.0.2
+sphinxcontrib-serializinghtml==1.1.4
diff --git a/Documentation/sphinx/parser_yaml.py b/Documentation/sphinx/parser_yaml.py
new file mode 100755
index 000000000000..634d84a202fc
--- /dev/null
+++ b/Documentation/sphinx/parser_yaml.py
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+"""
+Sphinx extension for processing YAML files
+"""
+
+import os
+import re
+import sys
+
+from pprint import pformat
+
+from docutils import statemachine
+from docutils.parsers.rst import Parser as RSTParser
+from docutils.parsers.rst import states
+from docutils.statemachine import ViewList
+
+from sphinx.util import logging
+from sphinx.parsers import Parser
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/net/ynl/pyynl/lib"))
+
+from doc_generator import YnlDocGenerator # pylint: disable=C0413
+
+logger = logging.getLogger(__name__)
+
+class YamlParser(Parser):
+ """
+ Kernel parser for YAML files.
+
+ This is a simple sphinx.Parser to handle yaml files inside the
+ Kernel tree that will be part of the built documentation.
+
+ The actual parser function is not contained here: the code was
+ written in a way that parsing yaml for different subsystems
+ can be done from a single dispatcher.
+
+    All it takes to parse a new class of YAML files is to add an import line:
+
+            from some_parser_code import NewYamlGenerator
+
+    to this module, then add an instance of the parser with:
+
+            new_parser = NewYamlGenerator()
+
+    and add logic inside parse() to handle it based on the path,
+    like this:
+
+ if "/foo" in fname:
+ msg = self.new_parser.parse_yaml_file(fname)
+ """
+
+ supported = ('yaml', )
+
+ netlink_parser = YnlDocGenerator()
+
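+    # ".. LINENO <n>" markers are emitted by the YAML-to-ReST generator to
+    # record the original source line; rst_parse() strips them and uses the
+    # number as the reported line offset.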
+ re_lineno = re.compile(r"\.\. LINENO ([0-9]+)$")
+
+ tab_width = 8
+
+ def rst_parse(self, inputstring, document, msg):
+ """
+        Receive ReST content that was previously converted by the
+        YAML parser and add it to the document tree.
+ """
+
+ self.setup_parse(inputstring, document)
+
+ result = ViewList()
+
+ self.statemachine = states.RSTStateMachine(state_classes=states.state_classes,
+ initial_state='Body',
+ debug=document.reporter.debug_flag)
+
+ try:
+ # Parse message with RSTParser
+            lineoffset = 0
+
+ lines = statemachine.string2lines(msg, self.tab_width,
+ convert_whitespace=True)
+
+ for line in lines:
+ match = self.re_lineno.match(line)
+ if match:
+ lineoffset = int(match.group(1))
+ continue
+
+ result.append(line, document.current_source, lineoffset)
+
+ self.statemachine.run(result, document)
+
+ except Exception as e:
+ document.reporter.error("YAML parsing error: %s" % pformat(e))
+
+ self.finish_parse()
+
+ # Overrides docutils.parsers.Parser. See sphinx.parsers.RSTParser
+ def parse(self, inputstring, document):
+ """Check if a YAML is meant to be parsed."""
+
+ fname = document.current_source
+
+ # Handle netlink yaml specs
+ if "/netlink/specs/" in fname:
+ msg = self.netlink_parser.parse_yaml_file(fname)
+ self.rst_parse(inputstring, document, msg)
+
+ # All other yaml files are ignored
+
+def setup(app):
+ """Setup function for the Sphinx extension."""
+
+ # Add YAML parser
+ app.add_source_parser(YamlParser)
+ app.add_source_suffix('.yaml', 'yaml')
+
+ return {
+ 'version': '1.0',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
new file mode 100644
index 000000000000..76b4255061d0
--- /dev/null
+++ b/Documentation/sphinx/requirements.txt
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+alabaster
+Sphinx
+pyyaml
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
new file mode 100755
index 000000000000..3d19569e5728
--- /dev/null
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -0,0 +1,365 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
+# pylint: disable=C0330, R0903, R0912
+
+"""
+ flat-table
+ ~~~~~~~~~~
+
+ Implementation of the ``flat-table`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``flat-table`` (:py:class:`FlatTable`) is a double-stage list similar to
+ the ``list-table`` with some additional features:
+
+ * *column-span*: with the role ``cspan`` a cell can be extended through
+ additional columns
+
+ * *row-span*: with the role ``rspan`` a cell can be extended through
+ additional rows
+
+    * *auto span*: the rightmost cell of a table row spans the missing cells
+      on the right side of that table row. With the option ``:fill-cells:``
+      this behavior can be changed from *auto span* to *auto fill*, which
+      automatically inserts (empty) cells instead of spanning the last cell.
+
+ Options:
+
+ * header-rows: [int] count of header rows
+ * stub-columns: [int] count of stub columns
+ * widths: [[int] [int] ... ] widths of columns
+    * fill-cells: instead of auto-spanning missing cells, insert (empty) cells
+
+ roles:
+
+    * cspan: [int] additional columns (*morecols*)
+    * rspan: [int] additional rows (*morerows*)
+"""
+
+# ==============================================================================
+# imports
+# ==============================================================================
+
+from docutils import nodes
+from docutils.parsers.rst import directives, roles
+from docutils.parsers.rst.directives.tables import Table
+from docutils.utils import SystemMessagePropagation
+
+# ==============================================================================
+# common globals
+# ==============================================================================
+
+__version__ = '1.0'
+
+# ==============================================================================
+def setup(app):
+# ==============================================================================
+
+ app.add_directive("flat-table", FlatTable)
+ roles.register_local_role('cspan', c_span)
+ roles.register_local_role('rspan', r_span)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+# ==============================================================================
+def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
+# ==============================================================================
+ # pylint: disable=W0613
+
+ options = options if options is not None else {}
+ content = content if content is not None else []
+ nodelist = [colSpan(span=int(text))]
+ msglist = []
+ return nodelist, msglist
+
+# ==============================================================================
+def r_span(name, rawtext, text, lineno, inliner, options=None, content=None):
+# ==============================================================================
+ # pylint: disable=W0613
+
+ options = options if options is not None else {}
+ content = content if content is not None else []
+ nodelist = [rowSpan(span=int(text))]
+ msglist = []
+ return nodelist, msglist
+
+
+# ==============================================================================
+class rowSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
+class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
+# ==============================================================================
+
+# ==============================================================================
+class FlatTable(Table):
+# ==============================================================================
+
+ """FlatTable (``flat-table``) directive"""
+
+ option_spec = {
+ 'name': directives.unchanged
+ , 'class': directives.class_option
+ , 'header-rows': directives.nonnegative_int
+ , 'stub-columns': directives.nonnegative_int
+ , 'widths': directives.positive_int_list
+ , 'fill-cells' : directives.flag }
+
+ def run(self):
+
+ if not self.content:
+ error = self.state_machine.reporter.error(
+ 'The "%s" directive is empty; content required.' % self.name,
+ nodes.literal_block(self.block_text, self.block_text),
+ line=self.lineno)
+ return [error]
+
+ title, messages = self.make_title()
+ node = nodes.Element() # anonymous container for parsing
+ self.state.nested_parse(self.content, self.content_offset, node)
+
+ tableBuilder = ListTableBuilder(self)
+ tableBuilder.parseFlatTableNode(node)
+ tableNode = tableBuilder.buildTableNode()
+ # SDK.CONSOLE() # print --> tableNode.asdom().toprettyxml()
+ if title:
+ tableNode.insert(0, title)
+ return [tableNode] + messages
+
+
+# ==============================================================================
+class ListTableBuilder(object):
+# ==============================================================================
+
+ """Builds a table from a double-stage list"""
+
+ def __init__(self, directive):
+ self.directive = directive
+ self.rows = []
+ self.max_cols = 0
+
+ def buildTableNode(self):
+
+ colwidths = self.directive.get_column_widths(self.max_cols)
+ if isinstance(colwidths, tuple):
+ # Since docutils 0.13, get_column_widths returns a (widths,
+ # colwidths) tuple, where widths is a string (i.e. 'auto').
+ # See https://sourceforge.net/p/docutils/patches/120/.
+ colwidths = colwidths[1]
+ stub_columns = self.directive.options.get('stub-columns', 0)
+ header_rows = self.directive.options.get('header-rows', 0)
+
+ table = nodes.table()
+ tgroup = nodes.tgroup(cols=len(colwidths))
+ table += tgroup
+
+ for colwidth in colwidths:
+ colspec = nodes.colspec(colwidth=colwidth)
+            # FIXME: It seems that the stub feature only works well in the
+            # absence of rowspan (observed with the html builder; the
+            # docutils-xml build seems OK). This is not surprising, because no
+            # table directive (except *this* flat-table) allows rowspan and
+            # stubs to coexist (there was no use-case before flat-table).
+            # This should be reviewed (later).
+ if stub_columns:
+ colspec.attributes['stub'] = 1
+ stub_columns -= 1
+ tgroup += colspec
+
+ if header_rows:
+ thead = nodes.thead()
+ tgroup += thead
+ for row in self.rows[:header_rows]:
+ thead += self.buildTableRowNode(row)
+
+ tbody = nodes.tbody()
+ tgroup += tbody
+
+ for row in self.rows[header_rows:]:
+ tbody += self.buildTableRowNode(row)
+ return table
+
+ def buildTableRowNode(self, row_data, classes=None):
+ classes = [] if classes is None else classes
+ row = nodes.row()
+ for cell in row_data:
+ if cell is None:
+ continue
+ cspan, rspan, cellElements = cell
+
+ attributes = {"classes" : classes}
+ if rspan:
+ attributes['morerows'] = rspan
+ if cspan:
+ attributes['morecols'] = cspan
+ entry = nodes.entry(**attributes)
+ entry.extend(cellElements)
+ row += entry
+ return row
+
+ def raiseError(self, msg):
+ error = self.directive.state_machine.reporter.error(
+ msg
+ , nodes.literal_block(self.directive.block_text
+ , self.directive.block_text)
+ , line = self.directive.lineno )
+ raise SystemMessagePropagation(error)
+
+ def parseFlatTableNode(self, node):
+ """parses the node from a :py:class:`FlatTable` directive's body"""
+
+ if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
+ self.raiseError(
+ 'Error parsing content block for the "%s" directive: '
+ 'exactly one bullet list expected.' % self.directive.name )
+
+ for rowNum, rowItem in enumerate(node[0]):
+ row = self.parseRowItem(rowItem, rowNum)
+ self.rows.append(row)
+ self.roundOffTableDefinition()
+
+ def roundOffTableDefinition(self):
+ """Round off the table definition.
+
+ This method rounds off the table definition in :py:member:`rows`.
+
+ * This method inserts the needed ``None`` values for the missing cells
+ arising from spanning cells over rows and/or columns.
+
+ * recount the :py:member:`max_cols`
+
+ * Autospan or fill (option ``fill-cells``) missing cells on the right
+ side of the table-row
+ """
+
+ y = 0
+ while y < len(self.rows):
+ x = 0
+
+ while x < len(self.rows[y]):
+ cell = self.rows[y][x]
+ if cell is None:
+ x += 1
+ continue
+ cspan, rspan = cell[:2]
+ # handle colspan in current row
+ for c in range(cspan):
+ try:
+ self.rows[y].insert(x+c+1, None)
+                    except IndexError:
+ # the user sets ambiguous rowspans
+ pass # SDK.CONSOLE()
+ # handle colspan in spanned rows
+ for r in range(rspan):
+ for c in range(cspan + 1):
+ try:
+ self.rows[y+r+1].insert(x+c, None)
+                        except IndexError:
+ # the user sets ambiguous rowspans
+ pass # SDK.CONSOLE()
+ x += 1
+ y += 1
+
+ # Insert the missing cells on the right side. For this, first
+ # re-calculate the max columns.
+
+ for row in self.rows:
+ if self.max_cols < len(row):
+ self.max_cols = len(row)
+
+ # fill with empty cells or cellspan?
+
+ fill_cells = False
+ if 'fill-cells' in self.directive.options:
+ fill_cells = True
+
+ for row in self.rows:
+ x = self.max_cols - len(row)
+ if x and not fill_cells:
+ if row[-1] is None:
+ row.append( ( x - 1, 0, []) )
+ else:
+ cspan, rspan, content = row[-1]
+ row[-1] = (cspan + x, rspan, content)
+ elif x and fill_cells:
+ for i in range(x):
+ row.append( (0, 0, nodes.comment()) )
+
+ def pprint(self):
+ # for debugging
+ retVal = "[ "
+ for row in self.rows:
+ retVal += "[ "
+ for col in row:
+ if col is None:
+ retVal += ('%r' % col)
+ retVal += "\n , "
+ else:
+ content = col[2][0].astext()
+                    if len(content) > 30:
+ content = content[:30] + "..."
+ retVal += ('(cspan=%s, rspan=%s, %r)'
+ % (col[0], col[1], content))
+ retVal += "]\n , "
+ retVal = retVal[:-2]
+ retVal += "]\n , "
+ retVal = retVal[:-2]
+ return retVal + "]"
+
+ def parseRowItem(self, rowItem, rowNum):
+ row = []
+ childNo = 0
+ error = False
+ cell = None
+ target = None
+
+ for child in rowItem:
+            if (isinstance(child, nodes.comment)
+                    or isinstance(child, nodes.system_message)):
+                pass
+            elif isinstance(child, nodes.target):
+ target = child
+ elif isinstance(child, nodes.bullet_list):
+ childNo += 1
+ cell = child
+ else:
+ error = True
+ break
+
+ if childNo != 1 or error:
+ self.raiseError(
+ 'Error parsing content block for the "%s" directive: '
+ 'two-level bullet list expected, but row %s does not '
+ 'contain a second-level bullet list.'
+ % (self.directive.name, rowNum + 1))
+
+ for cellItem in cell:
+ cspan, rspan, cellElements = self.parseCellItem(cellItem)
+ if target is not None:
+ cellElements.insert(0, target)
+ row.append( (cspan, rspan, cellElements) )
+ return row
+
+ def parseCellItem(self, cellItem):
+ # search and remove cspan, rspan colspec from the first element in
+ # this listItem (field).
+ cspan = rspan = 0
+ if not len(cellItem):
+ return cspan, rspan, []
+ for elem in cellItem[0]:
+ if isinstance(elem, colSpan):
+ cspan = elem.get("span")
+ elem.parent.remove(elem)
+ continue
+ if isinstance(elem, rowSpan):
+ rspan = elem.get("span")
+ elem.parent.remove(elem)
+ continue
+ return cspan, rspan, cellItem[:]
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html
new file mode 100644
index 000000000000..b84969bd31c4
--- /dev/null
+++ b/Documentation/sphinx/templates/kernel-toc.html
@@ -0,0 +1,19 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{# Create a local TOC the kernel way #}
+<p>
+<h3 class="kernel-toc-contents">Contents</h3>
+<input type="checkbox" class="kernel-toc-toggle" id = "kernel-toc-toggle" checked>
+<label class="kernel-toc-title" for="kernel-toc-toggle"></label>
+
+<div class="kerneltoc" id="kerneltoc">
+{{ toctree(maxdepth=3) }}
+</div>
+{# hacky script to try to position the left column #}
+<script type="text/javascript"> <!--
+ var sbar = document.getElementsByClassName("sphinxsidebar")[0];
+ let currents = document.getElementsByClassName("current")
+ if (currents.length) {
+ sbar.scrollTop = currents[currents.length - 1].offsetTop;
+ }
+ --> </script>
diff --git a/Documentation/sphinx/templates/translations.html b/Documentation/sphinx/templates/translations.html
new file mode 100644
index 000000000000..351586f41938
--- /dev/null
+++ b/Documentation/sphinx/templates/translations.html
@@ -0,0 +1,15 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{# Copyright © 2023, Oracle and/or its affiliates. #}
+
+{# Create a language menu for translations #}
+{% if languages|length > 0: %}
+<div class="language-selection">
+{{ current_language }}
+
+<ul>
+{% for ref in languages: %}
+<li><a href="{{ ref.refuri }}">{{ ref.astext() }}</a></li>
+{% endfor %}
+</ul>
+</div>
+{% endif %}
diff --git a/Documentation/sphinx/translations.py b/Documentation/sphinx/translations.py
new file mode 100644
index 000000000000..32c2b32b2b5e
--- /dev/null
+++ b/Documentation/sphinx/translations.py
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright © 2023, Oracle and/or its affiliates.
+# Author: Vegard Nossum <vegard.nossum@oracle.com>
+#
+# Add translation links to the top of the document.
+#
+
+import os
+
+from docutils import nodes
+from docutils.transforms import Transform
+
+import sphinx
+from sphinx import addnodes
+from sphinx.errors import NoUri
+
+all_languages = {
+ # English is always first
+ None: 'English',
+
+ # Keep the rest sorted alphabetically
+ 'zh_CN': 'Chinese (Simplified)',
+ 'zh_TW': 'Chinese (Traditional)',
+ 'it_IT': 'Italian',
+ 'ja_JP': 'Japanese',
+ 'ko_KR': 'Korean',
+ 'sp_SP': 'Spanish',
+}
+
+class LanguagesNode(nodes.Element):
+ pass
+
+class TranslationsTransform(Transform):
+ default_priority = 900
+
+ def apply(self):
+ app = self.document.settings.env.app
+ docname = self.document.settings.env.docname
+
+ this_lang_code = None
+ components = docname.split(os.sep)
+ if components[0] == 'translations' and len(components) > 2:
+ this_lang_code = components[1]
+
+ # normalize docname to be the untranslated one
+ docname = os.path.join(*components[2:])
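+            # e.g. 'translations/zh_CN/process/howto' -> 'process/howto'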
+
+ new_nodes = LanguagesNode()
+ new_nodes['current_language'] = all_languages[this_lang_code]
+
+ for lang_code, lang_name in all_languages.items():
+ if lang_code == this_lang_code:
+ continue
+
+ if lang_code is None:
+ target_name = docname
+ else:
+ target_name = os.path.join('translations', lang_code, docname)
+
+ pxref = addnodes.pending_xref('', refdomain='std',
+ reftype='doc', reftarget='/' + target_name, modname=None,
+ classname=None, refexplicit=True)
+ pxref += nodes.Text(lang_name)
+ new_nodes += pxref
+
+ self.document.insert(0, new_nodes)
+
+def process_languages(app, doctree, docname):
+ for node in doctree.traverse(LanguagesNode):
+ if app.builder.format not in ['html']:
+ node.parent.remove(node)
+ continue
+
+
+ # Iterate over the child nodes; any resolved links will have
+ # the type 'nodes.reference', while unresolved links will be
+ # type 'nodes.Text'.
+ languages = list(filter(lambda xref:
+ isinstance(xref, nodes.reference), node.children))
+
+ html_content = app.builder.templates.render('translations.html',
+ context={
+ 'current_language': node['current_language'],
+ 'languages': languages,
+ })
+
+ node.replace_self(nodes.raw('', html_content, format='html'))
+
+def setup(app):
+ app.add_node(LanguagesNode)
+ app.add_transform(TranslationsTransform)
+ app.connect('doctree-resolved', process_languages)
+
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }