Diffstat (limited to 'Documentation/sphinx')
-rw-r--r--   Documentation/sphinx/automarkup.py                 162
-rw-r--r--   Documentation/sphinx/cdomain.py                    249
-rw-r--r--   Documentation/sphinx/kernel_abi.py                 164
-rw-r--r--   Documentation/sphinx/kernel_feat.py                 34
-rwxr-xr-x   Documentation/sphinx/kernel_include.py             611
-rw-r--r--   Documentation/sphinx/kerneldoc-preamble.sty          2
-rw-r--r--   Documentation/sphinx/kerneldoc.py                  229
-rw-r--r--   Documentation/sphinx/kernellog.py                   22
-rw-r--r--   Documentation/sphinx/kfigure.py                     92
-rw-r--r--   Documentation/sphinx/load_config.py                 59
-rwxr-xr-x   Documentation/sphinx/maintainers_include.py          8
-rw-r--r--   Documentation/sphinx/min_requirements.txt           11
-rw-r--r--   Documentation/sphinx/parallel-wrapper.sh            33
-rwxr-xr-x   Documentation/sphinx/parse-headers.pl              401
-rwxr-xr-x   Documentation/sphinx/parser_yaml.py                123
-rw-r--r--   Documentation/sphinx/requirements.txt                1
-rwxr-xr-x   Documentation/sphinx/rstFlatTable.py                11
-rw-r--r--   Documentation/sphinx/templates/kernel-toc.html       3
-rw-r--r--   Documentation/sphinx/templates/translations.html     4
19 files changed, 1047 insertions, 1172 deletions
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index a413f8dd5115..1d9dada40a74 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -11,13 +11,7 @@ from sphinx.errors import NoUri
import re
from itertools import chain
-#
-# Python 2 lacks re.ASCII...
-#
-try:
- ascii_p3 = re.ASCII
-except AttributeError:
- ascii_p3 = 0
+from kernel_abi import get_kernel_abi
#
# Regex nastiness. Of course.
@@ -26,28 +20,24 @@ except AttributeError:
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
-RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
-
-#
-# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
-#
-RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
- flags=ascii_p3)
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
-RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
+RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)')
+RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
@@ -83,11 +73,10 @@ def markup_refs(docname, app, node):
#
# Associate each regex with the function that will markup its matches
#
- markup_func_sphinx2 = {RE_doc: markup_doc_ref,
- RE_function: markup_c_ref,
- RE_generic_type: markup_c_ref}
- markup_func_sphinx3 = {RE_doc: markup_doc_ref,
+ markup_func = {RE_doc: markup_doc_ref,
+ RE_abi_file: markup_abi_file_ref,
+ RE_abi_symbol: markup_abi_ref,
RE_function: markup_func_ref_sphinx3,
RE_struct: markup_c_ref,
RE_union: markup_c_ref,
@@ -95,11 +84,6 @@ def markup_refs(docname, app, node):
RE_typedef: markup_c_ref,
RE_git: markup_git}
- if sphinx.version_info[0] >= 3:
- markup_func = markup_func_sphinx3
- else:
- markup_func = markup_func_sphinx2
-
match_iterators = [regex.finditer(t) for regex in markup_func]
#
# Sort all references by the starting position in text
@@ -138,13 +122,8 @@ def note_failure(target):
# own C role, but both match the same regex, so we try both.
#
def markup_func_ref_sphinx3(docname, app, match):
- cdom = app.env.domains['c']
- #
- # Go through the dance of getting an xref out of the C domain
- #
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
- xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
@@ -156,22 +135,8 @@ def markup_func_ref_sphinx3(docname, app, match):
if (target not in Skipfuncs) and not failure_seen(target):
lit_text = nodes.literal(classes=['xref', 'c', 'c-func'])
lit_text += target_text
- pxref = addnodes.pending_xref('', refdomain = 'c',
- reftype = 'function',
- reftarget = target,
- modname = None,
- classname = None)
- #
- # XXX The Latex builder will throw NoUri exceptions here,
- # work around that by ignoring them.
- #
- try:
- xref = cdom.resolve_xref(app.env, docname, app.builder,
- 'function', target, pxref,
- lit_text)
- except NoUri:
- xref = None
-
+ xref = add_and_resolve_xref(app, docname, 'c', 'function',
+ target, contnode=lit_text)
if xref:
return xref
note_failure(target)
@@ -179,32 +144,19 @@ def markup_func_ref_sphinx3(docname, app, match):
return target_text
def markup_c_ref(docname, app, match):
- class_str = {# Sphinx 2 only
- RE_function: 'c-func',
- RE_generic_type: 'c-type',
- # Sphinx 3+ only
- RE_struct: 'c-struct',
+ class_str = {RE_struct: 'c-struct',
RE_union: 'c-union',
RE_enum: 'c-enum',
RE_typedef: 'c-type',
}
- reftype_str = {# Sphinx 2 only
- RE_function: 'function',
- RE_generic_type: 'type',
- # Sphinx 3+ only
- RE_struct: 'struct',
+ reftype_str = {RE_struct: 'struct',
RE_union: 'union',
RE_enum: 'enum',
RE_typedef: 'type',
}
- cdom = app.env.domains['c']
- #
- # Go through the dance of getting an xref out of the C domain
- #
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
- xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
@@ -216,21 +168,9 @@ def markup_c_ref(docname, app, match):
if not (match.re == RE_function and target in Skipfuncs):
lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
lit_text += target_text
- pxref = addnodes.pending_xref('', refdomain = 'c',
- reftype = reftype_str[match.re],
- reftarget = target, modname = None,
- classname = None)
- #
- # XXX The Latex builder will throw NoUri exceptions here,
- # work around that by ignoring them.
- #
- try:
- xref = cdom.resolve_xref(app.env, docname, app.builder,
- reftype_str[match.re], target, pxref,
- lit_text)
- except NoUri:
- xref = None
-
+ xref = add_and_resolve_xref(app, docname, 'c',
+ reftype_str[match.re], target,
+ contnode=lit_text)
if xref:
return xref
@@ -241,34 +181,74 @@ def markup_c_ref(docname, app, match):
# cross reference to that page
#
def markup_doc_ref(docname, app, match):
- stddom = app.env.domains['std']
- #
- # Go through the dance of getting an xref out of the std domain
- #
absolute = match.group(1)
target = match.group(2)
if absolute:
target = "/" + target
- xref = None
- pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
+
+ xref = add_and_resolve_xref(app, docname, 'std', 'doc', target)
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+#
+# Try to replace a documentation reference for ABI symbols and files
+# with a cross reference to that page
+#
+def markup_abi_ref(docname, app, match, warning=False):
+ kernel_abi = get_kernel_abi()
+
+ fname = match.group(1)
+ target = kernel_abi.xref(fname)
+
+ # The kernel ABI files don't describe such a file or symbol
+ if not target:
+ if warning:
+ kernel_abi.log.warning("%s not found", fname)
+ return nodes.Text(match.group(0))
+
+ xref = add_and_resolve_xref(app, docname, 'std', 'ref', target)
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None):
+ #
+ # Go through the dance of getting an xref out of the corresponding domain
+ #
+ dom_obj = app.env.domains[domain]
+ pxref = addnodes.pending_xref('', refdomain = domain, reftype = reftype,
reftarget = target, modname = None,
classname = None, refexplicit = False)
+
#
# XXX The Latex builder will throw NoUri exceptions here,
# work around that by ignoring them.
#
try:
- xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
- target, pxref, None)
+ xref = dom_obj.resolve_xref(app.env, docname, app.builder, reftype,
+ target, pxref, contnode)
except NoUri:
xref = None
- #
- # Return the xref if we got it; otherwise just return the plain text.
- #
+
if xref:
return xref
- else:
- return nodes.Text(match.group(0))
+ #
+ # We didn't find the xref; if a container node was supplied,
+ # mark it as a broken xref
+ #
+ if contnode:
+ contnode['classes'].append("broken_xref")
+ return contnode
+
+#
+# Variant of markup_abi_ref() that warns when a reference is not found
+#
+def markup_abi_file_ref(docname, app, match):
+ return markup_abi_ref(docname, app, match, warning=True)
+
def get_c_namespace(app, docname):
source = app.env.doc2path(docname)
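
As a rough illustration of what the new RE_abi_file pattern is meant to catch when automarkup scans documentation text (the sentence below is made up, not taken from the patch):

    import re

    # Same pattern the patch adds above for ABI file references
    RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)')

    # Illustrative documentation sentence
    text = "See Documentation/ABI/testing/sysfs-kernel-mm for the attribute description."

    print(RE_abi_file.findall(text))
    # -> ['Documentation/ABI/testing/sysfs-kernel-mm']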
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
deleted file mode 100644
index e6959af25402..000000000000
--- a/Documentation/sphinx/cdomain.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-# pylint: disable=W0141,C0113,C0103,C0325
-u"""
- cdomain
- ~~~~~~~
-
- Replacement for the sphinx c-domain.
-
- :copyright: Copyright (C) 2016 Markus Heiser
- :license: GPL Version 2, June 1991 see Linux/COPYING for details.
-
- List of customizations:
-
- * Moved the *duplicate C object description* warnings for function
- declarations in the nitpicky mode. See Sphinx documentation for
- the config values for ``nitpick`` and ``nitpick_ignore``.
-
- * Add option 'name' to the "c:function:" directive. With option 'name' the
- ref-name of a function can be modified. E.g.::
-
- .. c:function:: int ioctl( int fd, int request )
- :name: VIDIOC_LOG_STATUS
-
- The func-name (e.g. ioctl) remains in the output but the ref-name changed
- from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
-
- * :c:func:`VIDIOC_LOG_STATUS` or
- * :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
-
- * Handle signatures of function-like macros well. Don't try to deduce
- arguments types of function-like macros.
-
-"""
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-import sphinx
-from sphinx import addnodes
-from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
-from sphinx.domains.c import CObject as Base_CObject
-from sphinx.domains.c import CDomain as Base_CDomain
-from itertools import chain
-import re
-
-__version__ = '1.1'
-
-# Get Sphinx version
-major, minor, patch = sphinx.version_info[:3]
-
-# Namespace to be prepended to the full name
-namespace = None
-
-#
-# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags
-# - Store the namespace if ".. c:namespace::" tag is found
-#
-RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
-
-def markup_namespace(match):
- global namespace
-
- namespace = match.group(1)
-
- return ""
-
-#
-# Handle c:macro for function-style declaration
-#
-RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
-def markup_macro(match):
- return ".. c:function:: " + match.group(1) + ' ' + match.group(2)
-
-#
-# Handle newer c domain tags that are evaluated as .. c:type: for
-# backward-compatibility with Sphinx < 3.0
-#
-RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')
-
-def markup_ctype(match):
- return ".. c:type:: " + match.group(2)
-
-#
-# Handle newer c domain tags that are evaluated as :c:type: for
-# backward-compatibility with Sphinx < 3.0
-#
-RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')
-def markup_ctype_refs(match):
- return ":c:type:`" + match.group(2) + '`'
-
-#
-# Simply convert :c:expr: and :c:texpr: into a literal block.
-#
-RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
-def markup_c_expr(match):
- return '\\ ``' + match.group(2) + '``\\ '
-
-#
-# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
-#
-def c_markups(app, docname, source):
- result = ""
- markup_func = {
- RE_namespace: markup_namespace,
- RE_expr: markup_c_expr,
- RE_macro: markup_macro,
- RE_ctype: markup_ctype,
- RE_ctype_refs: markup_ctype_refs,
- }
-
- lines = iter(source[0].splitlines(True))
- for n in lines:
- match_iterators = [regex.finditer(n) for regex in markup_func]
- matches = sorted(chain(*match_iterators), key=lambda m: m.start())
- for m in matches:
- n = n[:m.start()] + markup_func[m.re](m) + n[m.end():]
-
- result = result + n
-
- source[0] = result
-
-#
-# Now implements support for the cdomain namespacing logic
-#
-
-def setup(app):
-
- # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
- app.connect('source-read', c_markups)
- app.add_domain(CDomain, override=True)
-
- return dict(
- version = __version__,
- parallel_read_safe = True,
- parallel_write_safe = True
- )
-
-class CObject(Base_CObject):
-
- """
- Description of a C language object.
- """
- option_spec = {
- "name" : directives.unchanged
- }
-
- def handle_func_like_macro(self, sig, signode):
- u"""Handles signatures of function-like macros.
-
- If the objtype is 'function' and the signature ``sig`` is a
- function-like macro, the name of the macro is returned. Otherwise
- ``False`` is returned. """
-
- global namespace
-
- if not self.objtype == 'function':
- return False
-
- m = c_funcptr_sig_re.match(sig)
- if m is None:
- m = c_sig_re.match(sig)
- if m is None:
- raise ValueError('no match')
-
- rettype, fullname, arglist, _const = m.groups()
- arglist = arglist.strip()
- if rettype or not arglist:
- return False
-
- arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
- arglist = [a.strip() for a in arglist.split(",")]
-
- # has the first argument a type?
- if len(arglist[0].split(" ")) > 1:
- return False
-
- # This is a function-like macro, its arguments are typeless!
- signode += addnodes.desc_name(fullname, fullname)
- paramlist = addnodes.desc_parameterlist()
- signode += paramlist
-
- for argname in arglist:
- param = addnodes.desc_parameter('', '', noemph=True)
- # separate by non-breaking space in the output
- param += nodes.emphasis(argname, argname)
- paramlist += param
-
- if namespace:
- fullname = namespace + "." + fullname
-
- return fullname
-
- def handle_signature(self, sig, signode):
- """Transform a C signature into RST nodes."""
-
- global namespace
-
- fullname = self.handle_func_like_macro(sig, signode)
- if not fullname:
- fullname = super(CObject, self).handle_signature(sig, signode)
-
- if "name" in self.options:
- if self.objtype == 'function':
- fullname = self.options["name"]
- else:
- # FIXME: handle :name: value of other declaration types?
- pass
- else:
- if namespace:
- fullname = namespace + "." + fullname
-
- return fullname
-
- def add_target_and_index(self, name, sig, signode):
- # for C API items we add a prefix since names are usually not qualified
- # by a module name and so easily clash with e.g. section titles
- targetname = 'c.' + name
- if targetname not in self.state.document.ids:
- signode['names'].append(targetname)
- signode['ids'].append(targetname)
- signode['first'] = (not self.names)
- self.state.document.note_explicit_target(signode)
- inv = self.env.domaindata['c']['objects']
- if (name in inv and self.env.config.nitpicky):
- if self.objtype == 'function':
- if ('c:func', name) not in self.env.config.nitpick_ignore:
- self.state_machine.reporter.warning(
- 'duplicate C object description of %s, ' % name +
- 'other instance in ' + self.env.doc2path(inv[name][0]),
- line=self.lineno)
- inv[name] = (self.env.docname, self.objtype)
-
- indextext = self.get_index_text(name)
- if indextext:
- self.indexnode['entries'].append(
- ('single', indextext, targetname, '', None))
-
-class CDomain(Base_CDomain):
-
- """C language domain."""
- name = 'c'
- label = 'C'
- directives = {
- 'function': CObject,
- 'member': CObject,
- 'macro': CObject,
- 'type': CObject,
- 'var': CObject,
- }
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
index 5911bd0d7965..5667f207d175 100644
--- a/Documentation/sphinx/kernel_abi.py
+++ b/Documentation/sphinx/kernel_abi.py
@@ -2,7 +2,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-abi
~~~~~~~~~~
@@ -14,7 +14,7 @@ u"""
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
- scripts/get_abi.pl script to parse the Kernel ABI files.
+ AbiParser class to parse the Kernel ABI files.
Overview of directive's argument and options.
@@ -32,107 +32,139 @@ u"""
"""
-import codecs
import os
-import subprocess
-import sys
import re
-import kernellog
+import sys
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from abi.abi_parser import AbiParser
+
+__version__ = "1.0"
+
+logger = logging.getLogger('kernel_abi')
+path = os.path.join(srctree, "Documentation/ABI")
-__version__ = '1.0'
+_kernel_abi = None
+
+def get_kernel_abi():
+ """
+ Initialize kernel_abi global var, if not initialized yet.
+
+ This is needed to avoid warnings during Sphinx module initialization.
+ """
+ global _kernel_abi
+
+ if not _kernel_abi:
+ # Parse ABI symbols only once
+ _kernel_abi = AbiParser(path, logger=logger)
+ _kernel_abi.parse_abi()
+ _kernel_abi.check_issues()
+
+ return _kernel_abi
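
A minimal sketch of how other extensions consume this helper, as automarkup.py does above; the sysfs path and the printed messages are illustrative only:

    from kernel_abi import get_kernel_abi

    kernel_abi = get_kernel_abi()      # Documentation/ABI is parsed only once

    # xref() returns a cross-reference target when the ABI files describe
    # the given file or symbol, and a falsy value otherwise.
    target = kernel_abi.xref("/sys/kernel/mm")
    if target:
        print("cross-reference target:", target)
    else:
        print("not described by Documentation/ABI")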
def setup(app):
app.add_directive("kernel-abi", KernelCmd)
- return dict(
- version = __version__
- , parallel_read_safe = True
- , parallel_write_safe = True
- )
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True
+ }
-class KernelCmd(Directive):
- u"""KernelABI (``kernel-abi``) directive"""
+class KernelCmd(Directive):
+ """KernelABI (``kernel-abi``) directive"""
required_arguments = 1
- optional_arguments = 2
+ optional_arguments = 3
has_content = False
final_argument_whitespace = True
+ parser = None
option_spec = {
- "debug" : directives.flag,
- "rst" : directives.unchanged
+ "debug": directives.flag,
+ "no-symbols": directives.flag,
+ "no-files": directives.flag,
}
def run(self):
+ kernel_abi = get_kernel_abi()
+
doc = self.state.document
if not doc.settings.file_insertion_enabled:
raise self.warning("docutils: file insertion disabled")
- srctree = os.path.abspath(os.environ["srctree"])
-
- args = [
- os.path.join(srctree, 'scripts/get_abi.pl'),
- 'rest',
- '--enable-lineno',
- '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
- ]
-
- if 'rst' in self.options:
- args.append('--rst-source')
-
- lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
- nodeList = self.nestedParse(lines, self.arguments[0])
- return nodeList
-
- def nestedParse(self, lines, fname):
env = self.state.document.settings.env
content = ViewList()
node = nodes.section()
- if "debug" in self.options:
- code_block = "\n\n.. code-block:: rst\n :linenos:\n"
- for l in lines.split("\n"):
- code_block += "\n " + l
- lines = code_block + "\n\n"
+ abi_type = self.arguments[0]
- line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$")
- ln = 0
- n = 0
- f = fname
+ if "no-symbols" in self.options:
+ show_symbols = False
+ else:
+ show_symbols = True
- for line in lines.split("\n"):
- n = n + 1
- match = line_regex.search(line)
- if match:
- new_f = match.group(1)
+ if "no-files" in self.options:
+ show_file = False
+ else:
+ show_file = True
- # Sphinx parser is lazy: it stops parsing contents in the
- # middle, if it is too big. So, handle it per input file
- if new_f != f and content:
- self.do_parse(content, node)
- content = ViewList()
+ tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
- # Add the file to Sphinx build dependencies
- env.note_dependency(os.path.abspath(f))
-
- f = new_f
-
- # sphinx counts lines from 0
- ln = int(match.group(2)) - 1
+ old_f = None
+ n = 0
+ n_sym = 0
+ for msg, f, ln in kernel_abi.doc(show_file=show_file,
+ show_symbols=show_symbols,
+ filter_path=abi_type):
+ n_sym += 1
+ msg_list = statemachine.string2lines(msg, tab_width,
+ convert_whitespace=True)
+ if "debug" in self.options:
+ lines = [
+ "", "", ".. code-block:: rst",
+ " :linenos:", ""
+ ]
+ for m in msg_list:
+ lines.append(" " + m)
else:
- content.append(line, f, ln)
-
- kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))
+ lines = msg_list
- if content:
- self.do_parse(content, node)
+ for line in lines:
+ # sphinx counts lines from 0
+ content.append(line, f, ln - 1)
+ n += 1
+
+ if f != old_f:
+ # Add the file to Sphinx build dependencies if the file exists
+ fname = os.path.join(srctree, f)
+ if os.path.isfile(fname):
+ env.note_dependency(fname)
+
+ old_f = f
+
+ # Sphinx doesn't like to parse big messages. So, let's
+ # add content symbol by symbol
+ if content:
+ self.do_parse(content, node)
+ content = ViewList()
+
+ if show_symbols and not show_file:
+ logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n))
+ elif not show_symbols and show_file:
+ logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n))
+ else:
+ logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n))
return node.children
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index 03ace5f01b5c..bdc0fef5c87f 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -1,7 +1,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-feat
~~~~~~~~~~~
@@ -13,7 +13,7 @@ u"""
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-feat`` (:py:class:`KernelFeat`) directive calls the
- scripts/get_feat.pl script to parse the Kernel ABI files.
+ tools/docs/get_feat.pl script to parse the kernel feature files.
Overview of directive's argument and options.
@@ -34,15 +34,21 @@ u"""
import codecs
import os
import re
-import subprocess
import sys
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from feat.parse_features import ParseFeature # pylint: disable=C0413
+
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
+
__version__ = '1.0'
def setup(app):
@@ -56,7 +62,7 @@ def setup(app):
class KernelFeat(Directive):
- u"""KernelFeat (``kernel-feat``) directive"""
+ """KernelFeat (``kernel-feat``) directive"""
required_arguments = 1
optional_arguments = 2
@@ -82,18 +88,16 @@ class KernelFeat(Directive):
srctree = os.path.abspath(os.environ["srctree"])
- args = [
- os.path.join(srctree, 'scripts/get_feat.pl'),
- 'rest',
- '--enable-fname',
- '--dir',
- os.path.join(srctree, 'Documentation', self.arguments[0]),
- ]
+ feature_dir = os.path.join(srctree, 'Documentation', self.arguments[0])
- if len(self.arguments) > 1:
- args.extend(['--arch', self.arguments[1]])
+ feat = ParseFeature(feature_dir, False, True)
+ feat.parse()
- lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
+ if len(self.arguments) > 1:
+ arch = self.arguments[1]
+ lines = feat.output_arch_table(arch)
+ else:
+ lines = feat.output_matrix()
line_regex = re.compile(r"^\.\. FILE (\S+)$")
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index 638762442336..626762ff6af3 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -1,30 +1,82 @@
#!/usr/bin/env python3
-# -*- coding: utf-8; mode: python -*-
-# pylint: disable=R0903, C0330, R0914, R0912, E0401
+# SPDX-License-Identifier: GPL-2.0
+# pylint: disable=R0903, R0912, R0914, R0915, C0209,W0707
-u"""
- kernel-include
- ~~~~~~~~~~~~~~
- Implementation of the ``kernel-include`` reST-directive.
+"""
+Implementation of the ``kernel-include`` reST-directive.
+
+:copyright: Copyright (C) 2016 Markus Heiser
+:license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+The ``kernel-include`` reST-directive is a replacement for the ``include``
+directive. The ``kernel-include`` directive expands environment variables in
+the path name and allows including files from arbitrary locations.
+
+.. hint::
+
+ Including files from arbitrary locations (e.g. from ``/etc``) is a
+ security risk for builders. This is why the ``include`` directive from
+ docutils *prohibits* pathnames pointing to locations *above* the filesystem
+ tree where the reST document with the include directive is placed.
+
+Substrings of the form $name or ${name} are replaced by the value of
+environment variable name. Malformed variable names and references to
+non-existing variables are left unchanged.
+
+**Supported Sphinx Include Options**:
+
+:param literal:
+ If present, the included file is inserted as a literal block.
+
+:param code:
+ Specify the language for syntax highlighting (e.g., 'c', 'python').
+
+:param encoding:
+ Specify the encoding of the included file (default: 'utf-8').
+
+:param tab-width:
+ Specify the number of spaces that a tab represents.
+
+:param start-line:
+ Line number at which to start including the file (1-based).
+
+:param end-line:
+ Line number at which to stop including the file (inclusive).
+
+:param start-after:
+ Include lines after the first line matching this text.
+
+:param end-before:
+ Include lines before the first line matching this text.
+
+:param number-lines:
+ Number the included lines (integer specifies start number).
+ Only effective with 'literal' or 'code' options.
+
+:param class:
+ Specify HTML class attribute for the included content.
- :copyright: Copyright (C) 2016 Markus Heiser
- :license: GPL Version 2, June 1991 see linux/COPYING for details.
+**Kernel-specific Extensions**:
- The ``kernel-include`` reST-directive is a replacement for the ``include``
- directive. The ``kernel-include`` directive expand environment variables in
- the path name and allows to include files from arbitrary locations.
+:param generate-cross-refs:
+ If present, instead of directly including the file, it calls
+ ParseDataStructs() to convert C data structures into cross-references
+ that link to comprehensive documentation in other ReST files.
- .. hint::
+:param exception-file:
+ (Used with generate-cross-refs)
- Including files from arbitrary locations (e.g. from ``/etc``) is a
- security risk for builders. This is why the ``include`` directive from
- docutils *prohibit* pathnames pointing to locations *above* the filesystem
- tree where the reST document with the include directive is placed.
+ Path to a file containing rules for handling special cases:
+ - Ignore specific C data structures
+ - Use alternative reference names
+ - Specify different reference types
- Substrings of the form $name or ${name} are replaced by the value of
- environment variable name. Malformed variable names and references to
- non-existing variables are left unchanged.
+:param warn-broken:
+ (Used with generate-cross-refs)
+
+ Enables warnings when auto-generated cross-references don't point to
+ existing documentation targets.
"""
# ==============================================================================
@@ -32,161 +84,444 @@ u"""
# ==============================================================================
import os.path
+import re
+import sys
+
+from difflib import get_close_matches
from docutils import io, nodes, statemachine
-from docutils.utils.error_reporting import SafeString, ErrorString
-from docutils.parsers.rst import directives
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
-from docutils.parsers.rst.directives.misc import Include
-__version__ = '1.0'
+from sphinx.util import logging
-# ==============================================================================
-def setup(app):
-# ==============================================================================
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
- app.add_directive("kernel-include", KernelInclude)
- return dict(
- version = __version__,
- parallel_read_safe = True,
- parallel_write_safe = True
- )
+from kdoc.parse_data_structs import ParseDataStructs
-# ==============================================================================
-class KernelInclude(Include):
-# ==============================================================================
+__version__ = "1.0"
+logger = logging.getLogger(__name__)
- u"""KernelInclude (``kernel-include``) directive"""
+RE_DOMAIN_REF = re.compile(r'\\ :(ref|c:type|c:func):`([^<`]+)(?:<([^>]+)>)?`\\')
+RE_SIMPLE_REF = re.compile(r'`([^`]+)`')
+RE_LINENO_REF = re.compile(r'^\s*-\s+LINENO_(\d+):\s+(.*)')
+RE_SPLIT_DOMAIN = re.compile(r"(.*)\.(.*)")
- def run(self):
- env = self.state.document.settings.env
- path = os.path.realpath(
- os.path.expandvars(self.arguments[0]))
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
- # to get a bit security back, prohibit /etc:
- if path.startswith(os.sep + "etc"):
- raise self.severe(
- 'Problems with "%s" directive, prohibited path: %s'
- % (self.name, path))
- self.arguments[0] = path
+# ==============================================================================
+class KernelInclude(Directive):
+ """
+ KernelInclude (``kernel-include``) directive
- env.note_dependency(os.path.abspath(path))
+ Most of the stuff here came from Include directive defined at:
+ docutils/parsers/rst/directives/misc.py
- #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
- return self._run()
+ Yet, overriding the class doesn't have any benefit: the original class
+ only has run() and the argument list. Not all arguments are implemented
+ here when checked against the latest Sphinx version, as more arguments
+ were added over time.
- def _run(self):
- """Include a file as part of the content of this reST file."""
+ So, keep our own list of supported arguments here.
+ """
- # HINT: I had to copy&paste the whole Include.run method. I'am not happy
- # with this, but due to security reasons, the Include.run method does
- # not allow absolute or relative pathnames pointing to locations *above*
- # the filesystem tree where the reST document is placed.
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = True
+ option_spec = {
+ 'literal': directives.flag,
+ 'code': directives.unchanged,
+ 'encoding': directives.encoding,
+ 'tab-width': int,
+ 'start-line': int,
+ 'end-line': int,
+ 'start-after': directives.unchanged_required,
+ 'end-before': directives.unchanged_required,
+ # ignored except for 'literal' or 'code':
+ 'number-lines': directives.unchanged, # integer or None
+ 'class': directives.class_option,
- if not self.state.document.settings.file_insertion_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
- source = self.state_machine.input_lines.source(
- self.lineno - self.state_machine.input_offset - 1)
- source_dir = os.path.dirname(os.path.abspath(source))
- path = directives.path(self.arguments[0])
- if path.startswith('<') and path.endswith('>'):
- path = os.path.join(self.standard_include_path, path[1:-1])
- path = os.path.normpath(os.path.join(source_dir, path))
+ # Arguments that aren't from Sphinx Include directive
+ 'generate-cross-refs': directives.flag,
+ 'warn-broken': directives.flag,
+ 'toc': directives.flag,
+ 'exception-file': directives.unchanged,
+ }
- # HINT: this is the only line I had to change / commented out:
- #path = utils.relative_path(None, path)
+ def read_rawtext(self, path, encoding):
+ """Read and process file content with error handling"""
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(source_path=path,
+ encoding=encoding,
+ error_handler=self.state.document.settings.input_encoding_error_handler)
+ except UnicodeEncodeError:
+ raise self.severe('Problems with directive path:\n'
+ 'Cannot encode input file path "%s" '
+ '(wrong locale?).' % path)
+ except IOError as error:
+ raise self.severe('Problems with directive path:\n%s.' % ErrorString(error))
- encoding = self.options.get(
- 'encoding', self.state.document.settings.input_encoding)
- e_handler=self.state.document.settings.input_encoding_error_handler
- tab_width = self.options.get(
- 'tab-width', self.state.document.settings.tab_width)
- try:
- self.state.document.settings.record_dependencies.add(path)
- include_file = io.FileInput(source_path=path,
- encoding=encoding,
- error_handler=e_handler)
- except UnicodeEncodeError as error:
- raise self.severe('Problems with "%s" directive path:\n'
- 'Cannot encode input file path "%s" '
- '(wrong locale?).' %
- (self.name, SafeString(path)))
- except IOError as error:
- raise self.severe('Problems with "%s" directive path:\n%s.' %
- (self.name, ErrorString(error)))
+ try:
+ return include_file.read()
+ except UnicodeError as error:
+ raise self.severe('Problem with directive:\n%s' % ErrorString(error))
+
+ def apply_range(self, rawtext):
+ """
+ Handles start-line, end-line, start-after and end-before parameters
+ """
+
+ # Get to-be-included content
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
- lines = include_file.readlines()
- rawtext = ''.join(lines[startline:endline])
- else:
- rawtext = include_file.read()
+ lines = rawtext.splitlines()
+ rawtext = '\n'.join(lines[startline:endline])
except UnicodeError as error:
- raise self.severe('Problem with "%s" directive:\n%s' %
- (self.name, ErrorString(error)))
+ raise self.severe(f'Problem with "{self.name}" directive:\n'
+ + io.error_string(error))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
- after_text = self.options.get('start-after', None)
+ after_text = self.options.get("start-after", None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
- 'directive:\nText not found.' % self.name)
- rawtext = rawtext[after_index + len(after_text):]
- before_text = self.options.get('end-before', None)
+ "directive:\nText not found." % self.name)
+ rawtext = rawtext[after_index + len(after_text) :]
+ before_text = self.options.get("end-before", None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
- 'directive:\nText not found.' % self.name)
+ "directive:\nText not found." % self.name)
rawtext = rawtext[:before_index]
+ return rawtext
+
+ def xref_text(self, env, path, tab_width):
+ """
+ Read and add contents from a C file parsed to have cross references.
+
+ There are two types of supported output here:
+ - A C source code with cross-references;
+ - a TOC table containing cross references.
+ """
+ parser = ParseDataStructs()
+
+ if 'exception-file' in self.options:
+ source_dir = os.path.dirname(os.path.abspath(
+ self.state_machine.input_lines.source(
+ self.lineno - self.state_machine.input_offset - 1)))
+ exceptions_file = os.path.join(source_dir, self.options['exception-file'])
+ else:
+ exceptions_file = None
+
+ parser.parse_file(path, exceptions_file)
+
+ # Store references on a symbol dict to be used at check time
+ if 'warn-broken' in self.options:
+ env._xref_files.add(path)
+
+ if "toc" not in self.options:
+
+ rawtext = ".. parsed-literal::\n\n" + parser.gen_output()
+ rawtext = self.apply_range(rawtext)
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+
+ # Sphinx always blames the ".. <directive>", so placing
+ # line numbers here won't make any difference
+
+ self.state_machine.insert_input(include_lines, path)
+ return []
+
+ # TOC output is a ReST file, not a literal. So, we can add line
+ # numbers
+
+ startline = self.options.get('start-line', None)
+ endline = self.options.get('end-line', None)
+
+ relpath = os.path.relpath(path, srctree)
+
+ result = ViewList()
+ for line in parser.gen_toc().split("\n"):
+ match = RE_LINENO_REF.match(line)
+ if not match:
+ result.append(line, path)
+ continue
+
+ ln, ref = match.groups()
+ ln = int(ln)
+
+ # Filter line range if needed
+ if startline and (ln < startline):
+ continue
+
+ if endline and (ln > endline):
+ continue
+
+ # Sphinx counts lines starting with zero, but text editors
+ # and other tools start from one
+ realln = ln + 1
+ result.append(f"- {ref}: {relpath}#{realln}", path, ln)
+
+ self.state_machine.insert_input(result, path)
+
+ return []
+
+ def literal(self, path, tab_width, rawtext):
+ """Output a literal block"""
+
+ # Convert tabs to spaces, if `tab_width` is positive.
+ if tab_width >= 0:
+ text = rawtext.expandtabs(tab_width)
+ else:
+ text = rawtext
+ literal_block = nodes.literal_block(rawtext, source=path,
+ classes=self.options.get("class", []))
+ literal_block.line = 1
+ self.add_name(literal_block)
+ if "number-lines" in self.options:
+ try:
+ startline = int(self.options["number-lines"] or 1)
+ except ValueError:
+ raise self.error(":number-lines: with non-integer start value")
+ endline = startline + len(text.splitlines())
+ if text.endswith("\n"):
+ text = text[:-1]
+ tokens = NumberLines([([], text)], startline, endline)
+ for classes, value in tokens:
+ if classes:
+ literal_block += nodes.inline(value, value,
+ classes=classes)
+ else:
+ literal_block += nodes.Text(value, value)
+ else:
+ literal_block += nodes.Text(text, text)
+ return [literal_block]
+
+ def code(self, path, tab_width, rawtext):
+ """Output a code block"""
+
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
- if 'literal' in self.options:
- # Convert tabs to spaces, if `tab_width` is positive.
- if tab_width >= 0:
- text = rawtext.expandtabs(tab_width)
- else:
- text = rawtext
- literal_block = nodes.literal_block(rawtext, source=path,
- classes=self.options.get('class', []))
- literal_block.line = 1
- self.add_name(literal_block)
- if 'number-lines' in self.options:
- try:
- startline = int(self.options['number-lines'] or 1)
- except ValueError:
- raise self.error(':number-lines: with non-integer '
- 'start value')
- endline = startline + len(include_lines)
- if text.endswith('\n'):
- text = text[:-1]
- tokens = NumberLines([([], text)], startline, endline)
- for classes, value in tokens:
- if classes:
- literal_block += nodes.inline(value, value,
- classes=classes)
- else:
- literal_block += nodes.Text(value, value)
- else:
- literal_block += nodes.Text(text, text)
- return [literal_block]
- if 'code' in self.options:
- self.options['source'] = path
- codeblock = CodeBlock(self.name,
- [self.options.pop('code')], # arguments
- self.options,
- include_lines, # content
- self.lineno,
- self.content_offset,
- self.block_text,
- self.state,
- self.state_machine)
- return codeblock.run()
- self.state_machine.insert_input(include_lines, path)
- return []
+
+ self.options["source"] = path
+ codeblock = CodeBlock(self.name,
+ [self.options.pop("code")], # arguments
+ self.options,
+ include_lines,
+ self.lineno,
+ self.content_offset,
+ self.block_text,
+ self.state,
+ self.state_machine)
+ return codeblock.run()
+
+ def run(self):
+ """Include a file as part of the content of this reST file."""
+ env = self.state.document.settings.env
+
+ #
+ # The include logic accepts only paths relative to the
+ # kernel source tree, and checks them to prevent
+ # directory traversal issues.
+ #
+
+ srctree = os.path.abspath(os.environ["srctree"])
+
+ path = os.path.expandvars(self.arguments[0])
+ src_path = os.path.join(srctree, path)
+
+ if os.path.isfile(src_path):
+ base = srctree
+ path = src_path
+ else:
+ raise self.warning(f'File "{path}" doesn\'t exist')
+
+ abs_base = os.path.abspath(base)
+ abs_full_path = os.path.abspath(os.path.join(base, path))
+
+ try:
+ if os.path.commonpath([abs_full_path, abs_base]) != abs_base:
+ raise self.severe('Problems with "%s" directive, prohibited path: %s' %
+ (self.name, path))
+ except ValueError:
+ # Paths don't have the same drive (Windows) or other incompatibility
+ raise self.severe('Problems with "%s" directive, invalid path: %s' %
+ (self.name, path))
+
+ self.arguments[0] = path
+
+ #
+ # Add path location to Sphinx dependencies to ensure proper cache
+ # invalidation check.
+ #
+
+ env.note_dependency(os.path.abspath(path))
+
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+ source = self.state_machine.input_lines.source(self.lineno -
+ self.state_machine.input_offset - 1)
+ source_dir = os.path.dirname(os.path.abspath(source))
+ path = directives.path(self.arguments[0])
+ if path.startswith("<") and path.endswith(">"):
+ path = os.path.join(self.standard_include_path, path[1:-1])
+ path = os.path.normpath(os.path.join(source_dir, path))
+
+ # HINT: this is the only line I had to change / commented out:
+ # path = utils.relative_path(None, path)
+
+ encoding = self.options.get("encoding",
+ self.state.document.settings.input_encoding)
+ tab_width = self.options.get("tab-width",
+ self.state.document.settings.tab_width)
+
+ # Get optional arguments related to cross-reference generation
+ if "generate-cross-refs" in self.options:
+ return self.xref_text(env, path, tab_width)
+
+ rawtext = self.read_rawtext(path, encoding)
+ rawtext = self.apply_range(rawtext)
+
+ if "code" in self.options:
+ return self.code(path, tab_width, rawtext)
+
+ return self.literal(path, tab_width, rawtext)
+
+# ==============================================================================
+
+reported = set()
+DOMAIN_INFO = {}
+all_refs = {}
+
+def fill_domain_info(env):
+ """
+ Get supported reference types for each Sphinx domain and C namespaces
+ """
+ if DOMAIN_INFO:
+ return
+
+ for domain_name, domain_instance in env.domains.items():
+ try:
+ object_types = list(domain_instance.object_types.keys())
+ DOMAIN_INFO[domain_name] = object_types
+ except AttributeError:
+ # Ignore domains that we can't retrieve object types, if any
+ pass
+
+ for domain in DOMAIN_INFO.keys():
+ domain_obj = env.get_domain(domain)
+ for name, dispname, objtype, docname, anchor, priority in domain_obj.get_objects():
+ ref_name = name.lower()
+
+ if domain == "c":
+ if '.' in ref_name:
+ ref_name = ref_name.split(".")[-1]
+
+ if not ref_name in all_refs:
+ all_refs[ref_name] = []
+
+ all_refs[ref_name].append(f"\t{domain}:{objtype}:`{name}` (from {docname})")
+
+def get_suggestions(app, env, node,
+ original_target, original_domain, original_reftype):
+ """Check if target exists in the other domain or with different reftypes."""
+ original_target = original_target.lower()
+
+ # Remove namespace if present
+ if original_domain == "c":
+ if '.' in original_target:
+ original_target = original_target.split(".")[-1]
+
+ suggestions = []
+
+ # If name exists, propose exact name match on different domains
+ if original_target in all_refs:
+ return all_refs[original_target]
+
+ # If not found, get a close match, using difflib.
+ # This method is based on the Ratcliff-Obershelp algorithm, which seeks
+ # a close match within a certain distance. We're using the defaults
+ # here, e.g. cutoff=0.6, proposing 3 alternatives
+ matches = get_close_matches(original_target, all_refs.keys())
+ for match in matches:
+ suggestions += all_refs[match]
+
+ return suggestions
+
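
The suggestion logic above relies on difflib.get_close_matches() with its default cutoff; a small sketch of the kind of output it produces, with made-up symbol names:

    from difflib import get_close_matches

    known_refs = ["v4l2_buffer", "v4l2_buf_type", "video_device"]   # illustrative
    print(get_close_matches("v4l2_bufer", known_refs))
    # closest matches first, e.g. ['v4l2_buffer', 'v4l2_buf_type']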
+def check_missing_refs(app, env, node, contnode):
+ """Check broken refs for the files it creates xrefs"""
+ if not node.source:
+ return None
+
+ try:
+ xref_files = env._xref_files
+ except AttributeError:
+ logger.critical("FATAL: _xref_files not initialized!")
+ raise
+
+ # Only show missing references for kernel-include reference-parsed files
+ if node.source not in xref_files:
+ return None
+
+ fill_domain_info(env)
+
+ target = node.get('reftarget', '')
+ domain = node.get('refdomain', 'std')
+ reftype = node.get('reftype', '')
+
+ msg = f"Invalid xref: {domain}:{reftype}:`{target}`"
+
+ # Don't duplicate warnings
+ data = (node.source, msg)
+ if data in reported:
+ return None
+ reported.add(data)
+
+ suggestions = get_suggestions(app, env, node, target, domain, reftype)
+ if suggestions:
+ msg += ". Possible alternatives:\n" + '\n'.join(suggestions)
+
+ logger.warning(msg, location=node, type='ref', subtype='missing')
+
+ return None
+
+def merge_xref_info(app, env, docnames, other):
+ """
+ As each process modifies env._xref_files, we need to merge them back.
+ """
+ if not hasattr(other, "_xref_files"):
+ return
+ env._xref_files.update(getattr(other, "_xref_files", set()))
+
+def init_xref_docs(app, env, docnames):
+ """Initialize a list of files that we're generating cross references¨"""
+ app.env._xref_files = set()
+
+# ==============================================================================
+
+def setup(app):
+ """Setup Sphinx exension"""
+
+ app.connect("env-before-read-docs", init_xref_docs)
+ app.connect("env-merge-info", merge_xref_info)
+ app.add_directive("kernel-include", KernelInclude)
+ app.connect("missing-reference", check_missing_refs)
+
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
index 5d68395539fe..16d9ff46fdf6 100644
--- a/Documentation/sphinx/kerneldoc-preamble.sty
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -220,7 +220,7 @@
If you want them, please install non-variable ``Noto Sans CJK''
font families along with the texlive-xecjk package by following
instructions from
- \sphinxcode{./scripts/sphinx-pre-install}.
+ \sphinxcode{./tools/docs/sphinx-pre-install}.
Having optional non-variable ``Noto Serif CJK'' font families will
improve the looks of those translations.
\end{sphinxadmonition}}
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index ec1ddfff1863..d8cdf068ef35 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -1,4 +1,5 @@
# coding=utf-8
+# SPDX-License-Identifier: MIT
#
# Copyright © 2016 Intel Corporation
#
@@ -24,8 +25,6 @@
# Authors:
# Jani Nikula <jani.nikula@intel.com>
#
-# Please make sure this works on both python2 and python3.
-#
import codecs
import os
@@ -39,9 +38,41 @@ from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
-import kernellog
+from sphinx.util import logging
+from pprint import pformat
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/lib/python"))
+
+from kdoc.kdoc_files import KernelFiles
+from kdoc.kdoc_output import RestFormat
__version__ = '1.0'
+kfiles = None
+logger = logging.getLogger(__name__)
+
+def cmd_str(cmd):
+ """
+ Helper function to output a command line that can be used to produce
+ the same records from the command line. Helpful for debugging problems
+ with the script.
+ """
+
+ cmd_line = ""
+
+ for w in cmd:
+ if w == "" or " " in w:
+ esc_cmd = "'" + w + "'"
+ else:
+ esc_cmd = w
+
+ if cmd_line:
+ cmd_line += " " + esc_cmd
+ continue
+ else:
+ cmd_line = esc_cmd
+
+ return cmd_line
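
For illustration, this is the kind of string cmd_str() builds for a typical invocation (the command and arguments are made up):

    print(cmd_str(["./scripts/kernel-doc", "-function", "my func", ""]))
    # -> ./scripts/kernel-doc -function 'my func' ''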
class KernelDocDirective(Directive):
"""Extract kernel-doc comments from the specified file"""
@@ -56,23 +87,48 @@ class KernelDocDirective(Directive):
'functions': directives.unchanged,
}
has_content = False
+ verbose = 0
+
+ parse_args = {}
+ msg_args = {}
+
+ def handle_args(self):
- def run(self):
env = self.state.document.settings.env
cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
- # Pass the version string to kernel-doc, as it needs to use a different
- # dialect, depending what the C domain supports for each specific
- # Sphinx versions
- cmd += ['-sphinx-version', sphinx.__version__]
-
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
+
+ # Arguments used by KernelFiles.parse() function
+ self.parse_args = {
+ "file_list": [filename],
+ "export_file": []
+ }
+
+ # Arguments used by KernelFiles.msg() function
+ self.msg_args = {
+ "enable_lineno": True,
+ "export": False,
+ "internal": False,
+ "symbol": [],
+ "nosymbol": [],
+ "no_doc_sections": False
+ }
+
export_file_patterns = []
+ verbose = os.environ.get("V")
+ if verbose:
+ try:
+ self.verbose = int(verbose)
+ except ValueError:
+ pass
+
# Tell sphinx of the dependency
env.note_dependency(os.path.abspath(filename))
- tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
+ self.tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
# 'function' is an alias of 'identifiers'
if 'functions' in self.options:
@@ -81,81 +137,166 @@ class KernelDocDirective(Directive):
# FIXME: make this nicer and more robust against errors
if 'export' in self.options:
cmd += ['-export']
+ self.msg_args["export"] = True
export_file_patterns = str(self.options.get('export')).split()
elif 'internal' in self.options:
cmd += ['-internal']
+ self.msg_args["internal"] = True
export_file_patterns = str(self.options.get('internal')).split()
elif 'doc' in self.options:
- cmd += ['-function', str(self.options.get('doc'))]
+ func = str(self.options.get('doc'))
+ cmd += ['-function', func]
+ self.msg_args["symbol"].append(func)
elif 'identifiers' in self.options:
identifiers = self.options.get('identifiers').split()
if identifiers:
for i in identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
cmd += ['-function', i]
+ self.msg_args["symbol"].append(i)
else:
cmd += ['-no-doc-sections']
+ self.msg_args["no_doc_sections"] = True
if 'no-identifiers' in self.options:
no_identifiers = self.options.get('no-identifiers').split()
if no_identifiers:
for i in no_identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
cmd += ['-nosymbol', i]
+ self.msg_args["nosymbol"].append(i)
for pattern in export_file_patterns:
+ pattern = pattern.rstrip("\\").strip()
+ if not pattern:
+ continue
+
for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
env.note_dependency(os.path.abspath(f))
cmd += ['-export-file', f]
+ self.parse_args["export_file"].append(f)
+
+ # Export file is needed by both parse and msg, as kernel-doc
+ # caches exports.
+ self.msg_args["export_file"] = self.parse_args["export_file"]
cmd += [filename]
- try:
- kernellog.verbose(env.app,
- 'calling kernel-doc \'%s\'' % (" ".join(cmd)))
+ return cmd
+
+ def run_cmd(self, cmd):
+ """
+ Execute an external kernel-doc command.
+ """
+
+ env = self.state.document.settings.env
+ node = nodes.section()
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
- out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
- if p.returncode != 0:
- sys.stderr.write(err)
+ if p.returncode != 0:
+ sys.stderr.write(err)
- kernellog.warn(env.app,
- 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
- return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
- elif env.config.kerneldoc_verbosity > 0:
- sys.stderr.write(err)
+ logger.warning("kernel-doc '%s' failed with return code %d"
+ % (" ".join(cmd), p.returncode))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+ elif env.config.kerneldoc_verbosity > 0:
+ sys.stderr.write(err)
+
+ filenames = self.parse_args["file_list"]
+ for filename in filenames:
+ self.parse_msg(filename, node, out, cmd)
+
+ return node.children
+
+ def parse_msg(self, filename, node, out, cmd):
+ """
+ Handles a kernel-doc output for a given file
+ """
+
+ env = self.state.document.settings.env
+
+ lines = statemachine.string2lines(out, self.tab_width,
+ convert_whitespace=True)
+ result = ViewList()
+
+ lineoffset = 0;
+ line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
+ for line in lines:
+ match = line_regex.search(line)
+ if match:
+ # sphinx counts lines from 0
+ lineoffset = int(match.group(1)) - 1
+ # we must eat our comments since they upset the markup
+ else:
+ doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno)
+ result.append(line, doc + ": " + filename, lineoffset)
+ lineoffset += 1
+
+ self.do_parse(result, node)
+
+ def run_kdoc(self, cmd, kfiles):
+ """
+ Execute kernel-doc classes directly instead of running as a separate
+ command.
+ """
+
+ env = self.state.document.settings.env
+
+ node = nodes.section()
+
+ kfiles.parse(**self.parse_args)
+ filenames = self.parse_args["file_list"]
- lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
- result = ViewList()
+ for filename, out in kfiles.msg(**self.msg_args, filenames=filenames):
+ self.parse_msg(filename, node, out, cmd)
- lineoffset = 0;
- line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
- for line in lines:
- match = line_regex.search(line)
- if match:
- # sphinx counts lines from 0
- lineoffset = int(match.group(1)) - 1
- # we must eat our comments since the upset the markup
- else:
- doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno)
- result.append(line, doc + ": " + filename, lineoffset)
- lineoffset += 1
+ return node.children
- node = nodes.section()
- self.do_parse(result, node)
+ def run(self):
+ global kfiles
+
+ cmd = self.handle_args()
+ if self.verbose >= 1:
+ logger.info(cmd_str(cmd))
- return node.children
+ try:
+ if kfiles:
+ return self.run_kdoc(cmd, kfiles)
+ else:
+ return self.run_cmd(cmd)
except Exception as e: # pylint: disable=W0703
- kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
- (" ".join(cmd), str(e)))
+ logger.warning("kernel-doc '%s' processing failed with: %s" %
+ (cmd_str(cmd), pformat(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
with switch_source_input(self.state, result):
self.state.nested_parse(result, 0, node, match_titles=1)
+def setup_kfiles(app):
+ global kfiles
+
+ kerneldoc_bin = app.env.config.kerneldoc_bin
+
+ if kerneldoc_bin and kerneldoc_bin.endswith("kernel-doc.py"):
+ print("Using Python kernel-doc")
+ out_style = RestFormat()
+ kfiles = KernelFiles(out_style=out_style, logger=logger)
+ else:
+ print(f"Using {kerneldoc_bin}")
+
+
def setup(app):
app.add_config_value('kerneldoc_bin', None, 'env')
app.add_config_value('kerneldoc_srctree', None, 'env')
@@ -163,6 +304,8 @@ def setup(app):
app.add_directive('kernel-doc', KernelDocDirective)
+ app.connect('builder-inited', setup_kfiles)
+
return dict(
version = __version__,
parallel_read_safe = True,
diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py
deleted file mode 100644
index 0bc00c138cad..000000000000
--- a/Documentation/sphinx/kernellog.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Sphinx has deprecated its older logging interface, but the replacement
-# only goes back to 1.6. So here's a wrapper layer to keep around for
-# as long as we support 1.4.
-#
-# We don't support 1.4 anymore, but we'll keep the wrappers around until
-# we change all the code to not use them anymore :)
-#
-import sphinx
-from sphinx.util import logging
-
-logger = logging.getLogger('kerneldoc')
-
-def warn(app, message):
- logger.warning(message)
-
-def verbose(app, message):
- logger.verbose(message)
-
-def info(app, message):
- logger.info(message)
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index 97166333b727..ad495c0da270 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=C0103, R0903, R0912, R0915
-u"""
+"""
scalable figure and image handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -59,12 +60,14 @@ from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
from sphinx.util.nodes import clean_astext
-import kernellog
+from sphinx.util import logging
Figure = images.Figure
__version__ = '1.0.0'
+logger = logging.getLogger('kfigure')
+
# simple helper
# -------------
@@ -163,14 +166,14 @@ def setup(app):
def setupTools(app):
- u"""
+ """
Check available build tools and log some *verbose* messages.
This function is called once, when the builder is initiated.
"""
global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603
global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603
- kernellog.verbose(app, "kfigure: check installed tools ...")
+ logger.verbose("kfigure: check installed tools ...")
dot_cmd = which('dot')
convert_cmd = which('convert')
@@ -178,7 +181,7 @@ def setupTools(app):
inkscape_cmd = which('inkscape')
if dot_cmd:
- kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
+ logger.verbose("use dot(1) from: " + dot_cmd)
try:
dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
@@ -190,10 +193,11 @@ def setupTools(app):
dot_Tpdf_ptn = b'pdf'
dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
else:
- kernellog.warn(app, "dot(1) not found, for better output quality install "
- "graphviz from https://www.graphviz.org")
+ logger.warning(
+ "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org"
+ )
if inkscape_cmd:
- kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd)
+ logger.verbose("use inkscape(1) from: " + inkscape_cmd)
inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
stderr=subprocess.DEVNULL)
ver_one_ptn = b'Inkscape 1'
@@ -204,26 +208,27 @@ def setupTools(app):
else:
if convert_cmd:
- kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
+ logger.verbose("use convert(1) from: " + convert_cmd)
else:
- kernellog.verbose(app,
+ logger.verbose(
"Neither inkscape(1) nor convert(1) found.\n"
- "For SVG to PDF conversion, "
- "install either Inkscape (https://inkscape.org/) (preferred) or\n"
- "ImageMagick (https://www.imagemagick.org)")
+ "For SVG to PDF conversion, install either Inkscape (https://inkscape.org/) (preferred) or\n"
+ "ImageMagick (https://www.imagemagick.org)"
+ )
if rsvg_convert_cmd:
- kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd)
- kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
+ logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd)
+ logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
dot_Tpdf = False
else:
- kernellog.verbose(app,
+ logger.verbose(
"rsvg-convert(1) not found.\n"
- " SVG rendering of convert(1) is done by ImageMagick-native renderer.")
+ " SVG rendering of convert(1) is done by ImageMagick-native renderer."
+ )
if dot_Tpdf:
- kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion")
else:
- kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
# integrate conversion tools
@@ -257,13 +262,12 @@ def convert_image(img_node, translator, src_fname=None):
# in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
- kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])
+ logger.verbose('assert best format for: ' + img_node['uri'])
if in_ext == '.dot':
if not dot_cmd:
- kernellog.verbose(app,
- "dot from graphviz not available / include DOT raw.")
+ logger.verbose("dot from graphviz not available / include DOT raw.")
img_node.replace_self(file2literal(src_fname))
elif translator.builder.format == 'latex':
@@ -290,10 +294,11 @@ def convert_image(img_node, translator, src_fname=None):
if translator.builder.format == 'latex':
if not inkscape_cmd and convert_cmd is None:
- kernellog.warn(app,
- "no SVG to PDF conversion available / include SVG raw."
- "\nIncluding large raw SVGs can cause xelatex error."
- "\nInstall Inkscape (preferred) or ImageMagick.")
+ logger.warning(
+ "no SVG to PDF conversion available / include SVG raw.\n"
+ "Including large raw SVGs can cause xelatex error.\n"
+ "Install Inkscape (preferred) or ImageMagick."
+ )
img_node.replace_self(file2literal(src_fname))
else:
dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
@@ -306,15 +311,14 @@ def convert_image(img_node, translator, src_fname=None):
_name = dst_fname[len(str(translator.builder.outdir)) + 1:]
if isNewer(dst_fname, src_fname):
- kernellog.verbose(app,
- "convert: {out}/%s already exists and is newer" % _name)
+ logger.verbose("convert: {out}/%s already exists and is newer" % _name)
else:
ok = False
mkdir(path.dirname(dst_fname))
if in_ext == '.dot':
- kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
+ logger.verbose('convert DOT to: {out}/' + _name)
if translator.builder.format == 'latex' and not dot_Tpdf:
svg_fname = path.join(translator.builder.outdir, fname + '.svg')
ok1 = dot2format(app, src_fname, svg_fname)
@@ -325,7 +329,7 @@ def convert_image(img_node, translator, src_fname=None):
ok = dot2format(app, src_fname, dst_fname)
elif in_ext == '.svg':
- kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
+ logger.verbose('convert SVG to: {out}/' + _name)
ok = svg2pdf(app, src_fname, dst_fname)
if not ok:
@@ -354,7 +358,7 @@ def dot2format(app, dot_fname, out_fname):
with open(out_fname, "w") as out:
exit_code = subprocess.call(cmd, stdout = out)
if exit_code != 0:
- kernellog.warn(app,
+ logger.warning(
"Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
return bool(exit_code == 0)
@@ -388,13 +392,14 @@ def svg2pdf(app, svg_fname, pdf_fname):
pass
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
if warning_msg:
- kernellog.warn(app, "Warning msg from %s: %s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.warning( "Warning msg from %s: %s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
elif warning_msg:
- kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.verbose("Warning msg from %s (likely harmless):\n%s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
return bool(exit_code == 0)
@@ -418,7 +423,8 @@ def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
# use stdout and stderr from parent
exit_code = subprocess.call(cmd)
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
ok = bool(exit_code == 0)
return ok
@@ -440,7 +446,7 @@ class kernel_image(nodes.image):
pass
class KernelImage(images.Image):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. image::`` directive, except *remote URI* and
*glob* pattern. The KernelImage wraps a image node into a
@@ -476,7 +482,7 @@ class kernel_figure(nodes.figure):
"""Node for ``kernel-figure`` directive."""
class KernelFigure(Figure):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. figure::`` directive, except *remote URI* and
*glob* pattern. The KernelFigure wraps a figure node into a kernel_figure
@@ -513,15 +519,15 @@ def visit_kernel_render(self, node):
app = self.builder.app
srclang = node.get('srclang')
- kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
+ logger.verbose('visit kernel-render node lang: "%s"' % srclang)
tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
if tmp_ext is None:
- kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
+ logger.warning('kernel-render: "%s" unknown / include raw.' % srclang)
return
if not dot_cmd and tmp_ext == '.dot':
- kernellog.verbose(app, "dot from graphviz not available / include raw.")
+ logger.verbose("dot from graphviz not available / include raw.")
return
literal_block = node[0]
@@ -552,7 +558,7 @@ class kernel_render(nodes.General, nodes.Inline, nodes.Element):
pass
class KernelRender(Figure):
- u"""KernelRender directive
+ """KernelRender directive
Render content by external tool. Has all the options known from the
*figure* directive, plus option ``caption``. If ``caption`` has a
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
deleted file mode 100644
index 8b416bfd75ac..000000000000
--- a/Documentation/sphinx/load_config.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-# pylint: disable=R0903, C0330, R0914, R0912, E0401
-
-import os
-import sys
-from sphinx.util.osutil import fs_encoding
-
-# ------------------------------------------------------------------------------
-def loadConfig(namespace):
-# ------------------------------------------------------------------------------
-
- u"""Load an additional configuration file into *namespace*.
-
- The name of the configuration file is taken from the environment
- ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
- configuration values from the origin ``conf.py``. With this you are able to
- maintain *build themes*. """
-
- config_file = os.environ.get("SPHINX_CONF", None)
- if (config_file is not None
- and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
- config_file = os.path.abspath(config_file)
-
- # Let's avoid one conf.py file just due to latex_documents
- start = config_file.find('Documentation/')
- if start >= 0:
- start = config_file.find('/', start + 1)
-
- end = config_file.rfind('/')
- if start >= 0 and end > 0:
- dir = config_file[start + 1:end]
-
- print("source directory: %s" % dir)
- new_latex_docs = []
- latex_documents = namespace['latex_documents']
-
- for l in latex_documents:
- if l[0].find(dir + '/') == 0:
- has = True
- fn = l[0][len(dir) + 1:]
- new_latex_docs.append((fn, l[1], l[2], l[3], l[4]))
- break
-
- namespace['latex_documents'] = new_latex_docs
-
- # If there is an extra conf.py file, load it
- if os.path.isfile(config_file):
- sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
- config = namespace.copy()
- config['__file__'] = config_file
- with open(config_file, 'rb') as f:
- code = compile(f.read(), fs_encoding, 'exec')
- exec(code, config)
- del config['__file__']
- namespace.update(config)
- else:
- config = namespace.copy()
- config['tags'].add("subproject")
- namespace.update(config)
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
index dcad0fff4723..519ad18685b2 100755
--- a/Documentation/sphinx/maintainers_include.py
+++ b/Documentation/sphinx/maintainers_include.py
@@ -3,7 +3,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
-u"""
+"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
@@ -22,10 +22,12 @@ import re
import os.path
from docutils import statemachine
-from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.misc import Include
+def ErrorString(exc): # Shamelessly stolen from docutils
+ return f'{exc.__class__.__name__}: {exc}'
+
__version__ = '1.0'
def setup(app):
@@ -37,7 +39,7 @@ def setup(app):
)
class MaintainersInclude(Include):
- u"""MaintainersInclude (``maintainers-include``) directive"""
+ """MaintainersInclude (``maintainers-include``) directive"""
required_arguments = 0
def parse_maintainers(self, path):
diff --git a/Documentation/sphinx/min_requirements.txt b/Documentation/sphinx/min_requirements.txt
new file mode 100644
index 000000000000..96b5e0bfa3d7
--- /dev/null
+++ b/Documentation/sphinx/min_requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+alabaster>=0.7,<0.8
+docutils>=0.15,<0.18
+jinja2>=2.3,<3.1
+PyYAML>=5.1,<6.1
+Sphinx==3.4.3
+sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-devhelp==1.0.1
+sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-qthelp==1.0.2
+sphinxcontrib-serializinghtml==1.1.4
diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh
deleted file mode 100644
index e54c44ce117d..000000000000
--- a/Documentation/sphinx/parallel-wrapper.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Figure out if we should follow a specific parallelism from the make
-# environment (as exported by scripts/jobserver-exec), or fall back to
-# the "auto" parallelism when "-jN" is not specified at the top-level
-# "make" invocation.
-
-sphinx="$1"
-shift || true
-
-parallel="$PARALLELISM"
-if [ -z "$parallel" ] ; then
- # If no parallelism is specified at the top-level make, then
- # fall back to the expected "-jauto" mode that the "htmldocs"
- # target has had.
- auto=$(perl -e 'open IN,"'"$sphinx"' --version 2>&1 |";
- while (<IN>) {
- if (m/([\d\.]+)/) {
- print "auto" if ($1 >= "1.7")
- }
- }
- close IN')
- if [ -n "$auto" ] ; then
- parallel="$auto"
- fi
-fi
-# Only if some parallelism has been determined do we add the -jN option.
-if [ -n "$parallel" ] ; then
- parallel="-j$parallel"
-fi
-
-exec "$sphinx" $parallel "$@"
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
deleted file mode 100755
index b063f2f1cfb2..000000000000
--- a/Documentation/sphinx/parse-headers.pl
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env perl
-use strict;
-use Text::Tabs;
-use Getopt::Long;
-use Pod::Usage;
-
-my $debug;
-my $help;
-my $man;
-
-GetOptions(
- "debug" => \$debug,
- 'usage|?' => \$help,
- 'help' => \$man
-) or pod2usage(2);
-
-pod2usage(1) if $help;
-pod2usage(-exitstatus => 0, -verbose => 2) if $man;
-pod2usage(2) if (scalar @ARGV < 2 || scalar @ARGV > 3);
-
-my ($file_in, $file_out, $file_exceptions) = @ARGV;
-
-my $data;
-my %ioctls;
-my %defines;
-my %typedefs;
-my %enums;
-my %enum_symbols;
-my %structs;
-
-require Data::Dumper if ($debug);
-
-#
-# read the file and get identifiers
-#
-
-my $is_enum = 0;
-my $is_comment = 0;
-open IN, $file_in or die "Can't open $file_in";
-while (<IN>) {
- $data .= $_;
-
- my $ln = $_;
- if (!$is_comment) {
- $ln =~ s,/\*.*(\*/),,g;
-
- $is_comment = 1 if ($ln =~ s,/\*.*,,);
- } else {
- if ($ln =~ s,^(.*\*/),,) {
- $is_comment = 0;
- } else {
- next;
- }
- }
-
- if ($is_enum && $ln =~ m/^\s*([_\w][\w\d_]+)\s*[\,=]?/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
- $n =~ tr/_/-/;
-
- $enum_symbols{$s} = "\\ :ref:`$s <$n>`\\ ";
-
- $is_enum = 0 if ($is_enum && m/\}/);
- next;
- }
- $is_enum = 0 if ($is_enum && m/\}/);
-
- if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+_IO/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
-
- $ioctls{$s} = "\\ :ref:`$s <$n>`\\ ";
- next;
- }
-
- if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+/) {
- my $s = $1;
- my $n = $1;
- $n =~ tr/A-Z/a-z/;
- $n =~ tr/_/-/;
-
- $defines{$s} = "\\ :ref:`$s <$n>`\\ ";
- next;
- }
-
- if ($ln =~ m/^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);/) {
- my $s = $2;
- my $n = $3;
-
- $typedefs{$n} = "\\ :c:type:`$n <$s>`\\ ";
- next;
- }
- if ($ln =~ m/^\s*enum\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*enum\s+([_\w][\w\d_]+)$/
- || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)$/) {
- my $s = $1;
-
- $enums{$s} = "enum :c:type:`$s`\\ ";
-
- $is_enum = $1;
- next;
- }
- if ($ln =~ m/^\s*struct\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*struct\s+([[_\w][\w\d_]+)$/
- || $ln =~ m/^\s*typedef\s*struct\s+([_\w][\w\d_]+)\s+\{/
- || $ln =~ m/^\s*typedef\s*struct\s+([[_\w][\w\d_]+)$/
- ) {
- my $s = $1;
-
- $structs{$s} = "struct $s\\ ";
- next;
- }
-}
-close IN;
-
-#
-# Handle multi-line typedefs
-#
-
-my @matches = ($data =~ m/typedef\s+struct\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,
- $data =~ m/typedef\s+enum\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,);
-foreach my $m (@matches) {
- my $s = $m;
-
- $typedefs{$s} = "\\ :c:type:`$s`\\ ";
- next;
-}
-
-#
-# Handle exceptions, if any
-#
-
-my %def_reftype = (
- "ioctl" => ":ref",
- "define" => ":ref",
- "symbol" => ":ref",
- "typedef" => ":c:type",
- "enum" => ":c:type",
- "struct" => ":c:type",
-);
-
-if ($file_exceptions) {
- open IN, $file_exceptions or die "Can't read $file_exceptions";
- while (<IN>) {
- next if (m/^\s*$/ || m/^\s*#/);
-
- # Parsers to ignore a symbol
-
- if (m/^ignore\s+ioctl\s+(\S+)/) {
- delete $ioctls{$1} if (exists($ioctls{$1}));
- next;
- }
- if (m/^ignore\s+define\s+(\S+)/) {
- delete $defines{$1} if (exists($defines{$1}));
- next;
- }
- if (m/^ignore\s+typedef\s+(\S+)/) {
- delete $typedefs{$1} if (exists($typedefs{$1}));
- next;
- }
- if (m/^ignore\s+enum\s+(\S+)/) {
- delete $enums{$1} if (exists($enums{$1}));
- next;
- }
- if (m/^ignore\s+struct\s+(\S+)/) {
- delete $structs{$1} if (exists($structs{$1}));
- next;
- }
- if (m/^ignore\s+symbol\s+(\S+)/) {
- delete $enum_symbols{$1} if (exists($enum_symbols{$1}));
- next;
- }
-
- # Parsers to replace a symbol
- my ($type, $old, $new, $reftype);
-
- if (m/^replace\s+(\S+)\s+(\S+)\s+(\S+)/) {
- $type = $1;
- $old = $2;
- $new = $3;
- } else {
- die "Can't parse $file_exceptions: $_";
- }
-
- if ($new =~ m/^\:c\:(data|func|macro|type)\:\`(.+)\`/) {
- $reftype = ":c:$1";
- $new = $2;
- } elsif ($new =~ m/\:ref\:\`(.+)\`/) {
- $reftype = ":ref";
- $new = $1;
- } else {
- $reftype = $def_reftype{$type};
- }
- $new = "$reftype:`$old <$new>`";
-
- if ($type eq "ioctl") {
- $ioctls{$old} = $new if (exists($ioctls{$old}));
- next;
- }
- if ($type eq "define") {
- $defines{$old} = $new if (exists($defines{$old}));
- next;
- }
- if ($type eq "symbol") {
- $enum_symbols{$old} = $new if (exists($enum_symbols{$old}));
- next;
- }
- if ($type eq "typedef") {
- $typedefs{$old} = $new if (exists($typedefs{$old}));
- next;
- }
- if ($type eq "enum") {
- $enums{$old} = $new if (exists($enums{$old}));
- next;
- }
- if ($type eq "struct") {
- $structs{$old} = $new if (exists($structs{$old}));
- next;
- }
-
- die "Can't parse $file_exceptions: $_";
- }
-}
-
-if ($debug) {
- print Data::Dumper->Dump([\%ioctls], [qw(*ioctls)]) if (%ioctls);
- print Data::Dumper->Dump([\%typedefs], [qw(*typedefs)]) if (%typedefs);
- print Data::Dumper->Dump([\%enums], [qw(*enums)]) if (%enums);
- print Data::Dumper->Dump([\%structs], [qw(*structs)]) if (%structs);
- print Data::Dumper->Dump([\%defines], [qw(*defines)]) if (%defines);
- print Data::Dumper->Dump([\%enum_symbols], [qw(*enum_symbols)]) if (%enum_symbols);
-}
-
-#
-# Align block
-#
-$data = expand($data);
-$data = " " . $data;
-$data =~ s/\n/\n /g;
-$data =~ s/\n\s+$/\n/g;
-$data =~ s/\n\s+\n/\n\n/g;
-
-#
-# Add escape codes for special characters
-#
-$data =~ s,([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^]),\\$1,g;
-
-$data =~ s,DEPRECATED,**DEPRECATED**,g;
-
-#
-# Add references
-#
-
-my $start_delim = "[ \n\t\(\=\*\@]";
-my $end_delim = "(\\s|,|\\\\=|\\\\:|\\;|\\\)|\\}|\\{)";
-
-foreach my $r (keys %ioctls) {
- my $s = $ioctls{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %defines) {
- my $s = $defines{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %enum_symbols) {
- my $s = $enum_symbols{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-foreach my $r (keys %enums) {
- my $s = $enums{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/enum\s+($r)$end_delim/$s$2/g;
-}
-
-foreach my $r (keys %structs) {
- my $s = $structs{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
-
- $data =~ s/struct\s+($r)$end_delim/$s$2/g;
-}
-
-foreach my $r (keys %typedefs) {
- my $s = $typedefs{$r};
-
- $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
-
- print "$r -> $s\n" if ($debug);
- $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
-}
-
-$data =~ s/\\ ([\n\s])/\1/g;
-
-#
-# Generate output file
-#
-
-my $title = $file_in;
-$title =~ s,.*/,,;
-
-open OUT, "> $file_out" or die "Can't open $file_out";
-print OUT ".. -*- coding: utf-8; mode: rst -*-\n\n";
-print OUT "$title\n";
-print OUT "=" x length($title);
-print OUT "\n\n.. parsed-literal::\n\n";
-print OUT $data;
-close OUT;
-
-__END__
-
-=head1 NAME
-
-parse_headers.pl - parse a C file, in order to identify functions, structs,
-enums and defines and create cross-references to a Sphinx book.
-
-=head1 SYNOPSIS
-
-B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-
-Where <options> can be: --debug, --help or --usage.
-
-=head1 OPTIONS
-
-=over 8
-
-=item B<--debug>
-
-Put the script in verbose mode, useful for debugging.
-
-=item B<--usage>
-
-Prints a brief help message and exits.
-
-=item B<--help>
-
-Prints a more detailed help message and exits.
-
-=back
-
-=head1 DESCRIPTION
-
-Convert a C header or source file (C_FILE), into a ReStructured Text
-included via ..parsed-literal block with cross-references for the
-documentation files that describe the API. It accepts an optional
-EXCEPTIONS_FILE with describes what elements will be either ignored or
-be pointed to a non-default reference.
-
-The output is written at the (OUT_FILE).
-
-It is capable of identifying defines, functions, structs, typedefs,
-enums and enum symbols and create cross-references for all of them.
-It is also capable of distinguish #define used for specifying a Linux
-ioctl.
-
-The EXCEPTIONS_FILE contain two rules to allow ignoring a symbol or
-to replace the default references by a custom one.
-
-Please read Documentation/doc-guide/parse-headers.rst at the Kernel's
-tree for more details.
-
-=head1 BUGS
-
-Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
-
-=head1 COPYRIGHT
-
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
-
-License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
-
-This is free software: you are free to change and redistribute it.
-There is NO WARRANTY, to the extent permitted by law.
-
-=cut
diff --git a/Documentation/sphinx/parser_yaml.py b/Documentation/sphinx/parser_yaml.py
new file mode 100755
index 000000000000..634d84a202fc
--- /dev/null
+++ b/Documentation/sphinx/parser_yaml.py
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+"""
+Sphinx extension for processing YAML files
+"""
+
+import os
+import re
+import sys
+
+from pprint import pformat
+
+from docutils import statemachine
+from docutils.parsers.rst import Parser as RSTParser
+from docutils.parsers.rst import states
+from docutils.statemachine import ViewList
+
+from sphinx.util import logging
+from sphinx.parsers import Parser
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "tools/net/ynl/pyynl/lib"))
+
+from doc_generator import YnlDocGenerator # pylint: disable=C0413
+
+logger = logging.getLogger(__name__)
+
+class YamlParser(Parser):
+ """
+ Kernel parser for YAML files.
+
+ This is a simple sphinx.Parser to handle yaml files inside the
+ Kernel tree that will be part of the built documentation.
+
+ The actual parser function is not contained here: the code was
+ written in a way that parsing yaml for different subsystems
+ can be done from a single dispatcher.
+
+ All it takes to parse additional YAML files is to add an import line:
+
+ from some_parser_code import NewYamlGenerator
+
+ to this module, then add an instance of the parser with:
+
+ new_parser = NewYamlGenerator()
+
+ and add logic inside parse() to handle it based on the path,
+ like this:
+
+ if "/foo" in fname:
+ msg = self.new_parser.parse_yaml_file(fname)
+ """
+
+ supported = ('yaml', )
+
+ netlink_parser = YnlDocGenerator()
+
+ re_lineno = re.compile(r"\.\. LINENO ([0-9]+)$")
+
+ tab_width = 8
+
+ def rst_parse(self, inputstring, document, msg):
+ """
+ Receives ReST content that was previously converted by the
+ YAML parser, adding it to the document tree.
+ """
+
+ self.setup_parse(inputstring, document)
+
+ result = ViewList()
+
+ self.statemachine = states.RSTStateMachine(state_classes=states.state_classes,
+ initial_state='Body',
+ debug=document.reporter.debug_flag)
+
+ try:
+ # Parse message with RSTParser
+ lineoffset = 0
+
+ lines = statemachine.string2lines(msg, self.tab_width,
+ convert_whitespace=True)
+
+ for line in lines:
+ match = self.re_lineno.match(line)
+ if match:
+ lineoffset = int(match.group(1))
+ continue
+
+ result.append(line, document.current_source, lineoffset)
+
+ self.statemachine.run(result, document)
+
+ except Exception as e:
+ document.reporter.error("YAML parsing error: %s" % pformat(e))
+
+ self.finish_parse()
+
+ # Overrides docutils.parsers.Parser. See sphinx.parsers.RSTParser
+ def parse(self, inputstring, document):
+ """Check if a YAML is meant to be parsed."""
+
+ fname = document.current_source
+
+ # Handle netlink yaml specs
+ if "/netlink/specs/" in fname:
+ msg = self.netlink_parser.parse_yaml_file(fname)
+ self.rst_parse(inputstring, document, msg)
+
+ # All other yaml files are ignored
+
+def setup(app):
+ """Setup function for the Sphinx extension."""
+
+ # Add YAML parser
+ app.add_source_parser(YamlParser)
+ app.add_source_suffix('.yaml', 'yaml')
+
+ return {
+ 'version': '1.0',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
index 5017f307c8a4..76b4255061d0 100644
--- a/Documentation/sphinx/requirements.txt
+++ b/Documentation/sphinx/requirements.txt
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
alabaster
Sphinx
pyyaml
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 16bea0632555..3d19569e5728 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -1,8 +1,9 @@
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=C0330, R0903, R0912
-u"""
+"""
flat-table
~~~~~~~~~~
@@ -99,7 +100,7 @@ class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
class FlatTable(Table):
# ==============================================================================
- u"""FlatTable (``flat-table``) directive"""
+ """FlatTable (``flat-table``) directive"""
option_spec = {
'name': directives.unchanged
@@ -135,7 +136,7 @@ class FlatTable(Table):
class ListTableBuilder(object):
# ==============================================================================
- u"""Builds a table from a double-stage list"""
+ """Builds a table from a double-stage list"""
def __init__(self, directive):
self.directive = directive
@@ -212,7 +213,7 @@ class ListTableBuilder(object):
raise SystemMessagePropagation(error)
def parseFlatTableNode(self, node):
- u"""parses the node from a :py:class:`FlatTable` directive's body"""
+ """parses the node from a :py:class:`FlatTable` directive's body"""
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
self.raiseError(
@@ -225,7 +226,7 @@ class ListTableBuilder(object):
self.roundOffTableDefinition()
def roundOffTableDefinition(self):
- u"""Round off the table definition.
+ """Round off the table definition.
This method rounds off the table definition in :py:member:`rows`.
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html
index 41f1efbe64bb..b84969bd31c4 100644
--- a/Documentation/sphinx/templates/kernel-toc.html
+++ b/Documentation/sphinx/templates/kernel-toc.html
@@ -1,4 +1,5 @@
-<!-- SPDX-License-Identifier: GPL-2.0 -->
+{# SPDX-License-Identifier: GPL-2.0 #}
+
{# Create a local TOC the kernel way #}
<p>
<h3 class="kernel-toc-contents">Contents</h3>
diff --git a/Documentation/sphinx/templates/translations.html b/Documentation/sphinx/templates/translations.html
index 8df5d42d8dcd..351586f41938 100644
--- a/Documentation/sphinx/templates/translations.html
+++ b/Documentation/sphinx/templates/translations.html
@@ -1,5 +1,5 @@
-<!-- SPDX-License-Identifier: GPL-2.0 -->
-<!-- Copyright © 2023, Oracle and/or its affiliates. -->
+{# SPDX-License-Identifier: GPL-2.0 #}
+{# Copyright © 2023, Oracle and/or its affiliates. #}
{# Create a language menu for translations #}
{% if languages|length > 0: %}