Diffstat (limited to 'scripts/gdb')
-rw-r--r--  scripts/gdb/Makefile               |    2
-rw-r--r--  scripts/gdb/linux/.gitignore       |    4
-rw-r--r--  scripts/gdb/linux/Makefile         |   25
-rw-r--r--  scripts/gdb/linux/__init__.py      |    1
-rw-r--r--  scripts/gdb/linux/bpf.py           |  253
-rw-r--r--  scripts/gdb/linux/clk.py           |   78
-rw-r--r--  scripts/gdb/linux/config.py        |   44
-rw-r--r--  scripts/gdb/linux/constants.py.in  |  175
-rw-r--r--  scripts/gdb/linux/cpus.py          |  227
-rw-r--r--  scripts/gdb/linux/device.py        |  182
-rw-r--r--  scripts/gdb/linux/dmesg.py         |  152
-rw-r--r--  scripts/gdb/linux/genpd.py         |   85
-rw-r--r--  scripts/gdb/linux/interrupts.py    |  232
-rw-r--r--  scripts/gdb/linux/kasan.py         |   44
-rw-r--r--  scripts/gdb/linux/lists.py         |  136
-rw-r--r--  scripts/gdb/linux/mapletree.py     |  252
-rw-r--r--  scripts/gdb/linux/mm.py            |  403
-rw-r--r--  scripts/gdb/linux/modules.py       |  134
-rw-r--r--  scripts/gdb/linux/page_owner.py    |  182
-rw-r--r--  scripts/gdb/linux/pgtable.py       |  220
-rw-r--r--  scripts/gdb/linux/proc.py          |  277
-rw-r--r--  scripts/gdb/linux/radixtree.py     |  215
-rw-r--r--  scripts/gdb/linux/rbtree.py        |  189
-rw-r--r--  scripts/gdb/linux/slab.py          |  325
-rw-r--r--  scripts/gdb/linux/stackdepot.py    |   86
-rw-r--r--  scripts/gdb/linux/symbols.py       |  338
-rw-r--r--  scripts/gdb/linux/tasks.py         |  127
-rw-r--r--  scripts/gdb/linux/timerlist.py     |  220
-rw-r--r--  scripts/gdb/linux/utils.py         |  273
-rw-r--r--  scripts/gdb/linux/vfs.py           |   59
-rw-r--r--  scripts/gdb/linux/vmalloc.py       |   62
-rw-r--r--  scripts/gdb/linux/xarray.py        |   28
-rw-r--r--  scripts/gdb/vmlinux-gdb.py         |   52
33 files changed, 5082 insertions, 0 deletions
diff --git a/scripts/gdb/Makefile b/scripts/gdb/Makefile
new file mode 100644
index 000000000000..3fca1937d956
--- /dev/null
+++ b/scripts/gdb/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-y := linux
diff --git a/scripts/gdb/linux/.gitignore b/scripts/gdb/linux/.gitignore
new file mode 100644
index 000000000000..43234cbcb529
--- /dev/null
+++ b/scripts/gdb/linux/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+*.pyc
+*.pyo
+constants.py
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
new file mode 100644
index 000000000000..fcd32fcf3ae0
--- /dev/null
+++ b/scripts/gdb/linux/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ifdef building_out_of_srctree
+
+symlinks := $(patsubst $(src)/%,%,$(wildcard $(src)/*.py))
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(src))/%,$@) $@
+
+always-y += $(symlinks)
+$(addprefix $(obj)/, $(symlinks)): FORCE
+ $(call if_changed,symlink)
+
+endif
+
+quiet_cmd_gen_constants_py = GEN $@
+ cmd_gen_constants_py = \
+ $(CPP) -E -x c -P $(c_flags) $< > $@ ;\
+ sed -i '1,/<!-- end-c-headers -->/d;' $@
+
+always-y += constants.py
+$(obj)/constants.py: $(src)/constants.py.in FORCE
+ $(call if_changed_dep,gen_constants_py)
+
+clean-files := *.pyc *.pyo
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py
new file mode 100644
index 000000000000..4680fb176337
--- /dev/null
+++ b/scripts/gdb/linux/__init__.py
@@ -0,0 +1 @@
+# nothing to do for the initialization of this package
diff --git a/scripts/gdb/linux/bpf.py b/scripts/gdb/linux/bpf.py
new file mode 100644
index 000000000000..1870534ef6f9
--- /dev/null
+++ b/scripts/gdb/linux/bpf.py
@@ -0,0 +1,253 @@
+# SPDX-License-Identifier: GPL-2.0
+
+import json
+import subprocess
+import tempfile
+
+import gdb
+
+from linux import constants, lists, radixtree, utils
+
+
+if constants.LX_CONFIG_BPF and constants.LX_CONFIG_BPF_JIT:
+ bpf_ksym_type = utils.CachedType("struct bpf_ksym")
+if constants.LX_CONFIG_BPF_SYSCALL:
+ bpf_prog_type = utils.CachedType("struct bpf_prog")
+
+
+def get_ksym_name(ksym):
+ name = ksym["name"].bytes
+ end = name.find(b"\x00")
+ if end != -1:
+ name = name[:end]
+ return name.decode()
+
+
+def list_ksyms():
+ if not (constants.LX_CONFIG_BPF and constants.LX_CONFIG_BPF_JIT):
+ return []
+ bpf_kallsyms = gdb.parse_and_eval("&bpf_kallsyms")
+ bpf_ksym_ptr_type = bpf_ksym_type.get_type().pointer()
+ return list(lists.list_for_each_entry(bpf_kallsyms,
+ bpf_ksym_ptr_type,
+ "lnode"))
+
+
+class KsymAddBreakpoint(gdb.Breakpoint):
+ def __init__(self, monitor):
+ super(KsymAddBreakpoint, self).__init__("bpf_ksym_add", internal=True)
+ self.silent = True
+ self.monitor = monitor
+
+ def stop(self):
+ self.monitor.add(gdb.parse_and_eval("ksym"))
+ return False
+
+
+class KsymRemoveBreakpoint(gdb.Breakpoint):
+ def __init__(self, monitor):
+ super(KsymRemoveBreakpoint, self).__init__("bpf_ksym_del",
+ internal=True)
+ self.silent = True
+ self.monitor = monitor
+
+ def stop(self):
+ self.monitor.remove(gdb.parse_and_eval("ksym"))
+ return False
+
+
+class KsymMonitor:
+ def __init__(self, add, remove):
+ self.add = add
+ self.remove = remove
+
+ self.add_bp = KsymAddBreakpoint(self)
+ self.remove_bp = KsymRemoveBreakpoint(self)
+
+ self.notify_initial()
+
+ def notify_initial(self):
+ for ksym in list_ksyms():
+ self.add(ksym)
+
+ def delete(self):
+ self.add_bp.delete()
+ self.remove_bp.delete()
+
+
+def list_progs():
+ if not constants.LX_CONFIG_BPF_SYSCALL:
+ return []
+ idr_rt = gdb.parse_and_eval("&prog_idr.idr_rt")
+ bpf_prog_ptr_type = bpf_prog_type.get_type().pointer()
+ progs = []
+ for _, slot in radixtree.for_each_slot(idr_rt):
+ prog = slot.dereference().cast(bpf_prog_ptr_type)
+ progs.append(prog)
+ # Subprogs are not registered in prog_idr, fetch them manually.
+ # func[0] is the current prog.
+ aux = prog["aux"]
+ func = aux["func"]
+ real_func_cnt = int(aux["real_func_cnt"])
+ for i in range(1, real_func_cnt):
+ progs.append(func[i])
+ return progs
+
+
+class ProgAddBreakpoint(gdb.Breakpoint):
+ def __init__(self, monitor):
+ super(ProgAddBreakpoint, self).__init__("bpf_prog_kallsyms_add",
+ internal=True)
+ self.silent = True
+ self.monitor = monitor
+
+ def stop(self):
+ self.monitor.add(gdb.parse_and_eval("fp"))
+ return False
+
+
+class ProgRemoveBreakpoint(gdb.Breakpoint):
+ def __init__(self, monitor):
+ super(ProgRemoveBreakpoint, self).__init__("bpf_prog_free_id",
+ internal=True)
+ self.silent = True
+ self.monitor = monitor
+
+ def stop(self):
+ self.monitor.remove(gdb.parse_and_eval("prog"))
+ return False
+
+
+class ProgMonitor:
+ def __init__(self, add, remove):
+ self.add = add
+ self.remove = remove
+
+ self.add_bp = ProgAddBreakpoint(self)
+ self.remove_bp = ProgRemoveBreakpoint(self)
+
+ self.notify_initial()
+
+ def notify_initial(self):
+ for prog in list_progs():
+ self.add(prog)
+
+ def delete(self):
+ self.add_bp.delete()
+ self.remove_bp.delete()
+
+
+def btf_str_by_offset(btf, offset):
+ while offset < btf["start_str_off"]:
+ btf = btf["base_btf"]
+
+ offset -= btf["start_str_off"]
+ if offset < btf["hdr"]["str_len"]:
+ return (btf["strings"] + offset).string()
+
+ return None
+
+
+def bpf_line_info_line_num(line_col):
+ return line_col >> 10
+
+
+def bpf_line_info_line_col(line_col):
+ return line_col & 0x3ff
+
+
+class LInfoIter:
+ def __init__(self, prog):
+ # See bpf_prog_get_file_line() for details.
+ self.pos = 0
+ self.nr_linfo = 0
+
+ if prog is None:
+ return
+
+ self.bpf_func = int(prog["bpf_func"])
+ aux = prog["aux"]
+ self.btf = aux["btf"]
+ linfo_idx = aux["linfo_idx"]
+ self.nr_linfo = int(aux["nr_linfo"]) - linfo_idx
+ if self.nr_linfo == 0:
+ return
+
+ linfo_ptr = aux["linfo"]
+ tpe = linfo_ptr.type.target().array(self.nr_linfo).pointer()
+ self.linfo = (linfo_ptr + linfo_idx).cast(tpe).dereference()
+ jited_linfo_ptr = aux["jited_linfo"]
+ tpe = jited_linfo_ptr.type.target().array(self.nr_linfo).pointer()
+ self.jited_linfo = (jited_linfo_ptr + linfo_idx).cast(tpe).dereference()
+
+ self.filenos = {}
+
+ def get_code_off(self):
+ if self.pos >= self.nr_linfo:
+ return -1
+ return self.jited_linfo[self.pos] - self.bpf_func
+
+ def advance(self):
+ self.pos += 1
+
+ def get_fileno(self):
+ file_name_off = int(self.linfo[self.pos]["file_name_off"])
+ fileno = self.filenos.get(file_name_off)
+ if fileno is not None:
+ return fileno, None
+ file_name = btf_str_by_offset(self.btf, file_name_off)
+ fileno = len(self.filenos) + 1
+ self.filenos[file_name_off] = fileno
+ return fileno, file_name
+
+ def get_line_col(self):
+ line_col = int(self.linfo[self.pos]["line_col"])
+ return bpf_line_info_line_num(line_col), \
+ bpf_line_info_line_col(line_col)
+
+
+def generate_debug_obj(ksym, prog):
+ name = get_ksym_name(ksym)
+ # Avoid read_memory(); it throws bogus gdb.MemoryError in some contexts.
+ start = ksym["start"]
+ code = start.cast(gdb.lookup_type("unsigned char")
+ .array(int(ksym["end"]) - int(start))
+ .pointer()).dereference().bytes
+ linfo_iter = LInfoIter(prog)
+
+ result = tempfile.NamedTemporaryFile(suffix=".o", mode="wb")
+ try:
+ with tempfile.NamedTemporaryFile(suffix=".s", mode="w") as src:
+ # ".loc" does not apply to ".byte"s, only to ".insn"s, but since
+ # this needs to work for all architectures, the latter are not an
+ # option. Ask the assembler to apply ".loc"s to labels as well,
+ # and generate dummy labels after each ".loc".
+ src.write(".loc_mark_labels 1\n")
+
+ src.write(".globl {}\n".format(name))
+ src.write(".type {},@function\n".format(name))
+ src.write("{}:\n".format(name))
+ for code_off, code_byte in enumerate(code):
+ if linfo_iter.get_code_off() == code_off:
+ fileno, file_name = linfo_iter.get_fileno()
+ if file_name is not None:
+ src.write(".file {} {}\n".format(
+ fileno, json.dumps(file_name)))
+ line, col = linfo_iter.get_line_col()
+ src.write(".loc {} {} {}\n".format(fileno, line, col))
+ src.write("0:\n")
+ linfo_iter.advance()
+ src.write(".byte {}\n".format(code_byte))
+ src.write(".size {},{}\n".format(name, len(code)))
+ src.flush()
+
+ try:
+ subprocess.check_call(["as", "-c", src.name, "-o", result.name])
+ except FileNotFoundError:
+ # "as" is not installed.
+ result.close()
+ return None
+ return result
+ except:
+ result.close()
+ raise
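
The two monitor classes above are plain callback plumbing around the kernel's ksym/prog registration paths. A minimal sketch of driving them by hand from a gdb Python session, assuming vmlinux-gdb.py has been sourced and the target was built with CONFIG_BPF_SYSCALL and CONFIG_BPF_JIT (the callback bodies are illustrative):

    import gdb
    from linux import bpf

    def on_add(ksym):
        print("bpf ksym added: %s @ 0x%x" % (bpf.get_ksym_name(ksym),
                                             int(ksym["start"])))

    def on_remove(ksym):
        print("bpf ksym removed: %s" % bpf.get_ksym_name(ksym))

    # Reports already-loaded ksyms immediately, then via the two breakpoints.
    monitor = bpf.KsymMonitor(on_add, on_remove)
    # ... resume the target and watch the callbacks fire ...
    monitor.delete()    # detach the add/del breakpoints again
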
diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
new file mode 100644
index 000000000000..7a01fdc3e844
--- /dev/null
+++ b/scripts/gdb/linux/clk.py
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) NXP 2019
+
+import gdb
+import sys
+
+from linux import utils, lists, constants
+
+clk_core_type = utils.CachedType("struct clk_core")
+
+
+def clk_core_for_each_child(hlist_head):
+ return lists.hlist_for_each_entry(hlist_head,
+ clk_core_type.get_type().pointer(), "child_node")
+
+
+class LxClkSummary(gdb.Command):
+ """Print clk tree summary
+
+Output is a subset of /sys/kernel/debug/clk/clk_summary
+
+No calls are made during printing; instead a '(c)' is printed after values that
+are cached and potentially out of date."""
+
+ def __init__(self):
+ super(LxClkSummary, self).__init__("lx-clk-summary", gdb.COMMAND_DATA)
+
+ def show_subtree(self, clk, level):
+ gdb.write("%*s%-*s %7d %8d %8d %11lu%s\n" % (
+ level * 3 + 1, "",
+ 30 - level * 3,
+ clk['name'].string(),
+ clk['enable_count'],
+ clk['prepare_count'],
+ clk['protect_count'],
+ clk['rate'],
+ '(c)' if clk['flags'] & constants.LX_CLK_GET_RATE_NOCACHE else ' '))
+
+ for child in clk_core_for_each_child(clk['children']):
+ self.show_subtree(child, level + 1)
+
+ def invoke(self, arg, from_tty):
+ if utils.gdb_eval_or_none("clk_root_list") is None:
+ raise gdb.GdbError("No clocks registered")
+        gdb.write("                                 enable  prepare  protect               \n")
+        gdb.write("   clock                          count    count    count        rate   \n")
+ gdb.write("------------------------------------------------------------------------\n")
+ for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_root_list")):
+ self.show_subtree(clk, 0)
+ for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_orphan_list")):
+ self.show_subtree(clk, 0)
+
+
+LxClkSummary()
+
+
+class LxClkCoreLookup(gdb.Function):
+ """Find struct clk_core by name"""
+
+ def __init__(self):
+ super(LxClkCoreLookup, self).__init__("lx_clk_core_lookup")
+
+ def lookup_hlist(self, hlist_head, name):
+ for child in clk_core_for_each_child(hlist_head):
+ if child['name'].string() == name:
+ return child
+ result = self.lookup_hlist(child['children'], name)
+ if result:
+ return result
+
+ def invoke(self, name):
+ name = name.string()
+ return (self.lookup_hlist(gdb.parse_and_eval("clk_root_list"), name) or
+ self.lookup_hlist(gdb.parse_and_eval("clk_orphan_list"), name))
+
+
+LxClkCoreLookup()
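
A minimal usage sketch, assuming vmlinux-gdb.py has been sourced and CONFIG_COMMON_CLK is enabled; it walks the root list the same way lx-clk-summary does:

    import gdb
    from linux import clk

    gdb.execute("lx-clk-summary")          # the whole tree, formatted as above

    # The same iteration, scripted:
    for core in clk.clk_core_for_each_child(gdb.parse_and_eval("clk_root_list")):
        print(core["name"].string(), int(core["rate"]))  # rate may be cached, see (c)
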
diff --git a/scripts/gdb/linux/config.py b/scripts/gdb/linux/config.py
new file mode 100644
index 000000000000..8843ab3cbadd
--- /dev/null
+++ b/scripts/gdb/linux/config.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2019 Google LLC.
+
+import gdb
+import zlib
+
+from linux import utils
+
+
+class LxConfigDump(gdb.Command):
+ """Output kernel config to the filename specified as the command
+ argument. Equivalent to 'zcat /proc/config.gz > config.txt' on
+ a running target"""
+
+ def __init__(self):
+ super(LxConfigDump, self).__init__("lx-configdump", gdb.COMMAND_DATA,
+ gdb.COMPLETE_FILENAME)
+
+ def invoke(self, arg, from_tty):
+ if len(arg) == 0:
+ filename = "config.txt"
+ else:
+ filename = arg
+
+ try:
+ py_config_ptr = gdb.parse_and_eval("&kernel_config_data")
+ py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end")
+ py_config_size = py_config_ptr_end - py_config_ptr
+ except gdb.error as e:
+ raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")
+
+ inf = gdb.inferiors()[0]
+ zconfig_buf = utils.read_memoryview(inf, py_config_ptr,
+ py_config_size).tobytes()
+
+ config_buf = zlib.decompress(zconfig_buf, 16)
+ with open(filename, 'wb') as f:
+ f.write(config_buf)
+
+ gdb.write("Dumped config to " + filename + "\n")
+
+
+LxConfigDump()
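
Usage is a single command; a brief sketch, assuming CONFIG_IKCONFIG is enabled (the output path is illustrative):

    import gdb
    gdb.execute("lx-configdump /tmp/running-config.txt")   # then inspect the file
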
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
new file mode 100644
index 000000000000..6d475540c6ba
--- /dev/null
+++ b/scripts/gdb/linux/constants.py.in
@@ -0,0 +1,175 @@
+/*
+ * gdb helper commands and functions for Linux kernel debugging
+ *
+ * Kernel constants derived from include files.
+ *
+ * Copyright (c) 2016 Linaro Ltd
+ *
+ * Authors:
+ * Kieran Bingham <kieran.bingham@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL version 2.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/irq.h>
+#include <linux/mount.h>
+#include <linux/of_fdt.h>
+#include <linux/page_ext.h>
+#include <linux/radix-tree.h>
+#include <linux/maple_tree.h>
+#include <linux/slab.h>
+#include <linux/threads.h>
+#include <linux/vmalloc.h>
+
+/* We need to stringify expanded macros so that they can be parsed */
+
+#define STRING(x) #x
+#define XSTRING(x) STRING(x)
+
+#define LX_VALUE(x) LX_##x = x
+#define LX_GDBPARSED(x) LX_##x = gdb.parse_and_eval(XSTRING(x))
+
+/*
+ * IS_ENABLED() generates (a || b), which is not valid Python.
+ * We can only switch on configuration items we know are built in,
+ * therefore IS_BUILTIN() is more appropriate.
+ */
+#define LX_CONFIG(x) LX_##x = IS_BUILTIN(x)
+
+/* The build system will take care of deleting everything above this marker */
+<!-- end-c-headers -->
+
+import gdb
+
+LX_CONFIG(CONFIG_DEBUG_INFO_REDUCED)
+
+/* linux/clk-provider.h */
+if IS_BUILTIN(CONFIG_COMMON_CLK):
+ LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+
+/* linux/fs.h */
+LX_GDBPARSED(SB_RDONLY)
+LX_GDBPARSED(SB_SYNCHRONOUS)
+LX_GDBPARSED(SB_MANDLOCK)
+LX_GDBPARSED(SB_DIRSYNC)
+LX_GDBPARSED(SB_NOATIME)
+LX_GDBPARSED(SB_NODIRATIME)
+
+/* linux/hrtimer.h */
+LX_GDBPARSED(hrtimer_resolution)
+
+/* linux/irq.h */
+LX_GDBPARSED(IRQD_LEVEL)
+LX_GDBPARSED(IRQ_HIDDEN)
+
+/* linux/module.h */
+if IS_BUILTIN(CONFIG_MODULES):
+ LX_GDBPARSED(MOD_TEXT)
+ LX_GDBPARSED(MOD_DATA)
+ LX_GDBPARSED(MOD_RODATA)
+ LX_GDBPARSED(MOD_RO_AFTER_INIT)
+
+/* linux/mount.h */
+LX_GDBPARSED(MNT_NOSUID)
+LX_GDBPARSED(MNT_NODEV)
+LX_GDBPARSED(MNT_NOEXEC)
+LX_GDBPARSED(MNT_NOATIME)
+LX_GDBPARSED(MNT_NODIRATIME)
+LX_GDBPARSED(MNT_RELATIME)
+
+/* linux/threads.h */
+LX_VALUE(NR_CPUS)
+
+/* linux/of_fdt.h */
+LX_VALUE(OF_DT_HEADER)
+
+/* linux/radix-tree.h */
+LX_GDBPARSED(RADIX_TREE_ENTRY_MASK)
+LX_GDBPARSED(RADIX_TREE_INTERNAL_NODE)
+LX_GDBPARSED(RADIX_TREE_MAP_SIZE)
+LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
+LX_GDBPARSED(RADIX_TREE_MAP_MASK)
+
+/* linux/maple_tree.h */
+LX_VALUE(MAPLE_NODE_SLOTS)
+LX_VALUE(MAPLE_RANGE64_SLOTS)
+LX_VALUE(MAPLE_ARANGE64_SLOTS)
+LX_GDBPARSED(MAPLE_NODE_MASK)
+
+/* linux/vmalloc.h */
+LX_VALUE(VM_IOREMAP)
+LX_VALUE(VM_ALLOC)
+LX_VALUE(VM_MAP)
+LX_VALUE(VM_USERMAP)
+LX_VALUE(VM_DMA_COHERENT)
+
+/* linux/page_ext.h */
+if IS_BUILTIN(CONFIG_PAGE_OWNER):
+ LX_GDBPARSED(PAGE_EXT_OWNER)
+ LX_GDBPARSED(PAGE_EXT_OWNER_ALLOCATED)
+
+/* linux/slab.h */
+LX_GDBPARSED(SLAB_RED_ZONE)
+LX_GDBPARSED(SLAB_POISON)
+LX_GDBPARSED(SLAB_KMALLOC)
+LX_GDBPARSED(SLAB_HWCACHE_ALIGN)
+LX_GDBPARSED(SLAB_CACHE_DMA)
+LX_GDBPARSED(SLAB_CACHE_DMA32)
+LX_GDBPARSED(SLAB_STORE_USER)
+LX_GDBPARSED(SLAB_PANIC)
+
+/* Kernel Configs */
+LX_CONFIG(CONFIG_GENERIC_CLOCKEVENTS)
+LX_CONFIG(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+LX_CONFIG(CONFIG_HIGH_RES_TIMERS)
+LX_CONFIG(CONFIG_NR_CPUS)
+LX_CONFIG(CONFIG_OF)
+LX_CONFIG(CONFIG_TICK_ONESHOT)
+LX_CONFIG(CONFIG_GENERIC_IRQ_SHOW_LEVEL)
+LX_CONFIG(CONFIG_X86_LOCAL_APIC)
+LX_CONFIG(CONFIG_SMP)
+LX_CONFIG(CONFIG_X86_THERMAL_VECTOR)
+LX_CONFIG(CONFIG_X86_MCE_THRESHOLD)
+LX_CONFIG(CONFIG_X86_MCE_AMD)
+LX_CONFIG(CONFIG_X86_MCE)
+LX_CONFIG(CONFIG_X86_IO_APIC)
+/*
+ * CONFIG_KVM can be "m" but it affects common code too. Use CONFIG_KVM_COMMON
+ * as a proxy for IS_ENABLED(CONFIG_KVM).
+ */
+LX_CONFIG_KVM = IS_BUILTIN(CONFIG_KVM_COMMON)
+LX_CONFIG(CONFIG_NUMA)
+LX_CONFIG(CONFIG_ARM64)
+LX_CONFIG(CONFIG_ARM64_4K_PAGES)
+LX_CONFIG(CONFIG_ARM64_16K_PAGES)
+LX_CONFIG(CONFIG_ARM64_64K_PAGES)
+if IS_BUILTIN(CONFIG_ARM64):
+ LX_VALUE(CONFIG_ARM64_PA_BITS)
+ LX_VALUE(CONFIG_ARM64_VA_BITS)
+ LX_VALUE(CONFIG_PAGE_SHIFT)
+ LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
+LX_CONFIG(CONFIG_SPARSEMEM)
+LX_CONFIG(CONFIG_SPARSEMEM_EXTREME)
+LX_CONFIG(CONFIG_SPARSEMEM_VMEMMAP)
+LX_CONFIG(CONFIG_KASAN)
+LX_CONFIG(CONFIG_KASAN_GENERIC)
+LX_CONFIG(CONFIG_KASAN_SW_TAGS)
+LX_CONFIG(CONFIG_KASAN_HW_TAGS)
+if IS_BUILTIN(CONFIG_KASAN_GENERIC) or IS_BUILTIN(CONFIG_KASAN_SW_TAGS):
+ LX_VALUE(CONFIG_KASAN_SHADOW_OFFSET)
+LX_CONFIG(CONFIG_VMAP_STACK)
+if IS_BUILTIN(CONFIG_NUMA):
+ LX_VALUE(CONFIG_NODES_SHIFT)
+LX_CONFIG(CONFIG_DEBUG_VIRTUAL)
+LX_CONFIG(CONFIG_STACKDEPOT)
+LX_CONFIG(CONFIG_PAGE_OWNER)
+LX_CONFIG(CONFIG_SLUB_DEBUG)
+LX_CONFIG(CONFIG_SLAB_FREELIST_HARDENED)
+LX_CONFIG(CONFIG_MMU)
+LX_CONFIG(CONFIG_BPF)
+LX_CONFIG(CONFIG_BPF_JIT)
+LX_CONFIG(CONFIG_BPF_SYSCALL)
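
The Makefile rule shown earlier runs this file through the C preprocessor and deletes everything up to the end-c-headers marker, so the macro output must already be valid Python. A hypothetical excerpt of the generated constants.py (the right-hand sides are illustrative and configuration dependent):

    import gdb

    LX_CONFIG_DEBUG_INFO_REDUCED = 0        # LX_CONFIG(): IS_BUILTIN() folded to 0/1
    LX_NR_CPUS = 512                        # LX_VALUE(): the macro's literal expansion
    LX_SB_RDONLY = gdb.parse_and_eval("1")  # LX_GDBPARSED(): expanded text handed to gdb
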
diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
new file mode 100644
index 000000000000..6edf4ef61636
--- /dev/null
+++ b/scripts/gdb/linux/cpus.py
@@ -0,0 +1,227 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# per-cpu tools
+#
+# Copyright (c) Siemens AG, 2011-2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import tasks, utils
+
+
+task_type = utils.CachedType("struct task_struct")
+
+
+MAX_CPUS = 4096
+
+
+def get_current_cpu():
+ if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
+ return gdb.selected_thread().num - 1
+ elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
+ return gdb.parse_and_eval("kgdb_active.counter")
+ else:
+ raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
+ "supported with this gdb server.")
+
+
+def per_cpu(var_ptr, cpu):
+ if cpu == -1:
+ cpu = get_current_cpu()
+ if utils.is_target_arch("sparc:v9"):
+ offset = gdb.parse_and_eval(
+ "trap_block[{0}].__per_cpu_base".format(str(cpu)))
+ else:
+ try:
+ offset = gdb.parse_and_eval(
+ "__per_cpu_offset[{0}]".format(str(cpu)))
+ except gdb.error:
+ # !CONFIG_SMP case
+ offset = 0
+ pointer = var_ptr.cast(utils.get_long_type()) + offset
+ return pointer.cast(var_ptr.type)
+
+
+cpu_mask = {}
+
+
+def cpu_mask_invalidate(event):
+ global cpu_mask
+ cpu_mask = {}
+ gdb.events.stop.disconnect(cpu_mask_invalidate)
+ if hasattr(gdb.events, 'new_objfile'):
+ gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
+
+
+def cpu_list(mask_name):
+ global cpu_mask
+ mask = None
+ if mask_name in cpu_mask:
+ mask = cpu_mask[mask_name]
+ if mask is None:
+ mask = gdb.parse_and_eval(mask_name + ".bits")
+ if hasattr(gdb, 'events'):
+ cpu_mask[mask_name] = mask
+ gdb.events.stop.connect(cpu_mask_invalidate)
+ if hasattr(gdb.events, 'new_objfile'):
+ gdb.events.new_objfile.connect(cpu_mask_invalidate)
+ bits_per_entry = mask[0].type.sizeof * 8
+    num_entries = mask.type.sizeof * 8 // bits_per_entry
+ entry = -1
+ bits = 0
+
+ while True:
+ while bits == 0:
+ entry += 1
+ if entry == num_entries:
+ return
+ bits = mask[entry]
+ if bits != 0:
+ bit = 0
+ break
+
+ while bits & 1 == 0:
+ bits >>= 1
+ bit += 1
+
+ cpu = entry * bits_per_entry + bit
+
+ bits >>= 1
+ bit += 1
+
+ yield int(cpu)
+
+
+def each_online_cpu():
+ for cpu in cpu_list("__cpu_online_mask"):
+ yield cpu
+
+
+def each_present_cpu():
+ for cpu in cpu_list("__cpu_present_mask"):
+ yield cpu
+
+
+def each_possible_cpu():
+ for cpu in cpu_list("__cpu_possible_mask"):
+ yield cpu
+
+
+def each_active_cpu():
+ for cpu in cpu_list("__cpu_active_mask"):
+ yield cpu
+
+
+class LxCpus(gdb.Command):
+ """List CPU status arrays
+
+Displays the known state of each CPU based on the kernel masks
+and can help identify the state of hotplugged CPUs"""
+
+ def __init__(self):
+ super(LxCpus, self).__init__("lx-cpus", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+        gdb.write("Possible CPUs : {}\n".format(list(each_possible_cpu())))
+        gdb.write("Present CPUs  : {}\n".format(list(each_present_cpu())))
+        gdb.write("Online CPUs   : {}\n".format(list(each_online_cpu())))
+        gdb.write("Active CPUs   : {}\n".format(list(each_active_cpu())))
+
+
+LxCpus()
+
+
+class PerCpu(gdb.Function):
+ """Return per-cpu variable.
+
+$lx_per_cpu(VAR[, CPU]): Return the per-cpu variable called VAR for the
+given CPU number. If CPU is omitted, the CPU of the current context is used.
+Note that VAR has to be quoted as string."""
+
+ def __init__(self):
+ super(PerCpu, self).__init__("lx_per_cpu")
+
+ def invoke(self, var, cpu=-1):
+ return per_cpu(var.address, cpu).dereference()
+
+
+PerCpu()
+
+
+class PerCpuPtr(gdb.Function):
+ """Return per-cpu pointer.
+
+$lx_per_cpu_ptr(VAR[, CPU]): Return the per-cpu pointer called VAR for the
+given CPU number. If CPU is omitted, the CPU of the current context is used.
+Note that VAR has to be quoted as string."""
+
+ def __init__(self):
+ super(PerCpuPtr, self).__init__("lx_per_cpu_ptr")
+
+ def invoke(self, var, cpu=-1):
+ return per_cpu(var, cpu)
+
+
+PerCpuPtr()
+
+
+def get_current_task(cpu):
+ task_ptr_type = task_type.get_type().pointer()
+
+ if utils.is_target_arch("x86"):
+ if gdb.lookup_global_symbol("cpu_tasks"):
+ # This is a UML kernel, which stores the current task
+ # differently than other x86 sub architectures
+ var_ptr = gdb.parse_and_eval("(struct task_struct *)cpu_tasks[0].task")
+ return var_ptr.dereference()
+ else:
+ var_ptr = gdb.parse_and_eval("&current_task")
+ return per_cpu(var_ptr, cpu).dereference()
+ elif utils.is_target_arch("aarch64"):
+ current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
+ if (current_task_addr >> 63) != 0:
+ current_task = current_task_addr.cast(task_ptr_type)
+ return current_task.dereference()
+ else:
+ raise gdb.GdbError("Sorry, obtaining the current task is not allowed "
+                               "while running in userspace (EL0)")
+ elif utils.is_target_arch("riscv"):
+ current_tp = gdb.parse_and_eval("$tp")
+ scratch_reg = gdb.parse_and_eval("$sscratch")
+
+        # By default tp points to the current task.
+        current_task = current_tp.cast(task_ptr_type)
+
+        # The scratch register is set to 0 in the trap handler after entering
+        # the kernel. While the hart is in user mode, the scratch register
+        # points to the task_struct and tp is used by user mode. So when the
+        # scratch register holds a larger value than tp (kernel addresses are
+        # negative, hence larger as unsigned longs), use the scratch register.
+        if (scratch_reg.cast(utils.get_ulong_type()) > current_tp.cast(utils.get_ulong_type())):
+            current_task = scratch_reg.cast(task_ptr_type)
+
+ return current_task.dereference()
+ else:
+ raise gdb.GdbError("Sorry, obtaining the current task is not yet "
+ "supported with this arch")
+
+class LxCurrentFunc(gdb.Function):
+ """Return current task.
+
+$lx_current([CPU]): Return the per-cpu task variable for the given CPU
+number. If CPU is omitted, the CPU of the current context is used."""
+
+ def __init__(self):
+ super(LxCurrentFunc, self).__init__("lx_current")
+
+ def invoke(self, cpu=-1):
+ return get_current_task(cpu)
+
+
+LxCurrentFunc()
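
A minimal sketch of the per-cpu helpers, assuming vmlinux-gdb.py has been sourced and a gdb server the scripts recognize (QEMU or kgdb); 'runqueues' is the scheduler's per-cpu struct rq and needs matching debug info:

    import gdb
    from linux import cpus

    print(list(cpus.each_online_cpu()))                     # e.g. [0, 1, 2, 3]

    rq = gdb.parse_and_eval("&runqueues")                   # a per-cpu variable
    print(cpus.per_cpu(rq, 0).dereference()["nr_running"])

    # The same thing the $lx_current() convenience function returns:
    print(cpus.get_current_task(-1)["comm"].string())
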
diff --git a/scripts/gdb/linux/device.py b/scripts/gdb/linux/device.py
new file mode 100644
index 000000000000..0eabc5f4f8ca
--- /dev/null
+++ b/scripts/gdb/linux/device.py
@@ -0,0 +1,182 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) NXP 2019
+
+import gdb
+
+from linux.utils import CachedType
+from linux.utils import container_of
+from linux.lists import list_for_each_entry
+
+
+device_private_type = CachedType('struct device_private')
+device_type = CachedType('struct device')
+
+subsys_private_type = CachedType('struct subsys_private')
+kobject_type = CachedType('struct kobject')
+kset_type = CachedType('struct kset')
+
+bus_type = CachedType('struct bus_type')
+class_type = CachedType('struct class')
+
+
+def dev_name(dev):
+ dev_init_name = dev['init_name']
+ if dev_init_name:
+ return dev_init_name.string()
+ return dev['kobj']['name'].string()
+
+
+def kset_for_each_object(kset):
+ return list_for_each_entry(kset['list'],
+ kobject_type.get_type().pointer(), "entry")
+
+
+def for_each_bus():
+ for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')):
+ subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
+ subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
+ yield subsys_priv
+
+
+def for_each_class():
+ for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
+ subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
+ subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
+ yield subsys_priv
+
+
+def get_bus_by_name(name):
+ for item in for_each_bus():
+ if item['bus']['name'].string() == name:
+ return item
+ raise gdb.GdbError("Can't find bus type {!r}".format(name))
+
+
+def get_class_by_name(name):
+ for item in for_each_class():
+ if item['class']['name'].string() == name:
+ return item
+ raise gdb.GdbError("Can't find device class {!r}".format(name))
+
+
+klist_type = CachedType('struct klist')
+klist_node_type = CachedType('struct klist_node')
+
+
+def klist_for_each(klist):
+ return list_for_each_entry(klist['k_list'],
+ klist_node_type.get_type().pointer(), 'n_node')
+
+
+def bus_for_each_device(bus):
+ for kn in klist_for_each(bus['klist_devices']):
+ dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus')
+ yield dp['device']
+
+
+def class_for_each_device(cls):
+ for kn in klist_for_each(cls['klist_devices']):
+ dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class')
+ yield dp['device']
+
+
+def device_for_each_child(dev):
+ for kn in klist_for_each(dev['p']['klist_children']):
+ dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_parent')
+ yield dp['device']
+
+
+def _show_device(dev, level=0, recursive=False):
+ gdb.write('{}dev {}:\t{}\n'.format('\t' * level, dev_name(dev), dev))
+ if recursive:
+ for child in device_for_each_child(dev):
+ _show_device(child, level + 1, recursive)
+
+
+class LxDeviceListBus(gdb.Command):
+ '''Print devices on a bus (or all buses if not specified)'''
+
+ def __init__(self):
+ super(LxDeviceListBus, self).__init__('lx-device-list-bus', gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ if not arg:
+ for bus in for_each_bus():
+ gdb.write('bus {}:\t{}\n'.format(bus['bus']['name'].string(), bus))
+ for dev in bus_for_each_device(bus):
+ _show_device(dev, level=1)
+ else:
+ bus = get_bus_by_name(arg)
+ if not bus:
+ raise gdb.GdbError("Can't find bus {!r}".format(arg))
+ for dev in bus_for_each_device(bus):
+ _show_device(dev)
+
+
+class LxDeviceListClass(gdb.Command):
+ '''Print devices in a class (or all classes if not specified)'''
+
+ def __init__(self):
+ super(LxDeviceListClass, self).__init__('lx-device-list-class', gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ if not arg:
+ for cls in for_each_class():
+ gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
+ for dev in class_for_each_device(cls):
+ _show_device(dev, level=1)
+ else:
+ cls = get_class_by_name(arg)
+ for dev in class_for_each_device(cls):
+ _show_device(dev)
+
+
+class LxDeviceListTree(gdb.Command):
+ '''Print a device and its children recursively'''
+
+ def __init__(self):
+ super(LxDeviceListTree, self).__init__('lx-device-list-tree', gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ if not arg:
+ raise gdb.GdbError('Please provide pointer to struct device')
+ dev = gdb.parse_and_eval(arg)
+ if dev.type != device_type.get_type().pointer():
+ raise gdb.GdbError('Please provide pointer to struct device')
+ _show_device(dev, level=0, recursive=True)
+
+
+class LxDeviceFindByBusName(gdb.Function):
+ '''Find struct device by bus and name (both strings)'''
+
+ def __init__(self):
+ super(LxDeviceFindByBusName, self).__init__('lx_device_find_by_bus_name')
+
+ def invoke(self, bus, name):
+ name = name.string()
+ bus = get_bus_by_name(bus.string())
+ for dev in bus_for_each_device(bus):
+ if dev_name(dev) == name:
+ return dev
+
+
+class LxDeviceFindByClassName(gdb.Function):
+ '''Find struct device by class and name (both strings)'''
+
+ def __init__(self):
+ super(LxDeviceFindByClassName, self).__init__('lx_device_find_by_class_name')
+
+ def invoke(self, cls, name):
+ name = name.string()
+ cls = get_class_by_name(cls.string())
+ for dev in class_for_each_device(cls):
+ if dev_name(dev) == name:
+ return dev
+
+
+LxDeviceListBus()
+LxDeviceListClass()
+LxDeviceListTree()
+LxDeviceFindByBusName()
+LxDeviceFindByClassName()
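
A minimal sketch combining the command and the iterator API, assuming vmlinux-gdb.py has been sourced ("platform" is just an example bus name):

    import gdb
    from linux import device

    gdb.execute("lx-device-list-class")          # every class and its devices

    # The same walk lx-device-list-bus performs, scripted:
    bus = device.get_bus_by_name("platform")
    for dev in device.bus_for_each_device(bus):
        print(device.dev_name(dev))
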
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
new file mode 100644
index 000000000000..c771831eb077
--- /dev/null
+++ b/scripts/gdb/linux/dmesg.py
@@ -0,0 +1,152 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# kernel log buffer dump
+#
+# Copyright (c) Siemens AG, 2011, 2012
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+import sys
+
+from linux import utils
+
+printk_info_type = utils.CachedType("struct printk_info")
+prb_data_blk_lpos_type = utils.CachedType("struct prb_data_blk_lpos")
+prb_desc_type = utils.CachedType("struct prb_desc")
+prb_desc_ring_type = utils.CachedType("struct prb_desc_ring")
+prb_data_ring_type = utils.CachedType("struct prb_data_ring")
+printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer")
+
+class LxDmesg(gdb.Command):
+ """Print Linux kernel log buffer."""
+
+ def __init__(self):
+ super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ inf = gdb.inferiors()[0]
+
+ # read in prb structure
+ prb_addr = int(str(gdb.parse_and_eval("(void *)'printk.c'::prb")).split()[0], 16)
+ sz = printk_ringbuffer_type.get_type().sizeof
+ prb = utils.read_memoryview(inf, prb_addr, sz).tobytes()
+
+ # read in descriptor ring structure
+ off = printk_ringbuffer_type.get_type()['desc_ring'].bitpos // 8
+ addr = prb_addr + off
+ sz = prb_desc_ring_type.get_type().sizeof
+ desc_ring = utils.read_memoryview(inf, addr, sz).tobytes()
+
+ # read in descriptor count, size, and address
+ off = prb_desc_ring_type.get_type()['count_bits'].bitpos // 8
+ desc_ring_count = 1 << utils.read_u32(desc_ring, off)
+ desc_sz = prb_desc_type.get_type().sizeof
+ off = prb_desc_ring_type.get_type()['descs'].bitpos // 8
+ desc_addr = utils.read_ulong(desc_ring, off)
+
+ # read in info size and address
+ info_sz = printk_info_type.get_type().sizeof
+ off = prb_desc_ring_type.get_type()['infos'].bitpos // 8
+ info_addr = utils.read_ulong(desc_ring, off)
+
+ # read in text data ring structure
+ off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8
+ addr = prb_addr + off
+ sz = prb_data_ring_type.get_type().sizeof
+ text_data_ring = utils.read_memoryview(inf, addr, sz).tobytes()
+
+ # read in text data size and address
+ off = prb_data_ring_type.get_type()['size_bits'].bitpos // 8
+ text_data_sz = 1 << utils.read_u32(text_data_ring, off)
+ off = prb_data_ring_type.get_type()['data'].bitpos // 8
+ text_data_addr = utils.read_ulong(text_data_ring, off)
+
+ sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8
+
+ off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8
+ begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8)
+ next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8)
+
+ ts_off = printk_info_type.get_type()['ts_nsec'].bitpos // 8
+ len_off = printk_info_type.get_type()['text_len'].bitpos // 8
+
+ # definitions from kernel/printk/printk_ringbuffer.h
+ desc_committed = 1
+ desc_finalized = 2
+ desc_sv_bits = utils.get_long_type().sizeof * 8
+ desc_flags_shift = desc_sv_bits - 2
+ desc_flags_mask = 3 << desc_flags_shift
+ desc_id_mask = ~desc_flags_mask
+
+ # read in tail and head descriptor ids
+ off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8
+ tail_id = utils.read_atomic_long(desc_ring, off)
+ off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8
+ head_id = utils.read_atomic_long(desc_ring, off)
+
+ did = tail_id
+ while True:
+ ind = did % desc_ring_count
+ desc_off = desc_sz * ind
+ info_off = info_sz * ind
+
+ desc = utils.read_memoryview(inf, desc_addr + desc_off, desc_sz).tobytes()
+
+ # skip non-committed record
+ state = 3 & (utils.read_atomic_long(desc, sv_off) >> desc_flags_shift)
+ if state != desc_committed and state != desc_finalized:
+ if did == head_id:
+ break
+ did = (did + 1) & desc_id_mask
+ continue
+
+ begin = utils.read_ulong(desc, begin_off) % text_data_sz
+ end = utils.read_ulong(desc, next_off) % text_data_sz
+
+ info = utils.read_memoryview(inf, info_addr + info_off, info_sz).tobytes()
+
+ # handle data-less record
+ if begin & 1 == 1:
+ text = ""
+ else:
+ # handle wrapping data block
+ if begin > end:
+ begin = 0
+
+ # skip over descriptor id
+ text_start = begin + utils.get_long_type().sizeof
+
+ text_len = utils.read_u16(info, len_off)
+
+ # handle truncated message
+ if end - text_start < text_len:
+ text_len = end - text_start
+
+ text_data = utils.read_memoryview(inf, text_data_addr + text_start,
+ text_len).tobytes()
+ text = text_data[0:text_len].decode(encoding='utf8', errors='replace')
+
+ time_stamp = utils.read_u64(info, ts_off)
+
+ for line in text.splitlines():
+ msg = u"[{time:12.6f}] {line}\n".format(
+ time=time_stamp / 1000000000.0,
+ line=line)
+ # With python2 gdb.write will attempt to convert unicode to
+ # ascii and might fail so pass an utf8-encoded str instead.
+ if sys.hexversion < 0x03000000:
+ msg = msg.encode(encoding='utf8', errors='replace')
+ gdb.write(msg)
+
+ if did == head_id:
+ break
+ did = (did + 1) & desc_id_mask
+
+
+LxDmesg()
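
The state/id unpacking of prb_desc.state_var above is easier to follow with concrete numbers; a worked sketch for a 64-bit target (the raw state_var value is made up):

    desc_sv_bits = 64                          # sizeof(long) * 8
    desc_flags_shift = desc_sv_bits - 2        # the top two bits hold the state
    desc_flags_mask = 3 << desc_flags_shift
    desc_id_mask = ~desc_flags_mask            # the low 62 bits hold the descriptor id

    state_var = 0x8000000000000007             # illustrative atomic_long_t contents
    state = 3 & (state_var >> desc_flags_shift)    # -> 2, i.e. desc_finalized
    did = state_var & desc_id_mask                 # -> 7, the descriptor id
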
diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
new file mode 100644
index 000000000000..b53649c0a77a
--- /dev/null
+++ b/scripts/gdb/linux/genpd.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) NXP 2019
+
+import gdb
+import sys
+
+from linux.utils import CachedType, gdb_eval_or_none
+from linux.lists import list_for_each_entry
+
+generic_pm_domain_type = CachedType('struct generic_pm_domain')
+pm_domain_data_type = CachedType('struct pm_domain_data')
+device_link_type = CachedType('struct device_link')
+
+
+def kobject_get_path(kobj):
+ path = kobj['name'].string()
+ parent = kobj['parent']
+ if parent:
+ path = kobject_get_path(parent) + '/' + path
+ return path
+
+
+def rtpm_status_str(dev):
+ if dev['power']['runtime_error']:
+ return 'error'
+ if dev['power']['disable_depth']:
+ return 'unsupported'
+ _RPM_STATUS_LOOKUP = [
+ "active",
+ "resuming",
+ "suspended",
+ "suspending"
+ ]
+ return _RPM_STATUS_LOOKUP[dev['power']['runtime_status']]
+
+
+class LxGenPDSummary(gdb.Command):
+ '''Print genpd summary
+
+Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
+
+ def __init__(self):
+ super(LxGenPDSummary, self).__init__('lx-genpd-summary', gdb.COMMAND_DATA)
+
+ def summary_one(self, genpd):
+ if genpd['status'] == 0:
+ status_string = 'on'
+ else:
+ status_string = 'off-{}'.format(genpd['state_idx'])
+
+ child_names = []
+ for link in list_for_each_entry(
+ genpd['parent_links'],
+ device_link_type.get_type().pointer(),
+ 'parent_node'):
+ child_names.append(link['child']['name'])
+
+ gdb.write('%-30s %-15s %s\n' % (
+ genpd['name'].string(),
+ status_string,
+ ', '.join(child_names)))
+
+ # Print devices in domain
+ for pm_data in list_for_each_entry(genpd['dev_list'],
+ pm_domain_data_type.get_type().pointer(),
+ 'list_node'):
+ dev = pm_data['dev']
+ kobj_path = kobject_get_path(dev['kobj'])
+ gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
+
+ def invoke(self, arg, from_tty):
+ if gdb_eval_or_none("&gpd_list") is None:
+ raise gdb.GdbError("No power domain(s) registered")
+        gdb.write('domain                          status          children\n')
+        gdb.write('    /device                                             runtime status\n')
+        gdb.write('----------------------------------------------------------------------\n')
+ for genpd in list_for_each_entry(
+ gdb.parse_and_eval('&gpd_list'),
+ generic_pm_domain_type.get_type().pointer(),
+ 'gpd_list_node'):
+ self.summary_one(genpd)
+
+
+LxGenPDSummary()
diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py
new file mode 100644
index 000000000000..f4f715a8f0e3
--- /dev/null
+++ b/scripts/gdb/linux/interrupts.py
@@ -0,0 +1,232 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2023 Broadcom
+
+import gdb
+
+from linux import constants
+from linux import cpus
+from linux import utils
+from linux import mapletree
+
+irq_desc_type = utils.CachedType("struct irq_desc")
+
+def irq_settings_is_hidden(desc):
+ return desc['status_use_accessors'] & constants.LX_IRQ_HIDDEN
+
+def irq_desc_is_chained(desc):
+ return desc['action'] and desc['action'] == gdb.parse_and_eval("&chained_action")
+
+def irqd_is_level(desc):
+ return desc['irq_data']['common']['state_use_accessors'] & constants.LX_IRQD_LEVEL
+
+def show_irq_desc(prec, irq):
+ text = ""
+
+ desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq)
+ if desc is None:
+ return text
+
+ desc = desc.cast(irq_desc_type.get_type().pointer())
+ if desc == 0:
+ return text
+
+ if irq_settings_is_hidden(desc):
+ return text
+
+ any_count = 0
+ if desc['kstat_irqs']:
+ for cpu in cpus.each_online_cpu():
+ any_count += cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt']
+
+ if (desc['action'] == 0 or irq_desc_is_chained(desc)) and any_count == 0:
+        return text
+
+ text += "%*d: " % (prec, irq)
+ for cpu in cpus.each_online_cpu():
+ if desc['kstat_irqs']:
+ count = cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt']
+ else:
+ count = 0
+ text += "%10u" % (count)
+
+ name = "None"
+ if desc['irq_data']['chip']:
+ chip = desc['irq_data']['chip']
+ if chip['name']:
+ name = chip['name'].string()
+ else:
+ name = "-"
+
+ text += " %8s" % (name)
+
+ if desc['irq_data']['domain']:
+ text += " %*lu" % (prec, desc['irq_data']['hwirq'])
+ else:
+ text += " %*s" % (prec, "")
+
+ if constants.LX_CONFIG_GENERIC_IRQ_SHOW_LEVEL:
+ text += " %-8s" % ("Level" if irqd_is_level(desc) else "Edge")
+
+ if desc['name']:
+ text += "-%-8s" % (desc['name'].string())
+
+ """ Some toolchains may not be able to provide information about irqaction """
+ try:
+ gdb.lookup_type("struct irqaction")
+ action = desc['action']
+        if action:
+            text += " %s" % (action['name'].string())
+            while True:
+                action = action['next']
+                if not action:
+                    break
+                if action['name']:
+                    text += ", %s" % (action['name'].string())
+ except:
+ pass
+
+ text += "\n"
+
+ return text
+
+def show_irq_err_count(prec):
+ cnt = utils.gdb_eval_or_none("irq_err_count")
+ text = ""
+ if cnt is not None:
+ text += "%*s: %10u\n" % (prec, "ERR", cnt['counter'])
+ return text
+
+def x86_show_irqstat(prec, pfx, field, desc):
+ irq_stat = gdb.parse_and_eval("&irq_stat")
+ text = "%*s: " % (prec, pfx)
+ for cpu in cpus.each_online_cpu():
+ stat = cpus.per_cpu(irq_stat, cpu)
+ text += "%10u " % (stat[field])
+ text += " %s\n" % (desc)
+ return text
+
+def x86_show_mce(prec, var, pfx, desc):
+ pvar = gdb.parse_and_eval(var)
+ text = "%*s: " % (prec, pfx)
+ for cpu in cpus.each_online_cpu():
+ text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference())
+ text += " %s\n" % (desc)
+ return text
+
+def x86_show_interupts(prec):
+ text = x86_show_irqstat(prec, "NMI", '__nmi_count', 'Non-maskable interrupts')
+
+ if constants.LX_CONFIG_X86_LOCAL_APIC:
+ text += x86_show_irqstat(prec, "LOC", 'apic_timer_irqs', "Local timer interrupts")
+ text += x86_show_irqstat(prec, "SPU", 'irq_spurious_count', "Spurious interrupts")
+ text += x86_show_irqstat(prec, "PMI", 'apic_perf_irqs', "Performance monitoring interrupts")
+ text += x86_show_irqstat(prec, "IWI", 'apic_irq_work_irqs', "IRQ work interrupts")
+ text += x86_show_irqstat(prec, "RTR", 'icr_read_retry_count', "APIC ICR read retries")
+ if utils.gdb_eval_or_none("x86_platform_ipi_callback") is not None:
+ text += x86_show_irqstat(prec, "PLT", 'x86_platform_ipis', "Platform interrupts")
+
+ if constants.LX_CONFIG_SMP:
+ text += x86_show_irqstat(prec, "RES", 'irq_resched_count', "Rescheduling interrupts")
+ text += x86_show_irqstat(prec, "CAL", 'irq_call_count', "Function call interrupts")
+ text += x86_show_irqstat(prec, "TLB", 'irq_tlb_count', "TLB shootdowns")
+
+ if constants.LX_CONFIG_X86_THERMAL_VECTOR:
+ text += x86_show_irqstat(prec, "TRM", 'irq_thermal_count', "Thermal events interrupts")
+
+ if constants.LX_CONFIG_X86_MCE_THRESHOLD:
+ text += x86_show_irqstat(prec, "THR", 'irq_threshold_count', "Threshold APIC interrupts")
+
+ if constants.LX_CONFIG_X86_MCE_AMD:
+ text += x86_show_irqstat(prec, "DFR", 'irq_deferred_error_count', "Deferred Error APIC interrupts")
+
+ if constants.LX_CONFIG_X86_MCE:
+ text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions")
+ text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
+
+ text += show_irq_err_count(prec)
+
+ if constants.LX_CONFIG_X86_IO_APIC:
+ cnt = utils.gdb_eval_or_none("irq_mis_count")
+ if cnt is not None:
+ text += "%*s: %10u\n" % (prec, "MIS", cnt['counter'])
+
+ if constants.LX_CONFIG_KVM:
+ text += x86_show_irqstat(prec, "PIN", 'kvm_posted_intr_ipis', 'Posted-interrupt notification event')
+ text += x86_show_irqstat(prec, "NPI", 'kvm_posted_intr_nested_ipis', 'Nested posted-interrupt event')
+ text += x86_show_irqstat(prec, "PIW", 'kvm_posted_intr_wakeup_ipis', 'Posted-interrupt wakeup event')
+
+ return text
+
+def arm_common_show_interrupts(prec):
+ text = ""
+ nr_ipi = utils.gdb_eval_or_none("nr_ipi")
+ ipi_desc = utils.gdb_eval_or_none("ipi_desc")
+ ipi_types = utils.gdb_eval_or_none("ipi_types")
+ if nr_ipi is None or ipi_desc is None or ipi_types is None:
+ return text
+
+ if prec >= 4:
+ sep = " "
+ else:
+ sep = ""
+
+ for ipi in range(nr_ipi):
+ text += "%*s%u:%s" % (prec - 1, "IPI", ipi, sep)
+ desc = ipi_desc[ipi].cast(irq_desc_type.get_type().pointer())
+ if desc == 0:
+ continue
+ for cpu in cpus.each_online_cpu():
+ text += "%10u" % (cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt'])
+ text += " %s" % (ipi_types[ipi].string())
+ text += "\n"
+ return text
+
+def aarch64_show_interrupts(prec):
+ text = arm_common_show_interrupts(prec)
+ text += "%*s: %10lu\n" % (prec, "ERR", gdb.parse_and_eval("irq_err_count"))
+ return text
+
+def arch_show_interrupts(prec):
+ text = ""
+ if utils.is_target_arch("x86"):
+ text += x86_show_interupts(prec)
+ elif utils.is_target_arch("aarch64"):
+ text += aarch64_show_interrupts(prec)
+ elif utils.is_target_arch("arm"):
+ text += arm_common_show_interrupts(prec)
+ elif utils.is_target_arch("mips"):
+ text += show_irq_err_count(prec)
+ else:
+        raise gdb.GdbError("Unsupported architecture")
+
+ return text
+
+class LxInterruptList(gdb.Command):
+ """Print /proc/interrupts"""
+
+ def __init__(self):
+ super(LxInterruptList, self).__init__("lx-interruptlist", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ nr_irqs = gdb.parse_and_eval("nr_irqs")
+ prec = 3
+ j = 1000
+ while prec < 10 and j <= nr_irqs:
+ prec += 1
+ j *= 10
+
+ gdb.write("%*s" % (prec + 8, ""))
+ for cpu in cpus.each_online_cpu():
+ gdb.write("CPU%-8d" % cpu)
+ gdb.write("\n")
+
+ if utils.gdb_eval_or_none("&sparse_irqs") is None:
+ raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?")
+
+ for irq in range(nr_irqs):
+ gdb.write(show_irq_desc(prec, irq))
+ gdb.write(arch_show_interrupts(prec))
+
+
+LxInterruptList()
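
show_irq_desc() resolves descriptors through the sparse_irqs maple tree; a minimal sketch of the same lookup done by hand, assuming CONFIG_SPARSE_IRQ and a sourced vmlinux-gdb.py (the IRQ number is illustrative):

    import gdb
    from linux import mapletree, utils

    irq_desc_ptr = utils.CachedType("struct irq_desc").get_type().pointer()
    entry = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), 16)
    if entry is not None:
        desc = entry.cast(irq_desc_ptr)
        print(desc["irq_data"]["hwirq"])
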
diff --git a/scripts/gdb/linux/kasan.py b/scripts/gdb/linux/kasan.py
new file mode 100644
index 000000000000..56730b3fde0b
--- /dev/null
+++ b/scripts/gdb/linux/kasan.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2024 Canonical Ltd.
+#
+# Authors:
+# Kuan-Ying Lee <kuan-ying.lee@canonical.com>
+#
+
+import gdb
+from linux import constants, mm
+
+def help():
+ t = """Usage: lx-kasan_mem_to_shadow [Hex memory addr]
+ Example:
+ lx-kasan_mem_to_shadow 0xffff000008eca008\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class KasanMemToShadow(gdb.Command):
+ """Translate memory address to kasan shadow address"""
+
+ p_ops = None
+
+ def __init__(self):
+ if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+ super(KasanMemToShadow, self).__init__("lx-kasan_mem_to_shadow", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+        if not (constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS):
+ raise gdb.GdbError('CONFIG_KASAN_GENERIC or CONFIG_KASAN_SW_TAGS is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ if self.p_ops is None:
+ self.p_ops = mm.page_ops().ops
+ addr = int(argv[0], 16)
+ shadow_addr = self.kasan_mem_to_shadow(addr)
+ gdb.write('shadow addr: 0x%x\n' % shadow_addr)
+ else:
+ help()
+ def kasan_mem_to_shadow(self, addr):
+ return (addr >> self.p_ops.KASAN_SHADOW_SCALE_SHIFT) + self.p_ops.KASAN_SHADOW_OFFSET
+
+KasanMemToShadow()
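
The command implements the usual KASAN translation, shadow = (addr >> scale_shift) + shadow_offset; a worked sketch with illustrative constants (generic KASAN maps 8 bytes of memory per shadow byte, the offset comes from the kernel configuration):

    KASAN_SHADOW_SCALE_SHIFT = 3                  # generic KASAN granularity
    KASAN_SHADOW_OFFSET = 0xdfff800000000000      # illustrative, config specific

    addr = 0xffff000008eca008                     # the address from the usage text above
    shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
    print(hex(shadow))                            # 0xffff6000011d9401
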
diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py
new file mode 100644
index 000000000000..bae4d70b7eae
--- /dev/null
+++ b/scripts/gdb/linux/lists.py
@@ -0,0 +1,136 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# list tools
+#
+# Copyright (c) Thiebaud Weksteen, 2015
+#
+# Authors:
+# Thiebaud Weksteen <thiebaud@weksteen.fr>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import utils
+
+list_head = utils.CachedType("struct list_head")
+hlist_head = utils.CachedType("struct hlist_head")
+hlist_node = utils.CachedType("struct hlist_node")
+
+
+def list_for_each(head):
+ if head.type == list_head.get_type().pointer():
+ head = head.dereference()
+ elif head.type != list_head.get_type():
+ raise TypeError("Must be struct list_head not {}"
+ .format(head.type))
+
+ if head['next'] == 0:
+ gdb.write("list_for_each: Uninitialized list '{}' treated as empty\n"
+ .format(head.address))
+ return
+
+ node = head['next'].dereference()
+ while node.address != head.address:
+ yield node.address
+ node = node['next'].dereference()
+
+
+def list_for_each_entry(head, gdbtype, member):
+ for node in list_for_each(head):
+ yield utils.container_of(node, gdbtype, member)
+
+
+def hlist_for_each(head):
+ if head.type == hlist_head.get_type().pointer():
+ head = head.dereference()
+ elif head.type != hlist_head.get_type():
+ raise TypeError("Must be struct hlist_head not {}"
+ .format(head.type))
+
+ node = head['first'].dereference()
+ while node.address:
+ yield node.address
+ node = node['next'].dereference()
+
+
+def hlist_for_each_entry(head, gdbtype, member):
+ for node in hlist_for_each(head):
+ yield utils.container_of(node, gdbtype, member)
+
+
+def list_check(head):
+ nb = 0
+ if (head.type == list_head.get_type().pointer()):
+ head = head.dereference()
+ elif (head.type != list_head.get_type()):
+ raise gdb.GdbError('argument must be of type (struct list_head [*])')
+ c = head
+ try:
+ gdb.write("Starting with: {}\n".format(c))
+ except gdb.MemoryError:
+ gdb.write('head is not accessible\n')
+ return
+ while True:
+ p = c['prev'].dereference()
+ n = c['next'].dereference()
+ try:
+ if p['next'] != c.address:
+ gdb.write('prev.next != current: '
+ 'current@{current_addr}={current} '
+ 'prev@{p_addr}={p}\n'.format(
+ current_addr=c.address,
+ current=c,
+ p_addr=p.address,
+ p=p,
+ ))
+ return
+ except gdb.MemoryError:
+ gdb.write('prev is not accessible: '
+ 'current@{current_addr}={current}\n'.format(
+ current_addr=c.address,
+ current=c
+ ))
+ return
+ try:
+ if n['prev'] != c.address:
+ gdb.write('next.prev != current: '
+ 'current@{current_addr}={current} '
+ 'next@{n_addr}={n}\n'.format(
+ current_addr=c.address,
+ current=c,
+ n_addr=n.address,
+ n=n,
+ ))
+ return
+ except gdb.MemoryError:
+ gdb.write('next is not accessible: '
+ 'current@{current_addr}={current}\n'.format(
+ current_addr=c.address,
+ current=c
+ ))
+ return
+ c = n
+ nb += 1
+ if c == head:
+ gdb.write("list is consistent: {} node(s)\n".format(nb))
+ return
+
+
+class LxListChk(gdb.Command):
+ """Verify a list consistency"""
+
+ def __init__(self):
+ super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
+ gdb.COMPLETE_EXPRESSION)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ if len(argv) != 1:
+ raise gdb.GdbError("lx-list-check takes one argument")
+ list_check(gdb.parse_and_eval(argv[0]))
+
+
+LxListChk()
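
A minimal sketch of the iterators on a well-known kernel list, assuming CONFIG_MODULES and debug info for struct module:

    import gdb
    from linux import lists, utils

    module_ptr = utils.CachedType("struct module").get_type().pointer()
    for mod in lists.list_for_each_entry(gdb.parse_and_eval("modules"),
                                         module_ptr, "list"):
        print(mod["name"].string())

    gdb.execute("lx-list-check modules")      # sanity-check the same list head
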
diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py
new file mode 100644
index 000000000000..d52d51c0a03f
--- /dev/null
+++ b/scripts/gdb/linux/mapletree.py
@@ -0,0 +1,252 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Maple tree helpers
+#
+# Copyright (c) 2025 Broadcom
+#
+# Authors:
+# Florian Fainelli <florian.fainelli@broadcom.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+from linux import xarray
+
+maple_tree_root_type = utils.CachedType("struct maple_tree")
+maple_node_type = utils.CachedType("struct maple_node")
+maple_enode_type = utils.CachedType("void")
+
+maple_dense = 0
+maple_leaf_64 = 1
+maple_range_64 = 2
+maple_arange_64 = 3
+
+class Mas(object):
+ ma_active = 0
+ ma_start = 1
+ ma_root = 2
+ ma_none = 3
+ ma_pause = 4
+ ma_overflow = 5
+ ma_underflow = 6
+ ma_error = 7
+
+ def __init__(self, mt, first, end):
+        if mt.type == maple_tree_root_type.get_type().pointer():
+            mt = mt.dereference()
+        elif mt.type != maple_tree_root_type.get_type():
+            raise gdb.GdbError("must be {} not {}"
+                               .format(maple_tree_root_type.get_type().pointer(), mt.type))
+        self.tree = mt
+ self.index = first
+ self.last = end
+ self.node = None
+ self.status = self.ma_start
+ self.min = 0
+ self.max = -1
+
+ def is_start(self):
+ # mas_is_start()
+ return self.status == self.ma_start
+
+ def is_ptr(self):
+ # mas_is_ptr()
+ return self.status == self.ma_root
+
+ def is_none(self):
+ # mas_is_none()
+ return self.status == self.ma_none
+
+ def root(self):
+ # mas_root()
+ return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer())
+
+ def start(self):
+ # mas_start()
+ if self.is_start() is False:
+ return None
+
+ self.min = 0
+ self.max = ~0
+
+ while True:
+ self.depth = 0
+ root = self.root()
+ if xarray.xa_is_node(root):
+ self.depth = 0
+ self.status = self.ma_active
+ self.node = mte_safe_root(root)
+ self.offset = 0
+ if mte_dead_node(self.node) is True:
+ continue
+
+ return None
+
+ self.node = None
+ # Empty tree
+ if root is None:
+ self.status = self.ma_none
+ self.offset = constants.LX_MAPLE_NODE_SLOTS
+ return None
+
+ # Single entry tree
+ self.status = self.ma_root
+ self.offset = constants.LX_MAPLE_NODE_SLOTS
+
+ if self.index != 0:
+ return None
+
+ return root
+
+ return None
+
+ def reset(self):
+ # mas_reset()
+ self.status = self.ma_start
+ self.node = None
+
+def mte_safe_root(node):
+ if node.type != maple_enode_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type))
+ ulong_type = utils.get_ulong_type()
+ indirect_ptr = node.cast(ulong_type) & ~0x2
+ val = indirect_ptr.cast(maple_enode_type.get_type().pointer())
+ return val
+
+def mte_node_type(entry):
+ ulong_type = utils.get_ulong_type()
+ val = None
+ if entry.type == maple_enode_type.get_type().pointer():
+ val = entry.cast(ulong_type)
+ elif entry.type == ulong_type:
+ val = entry
+ else:
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type))
+ return (val >> 0x3) & 0xf
+
+def ma_dead_node(node):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type))
+ ulong_type = utils.get_ulong_type()
+ parent = node['parent']
+ indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK
+ return indirect_ptr == node
+
+def mte_to_node(enode):
+ ulong_type = utils.get_ulong_type()
+ if enode.type == maple_enode_type.get_type().pointer():
+ indirect_ptr = enode.cast(ulong_type)
+ elif enode.type == ulong_type:
+ indirect_ptr = enode
+ else:
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
+ indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK
+ return indirect_ptr.cast(maple_node_type.get_type().pointer())
+
+def mte_dead_node(enode):
+ if enode.type != maple_enode_type.get_type().pointer():
+ raise gdb.GdbError("{} must be {} not {}"
+ .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
+ node = mte_to_node(enode)
+ return ma_dead_node(node)
+
+def ma_is_leaf(tp):
+    return tp < maple_range_64
+
+def mt_pivots(t):
+ if t == maple_dense:
+ return 0
+ elif t == maple_leaf_64 or t == maple_range_64:
+ return constants.LX_MAPLE_RANGE64_SLOTS - 1
+ elif t == maple_arange_64:
+ return constants.LX_MAPLE_ARANGE64_SLOTS - 1
+
+def ma_pivots(node, t):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{}: must be {} not {}"
+ .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type))
+ if t == maple_arange_64:
+ return node['ma64']['pivot']
+ elif t == maple_leaf_64 or t == maple_range_64:
+ return node['mr64']['pivot']
+ else:
+ return None
+
+def ma_slots(node, tp):
+ if node.type != maple_node_type.get_type().pointer():
+ raise gdb.GdbError("{}: must be {} not {}"
+ .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type))
+ if tp == maple_arange_64:
+ return node['ma64']['slot']
+ elif tp == maple_range_64 or tp == maple_leaf_64:
+ return node['mr64']['slot']
+ elif tp == maple_dense:
+ return node['slot']
+ else:
+ return None
+
+def mt_slot(mt, slots, offset):
+ ulong_type = utils.get_ulong_type()
+ return slots[offset].cast(ulong_type)
+
+def mtree_lookup_walk(mas):
+ ulong_type = utils.get_ulong_type()
+ n = mas.node
+
+ while True:
+ node = mte_to_node(n)
+ tp = mte_node_type(n)
+ pivots = ma_pivots(node, tp)
+ end = mt_pivots(tp)
+ offset = 0
+ while True:
+ if pivots[offset] >= mas.index:
+ break
+ if offset >= end:
+ break
+ offset += 1
+
+ slots = ma_slots(node, tp)
+ n = mt_slot(mas.tree, slots, offset)
+ if ma_dead_node(node) is True:
+ mas.reset()
+ return None
+ break
+
+ if ma_is_leaf(tp) is True:
+ break
+
+ return n
+
+def mtree_load(mt, index):
+ ulong_type = utils.get_ulong_type()
+ # MT_STATE(...)
+ mas = Mas(mt, index, index)
+ entry = None
+
+ while True:
+ entry = mas.start()
+ if mas.is_none():
+ return None
+
+ if mas.is_ptr():
+ if index != 0:
+ entry = None
+ return entry
+
+ entry = mtree_lookup_walk(mas)
+ if entry is None and mas.is_start():
+ continue
+ else:
+ break
+
+ if xarray.xa_is_zero(entry):
+ return None
+
+ return entry
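
All of the helpers above revolve around the encoded node ("enode") layout: a maple_node pointer carrying the node type in bits 3-6 and metadata in the low byte. A worked sketch with a made-up, 256-byte-aligned node address:

    maple_range_64 = 2                            # as defined in the module above

    node_addr = 0xffff000012345600                # illustrative, low byte clear
    enode = node_addr | (maple_range_64 << 3)

    print((enode >> 3) & 0xf)    # 2, what mte_node_type() extracts
    print(hex(enode & ~255))     # node_addr again, what mte_to_node() recovers
                                 # (assumes MAPLE_NODE_MASK == 255)
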
diff --git a/scripts/gdb/linux/mm.py b/scripts/gdb/linux/mm.py
new file mode 100644
index 000000000000..7571aebbe650
--- /dev/null
+++ b/scripts/gdb/linux/mm.py
@@ -0,0 +1,403 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2023 MediaTek Inc.
+#
+# Authors:
+# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
+#
+
+import gdb
+import math
+from linux import utils, constants
+
+def DIV_ROUND_UP(n,d):
+ return ((n) + (d) - 1) // (d)
+
+def test_bit(nr, addr):
+ if addr.dereference() & (0x1 << nr):
+ return True
+ else:
+ return False
+
+class page_ops():
+ ops = None
+ def __init__(self):
+ if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
+            raise gdb.GdbError('Only CONFIG_SPARSEMEM_VMEMMAP is supported for now')
+ if constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64'):
+ self.ops = aarch64_page_ops()
+ else:
+            raise gdb.GdbError('Only aarch64 is supported for now')
+
+class aarch64_page_ops():
+ def __init__(self):
+ self.SUBSECTION_SHIFT = 21
+        self.SUBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
+ self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024
+
+ if constants.LX_CONFIG_ARM64_64K_PAGES:
+ self.SECTION_SIZE_BITS = 29
+ else:
+ self.SECTION_SIZE_BITS = 27
+ self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS
+
+ self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
+ self.PAGE_SIZE = 1 << self.PAGE_SHIFT
+ self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)
+
+ self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
+ if self.VA_BITS > 48:
+ if constants.LX_CONFIG_ARM64_16K_PAGES:
+ self.VA_BITS_MIN = 47
+ else:
+ self.VA_BITS_MIN = 48
+ tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True)
+ tcr_el1 = int(tcr_el1.split()[1], 16)
+ self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63)
+ else:
+ self.VA_BITS_MIN = self.VA_BITS
+ self.vabits_actual = self.VA_BITS
+ self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)
+
+ self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS
+
+ if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
+ self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
+ else:
+ self.MAX_ORDER = 10
+
+ self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER)
+ self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
+ self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
+ self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
+ self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)
+
+ if constants.LX_CONFIG_SPARSEMEM_EXTREME:
+ self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
+ else:
+ self.SECTIONS_PER_ROOT = 1
+
+ self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
+ self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
+ self.SUBSECTION_SHIFT = 21
+        self.SUBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
+ self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
+ self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT
+
+ self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
+ self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))
+
+ self.struct_page_size = utils.get_page_type().sizeof
+ self.STRUCT_PAGE_MAX_SHIFT = (int)(math.log(self.struct_page_size, 2))
+
+ self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
+ self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
+ self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE
+
+ self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET
+ self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size
+ self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff
+ self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE
+
+ self.VMALLOC_START = self.MODULES_END
+ self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024
+
+ self.memstart_addr = gdb.parse_and_eval("memstart_addr")
+ self.PHYS_OFFSET = self.memstart_addr
+ self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)
+
+ self.KERNEL_START = gdb.parse_and_eval("_text")
+ self.KERNEL_END = gdb.parse_and_eval("_end")
+
+ if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+ if constants.LX_CONFIG_KASAN_GENERIC:
+ self.KASAN_SHADOW_SCALE_SHIFT = 3
+ else:
+ self.KASAN_SHADOW_SCALE_SHIFT = 4
+ self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
+ self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
+ self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
+ else:
+ self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)
+
+ if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
+ self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
+ else:
+ self.NODE_SHIFT = 0
+
+ self.MAX_NUMNODES = 1 << self.NODE_SHIFT
+
+ def SECTION_NR_TO_ROOT(self, sec):
+ return sec // self.SECTIONS_PER_ROOT
+
+ def __nr_to_section(self, nr):
+ root = self.SECTION_NR_TO_ROOT(nr)
+ mem_section = gdb.parse_and_eval("mem_section")
+ return mem_section[root][nr & self.SECTION_ROOT_MASK]
+
+ def pfn_to_section_nr(self, pfn):
+ return pfn >> self.PFN_SECTION_SHIFT
+
+ def section_nr_to_pfn(self, sec):
+ return sec << self.PFN_SECTION_SHIFT
+
+ def __pfn_to_section(self, pfn):
+ return self.__nr_to_section(self.pfn_to_section_nr(pfn))
+
+ def pfn_to_section(self, pfn):
+ return self.__pfn_to_section(pfn)
+
+ def subsection_map_index(self, pfn):
+ return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
+
+ def pfn_section_valid(self, ms, pfn):
+ if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
+ idx = self.subsection_map_index(pfn)
+ return test_bit(idx, ms['usage']['subsection_map'])
+ else:
+ return True
+
+ def valid_section(self, mem_section):
+ if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
+ return True
+ return False
+
+ def early_section(self, mem_section):
+ if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
+ return True
+ return False
+
+ def pfn_valid(self, pfn):
+ ms = None
+ if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
+ return False
+ if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
+ return False
+ ms = self.__pfn_to_section(pfn)
+
+ if not self.valid_section(ms):
+ return False
+ return self.early_section(ms) or self.pfn_section_valid(ms, pfn)
+
+ def _PAGE_OFFSET(self, va):
+ return (-(1 << (va))) & 0xffffffffffffffff
+
+ def _PAGE_END(self, va):
+ return (-(1 << (va - 1))) & 0xffffffffffffffff
+
+ def kasan_reset_tag(self, addr):
+ if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
+ return int(addr) | (0xff << 56)
+ else:
+ return addr
+
+ def __is_lm_address(self, addr):
+ if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
+ return True
+ else:
+ return False
+ def __lm_to_phys(self, addr):
+ return addr - self.PAGE_OFFSET + self.PHYS_OFFSET
+
+ def __kimg_to_phys(self, addr):
+ return addr - self.kimage_voffset
+
+ def __virt_to_phys_nodebug(self, va):
+ untagged_va = self.kasan_reset_tag(va)
+ if self.__is_lm_address(untagged_va):
+ return self.__lm_to_phys(untagged_va)
+ else:
+ return self.__kimg_to_phys(untagged_va)
+
+ def __virt_to_phys(self, va):
+ if constants.LX_CONFIG_DEBUG_VIRTUAL:
+ if not self.__is_lm_address(self.kasan_reset_tag(va)):
+ raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
+ return self.__virt_to_phys_nodebug(va)
+
+ def virt_to_phys(self, va):
+ return self.__virt_to_phys(va)
+
+ def PFN_PHYS(self, pfn):
+ return pfn << self.PAGE_SHIFT
+
+ def PHYS_PFN(self, phys):
+ return phys >> self.PAGE_SHIFT
+
+ def __phys_to_virt(self, pa):
+ return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET
+
+ def __phys_to_pfn(self, pa):
+ return self.PHYS_PFN(pa)
+
+ def __pfn_to_phys(self, pfn):
+ return self.PFN_PHYS(pfn)
+
+ def __pa_symbol_nodebug(self, x):
+ return self.__kimg_to_phys(x)
+
+ def __phys_addr_symbol(self, x):
+ if constants.LX_CONFIG_DEBUG_VIRTUAL:
+ if x < self.KERNEL_START or x > self.KERNEL_END:
+ raise gdb.GdbError("0x%x exceed kernel range" % x)
+ return self.__pa_symbol_nodebug(x)
+
+ def __pa_symbol(self, x):
+ return self.__phys_addr_symbol(x)
+
+ def __va(self, pa):
+ return self.__phys_to_virt(pa)
+
+ def pfn_to_kaddr(self, pfn):
+ return self.__va(pfn << self.PAGE_SHIFT)
+
+ def virt_to_pfn(self, va):
+ return self.__phys_to_pfn(self.__virt_to_phys(va))
+
+ def sym_to_pfn(self, x):
+ return self.__phys_to_pfn(self.__pa_symbol(x))
+
+ def page_to_pfn(self, page):
+ return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))
+
+ def page_to_phys(self, page):
+ return self.__pfn_to_phys(self.page_to_pfn(page))
+
+ def pfn_to_page(self, pfn):
+ return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())
+
+ def page_to_virt(self, page):
+ if constants.LX_CONFIG_DEBUG_VIRTUAL:
+ return self.__va(self.page_to_phys(page))
+ else:
+ __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
+ return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)
+
+ def virt_to_page(self, va):
+ if constants.LX_CONFIG_DEBUG_VIRTUAL:
+ return self.pfn_to_page(self.virt_to_pfn(va))
+ else:
+ __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
+ addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
+ return gdb.Value(addr).cast(utils.get_page_type().pointer())
+
+ def page_address(self, page):
+ return self.page_to_virt(page)
+
+ def folio_address(self, folio):
+ return self.page_address(folio['page'].address)
+
+class LxPFN2Page(gdb.Command):
+ """PFN to struct page"""
+
+ def __init__(self):
+ super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ pfn = int(argv[0])
+ page = page_ops().ops.pfn_to_page(pfn)
+ gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))
+
+LxPFN2Page()
+
+class LxPage2PFN(gdb.Command):
+ """struct page to PFN"""
+
+ def __init__(self):
+ super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ struct_page_addr = int(argv[0], 16)
+ page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+ pfn = page_ops().ops.page_to_pfn(page)
+ gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))
+
+LxPage2PFN()
+
+class LxPageAddress(gdb.Command):
+ """struct page to linear mapping address"""
+
+ def __init__(self):
+ super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ struct_page_addr = int(argv[0], 16)
+ page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+ addr = page_ops().ops.page_address(page)
+ gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))
+
+LxPageAddress()
+
+class LxPage2Phys(gdb.Command):
+ """struct page to physical address"""
+
+ def __init__(self):
+ super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ struct_page_addr = int(argv[0], 16)
+ page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+ phys_addr = page_ops().ops.page_to_phys(page)
+ gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))
+
+LxPage2Phys()
+
+class LxVirt2Phys(gdb.Command):
+ """virtual address to physical address"""
+
+ def __init__(self):
+ super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ linear_addr = int(argv[0], 16)
+ phys_addr = page_ops().ops.virt_to_phys(linear_addr)
+ gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (linear_addr, phys_addr))
+
+LxVirt2Phys()
+
+class LxVirt2Page(gdb.Command):
+ """virtual address to struct page"""
+
+ def __init__(self):
+ super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ linear_addr = int(argv[0], 16)
+ page = page_ops().ops.virt_to_page(linear_addr)
+ gdb.write("virt_to_page(0x%x) = 0x%x\n" % (linear_addr, page))
+
+LxVirt2Page()
+
+class LxSym2PFN(gdb.Command):
+ """symbol address to PFN"""
+
+ def __init__(self):
+ super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ sym_addr = int(argv[0], 16)
+ pfn = page_ops().ops.sym_to_pfn(sym_addr)
+ gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))
+
+LxSym2PFN()
+
+class LxPFN2Kaddr(gdb.Command):
+ """PFN to kernel address"""
+
+ def __init__(self):
+ super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ pfn = int(argv[0])
+ kaddr = page_ops().ops.pfn_to_kaddr(pfn)
+ gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))
+
+LxPFN2Kaddr()
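
A minimal sketch of the page_ops helpers above, assuming an arm64 target built with
CONFIG_SPARSEMEM_VMEMMAP and that vmlinux-gdb.py has been sourced; the PFN below is
hypothetical:

    (gdb) python
    from linux import mm
    ops = mm.page_ops().ops                   # resolves to aarch64_page_ops here
    page = ops.pfn_to_page(0x80000)           # hypothetical PFN
    gdb.write("phys = 0x%x\n" % ops.page_to_phys(page))
    end

The lx-* commands registered above wrap the same helpers, e.g. "lx-pfn_to_page 524288"
(the argument is hypothetical).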
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
new file mode 100644
index 000000000000..fa15f872ddbe
--- /dev/null
+++ b/scripts/gdb/linux/modules.py
@@ -0,0 +1,134 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# module tools
+#
+# Copyright (c) Siemens AG, 2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import cpus, utils, lists, constants
+
+
+module_type = utils.CachedType("struct module")
+
+
+def has_modules():
+ return utils.gdb_eval_or_none("modules") is not None
+
+def module_list():
+ global module_type
+ modules = utils.gdb_eval_or_none("modules")
+ if modules is None:
+ return
+
+ module_ptr_type = module_type.get_type().pointer()
+
+ for module in lists.list_for_each_entry(modules, module_ptr_type, "list"):
+ yield module
+
+
+def find_module_by_name(name):
+ for module in module_list():
+ if module['name'].string() == name:
+ return module
+ return None
+
+
+class LxModule(gdb.Function):
+ """Find module by name and return the module variable.
+
+$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
+of the target and return that module variable which MODULE matches."""
+
+ def __init__(self):
+ super(LxModule, self).__init__("lx_module")
+
+ def invoke(self, mod_name):
+ mod_name = mod_name.string()
+ module = find_module_by_name(mod_name)
+ if module:
+ return module.dereference()
+ else:
+ raise gdb.GdbError("Unable to find MODULE " + mod_name)
+
+
+LxModule()
+
+
+class LxLsmod(gdb.Command):
+ """List currently loaded modules."""
+
+ _module_use_type = utils.CachedType("struct module_use")
+
+ def __init__(self):
+ super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ gdb.write(
+ "Address{0} Module Size Used by\n".format(
+ " " if utils.get_long_type().sizeof == 8 else ""))
+
+ for module in module_list():
+ text = module['mem'][constants.LX_MOD_TEXT]
+ text_addr = str(text['base']).split()[0]
+ total_size = 0
+
+ for i in range(constants.LX_MOD_TEXT, constants.LX_MOD_RO_AFTER_INIT + 1):
+ total_size += module['mem'][i]['size']
+
+ gdb.write("{address} {name:<19} {size:>8} {ref}".format(
+ address=text_addr,
+ name=module['name'].string(),
+ size=str(total_size),
+ ref=str(module['refcnt']['counter'] - 1)))
+
+ t = self._module_use_type.get_type().pointer()
+ first = True
+ sources = module['source_list']
+ for use in lists.list_for_each_entry(sources, t, "source_list"):
+ gdb.write("{separator}{name}".format(
+ separator=" " if first else ",",
+ name=use['source']['name'].string()))
+ first = False
+
+ gdb.write("\n")
+
+LxLsmod()
+
+def help():
+ t = """Usage: lx-getmod-by-textaddr [Heximal Address]
+ Example: lx-getmod-by-textaddr 0xffff800002d305ac\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class LxFindTextAddrinMod(gdb.Command):
+ '''Look up loaded kernel module by text address.'''
+
+ def __init__(self):
+ super(LxFindTextAddrinMod, self).__init__('lx-getmod-by-textaddr', gdb.COMMAND_SUPPORT)
+
+ def invoke(self, arg, from_tty):
+ args = gdb.string_to_argv(arg)
+
+ if len(args) != 1:
+ help()
+
+ addr = gdb.Value(int(args[0], 16)).cast(utils.get_ulong_type())
+ for mod in module_list():
+ mod_text_start = mod['mem'][constants.LX_MOD_TEXT]['base']
+ mod_text_end = mod_text_start + mod['mem'][constants.LX_MOD_TEXT]['size'].cast(utils.get_ulong_type())
+
+ if addr >= mod_text_start and addr < mod_text_end:
+ s = "0x%x" % addr + " is in " + mod['name'].string() + ".ko\n"
+ gdb.write(s)
+ return
+ gdb.write("0x%x is not in any module text section\n" % addr)
+
+LxFindTextAddrinMod()
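
A minimal usage sketch for the module helpers above, assuming vmlinux-gdb.py has been
sourced; the module name is hypothetical and the text address is the one from the
help text:

    (gdb) lx-lsmod
    (gdb) lx-getmod-by-textaddr 0xffff800002d305ac
    (gdb) python
    from linux import constants, modules
    mod = modules.find_module_by_name("ext4")          # hypothetical module name
    if mod is not None:
        gdb.write("text base: %s\n" % mod['mem'][constants.LX_MOD_TEXT]['base'])
    end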
diff --git a/scripts/gdb/linux/page_owner.py b/scripts/gdb/linux/page_owner.py
new file mode 100644
index 000000000000..8e713a09cfe7
--- /dev/null
+++ b/scripts/gdb/linux/page_owner.py
@@ -0,0 +1,182 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2023 MediaTek Inc.
+#
+# Authors:
+# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
+#
+
+import gdb
+from linux import utils, stackdepot, constants, mm
+
+if constants.LX_CONFIG_PAGE_OWNER:
+ page_ext_t = utils.CachedType('struct page_ext')
+ page_owner_t = utils.CachedType('struct page_owner')
+
+ PAGE_OWNER_STACK_DEPTH = 16
+ PAGE_EXT_OWNER = constants.LX_PAGE_EXT_OWNER
+ PAGE_EXT_INVALID = 0x1
+ PAGE_EXT_OWNER_ALLOCATED = constants.LX_PAGE_EXT_OWNER_ALLOCATED
+
+def help():
+ t = """Usage: lx-dump-page-owner [Option]
+ Option:
+ --pfn [Decimal pfn]
+ Example:
+ lx-dump-page-owner --pfn 655360\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class DumpPageOwner(gdb.Command):
+ """Dump page owner"""
+
+ min_pfn = None
+ max_pfn = None
+ p_ops = None
+ migrate_reason_names = None
+
+ def __init__(self):
+ super(DumpPageOwner, self).__init__("lx-dump-page-owner", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not constants.LX_CONFIG_PAGE_OWNER:
+            raise gdb.GdbError('CONFIG_PAGE_OWNER is not enabled')
+
+ page_owner_inited = gdb.parse_and_eval('page_owner_inited')
+ if page_owner_inited['key']['enabled']['counter'] != 0x1:
+ raise gdb.GdbError('page_owner_inited is not enabled')
+
+ self.p_ops = mm.page_ops().ops
+ self.get_page_owner_info()
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 0:
+ self.read_page_owner()
+ elif len(argv) == 2:
+ if argv[0] == "--pfn":
+ pfn = int(argv[1])
+ self.read_page_owner_by_addr(self.p_ops.pfn_to_page(pfn))
+ else:
+ help()
+ else:
+ help()
+
+ def get_page_owner_info(self):
+ self.min_pfn = int(gdb.parse_and_eval("min_low_pfn"))
+ self.max_pfn = int(gdb.parse_and_eval("max_pfn"))
+ self.page_ext_size = int(gdb.parse_and_eval("page_ext_size"))
+ self.migrate_reason_names = gdb.parse_and_eval('migrate_reason_names')
+
+ def page_ext_invalid(self, page_ext):
+ if page_ext == gdb.Value(0):
+ return True
+        if (page_ext.cast(utils.get_ulong_type()) & PAGE_EXT_INVALID) == PAGE_EXT_INVALID:
+ return True
+ return False
+
+ def get_entry(self, base, index):
+ return (base.cast(utils.get_ulong_type()) + self.page_ext_size * index).cast(page_ext_t.get_type().pointer())
+
+ def lookup_page_ext(self, page):
+ pfn = self.p_ops.page_to_pfn(page)
+ section = self.p_ops.pfn_to_section(pfn)
+ page_ext = section["page_ext"]
+ if self.page_ext_invalid(page_ext):
+ return gdb.Value(0)
+ return self.get_entry(page_ext, pfn)
+
+ def page_ext_get(self, page):
+ page_ext = self.lookup_page_ext(page)
+ if page_ext != gdb.Value(0):
+ return page_ext
+ else:
+ return gdb.Value(0)
+
+ def get_page_owner(self, page_ext):
+ addr = page_ext.cast(utils.get_ulong_type()) + gdb.parse_and_eval("page_owner_ops")["offset"].cast(utils.get_ulong_type())
+ return addr.cast(page_owner_t.get_type().pointer())
+
+ def read_page_owner_by_addr(self, struct_page_addr):
+ page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+ pfn = self.p_ops.page_to_pfn(page)
+
+ if pfn < self.min_pfn or pfn > self.max_pfn or (not self.p_ops.pfn_valid(pfn)):
+ gdb.write("pfn is invalid\n")
+ return
+
+ page = self.p_ops.pfn_to_page(pfn)
+ page_ext = self.page_ext_get(page)
+
+ if page_ext == gdb.Value(0):
+ gdb.write("page_ext is null\n")
+ return
+
+ if not (page_ext['flags'] & (1 << PAGE_EXT_OWNER)):
+ gdb.write("page_owner flag is invalid\n")
+ raise gdb.GdbError('page_owner info is not present (never set?)\n')
+
+ if mm.test_bit(PAGE_EXT_OWNER_ALLOCATED, page_ext['flags'].address):
+ gdb.write('page_owner tracks the page as allocated\n')
+ else:
+ gdb.write('page_owner tracks the page as freed\n')
+
+ if not (page_ext['flags'] & (1 << PAGE_EXT_OWNER_ALLOCATED)):
+ gdb.write("page_owner is not allocated\n")
+
+ page_owner = self.get_page_owner(page_ext)
+ gdb.write("Page last allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
+ (page_owner["order"], page_owner["gfp_mask"],\
+ page_owner["pid"], page_owner["tgid"], page_owner["comm"].string(),\
+ page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
+ gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
+ if page_owner["handle"] == 0:
+ gdb.write('page_owner allocation stack trace missing\n')
+ else:
+ stackdepot.stack_depot_print(page_owner["handle"])
+
+ if page_owner["free_handle"] == 0:
+ gdb.write('page_owner free stack trace missing\n')
+ else:
+ gdb.write('page last free stack trace:\n')
+ stackdepot.stack_depot_print(page_owner["free_handle"])
+ if page_owner['last_migrate_reason'] != -1:
+ gdb.write('page has been migrated, last migrate reason: %s\n' % self.migrate_reason_names[page_owner['last_migrate_reason']])
+
+ def read_page_owner(self):
+ pfn = self.min_pfn
+
+ # Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area
+        while (not self.p_ops.pfn_valid(pfn)) and (pfn & (self.p_ops.MAX_ORDER_NR_PAGES - 1)) != 0:
+ pfn += 1
+
+ while pfn < self.max_pfn:
+ #
+ # If the new page is in a new MAX_ORDER_NR_PAGES area,
+ # validate the area as existing, skip it if not
+ #
+ if ((pfn & (self.p_ops.MAX_ORDER_NR_PAGES - 1)) == 0) and (not self.p_ops.pfn_valid(pfn)):
+ pfn += (self.p_ops.MAX_ORDER_NR_PAGES - 1)
+                continue
+
+ page = self.p_ops.pfn_to_page(pfn)
+ page_ext = self.page_ext_get(page)
+ if page_ext == gdb.Value(0):
+ pfn += 1
+ continue
+
+ if not (page_ext['flags'] & (1 << PAGE_EXT_OWNER)):
+ pfn += 1
+ continue
+ if not (page_ext['flags'] & (1 << PAGE_EXT_OWNER_ALLOCATED)):
+ pfn += 1
+ continue
+
+ page_owner = self.get_page_owner(page_ext)
+ gdb.write("Page allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
+ (page_owner["order"], page_owner["gfp_mask"],\
+ page_owner["pid"], page_owner["tgid"], page_owner["comm"].string(),\
+ page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
+ gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
+ stackdepot.stack_depot_print(page_owner["handle"])
+ pfn += (1 << page_owner["order"])
+
+DumpPageOwner()
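
A minimal usage sketch, assuming the target kernel was built with CONFIG_PAGE_OWNER=y
and booted with page_owner=on; the PFN is the one from the help text above:

    (gdb) lx-dump-page-owner --pfn 655360
    (gdb) lx-dump-page-owner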
diff --git a/scripts/gdb/linux/pgtable.py b/scripts/gdb/linux/pgtable.py
new file mode 100644
index 000000000000..09aac2421fb8
--- /dev/null
+++ b/scripts/gdb/linux/pgtable.py
@@ -0,0 +1,220 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# routines to introspect page table
+#
+# Authors:
+# Dmitrii Bundin <dmitrii.bundin.a@gmail.com>
+#
+
+import gdb
+
+from linux import utils
+
+PHYSICAL_ADDRESS_MASK = gdb.parse_and_eval('0xfffffffffffff')
+
+
+def page_mask(level=1):
+ # 4KB
+ if level == 1:
+ return gdb.parse_and_eval('(u64) ~0xfff')
+ # 2MB
+ elif level == 2:
+ return gdb.parse_and_eval('(u64) ~0x1fffff')
+ # 1GB
+ elif level == 3:
+ return gdb.parse_and_eval('(u64) ~0x3fffffff')
+ else:
+ raise Exception(f'Unknown page level: {level}')
+
+
+def _page_offset_base():
+ pob_symbol = gdb.lookup_global_symbol('page_offset_base')
+ pob = pob_symbol.name
+ return gdb.parse_and_eval(pob)
+
+
+def is_bit_defined_tupled(data, offset):
+ return offset, bool(data >> offset & 1)
+
+def content_tupled(data, bit_start, bit_end):
+ return (bit_start, bit_end), data >> bit_start & ((1 << (1 + bit_end - bit_start)) - 1)
+
+def entry_va(level, phys_addr, translating_va):
+ def start_bit(level):
+ if level == 5:
+ return 48
+ elif level == 4:
+ return 39
+ elif level == 3:
+ return 30
+ elif level == 2:
+ return 21
+ elif level == 1:
+ return 12
+ else:
+ raise Exception(f'Unknown level {level}')
+
+ entry_offset = ((translating_va >> start_bit(level)) & 511) * 8
+ entry_va = _page_offset_base() + phys_addr + entry_offset
+ return entry_va
+
+class Cr3():
+ def __init__(self, cr3, page_levels):
+ self.cr3 = cr3
+ self.page_levels = page_levels
+ self.page_level_write_through = is_bit_defined_tupled(cr3, 3)
+ self.page_level_cache_disabled = is_bit_defined_tupled(cr3, 4)
+ self.next_entry_physical_address = cr3 & PHYSICAL_ADDRESS_MASK & page_mask()
+
+ def next_entry(self, va):
+ next_level = self.page_levels
+ return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
+
+ def mk_string(self):
+ return f"""\
+cr3:
+ {'cr3 binary data': <30} {hex(self.cr3)}
+ {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
+ ---
+ {'bit' : <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+ {'bit' : <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+"""
+
+
+class PageHierarchyEntry():
+ def __init__(self, address, level):
+ data = int.from_bytes(
+ memoryview(gdb.selected_inferior().read_memory(address, 8)),
+ "little"
+ )
+ if level == 1:
+ self.is_page = True
+ self.entry_present = is_bit_defined_tupled(data, 0)
+ self.read_write = is_bit_defined_tupled(data, 1)
+ self.user_access_allowed = is_bit_defined_tupled(data, 2)
+ self.page_level_write_through = is_bit_defined_tupled(data, 3)
+ self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
+ self.entry_was_accessed = is_bit_defined_tupled(data, 5)
+ self.dirty = is_bit_defined_tupled(data, 6)
+ self.pat = is_bit_defined_tupled(data, 7)
+ self.global_translation = is_bit_defined_tupled(data, 8)
+ self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level)
+ self.next_entry_physical_address = None
+ self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
+ self.protection_key = content_tupled(data, 59, 62)
+ self.executed_disable = is_bit_defined_tupled(data, 63)
+ else:
+ page_size = is_bit_defined_tupled(data, 7)
+ page_size_bit = page_size[1]
+ self.is_page = page_size_bit
+ self.entry_present = is_bit_defined_tupled(data, 0)
+ self.read_write = is_bit_defined_tupled(data, 1)
+ self.user_access_allowed = is_bit_defined_tupled(data, 2)
+ self.page_level_write_through = is_bit_defined_tupled(data, 3)
+ self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
+ self.entry_was_accessed = is_bit_defined_tupled(data, 5)
+ self.page_size = page_size
+ self.dirty = is_bit_defined_tupled(
+ data, 6) if page_size_bit else None
+ self.global_translation = is_bit_defined_tupled(
+ data, 8) if page_size_bit else None
+ self.pat = is_bit_defined_tupled(
+ data, 12) if page_size_bit else None
+ self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level) if page_size_bit else None
+ self.next_entry_physical_address = None if page_size_bit else data & PHYSICAL_ADDRESS_MASK & page_mask()
+ self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
+ self.protection_key = content_tupled(data, 59, 62) if page_size_bit else None
+ self.executed_disable = is_bit_defined_tupled(data, 63)
+ self.address = address
+ self.page_entry_binary_data = data
+ self.page_hierarchy_level = level
+
+ def next_entry(self, va):
+ if self.is_page or not self.entry_present[1]:
+ return None
+
+ next_level = self.page_hierarchy_level - 1
+ return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
+
+
+ def mk_string(self):
+ if not self.entry_present[1]:
+ return f"""\
+level {self.page_hierarchy_level}:
+ {'entry address': <30} {hex(self.address)}
+ {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+ ---
+ PAGE ENTRY IS NOT PRESENT!
+"""
+ elif self.is_page:
+ def page_size_line(ps_bit, ps, level):
+ return "" if level == 1 else f"{'bit': <3} {ps_bit: <5} {'page size': <30} {ps}"
+
+ return f"""\
+level {self.page_hierarchy_level}:
+ {'entry address': <30} {hex(self.address)}
+ {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+    {'page size': <30} {'1GB' if self.page_hierarchy_level == 3 else '2MB' if self.page_hierarchy_level == 2 else '4KB' if self.page_hierarchy_level == 1 else 'Unknown page size for level: ' + str(self.page_hierarchy_level)}
+ {'page physical address': <30} {hex(self.page_physical_address)}
+ ---
+ {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
+ {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
+ {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
+ {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+ {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+ {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
+ {"" if self.page_hierarchy_level == 1 else f"{'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}"}
+ {'bit': <4} {self.dirty[0]: <10} {'page dirty': <30} {self.dirty[1]}
+ {'bit': <4} {self.global_translation[0]: <10} {'global translation': <30} {self.global_translation[1]}
+ {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
+ {'bit': <4} {self.pat[0]: <10} {'pat': <30} {self.pat[1]}
+ {'bits': <4} {str(self.protection_key[0]): <10} {'protection key': <30} {self.protection_key[1]}
+ {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
+"""
+ else:
+ return f"""\
+level {self.page_hierarchy_level}:
+ {'entry address': <30} {hex(self.address)}
+ {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+ {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
+ ---
+ {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
+ {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
+ {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
+ {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+ {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+ {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
+ {'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}
+ {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
+ {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
+"""
+
+
+class TranslateVM(gdb.Command):
+ """Prints the entire paging structure used to translate a given virtual address.
+
+Using the address space of the currently executing process, it translates the given
+virtual address and prints detailed information on every paging-structure level used
+for the translation. Currently supported arch: x86"""
+
+ def __init__(self):
+ super(TranslateVM, self).__init__('translate-vm', gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ if utils.is_target_arch("x86"):
+ vm_address = gdb.parse_and_eval(f'{arg}')
+ cr3_data = gdb.parse_and_eval('$cr3')
+ cr4 = gdb.parse_and_eval('$cr4')
+ page_levels = 5 if cr4 & (1 << 12) else 4
+ page_entry = Cr3(cr3_data, page_levels)
+ while page_entry:
+ gdb.write(page_entry.mk_string())
+ page_entry = page_entry.next_entry(vm_address)
+ else:
+ gdb.GdbError("Virtual address translation is not"
+ "supported for this arch")
+
+
+TranslateVM()
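
A minimal usage sketch, assuming an x86-64 target; the virtual address below is
hypothetical:

    (gdb) translate-vm 0xffffffff81000000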
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
new file mode 100644
index 000000000000..65dd1bd12964
--- /dev/null
+++ b/scripts/gdb/linux/proc.py
@@ -0,0 +1,277 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# Kernel proc information reader
+#
+# Copyright (c) 2016 Linaro Ltd
+#
+# Authors:
+# Kieran Bingham <kieran.bingham@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+from linux import constants
+from linux import utils
+from linux import tasks
+from linux import lists
+from linux import vfs
+from linux import rbtree
+from struct import *
+
+
+class LxCmdLine(gdb.Command):
+ """ Report the Linux Commandline used in the current kernel.
+ Equivalent to cat /proc/cmdline on a running target"""
+
+ def __init__(self):
+ super(LxCmdLine, self).__init__("lx-cmdline", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ gdb.write(gdb.parse_and_eval("saved_command_line").string() + "\n")
+
+
+LxCmdLine()
+
+
+class LxVersion(gdb.Command):
+ """ Report the Linux Version of the current kernel.
+ Equivalent to cat /proc/version on a running target"""
+
+ def __init__(self):
+ super(LxVersion, self).__init__("lx-version", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ # linux_banner should contain a newline
+ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string())
+
+
+LxVersion()
+
+
+# Resource Structure Printers
+# /proc/iomem
+# /proc/ioports
+
+def get_resources(resource, depth):
+ while resource:
+ yield resource, depth
+
+ child = resource['child']
+ if child:
+ for res, deep in get_resources(child, depth + 1):
+ yield res, deep
+
+ resource = resource['sibling']
+
+
+def show_lx_resources(resource_str):
+ resource = gdb.parse_and_eval(resource_str)
+ width = 4 if resource['end'] < 0x10000 else 8
+ # Iterate straight to the first child
+ for res, depth in get_resources(resource['child'], 0):
+ start = int(res['start'])
+ end = int(res['end'])
+ gdb.write(" " * depth * 2 +
+ "{0:0{1}x}-".format(start, width) +
+ "{0:0{1}x} : ".format(end, width) +
+ res['name'].string() + "\n")
+
+
+class LxIOMem(gdb.Command):
+ """Identify the IO memory resource locations defined by the kernel
+
+Equivalent to cat /proc/iomem on a running target"""
+
+ def __init__(self):
+ super(LxIOMem, self).__init__("lx-iomem", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ return show_lx_resources("iomem_resource")
+
+
+LxIOMem()
+
+
+class LxIOPorts(gdb.Command):
+ """Identify the IO port resource locations defined by the kernel
+
+Equivalent to cat /proc/ioports on a running target"""
+
+ def __init__(self):
+ super(LxIOPorts, self).__init__("lx-ioports", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ return show_lx_resources("ioport_resource")
+
+
+LxIOPorts()
+
+
+# Mount namespace viewer
+# /proc/mounts
+
+def info_opts(lst, opt):
+ opts = ""
+ for key, string in lst.items():
+ if opt & key:
+ opts += string
+ return opts
+
+
+FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync",
+ constants.LX_SB_MANDLOCK: ",mand",
+ constants.LX_SB_DIRSYNC: ",dirsync",
+ constants.LX_SB_NOATIME: ",noatime",
+ constants.LX_SB_NODIRATIME: ",nodiratime"}
+
+MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
+ constants.LX_MNT_NODEV: ",nodev",
+ constants.LX_MNT_NOEXEC: ",noexec",
+ constants.LX_MNT_NOATIME: ",noatime",
+ constants.LX_MNT_NODIRATIME: ",nodiratime",
+ constants.LX_MNT_RELATIME: ",relatime"}
+
+mount_type = utils.CachedType("struct mount")
+mount_ptr_type = mount_type.get_type().pointer()
+
+
+class LxMounts(gdb.Command):
+ """Report the VFS mounts of the current process namespace.
+
+Equivalent to cat /proc/mounts on a running target
+An integer value can be supplied to display the mount
+values of that process namespace"""
+
+ def __init__(self):
+ super(LxMounts, self).__init__("lx-mounts", gdb.COMMAND_DATA)
+
+ # Equivalent to proc_namespace.c:show_vfsmnt
+ # However, that has the ability to call into s_op functions
+ # whereas we cannot and must make do with the information we can obtain.
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ if len(argv) >= 1:
+ try:
+ pid = int(argv[0])
+            except ValueError:
+ raise gdb.GdbError("Provide a PID as integer value")
+ else:
+ pid = 1
+
+ task = tasks.get_task_by_pid(pid)
+ if not task:
+ raise gdb.GdbError("Couldn't find a process with PID {}"
+ .format(pid))
+
+ namespace = task['nsproxy']['mnt_ns']
+ if not namespace:
+ raise gdb.GdbError("No namespace for current process")
+
+ gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format(
+ "mount", "super_block", "devname", "pathname", "fstype"))
+
+ for mnt in rbtree.rb_inorder_for_each_entry(namespace['mounts'], mount_ptr_type, "mnt_node"):
+ devname = mnt['mnt_devname'].string()
+ devname = devname if devname else "none"
+
+ pathname = ""
+ parent = mnt
+ while True:
+ mntpoint = parent['mnt_mountpoint']
+ pathname = vfs.dentry_name(mntpoint) + pathname
+ if (parent == parent['mnt_parent']):
+ break
+ parent = parent['mnt_parent']
+
+ if (pathname == ""):
+ pathname = "/"
+
+ superblock = mnt['mnt']['mnt_sb']
+ fstype = superblock['s_type']['name'].string()
+ s_flags = int(superblock['s_flags'])
+ m_flags = int(mnt['mnt']['mnt_flags'])
+ rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw"
+
+ gdb.write("{} {} {} {} {} {}{}{} 0 0\n".format(
+ mnt.format_string(), superblock.format_string(), devname,
+ pathname, fstype, rd, info_opts(FS_INFO, s_flags),
+ info_opts(MNT_INFO, m_flags)))
+
+
+LxMounts()
+
+
+class LxFdtDump(gdb.Command):
+ """Output Flattened Device Tree header and dump FDT blob to the filename
+ specified as the command argument. Equivalent to
+ 'cat /proc/fdt > fdtdump.dtb' on a running target"""
+
+ def __init__(self):
+ super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA,
+ gdb.COMPLETE_FILENAME)
+
+ def fdthdr_to_cpu(self, fdt_header):
+
+ fdt_header_be = ">IIIIIII"
+ fdt_header_le = "<IIIIIII"
+
+ if utils.get_target_endianness() == 1:
+ output_fmt = fdt_header_le
+ else:
+ output_fmt = fdt_header_be
+
+ return unpack(output_fmt, pack(fdt_header_be,
+ fdt_header['magic'],
+ fdt_header['totalsize'],
+ fdt_header['off_dt_struct'],
+ fdt_header['off_dt_strings'],
+ fdt_header['off_mem_rsvmap'],
+ fdt_header['version'],
+ fdt_header['last_comp_version']))
+
+ def invoke(self, arg, from_tty):
+
+ if not constants.LX_CONFIG_OF:
+ raise gdb.GdbError("Kernel not compiled with CONFIG_OF\n")
+
+ if len(arg) == 0:
+ filename = "fdtdump.dtb"
+ else:
+ filename = arg
+
+ py_fdt_header_ptr = gdb.parse_and_eval(
+ "(const struct fdt_header *) initial_boot_params")
+ py_fdt_header = py_fdt_header_ptr.dereference()
+
+ fdt_header = self.fdthdr_to_cpu(py_fdt_header)
+
+ if fdt_header[0] != constants.LX_OF_DT_HEADER:
+ raise gdb.GdbError("No flattened device tree magic found\n")
+
+ gdb.write("fdt_magic: 0x{:02X}\n".format(fdt_header[0]))
+ gdb.write("fdt_totalsize: 0x{:02X}\n".format(fdt_header[1]))
+ gdb.write("off_dt_struct: 0x{:02X}\n".format(fdt_header[2]))
+ gdb.write("off_dt_strings: 0x{:02X}\n".format(fdt_header[3]))
+ gdb.write("off_mem_rsvmap: 0x{:02X}\n".format(fdt_header[4]))
+ gdb.write("version: {}\n".format(fdt_header[5]))
+ gdb.write("last_comp_version: {}\n".format(fdt_header[6]))
+
+ inf = gdb.inferiors()[0]
+ fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr,
+ fdt_header[1]).tobytes()
+
+        try:
+            with open(filename, 'wb') as f:
+                f.write(fdt_buf)
+        except OSError:
+            raise gdb.GdbError("Could not open file to dump fdt")
+
+ gdb.write("Dumped fdt blob to " + filename + "\n")
+
+
+LxFdtDump()
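
A minimal usage sketch for the commands above; the PID and the dump file name are
hypothetical:

    (gdb) lx-cmdline
    (gdb) lx-version
    (gdb) lx-iomem
    (gdb) lx-ioports
    (gdb) lx-mounts 1
    (gdb) lx-fdtdump /tmp/fdt.dtb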
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
new file mode 100644
index 000000000000..bc2954e45c32
--- /dev/null
+++ b/scripts/gdb/linux/radixtree.py
@@ -0,0 +1,215 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Radix Tree Parser
+#
+# Copyright (c) 2016 Linaro Ltd
+# Copyright (c) 2023 Broadcom
+#
+# Authors:
+# Kieran Bingham <kieran.bingham@linaro.org>
+# Florian Fainelli <f.fainelli@gmail.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+
+radix_tree_root_type = utils.CachedType("struct xarray")
+radix_tree_node_type = utils.CachedType("struct xa_node")
+
+def is_internal_node(node):
+ long_type = utils.get_long_type()
+ return ((node.cast(long_type) & constants.LX_RADIX_TREE_ENTRY_MASK) == constants.LX_RADIX_TREE_INTERNAL_NODE)
+
+def entry_to_node(node):
+ long_type = utils.get_long_type()
+ indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INTERNAL_NODE
+ return indirect_ptr.cast(radix_tree_node_type.get_type().pointer())
+
+def node_maxindex(node):
+ return (constants.LX_RADIX_TREE_MAP_SIZE << node['shift']) - 1
+
+def resolve_root(root):
+ if root.type == radix_tree_root_type.get_type():
+ return root
+ if root.type == radix_tree_root_type.get_type().pointer():
+ return root.dereference()
+ raise gdb.GdbError("must be {} not {}"
+ .format(radix_tree_root_type.get_type(), root.type))
+
+def lookup(root, index):
+ root = resolve_root(root)
+ node = root['xa_head']
+ if node == 0:
+ return None
+
+ if not (is_internal_node(node)):
+ if (index > 0):
+ return None
+ return node
+
+ node = entry_to_node(node)
+ maxindex = node_maxindex(node)
+
+ if (index > maxindex):
+ return None
+
+ shift = node['shift'] + constants.LX_RADIX_TREE_MAP_SHIFT
+
+ while True:
+ offset = (index >> node['shift']) & constants.LX_RADIX_TREE_MAP_MASK
+ slot = node['slots'][offset]
+
+ if slot == 0:
+ return None
+
+ node = slot.cast(node.type.pointer()).dereference()
+ if node == 0:
+ return None
+
+ shift -= constants.LX_RADIX_TREE_MAP_SHIFT
+ if (shift <= 0):
+ break
+
+ return node
+
+def descend(parent, index):
+ offset = (index >> int(parent["shift"])) & constants.LX_RADIX_TREE_MAP_MASK
+ return offset, parent["slots"][offset]
+
+def load_root(root):
+ node = root["xa_head"]
+ nodep = node
+
+ if is_internal_node(node):
+ node = entry_to_node(node)
+ maxindex = node_maxindex(node)
+ return int(node["shift"]) + constants.LX_RADIX_TREE_MAP_SHIFT, \
+ nodep, maxindex
+
+ return 0, nodep, 0
+
+class RadixTreeIter:
+ def __init__(self, start):
+ self.index = 0
+ self.next_index = start
+ self.node = None
+
+def xa_mk_internal(v):
+ return (v << 2) | 2
+
+LX_XA_RETRY_ENTRY = xa_mk_internal(256)
+LX_RADIX_TREE_RETRY = LX_XA_RETRY_ENTRY
+
+def next_chunk(root, iter):
+ mask = (1 << (utils.get_ulong_type().sizeof * 8)) - 1
+
+ index = iter.next_index
+ if index == 0 and iter.index != 0:
+ return None
+
+ restart = True
+ while restart:
+ restart = False
+
+ _, child, maxindex = load_root(root)
+ if index > maxindex:
+ return None
+ if not child:
+ return None
+
+ if not is_internal_node(child):
+ iter.index = index
+ iter.next_index = (maxindex + 1) & mask
+ iter.node = None
+ return root["xa_head"].address
+
+ while True:
+ node = entry_to_node(child)
+ offset, child = descend(node, index)
+
+ if not child:
+ while True:
+ offset += 1
+ if offset >= constants.LX_RADIX_TREE_MAP_SIZE:
+ break
+ slot = node["slots"][offset]
+ if slot:
+ break
+ index &= ~node_maxindex(node)
+ index = (index + (offset << int(node["shift"]))) & mask
+ if index == 0:
+ return None
+ if offset == constants.LX_RADIX_TREE_MAP_SIZE:
+ restart = True
+ break
+ child = node["slots"][offset]
+
+ if not child:
+ restart = True
+ break
+ if child == LX_XA_RETRY_ENTRY:
+ break
+ if not node["shift"] or not is_internal_node(child):
+ break
+
+ iter.index = (index & ~node_maxindex(node)) | offset
+ iter.next_index = ((index | node_maxindex(node)) + 1) & mask
+ iter.node = node
+
+ return node["slots"][offset].address
+
+def next_slot(slot, iter):
+ mask = (1 << (utils.get_ulong_type().sizeof * 8)) - 1
+ for _ in range(iter.next_index - iter.index - 1):
+ slot += 1
+ iter.index = (iter.index + 1) & mask
+ if slot.dereference():
+ return slot
+ return None
+
+def for_each_slot(root, start=0):
+ iter = RadixTreeIter(start)
+ slot = None
+ while True:
+ if not slot:
+ slot = next_chunk(root, iter)
+ if not slot:
+ break
+ yield iter.index, slot
+ slot = next_slot(slot, iter)
+
+class LxRadixTreeLookup(gdb.Function):
+ """ Lookup and return a node from a RadixTree.
+
+$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
+If index is omitted, the root node is dereferenced and returned."""
+
+ def __init__(self):
+ super(LxRadixTreeLookup, self).__init__("lx_radix_tree_lookup")
+
+ def invoke(self, root, index=0):
+ result = lookup(root, index)
+ if result is None:
+ raise gdb.GdbError("No entry in tree at index {}".format(index))
+
+ return result
+
+class LxRadixTree(gdb.Command):
+ """Show all values stored in a RadixTree."""
+
+ def __init__(self):
+ super(LxRadixTree, self).__init__("lx-radix-tree", gdb.COMMAND_DATA,
+ gdb.COMPLETE_NONE)
+
+ def invoke(self, argument, from_tty):
+ args = gdb.string_to_argv(argument)
+ if len(args) != 1:
+ raise gdb.GdbError("Usage: lx-radix-tree ROOT")
+ root = gdb.parse_and_eval(args[0])
+ for index, slot in for_each_slot(root):
+ gdb.write("[{}] = {}\n".format(index, slot.dereference()))
+
+LxRadixTree()
+LxRadixTreeLookup()
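
A minimal usage sketch, assuming vmlinux-gdb.py has been sourced; the root expression
is hypothetical and must name a struct xarray used as a radix tree:

    (gdb) lx-radix-tree &irq_desc_tree
    (gdb) p $lx_radix_tree_lookup(irq_desc_tree, 16)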
diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py
new file mode 100644
index 000000000000..fcbcc5f4153c
--- /dev/null
+++ b/scripts/gdb/linux/rbtree.py
@@ -0,0 +1,189 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2019 Google LLC.
+
+import gdb
+
+from linux import utils
+
+rb_root_type = utils.CachedType("struct rb_root")
+rb_node_type = utils.CachedType("struct rb_node")
+
+def rb_inorder_for_each(root):
+ def inorder(node):
+ if node:
+ yield from inorder(node['rb_left'])
+ yield node
+ yield from inorder(node['rb_right'])
+
+ yield from inorder(root['rb_node'])
+
+def rb_inorder_for_each_entry(root, gdbtype, member):
+ for node in rb_inorder_for_each(root):
+ yield utils.container_of(node, gdbtype, member)
+
+def rb_first(root):
+ if root.type == rb_root_type.get_type():
+        root = root.address.cast(rb_root_type.get_type().pointer())
+ elif root.type != rb_root_type.get_type().pointer():
+ raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
+
+ node = root['rb_node']
+ if node == 0:
+ return None
+
+ while node['rb_left']:
+ node = node['rb_left']
+
+ return node
+
+
+def rb_last(root):
+ if root.type == rb_root_type.get_type():
+        root = root.address.cast(rb_root_type.get_type().pointer())
+ elif root.type != rb_root_type.get_type().pointer():
+ raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
+
+ node = root['rb_node']
+ if node == 0:
+ return None
+
+ while node['rb_right']:
+ node = node['rb_right']
+
+ return node
+
+
+def rb_parent(node):
+ parent = gdb.Value(node['__rb_parent_color'] & ~3)
+ return parent.cast(rb_node_type.get_type().pointer())
+
+
+def rb_empty_node(node):
+ return node['__rb_parent_color'] == node.address
+
+
+def rb_next(node):
+ if node.type == rb_node_type.get_type():
+ node = node.address.cast(rb_node_type.get_type().pointer())
+ elif node.type != rb_node_type.get_type().pointer():
+ raise gdb.GdbError("Must be struct rb_node not {}".format(node.type))
+
+ if rb_empty_node(node):
+ return None
+
+ if node['rb_right']:
+ node = node['rb_right']
+ while node['rb_left']:
+ node = node['rb_left']
+ return node
+
+ parent = rb_parent(node)
+ while parent and node == parent['rb_right']:
+ node = parent
+ parent = rb_parent(node)
+
+ return parent
+
+
+def rb_prev(node):
+ if node.type == rb_node_type.get_type():
+ node = node.address.cast(rb_node_type.get_type().pointer())
+ elif node.type != rb_node_type.get_type().pointer():
+ raise gdb.GdbError("Must be struct rb_node not {}".format(node.type))
+
+ if rb_empty_node(node):
+ return None
+
+ if node['rb_left']:
+ node = node['rb_left']
+ while node['rb_right']:
+ node = node['rb_right']
+ return node.dereference()
+
+ parent = rb_parent(node)
+ while parent and node == parent['rb_left'].dereference():
+ node = parent
+ parent = rb_parent(node)
+
+ return parent
+
+
+class LxRbFirst(gdb.Function):
+ """Lookup and return a node from an RBTree
+
+$lx_rb_first(root): Return the leftmost (first in-order) node of the rb tree
+rooted at ROOT, or raise an error if the tree is empty."""
+
+ def __init__(self):
+ super(LxRbFirst, self).__init__("lx_rb_first")
+
+ def invoke(self, root):
+ result = rb_first(root)
+ if result is None:
+ raise gdb.GdbError("No entry in tree")
+
+ return result
+
+
+LxRbFirst()
+
+
+class LxRbLast(gdb.Function):
+ """Lookup and return a node from an RBTree.
+
+$lx_rb_last(root): Return the rightmost (last in-order) node of the rb tree
+rooted at ROOT, or raise an error if the tree is empty."""
+
+ def __init__(self):
+ super(LxRbLast, self).__init__("lx_rb_last")
+
+ def invoke(self, root):
+ result = rb_last(root)
+ if result is None:
+ raise gdb.GdbError("No entry in tree")
+
+ return result
+
+
+LxRbLast()
+
+
+class LxRbNext(gdb.Function):
+ """Lookup and return a node from an RBTree.
+
+$lx_rb_next(node): Return the in-order successor of NODE,
+or raise an error if NODE is the last node in the tree."""
+
+ def __init__(self):
+ super(LxRbNext, self).__init__("lx_rb_next")
+
+ def invoke(self, node):
+ result = rb_next(node)
+ if result is None:
+ raise gdb.GdbError("No entry in tree")
+
+ return result
+
+
+LxRbNext()
+
+
+class LxRbPrev(gdb.Function):
+ """Lookup and return a node from an RBTree.
+
+$lx_rb_prev(node): Return the in-order predecessor of NODE,
+or raise an error if NODE is the first node in the tree."""
+
+ def __init__(self):
+ super(LxRbPrev, self).__init__("lx_rb_prev")
+
+ def invoke(self, node):
+ result = rb_prev(node)
+ if result is None:
+ raise gdb.GdbError("No entry in tree")
+
+ return result
+
+
+LxRbPrev()
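
A minimal sketch of walking an rb tree in order from the GDB Python prompt; the root
symbol, entry type and member name below are hypothetical:

    (gdb) python
    from linux import rbtree, utils
    root = gdb.parse_and_eval("vmap_area_root")        # hypothetical struct rb_root
    va_ptr = utils.CachedType("struct vmap_area").get_type().pointer()
    for entry in rbtree.rb_inorder_for_each_entry(root, va_ptr, "rb_node"):
        gdb.write("0x%x-0x%x\n" % (int(entry['va_start']), int(entry['va_end'])))
    end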
diff --git a/scripts/gdb/linux/slab.py b/scripts/gdb/linux/slab.py
new file mode 100644
index 000000000000..0e2d93867fe2
--- /dev/null
+++ b/scripts/gdb/linux/slab.py
@@ -0,0 +1,325 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2023 MediaTek Inc.
+#
+# Authors:
+# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
+#
+
+import gdb
+import re
+import traceback
+from linux import lists, utils, stackdepot, constants, mm
+
+SLAB_RED_ZONE = constants.LX_SLAB_RED_ZONE
+SLAB_POISON = constants.LX_SLAB_POISON
+SLAB_KMALLOC = constants.LX_SLAB_KMALLOC
+SLAB_HWCACHE_ALIGN = constants.LX_SLAB_HWCACHE_ALIGN
+SLAB_CACHE_DMA = constants.LX_SLAB_CACHE_DMA
+SLAB_CACHE_DMA32 = constants.LX_SLAB_CACHE_DMA32
+SLAB_STORE_USER = constants.LX_SLAB_STORE_USER
+SLAB_PANIC = constants.LX_SLAB_PANIC
+
+OO_SHIFT = 16
+OO_MASK = (1 << OO_SHIFT) - 1
+
+if constants.LX_CONFIG_SLUB_DEBUG:
+ slab_type = utils.CachedType("struct slab")
+ slab_ptr_type = slab_type.get_type().pointer()
+ kmem_cache_type = utils.CachedType("struct kmem_cache")
+ kmem_cache_ptr_type = kmem_cache_type.get_type().pointer()
+ freeptr_t = utils.CachedType("freeptr_t")
+ freeptr_t_ptr = freeptr_t.get_type().pointer()
+
+ track_type = gdb.lookup_type('struct track')
+ track_alloc = int(gdb.parse_and_eval('TRACK_ALLOC'))
+ track_free = int(gdb.parse_and_eval('TRACK_FREE'))
+
+def slab_folio(slab):
+ return slab.cast(gdb.lookup_type("struct folio").pointer())
+
+def slab_address(slab):
+ p_ops = mm.page_ops().ops
+ folio = slab_folio(slab)
+ return p_ops.folio_address(folio)
+
+def for_each_object(cache, addr, slab_objects):
+ p = addr
+ if cache['flags'] & SLAB_RED_ZONE:
+ p += int(cache['red_left_pad'])
+ while p < addr + (slab_objects * cache['size']):
+ yield p
+ p = p + int(cache['size'])
+
+def get_info_end(cache):
+ if (cache['offset'] >= cache['inuse']):
+ return cache['inuse'] + gdb.lookup_type("void").pointer().sizeof
+ else:
+ return cache['inuse']
+
+def get_orig_size(cache, obj):
+ if cache['flags'] & SLAB_STORE_USER and cache['flags'] & SLAB_KMALLOC:
+ p = mm.page_ops().ops.kasan_reset_tag(obj)
+ p += get_info_end(cache)
+ p += gdb.lookup_type('struct track').sizeof * 2
+ p = p.cast(utils.get_uint_type().pointer())
+ return p.dereference()
+ else:
+ return cache['object_size']
+
+def get_track(cache, object_pointer, alloc):
+ p = object_pointer + get_info_end(cache)
+ p += (alloc * track_type.sizeof)
+ return p
+
+def oo_objects(x):
+ return int(x['x']) & OO_MASK
+
+def oo_order(x):
+ return int(x['x']) >> OO_SHIFT
+
+def reciprocal_divide(a, R):
+ t = (a * int(R['m'])) >> 32
+ return (t + ((a - t) >> int(R['sh1']))) >> int(R['sh2'])
+
+def __obj_to_index(cache, addr, obj):
+ return reciprocal_divide(int(mm.page_ops().ops.kasan_reset_tag(obj)) - addr, cache['reciprocal_size'])
+
+def swab64(x):
+ result = (((x & 0x00000000000000ff) << 56) | \
+ ((x & 0x000000000000ff00) << 40) | \
+ ((x & 0x0000000000ff0000) << 24) | \
+ ((x & 0x00000000ff000000) << 8) | \
+ ((x & 0x000000ff00000000) >> 8) | \
+ ((x & 0x0000ff0000000000) >> 24) | \
+ ((x & 0x00ff000000000000) >> 40) | \
+ ((x & 0xff00000000000000) >> 56))
+ return result
+
+def freelist_ptr_decode(cache, ptr, ptr_addr):
+ if constants.LX_CONFIG_SLAB_FREELIST_HARDENED:
+ return ptr['v'] ^ cache['random'] ^ swab64(int(ptr_addr))
+ else:
+ return ptr['v']
+
+def get_freepointer(cache, obj):
+ obj = mm.page_ops().ops.kasan_reset_tag(obj)
+ ptr_addr = obj + cache['offset']
+ p = ptr_addr.cast(freeptr_t_ptr).dereference()
+ return freelist_ptr_decode(cache, p, ptr_addr)
+
+def loc_exist(loc_track, addr, handle, waste):
+ for loc in loc_track:
+ if loc['addr'] == addr and loc['handle'] == handle and loc['waste'] == waste:
+ return loc
+ return None
+
+def add_location(loc_track, cache, track, orig_size):
+ jiffies = gdb.parse_and_eval("jiffies_64")
+ age = jiffies - track['when']
+ handle = 0
+ waste = cache['object_size'] - int(orig_size)
+ pid = int(track['pid'])
+ cpuid = int(track['cpu'])
+ addr = track['addr']
+ if constants.LX_CONFIG_STACKDEPOT:
+ handle = track['handle']
+
+ loc = loc_exist(loc_track, addr, handle, waste)
+ if loc:
+ loc['count'] += 1
+ if track['when']:
+ loc['sum_time'] += age
+ loc['min_time'] = min(loc['min_time'], age)
+ loc['max_time'] = max(loc['max_time'], age)
+ loc['min_pid'] = min(loc['min_pid'], pid)
+ loc['max_pid'] = max(loc['max_pid'], pid)
+ loc['cpus'].add(cpuid)
+ else:
+ loc_track.append({
+ 'count' : 1,
+ 'addr' : addr,
+ 'sum_time' : age,
+ 'min_time' : age,
+ 'max_time' : age,
+ 'min_pid' : pid,
+ 'max_pid' : pid,
+ 'handle' : handle,
+ 'waste' : waste,
+ 'cpus' : {cpuid}
+ }
+ )
+
+def slabtrace(alloc, cache_name):
+
+ def __fill_map(obj_map, cache, slab):
+ p = slab['freelist']
+ addr = slab_address(slab)
+ while p != gdb.Value(0):
+ index = __obj_to_index(cache, addr, p)
+ obj_map[index] = True # free objects
+ p = get_freepointer(cache, p)
+
+ # process every slab page on the slab_list (partial and full list)
+ def process_slab(loc_track, slab_list, alloc, cache):
+ for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
+ obj_map[:] = [False] * oo_objects(cache['oo'])
+ __fill_map(obj_map, cache, slab)
+ addr = slab_address(slab)
+ for object_pointer in for_each_object(cache, addr, slab['objects']):
+ if obj_map[__obj_to_index(cache, addr, object_pointer)] == True:
+ continue
+ p = get_track(cache, object_pointer, alloc)
+ track = gdb.Value(p).cast(track_type.pointer())
+ if alloc == track_alloc:
+ size = get_orig_size(cache, object_pointer)
+ else:
+ size = cache['object_size']
+ add_location(loc_track, cache, track, size)
+ continue
+
+ slab_caches = gdb.parse_and_eval("slab_caches")
+ if mm.page_ops().ops.MAX_NUMNODES > 1:
+ nr_node_ids = int(gdb.parse_and_eval("nr_node_ids"))
+ else:
+ nr_node_ids = 1
+
+ target_cache = None
+ loc_track = []
+
+ for cache in lists.list_for_each_entry(slab_caches, kmem_cache_ptr_type, 'list'):
+ if cache['name'].string() == cache_name:
+ target_cache = cache
+ break
+
+ obj_map = [False] * oo_objects(target_cache['oo'])
+
+ if target_cache['flags'] & SLAB_STORE_USER:
+ for i in range(0, nr_node_ids):
+ cache_node = target_cache['node'][i]
+ if cache_node['nr_slabs']['counter'] == 0:
+ continue
+ process_slab(loc_track, cache_node['partial'], alloc, target_cache)
+ process_slab(loc_track, cache_node['full'], alloc, target_cache)
+ else:
+ raise gdb.GdbError("SLAB_STORE_USER is not set in %s" % target_cache['name'].string())
+
+ for loc in sorted(loc_track, key=lambda x:x['count'], reverse=True):
+ if loc['addr']:
+ addr = loc['addr'].cast(utils.get_ulong_type().pointer())
+ gdb.write("%d %s" % (loc['count'], str(addr).split(' ')[-1]))
+ else:
+ gdb.write("%d <not-available>" % loc['count'])
+
+ if loc['waste']:
+ gdb.write(" waste=%d/%d" % (loc['count'] * loc['waste'], loc['waste']))
+
+ if loc['sum_time'] != loc['min_time']:
+ gdb.write(" age=%d/%d/%d" % (loc['min_time'], loc['sum_time']/loc['count'], loc['max_time']))
+ else:
+ gdb.write(" age=%d" % loc['min_time'])
+
+ if loc['min_pid'] != loc['max_pid']:
+ gdb.write(" pid=%d-%d" % (loc['min_pid'], loc['max_pid']))
+ else:
+ gdb.write(" pid=%d" % loc['min_pid'])
+
+ if constants.LX_NR_CPUS > 1:
+ nr_cpu = gdb.parse_and_eval('__num_online_cpus')['counter']
+ if nr_cpu > 1:
+ gdb.write(" cpus=")
+ gdb.write(','.join(str(cpu) for cpu in loc['cpus']))
+ gdb.write("\n")
+ if constants.LX_CONFIG_STACKDEPOT:
+ if loc['handle']:
+ stackdepot.stack_depot_print(loc['handle'])
+ gdb.write("\n")
+
+def help():
+ t = """Usage: lx-slabtrace --cache_name [cache_name] [Options]
+ Options:
+ --alloc
+ print information of allocation trace of the allocated objects
+ --free
+ print information of freeing trace of the allocated objects
+ Example:
+ lx-slabtrace --cache_name kmalloc-1k --alloc
+ lx-slabtrace --cache_name kmalloc-1k --free\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class LxSlabTrace(gdb.Command):
+ """Show specific cache slabtrace"""
+
+ def __init__(self):
+ super(LxSlabTrace, self).__init__("lx-slabtrace", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ if not constants.LX_CONFIG_SLUB_DEBUG:
+ raise gdb.GdbError("CONFIG_SLUB_DEBUG is not enabled")
+
+ argv = gdb.string_to_argv(arg)
+ alloc = track_alloc # default show alloc_traces
+
+ if len(argv) == 3:
+ if argv[2] == '--alloc':
+ alloc = track_alloc
+ elif argv[2] == '--free':
+ alloc = track_free
+ else:
+ help()
+ if len(argv) >= 2 and argv[0] == '--cache_name':
+ slabtrace(alloc, argv[1])
+ else:
+ help()
+LxSlabTrace()
+
+def slabinfo():
+ nr_node_ids = None
+
+ if not constants.LX_CONFIG_SLUB_DEBUG:
+ raise gdb.GdbError("CONFIG_SLUB_DEBUG is not enabled")
+
+ def count_free(slab):
+ total_free = 0
+ for slab in lists.list_for_each_entry(slab, slab_ptr_type, 'slab_list'):
+ total_free += int(slab['objects'] - slab['inuse'])
+ return total_free
+
+ gdb.write("{:^18} | {:^20} | {:^12} | {:^12} | {:^8} | {:^11} | {:^13}\n".format('Pointer', 'name', 'active_objs', 'num_objs', 'objsize', 'objperslab', 'pagesperslab'))
+ gdb.write("{:-^18} | {:-^20} | {:-^12} | {:-^12} | {:-^8} | {:-^11} | {:-^13}\n".format('', '', '', '', '', '', ''))
+
+ slab_caches = gdb.parse_and_eval("slab_caches")
+ if mm.page_ops().ops.MAX_NUMNODES > 1:
+ nr_node_ids = int(gdb.parse_and_eval("nr_node_ids"))
+ else:
+ nr_node_ids = 1
+
+ for cache in lists.list_for_each_entry(slab_caches, kmem_cache_ptr_type, 'list'):
+ nr_objs = 0
+ nr_free = 0
+ nr_slabs = 0
+ for i in range(0, nr_node_ids):
+ cache_node = cache['node'][i]
+ try:
+ nr_slabs += cache_node['nr_slabs']['counter']
+                nr_objs += int(cache_node['total_objects']['counter'])
+                nr_free += count_free(cache_node['partial'])
+ except:
+ raise gdb.GdbError(traceback.format_exc())
+ active_objs = nr_objs - nr_free
+ num_objs = nr_objs
+ active_slabs = nr_slabs
+ objects_per_slab = oo_objects(cache['oo'])
+ cache_order = oo_order(cache['oo'])
+ gdb.write("{:18s} | {:20.19s} | {:12} | {:12} | {:8} | {:11} | {:13}\n".format(hex(cache), cache['name'].string(), str(active_objs), str(num_objs), str(cache['size']), str(objects_per_slab), str(1 << cache_order)))
+
+class LxSlabInfo(gdb.Command):
+ """Show slabinfo"""
+
+ def __init__(self):
+ super(LxSlabInfo, self).__init__("lx-slabinfo", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ slabinfo()
+LxSlabInfo()
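+# Example (illustrative): from the gdb prompt, dump per-cache statistics in a
+# /proc/slabinfo-like table:
+#   (gdb) lx-slabinfo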
diff --git a/scripts/gdb/linux/stackdepot.py b/scripts/gdb/linux/stackdepot.py
new file mode 100644
index 000000000000..37313a5a51a0
--- /dev/null
+++ b/scripts/gdb/linux/stackdepot.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2023 MediaTek Inc.
+#
+# Authors:
+# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
+#
+
+import gdb
+from linux import utils, constants
+
+if constants.LX_CONFIG_STACKDEPOT:
+ stack_record_type = utils.CachedType('struct stack_record')
+ DEPOT_STACK_ALIGN = 4
+
+def help():
+ t = """Usage: lx-stack_depot_lookup [Hex handle value]
+ Example:
+ lx-stack_depot_lookup 0x00c80300\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+def stack_depot_fetch(handle):
+ global DEPOT_STACK_ALIGN
+ global stack_record_type
+
+ stack_depot_disabled = gdb.parse_and_eval('stack_depot_disabled')
+
+ if stack_depot_disabled:
+ raise gdb.GdbError("stack_depot_disabled\n")
+
+ handle_parts_t = gdb.lookup_type("union handle_parts")
+ parts = handle.cast(handle_parts_t)
+ offset = parts['offset'] << DEPOT_STACK_ALIGN
+ pools_num = gdb.parse_and_eval('pools_num')
+
+ if handle == 0:
+ raise gdb.GdbError("handle is 0\n")
+
+ pool_index = parts['pool_index_plus_1'] - 1
+ if pool_index >= pools_num:
+ gdb.write("pool index %d out of bounds (%d) for stack id 0x%08x\n" % (parts['pool_index'], pools_num, handle))
+ return gdb.Value(0), 0
+
+ stack_pools = gdb.parse_and_eval('stack_pools')
+
+ try:
+ pool = stack_pools[pool_index]
+ stack = (pool + gdb.Value(offset).cast(utils.get_size_t_type())).cast(stack_record_type.get_type().pointer())
+ size = int(stack['size'].cast(utils.get_ulong_type()))
+ return stack['entries'], size
+ except Exception as e:
+ gdb.write("%s\n" % e)
+ return gdb.Value(0), 0
+
+def stack_depot_print(handle):
+ if not constants.LX_CONFIG_STACKDEPOT:
+ raise gdb.GdbError("CONFIG_STACKDEPOT is not enabled")
+
+ entries, nr_entries = stack_depot_fetch(handle)
+ if nr_entries > 0:
+ for i in range(0, nr_entries):
+ try:
+ gdb.execute("x /i 0x%x" % (int(entries[i])))
+ except Exception as e:
+ gdb.write("%s\n" % e)
+
+class StackDepotLookup(gdb.Command):
+ """Search backtrace by handle"""
+
+ def __init__(self):
+ if constants.LX_CONFIG_STACKDEPOT:
+ super(StackDepotLookup, self).__init__("lx-stack_depot_lookup", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not constants.LX_CONFIG_STACKDEPOT:
+ raise gdb.GdbError('CONFIG_STACKDEPOT is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ handle = int(argv[0], 16)
+ stack_depot_print(gdb.Value(handle).cast(utils.get_uint_type()))
+ else:
+ help()
+
+StackDepotLookup()
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
new file mode 100644
index 000000000000..d4308b726183
--- /dev/null
+++ b/scripts/gdb/linux/symbols.py
@@ -0,0 +1,338 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# load kernel and module symbols
+#
+# Copyright (c) Siemens AG, 2011-2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import atexit
+import gdb
+import os
+import re
+import struct
+
+from itertools import count
+from linux import bpf, constants, modules, utils
+
+
+if hasattr(gdb, 'Breakpoint'):
+ class LoadModuleBreakpoint(gdb.Breakpoint):
+ def __init__(self, spec, gdb_command):
+ super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
+ self.silent = True
+ self.gdb_command = gdb_command
+
+ def stop(self):
+ module = gdb.parse_and_eval("mod")
+ module_name = module['name'].string()
+ cmd = self.gdb_command
+
+ # enforce update if object file is not found
+ cmd.module_files_updated = False
+
+ # Disable pagination while reporting symbol (re-)loading.
+            # The console input is blocked in this context, so we would
+            # otherwise get stuck waiting for the user to acknowledge paged
+            # output.
+ with utils.pagination_off():
+ if module_name in cmd.loaded_modules:
+ gdb.write("refreshing all symbols to reload module "
+ "'{0}'\n".format(module_name))
+ cmd.load_all_symbols()
+ else:
+ cmd.load_module_symbols(module)
+
+ return False
+
+
+def get_vmcore_s390():
+ with utils.qemu_phy_mem_mode():
+ vmcore_info = 0x0e0c
+ paddr_vmcoreinfo_note = gdb.parse_and_eval("*(unsigned long long *)" +
+ hex(vmcore_info))
+ if paddr_vmcoreinfo_note == 0 or paddr_vmcoreinfo_note & 1:
+ # In the early boot case, extract vm_layout.kaslr_offset from the
+ # vmlinux image in physical memory.
+ if paddr_vmcoreinfo_note == 0:
+ kaslr_offset_phys = 0
+ else:
+ kaslr_offset_phys = paddr_vmcoreinfo_note - 1
+ with utils.pagination_off():
+ gdb.execute("symbol-file {0} -o {1}".format(
+ utils.get_vmlinux(), hex(kaslr_offset_phys)))
+ kaslr_offset = gdb.parse_and_eval("vm_layout.kaslr_offset")
+ return "KERNELOFFSET=" + hex(kaslr_offset)[2:]
+ inferior = gdb.selected_inferior()
+ elf_note = inferior.read_memory(paddr_vmcoreinfo_note, 12)
+ n_namesz, n_descsz, n_type = struct.unpack(">III", elf_note)
+ desc_paddr = paddr_vmcoreinfo_note + len(elf_note) + n_namesz + 1
+ return gdb.parse_and_eval("(char *)" + hex(desc_paddr)).string()
+
+
+def get_kerneloffset():
+ if utils.is_target_arch('s390'):
+ try:
+ vmcore_str = get_vmcore_s390()
+ except gdb.error as e:
+ gdb.write("{}\n".format(e))
+ return None
+ return utils.parse_vmcore(vmcore_str).kerneloffset
+ return None
+
+
+def is_in_s390_decompressor():
+ # DAT is always off in decompressor. Use this as an indicator.
+ # Note that in the kernel, DAT can be off during kexec() or restart.
+ # Accept this imprecision in order to avoid complicating things.
+ # It is unlikely that someone will run lx-symbols at these points.
+ pswm = int(gdb.parse_and_eval("$pswm"))
+ return (pswm & 0x0400000000000000) == 0
+
+
+def skip_decompressor():
+ if utils.is_target_arch("s390"):
+ if is_in_s390_decompressor():
+ # The address of the jump_to_kernel function is statically placed
+ # into svc_old_psw.addr (see ipl_data.c); read it from there. DAT
+ # is off, so we do not need to care about lowcore relocation.
+ svc_old_pswa = 0x148
+ jump_to_kernel = int(gdb.parse_and_eval("*(unsigned long long *)" +
+ hex(svc_old_pswa)))
+ gdb.execute("tbreak *" + hex(jump_to_kernel))
+ gdb.execute("continue")
+ while is_in_s390_decompressor():
+ gdb.execute("stepi")
+
+
+class LxSymbols(gdb.Command):
+ """(Re-)load symbols of Linux kernel and currently loaded modules.
+
+The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
+are scanned recursively, starting in the same directory. Optionally, the module
+search path can be extended by a space separated list of paths passed to the
+lx-symbols command.
+
+When the -bpf flag is specified, symbols from the currently loaded BPF programs
+are loaded as well."""
+
+ module_paths = []
+ module_files = []
+ module_files_updated = False
+ loaded_modules = []
+ breakpoint = None
+ bpf_prog_monitor = None
+ bpf_ksym_monitor = None
+ bpf_progs = {}
+ # The remove-symbol-file command, even when invoked with -a, requires the
+ # respective object file to exist, so keep them around.
+ bpf_debug_objs = {}
+
+ def __init__(self):
+ super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
+ gdb.COMPLETE_FILENAME)
+ atexit.register(self.cleanup_bpf)
+
+ def _update_module_files(self):
+ self.module_files = []
+ for path in self.module_paths:
+ gdb.write("scanning for modules in {0}\n".format(path))
+ for root, dirs, files in os.walk(path):
+ for name in files:
+ if name.endswith(".ko") or name.endswith(".ko.debug"):
+ self.module_files.append(root + "/" + name)
+ self.module_files_updated = True
+
+ def _get_module_file(self, module_name):
+ module_pattern = r".*/{0}\.ko(?:.debug)?$".format(
+ module_name.replace("_", r"[_\-]"))
+ for name in self.module_files:
+ if re.match(module_pattern, name) and os.path.exists(name):
+ return name
+ return None
+
+ def _section_arguments(self, module, module_addr):
+ try:
+ sect_attrs = module['sect_attrs'].dereference()
+ except gdb.error:
+ return str(module_addr)
+
+ section_name_to_address = {}
+ for i in count():
+ # this is a NULL terminated array
+ if sect_attrs['grp']['bin_attrs'][i] == 0x0:
+ break
+
+ attr = sect_attrs['grp']['bin_attrs'][i].dereference()
+ section_name_to_address[attr['attr']['name'].string()] = attr['private']
+
+ textaddr = section_name_to_address.get(".text", module_addr)
+ args = []
+ for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+ ".text.hot", ".text.unlikely"]:
+ address = section_name_to_address.get(section_name)
+ if address:
+ args.append(" -s {name} {addr}".format(
+ name=section_name, addr=str(address)))
+ return "{textaddr} {sections}".format(
+ textaddr=textaddr, sections="".join(args))
+
+ def load_module_symbols(self, module):
+ module_name = module['name'].string()
+ module_addr = str(module['mem'][constants.LX_MOD_TEXT]['base']).split()[0]
+
+ module_file = self._get_module_file(module_name)
+ if not module_file and not self.module_files_updated:
+ self._update_module_files()
+ module_file = self._get_module_file(module_name)
+
+ if module_file:
+ if utils.is_target_arch('s390'):
+ # Module text is preceded by PLT stubs on s390.
+ module_arch = module['arch']
+ plt_offset = int(module_arch['plt_offset'])
+ plt_size = int(module_arch['plt_size'])
+ module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
+ gdb.write("loading @{addr}: {filename}\n".format(
+ addr=module_addr, filename=module_file))
+ cmdline = "add-symbol-file {filename} {sections}".format(
+ filename=module_file,
+ sections=self._section_arguments(module, module_addr))
+ gdb.execute(cmdline, to_string=True)
+ if module_name not in self.loaded_modules:
+ self.loaded_modules.append(module_name)
+ else:
+ gdb.write("no module object found for '{0}'\n".format(module_name))
+
+ def add_bpf_prog(self, prog):
+ if prog["jited"]:
+ self.bpf_progs[int(prog["bpf_func"])] = prog
+
+ def remove_bpf_prog(self, prog):
+ self.bpf_progs.pop(int(prog["bpf_func"]), None)
+
+ def add_bpf_ksym(self, ksym):
+ addr = int(ksym["start"])
+ name = bpf.get_ksym_name(ksym)
+ with utils.pagination_off():
+ gdb.write("loading @{addr}: {name}\n".format(
+ addr=hex(addr), name=name))
+ debug_obj = bpf.generate_debug_obj(ksym, self.bpf_progs.get(addr))
+ if debug_obj is None:
+ return
+ try:
+ cmdline = "add-symbol-file {obj} {addr}".format(
+ obj=debug_obj.name, addr=hex(addr))
+ gdb.execute(cmdline, to_string=True)
+ except:
+ debug_obj.close()
+ raise
+ self.bpf_debug_objs[addr] = debug_obj
+
+ def remove_bpf_ksym(self, ksym):
+ addr = int(ksym["start"])
+ debug_obj = self.bpf_debug_objs.pop(addr, None)
+ if debug_obj is None:
+ return
+ try:
+ name = bpf.get_ksym_name(ksym)
+ gdb.write("unloading @{addr}: {name}\n".format(
+ addr=hex(addr), name=name))
+ cmdline = "remove-symbol-file {path}".format(path=debug_obj.name)
+ gdb.execute(cmdline, to_string=True)
+ finally:
+ debug_obj.close()
+
+ def cleanup_bpf(self):
+ self.bpf_progs = {}
+ while len(self.bpf_debug_objs) > 0:
+ self.bpf_debug_objs.popitem()[1].close()
+
+
+ def load_all_symbols(self):
+ gdb.write("loading vmlinux\n")
+
+ # Dropping symbols will disable all breakpoints. So save their states
+ # and restore them afterward.
+ saved_states = []
+ if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
+ for bp in gdb.breakpoints():
+ saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
+
+ # drop all current symbols and reload vmlinux
+ orig_vmlinux = utils.get_vmlinux()
+ gdb.execute("symbol-file", to_string=True)
+ kerneloffset = get_kerneloffset()
+ if kerneloffset is None:
+ offset_arg = ""
+ else:
+ offset_arg = " -o " + hex(kerneloffset)
+ gdb.execute("symbol-file {0}{1}".format(orig_vmlinux, offset_arg))
+
+ self.loaded_modules = []
+ module_list = modules.module_list()
+ if not module_list:
+ gdb.write("no modules found\n")
+ else:
+ [self.load_module_symbols(module) for module in module_list]
+
+ self.cleanup_bpf()
+ if self.bpf_prog_monitor is not None:
+ self.bpf_prog_monitor.notify_initial()
+ if self.bpf_ksym_monitor is not None:
+ self.bpf_ksym_monitor.notify_initial()
+
+ for saved_state in saved_states:
+ saved_state['breakpoint'].enabled = saved_state['enabled']
+
+ def invoke(self, arg, from_tty):
+ skip_decompressor()
+
+ monitor_bpf = False
+ self.module_paths = []
+ for p in arg.split():
+ if p == "-bpf":
+ monitor_bpf = True
+ else:
+                self.module_paths.append(os.path.abspath(os.path.expanduser(p)))
+ self.module_paths.append(os.getcwd())
+
+ if self.breakpoint is not None:
+ self.breakpoint.delete()
+ self.breakpoint = None
+ if self.bpf_prog_monitor is not None:
+ self.bpf_prog_monitor.delete()
+ self.bpf_prog_monitor = None
+ if self.bpf_ksym_monitor is not None:
+ self.bpf_ksym_monitor.delete()
+ self.bpf_ksym_monitor = None
+
+ # enforce update
+ self.module_files = []
+ self.module_files_updated = False
+
+ self.load_all_symbols()
+
+ if not hasattr(gdb, 'Breakpoint'):
+ gdb.write("Note: symbol update on module and BPF loading not "
+ "supported with this gdb version\n")
+ return
+
+ if modules.has_modules():
+ self.breakpoint = LoadModuleBreakpoint(
+ "kernel/module/main.c:do_init_module", self)
+
+ if monitor_bpf:
+ if constants.LX_CONFIG_BPF_SYSCALL:
+ self.bpf_prog_monitor = bpf.ProgMonitor(self.add_bpf_prog,
+ self.remove_bpf_prog)
+ if constants.LX_CONFIG_BPF and constants.LX_CONFIG_BPF_JIT:
+ self.bpf_ksym_monitor = bpf.KsymMonitor(self.add_bpf_ksym,
+ self.remove_bpf_ksym)
+
+
+LxSymbols()
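+# Example (illustrative): reload kernel and module symbols, adding a
+# placeholder extra module search path and enabling BPF symbol monitoring:
+#   (gdb) lx-symbols /path/to/modules -bpf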
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
new file mode 100644
index 000000000000..62348397c1f5
--- /dev/null
+++ b/scripts/gdb/linux/tasks.py
@@ -0,0 +1,127 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# task & thread tools
+#
+# Copyright (c) Siemens AG, 2011-2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import utils, lists
+
+
+task_type = utils.CachedType("struct task_struct")
+
+
+def task_lists():
+ task_ptr_type = task_type.get_type().pointer()
+ init_task = gdb.parse_and_eval("init_task").address
+ t = init_task
+
+ while True:
+ thread_head = t['signal']['thread_head']
+ for thread in lists.list_for_each_entry(thread_head, task_ptr_type, 'thread_node'):
+ yield thread
+
+ t = utils.container_of(t['tasks']['next'],
+ task_ptr_type, "tasks")
+ if t == init_task:
+ return
+
+
+def get_task_by_pid(pid):
+ for task in task_lists():
+ if int(task['pid']) == pid:
+ return task
+ return None
+
+
+class LxTaskByPidFunc(gdb.Function):
+ """Find Linux task by PID and return the task_struct variable.
+
+$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
+return that task_struct variable which PID matches."""
+
+ def __init__(self):
+ super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
+
+ def invoke(self, pid):
+ task = get_task_by_pid(pid)
+ if task:
+ return task.dereference()
+ else:
+ raise gdb.GdbError("No task of PID " + str(pid))
+
+
+LxTaskByPidFunc()
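+# Example (illustrative): print the task_struct of PID 1 from the gdb prompt:
+#   (gdb) p $lx_task_by_pid(1)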
+
+
+class LxPs(gdb.Command):
+ """Dump Linux tasks."""
+
+ def __init__(self):
+ super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ gdb.write("{:>10} {:>12} {:>7}\n".format("TASK", "PID", "COMM"))
+ for task in task_lists():
+ gdb.write("{} {:^5} {}\n".format(
+ task.format_string().split()[0],
+ task["pid"].format_string(),
+ task["comm"].string()))
+
+
+LxPs()
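+# Example (illustrative): list all tasks (task_struct address, PID, comm):
+#   (gdb) lx-ps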
+
+
+thread_info_type = utils.CachedType("struct thread_info")
+
+
+def get_thread_info(task):
+ thread_info_ptr_type = thread_info_type.get_type().pointer()
+ if task_type.get_type().fields()[0].type == thread_info_type.get_type():
+ return task['thread_info']
+ thread_info = task['stack'].cast(thread_info_ptr_type)
+ return thread_info.dereference()
+
+
+class LxThreadInfoFunc (gdb.Function):
+ """Calculate Linux thread_info from task variable.
+
+$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
+variable."""
+
+ def __init__(self):
+ super(LxThreadInfoFunc, self).__init__("lx_thread_info")
+
+ def invoke(self, task):
+ return get_thread_info(task)
+
+
+LxThreadInfoFunc()
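+# Example (illustrative): combine with $lx_task_by_pid to inspect a task's
+# thread_info:
+#   (gdb) p $lx_thread_info($lx_task_by_pid(1))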
+
+
+class LxThreadInfoByPidFunc (gdb.Function):
+ """Calculate Linux thread_info from task variable found by pid
+
+$lx_thread_info_by_pid(PID): Given PID, return the corresponding thread_info
+variable."""
+
+ def __init__(self):
+ super(LxThreadInfoByPidFunc, self).__init__("lx_thread_info_by_pid")
+
+ def invoke(self, pid):
+ task = get_task_by_pid(pid)
+ if task:
+ return get_thread_info(task.dereference())
+ else:
+ raise gdb.GdbError("No task of PID " + str(pid))
+
+
+LxThreadInfoByPidFunc()
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
new file mode 100644
index 000000000000..ccc24d30de80
--- /dev/null
+++ b/scripts/gdb/linux/timerlist.py
@@ -0,0 +1,220 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2019 Google LLC.
+
+import binascii
+import gdb
+
+from linux import constants
+from linux import cpus
+from linux import rbtree
+from linux import utils
+
+timerqueue_node_type = utils.CachedType("struct timerqueue_node").get_type()
+hrtimer_type = utils.CachedType("struct hrtimer").get_type()
+
+
+def ktime_get():
+ """Returns the current time, but not very accurately
+
+ We can't read the hardware timer itself to add any nanoseconds
+ that need to be added since we last stored the time in the
+ timekeeper. But this is probably good enough for debug purposes."""
+ tk_core = gdb.parse_and_eval("&tk_core")
+
+ return tk_core['timekeeper']['tkr_mono']['base']
+
+
+def print_timer(rb_node, idx):
+ timerqueue = utils.container_of(rb_node, timerqueue_node_type.pointer(),
+ "node")
+ timer = utils.container_of(timerqueue, hrtimer_type.pointer(), "node")
+
+ function = str(timer['function']).split(" ")[1].strip("<>")
+ softexpires = timer['_softexpires']
+ expires = timer['node']['expires']
+ now = ktime_get()
+
+ text = " #{}: <{}>, {}, ".format(idx, timer, function)
+ text += "S:{:02x}\n".format(int(timer['state']))
+ text += " # expires at {}-{} nsecs [in {} to {} nsecs]\n".format(
+ softexpires, expires, softexpires - now, expires - now)
+ return text
+
+
+def print_active_timers(base):
+ curr = base['active']['rb_root']['rb_leftmost']
+ idx = 0
+ while curr:
+ yield print_timer(curr, idx)
+ curr = rbtree.rb_next(curr)
+ idx += 1
+
+
+def print_base(base):
+ text = " .base: {}\n".format(base.address)
+ text += " .index: {}\n".format(base['index'])
+
+ text += " .resolution: {} nsecs\n".format(constants.LX_hrtimer_resolution)
+ if constants.LX_CONFIG_HIGH_RES_TIMERS:
+ text += " .offset: {} nsecs\n".format(base['offset'])
+ text += "active timers:\n"
+ text += "".join([x for x in print_active_timers(base)])
+ return text
+
+
+def print_cpu(hrtimer_bases, cpu, max_clock_bases):
+ cpu_base = cpus.per_cpu(hrtimer_bases, cpu)
+ jiffies = gdb.parse_and_eval("jiffies_64")
+ tick_sched_ptr = gdb.parse_and_eval("&tick_cpu_sched")
+ ts = cpus.per_cpu(tick_sched_ptr, cpu)
+
+ text = "cpu: {}\n".format(cpu)
+ for i in range(max_clock_bases):
+ text += " clock {}:\n".format(i)
+ text += print_base(cpu_base['clock_base'][i])
+
+ if constants.LX_CONFIG_HIGH_RES_TIMERS:
+ fmts = [(" .{} : {} nsecs", 'expires_next'),
+ (" .{} : {}", 'hres_active'),
+ (" .{} : {}", 'nr_events'),
+ (" .{} : {}", 'nr_retries'),
+ (" .{} : {}", 'nr_hangs'),
+ (" .{} : {}", 'max_hang_time')]
+ text += "\n".join([s.format(f, cpu_base[f]) for s, f in fmts])
+ text += "\n"
+
+ if constants.LX_CONFIG_TICK_ONESHOT:
+ TS_FLAG_STOPPED = 1 << 1
+ TS_FLAG_NOHZ = 1 << 4
+ text += f" .{'nohz':15s}: {int(bool(ts['flags'] & TS_FLAG_NOHZ))}\n"
+ text += f" .{'last_tick':15s}: {ts['last_tick']}\n"
+ text += f" .{'tick_stopped':15s}: {int(bool(ts['flags'] & TS_FLAG_STOPPED))}\n"
+ text += f" .{'idle_jiffies':15s}: {ts['idle_jiffies']}\n"
+ text += f" .{'idle_calls':15s}: {ts['idle_calls']}\n"
+ text += f" .{'idle_sleeps':15s}: {ts['idle_sleeps']}\n"
+ text += f" .{'idle_entrytime':15s}: {ts['idle_entrytime']} nsecs\n"
+ text += f" .{'idle_waketime':15s}: {ts['idle_waketime']} nsecs\n"
+ text += f" .{'idle_exittime':15s}: {ts['idle_exittime']} nsecs\n"
+ text += f" .{'idle_sleeptime':15s}: {ts['idle_sleeptime']} nsecs\n"
+ text += f" .{'iowait_sleeptime':15s}: {ts['iowait_sleeptime']} nsecs\n"
+ text += f" .{'last_jiffies':15s}: {ts['last_jiffies']}\n"
+ text += f" .{'next_timer':15s}: {ts['next_timer']}\n"
+ text += f" .{'idle_expires':15s}: {ts['idle_expires']} nsecs\n"
+ text += "\njiffies: {}\n".format(jiffies)
+
+ text += "\n"
+
+ return text
+
+
+def print_tickdevice(td, cpu):
+ dev = td['evtdev']
+ text = "Tick Device: mode: {}\n".format(td['mode'])
+ if cpu < 0:
+ text += "Broadcast device\n"
+ else:
+ text += "Per CPU device: {}\n".format(cpu)
+
+ text += "Clock Event Device: "
+ if dev == 0:
+ text += "<NULL>\n"
+ return text
+
+ text += "{}\n".format(dev['name'])
+ text += " max_delta_ns: {}\n".format(dev['max_delta_ns'])
+ text += " min_delta_ns: {}\n".format(dev['min_delta_ns'])
+ text += " mult: {}\n".format(dev['mult'])
+ text += " shift: {}\n".format(dev['shift'])
+ text += " mode: {}\n".format(dev['state_use_accessors'])
+ text += " next_event: {} nsecs\n".format(dev['next_event'])
+
+ text += " set_next_event: {}\n".format(dev['set_next_event'])
+
+ members = [('set_state_shutdown', " shutdown: {}\n"),
+ ('set_state_periodic', " periodic: {}\n"),
+ ('set_state_oneshot', " oneshot: {}\n"),
+ ('set_state_oneshot_stopped', " oneshot stopped: {}\n"),
+ ('tick_resume', " resume: {}\n")]
+ for member, fmt in members:
+ if dev[member]:
+ text += fmt.format(dev[member])
+
+ text += " event_handler: {}\n".format(dev['event_handler'])
+ text += " retries: {}\n".format(dev['retries'])
+
+ return text
+
+
+def pr_cpumask(mask):
+ nr_cpu_ids = 1
+ if constants.LX_NR_CPUS > 1:
+        nr_cpu_ids = int(gdb.parse_and_eval("nr_cpu_ids"))
+
+ inf = gdb.inferiors()[0]
+ bits = mask['bits']
+    num_bytes = (nr_cpu_ids + 7) // 8
+ buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
+ buf = binascii.b2a_hex(buf)
+ if type(buf) is not str:
+ buf=buf.decode()
+
+ chunks = []
+ i = num_bytes
+ while i > 0:
+ i -= 1
+ start = i * 2
+ end = start + 2
+ chunks.append(buf[start:end])
+ if i != 0 and i % 4 == 0:
+ chunks.append(',')
+
+ extra = nr_cpu_ids % 8
+ if 0 < extra <= 4:
+ chunks[0] = chunks[0][0] # Cut off the first 0
+
+ return "".join(str(chunks))
+
+
+class LxTimerList(gdb.Command):
+ """Print /proc/timer_list"""
+
+ def __init__(self):
+ super(LxTimerList, self).__init__("lx-timerlist", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ hrtimer_bases = gdb.parse_and_eval("&hrtimer_bases")
+ max_clock_bases = gdb.parse_and_eval("HRTIMER_MAX_CLOCK_BASES")
+
+ text = "Timer List Version: gdb scripts\n"
+ text += "HRTIMER_MAX_CLOCK_BASES: {}\n".format(
+ max_clock_bases.type.fields()[max_clock_bases].enumval)
+ text += "now at {} nsecs\n".format(ktime_get())
+
+ for cpu in cpus.each_online_cpu():
+ text += print_cpu(hrtimer_bases, cpu, max_clock_bases)
+
+ if constants.LX_CONFIG_GENERIC_CLOCKEVENTS:
+ if constants.LX_CONFIG_GENERIC_CLOCKEVENTS_BROADCAST:
+ bc_dev = gdb.parse_and_eval("&tick_broadcast_device")
+ text += print_tickdevice(bc_dev, -1)
+ text += "\n"
+ mask = gdb.parse_and_eval("tick_broadcast_mask")
+ mask = pr_cpumask(mask)
+ text += "tick_broadcast_mask: {}\n".format(mask)
+ if constants.LX_CONFIG_TICK_ONESHOT:
+ mask = gdb.parse_and_eval("tick_broadcast_oneshot_mask")
+ mask = pr_cpumask(mask)
+ text += "tick_broadcast_oneshot_mask: {}\n".format(mask)
+ text += "\n"
+
+ tick_cpu_devices = gdb.parse_and_eval("&tick_cpu_device")
+ for cpu in cpus.each_online_cpu():
+ tick_dev = cpus.per_cpu(tick_cpu_devices, cpu)
+ text += print_tickdevice(tick_dev, cpu)
+ text += "\n"
+
+ gdb.write(text)
+
+
+LxTimerList()
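+# Example (illustrative): print the hrtimer bases, tick devices and broadcast
+# masks of all online CPUs, mirroring /proc/timer_list:
+#   (gdb) lx-timerlist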
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
new file mode 100644
index 000000000000..e11f6f67961a
--- /dev/null
+++ b/scripts/gdb/linux/utils.py
@@ -0,0 +1,273 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# common utilities
+#
+# Copyright (c) Siemens AG, 2011-2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import contextlib
+import dataclasses
+import re
+import typing
+
+import gdb
+
+
+class CachedType:
+ def __init__(self, name):
+ self._type = None
+ self._name = name
+
+ def _new_objfile_handler(self, event):
+ self._type = None
+ gdb.events.new_objfile.disconnect(self._new_objfile_handler)
+
+ def get_type(self):
+ if self._type is None:
+ self._type = gdb.lookup_type(self._name)
+ if self._type is None:
+ raise gdb.GdbError(
+ "cannot resolve type '{0}'".format(self._name))
+ if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
+ gdb.events.new_objfile.connect(self._new_objfile_handler)
+ return self._type
+
+
+long_type = CachedType("long")
+ulong_type = CachedType("unsigned long")
+uint_type = CachedType("unsigned int")
+atomic_long_type = CachedType("atomic_long_t")
+size_t_type = CachedType("size_t")
+struct_page_type = CachedType("struct page")
+
+def get_uint_type():
+ global uint_type
+ return uint_type.get_type()
+
+def get_page_type():
+ global struct_page_type
+ return struct_page_type.get_type()
+
+def get_long_type():
+ global long_type
+ return long_type.get_type()
+
+def get_ulong_type():
+ global ulong_type
+ return ulong_type.get_type()
+
+def get_size_t_type():
+ global size_t_type
+ return size_t_type.get_type()
+
+def offset_of(typeobj, field):
+ element = gdb.Value(0).cast(typeobj)
+ return int(str(element[field].address).split()[0], 16)
+
+
+def container_of(ptr, typeobj, member):
+ return (ptr.cast(get_long_type()) -
+ offset_of(typeobj, member)).cast(typeobj)
+
+
+class ContainerOf(gdb.Function):
+ """Return pointer to containing data structure.
+
+$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
+data structure of the type TYPE in which PTR is the address of ELEMENT.
+Note that TYPE and ELEMENT have to be quoted as strings."""
+
+ def __init__(self):
+ super(ContainerOf, self).__init__("container_of")
+
+ def invoke(self, ptr, typename, elementname):
+ return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
+ elementname.string())
+
+
+ContainerOf()
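+# Example (illustrative): given a hypothetical convenience variable $lh that
+# points at the 'tasks' list_head embedded in a task_struct, recover the
+# containing structure:
+#   (gdb) p *$container_of($lh, "struct task_struct", "tasks")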
+
+
+BIG_ENDIAN = 0
+LITTLE_ENDIAN = 1
+target_endianness = None
+
+
+def get_target_endianness():
+ global target_endianness
+ if target_endianness is None:
+ endian = gdb.execute("show endian", to_string=True)
+ if "little endian" in endian:
+ target_endianness = LITTLE_ENDIAN
+ elif "big endian" in endian:
+ target_endianness = BIG_ENDIAN
+ else:
+ raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
+ return target_endianness
+
+
+def read_memoryview(inf, start, length):
+ m = inf.read_memory(start, length)
+ if type(m) is memoryview:
+ return m
+ return memoryview(m)
+
+
+def read_u16(buffer, offset):
+ buffer_val = buffer[offset:offset + 2]
+ value = [0, 0]
+
+ if type(buffer_val[0]) is str:
+ value[0] = ord(buffer_val[0])
+ value[1] = ord(buffer_val[1])
+ else:
+ value[0] = buffer_val[0]
+ value[1] = buffer_val[1]
+
+ if get_target_endianness() == LITTLE_ENDIAN:
+ return value[0] + (value[1] << 8)
+ else:
+ return value[1] + (value[0] << 8)
+
+
+def read_u32(buffer, offset):
+ if get_target_endianness() == LITTLE_ENDIAN:
+ return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
+ else:
+ return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)
+
+
+def read_u64(buffer, offset):
+ if get_target_endianness() == LITTLE_ENDIAN:
+ return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
+ else:
+ return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
+
+
+def read_ulong(buffer, offset):
+ if get_long_type().sizeof == 8:
+ return read_u64(buffer, offset)
+ else:
+ return read_u32(buffer, offset)
+
+atomic_long_counter_offset = atomic_long_type.get_type()['counter'].bitpos
+atomic_long_counter_sizeof = atomic_long_type.get_type()['counter'].type.sizeof
+
+def read_atomic_long(buffer, offset):
+ global atomic_long_counter_offset
+ global atomic_long_counter_sizeof
+
+ if atomic_long_counter_sizeof == 8:
+ return read_u64(buffer, offset + atomic_long_counter_offset)
+ else:
+ return read_u32(buffer, offset + atomic_long_counter_offset)
+
+target_arch = None
+
+
+def is_target_arch(arch):
+ if hasattr(gdb.Frame, 'architecture'):
+ return arch in gdb.newest_frame().architecture().name()
+ else:
+ global target_arch
+ if target_arch is None:
+ target_arch = gdb.execute("show architecture", to_string=True)
+ return arch in target_arch
+
+
+GDBSERVER_QEMU = 0
+GDBSERVER_KGDB = 1
+gdbserver_type = None
+
+
+def get_gdbserver_type():
+ def exit_handler(event):
+ global gdbserver_type
+ gdbserver_type = None
+ gdb.events.exited.disconnect(exit_handler)
+
+ def probe_qemu():
+ try:
+ return gdb.execute("monitor info version", to_string=True) != ""
+ except gdb.error:
+ return False
+
+ def probe_kgdb():
+ try:
+ thread_info = gdb.execute("info thread 1", to_string=True)
+ return "shadowCPU" in thread_info
+ except gdb.error:
+ return False
+
+ global gdbserver_type
+ if gdbserver_type is None:
+ if probe_qemu():
+ gdbserver_type = GDBSERVER_QEMU
+ elif probe_kgdb():
+ gdbserver_type = GDBSERVER_KGDB
+ if gdbserver_type is not None and hasattr(gdb, 'events'):
+ gdb.events.exited.connect(exit_handler)
+ return gdbserver_type
+
+
+def gdb_eval_or_none(expression):
+    try:
+        return gdb.parse_and_eval(expression)
+ except gdb.error:
+ return None
+
+
+@contextlib.contextmanager
+def qemu_phy_mem_mode():
+ connection = gdb.selected_inferior().connection
+ orig = connection.send_packet("qqemu.PhyMemMode")
+ if orig not in b"01":
+ raise gdb.error("Unexpected qemu.PhyMemMode")
+ orig = orig.decode()
+ if connection.send_packet("Qqemu.PhyMemMode:1") != b"OK":
+ raise gdb.error("Failed to set qemu.PhyMemMode")
+ try:
+ yield
+ finally:
+ if connection.send_packet("Qqemu.PhyMemMode:" + orig) != b"OK":
+ raise gdb.error("Failed to restore qemu.PhyMemMode")
+
+
+@dataclasses.dataclass
+class VmCore:
+ kerneloffset: typing.Optional[int]
+
+
+def parse_vmcore(s):
+ match = re.search(r"KERNELOFFSET=([0-9a-f]+)", s)
+ if match is None:
+ kerneloffset = None
+ else:
+ kerneloffset = int(match.group(1), 16)
+ return VmCore(kerneloffset=kerneloffset)
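+# Example (illustrative): parse_vmcore("KERNELOFFSET=38000000") returns a
+# VmCore whose kerneloffset equals 0x38000000; a string without the key
+# yields kerneloffset None.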
+
+
+def get_vmlinux():
+ vmlinux = 'vmlinux'
+ for obj in gdb.objfiles():
+ if (obj.filename.endswith('vmlinux') or
+ obj.filename.endswith('vmlinux.debug')):
+ vmlinux = obj.filename
+ return vmlinux
+
+
+@contextlib.contextmanager
+def pagination_off():
+ show_pagination = gdb.execute("show pagination", to_string=True)
+ pagination = show_pagination.endswith("on.\n")
+ gdb.execute("set pagination off")
+ try:
+ yield
+ finally:
+ gdb.execute("set pagination %s" % ("on" if pagination else "off"))
diff --git a/scripts/gdb/linux/vfs.py b/scripts/gdb/linux/vfs.py
new file mode 100644
index 000000000000..9e921b645a68
--- /dev/null
+++ b/scripts/gdb/linux/vfs.py
@@ -0,0 +1,59 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# VFS tools
+#
+# Copyright (c) 2023 Glenn Washburn
+# Copyright (c) 2016 Linaro Ltd
+#
+# Authors:
+# Glenn Washburn <development@efficientek.com>
+# Kieran Bingham <kieran.bingham@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+from linux import utils
+
+
+def dentry_name(d):
+ parent = d['d_parent']
+ if parent == d or parent == 0:
+ return ""
+ p = dentry_name(d['d_parent']) + "/"
+ return p + d['d_name']['name'].string()
+
+class DentryName(gdb.Function):
+ """Return string of the full path of a dentry.
+
+$lx_dentry_name(PTR): Given PTR to a dentry struct, return a string
+of the full path of the dentry."""
+
+ def __init__(self):
+ super(DentryName, self).__init__("lx_dentry_name")
+
+ def invoke(self, dentry_ptr):
+ return dentry_name(dentry_ptr)
+
+DentryName()
+
+
+dentry_type = utils.CachedType("struct dentry")
+
+class InodeDentry(gdb.Function):
+ """Return dentry pointer for inode.
+
+$lx_i_dentry(PTR): Given PTR to an inode struct, return a pointer to
+the associated dentry struct, if there is one."""
+
+ def __init__(self):
+ super(InodeDentry, self).__init__("lx_i_dentry")
+
+ def invoke(self, inode_ptr):
+ d_u = inode_ptr["i_dentry"]["first"]
+ if d_u == 0:
+ return ""
+ return utils.container_of(d_u, dentry_type.get_type().pointer(), "d_u")
+
+InodeDentry()
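+# Example (illustrative): given a hypothetical 'struct inode *' value $inode,
+# print the path of one of its dentries:
+#   (gdb) p $lx_dentry_name($lx_i_dentry($inode))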
diff --git a/scripts/gdb/linux/vmalloc.py b/scripts/gdb/linux/vmalloc.py
new file mode 100644
index 000000000000..803f17371052
--- /dev/null
+++ b/scripts/gdb/linux/vmalloc.py
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2023 MediaTek Inc.
+#
+# Authors:
+# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
+#
+
+import gdb
+import re
+from linux import lists, utils, stackdepot, constants, mm
+
+if constants.LX_CONFIG_MMU:
+ vmap_area_type = utils.CachedType('struct vmap_area')
+ vmap_area_ptr_type = vmap_area_type.get_type().pointer()
+
+def is_vmalloc_addr(x):
+ pg_ops = mm.page_ops().ops
+ addr = pg_ops.kasan_reset_tag(x)
+ return addr >= pg_ops.VMALLOC_START and addr < pg_ops.VMALLOC_END
+
+class LxVmallocInfo(gdb.Command):
+ """Show vmallocinfo"""
+
+ def __init__(self):
+ super(LxVmallocInfo, self).__init__("lx-vmallocinfo", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ if not constants.LX_CONFIG_MMU:
+ raise gdb.GdbError("Requires MMU support")
+
+ nr_vmap_nodes = gdb.parse_and_eval('nr_vmap_nodes')
+ for i in range(0, nr_vmap_nodes):
+ vn = gdb.parse_and_eval('&vmap_nodes[%d]' % i)
+ for vmap_area in lists.list_for_each_entry(vn['busy']['head'], vmap_area_ptr_type, "list"):
+ if not vmap_area['vm']:
+ gdb.write("0x%x-0x%x %10d vm_map_ram\n" % (vmap_area['va_start'], vmap_area['va_end'],
+ vmap_area['va_end'] - vmap_area['va_start']))
+ continue
+ v = vmap_area['vm']
+ gdb.write("0x%x-0x%x %10d" % (v['addr'], v['addr'] + v['size'], v['size']))
+ if v['caller']:
+ gdb.write(" %s" % str(v['caller']).split(' ')[-1])
+ if v['nr_pages']:
+ gdb.write(" pages=%d" % v['nr_pages'])
+ if v['phys_addr']:
+ gdb.write(" phys=0x%x" % v['phys_addr'])
+ if v['flags'] & constants.LX_VM_IOREMAP:
+ gdb.write(" ioremap")
+ if v['flags'] & constants.LX_VM_ALLOC:
+ gdb.write(" vmalloc")
+ if v['flags'] & constants.LX_VM_MAP:
+ gdb.write(" vmap")
+ if v['flags'] & constants.LX_VM_USERMAP:
+ gdb.write(" user")
+ if v['flags'] & constants.LX_VM_DMA_COHERENT:
+ gdb.write(" dma-coherent")
+ if is_vmalloc_addr(v['pages']):
+ gdb.write(" vpages")
+ gdb.write("\n")
+
+LxVmallocInfo()
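+# Example (illustrative): list vmalloc'ed areas with their sizes, callers and
+# flags, similar to /proc/vmallocinfo:
+#   (gdb) lx-vmallocinfo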
diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py
new file mode 100644
index 000000000000..f4477b5def75
--- /dev/null
+++ b/scripts/gdb/linux/xarray.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Xarray helpers
+#
+# Copyright (c) 2025 Broadcom
+#
+# Authors:
+# Florian Fainelli <florian.fainelli@broadcom.com>
+
+import gdb
+
+from linux import utils
+from linux import constants
+
+def xa_is_internal(entry):
+ ulong_type = utils.get_ulong_type()
+ return ((entry.cast(ulong_type) & 3) == 2)
+
+def xa_mk_internal(v):
+ return ((v << 2) | 2)
+
+def xa_is_zero(entry):
+ ulong_type = utils.get_ulong_type()
+ return entry.cast(ulong_type) == xa_mk_internal(257)
+
+def xa_is_node(entry):
+ ulong_type = utils.get_ulong_type()
+ return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
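+# Example (illustrative): for a slot value read into a gdb.Value 'entry'
+# (hypothetical), xa_is_internal() checks that the two low tag bits are 0b10,
+# xa_is_zero() compares against the internal zero entry (257), and
+# xa_is_node() additionally requires the untagged value to exceed 4096.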
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
new file mode 100644
index 000000000000..d4eeed4506fd
--- /dev/null
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -0,0 +1,52 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# loader module
+#
+# Copyright (c) Siemens AG, 2012, 2013
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/scripts/gdb")
+
+try:
+ gdb.parse_and_eval("0")
+ gdb.execute("", to_string=True)
+except:
+ gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
+ "work.\n")
+else:
+ import linux.constants
+ if linux.constants.LX_CONFIG_DEBUG_INFO_REDUCED:
+ raise gdb.GdbError("Reduced debug information will prevent GDB "
+ "from having complete types.\n")
+ import linux.utils
+ import linux.symbols
+ import linux.modules
+ import linux.dmesg
+ import linux.tasks
+ import linux.config
+ import linux.cpus
+ import linux.lists
+ import linux.rbtree
+ import linux.proc
+ import linux.timerlist
+ import linux.clk
+ import linux.genpd
+ import linux.device
+ import linux.vfs
+ import linux.pgtable
+ import linux.radixtree
+ import linux.interrupts
+ import linux.mm
+ import linux.stackdepot
+ import linux.page_owner
+ import linux.slab
+ import linux.vmalloc
+ import linux.kasan