 Documentation/admin-guide/kernel-parameters.txt | 6
 Documentation/core-api/kernel-api.rst | 24
 Documentation/translations/zh_CN/core-api/kernel-api.rst | 2
 MAINTAINERS | 13
 arch/arc/kernel/unwind.c | 12
 arch/arm/boot/compressed/decompress.c | 1
 arch/arm/kernel/module-plts.c | 9
 arch/arm64/kernel/module-plts.c | 13
 arch/ia64/kernel/module.c | 24
 arch/mips/cavium-octeon/setup.c | 2
 arch/mips/kernel/vpe.c | 11
 arch/parisc/kernel/module.c | 51
 arch/powerpc/kernel/module_32.c | 7
 arch/s390/kernel/module.c | 26
 arch/x86/crypto/blake2s-glue.c | 5
 arch/x86/kernel/callthunks.c | 4
 arch/x86/kernel/cpu/intel.c | 2
 arch/x86/kernel/module.c | 4
 arch/x86/mm/debug_pagetables.c | 1
 crypto/asymmetric_keys/asymmetric_type.c | 1
 drivers/accessibility/braille/braille_console.c | 1
 drivers/bus/arm-integrator-lm.c | 1
 drivers/bus/bt1-apb.c | 1
 drivers/bus/bt1-axi.c | 1
 drivers/bus/intel-ixp4xx-eb.c | 1
 drivers/bus/qcom-ebi2.c | 1
 drivers/bus/qcom-ssc-block-bus.c | 1
 drivers/bus/simple-pm-bus.c | 2
 drivers/clk/clk-bm1880.c | 1
 drivers/clk/microchip/clk-mpfs.c | 1
 drivers/clk/renesas/renesas-cpg-mssr.c | 1
 drivers/clk/renesas/rzg2l-cpg.c | 1
 drivers/clocksource/em_sti.c | 1
 drivers/clocksource/sh_cmt.c | 1
 drivers/clocksource/sh_mtu2.c | 1
 drivers/clocksource/sh_tmu.c | 1
 drivers/clocksource/timer-stm32-lp.c | 1
 drivers/clocksource/timer-tegra186.c | 1
 drivers/clocksource/timer-ti-dm.c | 1
 drivers/dma-buf/heaps/cma_heap.c | 1
 drivers/dma-buf/heaps/system_heap.c | 1
 drivers/dma-buf/udmabuf.c | 1
 drivers/dma/ep93xx_dma.c | 1
 drivers/dma/ipu/ipu_idmac.c | 1
 drivers/dma/mv_xor_v2.c | 1
 drivers/dma/sh/shdma-base.c | 1
 drivers/dma/stm32-dmamux.c | 1
 drivers/dma/stm32-mdma.c | 1
 drivers/firmware/broadcom/bcm47xx_nvram.c | 1
 drivers/firmware/efi/runtime-wrappers.c | 2
 drivers/firmware/efi/vars.c | 2
 drivers/hwspinlock/hwspinlock_core.c | 1
 drivers/infiniband/ulp/srp/ib_srp.c | 5
 drivers/interconnect/core.c | 5
 drivers/iommu/sun50i-iommu.c | 1
 drivers/irqchip/irq-al-fic.c | 1
 drivers/irqchip/irq-ls-scfg-msi.c | 1
 drivers/irqchip/irq-mbigen.c | 1
 drivers/irqchip/irq-mchp-eic.c | 1
 drivers/irqchip/irq-renesas-intc-irqpin.c | 1
 drivers/irqchip/irq-renesas-irqc.c | 1
 drivers/irqchip/irq-renesas-rza1.c | 1
 drivers/irqchip/irq-renesas-rzg2l.c | 1
 drivers/irqchip/irq-sl28cpld.c | 1
 drivers/irqchip/irq-ti-sci-inta.c | 1
 drivers/irqchip/irq-ti-sci-intr.c | 1
 drivers/macintosh/adb.c | 2
 drivers/mailbox/Kconfig | 2
 drivers/mailbox/rockchip-mailbox.c | 1
 drivers/mfd/altera-sysmgr.c | 1
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
 drivers/nvmem/core.c | 1
 drivers/phy/intel/phy-intel-lgm-combo.c | 1
 drivers/pinctrl/actions/pinctrl-s500.c | 1
 drivers/pinctrl/actions/pinctrl-s700.c | 1
 drivers/pinctrl/actions/pinctrl-s900.c | 1
 drivers/pinctrl/bcm/pinctrl-ns.c | 1
 drivers/pinctrl/mediatek/pinctrl-mt8188.c | 1
 drivers/pinctrl/mediatek/pinctrl-mt8192.c | 1
 drivers/pinctrl/mediatek/pinctrl-mt8365.c | 1
 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c | 1
 drivers/pinctrl/pinctrl-amd.c | 1
 drivers/pinctrl/renesas/pinctrl-rza1.c | 1
 drivers/pinctrl/renesas/pinctrl-rza2.c | 1
 drivers/pinctrl/renesas/pinctrl-rzg2l.c | 1
 drivers/pinctrl/renesas/pinctrl-rzn1.c | 1
 drivers/pinctrl/renesas/pinctrl-rzv2m.c | 1
 drivers/platform/x86/intel/ifs/sysfs.c | 2
 drivers/power/reset/as3722-poweroff.c | 1
 drivers/power/reset/gpio-poweroff.c | 1
 drivers/power/reset/gpio-restart.c | 1
 drivers/power/reset/keystone-reset.c | 1
 drivers/power/reset/ltc2952-poweroff.c | 1
 drivers/power/reset/mt6323-poweroff.c | 1
 drivers/power/reset/regulator-poweroff.c | 1
 drivers/power/reset/restart-poweroff.c | 1
 drivers/power/reset/tps65086-restart.c | 1
 drivers/power/supply/power_supply_core.c | 1
 drivers/power/supply/wm97xx_battery.c | 1
 drivers/regulator/stm32-pwr.c | 1
 drivers/remoteproc/remoteproc_core.c | 1
 drivers/reset/reset-lantiq.c | 1
 drivers/reset/reset-microchip-sparx5.c | 1
 drivers/reset/reset-mpfs.c | 1
 drivers/scsi/esas2r/esas2r_ioctl.c | 2
 drivers/soc/apple/apple-pmgr-pwrstate.c | 1
 drivers/soc/fujitsu/a64fx-diag.c | 1
 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 2
 drivers/video/fbdev/wm8505fb.c | 1
 drivers/video/fbdev/wmt_ge_rops.c | 1
 fs/binfmt_elf.c | 1
 fs/nfs_common/nfs_ssc.c | 1
 fs/unicode/utf8-core.c | 1
 include/linux/dynamic_debug.h | 68
 include/linux/kallsyms.h | 7
 include/linux/module.h | 141
 include/linux/module_symbol.h | 17
 include/linux/semaphore.h | 10
 kernel/Makefile | 1
 kernel/dma/map_benchmark.c | 1
 kernel/events/hw_breakpoint_test.c | 1
 kernel/kallsyms.c | 5
 kernel/kallsyms_selftest.c | 6
 kernel/livepatch/core.c | 3
 kernel/module/Kconfig | 100
 kernel/module/Makefile | 6
 kernel/module/decompress.c | 6
 kernel/module/dups.c | 246
 kernel/module/internal.h | 140
 kernel/module/kallsyms.c | 78
 kernel/module/kdb.c | 17
 kernel/module/kmod.c (renamed from kernel/kmod.c) | 49
 kernel/module/main.c | 1091
 kernel/module/procfs.c | 16
 kernel/module/stats.c | 430
 kernel/module/strict_rwx.c | 99
 kernel/module/tracking.c | 7
 kernel/module/tree_lookup.c | 39
 kernel/params.c | 2
 kernel/printk/printk.c | 2
 kernel/trace/ftrace.c | 3
 kernel/trace/rv/reactor_panic.c | 1
 kernel/trace/rv/reactor_printk.c | 1
 kernel/watch_queue.c | 1
 lib/Makefile | 3
 lib/btree.c | 1
 lib/crypto/blake2s-generic.c | 5
 lib/crypto/blake2s.c | 1
 lib/dynamic_debug.c | 51
 lib/pldmfw/pldmfw.c | 1
 lib/test_fprobe.c | 1
 mm/zpool.c | 1
 mm/zswap.c | 1
 net/rxrpc/call_object.c | 6
 scripts/gdb/linux/constants.py.in | 3
 scripts/gdb/linux/modules.py | 4
 scripts/gdb/linux/symbols.py | 4
 scripts/mod/modpost.c | 12
 158 files changed, 1982 insertions(+), 1034 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 56efff519454..fd73fafd120e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3363,6 +3363,12 @@
specified, <module>.async_probe takes precedence for
the specific module.
+ module.enable_dups_trace
+ [KNL] When CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS is set,
+ this means that duplicate request_module() calls will
+ trigger a WARN_ON() instead of a pr_warn(). Note that
+ if MODULE_DEBUG_AUTOLOAD_DUPS_TRACE is set, WARN_ON()s
+ will always be issued and this option does nothing.
module.sig_enforce
[KNL] When CONFIG_MODULE_SIG is set, this means that
modules without (valid) signatures will fail to load.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 62f961610773..9b3f3e5f5a95 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -220,12 +220,30 @@ relay interface
Module Support
==============
-Module Loading
---------------
+Kernel module auto-loading
+--------------------------
-.. kernel-doc:: kernel/kmod.c
+.. kernel-doc:: kernel/module/kmod.c
:export:
+Module debugging
+----------------
+
+.. kernel-doc:: kernel/module/stats.c
+ :doc: module debugging statistics overview
+
+dup_failed_modules - tracks duplicate failed modules
+****************************************************
+
+.. kernel-doc:: kernel/module/stats.c
+ :doc: dup_failed_modules - tracks duplicate failed modules
+
+module statistics debugfs counters
+**********************************
+
+.. kernel-doc:: kernel/module/stats.c
+ :doc: module statistics debugfs counters
+
Inter Module support
--------------------
diff --git a/Documentation/translations/zh_CN/core-api/kernel-api.rst b/Documentation/translations/zh_CN/core-api/kernel-api.rst
index a4b373c48c0c..a1ea7081077c 100644
--- a/Documentation/translations/zh_CN/core-api/kernel-api.rst
+++ b/Documentation/translations/zh_CN/core-api/kernel-api.rst
@@ -226,7 +226,7 @@ kernel/relay.c
该API在以下内核代码中:
-kernel/kmod.c
+kernel/module/kmod.c
模块接口支持
------------
diff --git a/MAINTAINERS b/MAINTAINERS
index efb03e13c404..a53a1a29b10c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11601,16 +11601,6 @@ F: include/linux/kmemleak.h
F: mm/kmemleak.c
F: samples/kmemleak/kmemleak-test.c
-KMOD KERNEL MODULE LOADER - USERMODE HELPER
-M: Luis Chamberlain <mcgrof@kernel.org>
-L: linux-kernel@vger.kernel.org
-L: linux-modules@vger.kernel.org
-S: Maintained
-F: include/linux/kmod.h
-F: kernel/kmod.c
-F: lib/test_kmod.c
-F: tools/testing/selftests/kmod/
-
KMSAN
M: Alexander Potapenko <glider@google.com>
R: Marco Elver <elver@google.com>
@@ -14186,8 +14176,11 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git modules-next
F: include/linux/module.h
+F: include/linux/kmod.h
F: kernel/module/
F: scripts/module*
+F: lib/test_kmod.c
+F: tools/testing/selftests/kmod/
MONOLITHIC POWER SYSTEM PMIC DRIVER
M: Saravanan Sekar <sravanhome@gmail.com>
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 200270a94558..9270d0a713c3 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -369,6 +369,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size)
{
struct unwind_table *table;
+ struct module_memory *core_text;
+ struct module_memory *init_text;
if (table_size <= 0)
return NULL;
@@ -377,11 +379,11 @@ void *unwind_add_table(struct module *module, const void *table_start,
if (!table)
return NULL;
- init_unwind_table(table, module->name,
- module->core_layout.base, module->core_layout.size,
- module->init_layout.base, module->init_layout.size,
- table_start, table_size,
- NULL, 0);
+ core_text = &module->mem[MOD_TEXT];
+ init_text = &module->mem[MOD_INIT_TEXT];
+
+ init_unwind_table(table, module->name, core_text->base, core_text->size,
+ init_text->base, init_text->size, table_start, table_size, NULL, 0);
init_unwind_hdr(table, unw_hdr_alloc);
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 74255e819831..0669851394f0 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -31,6 +31,7 @@
/* Not needed, but used in some headers pulled in by decompressors */
extern char * strstr(const char * s1, const char *s2);
extern size_t strlen(const char *s);
+extern int strcmp(const char *cs, const char *ct);
extern int memcmp(const void *cs, const void *ct, size_t count);
extern char * strchrnul(const char *, int);
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index af7c322ebed6..f5a43fd8c163 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -28,11 +28,6 @@ static const u32 fixed_plts[] = {
#endif
};
-static bool in_init(const struct module *mod, unsigned long loc)
-{
- return loc - (u32)mod->init_layout.base < mod->init_layout.size;
-}
-
static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
int i;
@@ -50,8 +45,8 @@ static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
- struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
- &mod->arch.init;
+ struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
+ &mod->arch.core : &mod->arch.init;
struct plt_entries *plt;
int idx;
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 5a0a8f552a61..543493bf924d 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -65,17 +65,12 @@ static bool plt_entries_equal(const struct plt_entry *a,
(q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
-static bool in_init(const struct module *mod, void *loc)
-{
- return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
-}
-
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym)
{
- struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
- &mod->arch.init;
+ struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
+ &mod->arch.core : &mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries;
int j = i - 1;
@@ -105,8 +100,8 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, u64 val)
{
- struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
- &mod->arch.init;
+ struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
+ &mod->arch.core : &mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries++;
u32 br;
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 8f62cf97f691..3661135da9d9 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -485,19 +485,19 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
return 0;
}
-static inline int
+static inline bool
in_init (const struct module *mod, uint64_t addr)
{
- return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
+ return within_module_init(addr, mod);
}
-static inline int
+static inline bool
in_core (const struct module *mod, uint64_t addr)
{
- return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
+ return within_module_core(addr, mod);
}
-static inline int
+static inline bool
is_internal (const struct module *mod, uint64_t value)
{
return in_init(mod, value) || in_core(mod, value);
@@ -677,7 +677,8 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
break;
case RV_BDREL:
- val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
+ val -= (uint64_t) (in_init(mod, val) ? mod->mem[MOD_INIT_TEXT].base :
+ mod->mem[MOD_TEXT].base);
break;
case RV_LTV:
@@ -812,15 +813,18 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* addresses have been selected...
*/
uint64_t gp;
- if (mod->core_layout.size > MAX_LTOFF)
+ struct module_memory *mod_mem;
+
+ mod_mem = &mod->mem[MOD_DATA];
+ if (mod_mem->size > MAX_LTOFF)
/*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module.
*/
- gp = mod->core_layout.size - MAX_LTOFF / 2;
+ gp = mod_mem->size - MAX_LTOFF / 2;
else
- gp = mod->core_layout.size / 2;
- gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
+ gp = mod_mem->size / 2;
+ gp = (uint64_t) mod_mem->base + ((gp + 7) & -8);
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index a71727f7a608..c5561016f577 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -72,7 +72,7 @@ extern void pci_console_init(const char *arg);
static unsigned long long max_memory = ULLONG_MAX;
static unsigned long long reserve_low_mem;
-DEFINE_SEMAPHORE(octeon_bootbus_sem);
+DEFINE_SEMAPHORE(octeon_bootbus_sem, 1);
EXPORT_SYMBOL(octeon_bootbus_sem);
static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 13294972707b..5c4adc6ff0c1 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -199,18 +199,17 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
for (i = 0; i < hdr->e_shnum; ++i) {
Elf_Shdr *s = &sechdrs[i];
+ struct module_memory *mod_mem;
+
+ mod_mem = &mod->mem[MOD_TEXT];
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL)
continue;
s->sh_entsize =
- get_offset((unsigned long *)&mod->core_layout.size, s);
+ get_offset((unsigned long *)&mod_mem->size, s);
}
-
- if (m == 0)
- mod->core_layout.text_size = mod->core_layout.size;
-
}
}
@@ -641,7 +640,7 @@ static int vpe_elfload(struct vpe *v)
layout_sections(&mod, hdr, sechdrs, secstrings);
}
- v->load_addr = alloc_progmem(mod.core_layout.size);
+ v->load_addr = alloc_progmem(mod.mem[MOD_TEXT].size);
if (!v->load_addr)
return -ENOMEM;
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 7df140545b22..f6e38c4d3904 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -27,9 +27,9 @@
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
* if (in_init(me, (void *)val))
- * val -= (uint32_t)me->init_layout.base;
+ * val -= (uint32_t)me->mem[MOD_INIT_TEXT].base;
* else
- * val -= (uint32_t)me->core_layout.base;
+ * val -= (uint32_t)me->mem[MOD_TEXT].base;
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
@@ -76,25 +76,6 @@
* allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS 4095
-/* three functions to determine where in the module core
- * or init pieces the location is */
-static inline int in_init(struct module *me, void *loc)
-{
- return (loc >= me->init_layout.base &&
- loc <= (me->init_layout.base + me->init_layout.size));
-}
-
-static inline int in_core(struct module *me, void *loc)
-{
- return (loc >= me->core_layout.base &&
- loc <= (me->core_layout.base + me->core_layout.size));
-}
-
-static inline int in_local(struct module *me, void *loc)
-{
- return in_init(me, loc) || in_core(me, loc);
-}
-
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
@@ -302,6 +283,7 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
{
unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
+ struct module_memory *mod_mem;
len = hdr->e_shnum * sizeof(me->arch.section[0]);
me->arch.section = kzalloc(len, GFP_KERNEL);
@@ -346,14 +328,15 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
me->arch.section[s].stub_entries += count;
}
+ mod_mem = &me->mem[MOD_TEXT];
/* align things a bit */
- me->core_layout.size = ALIGN(me->core_layout.size, 16);
- me->arch.got_offset = me->core_layout.size;
- me->core_layout.size += gots * sizeof(struct got_entry);
+ mod_mem->size = ALIGN(mod_mem->size, 16);
+ me->arch.got_offset = mod_mem->size;
+ mod_mem->size += gots * sizeof(struct got_entry);
- me->core_layout.size = ALIGN(me->core_layout.size, 16);
- me->arch.fdesc_offset = me->core_layout.size;
- me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
+ mod_mem->size = ALIGN(mod_mem->size, 16);
+ me->arch.fdesc_offset = mod_mem->size;
+ mod_mem->size += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
@@ -371,7 +354,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
BUG_ON(value == 0);
- got = me->core_layout.base + me->arch.got_offset;
+ got = me->mem[MOD_TEXT].base + me->arch.got_offset;
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
@@ -389,7 +372,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
- Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
+ Elf_Fdesc *fdesc = me->mem[MOD_TEXT].base + me->arch.fdesc_offset;
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -407,7 +390,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
/* Create new one */
fdesc->addr = value;
- fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+ fdesc->gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */
@@ -742,7 +725,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
loc, val);
val += addend;
/* can we reach it locally? */
- if (in_local(me, (void *)val)) {
+ if (within_module(val, me)) {
/* this is the case where the symbol is local
* to the module, but in a different section,
* so stub the jump in case it's more than 22
@@ -801,7 +784,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
- if(in_local(me, (void *)(val + addend))) {
+ if (within_module(val + addend, me)) {
*loc64 = get_fdesc(me, val+addend);
pr_debug("FDESC for %s at %llx points to %llx\n",
strtab + sym->st_name, *loc64,
@@ -839,7 +822,7 @@ register_unwind_table(struct module *me,
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size;
- gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+ gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
@@ -977,7 +960,7 @@ void module_arch_cleanup(struct module *mod)
#ifdef CONFIG_64BIT
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
- unsigned long start_opd = (Elf64_Addr)mod->core_layout.base +
+ unsigned long start_opd = (Elf64_Addr)mod->mem[MOD_TEXT].base +
mod->arch.fdesc_offset;
unsigned long end_opd = start_opd +
mod->arch.fdesc_count * sizeof(Elf64_Fdesc);
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index ea6536171778..816a63fd71fb 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -163,8 +163,7 @@ static uint32_t do_plt_call(void *location,
pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */
- if (location >= mod->core_layout.base
- && location < mod->core_layout.base + mod->core_layout.size)
+ if (within_module_core((unsigned long)location, mod))
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
@@ -322,14 +321,14 @@ notrace int module_trampoline_target(struct module *mod, unsigned long addr,
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
- module->arch.tramp = do_plt_call(module->core_layout.base,
+ module->arch.tramp = do_plt_call(module->mem[MOD_TEXT].base,
(unsigned long)ftrace_caller,
sechdrs, module);
if (!module->arch.tramp)
return -ENOENT;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- module->arch.tramp_regs = do_plt_call(module->core_layout.base,
+ module->arch.tramp_regs = do_plt_call(module->mem[MOD_TEXT].base,
(unsigned long)ftrace_regs_caller,
sechdrs, module);
if (!module->arch.tramp_regs)
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 2d159b32885b..b9dbf0c483ed 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -126,6 +126,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
Elf_Rela *rela;
char *strings;
int nrela, i, j;
+ struct module_memory *mod_mem;
/* Find symbol table and string table. */
symtab = NULL;
@@ -173,14 +174,15 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
/* Increase core size by size of got & plt and set start
offsets for got and plt. */
- me->core_layout.size = ALIGN(me->core_layout.size, 4);
- me->arch.got_offset = me->core_layout.size;
- me->core_layout.size += me->arch.got_size;
- me->arch.plt_offset = me->core_layout.size;
+ mod_mem = &me->mem[MOD_TEXT];
+ mod_mem->size = ALIGN(mod_mem->size, 4);
+ me->arch.got_offset = mod_mem->size;
+ mod_mem->size += me->arch.got_size;
+ me->arch.plt_offset = mod_mem->size;
if (me->arch.plt_size) {
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
me->arch.plt_size += PLT_ENTRY_SIZE;
- me->core_layout.size += me->arch.plt_size;
+ mod_mem->size += me->arch.plt_size;
}
return 0;
}
@@ -304,7 +306,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_initialized == 0) {
- Elf_Addr *gotent = me->core_layout.base +
+ Elf_Addr *gotent = me->mem[MOD_TEXT].base +
me->arch.got_offset +
info->got_offset;
@@ -329,7 +331,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
rc = apply_rela_bits(loc, val, 0, 64, 0, write);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
- val += (Elf_Addr) me->core_layout.base - loc;
+ val += (Elf_Addr) me->mem[MOD_TEXT].base - loc;
rc = apply_rela_bits(loc, val, 1, 32, 1, write);
}
break;
@@ -345,7 +347,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
char *plt_base;
char *ip;
- plt_base = me->core_layout.base + me->arch.plt_offset;
+ plt_base = me->mem[MOD_TEXT].base + me->arch.plt_offset;
ip = plt_base + info->plt_offset;
*(int *)insn = 0x0d10e310; /* basr 1,0 */
*(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
@@ -375,7 +377,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
- val = (Elf_Addr) me->core_layout.base +
+ val = (Elf_Addr) me->mem[MOD_TEXT].base +
me->arch.plt_offset +
info->plt_offset;
val += rela->r_addend - loc;
@@ -397,7 +399,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend -
- ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
+ ((Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOTOFF32)
@@ -407,7 +409,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
- val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
+ val = (Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0, write);
@@ -515,7 +517,7 @@ int module_finalize(const Elf_Ehdr *hdr,
!nospec_disable && me->arch.plt_size) {
unsigned int *ij;
- ij = me->core_layout.base + me->arch.plt_offset +
+ ij = me->mem[MOD_TEXT].base + me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index aaba21230528..0313f9673f56 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/cpufeature.h>
@@ -72,6 +71,4 @@ static int __init blake2s_mod_init(void)
return 0;
}
-module_init(blake2s_mod_init);
-
-MODULE_LICENSE("GPL v2");
+subsys_initcall(blake2s_mod_init);
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index ffea98f9064b..22ab13966427 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -330,8 +330,8 @@ void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
struct module *mod)
{
struct core_text ct = {
- .base = (unsigned long)mod->core_layout.base,
- .end = (unsigned long)mod->core_layout.base + mod->core_layout.size,
+ .base = (unsigned long)mod->mem[MOD_TEXT].base,
+ .end = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
.name = mod->name,
};
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1c648b09e053..1c4639588ff9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1177,7 +1177,7 @@ static const struct {
static struct ratelimit_state bld_ratelimit;
static unsigned int sysctl_sld_mitigate = 1;
-static DEFINE_SEMAPHORE(buslock_sem);
+static DEFINE_SEMAPHORE(buslock_sem, 1);
#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 84ad0e61ba6e..b05f62ee2344 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -362,8 +362,8 @@ int module_finalize(const Elf_Ehdr *hdr,
}
if (locks) {
void *lseg = (void *)locks->sh_addr;
- void *text = me->core_layout.base;
- void *text_end = text + me->core_layout.text_size;
+ void *text = me->mem[MOD_TEXT].base;
+ void *text_end = text + me->mem[MOD_TEXT].size;
alternatives_smp_module_add(me, me->name,
lseg, lseg + locks->sh_size,
text, text_end);
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index 092ea436c7e6..b43301cb2a80 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -71,6 +71,5 @@ static void __exit pt_dump_debug_exit(void)
module_init(pt_dump_debug_init);
module_exit(pt_dump_debug_exit);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 41a2f0eb4ce4..a5da8ccd353e 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -17,7 +17,6 @@
#include <keys/user-type.h>
#include "asymmetric_keys.h"
-MODULE_LICENSE("GPL");
const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
[VERIFYING_MODULE_SIGNATURE] = "mod sig",
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index c4d54a5326b1..06b43b678d6e 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -24,7 +24,6 @@
MODULE_AUTHOR("samuel.thibault@ens-lyon.org");
MODULE_DESCRIPTION("braille device");
-MODULE_LICENSE("GPL");
/*
* Braille device support part.
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 2344d560b144..b715c8ab36e8 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -126,4 +126,3 @@ static struct platform_driver integrator_ap_lm_driver = {
module_platform_driver(integrator_ap_lm_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Integrator AP Logical Module driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index 63b1b4a76671..e97c1d1c7578 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -416,4 +416,3 @@ module_platform_driver(bt1_apb_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 70e49a6e5374..4007e7322cf2 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -309,4 +309,3 @@ module_platform_driver(bt1_axi_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
index 91db001eb69a..f5ba6bee6fd8 100644
--- a/drivers/bus/intel-ixp4xx-eb.c
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -423,4 +423,3 @@ static struct platform_driver ixp4xx_exp_driver = {
module_platform_driver(ixp4xx_exp_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index 663c82749222..c1fef1b4bd89 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -403,4 +403,3 @@ static struct platform_driver qcom_ebi2_driver = {
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index eedeb29a5ff3..3fef18a43c01 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -386,4 +386,3 @@ module_platform_driver(qcom_ssc_block_bus_driver);
MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 7afe1947e1c0..4da77ca7b75a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Simple Power-Managed Bus Driver
*
@@ -138,4 +139,3 @@ module_platform_driver(simple_pm_bus_driver);
MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index fad78a22218e..2a19e50fff68 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -949,4 +949,3 @@ module_platform_driver(bm1880_clk_driver);
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Clock driver for Bitmain BM1880 SoC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 4f0a19db7ed7..d85b345f4c08 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -513,4 +513,3 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index b9f210437ddf..38c06f82b91d 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -1127,4 +1127,3 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
}
MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 4bf40f6ccd1d..93b02cdc98c2 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1440,4 +1440,3 @@ static int __init rzg2l_cpg_init(void)
subsys_initcall(rzg2l_cpg_init);
MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index c04b47bd4868..ca8d29ab70da 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -363,4 +363,3 @@ module_exit(em_sti_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 8b2e079d9df2..e81c588d9afe 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1174,4 +1174,3 @@ module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 169a1fccc497..dd19553ef0b9 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -530,4 +530,3 @@ module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 932f31a7c5be..beffff81c00f 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -674,4 +674,3 @@ module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index db2841d0beb8..4a10fb940de5 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -218,4 +218,3 @@ module_platform_driver(stm32_clkevent_lp_driver);
MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index ea742889ee06..0c52c0a21830 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -511,4 +511,3 @@ module_platform_driver(tegra186_wdt_driver);
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index b24b903a8822..1eb39834bad4 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -1283,5 +1283,4 @@ static struct platform_driver omap_dm_timer_driver = {
module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 1131fb943992..a7f048048864 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -407,4 +407,3 @@ static int add_default_cma_heap(void)
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index e8bd10e60998..79c03f5b4e28 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -440,4 +440,3 @@ static int system_heap_create(void)
return 0;
}
module_init(system_heap_create);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 740d6e426ee9..01f2e86f3f7c 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -430,4 +430,3 @@ module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d19ea885c63e..5338a94f1a69 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1431,4 +1431,3 @@ subsys_initcall(ep93xx_dma_module_init);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index baab1ca9f621..d799b99c18bd 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1797,6 +1797,5 @@ static int __init ipu_init(void)
subsys_initcall(ipu_init);
MODULE_DESCRIPTION("IPU core driver");
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 89790beba305..b4de7d4a3105 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -917,4 +917,3 @@ static struct platform_driver mv_xor_v2_driver = {
module_platform_driver(mv_xor_v2_driver);
MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 158e5e7defae..588c5f409a80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -1047,6 +1047,5 @@ static void __exit shdma_exit(void)
}
module_exit(shdma_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 46b884d46188..e415bd9f4f2b 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -398,4 +398,3 @@ arch_initcall(stm32_dmamux_init);
MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 84e7f4f4a800..1d0e9dd72ab3 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1814,4 +1814,3 @@ subsys_initcall(stm32_mdma_init);
MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 5f47dbf4889a..0ea5206be4c9 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -255,4 +255,3 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 1fba4e09cdcf..a400c4312c82 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -158,7 +158,7 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
* none of the remaining functions are actually ever called at runtime.
* So let's just use a single lock to serialize all Runtime Services calls.
*/
-static DEFINE_SEMAPHORE(efi_runtime_lock);
+static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
/*
* Expose the EFI runtime lock to the UV platform
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index bd75b87f5fc1..bfc5fa6aa47b 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -21,7 +21,7 @@
/* Private pointer to registered efivars */
static struct efivars *__efivars;
-static DEFINE_SEMAPHORE(efivars_lock);
+static DEFINE_SEMAPHORE(efivars_lock, 1);
static efi_status_t check_var_size(bool nonblocking, u32 attributes,
unsigned long size)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 22b8f2a70b3b..ada694ba9f95 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -949,6 +949,5 @@ struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3446fbf5a560..0e513a7e5ac8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -62,11 +62,6 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
-#if !defined(CONFIG_DYNAMIC_DEBUG)
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
-#define DYNAMIC_DEBUG_BRANCH(descriptor) false
-#endif
-
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 0edf85ba055e..ec46bcb16d5e 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -13,7 +13,6 @@
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -1106,7 +1105,3 @@ static int __init icc_init(void)
}
device_initcall(icc_init);
-
-MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
-MODULE_DESCRIPTION("Interconnect Driver Core");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 2d993d0cea7d..74c5cb93e900 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -1076,4 +1076,3 @@ builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);
MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index 886de028a901..dfb761e86c9c 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -26,7 +26,6 @@
MODULE_AUTHOR("Talel Shenhar");
MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver");
-MODULE_LICENSE("GPL v2");
enum al_fic_state {
AL_FIC_UNCONFIGURED = 0,
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 527c90e0920e..f5ba3f9f8415 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -430,4 +430,3 @@ module_platform_driver(ls_scfg_msi_driver);
MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 30ca1bf7fc8b..eada5e0e3eb9 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -392,5 +392,4 @@ module_platform_driver(mbigen_platform_driver);
MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
-MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HiSilicon MBI Generator driver");
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index c726a19837d2..5dcd94c000a2 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -276,5 +276,4 @@ IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
MODULE_DESCRIPTION("Microchip External Interrupt Controller");
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index e83756aca14e..26e4c17a7bf2 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -608,4 +608,3 @@ module_exit(intc_irqpin_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 1ee5e9941f67..49b446b396f9 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -270,4 +270,3 @@ module_exit(irqc_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index 72c06e883d1c..e4c99c2e0373 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -281,4 +281,3 @@ module_exit(rza1_irqc_exit);
MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 25fd8ee66565..4bbfa2b0a4df 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -390,4 +390,3 @@ IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
index f2172240172c..e50f9eaba4cd 100644
--- a/drivers/irqchip/irq-sl28cpld.c
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -92,4 +92,3 @@ module_platform_driver(sl28cpld_intc_driver);
MODULE_DESCRIPTION("sl28cpld Interrupt Controller Driver");
MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index a6ecc53d055c..7133f9fa6fd9 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -743,4 +743,3 @@ module_platform_driver(ti_sci_inta_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 020ddf29efb8..1186f1e431a3 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -303,4 +303,3 @@ module_platform_driver(ti_sci_intr_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 57e987cf84b2..057b0221f695 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -80,7 +80,7 @@ static struct adb_driver *adb_controller;
BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
-static DEFINE_SEMAPHORE(adb_probe_mutex);
+static DEFINE_SEMAPHORE(adb_probe_mutex, 1);
static int sleepy_trackpad;
static int autopoll_devs;
int __adb_probe_sync;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 1495965bc394..af6b0f5b491d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -259,7 +259,7 @@ config MTK_CMDQ_MBOX
during the vblank.
config ZYNQMP_IPI_MBOX
- bool "Xilinx ZynqMP IPI Mailbox"
+ tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
help
Say yes here to add support for Xilinx IPI mailbox driver.
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index e02d3c9e3693..28d7bfee2641 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -254,7 +254,6 @@ static struct platform_driver rockchip_mbox_driver = {
module_platform_driver(rockchip_mbox_driver);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 5d3715a28b28..af205813b281 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -198,4 +198,3 @@ module_exit(altr_sysmgr_exit);
MODULE_AUTHOR("Thor Thayer <>");
MODULE_DESCRIPTION("SOCFPGA System Manager driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3bb5ea570c87..637d162bbcfa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -297,7 +297,7 @@ const u32 dmae_reg_go_c[] = {
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
-static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static DEFINE_SEMAPHORE(bnx2x_prev_sem, 1);
static LIST_HEAD(bnx2x_prev_list);
/* Forward declaration */
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index a62973d010ff..342cd380b420 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -2111,4 +2111,3 @@ module_exit(nvmem_exit);
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
MODULE_DESCRIPTION("nvmem Driver Core");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index 6010e246d52e..8c764c457c1c 100644
--- a/drivers/phy/intel/phy-intel-lgm-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
@@ -616,4 +616,3 @@ static struct platform_driver intel_cbphy_driver = {
module_platform_driver(intel_cbphy_driver);
MODULE_DESCRIPTION("Intel Combo-phy driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/actions/pinctrl-s500.c b/drivers/pinctrl/actions/pinctrl-s500.c
index ced778079b76..3bed4b8d08e6 100644
--- a/drivers/pinctrl/actions/pinctrl-s500.c
+++ b/drivers/pinctrl/actions/pinctrl-s500.c
@@ -1724,4 +1724,3 @@ module_exit(s500_pinctrl_exit);
MODULE_AUTHOR("Actions Semi Inc.");
MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
MODULE_DESCRIPTION("Actions Semi S500 SoC Pinctrl Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index fd00940a5799..c2b472660e53 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1908,4 +1908,3 @@ module_exit(s700_pinctrl_exit);
MODULE_AUTHOR("Actions Semi Inc.");
MODULE_DESCRIPTION("Actions Semi S700 Soc Pinctrl Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
index 811249a8011e..8638d3007cd9 100644
--- a/drivers/pinctrl/actions/pinctrl-s900.c
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -1827,4 +1827,3 @@ module_exit(s900_pinctrl_exit);
MODULE_AUTHOR("Actions Semi Inc.");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi S900 SoC Pinctrl Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index 465cc96814a1..f80630a74d34 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -299,5 +299,4 @@ static struct platform_driver ns_pinctrl_driver = {
module_platform_driver(ns_pinctrl_driver);
MODULE_AUTHOR("Rafał Miłecki");
-MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, ns_pinctrl_of_match_table);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
index 6a3d0126288e..c067e043e619 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8188.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
@@ -1670,5 +1670,4 @@ static int __init mt8188_pinctrl_init(void)
arch_initcall(mt8188_pinctrl_init);
-MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MediaTek MT8188 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index 9695f4ec6aba..dee1b3aefd36 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1431,5 +1431,4 @@ static int __init mt8192_pinctrl_init(void)
}
arch_initcall(mt8192_pinctrl_init);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek MT8192 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index db4492e9ee67..75a505035e96 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -495,6 +495,5 @@ static int __init mtk_pinctrl_init(void)
}
arch_initcall(mtk_pinctrl_init);
-MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MediaTek MT8365 Pinctrl Driver");
MODULE_AUTHOR("Zhiyong Tao <zhiyong.tao@mediatek.com>");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index ff5bcea172e8..4e12b3768d65 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -2046,7 +2046,6 @@ static int __init npcm7xx_pinctrl_register(void)
}
arch_initcall(npcm7xx_pinctrl_register);
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("jordan_hargrave@dell.com");
MODULE_AUTHOR("tomer.maimon@nuvoton.com");
MODULE_DESCRIPTION("Nuvoton NPCM7XX Pinctrl and GPIO driver");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9236a132c7ba..0cc00e9dbcf0 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1213,6 +1213,5 @@ static struct platform_driver amd_gpio_driver = {
module_platform_driver(amd_gpio_driver);
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ken Xue <Ken.Xue@amd.com>, Jeff Wu <Jeff.Wu@amd.com>");
MODULE_DESCRIPTION("AMD GPIO pinctrl driver");
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 529c0fc4ec06..48173355a040 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -1407,4 +1407,3 @@ core_initcall(rza1_pinctrl_init);
MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org");
MODULE_DESCRIPTION("Pin and gpio controller driver for Reneas RZ/A1 SoC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index c0a04f1ee994..40b1326a1077 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -514,4 +514,3 @@ core_initcall(rza2_pinctrl_init);
MODULE_AUTHOR("Chris Brandt <chris.brandt@renesas.com>");
MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/A2 SoC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 04b31f0c6b34..9511d920565e 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -1570,4 +1570,3 @@ core_initcall(rzg2l_pinctrl_init);
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/G2L family");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index 9158c1757492..d73741651419 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -952,4 +952,3 @@ subsys_initcall(_pinctrl_drv_register);
MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/N1 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 3b65a71abd9a..e5472293bc7f 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -1117,4 +1117,3 @@ core_initcall(rzv2m_pinctrl_init);
MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/V2M");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c
index d856d6b8fc03..01b7502f46b0 100644
--- a/drivers/platform/x86/intel/ifs/sysfs.c
+++ b/drivers/platform/x86/intel/ifs/sysfs.c
@@ -13,7 +13,7 @@
* Protects against simultaneous tests on multiple cores, or
 * reloading the scan file while a test is in progress
*/
-static DEFINE_SEMAPHORE(ifs_sem);
+static DEFINE_SEMAPHORE(ifs_sem, 1);
/*
* The sysfs interface to check additional details of last test
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index 661e1c67f82e..80edff1a556f 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -84,4 +84,3 @@ module_platform_driver(as3722_poweroff_driver);
MODULE_DESCRIPTION("Power off driver for ams AS3722 PMIC Device");
MODULE_ALIAS("platform:as3722-power-off");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 1c5af2fef142..84b3c3528afa 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -105,5 +105,4 @@ module_platform_driver(gpio_poweroff_driver);
MODULE_AUTHOR("Jamie Lentin <jm@lentin.co.uk>");
MODULE_DESCRIPTION("GPIO poweroff driver");
-MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-gpio");
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 5466eeea261c..35d981d5e6c8 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -139,4 +139,3 @@ module_platform_driver(gpio_restart_driver);
MODULE_AUTHOR("David Riley <davidriley@chromium.org>");
MODULE_DESCRIPTION("GPIO restart driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index c720112db704..83a4e1c9bf94 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -169,5 +169,4 @@ module_platform_driver(rsctrl_driver);
MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
MODULE_DESCRIPTION("Texas Instruments keystone reset driver");
-MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 65d9528cc989..eea05921a054 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -317,4 +317,3 @@ module_platform_driver(ltc2952_poweroff_driver);
MODULE_AUTHOR("René Moll <rene.moll@xsens.com>");
MODULE_DESCRIPTION("LTC PowerPath power-off driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index d90e76fcb938..108167f7738b 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -97,4 +97,3 @@ module_platform_driver(mt6323_pwrc_driver);
MODULE_DESCRIPTION("Poweroff driver for MT6323 PMIC");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index 20701203935f..7f87fbb8b051 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -79,5 +79,4 @@ module_platform_driver(regulator_poweroff_driver);
MODULE_AUTHOR("Michael Klein <michael@fossekall.de>");
MODULE_DESCRIPTION("Regulator poweroff driver");
-MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-regulator");
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index 04d4228119b2..28f1822db162 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -59,5 +59,4 @@ module_platform_driver(restart_poweroff_driver);
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
MODULE_DESCRIPTION("restart poweroff driver");
-MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-restart");
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
index 78b89f745a3d..5ec819eac7da 100644
--- a/drivers/power/reset/tps65086-restart.c
+++ b/drivers/power/reset/tps65086-restart.c
@@ -95,4 +95,3 @@ module_platform_driver(tps65086_restart_driver);
MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
MODULE_DESCRIPTION("TPS65086 restart driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index bd3c37309d20..a980d60c437b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1485,4 +1485,3 @@ MODULE_DESCRIPTION("Universal power supply monitor class");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
"Szabolcs Gyurko, "
"Anton Vorontsov <cbou@mail.ru>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index a0e1eaa25d93..f4b190adb335 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -271,6 +271,5 @@ static struct platform_driver wm97xx_bat_driver = {
module_platform_driver(wm97xx_bat_driver);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("WM97xx battery driver");
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index 68aa9d92953d..0e101dff6dda 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -183,4 +183,3 @@ module_platform_driver(stm32_pwr_driver);
MODULE_DESCRIPTION("STM32MP1 PWR voltage regulator driver");
MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 80072b6b6283..695cce218e8c 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2766,5 +2766,4 @@ static void __exit remoteproc_exit(void)
}
module_exit(remoteproc_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");
diff --git a/drivers/reset/reset-lantiq.c b/drivers/reset/reset-lantiq.c
index b936cfe85641..549ba45d8597 100644
--- a/drivers/reset/reset-lantiq.c
+++ b/drivers/reset/reset-lantiq.c
@@ -207,4 +207,3 @@ module_platform_driver(lantiq_rcu_reset_driver);
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
MODULE_DESCRIPTION("Lantiq XWAY RCU Reset Controller Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index f3528dd1d084..ead25942061d 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -179,4 +179,3 @@ postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/reset/reset-mpfs.c b/drivers/reset/reset-mpfs.c
index e003e50590ec..e71ab73092ab 100644
--- a/drivers/reset/reset-mpfs.c
+++ b/drivers/reset/reset-mpfs.c
@@ -153,5 +153,4 @@ module_auxiliary_driver(mpfs_reset_driver);
MODULE_DESCRIPTION("Microchip PolarFire SoC Reset Driver");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(MCHP_CLK_MPFS);
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index e003d923acbf..055d2e87a2c8 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -56,7 +56,7 @@ dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;
-static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore, 1);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
struct esas2r_request *,
struct esas2r_sg_context *,
diff --git a/drivers/soc/apple/apple-pmgr-pwrstate.c b/drivers/soc/apple/apple-pmgr-pwrstate.c
index a3e2bc1d2686..d62a776c89a1 100644
--- a/drivers/soc/apple/apple-pmgr-pwrstate.c
+++ b/drivers/soc/apple/apple-pmgr-pwrstate.c
@@ -322,6 +322,5 @@ static struct platform_driver apple_pmgr_ps_driver = {
MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs");
-MODULE_LICENSE("GPL v2");
module_platform_driver(apple_pmgr_ps_driver);
diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c
index d87f348427bf..524fbfeb94e3 100644
--- a/drivers/soc/fujitsu/a64fx-diag.c
+++ b/drivers/soc/fujitsu/a64fx-diag.c
@@ -149,6 +149,5 @@ static struct platform_driver a64fx_diag_driver = {
module_platform_driver(a64fx_diag_driver);
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>");
MODULE_DESCRIPTION("A64FX diag driver");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 40dd62cf7399..90a3958d1f29 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -149,7 +149,7 @@ static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
-static DEFINE_SEMAPHORE(g_free_fragments_mutex);
+static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index de84d47dcbcc..10a8b1250103 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -403,5 +403,4 @@ module_platform_driver(wm8505fb_driver);
MODULE_AUTHOR("Ed Spiridonov <edo.rus@gmail.com>");
MODULE_DESCRIPTION("Framebuffer driver for WMT WM8505");
-MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_dt_ids);
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 6196b9a6c44d..3ed143457d22 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -169,5 +169,4 @@ module_platform_driver(wmt_ge_rops_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("Accelerators for raster operations using "
"WonderMedia Graphics Engine");
-MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_dt_ids);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8a884e795f6a..0f0d10697240 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2174,7 +2174,6 @@ static void __exit exit_elf_binfmt(void)
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
-MODULE_LICENSE("GPL");
#ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
#include "binfmt_elf_test.c"
diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c
index 7c1509e968c8..832246b22c51 100644
--- a/fs/nfs_common/nfs_ssc.c
+++ b/fs/nfs_common/nfs_ssc.c
@@ -12,7 +12,6 @@
#include <linux/nfs_ssc.h>
#include "../nfs/nfs4_fs.h"
-MODULE_LICENSE("GPL");
struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl);
diff --git a/fs/unicode/utf8-core.c b/fs/unicode/utf8-core.c
index 67aaadc3ab07..8395066341a4 100644
--- a/fs/unicode/utf8-core.c
+++ b/fs/unicode/utf8-core.c
@@ -214,4 +214,3 @@ void utf8_unload(struct unicode_map *um)
}
EXPORT_SYMBOL(utf8_unload);
-MODULE_LICENSE("GPL v2");
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 41682278d2e8..061dd84d09f3 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -128,17 +128,16 @@ struct ddebug_class_param {
const struct ddebug_class_map *map;
};
-#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
-
-int ddebug_add_module(struct _ddebug_info *dyndbg, const char *modname);
+/*
+ * pr_debug() and friends are globally enabled or modules have selectively
+ * enabled them.
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
-extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
- const char *modname);
-
struct device;
extern __printf(3, 4)
@@ -287,10 +286,6 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-struct kernel_param;
-int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
-int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
-
/* for test only, generally expect drm.debug style macro wrappers */
#define __pr_debug_cls(cls, fmt, ...) do { \
BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
@@ -298,21 +293,38 @@ int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
} while (0)
-#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
+#else /* !(CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE)) */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>
-static inline int ddebug_add_module(struct _ddebug_info *dinfo, const char *modname)
-{
- return 0;
-}
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
-static inline int ddebug_remove_module(const char *mod)
-{
- return 0;
-}
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE) */
+
+
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+#else
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
@@ -326,25 +338,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
return -EINVAL;
}
-#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
-#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
-#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
- do { if (0) \
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii); \
- } while (0)
-
struct kernel_param;
static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
{ return 0; }
static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
{ return 0; }
-#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
+#endif
+
extern const struct kernel_param_ops param_ops_dyndbg_classes;
-#endif
+#endif /* _DYNAMIC_DEBUG_H */
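The reorganized header above only wires pr_debug()/dev_dbg() callers up to the dynamic debug machinery when CONFIG_DYNAMIC_DEBUG is set, or when a module built against CONFIG_DYNAMIC_DEBUG_CORE opts in with DYNAMIC_DEBUG_MODULE; otherwise the "if (0)" stubs keep format checking but compile away. A minimal, hypothetical caller for illustration (not part of this patch):

#include <linux/printk.h>

static int example_probe(void)
{
	/*
	 * With dynamic debug enabled for this unit, pr_debug() expands to
	 * dynamic_pr_debug() and can be toggled at runtime through
	 * <debugfs>/dynamic_debug/control; otherwise it hits the
	 * "do { if (0) printk(...); } while (0)" stub above, which still
	 * type-checks the format string but generates no output.
	 */
	pr_debug("example: probing, state=%d\n", 42);
	return 0;
}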
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 0065209cc004..fe3c9993b5bf 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -67,8 +67,7 @@ static inline void *dereference_symbol_descriptor(void *ptr)
#ifdef CONFIG_KALLSYMS
unsigned long kallsyms_sym_address(int idx);
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
void *data);
int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
const char *name, void *data);
@@ -166,8 +165,8 @@ static inline bool kallsyms_show_value(const struct cred *cred)
return false;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long), void *data)
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data)
{
return -EOPNOTSUPP;
}
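The iterator callbacks above lose their struct module * argument; only the symbol name and address are passed now. A hedged sketch of a callback using the new signature (the lookup structure and names are made up for illustration):

#include <linux/kallsyms.h>
#include <linux/string.h>

struct sym_lookup {
	const char *name;
	unsigned long addr;
};

static int example_find_symbol(void *data, const char *name, unsigned long addr)
{
	struct sym_lookup *lookup = data;

	if (strcmp(name, lookup->name) == 0) {
		lookup->addr = addr;
		return 1;	/* a non-zero return stops the iteration */
	}
	return 0;
}

/* usage: struct sym_lookup s = { .name = "jiffies" };
 *        kallsyms_on_each_symbol(example_find_symbol, &s); */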
diff --git a/include/linux/module.h b/include/linux/module.h
index 3730ed99e5f7..9e56763dff81 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -27,6 +27,7 @@
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
#include <linux/static_call_types.h>
+#include <linux/dynamic_debug.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -320,17 +321,47 @@ struct mod_tree_node {
struct latch_tree_node node;
};
-struct module_layout {
- /* The actual code + data. */
+enum mod_mem_type {
+ MOD_TEXT = 0,
+ MOD_DATA,
+ MOD_RODATA,
+ MOD_RO_AFTER_INIT,
+ MOD_INIT_TEXT,
+ MOD_INIT_DATA,
+ MOD_INIT_RODATA,
+
+ MOD_MEM_NUM_TYPES,
+ MOD_INVALID = -1,
+};
+
+#define mod_mem_type_is_init(type) \
+ ((type) == MOD_INIT_TEXT || \
+ (type) == MOD_INIT_DATA || \
+ (type) == MOD_INIT_RODATA)
+
+#define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type))
+
+#define mod_mem_type_is_text(type) \
+ ((type) == MOD_TEXT || \
+ (type) == MOD_INIT_TEXT)
+
+#define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type))
+
+#define mod_mem_type_is_core_data(type) \
+ (mod_mem_type_is_core(type) && \
+ mod_mem_type_is_data(type))
+
+#define for_each_mod_mem_type(type) \
+ for (enum mod_mem_type (type) = 0; \
+ (type) < MOD_MEM_NUM_TYPES; (type)++)
+
+#define for_class_mod_mem_type(type, class) \
+ for_each_mod_mem_type(type) \
+ if (mod_mem_type_is_##class(type))
+
+struct module_memory {
void *base;
- /* Total size. */
unsigned int size;
- /* The size of the executable code. */
- unsigned int text_size;
- /* Size of RO section of the module (text+rodata) */
- unsigned int ro_size;
- /* Size of RO after init section */
- unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -339,9 +370,9 @@ struct module_layout {
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Only touch one cacheline for common rbtree-for-core-layout case. */
-#define __module_layout_align ____cacheline_aligned
+#define __module_memory_align ____cacheline_aligned
#else
-#define __module_layout_align
+#define __module_memory_align
#endif
struct mod_kallsyms {
@@ -426,12 +457,7 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* Core layout: rbtree is accessed frequently, so keep together. */
- struct module_layout core_layout __module_layout_align;
- struct module_layout init_layout;
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- struct module_layout data_layout;
-#endif
+ struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align;
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -554,6 +580,9 @@ struct module {
struct error_injection_entry *ei_funcs;
unsigned int num_ei_funcs;
#endif
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+ struct _ddebug_info dyndbg_info;
+#endif
} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
@@ -581,23 +610,35 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
+static inline bool within_module_mem_type(unsigned long addr,
+ const struct module *mod,
+ enum mod_mem_type type)
+{
+ unsigned long base, size;
+
+ base = (unsigned long)mod->mem[type].base;
+ size = mod->mem[type].size;
+ return addr - base < size;
+}
+
static inline bool within_module_core(unsigned long addr,
const struct module *mod)
{
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- if ((unsigned long)mod->data_layout.base <= addr &&
- addr < (unsigned long)mod->data_layout.base + mod->data_layout.size)
- return true;
-#endif
- return (unsigned long)mod->core_layout.base <= addr &&
- addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
+ for_class_mod_mem_type(type, core) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module_init(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->init_layout.base <= addr &&
- addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
+ for_class_mod_mem_type(type, init) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module(unsigned long addr, const struct module *mod)
@@ -622,10 +663,46 @@ void symbol_put_addr(void *addr);
to handle the error case (which only happens with rmmod --wait). */
extern void __module_get(struct module *module);
-/* This is the Right Way to get a module: if it fails, it's being removed,
- * so pretend it's not there. */
+/**
+ * try_module_get() - take module refcount unless module is being removed
+ * @module: the module we should check for
+ *
+ * Only try to get a module reference count if the module is not being removed.
+ * This call will fail if the module is already being removed.
+ *
+ * Care must also be taken to ensure the module exists and is alive prior to
+ * usage of this call. This can be guaranteed through two means:
+ *
+ * 1) Direct protection: you know an earlier caller must have increased the
+ * module reference through __module_get(). This can typically be achieved
+ * by having another entity other than the module itself increment the
+ * module reference count.
+ *
+ * 2) Implied protection: there is an implied protection against module
+ * removal. An example of this is the implied protection used by kernfs /
+ * sysfs. The sysfs store / read file operations are guaranteed to exist
+ * through the use of kernfs's active reference (see kernfs_active()) and a
+ * sysfs / kernfs file removal cannot happen while the same file is still
+ * active. Therefore, if a sysfs file is being read or written to, the module
+ * which created it must still exist. It is therefore safe to use
+ * try_module_get() on module sysfs store / read ops.
+ *
+ * One of the real values to try_module_get() is the module_is_live() check
+ * which ensures that the caller of try_module_get() can yield to userspace
+ * module removal requests and gracefully fail if the module is on its way out.
+ *
+ * Returns true if the reference count was successfully incremented.
+ */
extern bool try_module_get(struct module *module);
+/**
+ * module_put() - release a reference count to a module
+ * @module: the module we should release a reference count for
+ *
+ * If you successfully bump a reference count to a module with try_module_get(),
+ * when you are finished you must call module_put() to release that reference
+ * count.
+ */
extern void module_put(struct module *module);
#else /*!CONFIG_MODULE_UNLOAD*/
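As a hedged illustration of the pattern the new try_module_get() kerneldoc above describes (implied protection via kernfs/sysfs), a module-owned sysfs store handler might look roughly like the sketch below; the handler name and body are hypothetical and assume the usual device/sysfs headers:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	/* Fail gracefully if the module is already on its way out. */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* ... work that relies on the module staying alive ... */

	module_put(THIS_MODULE);
	return count;
}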
@@ -782,7 +859,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
#ifdef CONFIG_SYSFS
extern struct kset *module_kset;
-extern struct kobj_type module_ktype;
+extern const struct kobj_type module_ktype;
#endif /* CONFIG_SYSFS */
#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
@@ -836,8 +913,7 @@ static inline bool module_sig_ok(struct module *module)
#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
int module_kallsyms_on_each_symbol(const char *modname,
- int (*fn)(void *, const char *,
- struct module *, unsigned long),
+ int (*fn)(void *, const char *, unsigned long),
void *data);
/* For kallsyms to ask for address resolution. namebuf should be at
@@ -870,8 +946,7 @@ unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
#else /* CONFIG_MODULES && CONFIG_KALLSYMS */
static inline int module_kallsyms_on_each_symbol(const char *modname,
- int (*fn)(void *, const char *,
- struct module *, unsigned long),
+ int (*fn)(void *, const char *, unsigned long),
void *data)
{
return -EOPNOTSUPP;
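With the per-type module_memory array replacing the old layouts, iterating a class of regions is done with the for_class_mod_mem_type() helper shown above. A small sketch (the helper name is hypothetical) of summing the persistent, core regions of a module:

static unsigned long example_mod_core_size(const struct module *mod)
{
	unsigned long size = 0;

	/* Walks MOD_TEXT, MOD_DATA, MOD_RODATA and MOD_RO_AFTER_INIT. */
	for_class_mod_mem_type(type, core)
		size += mod->mem[type].size;

	return size;
}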
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
new file mode 100644
index 000000000000..7ace7ba30203
--- /dev/null
+++ b/include/linux/module_symbol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_MODULE_SYMBOL_H
+#define _LINUX_MODULE_SYMBOL_H
+
+/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
+static inline int is_mapping_symbol(const char *str)
+{
+ if (str[0] == '.' && str[1] == 'L')
+ return true;
+ if (str[0] == 'L' && str[1] == '0')
+ return true;
+ return str[0] == '$' &&
+ (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
+ && (str[2] == '\0' || str[2] == '.');
+}
+
+#endif /* _LINUX_MODULE_SYMBOL_H */
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 6694d0019a68..04655faadc2d 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -25,8 +25,14 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
-#define DEFINE_SEMAPHORE(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+/*
+ * Unlike mutexes, binary semaphores do not have an owner, so up() can
+ * be called in a different thread from the one which called down().
+ * It is also safe to call down_trylock() and up() from interrupt
+ * context.
+ */
+#define DEFINE_SEMAPHORE(_name, _n) \
+ struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n)
static inline void sema_init(struct semaphore *sem, int val)
{
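DEFINE_SEMAPHORE() now takes the initial count explicitly instead of hard-coding 1, which is why every user in this patch gains a second argument. A brief illustrative declaration (names hypothetical):

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(example_bin_sem, 1);	/* binary semaphore, as before */
static DEFINE_SEMAPHORE(example_pool_sem, 4);	/* at most 4 concurrent holders */

static void example_use(void)
{
	down(&example_bin_sem);
	/* ... critical section; up() may even run in another context ... */
	up(&example_bin_sem);
}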
diff --git a/kernel/Makefile b/kernel/Makefile
index 6fc72b3afbde..b69c95315480 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -13,7 +13,6 @@ obj-y = fork.o exec_domain.o panic.o \
async.o range.o smpboot.o ucount.o regset.o
obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o
-obj-$(CONFIG_MODULES) += kmod.o
obj-$(CONFIG_MULTIUSER) += groups.o
obj-$(CONFIG_VHOST_TASK) += vhost_task.o
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 0520a8f4fb1d..02205ab53b7e 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -356,4 +356,3 @@ module_exit(map_benchmark_cleanup);
MODULE_AUTHOR("Barry Song <song.bao.hua@hisilicon.com>");
MODULE_DESCRIPTION("dma_map benchmark driver");
-MODULE_LICENSE("GPL");
diff --git a/kernel/events/hw_breakpoint_test.c b/kernel/events/hw_breakpoint_test.c
index c57610f52bb4..2cfeeecf8de9 100644
--- a/kernel/events/hw_breakpoint_test.c
+++ b/kernel/events/hw_breakpoint_test.c
@@ -329,5 +329,4 @@ static struct kunit_suite hw_breakpoint_test_suite = {
kunit_test_suites(&hw_breakpoint_test_suite);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Elver <elver@google.com>");
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 83f499182c9a..77747391f49b 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -288,8 +288,7 @@ unsigned long kallsyms_lookup_name(const char *name)
* Iterate over all symbols in vmlinux. For symbols from modules use
* module_kallsyms_on_each_symbol instead.
*/
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
void *data)
{
char namebuf[KSYM_NAME_LEN];
@@ -299,7 +298,7 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
- ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
+ ret = fn(data, namebuf, kallsyms_sym_address(i));
if (ret != 0)
return ret;
cond_resched();
diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
index bfbc12da3326..a2e3745d15c4 100644
--- a/kernel/kallsyms_selftest.c
+++ b/kernel/kallsyms_selftest.c
@@ -95,7 +95,7 @@ static struct test_item test_items[] = {
static char stub_name[KSYM_NAME_LEN];
-static int stat_symbol_len(void *data, const char *name, struct module *mod, unsigned long addr)
+static int stat_symbol_len(void *data, const char *name, unsigned long addr)
{
*(u32 *)data += strlen(name);
@@ -154,7 +154,7 @@ static void test_kallsyms_compression_ratio(void)
pr_info(" ---------------------------------------------------------\n");
}
-static int lookup_name(void *data, const char *name, struct module *mod, unsigned long addr)
+static int lookup_name(void *data, const char *name, unsigned long addr)
{
u64 t0, t1, t;
struct test_stat *stat = (struct test_stat *)data;
@@ -207,7 +207,7 @@ static bool match_cleanup_name(const char *s, const char *name)
return !strncmp(s, name, len);
}
-static int find_symbol(void *data, const char *name, struct module *mod, unsigned long addr)
+static int find_symbol(void *data, const char *name, unsigned long addr)
{
struct test_stat *stat = (struct test_stat *)data;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index feaf90c5e537..fc851455740c 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -142,8 +142,7 @@ static int klp_match_callback(void *data, unsigned long addr)
return 0;
}
-static int klp_find_callback(void *data, const char *name,
- struct module *mod, unsigned long addr)
+static int klp_find_callback(void *data, const char *name, unsigned long addr)
{
struct klp_find_arg *args = data;
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 424b3bc58f3f..33a2e991f608 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -22,6 +22,104 @@ menuconfig MODULES
if MODULES
+config MODULE_DEBUGFS
+ bool
+
+config MODULE_DEBUG
+ bool "Module debugging"
+ depends on DEBUG_FS
+ help
+ Allows you to enable / disable features which can help you debug
+ modules. You don't need these options on production systems.
+
+if MODULE_DEBUG
+
+config MODULE_STATS
+ bool "Module statistics"
+ depends on DEBUG_FS
+ select MODULE_DEBUGFS
+ help
+ This option allows you to maintain a record of module statistics.
+ For example, size of all modules, average size, text size, a list
+	  of failed modules and the size of each of those. For failed
+	  modules we keep track of modules which failed either because an
+	  existing module with the same name took too long to load or because
+	  that module was already loaded.
+
+ You should enable this if you are debugging production loads
+ and want to see if userspace or the kernel is doing stupid things
+ with loading modules when it shouldn't or if you want to help
+ optimize userspace / kernel space module autoloading schemes.
+ You might want to do this because failed modules tend to use
+	  up a significant amount of memory, and so you'd be doing everyone a
+ favor in avoiding these failures proactively.
+
+ This functionality is also useful for those experimenting with
+ module .text ELF section optimization.
+
+ If unsure, say N.
+
+config MODULE_DEBUG_AUTOLOAD_DUPS
+ bool "Debug duplicate modules with auto-loading"
+ help
+ Module autoloading allows in-kernel code to request modules through
+ the *request_module*() API calls. This in turn just calls userspace
+ modprobe. Although modprobe checks to see if a module is already
+ loaded before trying to load a module there is a small time window in
+ which multiple duplicate requests can end up in userspace and multiple
+ modprobe calls race calling finit_module() around the same time for
+ duplicate modules. The finit_module() system call can consume in the
+ worst case more than twice the respective module size in virtual
+ memory for each duplicate module requests. Although duplicate module
+	  memory for each duplicate module request. Although duplicate module
+	  requests are non-fatal, virtual memory is a limited resource and each
+ memory.
+
+ This debugging facility will create pr_warn() splats for duplicate
+ module requests to help identify if module auto-loading may be the
+	  culprit behind your early boot virtual memory pressure. Since virtual
+	  memory abuse caused by duplicate module requests could render a
+	  system unusable, this functionality will also converge races in
+ requests for the same module to a single request. You can boot with
+ the module.enable_dups_trace=1 kernel parameter to use WARN_ON()
+ instead of the pr_warn().
+
+ If the first module request used request_module_nowait() we cannot
+ use that as the anchor to wait for duplicate module requests, since
+ users of request_module() do want a proper return value. If a call
+ for the same module happened earlier with request_module() though,
+ then a duplicate request_module_nowait() would be detected. The
+	  waiting request_module() call is synchronous and waits until modprobe
+	  completes. Subsequent auto-loading requests for the same module do
+	  not trigger new finit_module() calls and do not strain virtual
+ memory, and so as soon as modprobe successfully completes we remove
+ tracking for duplicates for that module.
+
+ Enable this functionality to try to debug virtual memory abuse during
+ boot on systems which are failing to boot or if you suspect you may be
+ straining virtual memory during boot, and you want to identify if the
+ abuse was due to module auto-loading. These issues are currently only
+	  known to occur on systems with many CPUs (over 400) and are likely the
+ result of udev issuing duplicate module requests for each CPU, and so
+ module auto-loading is not the culprit. There may very well still be
+ many duplicate module auto-loading requests which could be optimized
+ for and this debugging facility can be used to help identify them.
+
+ Only enable this for debugging system functionality, never have it
+ enabled on real systems.
+
+config MODULE_DEBUG_AUTOLOAD_DUPS_TRACE
+ bool "Force full stack trace when duplicates are found"
+ depends on MODULE_DEBUG_AUTOLOAD_DUPS
+ help
+ Enabling this will force a full stack trace for duplicate module
+ auto-loading requests using WARN_ON() instead of pr_warn(). You
+ should keep this disabled at all times unless you are a developer
+ and are doing a manual inspection and want to debug exactly why
+ these duplicates occur.
+
+endif # MODULE_DEBUG
+
config MODULE_FORCE_LOAD
bool "Forced module loading"
default n
@@ -51,7 +149,7 @@ config MODULE_FORCE_UNLOAD
config MODULE_UNLOAD_TAINT_TRACKING
bool "Tainted module unload tracking"
depends on MODULE_UNLOAD
- default n
+ select MODULE_DEBUGFS
help
This option allows you to maintain a record of each unloaded
module that tainted the kernel. In addition to displaying a
diff --git a/kernel/module/Makefile b/kernel/module/Makefile
index 948efea81e85..a10b2b9a6fdf 100644
--- a/kernel/module/Makefile
+++ b/kernel/module/Makefile
@@ -7,7 +7,10 @@
# and produce insane amounts of uninteresting coverage.
KCOV_INSTRUMENT_module.o := n
-obj-y += main.o strict_rwx.o
+obj-y += main.o
+obj-y += strict_rwx.o
+obj-y += kmod.o
+obj-$(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS) += dups.o
obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o
obj-$(CONFIG_MODULE_SIG) += signing.o
obj-$(CONFIG_LIVEPATCH) += livepatch.o
@@ -19,3 +22,4 @@ obj-$(CONFIG_SYSFS) += sysfs.o
obj-$(CONFIG_KGDB_KDB) += kdb.o
obj-$(CONFIG_MODVERSIONS) += version.o
obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o
+obj-$(CONFIG_MODULE_STATS) += stats.o
diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
index bb79ac1a6d8f..e97232b125eb 100644
--- a/kernel/module/decompress.c
+++ b/kernel/module/decompress.c
@@ -267,7 +267,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
zstd_dec.size = PAGE_SIZE;
ret = zstd_decompress_stream(dstream, &zstd_dec, &zstd_buf);
- kunmap(page);
+ kunmap_local(zstd_dec.dst);
retval = zstd_get_error_code(ret);
if (retval)
break;
@@ -297,6 +297,10 @@ int module_decompress(struct load_info *info, const void *buf, size_t size)
ssize_t data_size;
int error;
+#if defined(CONFIG_MODULE_STATS)
+ info->compressed_len = size;
+#endif
+
/*
* Start with number of pages twice as big as needed for
* compressed data.
diff --git a/kernel/module/dups.c b/kernel/module/dups.c
new file mode 100644
index 000000000000..aa8e1361fdb5
--- /dev/null
+++ b/kernel/module/dups.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * kmod dups - the kernel module autoloader duplicate suppressor
+ *
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
+ */
+
+#define pr_fmt(fmt) "module: " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/binfmts.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/workqueue.h>
+#include <linux/security.h>
+#include <linux/mount.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/ptrace.h>
+#include <linux/async.h>
+#include <linux/uaccess.h>
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "module."
+static bool enable_dups_trace = IS_ENABLED(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE);
+module_param(enable_dups_trace, bool_enable_only, 0644);
+
+/*
+ * Protects the dup_kmod_reqs list; additions / removals use RCU.
+ */
+static DEFINE_MUTEX(kmod_dup_mutex);
+static LIST_HEAD(dup_kmod_reqs);
+
+struct kmod_dup_req {
+ struct list_head list;
+ char name[MODULE_NAME_LEN];
+ struct completion first_req_done;
+ struct work_struct complete_work;
+ struct delayed_work delete_work;
+ int dup_ret;
+};
+
+static struct kmod_dup_req *kmod_dup_request_lookup(char *module_name)
+{
+ struct kmod_dup_req *kmod_req;
+
+ list_for_each_entry_rcu(kmod_req, &dup_kmod_reqs, list,
+ lockdep_is_held(&kmod_dup_mutex)) {
+ if (strlen(kmod_req->name) == strlen(module_name) &&
+ !memcmp(kmod_req->name, module_name, strlen(module_name))) {
+ return kmod_req;
+ }
+ }
+
+ return NULL;
+}
+
+static void kmod_dup_request_delete(struct work_struct *work)
+{
+ struct kmod_dup_req *kmod_req;
+ kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work);
+
+ /*
+	 * The typical situation is a module that was successfully loaded. In
+	 * that situation the module will already be known to userspace. If
+	 * new requests come in after that, userspace will already know the
+	 * module is loaded and will just return 0 right away. There is still
+	 * a small chance that new request_module() calls happen right after
+	 * we delete this entry. These heuristics are to protect against
+	 * finit_module() abuse for auto-loading; if modules are still trying
+	 * to auto-load even though a module is already loaded, that's on
+	 * them, and those inefficiencies should not be fixed by kmod. The
+	 * inefficiency there is a call to modprobe and modprobe just
+	 * returning 0.
+ */
+ mutex_lock(&kmod_dup_mutex);
+ list_del_rcu(&kmod_req->list);
+ synchronize_rcu();
+ mutex_unlock(&kmod_dup_mutex);
+ kfree(kmod_req);
+}
+
+static void kmod_dup_request_complete(struct work_struct *work)
+{
+ struct kmod_dup_req *kmod_req;
+
+ kmod_req = container_of(work, struct kmod_dup_req, complete_work);
+
+ /*
+	 * This will ensure that the kernel lets all the waiters know it's
+	 * time to check the return value. It's time to go home.
+ */
+ complete_all(&kmod_req->first_req_done);
+
+ /*
+ * Now that we have allowed prior request_module() calls to go on
+ * with life, let's schedule deleting this entry. We don't have
+ * to do it right away, but we *eventually* want to do it so to not
+ * let this linger forever as this is just a boot optimization for
+ * possible abuses of vmalloc() incurred by finit_module() thrashing.
+ */
+ queue_delayed_work(system_wq, &kmod_req->delete_work, 60 * HZ);
+}
+
+bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
+{
+ struct kmod_dup_req *kmod_req, *new_kmod_req;
+ int ret;
+
+ /*
+ * Pre-allocate the entry in case we have to use it later
+ * to avoid contention with the mutex.
+ */
+ new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL);
+ if (!new_kmod_req)
+ return false;
+
+ memcpy(new_kmod_req->name, module_name, strlen(module_name));
+ INIT_WORK(&new_kmod_req->complete_work, kmod_dup_request_complete);
+ INIT_DELAYED_WORK(&new_kmod_req->delete_work, kmod_dup_request_delete);
+ init_completion(&new_kmod_req->first_req_done);
+
+ mutex_lock(&kmod_dup_mutex);
+
+ kmod_req = kmod_dup_request_lookup(module_name);
+ if (!kmod_req) {
+ /*
+ * If the first request that came through for a module
+ * was with request_module_nowait() we cannot wait for it
+ * and share its return value with other users which may
+ * have used request_module() and need a proper return value
+ * so just skip using them as an anchor.
+ *
+ * If a prior request to this one came through with
+ * request_module() though, then a request_module_nowait()
+ * would benefit from duplicate detection.
+ */
+ if (!wait) {
+ kfree(new_kmod_req);
+ pr_debug("New request_module_nowait() for %s -- cannot track duplicates for this request\n", module_name);
+ mutex_unlock(&kmod_dup_mutex);
+ return false;
+ }
+
+ /*
+ * There was no duplicate, just add the request so we can
+ * keep tab on duplicates later.
+ */
+ pr_debug("New request_module() for %s\n", module_name);
+ list_add_rcu(&new_kmod_req->list, &dup_kmod_reqs);
+ mutex_unlock(&kmod_dup_mutex);
+ return false;
+ }
+ mutex_unlock(&kmod_dup_mutex);
+
+ /* We are dealing with a duplicate request now */
+ kfree(new_kmod_req);
+
+ /*
+	 * To fix these, try to use try_then_request_module() instead, as that
+	 * will check if the component you are looking for is present or not.
+	 * You could also just queue a single request to load the module once,
+	 * instead of having every user that needs it issue its own request
+	 * for the module.
+ *
+ * Duplicate request_module() calls can cause quite a bit of wasted
+ * vmalloc() space when racing with userspace.
+ */
+ if (enable_dups_trace)
+ WARN(1, "module-autoload: duplicate request for module %s\n", module_name);
+ else
+ pr_warn("module-autoload: duplicate request for module %s\n", module_name);
+
+ if (!wait) {
+ /*
+ * If request_module_nowait() was used then the user just
+		 * wanted to issue the request, and if another module request
+		 * with the same name was already on its way we don't care about
+		 * the return value either. Let duplicate request_module_nowait()
+ * calls bail out right away.
+ */
+ *dup_ret = 0;
+ return true;
+ }
+
+ /*
+ * If a duplicate request_module() was used they *may* care for
+ * the return value, so we have no other option but to wait for
+ * the first caller to complete. If the first caller used
+	 * the request_module_nowait() call, subsequent callers will
+	 * deal with the compromise of getting a successful call with this
+	 * optimization enabled ...
+ */
+ ret = wait_for_completion_state(&kmod_req->first_req_done,
+ TASK_UNINTERRUPTIBLE | TASK_KILLABLE);
+ if (ret) {
+ *dup_ret = ret;
+ return true;
+ }
+
+ /* Now the duplicate request has the same exact return value as the first request */
+ *dup_ret = kmod_req->dup_ret;
+
+ return true;
+}
+
+void kmod_dup_request_announce(char *module_name, int ret)
+{
+ struct kmod_dup_req *kmod_req;
+
+ mutex_lock(&kmod_dup_mutex);
+
+ kmod_req = kmod_dup_request_lookup(module_name);
+ if (!kmod_req)
+ goto out;
+
+ kmod_req->dup_ret = ret;
+
+ /*
+ * If we complete() here we may allow duplicate threads
+ * to continue before the first one that submitted the
+	 * request. We're also in no rush, and given that each and
+	 * every bounce back to userspace is slow we avoid that
+	 * with a slight delay here. So queue up the completion
+	 * and let duplicates suffer, they just wait a tad bit longer.
+ * There is no rush. But we also don't want to hold the
+ * caller up forever or introduce any boot delays.
+ */
+ queue_work(system_wq, &kmod_req->complete_work);
+
+out:
+ mutex_unlock(&kmod_dup_mutex);
+}
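The two exported helpers bracket a module request: callers first ask whether an equivalent request is already in flight (and, for waiting requests, block on its completion), then announce the outcome so duplicates can reuse it. A hedged sketch of how a request_module()-style path could use them; the modprobe invocation is a hypothetical placeholder, not code from this patch:

static int example_request_module(char *module_name, bool wait)
{
	int ret, dup_ret;

	/* Piggy-back on an in-flight request for the same module, if any. */
	if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret))
		return dup_ret;

	ret = example_call_modprobe(module_name, wait);	/* hypothetical helper */

	/* Wake up any duplicates that queued behind this request. */
	kmod_dup_request_announce(module_name, ret);
	return ret;
}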
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index 1c877561a7d2..dc7b0160c480 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -3,6 +3,7 @@
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
*/
#include <linux/elf.h>
@@ -17,27 +18,19 @@
#define ARCH_SHF_SMALL 0
#endif
-/* If this is set, the section belongs in the init part of the module */
-#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG - 1))
-/* Maximum number of characters written by module_flags() */
-#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
-
-#ifndef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
-#define data_layout core_layout
-#endif
-
/*
- * Modules' sections will be aligned on page boundaries
- * to ensure complete separation of code and data, but
- * only when CONFIG_STRICT_MODULE_RWX=y
+ * Use highest 4 bits of sh_entsize to store the mod_mem_type of this
+ * section. This leaves 28 bits for offset on 32-bit systems, which is
+ * about 256 MiB (WARN_ON_ONCE if we exceed that).
*/
-static inline unsigned int strict_align(unsigned int size)
-{
- if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- return PAGE_ALIGN(size);
- else
- return size;
-}
+
+#define SH_ENTSIZE_TYPE_BITS 4
+#define SH_ENTSIZE_TYPE_SHIFT (BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS)
+#define SH_ENTSIZE_TYPE_MASK ((1UL << SH_ENTSIZE_TYPE_BITS) - 1)
+#define SH_ENTSIZE_OFFSET_MASK ((1UL << (BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS)) - 1)
+
+/* Maximum number of characters written by module_flags() */
+#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
extern struct mutex module_mutex;
extern struct list_head modules;
@@ -53,7 +46,6 @@ extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
-#include <linux/dynamic_debug.h>
struct load_info {
const char *name;
/* pointer to module in temporary copy, freed at end of load_module() */
@@ -63,12 +55,14 @@ struct load_info {
Elf_Shdr *sechdrs;
char *secstrings, *strtab;
unsigned long symoffs, stroffs, init_typeoffs, core_typeoffs;
- struct _ddebug_info dyndbg;
bool sig_ok;
#ifdef CONFIG_KALLSYMS
unsigned long mod_kallsyms_init_off;
#endif
#ifdef CONFIG_MODULE_DECOMPRESS
+#ifdef CONFIG_MODULE_STATS
+ unsigned long compressed_len;
+#endif
struct page **pages;
unsigned int max_pages;
unsigned int used_pages;
@@ -101,11 +95,16 @@ int try_to_force_load(struct module *mod, const char *reason);
bool find_symbol(struct find_symbol_arg *fsa);
struct module *find_module_all(const char *name, size_t len, bool even_unformed);
int cmp_name(const void *name, const void *sym);
-long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr,
- unsigned int section);
+long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
+ Elf_Shdr *sechdr, unsigned int section);
char *module_flags(struct module *mod, char *buf, bool show_state);
size_t module_flags_taint(unsigned long taints, char *buf);
+char *module_next_tag_pair(char *string, unsigned long *secsize);
+
+#define for_each_modinfo_entry(entry, info, name) \
+ for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry))
+
static inline void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
@@ -148,6 +147,95 @@ static inline bool set_livepatch_module(struct module *mod)
#endif
}
+/**
+ * enum fail_dup_mod_reason - state at which a duplicate module was detected
+ *
+ * @FAIL_DUP_MOD_BECOMING: the module is read properly, passes all checks but
+ * we've determined that another module with the same name is already loaded
+ * or being processed on our &modules list. This happens on early_mod_check()
+ * right before layout_and_allocate(). The kernel would have already
+ * vmalloc()'d space for the entire module through finit_module(). If
+ *	decompression was used, two vmap() spaces were used. These failures can
+ *	happen when userspace has not yet seen the module present in the kernel
+ *	and tries to load the module multiple times at the same time.
+ * @FAIL_DUP_MOD_LOAD: the module has been read properly, passes all validation
+ * checks and the kernel determines that the module was unique and because
+ * of this allocated yet another private kernel copy of the module space in
+ * layout_and_allocate() but after this determined in add_unformed_module()
+ * that another module with the same name is already loaded or being processed.
+ * These failures should be mitigated as much as possible and are indicative
+ * of really fast races in loading modules. Without module decompression
+ *	they waste twice as much vmap space. With module decompression, three
+ *	times the module's size in vmap space is wasted.
+ */
+enum fail_dup_mod_reason {
+ FAIL_DUP_MOD_BECOMING = 0,
+ FAIL_DUP_MOD_LOAD,
+};
+
+#ifdef CONFIG_MODULE_DEBUGFS
+extern struct dentry *mod_debugfs_root;
+#endif
+
+#ifdef CONFIG_MODULE_STATS
+
+#define mod_stat_add_long(count, var) atomic_long_add(count, var)
+#define mod_stat_inc(name) atomic_inc(name)
+
+extern atomic_long_t total_mod_size;
+extern atomic_long_t total_text_size;
+extern atomic_long_t invalid_kread_bytes;
+extern atomic_long_t invalid_decompress_bytes;
+
+extern atomic_t modcount;
+extern atomic_t failed_kreads;
+extern atomic_t failed_decompress;
+struct mod_fail_load {
+ struct list_head list;
+ char name[MODULE_NAME_LEN];
+ atomic_long_t count;
+ unsigned long dup_fail_mask;
+};
+
+int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason);
+void mod_stat_bump_invalid(struct load_info *info, int flags);
+void mod_stat_bump_becoming(struct load_info *info, int flags);
+
+#else
+
+#define mod_stat_add_long(name, var)
+#define mod_stat_inc(name)
+
+static inline int try_add_failed_module(const char *name,
+ enum fail_dup_mod_reason reason)
+{
+ return 0;
+}
+
+static inline void mod_stat_bump_invalid(struct load_info *info, int flags)
+{
+}
+
+static inline void mod_stat_bump_becoming(struct load_info *info, int flags)
+{
+}
+
+#endif /* CONFIG_MODULE_STATS */
+
+#ifdef CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS
+bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret);
+void kmod_dup_request_announce(char *module_name, int ret);
+#else
+static inline bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
+{
+ return false;
+}
+
+static inline void kmod_dup_request_announce(char *module_name, int ret)
+{
+}
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD_TAINT_TRACKING
struct mod_unload_taint {
struct list_head list;
@@ -190,10 +278,13 @@ struct mod_tree_root {
#endif
unsigned long addr_min;
unsigned long addr_max;
+#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ unsigned long data_addr_min;
+ unsigned long data_addr_max;
+#endif
};
extern struct mod_tree_root mod_tree;
-extern struct mod_tree_root mod_data_tree;
#ifdef CONFIG_MODULES_TREE_LOOKUP
void mod_tree_insert(struct module *mod);
@@ -224,7 +315,6 @@ void module_enable_nx(const struct module *mod);
void module_enable_x(const struct module *mod);
int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod);
-bool module_check_misalignment(const struct module *mod);
#ifdef CONFIG_MODULE_SIG
int module_sig_check(struct load_info *info, int flags);
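Since sh_entsize now carries both the mod_mem_type and the section offset, the packing described by the SH_ENTSIZE_* macros looks roughly like the helpers below; these encode/decode functions are illustrative only and not part of the patch:

static inline unsigned long example_encode_entsize(enum mod_mem_type type,
						   unsigned long offset)
{
	/* Offsets beyond SH_ENTSIZE_OFFSET_MASK (~256 MiB on 32-bit) would overflow. */
	WARN_ON_ONCE(offset & ~SH_ENTSIZE_OFFSET_MASK);

	return ((unsigned long)type << SH_ENTSIZE_TYPE_SHIFT) |
	       (offset & SH_ENTSIZE_OFFSET_MASK);
}

static inline enum mod_mem_type example_decode_type(unsigned long entsize)
{
	return (entsize >> SH_ENTSIZE_TYPE_SHIFT) & SH_ENTSIZE_TYPE_MASK;
}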
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index bdc911dbcde5..c550d7d45f2f 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/module_symbol.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/bsearch.h>
@@ -78,6 +79,7 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
unsigned int shnum, unsigned int pcpundx)
{
const Elf_Shdr *sec;
+ enum mod_mem_type type;
if (src->st_shndx == SHN_UNDEF ||
src->st_shndx >= shnum ||
@@ -90,11 +92,12 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
#endif
sec = sechdrs + src->st_shndx;
+ type = sec->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
|| !(sec->sh_flags & SHF_EXECINSTR)
#endif
- || (sec->sh_entsize & INIT_OFFSET_MASK))
+ || mod_mem_type_is_init(type))
return false;
return true;
@@ -113,11 +116,13 @@ void layout_symtab(struct module *mod, struct load_info *info)
Elf_Shdr *strsect = info->sechdrs + info->index.str;
const Elf_Sym *src;
unsigned int i, nsrc, ndst, strtab_size = 0;
+ struct module_memory *mod_mem_data = &mod->mem[MOD_DATA];
+ struct module_memory *mod_mem_init_data = &mod->mem[MOD_INIT_DATA];
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
- symsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, symsect,
- info->index.sym) | INIT_OFFSET_MASK;
+ symsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA,
+ symsect, info->index.sym);
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
src = (void *)info->hdr + symsect->sh_offset;
@@ -134,28 +139,27 @@ void layout_symtab(struct module *mod, struct load_info *info)
}
/* Append room for core symbols at end of core part. */
- info->symoffs = ALIGN(mod->data_layout.size, symsect->sh_addralign ?: 1);
- info->stroffs = mod->data_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
- mod->data_layout.size += strtab_size;
+ info->symoffs = ALIGN(mod_mem_data->size, symsect->sh_addralign ?: 1);
+ info->stroffs = mod_mem_data->size = info->symoffs + ndst * sizeof(Elf_Sym);
+ mod_mem_data->size += strtab_size;
/* Note add_kallsyms() computes strtab_size as core_typeoffs - stroffs */
- info->core_typeoffs = mod->data_layout.size;
- mod->data_layout.size += ndst * sizeof(char);
- mod->data_layout.size = strict_align(mod->data_layout.size);
+ info->core_typeoffs = mod_mem_data->size;
+ mod_mem_data->size += ndst * sizeof(char);
/* Put string table section at end of init part of module. */
strsect->sh_flags |= SHF_ALLOC;
- strsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, strsect,
- info->index.str) | INIT_OFFSET_MASK;
+ strsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA,
+ strsect, info->index.str);
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
/* We'll tack temporary mod_kallsyms on the end. */
- mod->init_layout.size = ALIGN(mod->init_layout.size,
- __alignof__(struct mod_kallsyms));
- info->mod_kallsyms_init_off = mod->init_layout.size;
- mod->init_layout.size += sizeof(struct mod_kallsyms);
- info->init_typeoffs = mod->init_layout.size;
- mod->init_layout.size += nsrc * sizeof(char);
- mod->init_layout.size = strict_align(mod->init_layout.size);
+ mod_mem_init_data->size = ALIGN(mod_mem_init_data->size,
+ __alignof__(struct mod_kallsyms));
+ info->mod_kallsyms_init_off = mod_mem_init_data->size;
+
+ mod_mem_init_data->size += sizeof(struct mod_kallsyms);
+ info->init_typeoffs = mod_mem_init_data->size;
+ mod_mem_init_data->size += nsrc * sizeof(char);
}
/*
@@ -171,9 +175,11 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
char *s;
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
unsigned long strtab_size;
+ void *data_base = mod->mem[MOD_DATA].base;
+ void *init_data_base = mod->mem[MOD_INIT_DATA].base;
/* Set up to point into init section. */
- mod->kallsyms = (void __rcu *)mod->init_layout.base +
+ mod->kallsyms = (void __rcu *)init_data_base +
info->mod_kallsyms_init_off;
rcu_read_lock();
@@ -183,15 +189,15 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
/* Make sure we get permanent strtab: don't use info->strtab. */
rcu_dereference(mod->kallsyms)->strtab =
(void *)info->sechdrs[info->index.str].sh_addr;
- rcu_dereference(mod->kallsyms)->typetab = mod->init_layout.base + info->init_typeoffs;
+ rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs;
/*
* Now populate the cut down core kallsyms for after init
* and set types up while we still have access to sections.
*/
- mod->core_kallsyms.symtab = dst = mod->data_layout.base + info->symoffs;
- mod->core_kallsyms.strtab = s = mod->data_layout.base + info->stroffs;
- mod->core_kallsyms.typetab = mod->data_layout.base + info->core_typeoffs;
+ mod->core_kallsyms.symtab = dst = data_base + info->symoffs;
+ mod->core_kallsyms.strtab = s = data_base + info->stroffs;
+ mod->core_kallsyms.typetab = data_base + info->core_typeoffs;
strtab_size = info->core_typeoffs - info->stroffs;
src = rcu_dereference(mod->kallsyms)->symtab;
for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {
@@ -238,18 +244,6 @@ void init_build_id(struct module *mod, const struct load_info *info)
}
#endif
-/*
- * This ignores the intensely annoying "mapping symbols" found
- * in ARM ELF files: $a, $t and $d.
- */
-static inline int is_arm_mapping_symbol(const char *str)
-{
- if (str[0] == '.' && str[1] == 'L')
- return true;
- return str[0] == '$' && strchr("axtd", str[1]) &&
- (str[2] == '\0' || str[2] == '.');
-}
-
static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
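
The removed is_arm_mapping_symbol() is replaced by the shared is_mapping_symbol() helper; a standalone sketch (not part of the patch) mirroring the removed lines shows what gets filtered out:

/*
 * Standalone sketch: same test as the deleted helper above -- local ".L"
 * labels and "$a"/"$x"/"$t"/"$d" mapping symbols are skipped.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_mapping_symbol_demo(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1]) &&
	       (str[2] == '\0' || str[2] == '.');
}

int main(void)
{
	const char *names[] = { "$d", "$x.12", ".Lanon", "my_func" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-8s -> %d\n", names[i], is_mapping_symbol_demo(names[i]));
	return 0;
}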
@@ -267,12 +261,15 @@ static const char *find_kallsyms_symbol(struct module *mod,
unsigned int i, best = 0;
unsigned long nextval, bestval;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ struct module_memory *mod_mem;
/* At worst, next value is at end of module */
if (within_module_init(addr, mod))
- nextval = (unsigned long)mod->init_layout.base + mod->init_layout.text_size;
+ mod_mem = &mod->mem[MOD_INIT_TEXT];
else
- nextval = (unsigned long)mod->core_layout.base + mod->core_layout.text_size;
+ mod_mem = &mod->mem[MOD_TEXT];
+
+ nextval = (unsigned long)mod_mem->base + mod_mem->size;
bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
@@ -292,7 +289,7 @@ static const char *find_kallsyms_symbol(struct module *mod,
* and inserted at a whim.
*/
if (*kallsyms_symbol_name(kallsyms, i) == '\0' ||
- is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
+ is_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
continue;
if (thisval <= addr && thisval > bestval) {
@@ -505,8 +502,7 @@ unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
}
int module_kallsyms_on_each_symbol(const char *modname,
- int (*fn)(void *, const char *,
- struct module *, unsigned long),
+ int (*fn)(void *, const char *, unsigned long),
void *data)
{
struct module *mod;
@@ -535,7 +531,7 @@ int module_kallsyms_on_each_symbol(const char *modname,
continue;
ret = fn(data, kallsyms_symbol_name(kallsyms, i),
- mod, kallsyms_symbol_value(sym));
+ kallsyms_symbol_value(sym));
if (ret != 0)
goto out;
}
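
The callback prototype for module_kallsyms_on_each_symbol() loses its struct module * argument; a kernel-context sketch of a matching callback (names and counting logic here are hypothetical, not part of the patch):

/* Matches the new int (*fn)(void *, const char *, unsigned long) prototype. */
static int count_module_symbols(void *data, const char *name,
				unsigned long addr)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* returning non-zero stops the iteration */
}

/* e.g.: module_kallsyms_on_each_symbol("btrfs", count_module_symbols, &count); */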
diff --git a/kernel/module/kdb.c b/kernel/module/kdb.c
index f4317f92e189..995c32d3698f 100644
--- a/kernel/module/kdb.c
+++ b/kernel/module/kdb.c
@@ -26,10 +26,11 @@ int kdb_lsmod(int argc, const char **argv)
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- kdb_printf("%-20s%8u", mod->name, mod->core_layout.size);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- kdb_printf("/%8u", mod->data_layout.size);
-#endif
+ kdb_printf("%-20s%8u", mod->name, mod->mem[MOD_TEXT].size);
+ kdb_printf("/%8u", mod->mem[MOD_RODATA].size);
+ kdb_printf("/%8u", mod->mem[MOD_RO_AFTER_INIT].size);
+ kdb_printf("/%8u", mod->mem[MOD_DATA].size);
+
kdb_printf(" 0x%px ", (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4d ", module_refcount(mod));
@@ -40,10 +41,10 @@ int kdb_lsmod(int argc, const char **argv)
kdb_printf(" (Loading)");
else
kdb_printf(" (Live)");
- kdb_printf(" 0x%px", mod->core_layout.base);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- kdb_printf("/0x%px", mod->data_layout.base);
-#endif
+ kdb_printf(" 0x%px", mod->mem[MOD_TEXT].base);
+ kdb_printf("/0x%px", mod->mem[MOD_RODATA].base);
+ kdb_printf("/0x%px", mod->mem[MOD_RO_AFTER_INIT].base);
+ kdb_printf("/0x%px", mod->mem[MOD_DATA].base);
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/kmod.c b/kernel/module/kmod.c
index b717134ebe17..0800d9891692 100644
--- a/kernel/kmod.c
+++ b/kernel/module/kmod.c
@@ -1,6 +1,9 @@
/*
* kmod - the kernel module loader
+ *
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
*/
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -27,6 +30,7 @@
#include <linux/uaccess.h>
#include <trace/events/module.h>
+#include "internal.h"
/*
* Assuming:
@@ -40,8 +44,7 @@
* effect. Systems like these are very unlikely if modules are enabled.
*/
#define MAX_KMOD_CONCURRENT 50
-static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
-static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
+static DEFINE_SEMAPHORE(kmod_concurrent_max, MAX_KMOD_CONCURRENT);
/*
* This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
@@ -66,7 +69,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
kfree(info->argv);
}
-static int call_modprobe(char *module_name, int wait)
+static int call_modprobe(char *orig_module_name, int wait)
{
struct subprocess_info *info;
static char *envp[] = {
@@ -75,12 +78,14 @@ static int call_modprobe(char *module_name, int wait)
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL
};
+ char *module_name;
+ int ret;
char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
if (!argv)
goto out;
- module_name = kstrdup(module_name, GFP_KERNEL);
+ module_name = kstrdup(orig_module_name, GFP_KERNEL);
if (!module_name)
goto free_argv;
@@ -95,13 +100,16 @@ static int call_modprobe(char *module_name, int wait)
if (!info)
goto free_module_name;
- return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
+ ret = call_usermodehelper_exec(info, wait | UMH_KILLABLE);
+ kmod_dup_request_announce(orig_module_name, ret);
+ return ret;
free_module_name:
kfree(module_name);
free_argv:
kfree(argv);
out:
+ kmod_dup_request_announce(orig_module_name, -ENOMEM);
return -ENOMEM;
}
@@ -125,7 +133,7 @@ int __request_module(bool wait, const char *fmt, ...)
{
va_list args;
char module_name[MODULE_NAME_LEN];
- int ret;
+ int ret, dup_ret;
/*
* We don't allow synchronous module loading from async. Module
@@ -148,29 +156,24 @@ int __request_module(bool wait, const char *fmt, ...)
if (ret)
return ret;
- if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
- pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
- atomic_read(&kmod_concurrent_max),
- MAX_KMOD_CONCURRENT, module_name);
- ret = wait_event_killable_timeout(kmod_wq,
- atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
- MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
- if (!ret) {
- pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
- module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
- return -ETIME;
- } else if (ret == -ERESTARTSYS) {
- pr_warn_ratelimited("request_module: sigkill sent for modprobe %s, giving up", module_name);
- return ret;
- }
+ ret = down_timeout(&kmod_concurrent_max, MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
+ if (ret) {
+ pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
+ module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
+ return ret;
}
trace_module_request(module_name, wait, _RET_IP_);
+ if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret)) {
+ ret = dup_ret;
+ goto out;
+ }
+
ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
- atomic_inc(&kmod_concurrent_max);
- wake_up(&kmod_wq);
+out:
+ up(&kmod_concurrent_max);
return ret;
}
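
The atomic counter plus waitqueue is replaced by a counting semaphore acquired with a timeout. A userspace POSIX analogue of the same bounded-concurrency pattern, illustrative only and not part of the patch:

#include <semaphore.h>
#include <stdio.h>
#include <time.h>

#define MAX_CONCURRENT 50
#define BUSY_TIMEOUT_SECS 5

static sem_t concurrent_max;

static int limited_request(const char *name)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += BUSY_TIMEOUT_SECS;
	/* analogous to down_timeout(&kmod_concurrent_max, ...) above */
	if (sem_timedwait(&concurrent_max, &ts)) {
		fprintf(stderr, "%s: too many concurrent requests\n", name);
		return -1;
	}
	printf("would run modprobe %s here\n", name);
	sem_post(&concurrent_max);	/* analogous to up() */
	return 0;
}

int main(void)
{
	sem_init(&concurrent_max, 0, MAX_CONCURRENT);
	limited_request("demo_module");
	sem_destroy(&concurrent_max);
	return 0;
}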
diff --git a/kernel/module/main.c b/kernel/module/main.c
index d3be89de706d..044aa2c9e3cb 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2002 Richard Henderson
* Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
*/
#define INCLUDE_VERMAGIC
@@ -55,6 +56,7 @@
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
+#include <linux/debugfs.h>
#include <uapi/linux/module.h>
#include "internal.h"
@@ -80,12 +82,6 @@ struct mod_tree_root mod_tree __cacheline_aligned = {
.addr_min = -1UL,
};
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
-struct mod_tree_root mod_data_tree __cacheline_aligned = {
- .addr_min = -1UL,
-};
-#endif
-
struct symsearch {
const struct kernel_symbol *start, *stop;
const s32 *crcs;
@@ -93,14 +89,24 @@ struct symsearch {
};
/*
- * Bounds of module text, for speeding up __module_address.
+ * Bounds of module memory, for speeding up __module_address.
* Protected by module_mutex.
*/
-static void __mod_update_bounds(void *base, unsigned int size, struct mod_tree_root *tree)
+static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
+ unsigned int size, struct mod_tree_root *tree)
{
unsigned long min = (unsigned long)base;
unsigned long max = min + size;
+#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ if (mod_mem_type_is_core_data(type)) {
+ if (min < tree->data_addr_min)
+ tree->data_addr_min = min;
+ if (max > tree->data_addr_max)
+ tree->data_addr_max = max;
+ return;
+ }
+#endif
if (min < tree->addr_min)
tree->addr_min = min;
if (max > tree->addr_max)
@@ -109,12 +115,12 @@ static void __mod_update_bounds(void *base, unsigned int size, struct mod_tree_r
static void mod_update_bounds(struct module *mod)
{
- __mod_update_bounds(mod->core_layout.base, mod->core_layout.size, &mod_tree);
- if (mod->init_layout.size)
- __mod_update_bounds(mod->init_layout.base, mod->init_layout.size, &mod_tree);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- __mod_update_bounds(mod->data_layout.base, mod->data_layout.size, &mod_data_tree);
-#endif
+ for_each_mod_mem_type(type) {
+ struct module_memory *mod_mem = &mod->mem[type];
+
+ if (mod_mem->size)
+ __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
+ }
}
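
A hedged sketch of what the for_each_mod_mem_type() iterator used throughout this patch plausibly expands to; the real definition lives next to enum mod_mem_type and is not shown in this diff, and the count sentinel name MOD_MEM_NUM_TYPES is assumed:

/* Assumed shape of the iterator, not the verbatim kernel macro. */
#define for_each_mod_mem_type(type)			\
	for (enum mod_mem_type (type) = 0;		\
	     (type) < MOD_MEM_NUM_TYPES; (type)++)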
/* Block module loading/unloading? */
@@ -559,10 +565,8 @@ static int already_uses(struct module *a, struct module *b)
struct module_use *use;
list_for_each_entry(use, &b->source_list, source_list) {
- if (use->source == a) {
- pr_debug("%s uses %s!\n", a->name, b->name);
+ if (use->source == a)
return 1;
- }
}
pr_debug("%s does not use %s!\n", a->name, b->name);
return 0;
@@ -926,7 +930,13 @@ struct module_attribute module_uevent =
static ssize_t show_coresize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
+ unsigned int size = mk->mod->mem[MOD_TEXT].size;
+
+ if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
+ for_class_mod_mem_type(type, core_data)
+ size += mk->mod->mem[type].size;
+ }
+ return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_coresize =
@@ -936,7 +946,11 @@ static struct module_attribute modinfo_coresize =
static ssize_t show_datasize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%u\n", mk->mod->data_layout.size);
+ unsigned int size = 0;
+
+ for_class_mod_mem_type(type, core_data)
+ size += mk->mod->mem[type].size;
+ return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_datasize =
@@ -946,7 +960,11 @@ static struct module_attribute modinfo_datasize =
static ssize_t show_initsize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
+ unsigned int size = 0;
+
+ for_class_mod_mem_type(type, init)
+ size += mk->mod->mem[type].size;
+ return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_initsize =
@@ -998,9 +1016,55 @@ int try_to_force_load(struct module *mod, const char *reason)
#endif
}
-static char *get_modinfo(const struct load_info *info, const char *tag);
+/* Parse tag=value strings from .modinfo section */
+char *module_next_tag_pair(char *string, unsigned long *secsize)
+{
+ /* Skip non-zero chars */
+ while (string[0]) {
+ string++;
+ if ((*secsize)-- <= 1)
+ return NULL;
+ }
+
+ /* Skip any zero padding. */
+ while (!string[0]) {
+ string++;
+ if ((*secsize)-- <= 1)
+ return NULL;
+ }
+ return string;
+}
+
static char *get_next_modinfo(const struct load_info *info, const char *tag,
- char *prev);
+ char *prev)
+{
+ char *p;
+ unsigned int taglen = strlen(tag);
+ Elf_Shdr *infosec = &info->sechdrs[info->index.info];
+ unsigned long size = infosec->sh_size;
+
+ /*
+ * get_modinfo() calls made before rewrite_section_headers()
+ * must use sh_offset, as sh_addr isn't set!
+ */
+ char *modinfo = (char *)info->hdr + infosec->sh_offset;
+
+ if (prev) {
+ size -= prev - modinfo;
+ modinfo = module_next_tag_pair(prev, &size);
+ }
+
+ for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
+ if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
+ return p + taglen + 1;
+ }
+ return NULL;
+}
+
+static char *get_modinfo(const struct load_info *info, const char *tag)
+{
+ return get_next_modinfo(info, tag, NULL);
+}
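
A standalone sketch (not part of the patch) of walking a .modinfo-style blob of NUL-separated "tag=value" strings, mirroring what module_next_tag_pair() and get_next_modinfo() do above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* illustrative blob; real data comes from the .modinfo section */
	char blob[] = "license=GPL\0author=Jane\0import_ns=FOO_NS";
	size_t size = sizeof(blob);
	char *p = blob;

	while (p < blob + size) {
		if (*p)
			printf("entry: %s\n", p);
		p += strlen(p) + 1;	/* skip string and its NUL padding */
	}
	return 0;
}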
static int verify_namespace_is_imported(const struct load_info *info,
const struct kernel_symbol *sym,
@@ -1011,12 +1075,9 @@ static int verify_namespace_is_imported(const struct load_info *info,
namespace = kernel_symbol_namespace(sym);
if (namespace && namespace[0]) {
- imported_namespace = get_modinfo(info, "import_ns");
- while (imported_namespace) {
+ for_each_modinfo_entry(imported_namespace, info, "import_ns") {
if (strcmp(namespace, imported_namespace) == 0)
return 0;
- imported_namespace = get_next_modinfo(
- info, "import_ns", imported_namespace);
}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
pr_warn(
@@ -1143,6 +1204,46 @@ void __weak module_arch_freeing_init(struct module *mod)
{
}
+static bool mod_mem_use_vmalloc(enum mod_mem_type type)
+{
+ return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) &&
+ mod_mem_type_is_core_data(type);
+}
+
+static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
+{
+ if (mod_mem_use_vmalloc(type))
+ return vzalloc(size);
+ return module_alloc(size);
+}
+
+static void module_memory_free(void *ptr, enum mod_mem_type type)
+{
+ if (mod_mem_use_vmalloc(type))
+ vfree(ptr);
+ else
+ module_memfree(ptr);
+}
+
+static void free_mod_mem(struct module *mod)
+{
+ for_each_mod_mem_type(type) {
+ struct module_memory *mod_mem = &mod->mem[type];
+
+ if (type == MOD_DATA)
+ continue;
+
+ /* Free lock-classes; relies on the preceding sync_rcu(). */
+ lockdep_free_key_range(mod_mem->base, mod_mem->size);
+ if (mod_mem->size)
+ module_memory_free(mod_mem->base, type);
+ }
+
+ /* MOD_DATA hosts mod, so free it at last */
+ lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
+ module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
+}
+
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
@@ -1158,9 +1259,6 @@ static void free_module(struct module *mod)
mod->state = MODULE_STATE_UNFORMED;
mutex_unlock(&module_mutex);
- /* Remove dynamic debug info */
- ddebug_remove_module(mod->name);
-
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
@@ -1189,18 +1287,10 @@ static void free_module(struct module *mod)
/* This may be empty, but that's OK */
module_arch_freeing_init(mod);
- module_memfree(mod->init_layout.base);
kfree(mod->args);
percpu_modfree(mod);
- /* Free lock-classes; relies on the preceding sync_rcu(). */
- lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size);
-
- /* Finally, free the core (containing the module structure) */
- module_memfree(mod->core_layout.base);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- vfree(mod->data_layout.base);
-#endif
+ free_mod_mem(mod);
}
void *__symbol_get(const char *symbol)
@@ -1303,8 +1393,8 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
case SHN_ABS:
/* Don't need to do anything */
- pr_debug("Absolute symbol: 0x%08lx\n",
- (long)sym[i].st_value);
+ pr_debug("Absolute symbol: 0x%08lx %s\n",
+ (long)sym[i].st_value, name);
break;
case SHN_LIVEPATCH:
@@ -1387,16 +1477,18 @@ unsigned int __weak arch_mod_section_prepend(struct module *mod,
return 0;
}
-/* Update size with this section: return offset. */
-long module_get_offset(struct module *mod, unsigned int *size,
- Elf_Shdr *sechdr, unsigned int section)
+long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
+ Elf_Shdr *sechdr, unsigned int section)
{
- long ret;
+ long offset;
+ long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;
- *size += arch_mod_section_prepend(mod, section);
- ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
- *size = ret + sechdr->sh_size;
- return ret;
+ mod->mem[type].size += arch_mod_section_prepend(mod, section);
+ offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
+ mod->mem[type].size = offset + sechdr->sh_size;
+
+ WARN_ON_ONCE(offset & mask);
+ return offset | mask;
}
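
A standalone sketch (not part of the patch) of how module_get_offset_and_type() packs a mod_mem_type into the top bits of sh_entsize while keeping the section offset in the low bits; the shift/mask values below are made-up stand-ins for SH_ENTSIZE_TYPE_SHIFT/SH_ENTSIZE_TYPE_MASK, which this diff does not show:

#include <stdio.h>

#define DEMO_TYPE_SHIFT		60ULL
#define DEMO_TYPE_MASK		0xfULL
#define DEMO_OFFSET_MASK	((1ULL << DEMO_TYPE_SHIFT) - 1)

int main(void)
{
	unsigned long long type = 5;		/* e.g. an init-data type */
	unsigned long long offset = 0x1280;	/* offset within that region */
	unsigned long long entsize;

	entsize = offset | ((type & DEMO_TYPE_MASK) << DEMO_TYPE_SHIFT);

	/* move_module() and is_core_symbol() recover both halves like this: */
	printf("type=%llu offset=%#llx\n",
	       entsize >> DEMO_TYPE_SHIFT, entsize & DEMO_OFFSET_MASK);
	return 0;
}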
static bool module_init_layout_section(const char *sname)
@@ -1408,15 +1500,11 @@ static bool module_init_layout_section(const char *sname)
return module_init_section(sname);
}
-/*
- * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
- * might -- code, read-only data, read-write data, small data. Tally
- * sizes, and place the offsets into sh_entsize fields: high bit means it
- * belongs in init.
- */
-static void layout_sections(struct module *mod, struct load_info *info)
+static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
{
- static unsigned long const masks[][2] = {
+ unsigned int m, i;
+
+ static const unsigned long masks[][2] = {
/*
* NOTE: all executable code must be the first section
* in this array; otherwise modify the text_size
@@ -1428,85 +1516,64 @@ static void layout_sections(struct module *mod, struct load_info *info)
{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
};
- unsigned int m, i;
-
- for (i = 0; i < info->hdr->e_shnum; i++)
- info->sechdrs[i].sh_entsize = ~0UL;
+ static const int core_m_to_mem_type[] = {
+ MOD_TEXT,
+ MOD_RODATA,
+ MOD_RO_AFTER_INIT,
+ MOD_DATA,
+ MOD_INVALID, /* This is needed to match the masks array */
+ };
+ static const int init_m_to_mem_type[] = {
+ MOD_INIT_TEXT,
+ MOD_INIT_RODATA,
+ MOD_INVALID,
+ MOD_INIT_DATA,
+ MOD_INVALID, /* This is needed to match the masks array */
+ };
- pr_debug("Core section allocation order:\n");
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];
+
for (i = 0; i < info->hdr->e_shnum; ++i) {
Elf_Shdr *s = &info->sechdrs[i];
const char *sname = info->secstrings + s->sh_name;
- unsigned int *sizep;
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || module_init_layout_section(sname))
+ || is_init != module_init_layout_section(sname))
continue;
- sizep = m ? &mod->data_layout.size : &mod->core_layout.size;
- s->sh_entsize = module_get_offset(mod, sizep, s, i);
- pr_debug("\t%s\n", sname);
- }
- switch (m) {
- case 0: /* executable */
- mod->core_layout.size = strict_align(mod->core_layout.size);
- mod->core_layout.text_size = mod->core_layout.size;
- break;
- case 1: /* RO: text and ro-data */
- mod->data_layout.size = strict_align(mod->data_layout.size);
- mod->data_layout.ro_size = mod->data_layout.size;
- break;
- case 2: /* RO after init */
- mod->data_layout.size = strict_align(mod->data_layout.size);
- mod->data_layout.ro_after_init_size = mod->data_layout.size;
- break;
- case 4: /* whole core */
- mod->data_layout.size = strict_align(mod->data_layout.size);
- break;
- }
- }
-
- pr_debug("Init section allocation order:\n");
- for (m = 0; m < ARRAY_SIZE(masks); ++m) {
- for (i = 0; i < info->hdr->e_shnum; ++i) {
- Elf_Shdr *s = &info->sechdrs[i];
- const char *sname = info->secstrings + s->sh_name;
- if ((s->sh_flags & masks[m][0]) != masks[m][0]
- || (s->sh_flags & masks[m][1])
- || s->sh_entsize != ~0UL
- || !module_init_layout_section(sname))
+ if (WARN_ON_ONCE(type == MOD_INVALID))
continue;
- s->sh_entsize = (module_get_offset(mod, &mod->init_layout.size, s, i)
- | INIT_OFFSET_MASK);
+
+ s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
pr_debug("\t%s\n", sname);
}
- switch (m) {
- case 0: /* executable */
- mod->init_layout.size = strict_align(mod->init_layout.size);
- mod->init_layout.text_size = mod->init_layout.size;
- break;
- case 1: /* RO: text and ro-data */
- mod->init_layout.size = strict_align(mod->init_layout.size);
- mod->init_layout.ro_size = mod->init_layout.size;
- break;
- case 2:
- /*
- * RO after init doesn't apply to init_layout (only
- * core_layout), so it just takes the value of ro_size.
- */
- mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
- break;
- case 4: /* whole init */
- mod->init_layout.size = strict_align(mod->init_layout.size);
- break;
- }
}
}
-static void set_license(struct module *mod, const char *license)
+/*
+ * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ * might -- code, read-only data, read-write data, small data. Tally
+ * sizes, and place the offsets into sh_entsize fields: high bit means it
+ * belongs in init.
+ */
+static void layout_sections(struct module *mod, struct load_info *info)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->hdr->e_shnum; i++)
+ info->sechdrs[i].sh_entsize = ~0UL;
+
+ pr_debug("Core section allocation order for %s:\n", mod->name);
+ __layout_sections(mod, info, false);
+
+ pr_debug("Init section allocation order for %s:\n", mod->name);
+ __layout_sections(mod, info, true);
+}
+
+static void module_license_taint_check(struct module *mod, const char *license)
{
if (!license)
license = "unspecified";
@@ -1520,56 +1587,6 @@ static void set_license(struct module *mod, const char *license)
}
}
-/* Parse tag=value strings from .modinfo section */
-static char *next_string(char *string, unsigned long *secsize)
-{
- /* Skip non-zero chars */
- while (string[0]) {
- string++;
- if ((*secsize)-- <= 1)
- return NULL;
- }
-
- /* Skip any zero padding. */
- while (!string[0]) {
- string++;
- if ((*secsize)-- <= 1)
- return NULL;
- }
- return string;
-}
-
-static char *get_next_modinfo(const struct load_info *info, const char *tag,
- char *prev)
-{
- char *p;
- unsigned int taglen = strlen(tag);
- Elf_Shdr *infosec = &info->sechdrs[info->index.info];
- unsigned long size = infosec->sh_size;
-
- /*
- * get_modinfo() calls made before rewrite_section_headers()
- * must use sh_offset, as sh_addr isn't set!
- */
- char *modinfo = (char *)info->hdr + infosec->sh_offset;
-
- if (prev) {
- size -= prev - modinfo;
- modinfo = next_string(prev, &size);
- }
-
- for (p = modinfo; p; p = next_string(p, &size)) {
- if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
- return p + taglen + 1;
- }
- return NULL;
-}
-
-static char *get_modinfo(const struct load_info *info, const char *tag)
-{
- return get_next_modinfo(info, tag, NULL);
-}
-
static void setup_modinfo(struct module *mod, struct load_info *info)
{
struct module_attribute *attr;
@@ -1592,19 +1609,6 @@ static void free_modinfo(struct module *mod)
}
}
-static void dynamic_debug_setup(struct module *mod, struct _ddebug_info *dyndbg)
-{
- if (!dyndbg->num_descs)
- return;
- ddebug_add_module(dyndbg, mod->name);
-}
-
-static void dynamic_debug_remove(struct module *mod, struct _ddebug_info *dyndbg)
-{
- if (dyndbg->num_descs)
- ddebug_remove_module(mod->name);
-}
-
void * __weak module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
@@ -1642,16 +1646,33 @@ static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
}
/*
- * Sanity checks against invalid binaries, wrong arch, weird elf version.
+ * Check the userspace-passed ELF module against our expectations, and cache
+ * useful variables for further processing as we go.
*
- * Also do basic validity checks against section offsets and sizes, the
+ * This does basic validity checks against section offsets and sizes, the
* section name string table, and the indices used for it (sh_name).
+ *
+ * Since we're already checking the ELF sections, we also cache useful
+ * variables which will be used later for our convenience:
+ *
+ * o pointers to section headers
+ * o cache the modinfo symbol section
+ * o cache the string symbol section
+ * o cache the module section
+ *
+ * As a last step we set info->mod to the temporary copy of the module in
+ * info->hdr. The final one will be allocated in move_module(). Any
+ * modifications we make to our copy of the module will be carried over
+ * to the final minted module.
*/
-static int elf_validity_check(struct load_info *info)
+static int elf_validity_cache_copy(struct load_info *info, int flags)
{
unsigned int i;
Elf_Shdr *shdr, *strhdr;
int err;
+ unsigned int num_mod_secs = 0, mod_idx;
+ unsigned int num_info_secs = 0, info_idx;
+ unsigned int num_sym_secs = 0, sym_idx;
if (info->len < sizeof(*(info->hdr))) {
pr_err("Invalid ELF header len %lu\n", info->len);
@@ -1755,6 +1776,8 @@ static int elf_validity_check(struct load_info *info)
info->hdr->e_shnum);
goto no_exec;
}
+ num_sym_secs++;
+ sym_idx = i;
fallthrough;
default:
err = validate_section_offset(info, shdr);
@@ -1763,6 +1786,15 @@ static int elf_validity_check(struct load_info *info)
i, shdr->sh_type);
return err;
}
+ if (strcmp(info->secstrings + shdr->sh_name,
+ ".gnu.linkonce.this_module") == 0) {
+ num_mod_secs++;
+ mod_idx = i;
+ } else if (strcmp(info->secstrings + shdr->sh_name,
+ ".modinfo") == 0) {
+ num_info_secs++;
+ info_idx = i;
+ }
if (shdr->sh_flags & SHF_ALLOC) {
if (shdr->sh_name >= strhdr->sh_size) {
@@ -1775,6 +1807,91 @@ static int elf_validity_check(struct load_info *info)
}
}
+ if (num_info_secs > 1) {
+ pr_err("Only one .modinfo section must exist.\n");
+ goto no_exec;
+ } else if (num_info_secs == 1) {
+ /* Try to find a name early so we can log errors with a module name */
+ info->index.info = info_idx;
+ info->name = get_modinfo(info, "name");
+ }
+
+ if (num_sym_secs != 1) {
+ pr_warn("%s: module has no symbols (stripped?)\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ goto no_exec;
+ }
+
+ /* Sets internal symbols and strings. */
+ info->index.sym = sym_idx;
+ shdr = &info->sechdrs[sym_idx];
+ info->index.str = shdr->sh_link;
+ info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset;
+
+ /*
+ * The ".gnu.linkonce.this_module" ELF section is special. It is
+ * what modpost uses to refer to __this_module, and it lets us rely
+ * on THIS_MODULE to point to &__this_module properly. The kernel's
+ * modpost declares it in each module's *.mod.c file. If the struct
+ * module of the kernel changes, a full kernel rebuild is required.
+ *
+ * We have a few expectations for this special section; the following
+ * code validates all this for us:
+ *
+ * o Only one section must exist
+ * o We expect the kernel to always have to allocate it: SHF_ALLOC
+ * o The section size must match the kernel's run time's struct module
+ * size
+ */
+ if (num_mod_secs != 1) {
+ pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ goto no_exec;
+ }
+
+ shdr = &info->sechdrs[mod_idx];
+
+ /*
+ * This is already implied by the switch above, but let's be
+ * pedantic about it.
+ */
+ if (shdr->sh_type == SHT_NOBITS) {
+ pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ goto no_exec;
+ }
+
+ if (!(shdr->sh_flags & SHF_ALLOC)) {
+ pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ goto no_exec;
+ }
+
+ if (shdr->sh_size != sizeof(struct module)) {
+ pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ goto no_exec;
+ }
+
+ info->index.mod = mod_idx;
+
+ /* This is temporary: point mod into copy of data. */
+ info->mod = (void *)info->hdr + shdr->sh_offset;
+
+ /*
+ * If we didn't load the .modinfo 'name' field earlier, fall back to
+ * on-disk struct mod 'name' field.
+ */
+ if (!info->name)
+ info->name = info->mod->name;
+
+ if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
+ info->index.vers = 0; /* Pretend no __versions section! */
+ else
+ info->index.vers = find_sec(info, "__versions");
+
+ info->index.pcpu = find_pcpusec(info);
+
return 0;
no_exec:
@@ -1804,12 +1921,8 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
/* Nothing more to do */
return 0;
- if (set_livepatch_module(mod)) {
- add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
- pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
- mod->name);
+ if (set_livepatch_module(mod))
return 0;
- }
pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
mod->name);
@@ -1892,63 +2005,71 @@ static int rewrite_section_headers(struct load_info *info, int flags)
}
/*
- * Set up our basic convenience variables (pointers to section headers,
- * search for module section index etc), and do some basic section
- * verification.
- *
- * Set info->mod to the temporary copy of the module in info->hdr. The final one
- * will be allocated in move_module().
- */
-static int setup_load_info(struct load_info *info, int flags)
+ * These calls taint the kernel depending on certain module circumstances. */
+static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
{
- unsigned int i;
+ int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
- /* Try to find a name early so we can log errors with a module name */
- info->index.info = find_sec(info, ".modinfo");
- if (info->index.info)
- info->name = get_modinfo(info, "name");
+ if (!get_modinfo(info, "intree")) {
+ if (!test_taint(TAINT_OOT_MODULE))
+ pr_warn("%s: loading out-of-tree module taints kernel.\n",
+ mod->name);
+ add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
+ }
- /* Find internal symbols and strings. */
- for (i = 1; i < info->hdr->e_shnum; i++) {
- if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
- info->index.sym = i;
- info->index.str = info->sechdrs[i].sh_link;
- info->strtab = (char *)info->hdr
- + info->sechdrs[info->index.str].sh_offset;
- break;
- }
+ check_modinfo_retpoline(mod, info);
+
+ if (get_modinfo(info, "staging")) {
+ add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
+ pr_warn("%s: module is from the staging directory, the quality "
+ "is unknown, you have been warned.\n", mod->name);
}
- if (info->index.sym == 0) {
- pr_warn("%s: module has no symbols (stripped?)\n",
- info->name ?: "(missing .modinfo section or name field)");
- return -ENOEXEC;
+ if (is_livepatch_module(mod)) {
+ add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+ pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
+ mod->name);
}
- info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
- if (!info->index.mod) {
- pr_warn("%s: No module found in object\n",
- info->name ?: "(missing .modinfo section or name field)");
- return -ENOEXEC;
+ module_license_taint_check(mod, get_modinfo(info, "license"));
+
+ if (get_modinfo(info, "test")) {
+ if (!test_taint(TAINT_TEST))
+ pr_warn("%s: loading test module taints kernel.\n",
+ mod->name);
+ add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
}
- /* This is temporary: point mod into copy of data. */
- info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
+#ifdef CONFIG_MODULE_SIG
+ mod->sig_ok = info->sig_ok;
+ if (!mod->sig_ok) {
+ pr_notice_once("%s: module verification failed: signature "
+ "and/or required key missing - tainting "
+ "kernel\n", mod->name);
+ add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
+ }
+#endif
/*
- * If we didn't load the .modinfo 'name' field earlier, fall back to
- * on-disk struct mod 'name' field.
+ * ndiswrapper is under GPL by itself, but loads proprietary modules.
+ * Don't use add_taint_module(), as it would prevent ndiswrapper from
+ * using GPL-only symbols it needs.
*/
- if (!info->name)
- info->name = info->mod->name;
+ if (strcmp(mod->name, "ndiswrapper") == 0)
+ add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
- if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
- info->index.vers = 0; /* Pretend no __versions section! */
- else
- info->index.vers = find_sec(info, "__versions");
+ /* driverloader was caught wrongly pretending to be under GPL */
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
- info->index.pcpu = find_pcpusec(info);
+ /* lve claims to be GPL but upstream won't provide source */
+ if (strcmp(mod->name, "lve") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
+
+ if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
+ pr_warn("%s: module license taints kernel.\n", mod->name);
- return 0;
}
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
@@ -1970,35 +2091,10 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
return -ENOEXEC;
}
- if (!get_modinfo(info, "intree")) {
- if (!test_taint(TAINT_OOT_MODULE))
- pr_warn("%s: loading out-of-tree module taints kernel.\n",
- mod->name);
- add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
- }
-
- check_modinfo_retpoline(mod, info);
-
- if (get_modinfo(info, "staging")) {
- add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
- pr_warn("%s: module is from the staging directory, the quality "
- "is unknown, you have been warned.\n", mod->name);
- }
-
err = check_modinfo_livepatch(mod, info);
if (err)
return err;
- /* Set up license info based on the info section */
- set_license(mod, get_modinfo(info, "license"));
-
- if (get_modinfo(info, "test")) {
- if (!test_taint(TAINT_TEST))
- pr_warn("%s: loading test module taints kernel.\n",
- mod->name);
- add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
- }
-
return 0;
}
@@ -2110,10 +2206,14 @@ static int find_module_sections(struct module *mod, struct load_info *info)
if (section_addr(info, "__obsparm"))
pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
- info->dyndbg.descs = section_objs(info, "__dyndbg",
- sizeof(*info->dyndbg.descs), &info->dyndbg.num_descs);
- info->dyndbg.classes = section_objs(info, "__dyndbg_classes",
- sizeof(*info->dyndbg.classes), &info->dyndbg.num_classes);
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+ mod->dyndbg_info.descs = section_objs(info, "__dyndbg",
+ sizeof(*mod->dyndbg_info.descs),
+ &mod->dyndbg_info.num_descs);
+ mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes",
+ sizeof(*mod->dyndbg_info.classes),
+ &mod->dyndbg_info.num_classes);
+#endif
return 0;
}
@@ -2122,109 +2222,82 @@ static int move_module(struct module *mod, struct load_info *info)
{
int i;
void *ptr;
+ enum mod_mem_type t = 0;
+ int ret = -ENOMEM;
- /* Do the allocs. */
- ptr = module_alloc(mod->core_layout.size);
- /*
- * The pointer to this block is stored in the module structure
- * which is inside the block. Just mark it as not being a
- * leak.
- */
- kmemleak_not_leak(ptr);
- if (!ptr)
- return -ENOMEM;
-
- memset(ptr, 0, mod->core_layout.size);
- mod->core_layout.base = ptr;
-
- if (mod->init_layout.size) {
- ptr = module_alloc(mod->init_layout.size);
+ for_each_mod_mem_type(type) {
+ if (!mod->mem[type].size) {
+ mod->mem[type].base = NULL;
+ continue;
+ }
+ mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size);
+ ptr = module_memory_alloc(mod->mem[type].size, type);
/*
- * The pointer to this block is stored in the module structure
- * which is inside the block. This block doesn't need to be
- * scanned as it contains data and code that will be freed
- * after the module is initialized.
+ * The pointers to these blocks of memory are stored in the module
+ * structure and kept around for as long as the module is loaded;
+ * the memory is only freed when the module is unloaded, so just
+ * mark it as not being a leak. The .init* ELF sections *do* get
+ * freed once the module finishes initializing, so we *could* treat
+ * them slightly differently with kmemleak_ignore() and only grey
+ * them out, as they work like typical memory allocations which
+ * *do* eventually get freed, but let's just keep things simple
+ * and avoid *any* false positives.
*/
- kmemleak_ignore(ptr);
+ kmemleak_not_leak(ptr);
if (!ptr) {
- module_memfree(mod->core_layout.base);
- return -ENOMEM;
+ t = type;
+ goto out_enomem;
}
- memset(ptr, 0, mod->init_layout.size);
- mod->init_layout.base = ptr;
- } else
- mod->init_layout.base = NULL;
-
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- /* Do the allocs. */
- ptr = vzalloc(mod->data_layout.size);
- /*
- * The pointer to this block is stored in the module structure
- * which is inside the block. Just mark it as not being a
- * leak.
- */
- kmemleak_not_leak(ptr);
- if (!ptr) {
- module_memfree(mod->core_layout.base);
- module_memfree(mod->init_layout.base);
- return -ENOMEM;
+ memset(ptr, 0, mod->mem[type].size);
+ mod->mem[type].base = ptr;
}
- mod->data_layout.base = ptr;
-#endif
/* Transfer each section which specifies SHF_ALLOC */
- pr_debug("final section addresses:\n");
+ pr_debug("Final section addresses for %s:\n", mod->name);
for (i = 0; i < info->hdr->e_shnum; i++) {
void *dest;
Elf_Shdr *shdr = &info->sechdrs[i];
+ enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
- if (shdr->sh_entsize & INIT_OFFSET_MASK)
- dest = mod->init_layout.base
- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
- else if (!(shdr->sh_flags & SHF_EXECINSTR))
- dest = mod->data_layout.base + shdr->sh_entsize;
- else
- dest = mod->core_layout.base + shdr->sh_entsize;
+ dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK);
- if (shdr->sh_type != SHT_NOBITS)
+ if (shdr->sh_type != SHT_NOBITS) {
+ /*
+ * Our ELF checker already validated this, but let's
+ * be pedantic and make the goal clearer. We actually
+ * end up copying over all modifications made to the
+ * userspace copy of the entire struct module.
+ */
+ if (i == info->index.mod &&
+ (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) {
+ ret = -ENOEXEC;
+ goto out_enomem;
+ }
memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
- /* Update sh_addr to point to copy in image. */
+ }
+ /*
+ * Update the userspace copy's ELF section address to point to
+ * our newly allocated memory as a pure convenience, so that
+ * users of info can keep working against the newly minted
+ * official memory area.
+ */
shdr->sh_addr = (unsigned long)dest;
- pr_debug("\t0x%lx %s\n",
- (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
+ pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr,
+ (long)shdr->sh_size, info->secstrings + shdr->sh_name);
}
return 0;
+out_enomem:
+ for (t--; t >= 0; t--)
+ module_memory_free(mod->mem[t].base, t);
+ return ret;
}
-static int check_module_license_and_versions(struct module *mod)
+static int check_export_symbol_versions(struct module *mod)
{
- int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
-
- /*
- * ndiswrapper is under GPL by itself, but loads proprietary modules.
- * Don't use add_taint_module(), as it would prevent ndiswrapper from
- * using GPL-only symbols it needs.
- */
- if (strcmp(mod->name, "ndiswrapper") == 0)
- add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
-
- /* driverloader was caught wrongly pretending to be under GPL */
- if (strcmp(mod->name, "driverloader") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
- LOCKDEP_NOW_UNRELIABLE);
-
- /* lve claims to be GPL but upstream won't provide source */
- if (strcmp(mod->name, "lve") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
- LOCKDEP_NOW_UNRELIABLE);
-
- if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
- pr_warn("%s: module license taints kernel.\n", mod->name);
-
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !mod->crcs) ||
(mod->num_gpl_syms && !mod->gpl_crcs)) {
@@ -2242,12 +2315,14 @@ static void flush_module_icache(const struct module *mod)
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
- if (mod->init_layout.base)
- flush_icache_range((unsigned long)mod->init_layout.base,
- (unsigned long)mod->init_layout.base
- + mod->init_layout.size);
- flush_icache_range((unsigned long)mod->core_layout.base,
- (unsigned long)mod->core_layout.base + mod->core_layout.size);
+ for_each_mod_mem_type(type) {
+ const struct module_memory *mod_mem = &mod->mem[type];
+
+ if (mod_mem->size) {
+ flush_icache_range((unsigned long)mod_mem->base,
+ (unsigned long)mod_mem->base + mod_mem->size);
+ }
+ }
}
bool __weak module_elf_check_arch(Elf_Ehdr *hdr)
@@ -2290,10 +2365,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
unsigned int ndx;
int err;
- err = check_modinfo(info->mod, info, flags);
- if (err)
- return ERR_PTR(err);
-
/* Allow arches to frob section contents and sizes. */
err = module_frob_arch_sections(info->hdr, info->sechdrs,
info->secstrings, info->mod);
@@ -2350,11 +2421,8 @@ static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
module_arch_freeing_init(mod);
- module_memfree(mod->init_layout.base);
- module_memfree(mod->core_layout.base);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- vfree(mod->data_layout.base);
-#endif
+
+ free_mod_mem(mod);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2380,27 +2448,6 @@ static int post_relocation(struct module *mod, const struct load_info *info)
return module_finalize(info->hdr, info->sechdrs, mod);
}
-/* Is this module of this name done loading? No locks held. */
-static bool finished_loading(const char *name)
-{
- struct module *mod;
- bool ret;
-
- /*
- * The module_mutex should not be a heavily contended lock;
- * if we get the occasional sleep here, we'll go an extra iteration
- * in the wait_event_interruptible(), which is harmless.
- */
- sched_annotate_sleep();
- mutex_lock(&module_mutex);
- mod = find_module_all(name, strlen(name), true);
- ret = !mod || mod->state == MODULE_STATE_LIVE
- || mod->state == MODULE_STATE_GOING;
- mutex_unlock(&module_mutex);
-
- return ret;
-}
-
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
@@ -2415,7 +2462,9 @@ static void do_mod_ctors(struct module *mod)
/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
struct llist_node node;
- void *module_init;
+ void *init_text;
+ void *init_data;
+ void *init_rodata;
};
static void do_free_init(struct work_struct *w)
@@ -2429,7 +2478,9 @@ static void do_free_init(struct work_struct *w)
llist_for_each_safe(pos, n, list) {
initfree = container_of(pos, struct mod_initfree, node);
- module_memfree(initfree->module_init);
+ module_memfree(initfree->init_text);
+ module_memfree(initfree->init_data);
+ module_memfree(initfree->init_rodata);
kfree(initfree);
}
}
@@ -2450,13 +2501,27 @@ static noinline int do_init_module(struct module *mod)
{
int ret = 0;
struct mod_initfree *freeinit;
+#if defined(CONFIG_MODULE_STATS)
+ unsigned int text_size = 0, total_size = 0;
+
+ for_each_mod_mem_type(type) {
+ const struct module_memory *mod_mem = &mod->mem[type];
+ if (mod_mem->size) {
+ total_size += mod_mem->size;
+ if (type == MOD_TEXT || type == MOD_INIT_TEXT)
+ text_size += mod_mem->size;
+ }
+ }
+#endif
freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
if (!freeinit) {
ret = -ENOMEM;
goto fail;
}
- freeinit->module_init = mod->init_layout.base;
+ freeinit->init_text = mod->mem[MOD_INIT_TEXT].base;
+ freeinit->init_data = mod->mem[MOD_INIT_DATA].base;
+ freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base;
do_mod_ctors(mod);
/* Start the module */
@@ -2492,8 +2557,8 @@ static noinline int do_init_module(struct module *mod)
if (!mod->async_probe_requested)
async_synchronize_full();
- ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
- mod->init_layout.size);
+ ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
+ mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
mutex_lock(&module_mutex);
/* Drop initial reference. */
module_put(mod);
@@ -2505,11 +2570,11 @@ static noinline int do_init_module(struct module *mod)
module_enable_ro(mod, true);
mod_tree_remove_init(mod);
module_arch_freeing_init(mod);
- mod->init_layout.base = NULL;
- mod->init_layout.size = 0;
- mod->init_layout.ro_size = 0;
- mod->init_layout.ro_after_init_size = 0;
- mod->init_layout.text_size = 0;
+ for_class_mod_mem_type(type, init) {
+ mod->mem[type].base = NULL;
+ mod->mem[type].size = 0;
+ }
+
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
mod->btf_data = NULL;
@@ -2533,6 +2598,11 @@ static noinline int do_init_module(struct module *mod)
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
+ mod_stat_add_long(text_size, &total_text_size);
+ mod_stat_add_long(total_size, &total_mod_size);
+
+ mod_stat_inc(&modcount);
+
return 0;
fail_free_freeinit:
@@ -2548,6 +2618,7 @@ fail:
ftrace_release_mod(mod);
free_module(mod);
wake_up_all(&module_wq);
+
return ret;
}
@@ -2559,6 +2630,67 @@ static int may_init_module(void)
return 0;
}
+/* Is this module of this name done loading? No locks held. */
+static bool finished_loading(const char *name)
+{
+ struct module *mod;
+ bool ret;
+
+ /*
+ * The module_mutex should not be a heavily contended lock;
+ * if we get the occasional sleep here, we'll go an extra iteration
+ * in the wait_event_interruptible(), which is harmless.
+ */
+ sched_annotate_sleep();
+ mutex_lock(&module_mutex);
+ mod = find_module_all(name, strlen(name), true);
+ ret = !mod || mod->state == MODULE_STATE_LIVE
+ || mod->state == MODULE_STATE_GOING;
+ mutex_unlock(&module_mutex);
+
+ return ret;
+}
+
+/* Must be called with module_mutex held */
+static int module_patient_check_exists(const char *name,
+ enum fail_dup_mod_reason reason)
+{
+ struct module *old;
+ int err = 0;
+
+ old = find_module_all(name, strlen(name), true);
+ if (old == NULL)
+ return 0;
+
+ if (old->state == MODULE_STATE_COMING ||
+ old->state == MODULE_STATE_UNFORMED) {
+ /* Wait in case it fails to load. */
+ mutex_unlock(&module_mutex);
+ err = wait_event_interruptible(module_wq,
+ finished_loading(name));
+ mutex_lock(&module_mutex);
+ if (err)
+ return err;
+
+ /* The module might have gone in the meantime. */
+ old = find_module_all(name, strlen(name), true);
+ }
+
+ if (try_add_failed_module(name, reason))
+ pr_warn("Could not add fail-tracking for module: %s\n", name);
+
+ /*
+ * We are here only when the same module was being loaded. Do
+ * not try to load it again right now. It prevents long delays
+ * caused by serialized module load failures. It might happen
+ * when more devices of the same type trigger load of
+ * a particular module.
+ */
+ if (old && old->state == MODULE_STATE_LIVE)
+ return -EEXIST;
+ return -EBUSY;
+}
+
/*
* We try to place it in the list now to make sure it's unique before
* we dedicate too many resources. In particular, temporary percpu
@@ -2567,41 +2699,14 @@ static int may_init_module(void)
static int add_unformed_module(struct module *mod)
{
int err;
- struct module *old;
mod->state = MODULE_STATE_UNFORMED;
mutex_lock(&module_mutex);
- old = find_module_all(mod->name, strlen(mod->name), true);
- if (old != NULL) {
- if (old->state == MODULE_STATE_COMING
- || old->state == MODULE_STATE_UNFORMED) {
- /* Wait in case it fails to load. */
- mutex_unlock(&module_mutex);
- err = wait_event_interruptible(module_wq,
- finished_loading(mod->name));
- if (err)
- goto out_unlocked;
-
- /* The module might have gone in the meantime. */
- mutex_lock(&module_mutex);
- old = find_module_all(mod->name, strlen(mod->name),
- true);
- }
-
- /*
- * We are here only when the same module was being loaded. Do
- * not try to load it again right now. It prevents long delays
- * caused by serialized module load failures. It might happen
- * when more devices of the same type trigger load of
- * a particular module.
- */
- if (old && old->state == MODULE_STATE_LIVE)
- err = -EEXIST;
- else
- err = -EBUSY;
+ err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD);
+ if (err)
goto out;
- }
+
mod_update_bounds(mod);
list_add_rcu(&mod->list, &modules);
mod_tree_insert(mod);
@@ -2609,7 +2714,6 @@ static int add_unformed_module(struct module *mod)
out:
mutex_unlock(&module_mutex);
-out_unlocked:
return err;
}
@@ -2628,9 +2732,6 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_bug_finalize(info->hdr, info->sechdrs, mod);
module_cfi_finalize(info->hdr, info->sechdrs, mod);
- if (module_check_misalignment(mod))
- goto out_misaligned;
-
module_enable_ro(mod, false);
module_enable_nx(mod);
module_enable_x(mod);
@@ -2644,8 +2745,6 @@ static int complete_formation(struct module *mod, struct load_info *info)
return 0;
-out_misaligned:
- err = -EINVAL;
out:
mutex_unlock(&module_mutex);
return err;
@@ -2688,6 +2787,39 @@ static int unknown_module_param_cb(char *param, char *val, const char *modname,
return 0;
}
+/* Module within temporary copy, this doesn't do any allocation */
+static int early_mod_check(struct load_info *info, int flags)
+{
+ int err;
+
+ /*
+ * Now that we know we have the correct module name, check
+ * if it's blacklisted.
+ */
+ if (blacklisted(info->name)) {
+ pr_err("Module %s is blacklisted\n", info->name);
+ return -EPERM;
+ }
+
+ err = rewrite_section_headers(info, flags);
+ if (err)
+ return err;
+
+ /* Check module struct version now, before we try to use module. */
+ if (!check_modstruct_version(info, info->mod))
+ return -ENOEXEC;
+
+ err = check_modinfo(info->mod, info, flags);
+ if (err)
+ return err;
+
+ mutex_lock(&module_mutex);
+ err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING);
+ mutex_unlock(&module_mutex);
+
+ return err;
+}
+
/*
* Allocate and load the module: note that size of section 0 is always
* zero, and we rely on this for optional sections.
@@ -2696,6 +2828,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
int flags)
{
struct module *mod;
+ bool module_allocated = false;
long err = 0;
char *after_dashes;
@@ -2717,40 +2850,17 @@ static int load_module(struct load_info *info, const char __user *uargs,
/*
* Do basic sanity checks against the ELF header and
- * sections.
+ * sections. Cache useful sections and set the
+ * info->mod to the userspace passed struct module.
*/
- err = elf_validity_check(info);
+ err = elf_validity_cache_copy(info, flags);
if (err)
goto free_copy;
- /*
- * Everything checks out, so set up the section info
- * in the info structure.
- */
- err = setup_load_info(info, flags);
+ err = early_mod_check(info, flags);
if (err)
goto free_copy;
- /*
- * Now that we know we have the correct module name, check
- * if it's blacklisted.
- */
- if (blacklisted(info->name)) {
- err = -EPERM;
- pr_err("Module %s is blacklisted\n", info->name);
- goto free_copy;
- }
-
- err = rewrite_section_headers(info, flags);
- if (err)
- goto free_copy;
-
- /* Check module struct version now, before we try to use module. */
- if (!check_modstruct_version(info, info->mod)) {
- err = -ENOEXEC;
- goto free_copy;
- }
-
/* Figure out module layout, and allocate all the memory. */
mod = layout_and_allocate(info, flags);
if (IS_ERR(mod)) {
@@ -2758,6 +2868,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
goto free_copy;
}
+ module_allocated = true;
+
audit_log_kern_module(mod->name);
/* Reserve our place in the list. */
@@ -2765,15 +2877,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err)
goto free_module;
-#ifdef CONFIG_MODULE_SIG
- mod->sig_ok = info->sig_ok;
- if (!mod->sig_ok) {
- pr_notice_once("%s: module verification failed: signature "
- "and/or required key missing - tainting "
- "kernel\n", mod->name);
- add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
- }
-#endif
+ /*
+ * We are tainting your kernel if your module gets into
+ * the modules linked list somehow.
+ */
+ module_augment_kernel_taints(mod, info);
/* To avoid stressing percpu allocator, do this once we're unique. */
err = percpu_modalloc(mod, info);
@@ -2795,7 +2903,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err)
goto free_unload;
- err = check_module_license_and_versions(mod);
+ err = check_export_symbol_versions(mod);
if (err)
goto free_unload;
@@ -2825,7 +2933,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
}
init_build_id(mod, info);
- dynamic_debug_setup(mod, &info->dyndbg);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
ftrace_module_init(mod);
@@ -2889,7 +2996,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
ftrace_release_mod(mod);
- dynamic_debug_remove(mod, &info->dyndbg);
synchronize_rcu();
kfree(mod->args);
free_arch_cleanup:
@@ -2908,11 +3014,22 @@ static int load_module(struct load_info *info, const char __user *uargs,
synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
+ mod_stat_bump_invalid(info, flags);
/* Free lock-classes; relies on the preceding sync_rcu() */
- lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size);
+ for_class_mod_mem_type(type, core_data) {
+ lockdep_free_key_range(mod->mem[type].base,
+ mod->mem[type].size);
+ }
module_deallocate(mod, info);
free_copy:
+ /*
+ * The info->len is always set. We distinguish between failures
+ * that happen once the proper module has been allocated and
+ * failures that happen before that.
+ */
+ if (!module_allocated)
+ mod_stat_bump_becoming(info, flags);
free_copy(info, flags);
return err;
}
@@ -2931,8 +3048,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
umod, len, uargs);
err = copy_module_from_user(umod, len, &info);
- if (err)
+ if (err) {
+ mod_stat_inc(&failed_kreads);
+ mod_stat_add_long(len, &invalid_kread_bytes);
return err;
+ }
return load_module(&info, uargs, 0);
}
@@ -2957,14 +3077,20 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL,
READING_MODULE);
- if (len < 0)
+ if (len < 0) {
+ mod_stat_inc(&failed_kreads);
+ mod_stat_add_long(len, &invalid_kread_bytes);
return len;
+ }
if (flags & MODULE_INIT_COMPRESSED_FILE) {
err = module_decompress(&info, buf, len);
vfree(buf); /* compressed data is no longer needed */
- if (err)
+ if (err) {
+ mod_stat_inc(&failed_decompress);
+ mod_stat_add_long(len, &invalid_decompress_bytes);
return err;
+ }
} else {
info.hdr = buf;
info.len = len;
@@ -2973,11 +3099,6 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
return load_module(&info, uargs, flags);
}
-static inline int within(unsigned long addr, void *start, unsigned long size)
-{
- return ((void *)addr >= start && (void *)addr < start + size);
-}
-
/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
char *module_flags(struct module *mod, char *buf, bool show_state)
{
@@ -3060,20 +3181,21 @@ bool is_module_address(unsigned long addr)
struct module *__module_address(unsigned long addr)
{
struct module *mod;
- struct mod_tree_root *tree;
if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
- tree = &mod_tree;
+ goto lookup;
+
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- else if (addr >= mod_data_tree.addr_min && addr <= mod_data_tree.addr_max)
- tree = &mod_data_tree;
+ if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
+ goto lookup;
#endif
- else
- return NULL;
+ return NULL;
+
+lookup:
module_assert_mutex_or_preempt();
- mod = mod_find(addr, tree);
+ mod = mod_find(addr, &mod_tree);
if (mod) {
BUG_ON(!within_module(addr, mod));
if (mod->state == MODULE_STATE_UNFORMED)
@@ -3113,8 +3235,8 @@ struct module *__module_text_address(unsigned long addr)
struct module *mod = __module_address(addr);
if (mod) {
/* Make sure it's within the text section. */
- if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
- && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
+ if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
+ !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
mod = NULL;
}
return mod;
@@ -3142,3 +3264,14 @@ void print_modules(void)
last_unloaded_module.taints);
pr_cont("\n");
}
+
+#ifdef CONFIG_MODULE_DEBUGFS
+struct dentry *mod_debugfs_root;
+
+static int module_debugfs_init(void)
+{
+ mod_debugfs_root = debugfs_create_dir("modules", NULL);
+ return 0;
+}
+module_init(module_debugfs_init);
+#endif
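
The main.c hunks above iterate over a per-type mod->mem[] array instead of the old core/init/data layouts; that representation is introduced in include/linux/module.h earlier in this series and is not visible in this diff. A rough sketch of what these hunks assume, reproduced from memory rather than taken from the patch itself:

/* Per-type module memory; replaces struct module_layout (sketch). */
struct module_memory {
	void *base;
	unsigned int size;

#ifdef CONFIG_MODULES_TREE_LOOKUP
	struct mod_tree_node mtn;
#endif
};

/* struct module carries one entry per memory type (sketch). */
struct module {
	/* ... */
	struct module_memory mem[MOD_MEM_NUM_TYPES];
	/* ... */
};

/* Helper assumed by the __module_text_address() hunk above (sketch). */
static inline bool within_module_mem_type(unsigned long addr,
					  const struct module *mod,
					  enum mod_mem_type type)
{
	unsigned long base, size;

	base = (unsigned long)mod->mem[type].base;
	size = mod->mem[type].size;
	return addr - base < size;
}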
diff --git a/kernel/module/procfs.c b/kernel/module/procfs.c
index cf5b9f1e6ec4..0a4841e88adb 100644
--- a/kernel/module/procfs.c
+++ b/kernel/module/procfs.c
@@ -62,6 +62,15 @@ static void m_stop(struct seq_file *m, void *p)
mutex_unlock(&module_mutex);
}
+static unsigned int module_total_size(struct module *mod)
+{
+ int size = 0;
+
+ for_each_mod_mem_type(type)
+ size += mod->mem[type].size;
+ return size;
+}
+
static int m_show(struct seq_file *m, void *p)
{
struct module *mod = list_entry(p, struct module, list);
@@ -73,10 +82,7 @@ static int m_show(struct seq_file *m, void *p)
if (mod->state == MODULE_STATE_UNFORMED)
return 0;
- size = mod->init_layout.size + mod->core_layout.size;
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- size += mod->data_layout.size;
-#endif
+ size = module_total_size(mod);
seq_printf(m, "%s %u", mod->name, size);
print_unload_info(m, mod);
@@ -86,7 +92,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
- value = m->private ? NULL : mod->core_layout.base;
+ value = m->private ? NULL : mod->mem[MOD_TEXT].base;
seq_printf(m, " 0x%px", value);
/* Taints info */
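
module_total_size() above, as well as the strict_rwx.c and tree_lookup.c hunks further below, rely on a memory-type enum and iteration helpers added elsewhere in this series (include/linux/module.h). The following is only a sketch of roughly what they look like; the names follow the series but the exact definitions are not part of this diff:

/* Memory types; core types come before init types (sketch). */
enum mod_mem_type {
	MOD_TEXT = 0,
	MOD_DATA,
	MOD_RODATA,
	MOD_RO_AFTER_INIT,
	MOD_INIT_TEXT,
	MOD_INIT_DATA,
	MOD_INIT_RODATA,

	MOD_MEM_NUM_TYPES,
	MOD_INVALID = -1,
};

#define mod_mem_type_is_init(type)	\
	((type) == MOD_INIT_TEXT ||	\
	 (type) == MOD_INIT_DATA ||	\
	 (type) == MOD_INIT_RODATA)

#define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type))

#define mod_mem_type_is_text(type)	\
	((type) == MOD_TEXT || (type) == MOD_INIT_TEXT)

#define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type))

#define mod_mem_type_is_core_data(type)	\
	(mod_mem_type_is_core(type) && mod_mem_type_is_data(type))

/* Iterate over all types, or only over one class of types. */
#define for_each_mod_mem_type(type)			\
	for (enum mod_mem_type (type) = 0;		\
	     (type) < MOD_MEM_NUM_TYPES; (type)++)

#define for_class_mod_mem_type(type, class)		\
	for_each_mod_mem_type(type)			\
		if (mod_mem_type_is_##class(type))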
diff --git a/kernel/module/stats.c b/kernel/module/stats.c
new file mode 100644
index 000000000000..ad7b6ada29f2
--- /dev/null
+++ b/kernel/module/stats.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Debugging module statistics.
+ *
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <uapi/linux/module.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/rculist.h>
+#include <linux/math.h>
+
+#include "internal.h"
+
+/**
+ * DOC: module debugging statistics overview
+ *
+ * Enabling CONFIG_MODULE_STATS enables module debugging statistics which
+ * are useful for monitoring and root-causing memory pressure issues with
+ * module loading. These statistics also help us improve production
+ * workloads.
+ *
+ * The module debugging statistics currently supported help keep track of module
+ * loading failures so that improvements can be made either to kernel module
+ * auto-loading (request_module()) or to interactions with userspace. Statistics
+ * are provided to track all possible failures in the finit_module() path and the
+ * memory wasted in the process. Each of the failure counters is associated
+ * with a type of module loading failure which is known to incur a certain amount
+ * of memory allocation loss. In the worst case loading a module will fail after
+ * a three-step memory allocation process:
+ *
+ * a) memory allocated with kernel_read_file_from_fd()
+ * b) module decompression processes the file read from
+ * kernel_read_file_from_fd(), and vmap() is used to map
+ * the decompressed module to a new local buffer which represents
+ * a copy of the decompressed module passed from userspace. The buffer
+ * from kernel_read_file_from_fd() is freed right away.
+ * c) layout_and_allocate() allocates space for the final resting
+ * place where we would keep the module if it were to be processed
+ * successfully.
+ *
+ * If a failure occurs after these three different allocations only one
+ * counter will be incremented with the sum of the allocated bytes freed
+ * during this failure. Likewise, if module loading failed only after
+ * step b) a separate counter is used and incremented for the bytes freed and
+ * not used during both of those allocations.
+ *
+ * Virtual memory space can be limited, for example on x86-32 the vmalloc() space
+ * defaults to 128 MiB. We should strive to limit and avoid wasting virtual
+ * memory allocations when possible. These module debugging statistics help
+ * to evaluate how much memory is being wasted on bootup due to module loading
+ * failures.
+ *
+ * All counters are designed to be incremental. Atomic counters are used
+ * to keep the implementation simple and to avoid delays and deadlocks.
+ */
+
+/**
+ * DOC: dup_failed_modules - tracks duplicate failed modules
+ *
+ * Linked list of modules which failed to be loaded because an already existing
+ * module with the same name was already being processed or already loaded.
+ * The finit_module() system call incurs heavy virtual memory allocations. In
+ * the worst case an finit_module() system call can end up allocating virtual
+ * memory 3 times:
+ *
+ * 1) kernel_read_file_from_fd() call uses vmalloc()
+ * 2) optional module decompression uses vmap()
+ * 3) layout_and_allocate() can use vzalloc() or an arch-specific variation of
+ * vmalloc() to deal with ELF sections requiring special permissions
+ *
+ * In practice on a typical boot today most finit_module() calls fail due to
+ * the module with the same name already being loaded or about to be processed.
+ * All virtual memory allocated to these failed modules will be freed with
+ * no functional use.
+ *
+ * To help with this, the dup_failed_modules list allows us to track modules which
+ * failed to load because a module with the same name was already loaded or being
+ * processed. There are only two points at which such calls can fail;
+ * we list them below along with the number of virtual memory allocation
+ * calls:
+ *
+ * a) FAIL_DUP_MOD_BECOMING: at the end of early_mod_check() before
+ * layout_and_allocate().
+ * - with module decompression: 2 virtual memory allocation calls
+ * - without module decompression: 1 virtual memory allocation call
+ * b) FAIL_DUP_MOD_LOAD: after layout_and_allocate() on add_unformed_module()
+ * - with module decompression: 3 virtual memory allocation calls
+ * - without module decompression: 2 virtual memory allocation calls
+ *
+ * We should strive to get this list to be as small as possible. If this list
+ * is not empty it is a reflection of possible work or optimizations, either
+ * in-kernel or in userspace.
+ */
+static LIST_HEAD(dup_failed_modules);
+
+/**
+ * DOC: module statistics debugfs counters
+ *
+ * The total amount of wasted virtual memory allocation space during module
+ * loading can be computed with the summation:
+ *
+ * * @invalid_kread_bytes +
+ * @invalid_decompress_bytes +
+ * @invalid_becoming_bytes +
+ * @invalid_mod_bytes
+ *
+ * The following debugfs counters are available to inspect module loading
+ * failures:
+ *
+ * * total_mod_size: total bytes ever used by all modules we've dealt with on
+ * this system
+ * * total_text_size: total bytes of the .text and .init.text ELF sections
+ * of all modules we've dealt with on this system
+ * * invalid_kread_bytes: bytes allocated and then freed on failures which
+ * happen due to the initial kernel_read_file_from_fd(). kernel_read_file_from_fd()
+ * uses vmalloc(). These should typically not happen unless your system is
+ * under memory pressure.
+ * * invalid_decompress_bytes: number of bytes allocated and freed due to
+ * memory allocations in the module decompression path that use vmap().
+ * These typically should not happen unless your system is under memory
+ * pressure.
+ * * invalid_becoming_bytes: total number of bytes allocated and freed while
+ * reading the kernel module userspace wants us to read before we promote
+ * it to be processed and added to our @modules linked list. These
+ * failures can happen if a check fails between a successful kernel_read_file_from_fd()
+ * call and right before we allocate our private memory for the module
+ * which would be kept if the module is successfully loaded. The most common
+ * reason for this failure is when userspace is racing to load a module
+ * which it does not yet see loaded. The first module to succeed in
+ * add_unformed_module() will add a module to our &modules list and
+ * subsequent loads of modules with the same name will error out at the
+ * end of early_mod_check(). The check for module_patient_check_exists()
+ * at the end of early_mod_check() prevents duplicate allocations
+ * on layout_and_allocate() for modules already being processed. These
+ * duplicate failed modules are non-fatal, however they are typically
+ * indicative of userspace not yet seeing the module as loaded and
+ * unnecessarily trying to load it before the kernel even has a chance
+ * to begin to process prior requests. Although duplicate failures can be
+ * non-fatal, we should try to reduce vmalloc() pressure proactively, so
+ * ideally after boot this will be as close to 0 as possible. If module
+ * decompression was used we also add to this counter the cost of the
+ * initial kernel_read_file_from_fd() of the compressed module. If module
+ * decompression was not used the value represents the total allocated and
+ * freed bytes in kernel_read_file_from_fd() calls for these types of
+ * failures. These failures can occur because:
+ *
+ * * module_sig_check() - module signature checks
+ * * elf_validity_cache_copy() - some ELF validation issue
+ * * early_mod_check():
+ *
+ * * blacklisting
+ * * failed to rewrite section headers
+ * * version magic
+ * * live patch requirements didn't check out
+ * * the module was detected as being already present
+ *
+ * * invalid_mod_bytes: these are the total number of bytes allocated and
+ * freed due to failures after we did all the sanity checks of the module
+ * which userspace passed to us and after our first check that the module
+ * is unique. A module can still fail to load if we detect the module is
+ * already loaded after we allocate space for it with layout_and_allocate();
+ * we do this check right before processing the module as live and running
+ * its initialization routines. Note that if a failure of this type occurs it
+ * also means the respective kernel_read_file_from_fd() memory space was
+ * also freed and not used, and so we increment this counter with twice
+ * the size of the module. Additionally, if you used module decompression
+ * the size of the compressed module is also added to this counter.
+ *
+ * * modcount: how many modules we've loaded in our kernel life time
+ * * failed_kreads: how many modules failed due to failed kernel_read_file_from_fd()
+ * * failed_decompress: how many failed module decompression attempts we've had.
+ * These really should not happen unless your compression / decompression
+ * is broken.
+ * * failed_becoming: how many modules failed after we kernel_read_file_from_fd()
+ * it and before we allocate memory for it with layout_and_allocate(). This
+ * counter is never incremented if you manage to validate the module and
+ * call layout_and_allocate() for it.
+ * * failed_load_modules: how many modules failed once we've allocated our
+ * private space for our module using layout_and_allocate(). These failures
+ * should hopefully mostly be dealt with already. Races in theory could
+ * still exist here, but it would just mean the kernel had started processing
+ * two requests concurrently up to early_mod_check() and one of them won.
+ * These failures are good signs the kernel or userspace is doing something
+ * seriously stupid or that could be improved. We should strive to fix these,
+ * but it is perhaps not easy to fix them. A recent example is the module
+ * requests incurred for frequency modules: a separate module request was
+ * being issued for each CPU on a system.
+ */
+
+atomic_long_t total_mod_size;
+atomic_long_t total_text_size;
+atomic_long_t invalid_kread_bytes;
+atomic_long_t invalid_decompress_bytes;
+static atomic_long_t invalid_becoming_bytes;
+static atomic_long_t invalid_mod_bytes;
+atomic_t modcount;
+atomic_t failed_kreads;
+atomic_t failed_decompress;
+static atomic_t failed_becoming;
+static atomic_t failed_load_modules;
+
+static const char *mod_fail_to_str(struct mod_fail_load *mod_fail)
+{
+ if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask) &&
+ test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask))
+ return "Becoming & Load";
+ if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask))
+ return "Becoming";
+ if (test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask))
+ return "Load";
+ return "Bug-on-stats";
+}
+
+void mod_stat_bump_invalid(struct load_info *info, int flags)
+{
+ atomic_long_add(info->len * 2, &invalid_mod_bytes);
+ atomic_inc(&failed_load_modules);
+#if defined(CONFIG_MODULE_DECOMPRESS)
+ if (flags & MODULE_INIT_COMPRESSED_FILE)
+ atomic_long_add(info->compressed_len, &invalid_mod_bytes);
+#endif
+}
+
+void mod_stat_bump_becoming(struct load_info *info, int flags)
+{
+ atomic_inc(&failed_becoming);
+ atomic_long_add(info->len, &invalid_becoming_bytes);
+#if defined(CONFIG_MODULE_DECOMPRESS)
+ if (flags & MODULE_INIT_COMPRESSED_FILE)
+ atomic_long_add(info->compressed_len, &invalid_becoming_bytes);
+#endif
+}
+
+int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason)
+{
+ struct mod_fail_load *mod_fail;
+
+ list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list,
+ lockdep_is_held(&module_mutex)) {
+ if (!strcmp(mod_fail->name, name)) {
+ atomic_long_inc(&mod_fail->count);
+ __set_bit(reason, &mod_fail->dup_fail_mask);
+ goto out;
+ }
+ }
+
+ mod_fail = kzalloc(sizeof(*mod_fail), GFP_KERNEL);
+ if (!mod_fail)
+ return -ENOMEM;
+ memcpy(mod_fail->name, name, strlen(name));
+ __set_bit(reason, &mod_fail->dup_fail_mask);
+ atomic_long_inc(&mod_fail->count);
+ list_add_rcu(&mod_fail->list, &dup_failed_modules);
+out:
+ return 0;
+}
+
+/*
+ * At 64 bytes per module and assuming a 1024 byte preamble we can fit
+ * 112 module prints within 8k.
+ *
+ * 1024 + (64*112) = 8k
+ */
+#define MAX_PREAMBLE 1024
+#define MAX_FAILED_MOD_PRINT 112
+#define MAX_BYTES_PER_MOD 64
+static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mod_fail_load *mod_fail;
+ unsigned int len, size, count_failed = 0;
+ ssize_t ret;
+ char *buf;
+ u32 live_mod_count, fkreads, fdecompress, fbecoming, floads;
+ unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes,
+ idecompress_bytes, imod_bytes, total_virtual_lost;
+
+ live_mod_count = atomic_read(&modcount);
+ fkreads = atomic_read(&failed_kreads);
+ fdecompress = atomic_read(&failed_decompress);
+ fbecoming = atomic_read(&failed_becoming);
+ floads = atomic_read(&failed_load_modules);
+
+ total_size = atomic_long_read(&total_mod_size);
+ text_size = atomic_long_read(&total_text_size);
+ ikread_bytes = atomic_long_read(&invalid_kread_bytes);
+ idecompress_bytes = atomic_long_read(&invalid_decompress_bytes);
+ ibecoming_bytes = atomic_long_read(&invalid_becoming_bytes);
+ imod_bytes = atomic_long_read(&invalid_mod_bytes);
+
+ total_virtual_lost = ikread_bytes + idecompress_bytes + ibecoming_bytes + imod_bytes;
+
+ size = MAX_PREAMBLE + min((unsigned int)(floads + fbecoming),
+ (unsigned int)MAX_FAILED_MOD_PRINT) * MAX_BYTES_PER_MOD;
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* The beginning of our debug preamble */
+ len = scnprintf(buf, size, "%25s\t%u\n", "Mods ever loaded", live_mod_count);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on kread", fkreads);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on decompress",
+ fdecompress);
+ len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on becoming", fbecoming);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on load", floads);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total module size", total_size);
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total mod text size", text_size);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kread bytes", ikread_bytes);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed decompress bytes",
+ idecompress_bytes);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed becoming bytes", ibecoming_bytes);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kmod bytes", imod_bytes);
+
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Virtual mem wasted bytes", total_virtual_lost);
+
+ if (live_mod_count && total_size) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod size",
+ DIV_ROUND_UP(total_size, live_mod_count));
+ }
+
+ if (live_mod_count && text_size) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod text size",
+ DIV_ROUND_UP(text_size, live_mod_count));
+ }
+
+ /*
+ * We use WARN_ON_ONCE() for the counters to ensure we always have parity:
+ * each type of failure is tracked with exactly one type of byte counter.
+ * The counter for imod_bytes, for example, does not increase for fkreads
+ * failures, and so on.
+ */
+
+ WARN_ON_ONCE(ikread_bytes && !fkreads);
+ if (fkreads && ikread_bytes) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail kread bytes",
+ DIV_ROUND_UP(ikread_bytes, fkreads));
+ }
+
+ WARN_ON_ONCE(ibecoming_bytes && !fbecoming);
+ if (fbecoming && ibecoming_bytes) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail becoming bytes",
+ DIV_ROUND_UP(ibecoming_bytes, fbecoming));
+ }
+
+ WARN_ON_ONCE(idecompress_bytes && !fdecompress);
+ if (fdecompress && idecompress_bytes) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail decomp bytes",
+ DIV_ROUND_UP(idecompress_bytes, fdecompress));
+ }
+
+ WARN_ON_ONCE(imod_bytes && !floads);
+ if (floads && imod_bytes) {
+ len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average fail load bytes",
+ DIV_ROUND_UP(imod_bytes, floads));
+ }
+
+ /* End of our debug preamble header. */
+
+ /* Catch when we've gone beyond our expected preamble */
+ WARN_ON_ONCE(len >= MAX_PREAMBLE);
+
+ if (list_empty(&dup_failed_modules))
+ goto out;
+
+ len += scnprintf(buf + len, size - len, "Duplicate failed modules:\n");
+ len += scnprintf(buf + len, size - len, "%25s\t%15s\t%25s\n",
+ "Module-name", "How-many-times", "Reason");
+ mutex_lock(&module_mutex);
+
+ list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list) {
+ if (WARN_ON_ONCE(++count_failed >= MAX_FAILED_MOD_PRINT))
+ goto out_unlock;
+ len += scnprintf(buf + len, size - len, "%25s\t%15lu\t%25s\n", mod_fail->name,
+ atomic_long_read(&mod_fail->count), mod_fail_to_str(mod_fail));
+ }
+out_unlock:
+ mutex_unlock(&module_mutex);
+out:
+ /* Copy to userspace before freeing the buffer to avoid a use-after-free. */
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+#undef MAX_PREAMBLE
+#undef MAX_FAILED_MOD_PRINT
+#undef MAX_BYTES_PER_MOD
+
+static const struct file_operations fops_mod_stats = {
+ .read = read_file_mod_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define mod_debug_add_ulong(name) debugfs_create_ulong(#name, 0400, mod_debugfs_root, (unsigned long *) &name.counter)
+#define mod_debug_add_atomic(name) debugfs_create_atomic_t(#name, 0400, mod_debugfs_root, &name)
+static int __init module_stats_init(void)
+{
+ mod_debug_add_ulong(total_mod_size);
+ mod_debug_add_ulong(total_text_size);
+ mod_debug_add_ulong(invalid_kread_bytes);
+ mod_debug_add_ulong(invalid_decompress_bytes);
+ mod_debug_add_ulong(invalid_becoming_bytes);
+ mod_debug_add_ulong(invalid_mod_bytes);
+
+ mod_debug_add_atomic(modcount);
+ mod_debug_add_atomic(failed_kreads);
+ mod_debug_add_atomic(failed_decompress);
+ mod_debug_add_atomic(failed_becoming);
+ mod_debug_add_atomic(failed_load_modules);
+
+ debugfs_create_file("stats", 0400, mod_debugfs_root, mod_debugfs_root, &fops_mod_stats);
+
+ return 0;
+}
+#undef mod_debug_add_ulong
+#undef mod_debug_add_atomic
+module_init(module_stats_init);
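
stats.c assumes a handful of declarations that this series adds to kernel/module/internal.h under CONFIG_MODULE_STATS; they are not visible in this diff, so the following sketch is an approximation of what the file relies on:

/* Reasons a duplicate module load can fail (sketch). */
enum fail_dup_mod_reason {
	FAIL_DUP_MOD_BECOMING,
	FAIL_DUP_MOD_LOAD,
};

/* One entry per failed duplicate module name on dup_failed_modules (sketch). */
struct mod_fail_load {
	struct list_head list;
	char name[MODULE_NAME_LEN];
	atomic_long_t count;
	unsigned long dup_fail_mask;
};

/* Thin wrappers so the loader fast paths stay simple when stats are enabled. */
#define mod_stat_add_long(count, var)	atomic_long_add(count, var)
#define mod_stat_inc(name)		atomic_inc(name)

With CONFIG_MODULE_STATS and the shared debugfs root enabled, the individual counters and the aggregate stats report end up under /sys/kernel/debug/modules/.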
diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c
index 14fbea66f12f..a2b656b4e3d2 100644
--- a/kernel/module/strict_rwx.c
+++ b/kernel/module/strict_rwx.c
@@ -11,82 +11,25 @@
#include <linux/set_memory.h>
#include "internal.h"
-/*
- * LKM RO/NX protection: protect module's text/ro-data
- * from modification and any data from execution.
- *
- * General layout of module is:
- * [text] [read-only-data] [ro-after-init] [writable data]
- * text_size -----^ ^ ^ ^
- * ro_size ------------------------| | |
- * ro_after_init_size -----------------------------| |
- * size -----------------------------------------------------------|
- *
- * These values are always page-aligned (as is base) when
- * CONFIG_STRICT_MODULE_RWX is set.
- */
+static void module_set_memory(const struct module *mod, enum mod_mem_type type,
+ int (*set_memory)(unsigned long start, int num_pages))
+{
+ const struct module_memory *mod_mem = &mod->mem[type];
+
+ set_vm_flush_reset_perms(mod_mem->base);
+ set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
+}
/*
* Since some arches are moving towards PAGE_KERNEL module allocations instead
- * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() independent of
+ * of PAGE_KERNEL_EXEC, keep module_enable_x() independent of
* CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we
* are strict.
*/
-static void frob_text(const struct module_layout *layout,
- int (*set_memory)(unsigned long start, int num_pages))
-{
- set_memory((unsigned long)layout->base,
- PAGE_ALIGN(layout->text_size) >> PAGE_SHIFT);
-}
-
-static void frob_rodata(const struct module_layout *layout,
- int (*set_memory)(unsigned long start, int num_pages))
-{
- set_memory((unsigned long)layout->base + layout->text_size,
- (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
-}
-
-static void frob_ro_after_init(const struct module_layout *layout,
- int (*set_memory)(unsigned long start, int num_pages))
-{
- set_memory((unsigned long)layout->base + layout->ro_size,
- (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
-}
-
-static void frob_writable_data(const struct module_layout *layout,
- int (*set_memory)(unsigned long start, int num_pages))
-{
- set_memory((unsigned long)layout->base + layout->ro_after_init_size,
- (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
-}
-
-static bool layout_check_misalignment(const struct module_layout *layout)
-{
- return WARN_ON(!PAGE_ALIGNED(layout->base)) ||
- WARN_ON(!PAGE_ALIGNED(layout->text_size)) ||
- WARN_ON(!PAGE_ALIGNED(layout->ro_size)) ||
- WARN_ON(!PAGE_ALIGNED(layout->ro_after_init_size)) ||
- WARN_ON(!PAGE_ALIGNED(layout->size));
-}
-
-bool module_check_misalignment(const struct module *mod)
-{
- if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- return false;
-
- return layout_check_misalignment(&mod->core_layout) ||
- layout_check_misalignment(&mod->data_layout) ||
- layout_check_misalignment(&mod->init_layout);
-}
-
void module_enable_x(const struct module *mod)
{
- if (!PAGE_ALIGNED(mod->core_layout.base) ||
- !PAGE_ALIGNED(mod->init_layout.base))
- return;
-
- frob_text(&mod->core_layout, set_memory_x);
- frob_text(&mod->init_layout, set_memory_x);
+ for_class_mod_mem_type(type, text)
+ module_set_memory(mod, type, set_memory_x);
}
void module_enable_ro(const struct module *mod, bool after_init)
@@ -98,16 +41,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
return;
#endif
- set_vm_flush_reset_perms(mod->core_layout.base);
- set_vm_flush_reset_perms(mod->init_layout.base);
- frob_text(&mod->core_layout, set_memory_ro);
-
- frob_rodata(&mod->data_layout, set_memory_ro);
- frob_text(&mod->init_layout, set_memory_ro);
- frob_rodata(&mod->init_layout, set_memory_ro);
+ module_set_memory(mod, MOD_TEXT, set_memory_ro);
+ module_set_memory(mod, MOD_INIT_TEXT, set_memory_ro);
+ module_set_memory(mod, MOD_RODATA, set_memory_ro);
+ module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
if (after_init)
- frob_ro_after_init(&mod->data_layout, set_memory_ro);
+ module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
}
void module_enable_nx(const struct module *mod)
@@ -115,11 +55,8 @@ void module_enable_nx(const struct module *mod)
if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
return;
- frob_rodata(&mod->data_layout, set_memory_nx);
- frob_ro_after_init(&mod->data_layout, set_memory_nx);
- frob_writable_data(&mod->data_layout, set_memory_nx);
- frob_rodata(&mod->init_layout, set_memory_nx);
- frob_writable_data(&mod->init_layout, set_memory_nx);
+ for_class_mod_mem_type(type, data)
+ module_set_memory(mod, type, set_memory_nx);
}
int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c
index 26d812e07615..16742d1c630c 100644
--- a/kernel/module/tracking.c
+++ b/kernel/module/tracking.c
@@ -15,6 +15,7 @@
#include "internal.h"
static LIST_HEAD(unloaded_tainted_modules);
+extern struct dentry *mod_debugfs_root;
int try_add_tainted_module(struct module *mod)
{
@@ -120,12 +121,8 @@ static const struct file_operations unloaded_tainted_modules_fops = {
static int __init unloaded_tainted_modules_init(void)
{
- struct dentry *dir;
-
- dir = debugfs_create_dir("modules", NULL);
- debugfs_create_file("unloaded_tainted", 0444, dir, NULL,
+ debugfs_create_file("unloaded_tainted", 0444, mod_debugfs_root, NULL,
&unloaded_tainted_modules_fops);
-
return 0;
}
module_init(unloaded_tainted_modules_init);
diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c
index 8ec5cfd60496..277197977d43 100644
--- a/kernel/module/tree_lookup.c
+++ b/kernel/module/tree_lookup.c
@@ -21,16 +21,16 @@
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
- struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+ struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
- return (unsigned long)layout->base;
+ return (unsigned long)mod_mem->base;
}
static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
- struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+ struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
- return (unsigned long)layout->size;
+ return (unsigned long)mod_mem->size;
}
static __always_inline bool
@@ -77,32 +77,27 @@ static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *
*/
void mod_tree_insert(struct module *mod)
{
- mod->core_layout.mtn.mod = mod;
- mod->init_layout.mtn.mod = mod;
-
- __mod_tree_insert(&mod->core_layout.mtn, &mod_tree);
- if (mod->init_layout.size)
- __mod_tree_insert(&mod->init_layout.mtn, &mod_tree);
-
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- mod->data_layout.mtn.mod = mod;
- __mod_tree_insert(&mod->data_layout.mtn, &mod_data_tree);
-#endif
+ for_each_mod_mem_type(type) {
+ mod->mem[type].mtn.mod = mod;
+ if (mod->mem[type].size)
+ __mod_tree_insert(&mod->mem[type].mtn, &mod_tree);
+ }
}
void mod_tree_remove_init(struct module *mod)
{
- if (mod->init_layout.size)
- __mod_tree_remove(&mod->init_layout.mtn, &mod_tree);
+ for_class_mod_mem_type(type, init) {
+ if (mod->mem[type].size)
+ __mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
+ }
}
void mod_tree_remove(struct module *mod)
{
- __mod_tree_remove(&mod->core_layout.mtn, &mod_tree);
- mod_tree_remove_init(mod);
-#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
- __mod_tree_remove(&mod->data_layout.mtn, &mod_data_tree);
-#endif
+ for_each_mod_mem_type(type) {
+ if (mod->mem[type].size)
+ __mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
+ }
}
struct module *mod_find(unsigned long addr, struct mod_tree_root *tree)
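
The tree now stores one latch node per struct module_memory, and the __module_address() hunk in main.c above checks the data_addr_* bounds on the same mod_tree root instead of a separate mod_data_tree. A sketch of the kernel/module/internal.h structures assumed here (approximate, not part of this diff):

/* Latch-tree node embedded in each struct module_memory (sketch). */
struct mod_tree_node {
	struct module *mod;
	struct latch_tree_node node;
};

/* Single shared tree root; data bounds only exist when module data
 * lives in a separate vmalloc region (sketch). */
struct mod_tree_root {
#ifdef CONFIG_MODULES_TREE_LOOKUP
	struct latch_tree_root root;
#endif
	unsigned long addr_min;
	unsigned long addr_max;
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	unsigned long data_addr_min;
	unsigned long data_addr_max;
#endif
};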
diff --git a/kernel/params.c b/kernel/params.c
index 6e34ca89ebae..6a7548979aa9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -948,7 +948,7 @@ static void module_kobj_release(struct kobject *kobj)
complete(mk->kobj_completion);
}
-struct kobj_type module_ktype = {
+const struct kobj_type module_ktype = {
.release = module_kobj_release,
.sysfs_ops = &module_sysfs_ops,
};
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 9644f6e5bf15..5bcb11ad50bf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -89,7 +89,7 @@ static DEFINE_MUTEX(console_mutex);
* console_sem protects updates to console->seq and console_suspended,
* and also provides serialization for console printing.
*/
-static DEFINE_SEMAPHORE(console_sem);
+static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);
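
This relies on DEFINE_SEMAPHORE() having grown an explicit initial-count argument; the include/linux/semaphore.h change itself is not part of this section, so the following is only a sketch of the assumed definition:

/* Initial count is now an explicit argument instead of being hardcoded to 1. */
#define DEFINE_SEMAPHORE(_name, _n)	\
	struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n)

The rxrpc hunk further below uses the same form to replace open-coded __SEMAPHORE_INITIALIZER() users that need a count other than 1.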
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3b46dba3f69b..08155f652554 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8006,8 +8006,7 @@ struct kallsyms_data {
* and returns 1 in case we resolved all the requested symbols,
* 0 otherwise.
*/
-static int kallsyms_callback(void *data, const char *name,
- struct module *mod, unsigned long addr)
+static int kallsyms_callback(void *data, const char *name, unsigned long addr)
{
struct kallsyms_data *args = data;
const char **sym;
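
The callback loses its struct module parameter because the kallsyms iterators no longer pass one; a sketch of the iterator prototype this callback is assumed to plug into after the change (kernel/kallsyms.c, reproduced from memory):

/* Walks all kallsyms entries and invokes fn(data, name, addr) for each. */
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
			    void *data);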
diff --git a/kernel/trace/rv/reactor_panic.c b/kernel/trace/rv/reactor_panic.c
index d65f6c25a87c..0186ff4cbd0b 100644
--- a/kernel/trace/rv/reactor_panic.c
+++ b/kernel/trace/rv/reactor_panic.c
@@ -38,6 +38,5 @@ static void __exit unregister_react_panic(void)
module_init(register_react_panic);
module_exit(unregister_react_panic);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Bristot de Oliveira");
MODULE_DESCRIPTION("panic rv reactor: panic if an exception is found.");
diff --git a/kernel/trace/rv/reactor_printk.c b/kernel/trace/rv/reactor_printk.c
index 4b6b7106a477..178759dbf89f 100644
--- a/kernel/trace/rv/reactor_printk.c
+++ b/kernel/trace/rv/reactor_printk.c
@@ -37,6 +37,5 @@ static void __exit unregister_react_printk(void)
module_init(register_react_printk);
module_exit(unregister_react_printk);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Bristot de Oliveira");
MODULE_DESCRIPTION("printk rv reactor: printk if an exception is hit.");
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index f10f403104e7..e91cb4c2833f 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -29,7 +29,6 @@
MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
diff --git a/lib/Makefile b/lib/Makefile
index 31a3a257fd49..876fcdeae34e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -231,6 +231,9 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
+#ensure exported functions have prototypes
+CFLAGS_dynamic_debug.o := -DDYNAMIC_DEBUG_MODULE
+
obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
obj-$(CONFIG_NLATTR) += nlattr.o
diff --git a/lib/btree.c b/lib/btree.c
index a82100c73b55..49420cae3a83 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -794,4 +794,3 @@ module_exit(btree_module_exit);
MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
-MODULE_LICENSE("GPL");
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
index 75ccb3e633e6..3b6dcfdd9628 100644
--- a/lib/crypto/blake2s-generic.c
+++ b/lib/crypto/blake2s-generic.c
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <asm/unaligned.h>
@@ -109,7 +108,3 @@ void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
}
EXPORT_SYMBOL(blake2s_compress_generic);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("BLAKE2s hash function");
-MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 98e688c6d891..71a316552cc5 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -67,6 +67,5 @@ static int __init blake2s_mod_init(void)
}
module_init(blake2s_mod_init);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 009f2ead09c1..fdd6d9800a70 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1223,8 +1223,7 @@ static void ddebug_attach_module_classes(struct ddebug_table *dt,
* Allocate a new ddebug_table for the given module
* and add it to the global list.
*/
-static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base,
- const char *modname)
+static int ddebug_add_module(struct _ddebug_info *di, const char *modname)
{
struct ddebug_table *dt;
@@ -1263,11 +1262,6 @@ static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base,
return 0;
}
-int ddebug_add_module(struct _ddebug_info *di, const char *modname)
-{
- return __ddebug_add_module(di, 0, modname);
-}
-
/* helper for ddebug_dyndbg_(boot|module)_param_cb */
static int ddebug_dyndbg_param_cb(char *param, char *val,
const char *modname, int on_err)
@@ -1314,11 +1308,13 @@ static void ddebug_table_free(struct ddebug_table *dt)
kfree(dt);
}
+#ifdef CONFIG_MODULES
+
/*
* Called in response to a module being unloaded. Removes
* any ddebug_table's which point at the module.
*/
-int ddebug_remove_module(const char *mod_name)
+static int ddebug_remove_module(const char *mod_name)
{
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
@@ -1337,6 +1333,33 @@ int ddebug_remove_module(const char *mod_name)
return ret;
}
+static int ddebug_module_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+ int ret = 0;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ ret = ddebug_add_module(&mod->dyndbg_info, mod->name);
+ if (ret)
+ WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n");
+ break;
+ case MODULE_STATE_GOING:
+ ddebug_remove_module(mod->name);
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block ddebug_module_nb = {
+ .notifier_call = ddebug_module_notify,
+ .priority = 0, /* dynamic debug depends on jump label */
+};
+
+#endif /* CONFIG_MODULES */
+
static void ddebug_remove_all_tables(void)
{
mutex_lock(&ddebug_lock);
@@ -1388,6 +1411,14 @@ static int __init dynamic_debug_init(void)
.num_classes = __stop___dyndbg_classes - __start___dyndbg_classes,
};
+#ifdef CONFIG_MODULES
+ ret = register_module_notifier(&ddebug_module_nb);
+ if (ret) {
+ pr_warn("Failed to register dynamic debug module notifier\n");
+ return ret;
+ }
+#endif /* CONFIG_MODULES */
+
if (&__start___dyndbg == &__stop___dyndbg) {
if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
@@ -1408,7 +1439,7 @@ static int __init dynamic_debug_init(void)
mod_ct++;
di.num_descs = mod_sites;
di.descs = iter_mod_start;
- ret = __ddebug_add_module(&di, i - mod_sites, modname);
+ ret = ddebug_add_module(&di, modname);
if (ret)
goto out_err;
@@ -1419,7 +1450,7 @@ static int __init dynamic_debug_init(void)
}
di.num_descs = mod_sites;
di.descs = iter_mod_start;
- ret = __ddebug_add_module(&di, i - mod_sites, modname);
+ ret = ddebug_add_module(&di, modname);
if (ret)
goto out_err;
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index 6e77eb6d8e72..54e1809a38fd 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -875,5 +875,4 @@ out_release_data:
EXPORT_SYMBOL(pldmfw_flash_image);
MODULE_AUTHOR("Jacob Keller <jacob.e.keller@intel.com>");
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PLDM firmware flash update library");
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
index 1fb56cf5e5ce..fd6153800e56 100644
--- a/lib/test_fprobe.c
+++ b/lib/test_fprobe.c
@@ -168,4 +168,3 @@ static struct kunit_suite fprobe_test_suite = {
kunit_test_suites(&fprobe_test_suite);
-MODULE_LICENSE("GPL");
diff --git a/mm/zpool.c b/mm/zpool.c
index 571f5c5031dd..6a19c4a58f77 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -395,6 +395,5 @@ bool zpool_can_sleep_mapped(struct zpool *zpool)
return zpool->driver->sleep_mapped;
}
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
diff --git a/mm/zswap.c b/mm/zswap.c
index f6c89049cf70..f2fc0373b967 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1540,6 +1540,5 @@ cache_fail:
/* must be late so crypto has time to come up */
late_initcall(init_zswap);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index e9f1f49d18c2..3e5cc70884dd 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -40,10 +40,8 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
struct kmem_cache *rxrpc_call_jar;
-static struct semaphore rxrpc_call_limiter =
- __SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
-static struct semaphore rxrpc_kernel_call_limiter =
- __SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
+static DEFINE_SEMAPHORE(rxrpc_call_limiter, 1000);
+static DEFINE_SEMAPHORE(rxrpc_kernel_call_limiter, 1000);
void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
{
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 2efbec6b6b8d..8085d3693a05 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -54,6 +54,9 @@ LX_VALUE(SB_NODIRATIME)
/* linux/htimer.h */
LX_GDBPARSED(hrtimer_resolution)
+/* linux/module.h */
+LX_GDBPARSED(MOD_TEXT)
+
/* linux/mount.h */
LX_VALUE(MNT_NOSUID)
LX_VALUE(MNT_NODEV)
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
index 441b23239896..261f28640f4c 100644
--- a/scripts/gdb/linux/modules.py
+++ b/scripts/gdb/linux/modules.py
@@ -13,7 +13,7 @@
import gdb
-from linux import cpus, utils, lists
+from linux import cpus, utils, lists, constants
module_type = utils.CachedType("struct module")
@@ -73,7 +73,7 @@ class LxLsmod(gdb.Command):
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
- layout = module['core_layout']
+ layout = module['mem'][constants.LX_MOD_TEXT]
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
address=str(layout['base']).split()[0],
name=module['name'].string(),
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index dc07b6d12e30..fdad3f32c747 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -15,7 +15,7 @@ import gdb
import os
import re
-from linux import modules, utils
+from linux import modules, utils, constants
if hasattr(gdb, 'Breakpoint'):
@@ -109,7 +109,7 @@ lx-symbols command."""
def load_module_symbols(self, module):
module_name = module['name'].string()
- module_addr = str(module['core_layout']['base']).split()[0]
+ module_addr = str(module['mem'][constants.LX_MOD_TEXT]['base']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 9466b6a2abae..d4531d09984d 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "modpost.h"
#include "../../include/linux/license.h"
+#include "../../include/linux/module_symbol.h"
/* Are we using CONFIG_MODVERSIONS? */
static bool modversions;
@@ -1112,16 +1113,9 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
return 1;
}
-static inline int is_arm_mapping_symbol(const char *str)
-{
- return str[0] == '$' &&
- (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
- && (str[2] == '\0' || str[2] == '.');
-}
-
/*
* If there's no name there, ignore it; likewise, ignore it if it's
- * one of the magic symbols emitted used by current ARM tools.
+ * one of the magic symbols emitted by current tools.
*
* Otherwise if find_symbols_between() returns those symbols, they'll
* fail the whitelist tests and cause lots of false alarms ... fixable
@@ -1134,7 +1128,7 @@ static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
if (!name || !strlen(name))
return 0;
- return !is_arm_mapping_symbol(name);
+ return !is_mapping_symbol(name);
}
/**
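
modpost now shares the mapping-symbol check with the kernel via include/linux/module_symbol.h, which is not shown in this diff. A sketch of the assumed helper follows; the exact set of recognized prefixes is an approximation:

/* Ignore ELF mapping symbols and tool-emitted local labels. */
static inline int is_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	if (str[0] == 'L' && str[1] == '0')
		return true;
	return str[0] == '$' &&
	       (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
	       && (str[2] == '\0' || str[2] == '.');
}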