Diffstat (limited to 'security')
180 files changed, 8871 insertions, 3834 deletions
diff --git a/security/Kconfig b/security/Kconfig index 28e685f53bd1..285f284dfcac 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -51,6 +51,27 @@ config PROC_MEM_NO_FORCE endchoice +config MSEAL_SYSTEM_MAPPINGS + bool "mseal system mappings" + depends on 64BIT + depends on ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS + depends on !CHECKPOINT_RESTORE + help + Apply mseal on system mappings. + The system mappings includes vdso, vvar, vvar_vclock, + vectors (arm compat-mode), sigpage (arm compat-mode), uprobes. + + A 64-bit kernel is required for the memory sealing feature. + No specific hardware features from the CPU are needed. + + WARNING: This feature breaks programs which rely on relocating + or unmapping system mappings. Known broken software at the time + of writing includes CHECKPOINT_RESTORE, UML, gVisor, rr. Therefore + this config can't be enabled universally. + + For complete descriptions of memory sealing, please see + Documentation/userspace-api/mseal.rst + config SECURITY bool "Enable different security models" depends on SYSFS @@ -64,6 +85,11 @@ config SECURITY If you are unsure how to answer this question, answer N. +config HAS_SECURITY_AUDIT + def_bool y + depends on AUDIT + depends on SECURITY + config SECURITYFS bool "Enable the securityfs filesystem" help @@ -159,27 +185,6 @@ config LSM_MMAP_MIN_ADDR this low address space will need the permission specific to the systems running LSM. -config HARDENED_USERCOPY - bool "Harden memory copies between kernel and userspace" - imply STRICT_DEVMEM - help - This option checks for obviously wrong memory regions when - copying memory to/from the kernel (via copy_to_user() and - copy_from_user() functions) by rejecting memory ranges that - are larger than the specified heap object, span multiple - separately allocated pages, are not on the process stack, - or are part of the kernel text. This prevents entire classes - of heap overflow exploits and similar kernel memory exposures. - -config FORTIFY_SOURCE - bool "Harden common str/mem functions against buffer overflows" - depends on ARCH_HAS_FORTIFY_SOURCE - # https://github.com/llvm/llvm-project/issues/53645 - depends on !CC_IS_CLANG || !X86_32 - help - Detect overflows of buffers in common string and memory functions - where the compiler can determine and validate the buffer sizes. - config STATIC_USERMODEHELPER bool "Force all usermode helper calls through a single binary" help @@ -264,6 +269,7 @@ endchoice config LSM string "Ordered list of enabled LSMs" + depends on SECURITY default "landlock,lockdown,yama,loadpin,safesetid,smack,selinux,tomoyo,apparmor,ipe,bpf" if DEFAULT_SECURITY_SMACK default "landlock,lockdown,yama,loadpin,safesetid,apparmor,selinux,smack,tomoyo,ipe,bpf" if DEFAULT_SECURITY_APPARMOR default "landlock,lockdown,yama,loadpin,safesetid,tomoyo,ipe,bpf" if DEFAULT_SECURITY_TOMOYO diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index c9d5ca3d8d08..86f8768c63d4 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -1,22 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menu "Kernel hardening options" -config GCC_PLUGIN_STRUCTLEAK - bool - help - While the kernel is built with warnings enabled for any missed - stack variable initializations, this warning is silenced for - anything passed by reference to another function, under the - occasionally misguided assumption that the function will do - the initialization. 
As this regularly leads to exploitable - flaws, this plugin is available to identify and zero-initialize - such variables, depending on the chosen level of coverage. - - This plugin was originally ported from grsecurity/PaX. More - information at: - * https://grsecurity.net/ - * https://pax.grsecurity.net/ - menu "Memory initialization" config CC_HAS_AUTO_VAR_INIT_PATTERN @@ -36,7 +20,6 @@ config CC_HAS_AUTO_VAR_INIT_ZERO choice prompt "Initialize kernel stack variables at function entry" - default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN default INIT_STACK_ALL_ZERO if CC_HAS_AUTO_VAR_INIT_ZERO default INIT_STACK_NONE @@ -60,55 +43,6 @@ choice classes of uninitialized stack variable exploits and information exposures. - config GCC_PLUGIN_STRUCTLEAK_USER - bool "zero-init structs marked for userspace (weak)" - # Plugin can be removed once the kernel only supports GCC 12+ - depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO - select GCC_PLUGIN_STRUCTLEAK - help - Zero-initialize any structures on the stack containing - a __user attribute. This can prevent some classes of - uninitialized stack variable exploits and information - exposures, like CVE-2013-2141: - https://git.kernel.org/linus/b9e146d8eb3b9eca - - config GCC_PLUGIN_STRUCTLEAK_BYREF - bool "zero-init structs passed by reference (strong)" - # Plugin can be removed once the kernel only supports GCC 12+ - depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO - depends on !(KASAN && KASAN_STACK) - select GCC_PLUGIN_STRUCTLEAK - help - Zero-initialize any structures on the stack that may - be passed by reference and had not already been - explicitly initialized. This can prevent most classes - of uninitialized stack variable exploits and information - exposures, like CVE-2017-1000410: - https://git.kernel.org/linus/06e7e776ca4d3654 - - As a side-effect, this keeps a lot of variables on the - stack that can otherwise be optimized out, so combining - this with CONFIG_KASAN_STACK can lead to a stack overflow - and is disallowed. - - config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL - bool "zero-init everything passed by reference (very strong)" - # Plugin can be removed once the kernel only supports GCC 12+ - depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO - depends on !(KASAN && KASAN_STACK) - select GCC_PLUGIN_STRUCTLEAK - help - Zero-initialize any stack variables that may be passed - by reference and had not already been explicitly - initialized. This is intended to eliminate all classes - of uninitialized stack variable exploits and information - exposures. - - As a side-effect, this keeps a lot of variables on the - stack that can otherwise be optimized out, so combining - this with CONFIG_KASAN_STACK can lead to a stack overflow - and is disallowed. - config INIT_STACK_ALL_PATTERN bool "pattern-init everything (strongest)" depends on CC_HAS_AUTO_VAR_INIT_PATTERN @@ -127,6 +61,7 @@ choice repeating for all types and padding except float and double which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF repeating for all types and padding. + GCC uses 0xFE repeating for all types, and zero for padding. 
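The stack auto-init options above exist to close uninitialized-stack-variable leaks. As a rough illustration (a minimal userspace-style C sketch with made-up names, not kernel code), the struct below is copied out with its padding and one member never assigned; building with -ftrivial-auto-var-init=zero (what INIT_STACK_ALL_ZERO asks the compiler for) makes those bytes read back as zero instead of stale stack data, while =pattern fills them with the debug byte described above:

/*
 * Minimal userspace-style sketch (not kernel code): the padding after
 * 'tag' and the never-assigned 'never_set' member would otherwise carry
 * stale stack contents out of handle_request().
 */
#include <stdio.h>

struct reply {
        char tag;               /* 7 bytes of padding follow on LP64 */
        long value;
        long never_set;         /* forgotten by the handler below */
};

static void handle_request(long v)
{
        struct reply r;         /* no explicit initializer */

        r.tag = 'R';
        r.value = v;
        fwrite(&r, sizeof(r), 1, stdout);   /* copies padding + never_set */
}

int main(void)
{
        handle_request(42);
        return 0;
}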
config INIT_STACK_ALL_ZERO bool "zero-init everything (strongest and safest)" @@ -147,20 +82,13 @@ choice endchoice -config GCC_PLUGIN_STRUCTLEAK_VERBOSE - bool "Report forcefully initialized variables" - depends on GCC_PLUGIN_STRUCTLEAK - depends on !COMPILE_TEST # too noisy - help - This option will cause a warning to be printed each time the - structleak plugin finds a variable it thinks needs to be - initialized. Since not all existing initializers are detected - by the plugin, this can produce false positive warnings. +config CC_HAS_SANCOV_STACK_DEPTH_CALLBACK + def_bool $(cc-option,-fsanitize-coverage-stack-depth-callback-min=1) -config GCC_PLUGIN_STACKLEAK +config KSTACK_ERASE bool "Poison kernel stack before returning from syscalls" - depends on GCC_PLUGINS - depends on HAVE_ARCH_STACKLEAK + depends on HAVE_ARCH_KSTACK_ERASE + depends on GCC_PLUGINS || CC_HAS_SANCOV_STACK_DEPTH_CALLBACK help This option makes the kernel erase the kernel stack before returning from system calls. This has the effect of leaving @@ -178,6 +106,10 @@ config GCC_PLUGIN_STACKLEAK are advised to test this feature on your expected workload before deploying it. +config GCC_PLUGIN_STACKLEAK + def_bool KSTACK_ERASE + depends on GCC_PLUGINS + help This plugin was ported from grsecurity/PaX. More information at: * https://grsecurity.net/ * https://pax.grsecurity.net/ @@ -192,37 +124,37 @@ config GCC_PLUGIN_STACKLEAK_VERBOSE instrumented. This is useful for comparing coverage between builds. -config STACKLEAK_TRACK_MIN_SIZE - int "Minimum stack frame size of functions tracked by STACKLEAK" +config KSTACK_ERASE_TRACK_MIN_SIZE + int "Minimum stack frame size of functions tracked by KSTACK_ERASE" default 100 range 0 4096 - depends on GCC_PLUGIN_STACKLEAK + depends on KSTACK_ERASE help - The STACKLEAK gcc plugin instruments the kernel code for tracking + The KSTACK_ERASE option instruments the kernel code for tracking the lowest border of the kernel stack (and for some other purposes). - It inserts the stackleak_track_stack() call for the functions with - a stack frame size greater than or equal to this parameter. + It inserts the __sanitizer_cov_stack_depth() call for the functions + with a stack frame size greater than or equal to this parameter. If unsure, leave the default value 100. -config STACKLEAK_METRICS - bool "Show STACKLEAK metrics in the /proc file system" - depends on GCC_PLUGIN_STACKLEAK +config KSTACK_ERASE_METRICS + bool "Show KSTACK_ERASE metrics in the /proc file system" + depends on KSTACK_ERASE depends on PROC_FS help - If this is set, STACKLEAK metrics for every task are available in - the /proc file system. In particular, /proc/<pid>/stack_depth + If this is set, KSTACK_ERASE metrics for every task are available + in the /proc file system. In particular, /proc/<pid>/stack_depth shows the maximum kernel stack consumption for the current and previous syscalls. Although this information is not precise, it - can be useful for estimating the STACKLEAK performance impact for - your workloads. + can be useful for estimating the KSTACK_ERASE performance impact + for your workloads. -config STACKLEAK_RUNTIME_DISABLE +config KSTACK_ERASE_RUNTIME_DISABLE bool "Allow runtime disabling of kernel stack erasing" - depends on GCC_PLUGIN_STACKLEAK + depends on KSTACK_ERASE help This option provides 'stack_erasing' sysctl, which can be used in runtime to control kernel stack erasing for kernels built with - CONFIG_GCC_PLUGIN_STACKLEAK. + CONFIG_KSTACK_ERASE. 
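For context on the KSTACK_ERASE tracking described above: all the inserted callback has to do is remember the deepest stack pointer a thread reaches, so the erase pass on syscall exit knows how far down to poison. The stand-alone sketch below illustrates that idea only; the callback name matches what the sanitizer-coverage instrumentation calls, but the bookkeeping is invented for illustration and is not the kernel's actual per-task implementation:

/*
 * Conceptual sketch only: the compiler inserts a call to this callback in
 * every function whose stack frame is at least KSTACK_ERASE_TRACK_MIN_SIZE
 * bytes.  It records the lowest (deepest) stack pointer seen; the erase
 * pass can then poison the stack down to that mark.  The 'cur' state here
 * is made up for illustration - the kernel keeps this per task.
 */
#include <stdio.h>

struct stack_track {
        unsigned long lowest_sp;        /* deepest stack use seen so far */
};

static struct stack_track cur = { .lowest_sp = ~0UL };

void __sanitizer_cov_stack_depth(void)
{
        unsigned long sp = (unsigned long)__builtin_frame_address(0);

        if (sp < cur.lowest_sp)         /* stack grows down */
                cur.lowest_sp = sp;
}

int main(void)
{
        __sanitizer_cov_stack_depth();  /* normally inserted by the compiler */
        printf("lowest sp seen: %#lx\n", cur.lowest_sp);
        return 0;
}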
config INIT_ON_ALLOC_DEFAULT_ON bool "Enable heap memory zeroing on allocation by default" @@ -279,6 +211,39 @@ config ZERO_CALL_USED_REGS endmenu +menu "Bounds checking" + +config FORTIFY_SOURCE + bool "Harden common str/mem functions against buffer overflows" + depends on ARCH_HAS_FORTIFY_SOURCE + # https://github.com/llvm/llvm-project/issues/53645 + depends on !X86_32 || !CC_IS_CLANG || CLANG_VERSION >= 160000 + help + Detect overflows of buffers in common string and memory functions + where the compiler can determine and validate the buffer sizes. + +config HARDENED_USERCOPY + bool "Harden memory copies between kernel and userspace" + imply STRICT_DEVMEM + help + This option checks for obviously wrong memory regions when + copying memory to/from the kernel (via copy_to_user() and + copy_from_user() functions) by rejecting memory ranges that + are larger than the specified heap object, span multiple + separately allocated pages, are not on the process stack, + or are part of the kernel text. This prevents entire classes + of heap overflow exploits and similar kernel memory exposures. + +config HARDENED_USERCOPY_DEFAULT_ON + bool "Harden memory copies by default" + depends on HARDENED_USERCOPY + default HARDENED_USERCOPY + help + This has the effect of setting "hardened_usercopy=on" on the kernel + command line. This can be disabled with "hardened_usercopy=off". + +endmenu + menu "Hardening of kernel data structures" config LIST_HARDENED @@ -290,6 +255,16 @@ config LIST_HARDENED If unsure, say N. +config RUST_BITMAP_HARDENED + bool "Check integrity of bitmap Rust API" + depends on RUST + help + Enables additional assertions in the Rust Bitmap API to catch + arguments that are not guaranteed to result in an immediate access + fault. + + If unsure, say N. 
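As a rough illustration of the overflow class FORTIFY_SOURCE (above) is meant to catch, here is a minimal userspace-style C sketch (all names made up, not kernel code): the destination field is 8 bytes and the copy length is a constant 16, which a fortified memcpy can reject at build time or trap on at run time:

/*
 * Minimal sketch of the bug class FORTIFY_SOURCE targets: the compiler
 * can prove the destination sub-object is 8 bytes while the copy length
 * is 16, so a fortified memcpy (e.g. -D_FORTIFY_SOURCE=2 in userspace,
 * or the kernel's CONFIG_FORTIFY_SOURCE) flags the overflow.
 */
#include <string.h>

struct pkt {
        char hdr[8];
        char body[56];
};

static void fill_header(struct pkt *p, const char *src)
{
        memcpy(p->hdr, src, 16);        /* overflows hdr into body */
}

int main(void)
{
        struct pkt p;
        char src[16] = "0123456789abcde";

        fill_header(&p, src);
        return 0;
}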
+ config BUG_ON_DATA_CORRUPTION bool "Trigger a BUG when data corruption is detected" select LIST_HARDENED diff --git a/security/Makefile b/security/Makefile index cc0982214b84..4601230ba442 100644 --- a/security/Makefile +++ b/security/Makefile @@ -11,11 +11,11 @@ obj-$(CONFIG_SECURITY) += lsm_syscalls.o obj-$(CONFIG_MMU) += min_addr.o # Object file lists -obj-$(CONFIG_SECURITY) += security.o +obj-$(CONFIG_SECURITY) += security.o lsm_notifier.o lsm_init.o obj-$(CONFIG_SECURITYFS) += inode.o obj-$(CONFIG_SECURITY_SELINUX) += selinux/ obj-$(CONFIG_SECURITY_SMACK) += smack/ -obj-$(CONFIG_SECURITY) += lsm_audit.o +obj-$(CONFIG_HAS_SECURITY_AUDIT) += lsm_audit.o obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ obj-$(CONFIG_SECURITY_YAMA) += yama/ diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig index 64cc3044a42c..1e3bd44643da 100644 --- a/security/apparmor/Kconfig +++ b/security/apparmor/Kconfig @@ -59,8 +59,7 @@ config SECURITY_APPARMOR_INTROSPECT_POLICY config SECURITY_APPARMOR_HASH bool "Enable introspection of sha256 hashes for loaded profiles" depends on SECURITY_APPARMOR_INTROSPECT_POLICY - select CRYPTO - select CRYPTO_SHA256 + select CRYPTO_LIB_SHA256 default y help This option selects whether introspection of loaded policy diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile index b9c5879dd599..12fb419714c0 100644 --- a/security/apparmor/Makefile +++ b/security/apparmor/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \ path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ resource.o secid.o file.o policy_ns.o label.o mount.o net.o \ - policy_compat.o + policy_compat.o af_unix.o apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o obj-$(CONFIG_SECURITY_APPARMOR_KUNIT_TEST) += apparmor_policy_unpack_test.o @@ -28,7 +28,7 @@ clean-files := capability_names.h rlim_names.h net_names.h # to # #define AA_SFS_AF_MASK "local inet" quiet_cmd_make-af = GEN $@ -cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\ +cmd_make-af = echo "static const char *const address_family_names[] = {" > $@ ;\ sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ echo "};" >> $@ ;\ @@ -43,7 +43,7 @@ cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\ # to # [1] = "stream", quiet_cmd_make-sock = GEN $@ -cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\ +cmd_make-sock = echo "static const char *const sock_type_names[] = {" >> $@ ;\ sed $^ >>$@ -r -n \ -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ echo "};" >> $@ diff --git a/security/apparmor/af_unix.c b/security/apparmor/af_unix.c new file mode 100644 index 000000000000..ac0f4be791ec --- /dev/null +++ b/security/apparmor/af_unix.c @@ -0,0 +1,799 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AppArmor security module + * + * This file contains AppArmor af_unix fine grained mediation + * + * Copyright 2023 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ */ + +#include <linux/fs.h> +#include <net/tcp_states.h> + +#include "include/audit.h" +#include "include/af_unix.h" +#include "include/apparmor.h" +#include "include/file.h" +#include "include/label.h" +#include "include/path.h" +#include "include/policy.h" +#include "include/cred.h" + + +static inline struct sock *aa_unix_sk(struct unix_sock *u) +{ + return &u->sk; +} + +static int unix_fs_perm(const char *op, u32 mask, const struct cred *subj_cred, + struct aa_label *label, const struct path *path) +{ + AA_BUG(!label); + AA_BUG(!path); + + if (unconfined(label) || !label_mediates(label, AA_CLASS_FILE)) + return 0; + + mask &= NET_FS_PERMS; + /* if !u->path.dentry socket is being shutdown - implicit delegation + * until obj delegation is supported + */ + if (path->dentry) { + /* the sunpath may not be valid for this ns so use the path */ + struct inode *inode = path->dentry->d_inode; + vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(path->mnt), inode); + struct path_cond cond = { + .uid = vfsuid_into_kuid(vfsuid), + .mode = inode->i_mode, + }; + + return aa_path_perm(op, subj_cred, label, path, + PATH_SOCK_COND, mask, &cond); + } /* else implicitly delegated */ + + return 0; +} + +/* match_addr special constants */ +#define ABSTRACT_ADDR "\x00" /* abstract socket addr */ +#define ANONYMOUS_ADDR "\x01" /* anonymous endpoint, no addr */ +#define DISCONNECTED_ADDR "\x02" /* addr is another namespace */ +#define SHUTDOWN_ADDR "\x03" /* path addr is shutdown and cleared */ +#define FS_ADDR "/" /* path addr in fs */ + +static aa_state_t match_addr(struct aa_dfa *dfa, aa_state_t state, + struct sockaddr_un *addr, int addrlen) +{ + if (addr) + /* include leading \0 */ + state = aa_dfa_match_len(dfa, state, addr->sun_path, + unix_addr_len(addrlen)); + else + state = aa_dfa_match_len(dfa, state, ANONYMOUS_ADDR, 1); + /* todo: could change to out of band for cleaner separation */ + state = aa_dfa_null_transition(dfa, state); + + return state; +} + +static aa_state_t match_to_local(struct aa_policydb *policy, + aa_state_t state, u32 request, + int type, int protocol, + struct sockaddr_un *addr, int addrlen, + struct aa_perms **p, + const char **info) +{ + state = aa_match_to_prot(policy, state, request, PF_UNIX, type, + protocol, NULL, info); + if (state) { + state = match_addr(policy->dfa, state, addr, addrlen); + if (state) { + /* todo: local label matching */ + state = aa_dfa_null_transition(policy->dfa, state); + if (!state) + *info = "failed local label match"; + } else { + *info = "failed local address match"; + } + } + + return state; +} + +struct sockaddr_un *aa_sunaddr(const struct unix_sock *u, int *addrlen) +{ + struct unix_address *addr; + + /* memory barrier is sufficient see note in net/unix/af_unix.c */ + addr = smp_load_acquire(&u->addr); + if (addr) { + *addrlen = addr->len; + return addr->name; + } + *addrlen = 0; + return NULL; +} + +static aa_state_t match_to_sk(struct aa_policydb *policy, + aa_state_t state, u32 request, + struct unix_sock *u, struct aa_perms **p, + const char **info) +{ + int addrlen; + struct sockaddr_un *addr = aa_sunaddr(u, &addrlen); + + return match_to_local(policy, state, request, u->sk.sk_type, + u->sk.sk_protocol, addr, addrlen, p, info); +} + +#define CMD_ADDR 1 +#define CMD_LISTEN 2 +#define CMD_OPT 4 + +static aa_state_t match_to_cmd(struct aa_policydb *policy, aa_state_t state, + u32 request, struct unix_sock *u, + char cmd, struct aa_perms **p, + const char **info) +{ + AA_BUG(!p); + + state = match_to_sk(policy, state, request, u, p, info); + if 
(state && !*p) { + state = aa_dfa_match_len(policy->dfa, state, &cmd, 1); + if (!state) + *info = "failed cmd selection match"; + } + + return state; +} + +static aa_state_t match_to_peer(struct aa_policydb *policy, aa_state_t state, + u32 request, struct unix_sock *u, + struct sockaddr_un *peer_addr, int peer_addrlen, + struct aa_perms **p, const char **info) +{ + AA_BUG(!p); + + state = match_to_cmd(policy, state, request, u, CMD_ADDR, p, info); + if (state && !*p) { + state = match_addr(policy->dfa, state, peer_addr, peer_addrlen); + if (!state) + *info = "failed peer address match"; + } + + return state; +} + +static aa_state_t match_label(struct aa_profile *profile, + struct aa_ruleset *rule, aa_state_t state, + u32 request, struct aa_profile *peer, + struct aa_perms *p, + struct apparmor_audit_data *ad) +{ + AA_BUG(!profile); + AA_BUG(!peer); + + ad->peer = &peer->label; + + if (state && !p) { + state = aa_dfa_match(rule->policy->dfa, state, + peer->base.hname); + if (!state) + ad->info = "failed peer label match"; + + } + + return aa_do_perms(profile, rule->policy, state, request, p, ad); +} + + +/* unix sock creation comes before we know if the socket will be an fs + * socket + * v6 - semantics are handled by mapping in profile load + * v7 - semantics require sock create for tasks creating an fs socket. + * v8 - same as v7 + */ +static int profile_create_perm(struct aa_profile *profile, int family, + int type, int protocol, + struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + state = aa_match_to_prot(rules->policy, state, AA_MAY_CREATE, + PF_UNIX, type, protocol, NULL, + &ad->info); + + return aa_do_perms(profile, rules->policy, state, AA_MAY_CREATE, + NULL, ad); + } + + return aa_profile_af_perm(profile, ad, AA_MAY_CREATE, family, type, + protocol); +} + +static int profile_sk_perm(struct aa_profile *profile, + struct apparmor_audit_data *ad, + u32 request, struct sock *sk, const struct path *path) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(!sk); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + if (is_unix_fs(sk)) + return unix_fs_perm(ad->op, request, ad->subj_cred, + &profile->label, + &unix_sk(sk)->path); + + state = match_to_sk(rules->policy, state, request, unix_sk(sk), + &p, &ad->info); + + return aa_do_perms(profile, rules->policy, state, request, p, + ad); + } + + return aa_profile_af_sk_perm(profile, ad, request, sk); +} + +static int profile_bind_perm(struct aa_profile *profile, struct sock *sk, + struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(!sk); + AA_BUG(!ad); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + if (is_unix_addr_fs(ad->net.addr, ad->net.addrlen)) + /* under v7-9 fs hook handles bind */ + return 0; + /* bind for abstract socket */ + state = match_to_local(rules->policy, state, AA_MAY_BIND, + sk->sk_type, sk->sk_protocol, + unix_addr(ad->net.addr), + ad->net.addrlen, + &p, &ad->info); + + return aa_do_perms(profile, rules->policy, state, AA_MAY_BIND, + p, ad); + } + + return aa_profile_af_sk_perm(profile, ad, AA_MAY_BIND, sk); +} + +static int profile_listen_perm(struct 
aa_profile *profile, struct sock *sk, + int backlog, struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(!sk); + AA_BUG(!ad); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + __be16 b = cpu_to_be16(backlog); + + if (is_unix_fs(sk)) + return unix_fs_perm(ad->op, AA_MAY_LISTEN, + ad->subj_cred, &profile->label, + &unix_sk(sk)->path); + + state = match_to_cmd(rules->policy, state, AA_MAY_LISTEN, + unix_sk(sk), CMD_LISTEN, &p, &ad->info); + if (state && !p) { + state = aa_dfa_match_len(rules->policy->dfa, state, + (char *) &b, 2); + if (!state) + ad->info = "failed listen backlog match"; + } + return aa_do_perms(profile, rules->policy, state, AA_MAY_LISTEN, + p, ad); + } + + return aa_profile_af_sk_perm(profile, ad, AA_MAY_LISTEN, sk); +} + +static int profile_accept_perm(struct aa_profile *profile, + struct sock *sk, + struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(!sk); + AA_BUG(!ad); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + if (is_unix_fs(sk)) + return unix_fs_perm(ad->op, AA_MAY_ACCEPT, + ad->subj_cred, &profile->label, + &unix_sk(sk)->path); + + state = match_to_sk(rules->policy, state, AA_MAY_ACCEPT, + unix_sk(sk), &p, &ad->info); + + return aa_do_perms(profile, rules->policy, state, AA_MAY_ACCEPT, + p, ad); + } + + return aa_profile_af_sk_perm(profile, ad, AA_MAY_ACCEPT, sk); +} + +static int profile_opt_perm(struct aa_profile *profile, u32 request, + struct sock *sk, int optname, + struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(!sk); + AA_BUG(!ad); + AA_BUG(profile_unconfined(profile)); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + __be16 b = cpu_to_be16(optname); + if (is_unix_fs(sk)) + return unix_fs_perm(ad->op, request, + ad->subj_cred, &profile->label, + &unix_sk(sk)->path); + + state = match_to_cmd(rules->policy, state, request, unix_sk(sk), + CMD_OPT, &p, &ad->info); + if (state && !p) { + state = aa_dfa_match_len(rules->policy->dfa, state, + (char *) &b, 2); + if (!state) + ad->info = "failed sockopt match"; + } + return aa_do_perms(profile, rules->policy, state, request, p, + ad); + } + + return aa_profile_af_sk_perm(profile, ad, request, sk); +} + +/* null peer_label is allowed, in which case the peer_sk label is used */ +static int profile_peer_perm(struct aa_profile *profile, u32 request, + struct sock *sk, const struct path *path, + struct sockaddr_un *peer_addr, + int peer_addrlen, const struct path *peer_path, + struct aa_label *peer_label, + struct apparmor_audit_data *ad) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; + aa_state_t state; + + AA_BUG(!profile); + AA_BUG(profile_unconfined(profile)); + AA_BUG(!sk); + AA_BUG(!peer_label); + AA_BUG(!ad); + + state = RULE_MEDIATES_v9NET(rules); + if (state) { + struct aa_profile *peerp; + + if (peer_path) + return unix_fs_perm(ad->op, request, ad->subj_cred, + &profile->label, peer_path); + else if (path) + return unix_fs_perm(ad->op, request, ad->subj_cred, + &profile->label, path); + state = match_to_peer(rules->policy, state, request, + unix_sk(sk), + peer_addr, peer_addrlen, &p, &ad->info); + + return 
fn_for_each_in_ns(peer_label, peerp, + match_label(profile, rules, state, request, + peerp, p, ad)); + } + + return aa_profile_af_sk_perm(profile, ad, request, sk); +} + +/* -------------------------------- */ + +int aa_unix_create_perm(struct aa_label *label, int family, int type, + int protocol) +{ + if (!unconfined(label)) { + struct aa_profile *profile; + DEFINE_AUDIT_NET(ad, OP_CREATE, current_cred(), NULL, family, + type, protocol); + + return fn_for_each_confined(label, profile, + profile_create_perm(profile, family, type, + protocol, &ad)); + } + + return 0; +} + +static int aa_unix_label_sk_perm(const struct cred *subj_cred, + struct aa_label *label, + const char *op, u32 request, struct sock *sk, + const struct path *path) +{ + if (!unconfined(label)) { + struct aa_profile *profile; + DEFINE_AUDIT_SK(ad, op, subj_cred, sk); + + return fn_for_each_confined(label, profile, + profile_sk_perm(profile, &ad, request, sk, + path)); + } + return 0; +} + +/* revalidation, get/set attr, shutdown */ +int aa_unix_sock_perm(const char *op, u32 request, struct socket *sock) +{ + struct aa_label *label; + int error; + + label = begin_current_label_crit_section(); + error = aa_unix_label_sk_perm(current_cred(), label, op, + request, sock->sk, + is_unix_fs(sock->sk) ? &unix_sk(sock->sk)->path : NULL); + end_current_label_crit_section(label); + + return error; +} + +static int valid_addr(struct sockaddr *addr, int addr_len) +{ + struct sockaddr_un *sunaddr = unix_addr(addr); + + /* addr_len == offsetof(struct sockaddr_un, sun_path) is autobind */ + if (addr_len < offsetof(struct sockaddr_un, sun_path) || + addr_len > sizeof(*sunaddr)) + return -EINVAL; + return 0; +} + +int aa_unix_bind_perm(struct socket *sock, struct sockaddr *addr, + int addrlen) +{ + struct aa_profile *profile; + struct aa_label *label; + int error = 0; + + error = valid_addr(addr, addrlen); + if (error) + return error; + + label = begin_current_label_crit_section(); + /* fs bind is handled by mknod */ + if (!unconfined(label)) { + DEFINE_AUDIT_SK(ad, OP_BIND, current_cred(), sock->sk); + + ad.net.addr = unix_addr(addr); + ad.net.addrlen = addrlen; + + error = fn_for_each_confined(label, profile, + profile_bind_perm(profile, sock->sk, &ad)); + } + end_current_label_crit_section(label); + + return error; +} + +/* + * unix connections are covered by the + * - unix_stream_connect (stream) and unix_may_send hooks (dgram) + * - fs connect is handled by open + * This is just here to document this is not needed for af_unix + * +int aa_unix_connect_perm(struct socket *sock, struct sockaddr *address, + int addrlen) +{ + return 0; +} +*/ + +int aa_unix_listen_perm(struct socket *sock, int backlog) +{ + struct aa_profile *profile; + struct aa_label *label; + int error = 0; + + label = begin_current_label_crit_section(); + if (!unconfined(label)) { + DEFINE_AUDIT_SK(ad, OP_LISTEN, current_cred(), sock->sk); + + error = fn_for_each_confined(label, profile, + profile_listen_perm(profile, sock->sk, + backlog, &ad)); + } + end_current_label_crit_section(label); + + return error; +} + + +/* ability of sock to connect, not peer address binding */ +int aa_unix_accept_perm(struct socket *sock, struct socket *newsock) +{ + struct aa_profile *profile; + struct aa_label *label; + int error = 0; + + label = begin_current_label_crit_section(); + if (!unconfined(label)) { + DEFINE_AUDIT_SK(ad, OP_ACCEPT, current_cred(), sock->sk); + + error = fn_for_each_confined(label, profile, + profile_accept_perm(profile, sock->sk, &ad)); + } + 
end_current_label_crit_section(label); + + return error; +} + + +/* + * dgram handled by unix_may_sendmsg, right to send on stream done at connect + * could do per msg unix_stream here, but connect + socket transfer is + * sufficient. This is just here to document this is not needed for af_unix + * + * sendmsg, recvmsg +int aa_unix_msg_perm(const char *op, u32 request, struct socket *sock, + struct msghdr *msg, int size) +{ + return 0; +} +*/ + +int aa_unix_opt_perm(const char *op, u32 request, struct socket *sock, + int level, int optname) +{ + struct aa_profile *profile; + struct aa_label *label; + int error = 0; + + label = begin_current_label_crit_section(); + if (!unconfined(label)) { + DEFINE_AUDIT_SK(ad, op, current_cred(), sock->sk); + + error = fn_for_each_confined(label, profile, + profile_opt_perm(profile, request, sock->sk, + optname, &ad)); + } + end_current_label_crit_section(label); + + return error; +} + +static int unix_peer_perm(const struct cred *subj_cred, + struct aa_label *label, const char *op, u32 request, + struct sock *sk, const struct path *path, + struct sockaddr_un *peer_addr, int peer_addrlen, + const struct path *peer_path, struct aa_label *peer_label) +{ + struct aa_profile *profile; + DEFINE_AUDIT_SK(ad, op, subj_cred, sk); + + ad.net.peer.addr = peer_addr; + ad.net.peer.addrlen = peer_addrlen; + + return fn_for_each_confined(label, profile, + profile_peer_perm(profile, request, sk, path, + peer_addr, peer_addrlen, peer_path, + peer_label, &ad)); +} + +/** + * + * Requires: lock held on both @sk and @peer_sk + * called by unix_stream_connect, unix_may_send + */ +int aa_unix_peer_perm(const struct cred *subj_cred, + struct aa_label *label, const char *op, u32 request, + struct sock *sk, struct sock *peer_sk, + struct aa_label *peer_label) +{ + struct unix_sock *peeru = unix_sk(peer_sk); + struct unix_sock *u = unix_sk(sk); + int plen; + struct sockaddr_un *paddr = aa_sunaddr(unix_sk(peer_sk), &plen); + + AA_BUG(!label); + AA_BUG(!sk); + AA_BUG(!peer_sk); + AA_BUG(!peer_label); + + return unix_peer_perm(subj_cred, label, op, request, sk, + is_unix_fs(sk) ? &u->path : NULL, + paddr, plen, + is_unix_fs(peer_sk) ? 
&peeru->path : NULL, + peer_label); +} + +/* sk_plabel for comparison only */ +static void update_sk_ctx(struct sock *sk, struct aa_label *label, + struct aa_label *plabel) +{ + struct aa_label *l, *old; + struct aa_sk_ctx *ctx = aa_sock(sk); + bool update_sk; + + rcu_read_lock(); + update_sk = (plabel && + (plabel != rcu_access_pointer(ctx->peer_lastupdate) || + !aa_label_is_subset(plabel, rcu_dereference(ctx->peer)))) || + !__aa_subj_label_is_cached(label, rcu_dereference(ctx->label)); + rcu_read_unlock(); + if (!update_sk) + return; + + spin_lock(&unix_sk(sk)->lock); + old = rcu_dereference_protected(ctx->label, + lockdep_is_held(&unix_sk(sk)->lock)); + l = aa_label_merge(old, label, GFP_ATOMIC); + if (l) { + if (l != old) { + rcu_assign_pointer(ctx->label, l); + aa_put_label(old); + } else + aa_put_label(l); + } + if (plabel && rcu_access_pointer(ctx->peer_lastupdate) != plabel) { + old = rcu_dereference_protected(ctx->peer, lockdep_is_held(&unix_sk(sk)->lock)); + + if (old == plabel) { + rcu_assign_pointer(ctx->peer_lastupdate, plabel); + } else if (aa_label_is_subset(plabel, old)) { + rcu_assign_pointer(ctx->peer_lastupdate, plabel); + rcu_assign_pointer(ctx->peer, aa_get_label(plabel)); + aa_put_label(old); + } /* else race or a subset - don't update */ + } + spin_unlock(&unix_sk(sk)->lock); +} + +static void update_peer_ctx(struct sock *sk, struct aa_sk_ctx *ctx, + struct aa_label *label) +{ + struct aa_label *l, *old; + + spin_lock(&unix_sk(sk)->lock); + old = rcu_dereference_protected(ctx->peer, + lockdep_is_held(&unix_sk(sk)->lock)); + l = aa_label_merge(old, label, GFP_ATOMIC); + if (l) { + if (l != old) { + rcu_assign_pointer(ctx->peer, l); + aa_put_label(old); + } else + aa_put_label(l); + } + spin_unlock(&unix_sk(sk)->lock); +} + +/* This fn is only checked if something has changed in the security + * boundaries. Otherwise cached info off file is sufficient + */ +int aa_unix_file_perm(const struct cred *subj_cred, struct aa_label *label, + const char *op, u32 request, struct file *file) +{ + struct socket *sock = (struct socket *) file->private_data; + struct sockaddr_un *addr, *peer_addr; + int addrlen, peer_addrlen; + struct aa_label *plabel = NULL; + struct sock *peer_sk = NULL; + u32 sk_req = request & ~NET_PEER_MASK; + struct path path; + bool is_sk_fs; + int error = 0; + + AA_BUG(!label); + AA_BUG(!sock); + AA_BUG(!sock->sk); + AA_BUG(sock->sk->sk_family != PF_UNIX); + + /* investigate only using lock via unix_peer_get() + * addr only needs the memory barrier, but need to investigate + * path + */ + unix_state_lock(sock->sk); + peer_sk = unix_peer(sock->sk); + if (peer_sk) + sock_hold(peer_sk); + + is_sk_fs = is_unix_fs(sock->sk); + addr = aa_sunaddr(unix_sk(sock->sk), &addrlen); + path = unix_sk(sock->sk)->path; + unix_state_unlock(sock->sk); + + if (is_sk_fs && peer_sk) + sk_req = request; + if (sk_req) { + error = aa_unix_label_sk_perm(subj_cred, label, op, + sk_req, sock->sk, + is_sk_fs ? &path : NULL); + } + if (!peer_sk) + goto out; + + peer_addr = aa_sunaddr(unix_sk(peer_sk), &peer_addrlen); + + struct path peer_path; + + peer_path = unix_sk(peer_sk)->path; + if (!is_sk_fs && is_unix_fs(peer_sk)) { + last_error(error, + unix_fs_perm(op, request, subj_cred, label, + is_unix_fs(peer_sk) ? 
&peer_path : NULL)); + } else if (!is_sk_fs) { + struct aa_label *plabel; + struct aa_sk_ctx *pctx = aa_sock(peer_sk); + + rcu_read_lock(); + plabel = aa_get_label_rcu(&pctx->label); + rcu_read_unlock(); + /* no fs check of aa_unix_peer_perm because conditions above + * ensure they will never be done + */ + last_error(error, + xcheck(unix_peer_perm(subj_cred, label, op, + MAY_READ | MAY_WRITE, sock->sk, + is_sk_fs ? &path : NULL, + peer_addr, peer_addrlen, + is_unix_fs(peer_sk) ? + &peer_path : NULL, + plabel), + unix_peer_perm(file->f_cred, plabel, op, + MAY_READ | MAY_WRITE, peer_sk, + is_unix_fs(peer_sk) ? + &peer_path : NULL, + addr, addrlen, + is_sk_fs ? &path : NULL, + label))); + if (!error && !__aa_subj_label_is_cached(plabel, label)) + update_peer_ctx(peer_sk, pctx, label); + } + sock_put(peer_sk); + +out: + + /* update peer cache to latest successful perm check */ + if (error == 0) + update_sk_ctx(sock->sk, label, plabel); + aa_put_label(plabel); + + return error; +} + diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 01b923d97a44..907bd2667e28 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -43,7 +43,7 @@ * The interface is split into two main components based on their function * a securityfs component: * used for static files that are always available, and which allows - * userspace to specificy the location of the security filesystem. + * userspace to specify the location of the security filesystem. * * fns and data are prefixed with * aa_sfs_ @@ -204,7 +204,7 @@ static struct file_system_type aafs_ops = { /** * __aafs_setup_d_inode - basic inode setup for apparmorfs * @dir: parent directory for the dentry - * @dentry: dentry we are seting the inode up for + * @dentry: dentry we are setting the inode up for * @mode: permissions the file should have * @data: data to store on inode.i_private, available in open() * @link: if symlink, symlink target string @@ -283,7 +283,7 @@ static struct dentry *aafs_create(const char *name, umode_t mode, dir = d_inode(parent); inode_lock(dir); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); goto fail_lock; @@ -355,17 +355,22 @@ static void aafs_remove(struct dentry *dentry) if (!dentry || IS_ERR(dentry)) return; + /* ->d_parent is stable as rename is not supported */ dir = d_inode(dentry->d_parent); - inode_lock(dir); - if (simple_positive(dentry)) { - if (d_is_dir(dentry)) - simple_rmdir(dir, dentry); - else - simple_unlink(dir, dentry); + dentry = start_removing_dentry(dentry->d_parent, dentry); + if (!IS_ERR(dentry) && simple_positive(dentry)) { + if (d_is_dir(dentry)) { + if (!WARN_ON(!simple_empty(dentry))) { + __simple_rmdir(dir, dentry); + dput(dentry); + } + } else { + __simple_unlink(dir, dentry); + dput(dentry); + } d_delete(dentry); - dput(dentry); } - inode_unlock(dir); + end_removing(dentry); simple_release_fs(&aafs_mnt, &aafs_count); } @@ -612,8 +617,7 @@ static const struct file_operations aa_fs_ns_revision_fops = { static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms, const char *match_str, size_t match_len) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_perms tmp = { }; aa_state_t state = DFA_NOMATCH; @@ -626,11 +630,20 @@ static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms, if (state) { struct 
path_cond cond = { }; - tmp = *(aa_lookup_fperms(rules->file, state, &cond)); + tmp = *(aa_lookup_condperms(current_fsuid(), + rules->file, state, &cond)); } } else if (rules->policy->dfa) { if (!RULE_MEDIATES(rules, *match_str)) return; /* no change to current perms */ + /* old user space does not correctly detect dbus mediation + * support so we may get dbus policy and requests when + * the abi doesn't support it. This can cause mediation + * regressions, so explicitly test for this situation. + */ + if (*match_str == AA_CLASS_DBUS && + !RULE_MEDIATES_v9NET(rules)) + return; /* no change to current perms */ state = aa_dfa_match_len(rules->policy->dfa, rules->policy->start[0], match_str, match_len); @@ -997,7 +1010,7 @@ static int aa_sfs_seq_show(struct seq_file *seq, void *v) switch (fs_file->v_type) { case AA_SFS_TYPE_BOOLEAN: - seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no"); + seq_printf(seq, "%s\n", str_yes_no(fs_file->v.boolean)); break; case AA_SFS_TYPE_STRING: seq_printf(seq, "%s\n", fs_file->v.string); @@ -1006,7 +1019,7 @@ static int aa_sfs_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%#08lx\n", fs_file->v.u64); break; default: - /* Ignore unpritable entry types. */ + /* Ignore unprintable entry types. */ break; } @@ -1152,7 +1165,7 @@ static int seq_ns_stacked_show(struct seq_file *seq, void *v) struct aa_label *label; label = begin_current_label_crit_section(); - seq_printf(seq, "%s\n", label->size > 1 ? "yes" : "no"); + seq_printf(seq, "%s\n", str_yes_no(label->size > 1)); end_current_label_crit_section(label); return 0; @@ -1175,7 +1188,7 @@ static int seq_ns_nsstacked_show(struct seq_file *seq, void *v) } } - seq_printf(seq, "%s\n", count > 1 ? "yes" : "no"); + seq_printf(seq, "%s\n", str_yes_no(count > 1)); end_current_label_crit_section(label); return 0; @@ -1795,8 +1808,8 @@ fail2: return error; } -static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir, - struct dentry *dentry, umode_t mode) +static struct dentry *ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode) { struct aa_ns *ns, *parent; /* TODO: improve permission check */ @@ -1808,7 +1821,7 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir, AA_MAY_LOAD_POLICY); end_current_label_crit_section(label); if (error) - return error; + return ERR_PTR(error); parent = aa_get_ns(dir->i_private); AA_BUG(d_inode(ns_subns_dir(parent)) != dir); @@ -1843,7 +1856,7 @@ out: mutex_unlock(&parent->lock); aa_put_ns(parent); - return error; + return ERR_PTR(error); } static int ns_rmdir_op(struct inode *dir, struct dentry *dentry) @@ -2244,7 +2257,7 @@ static void *p_next(struct seq_file *f, void *p, loff_t *pos) /** * p_stop - stop depth first traversal * @f: seq_file we are filling - * @p: the last profile writen + * @p: the last profile written * * Release all locking done by p_start/p_next on namespace tree */ @@ -2332,6 +2345,7 @@ static struct aa_sfs_entry aa_sfs_entry_attach[] = { static struct aa_sfs_entry aa_sfs_entry_domain[] = { AA_SFS_FILE_BOOLEAN("change_hat", 1), AA_SFS_FILE_BOOLEAN("change_hatv", 1), + AA_SFS_FILE_BOOLEAN("unconfined_allowed_children", 1), AA_SFS_FILE_BOOLEAN("change_onexec", 1), AA_SFS_FILE_BOOLEAN("change_profile", 1), AA_SFS_FILE_BOOLEAN("stack", 1), @@ -2340,6 +2354,7 @@ static struct aa_sfs_entry aa_sfs_entry_domain[] = { AA_SFS_FILE_BOOLEAN("computed_longest_left", 1), AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach), AA_SFS_FILE_BOOLEAN("disconnected.path", 1), + AA_SFS_FILE_BOOLEAN("kill.signal", 
1), AA_SFS_FILE_STRING("version", "1.2"), { } }; @@ -2364,8 +2379,9 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = { AA_SFS_FILE_BOOLEAN("set_load", 1), /* number of out of band transitions supported */ AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED), - AA_SFS_FILE_U64("permstable32_version", 1), + AA_SFS_FILE_U64("permstable32_version", 3), AA_SFS_FILE_STRING("permstable32", PERMS32STR), + AA_SFS_FILE_U64("state32", 1), AA_SFS_DIR("unconfined_restrictions", aa_sfs_entry_unconfined), { } }; @@ -2383,6 +2399,11 @@ static struct aa_sfs_entry aa_sfs_entry_ns[] = { { } }; +static struct aa_sfs_entry aa_sfs_entry_dbus[] = { + AA_SFS_FILE_STRING("mask", "acquire send receive"), + { } +}; + static struct aa_sfs_entry aa_sfs_entry_query_label[] = { AA_SFS_FILE_STRING("perms", "allow deny audit quiet"), AA_SFS_FILE_BOOLEAN("data", 1), @@ -2405,6 +2426,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = { AA_SFS_DIR("domain", aa_sfs_entry_domain), AA_SFS_DIR("file", aa_sfs_entry_file), AA_SFS_DIR("network_v8", aa_sfs_entry_network), + AA_SFS_DIR("network_v9", aa_sfs_entry_networkv9), AA_SFS_DIR("mount", aa_sfs_entry_mount), AA_SFS_DIR("namespaces", aa_sfs_entry_ns), AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), @@ -2412,6 +2434,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = { AA_SFS_DIR("caps", aa_sfs_entry_caps), AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace), AA_SFS_DIR("signal", aa_sfs_entry_signal), + AA_SFS_DIR("dbus", aa_sfs_entry_dbus), AA_SFS_DIR("query", aa_sfs_entry_query), AA_SFS_DIR("io_uring", aa_sfs_entry_io_uring), { } @@ -2550,7 +2573,7 @@ static int aa_mk_null_file(struct dentry *parent) return error; inode_lock(d_inode(parent)); - dentry = lookup_one_len(NULL_FILE_NAME, parent, strlen(NULL_FILE_NAME)); + dentry = lookup_noperm(&QSTR(NULL_FILE_NAME), parent); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); goto out; @@ -2611,7 +2634,7 @@ static int policy_readlink(struct dentry *dentry, char __user *buffer, res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME, d_inode(dentry)->i_ino); if (res > 0 && res < sizeof(name)) - res = readlink_copy(buffer, buflen, name); + res = readlink_copy(buffer, buflen, name, strlen(name)); else res = -ENOENT; @@ -2631,7 +2654,7 @@ static const struct inode_operations policy_link_iops = { * * Returns: error on failure */ -static int __init aa_create_aafs(void) +int __init aa_create_aafs(void) { struct dentry *dent; int error; @@ -2710,5 +2733,3 @@ error: AA_ERROR("Error creating AppArmor securityfs\n"); return error; } - -fs_initcall(aa_create_aafs); diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 73087d76f649..ac89602aa2d9 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -192,7 +192,7 @@ int aa_audit(int type, struct aa_profile *profile, aa_audit_msg(type, ad, cb); if (ad->type == AUDIT_APPARMOR_KILL) - (void)send_sig_info(SIGKILL, NULL, + (void)send_sig_info(profile->signal, NULL, ad->common.type == LSM_AUDIT_DATA_TASK && ad->common.u.tsk ? 
ad->common.u.tsk : current); diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c index 9934df16c843..b9ea6bc45c1a 100644 --- a/security/apparmor/capability.c +++ b/security/apparmor/capability.c @@ -12,6 +12,7 @@ #include <linux/errno.h> #include <linux/gfp.h> #include <linux/security.h> +#include <linux/timekeeping.h> #include "include/apparmor.h" #include "include/capability.h" @@ -26,12 +27,14 @@ struct aa_sfs_entry aa_sfs_entry_caps[] = { AA_SFS_FILE_STRING("mask", AA_SFS_CAPS_MASK), + AA_SFS_FILE_BOOLEAN("extended", 1), { } }; struct audit_cache { - struct aa_profile *profile; - kernel_cap_t caps; + const struct cred *ad_subj_cred; + /* Capabilities go from 0 to CAP_LAST_CAP */ + u64 ktime_ns_expiration[CAP_LAST_CAP+1]; }; static DEFINE_PER_CPU(struct audit_cache, audit_cache); @@ -64,8 +67,9 @@ static void audit_cb(struct audit_buffer *ab, void *va) static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile, int cap, int error) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + const u64 AUDIT_CACHE_TIMEOUT_NS = 1000*1000*1000; /* 1 second */ + + struct aa_ruleset *rules = profile->label.rules[0]; struct audit_cache *ent; int type = AUDIT_APPARMOR_AUTO; @@ -89,15 +93,16 @@ static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile /* Do simple duplicate message elimination */ ent = &get_cpu_var(audit_cache); - if (profile == ent->profile && cap_raised(ent->caps, cap)) { + /* If the capability was never raised the timestamp check would also catch that */ + if (ad->subj_cred == ent->ad_subj_cred && ktime_get_ns() <= ent->ktime_ns_expiration[cap]) { put_cpu_var(audit_cache); if (COMPLAIN_MODE(profile)) return complain_error(error); return error; } else { - aa_put_profile(ent->profile); - ent->profile = aa_get_profile(profile); - cap_raise(ent->caps, cap); + put_cred(ent->ad_subj_cred); + ent->ad_subj_cred = get_cred(ad->subj_cred); + ent->ktime_ns_expiration[cap] = ktime_get_ns() + AUDIT_CACHE_TIMEOUT_NS; } put_cpu_var(audit_cache); @@ -109,17 +114,39 @@ static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile * @profile: profile being enforced (NOT NULL, NOT unconfined) * @cap: capability to test if allowed * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated - * @ad: audit data (MAY BE NULL indicating no auditing) + * @ad: audit data (NOT NULL) * * Returns: 0 if allowed else -EPERM */ static int profile_capable(struct aa_profile *profile, int cap, unsigned int opts, struct apparmor_audit_data *ad) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; + aa_state_t state; int error; + state = RULE_MEDIATES(rules, ad->class); + if (state) { + struct aa_perms perms = { }; + u32 request; + + /* caps broken into 256 x 32 bit permission chunks */ + state = aa_dfa_next(rules->policy->dfa, state, cap >> 5); + request = 1 << (cap & 0x1f); + perms = *aa_lookup_perms(rules->policy, state); + aa_apply_modes_to_perms(profile, &perms); + + if (opts & CAP_OPT_NOAUDIT) { + if (perms.complain & request) + ad->info = "optional: no audit"; + else + ad = NULL; + } + return aa_check_perms(profile, &perms, request, ad, + audit_cb); + } + + /* fallback to old caps mediation that doesn't support conditionals */ if (cap_raised(rules->caps.allow, cap) && !cap_raised(rules->caps.denied, cap)) error = 0; @@ -163,3 +190,34 @@ int aa_capable(const struct cred *subj_cred, 
struct aa_label *label, return error; } + +kernel_cap_t aa_profile_capget(struct aa_profile *profile) +{ + struct aa_ruleset *rules = profile->label.rules[0]; + aa_state_t state; + + state = RULE_MEDIATES(rules, AA_CLASS_CAP); + if (state) { + kernel_cap_t caps = CAP_EMPTY_SET; + int i; + + /* caps broken into up to 256, 32 bit permission chunks */ + for (i = 0; i < (CAP_LAST_CAP >> 5); i++) { + struct aa_perms perms = { }; + aa_state_t tmp; + + tmp = aa_dfa_next(rules->policy->dfa, state, i); + perms = *aa_lookup_perms(rules->policy, tmp); + aa_apply_modes_to_perms(profile, &perms); + caps.val |= ((u64)(perms.allow)) << (i * 5); + caps.val |= ((u64)(perms.complain)) << (i * 5); + } + return caps; + } + + /* fallback to old caps */ + if (COMPLAIN_MODE(profile)) + return CAP_FULL_SET; + + return rules->caps.allow; +} diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c index aad486b2fca6..d8a7bde94d79 100644 --- a/security/apparmor/crypto.c +++ b/security/apparmor/crypto.c @@ -11,113 +11,51 @@ * it should be. */ -#include <crypto/hash.h> +#include <crypto/sha2.h> #include "include/apparmor.h" #include "include/crypto.h" -static unsigned int apparmor_hash_size; - -static struct crypto_shash *apparmor_tfm; - unsigned int aa_hash_size(void) { - return apparmor_hash_size; + return SHA256_DIGEST_SIZE; } char *aa_calc_hash(void *data, size_t len) { - SHASH_DESC_ON_STACK(desc, apparmor_tfm); char *hash; - int error; - - if (!apparmor_tfm) - return NULL; - hash = kzalloc(apparmor_hash_size, GFP_KERNEL); + hash = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); if (!hash) return ERR_PTR(-ENOMEM); - desc->tfm = apparmor_tfm; - - error = crypto_shash_init(desc); - if (error) - goto fail; - error = crypto_shash_update(desc, (u8 *) data, len); - if (error) - goto fail; - error = crypto_shash_final(desc, hash); - if (error) - goto fail; - + sha256(data, len, hash); return hash; - -fail: - kfree(hash); - - return ERR_PTR(error); } int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, size_t len) { - SHASH_DESC_ON_STACK(desc, apparmor_tfm); - int error; + struct sha256_ctx sctx; __le32 le32_version = cpu_to_le32(version); if (!aa_g_hash_policy) return 0; - if (!apparmor_tfm) - return 0; - - profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); + profile->hash = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); if (!profile->hash) return -ENOMEM; - desc->tfm = apparmor_tfm; - - error = crypto_shash_init(desc); - if (error) - goto fail; - error = crypto_shash_update(desc, (u8 *) &le32_version, 4); - if (error) - goto fail; - error = crypto_shash_update(desc, (u8 *) start, len); - if (error) - goto fail; - error = crypto_shash_final(desc, profile->hash); - if (error) - goto fail; - + sha256_init(&sctx); + sha256_update(&sctx, (u8 *)&le32_version, 4); + sha256_update(&sctx, (u8 *)start, len); + sha256_final(&sctx, profile->hash); return 0; - -fail: - kfree(profile->hash); - profile->hash = NULL; - - return error; } -static int __init init_profile_hash(void) +int __init init_profile_hash(void) { - struct crypto_shash *tfm; - - if (!apparmor_initialized) - return 0; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - int error = PTR_ERR(tfm); - AA_ERROR("failed to setup profile sha256 hashing: %d\n", error); - return error; - } - apparmor_tfm = tfm; - apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm); - - aa_info_message("AppArmor sha256 policy hashing enabled"); - + if (apparmor_initialized) + aa_info_message("AppArmor sha256 policy hashing enabled"); return 
0; } - -late_initcall(init_profile_hash); diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 2bc34dce9a46..267da82afb14 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -28,6 +28,12 @@ #include "include/policy.h" #include "include/policy_ns.h" +static const char * const CONFLICTING_ATTACH_STR = "conflicting profile attachments"; +static const char * const CONFLICTING_ATTACH_STR_IX = + "conflicting profile attachments - ix fallback"; +static const char * const CONFLICTING_ATTACH_STR_UX = + "conflicting profile attachments - ux fallback"; + /** * may_change_ptraced_domain - check if can change profile on ptraced task * @to_cred: cred of task changing domain @@ -87,8 +93,7 @@ static inline aa_state_t match_component(struct aa_profile *profile, struct aa_profile *tp, bool stack, aa_state_t state) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; const char *ns_name; if (stack) @@ -125,8 +130,7 @@ static int label_compound_match(struct aa_profile *profile, aa_state_t state, bool subns, u32 request, struct aa_perms *perms) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_profile *tp; struct label_it i; struct path_cond cond = { }; @@ -154,7 +158,8 @@ next: if (!state) goto fail; } - *perms = *(aa_lookup_fperms(rules->file, state, &cond)); + *perms = *(aa_lookup_condperms(current_fsuid(), rules->file, state, + &cond)); aa_apply_modes_to_perms(profile, perms); if ((perms->allow & request) != request) return -EACCES; @@ -187,8 +192,7 @@ static int label_components_match(struct aa_profile *profile, aa_state_t start, bool subns, u32 request, struct aa_perms *perms) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_profile *tp; struct label_it i; struct aa_perms tmp; @@ -209,7 +213,8 @@ static int label_components_match(struct aa_profile *profile, return 0; next: - tmp = *(aa_lookup_fperms(rules->file, state, &cond)); + tmp = *(aa_lookup_condperms(current_fsuid(), rules->file, state, + &cond)); aa_apply_modes_to_perms(profile, &tmp); aa_perms_accum(perms, &tmp); label_for_each_cont(i, label, tp) { @@ -218,7 +223,8 @@ next: state = match_component(profile, tp, stack, start); if (!state) goto fail; - tmp = *(aa_lookup_fperms(rules->file, state, &cond)); + tmp = *(aa_lookup_condperms(current_fsuid(), rules->file, state, + &cond)); aa_apply_modes_to_perms(profile, &tmp); aa_perms_accum(perms, &tmp); } @@ -323,7 +329,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm, size = vfs_getxattr_alloc(&nop_mnt_idmap, d, attach->xattrs[i], &value, value_size, GFP_KERNEL); if (size >= 0) { - u32 index, perm; + struct aa_perms *perms; /* * Check the xattr presence before value. 
This ensure @@ -335,9 +341,8 @@ static int aa_xattrs_match(const struct linux_binprm *bprm, /* Check xattr value */ state = aa_dfa_match_len(attach->xmatch->dfa, state, value, size); - index = ACCEPT_TABLE(attach->xmatch->dfa)[state]; - perm = attach->xmatch->perms[index].allow; - if (!(perm & MAY_EXEC)) { + perms = aa_lookup_perms(attach->xmatch, state); + if (!(perms->allow & MAY_EXEC)) { ret = -EINVAL; goto out; } @@ -415,15 +420,14 @@ restart: if (attach->xmatch->dfa) { unsigned int count; aa_state_t state; - u32 index, perm; + struct aa_perms *perms; state = aa_dfa_leftmatch(attach->xmatch->dfa, attach->xmatch->start[AA_CLASS_XMATCH], name, &count); - index = ACCEPT_TABLE(attach->xmatch->dfa)[state]; - perm = attach->xmatch->perms[index].allow; + perms = aa_lookup_perms(attach->xmatch, state); /* any accepting state means a valid match. */ - if (perm & MAY_EXEC) { + if (perms->allow & MAY_EXEC) { int ret = 0; if (count < candidate_len) @@ -484,7 +488,7 @@ restart: if (!candidate || conflict) { if (conflict) - *info = "conflicting profile attachments"; + *info = CONFLICTING_ATTACH_STR; rcu_read_unlock(); return NULL; } @@ -508,15 +512,16 @@ static const char *next_name(int xtype, const char *name) * @name: returns: name tested to find label (NOT NULL) * * Returns: refcounted label, or NULL on failure (MAYBE NULL) + * @name will always be set with the last name tried */ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex, const char **name) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_label *label = NULL; u32 xtype = xindex & AA_X_TYPE_MASK; int index = xindex & AA_X_INDEX_MASK; + const char *next; AA_BUG(!name); @@ -524,25 +529,27 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex, /* TODO: move lookup parsing to unpack time so this is a straight * index into the resultant label */ - for (*name = rules->file->trans.table[index]; !label && *name; - *name = next_name(xtype, *name)) { + for (next = rules->file->trans.table[index]; next; + next = next_name(xtype, next)) { + const char *lookup = (*next == '&') ? 
next + 1 : next; + *name = next; if (xindex & AA_X_CHILD) { - struct aa_profile *new_profile; - /* release by caller */ - new_profile = aa_find_child(profile, *name); - if (new_profile) - label = &new_profile->label; + /* TODO: switch to parse to get stack of child */ + struct aa_profile *new = aa_find_child(profile, lookup); + + if (new) + /* release by caller */ + return &new->label; continue; } - label = aa_label_parse(&profile->label, *name, GFP_KERNEL, + label = aa_label_parse(&profile->label, lookup, GFP_KERNEL, true, false); - if (IS_ERR(label)) - label = NULL; + if (!IS_ERR_OR_NULL(label)) + /* release by caller */ + return label; } - /* released by caller */ - - return label; + return NULL; } /** @@ -564,12 +571,12 @@ static struct aa_label *x_to_label(struct aa_profile *profile, const char **lookupname, const char **info) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); struct aa_label *new = NULL; + struct aa_label *stack = NULL; struct aa_ns *ns = profile->ns; u32 xtype = xindex & AA_X_TYPE_MASK; - const char *stack = NULL; + /* Used for info checks during fallback handling */ + const char *old_info = NULL; switch (xtype) { case AA_X_NONE: @@ -578,13 +585,14 @@ static struct aa_label *x_to_label(struct aa_profile *profile, break; case AA_X_TABLE: /* TODO: fix when perm mapping done at unload */ - stack = rules->file->trans.table[xindex & AA_X_INDEX_MASK]; - if (*stack != '&') { - /* released by caller */ - new = x_table_lookup(profile, xindex, lookupname); - stack = NULL; + /* released by caller + * if null for both stack and direct want to try fallback + */ + new = x_table_lookup(profile, xindex, lookupname); + if (!new || **lookupname != '&') break; - } + stack = new; + new = NULL; fallthrough; /* to X_NAME */ case AA_X_NAME: if (xindex & AA_X_CHILD) @@ -599,17 +607,38 @@ static struct aa_label *x_to_label(struct aa_profile *profile, break; } + /* fallback transition check */ if (!new) { if (xindex & AA_X_INHERIT) { /* (p|c|n)ix - don't change profile but do * use the newest version */ - *info = "ix fallback"; + if (*info == CONFLICTING_ATTACH_STR) { + *info = CONFLICTING_ATTACH_STR_IX; + } else { + old_info = *info; + *info = "ix fallback"; + } /* no profile && no error */ new = aa_get_newest_label(&profile->label); } else if (xindex & AA_X_UNCONFINED) { new = aa_get_newest_label(ns_unconfined(profile->ns)); - *info = "ux fallback"; + if (*info == CONFLICTING_ATTACH_STR) { + *info = CONFLICTING_ATTACH_STR_UX; + } else { + old_info = *info; + *info = "ux fallback"; + } + } + /* We set old_info on the code paths above where overwriting + * could have happened, so now check if info was set by + * find_attach as well (i.e. whether we actually overwrote) + * and warn accordingly.
+ */ + if (old_info && old_info != CONFLICTING_ATTACH_STR) { + pr_warn_ratelimited( + "AppArmor: find_attach (from profile %s) audit info \"%s\" dropped", + profile->base.hname, old_info); } } @@ -617,12 +646,12 @@ static struct aa_label *x_to_label(struct aa_profile *profile, /* base the stack on post domain transition */ struct aa_label *base = new; - new = aa_label_parse(base, stack, GFP_KERNEL, true, false); - if (IS_ERR(new)) - new = NULL; + new = aa_label_merge(base, stack, GFP_KERNEL); + /* null on error */ aa_put_label(base); } + aa_put_label(stack); /* released by caller */ return new; } @@ -633,9 +662,9 @@ static struct aa_label *profile_transition(const struct cred *subj_cred, char *buffer, struct path_cond *cond, bool *secure_exec) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_label *new = NULL; + struct aa_profile *new_profile = NULL; const char *info = NULL, *name = NULL, *target = NULL; aa_state_t state = rules->file->start[AA_CLASS_FILE]; struct aa_perms perms = {}; @@ -651,7 +680,7 @@ static struct aa_label *profile_transition(const struct cred *subj_cred, if (error) { if (profile_unconfined(profile) || (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) { - AA_DEBUG("name lookup ix on error"); + AA_DEBUG(DEBUG_DOMAIN, "name lookup ix on error"); error = 0; new = aa_get_newest_label(&profile->label); } @@ -662,11 +691,27 @@ static struct aa_label *profile_transition(const struct cred *subj_cred, if (profile_unconfined(profile)) { new = find_attach(bprm, profile->ns, &profile->ns->base.profiles, name, &info); + /* info set -> something unusual that we should report + * Currently this is only conflicting attachments, but other + * infos added in the future should also be logged by default + * and only excluded on a case-by-case basis + */ + if (info) { + /* Because perms is never used again after this audit + * we don't need to care about clobbering it + */ + perms.audit |= MAY_EXEC; + perms.allow |= MAY_EXEC; + /* Don't cause error if auditing fails */ + (void) aa_audit_file(subj_cred, profile, &perms, + OP_EXEC, MAY_EXEC, name, target, new, cond->uid, + info, error); + } if (new) { - AA_DEBUG("unconfined attached to new label"); + AA_DEBUG(DEBUG_DOMAIN, "unconfined attached to new label"); return new; } - AA_DEBUG("unconfined exec no attachment"); + AA_DEBUG(DEBUG_DOMAIN, "unconfined exec no attachment"); return aa_get_newest_label(&profile->label); } @@ -677,18 +722,33 @@ static struct aa_label *profile_transition(const struct cred *subj_cred, new = x_to_label(profile, bprm, name, perms.xindex, &target, &info); if (new && new->proxy == profile->label.proxy && info) { + /* Force audit on conflicting attachment fallback + * Because perms is never used again after this audit + * we don't need to care about clobbering it + */ + if (info == CONFLICTING_ATTACH_STR_IX + || info == CONFLICTING_ATTACH_STR_UX) + perms.audit |= MAY_EXEC; /* hack ix fallback - improve how this is detected */ goto audit; } else if (!new) { - error = -EACCES; + if (info) { + pr_warn_ratelimited( + "AppArmor: %s (from profile %s) audit info \"%s\" dropped on missing transition", + __func__, profile->base.hname, info); + } info = "profile transition not found"; - /* remove MAY_EXEC to audit as failure */ + /* remove MAY_EXEC to audit as failure or complaint */ perms.allow &= ~MAY_EXEC; + if (COMPLAIN_MODE(profile)) { + /* create null profile instead of failing */ + goto create_learning_profile; + } + 
error = -EACCES; } } else if (COMPLAIN_MODE(profile)) { +create_learning_profile: /* no exec permission - learning mode */ - struct aa_profile *new_profile = NULL; - new_profile = aa_new_learning_profile(profile, false, name, GFP_KERNEL); if (!new_profile) { @@ -709,8 +769,8 @@ static struct aa_label *profile_transition(const struct cred *subj_cred, if (!(perms.xindex & AA_X_UNSAFE)) { if (DEBUG_ON) { - dbg_printk("apparmor: scrubbing environment variables" - " for %s profile=", name); + dbg_printk("apparmor: setting AT_SECURE for %s profile=", + name); aa_label_printk(new, GFP_KERNEL); dbg_printk("\n"); } @@ -735,8 +795,7 @@ static int profile_onexec(const struct cred *subj_cred, char *buffer, struct path_cond *cond, bool *secure_exec) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; aa_state_t state = rules->file->start[AA_CLASS_FILE]; struct aa_perms perms = {}; const char *xname = NULL, *info = "change_profile onexec"; @@ -751,7 +810,7 @@ static int profile_onexec(const struct cred *subj_cred, /* change_profile on exec already granted */ /* * NOTE: Domain transitions from unconfined are allowed - * even when no_new_privs is set because this aways results + * even when no_new_privs is set because this always results * in a further reduction of permissions. */ return 0; @@ -762,7 +821,7 @@ static int profile_onexec(const struct cred *subj_cred, if (error) { if (profile_unconfined(profile) || (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) { - AA_DEBUG("name lookup ix on error"); + AA_DEBUG(DEBUG_DOMAIN, "name lookup ix on error"); error = 0; } xname = bprm->filename; @@ -789,8 +848,8 @@ static int profile_onexec(const struct cred *subj_cred, if (!(perms.xindex & AA_X_UNSAFE)) { if (DEBUG_ON) { - dbg_printk("apparmor: scrubbing environment " - "variables for %s label=", xname); + dbg_printk("apparmor: setting AT_SECURE for %s label=", + xname); aa_label_printk(onexec, GFP_KERNEL); dbg_printk("\n"); } @@ -821,33 +880,19 @@ static struct aa_label *handle_onexec(const struct cred *subj_cred, AA_BUG(!bprm); AA_BUG(!buffer); - if (!stack) { - error = fn_for_each_in_ns(label, profile, - profile_onexec(subj_cred, profile, onexec, stack, - bprm, buffer, cond, unsafe)); - if (error) - return ERR_PTR(error); - new = fn_label_build_in_ns(label, profile, GFP_KERNEL, - aa_get_newest_label(onexec), - profile_transition(subj_cred, profile, bprm, - buffer, - cond, unsafe)); - - } else { - /* TODO: determine how much we want to loosen this */ - error = fn_for_each_in_ns(label, profile, - profile_onexec(subj_cred, profile, onexec, stack, bprm, - buffer, cond, unsafe)); - if (error) - return ERR_PTR(error); - new = fn_label_build_in_ns(label, profile, GFP_KERNEL, - aa_label_merge(&profile->label, onexec, - GFP_KERNEL), - profile_transition(subj_cred, profile, bprm, - buffer, - cond, unsafe)); - } + /* TODO: determine how much we want to loosen this */ + error = fn_for_each_in_ns(label, profile, + profile_onexec(subj_cred, profile, onexec, stack, + bprm, buffer, cond, unsafe)); + if (error) + return ERR_PTR(error); + new = fn_label_build_in_ns(label, profile, GFP_KERNEL, + stack ? 
aa_label_merge(&profile->label, onexec, + GFP_KERNEL) + : aa_get_newest_label(onexec), + profile_transition(subj_cred, profile, bprm, + buffer, cond, unsafe)); if (new) return new; @@ -936,7 +981,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm) * * NOTE: Domain transitions from unconfined and to stacked * subsets are allowed even when no_new_privs is set because this - * aways results in a further reduction of permissions. + * always results in a further reduction of permissions. */ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && !unconfined(label) && @@ -960,8 +1005,8 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm) if (unsafe) { if (DEBUG_ON) { - dbg_printk("scrubbing environment variables for %s " - "label=", bprm->filename); + dbg_printk("setting AT_SECURE for %s label=", + bprm->filename); aa_label_printk(new, GFP_KERNEL); dbg_printk("\n"); } @@ -971,8 +1016,8 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm) if (label->proxy != new->proxy) { /* when transitioning clear unsafe personality bits */ if (DEBUG_ON) { - dbg_printk("apparmor: clearing unsafe personality " - "bits. %s label=", bprm->filename); + dbg_printk("apparmor: clearing unsafe personality bits. %s label=", + bprm->filename); aa_label_printk(new, GFP_KERNEL); dbg_printk("\n"); } @@ -1198,10 +1243,24 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp) ctx->nnp = aa_get_label(label); + /* return -EPERM when unconfined doesn't have children to avoid + * changing the traditional error code for unconfined. + */ if (unconfined(label)) { - info = "unconfined can not change_hat"; - error = -EPERM; - goto fail; + struct label_it i; + bool empty = true; + + rcu_read_lock(); + label_for_each_in_ns(i, labels_ns(label), label, profile) { + empty &= list_empty(&profile->base.profiles); + } + rcu_read_unlock(); + + if (empty) { + info = "unconfined can not change_hat"; + error = -EPERM; + goto fail; + } } if (count) { @@ -1226,7 +1285,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) if (task_no_new_privs(current) && !unconfined(label) && !aa_label_is_unconfined_subset(new, ctx->nnp)) { /* not an apparmor denial per se, so don't log it */ - AA_DEBUG("no_new_privs - change_hat denied"); + AA_DEBUG(DEBUG_DOMAIN, + "no_new_privs - change_hat denied"); error = -EPERM; goto out; } @@ -1247,7 +1307,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) if (task_no_new_privs(current) && !unconfined(label) && !aa_label_is_unconfined_subset(previous, ctx->nnp)) { /* not an apparmor denial per se, so don't log it */ - AA_DEBUG("no_new_privs - change_hat denied"); + AA_DEBUG(DEBUG_DOMAIN, + "no_new_privs - change_hat denied"); error = -EPERM; goto out; } @@ -1292,8 +1353,7 @@ static int change_profile_perms_wrapper(const char *op, const char *name, struct aa_label *target, bool stack, u32 request, struct aa_perms *perms) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; const char *info = NULL; int error = 0; @@ -1353,7 +1413,7 @@ int aa_change_profile(const char *fqname, int flags) if (!fqname || !*fqname) { aa_put_label(label); - AA_DEBUG("no profile name"); + AA_DEBUG(DEBUG_DOMAIN, "no profile name"); return -EINVAL; } @@ -1472,7 +1532,8 @@ check: if (task_no_new_privs(current) && !unconfined(label) && !aa_label_is_unconfined_subset(new, ctx->nnp)) { /* not an apparmor denial 
per se, so don't log it */ - AA_DEBUG("no_new_privs - change_hat denied"); + AA_DEBUG(DEBUG_DOMAIN, + "no_new_privs - change_hat denied"); error = -EPERM; goto out; } diff --git a/security/apparmor/file.c b/security/apparmor/file.c index d52a5b14dad4..c75820402878 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -14,6 +14,7 @@ #include <linux/fs.h> #include <linux/mount.h> +#include "include/af_unix.h" #include "include/apparmor.h" #include "include/audit.h" #include "include/cred.h" @@ -168,8 +169,9 @@ static int path_name(const char *op, const struct cred *subj_cred, struct aa_perms default_perms = {}; /** - * aa_lookup_fperms - convert dfa compressed perms to internal perms - * @file_rules: the aa_policydb to lookup perms for (NOT NULL) + * aa_lookup_condperms - convert dfa compressed perms to internal perms + * @subj_uid: uid to use for subject owner test + * @rules: the aa_policydb to lookup perms for (NOT NULL) * @state: state in dfa * @cond: conditions to consider (NOT NULL) * @@ -177,18 +179,21 @@ struct aa_perms default_perms = {}; * * Returns: a pointer to a file permission set */ -struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules, - aa_state_t state, struct path_cond *cond) +struct aa_perms *aa_lookup_condperms(kuid_t subj_uid, struct aa_policydb *rules, + aa_state_t state, struct path_cond *cond) { - unsigned int index = ACCEPT_TABLE(file_rules->dfa)[state]; + unsigned int index = ACCEPT_TABLE(rules->dfa)[state]; - if (!(file_rules->perms)) + if (!(rules->perms)) return &default_perms; - if (uid_eq(current_fsuid(), cond->uid)) - return &(file_rules->perms[index]); + if ((ACCEPT_TABLE2(rules->dfa)[state] & ACCEPT_FLAG_OWNER)) { + if (uid_eq(subj_uid, cond->uid)) + return &(rules->perms[index]); + return &(rules->perms[index + 1]); + } - return &(file_rules->perms[index + 1]); + return &(rules->perms[index]); } /** @@ -207,21 +212,22 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start, { aa_state_t state; state = aa_dfa_match(file_rules->dfa, start, name); - *perms = *(aa_lookup_fperms(file_rules, state, cond)); + *perms = *(aa_lookup_condperms(current_fsuid(), file_rules, state, + cond)); return state; } -static int __aa_path_perm(const char *op, const struct cred *subj_cred, - struct aa_profile *profile, const char *name, - u32 request, struct path_cond *cond, int flags, - struct aa_perms *perms) +int __aa_path_perm(const char *op, const struct cred *subj_cred, + struct aa_profile *profile, const char *name, + u32 request, struct path_cond *cond, int flags, + struct aa_perms *perms) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; int e = 0; - if (profile_unconfined(profile)) + if (profile_unconfined(profile) || + ((flags & PATH_SOCK_COND) && !RULE_MEDIATES_v9NET(rules))) return 0; aa_str_perms(rules->file, rules->file->start[AA_CLASS_FILE], name, cond, perms); @@ -316,8 +322,7 @@ static int profile_path_link(const struct cred *subj_cred, const struct path *target, char *buffer2, struct path_cond *cond) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; const char *lname, *tname = NULL; struct aa_perms lperms = {}, perms; const char *info = NULL; @@ -423,9 +428,11 @@ int aa_path_link(const struct cred *subj_cred, { struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry }; struct path target = { .mnt = new_dir->mnt, .dentry = 
old_dentry }; + struct inode *inode = d_backing_inode(old_dentry); + vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(target.mnt), inode); struct path_cond cond = { - d_backing_inode(old_dentry)->i_uid, - d_backing_inode(old_dentry)->i_mode + .uid = vfsuid_into_kuid(vfsuid), + .mode = inode->i_mode, }; char *buffer = NULL, *buffer2 = NULL; struct aa_profile *profile; @@ -534,22 +541,19 @@ static int __file_sock_perm(const char *op, const struct cred *subj_cred, struct aa_label *flabel, struct file *file, u32 request, u32 denied) { - struct socket *sock = (struct socket *) file->private_data; int error; - AA_BUG(!sock); - /* revalidation due to label out of date. No revocation at this time */ if (!denied && aa_label_is_subset(flabel, label)) return 0; /* TODO: improve to skip profiles cached in flabel */ - error = aa_sock_file_perm(subj_cred, label, op, request, sock); + error = aa_sock_file_perm(subj_cred, label, op, request, file); if (denied) { /* TODO: improve to skip profiles checked above */ /* check every profile in file label to is cached */ last_error(error, aa_sock_file_perm(subj_cred, flabel, op, - request, sock)); + request, file)); } if (!error) update_file_ctx(file_ctx(file), label, request); @@ -557,6 +561,35 @@ static int __file_sock_perm(const char *op, const struct cred *subj_cred, return error; } +/* for now separate fn to indicate semantics of the check */ +static bool __file_is_delegated(struct aa_label *obj_label) +{ + return unconfined(obj_label); +} + +static bool __unix_needs_revalidation(struct file *file, struct aa_label *label, + u32 request) +{ + struct socket *sock = (struct socket *) file->private_data; + + lockdep_assert_in_rcu_read_lock(); + + if (!S_ISSOCK(file_inode(file)->i_mode)) + return false; + if (request & NET_PEER_MASK) + return false; + if (sock->sk->sk_family == PF_UNIX) { + struct aa_sk_ctx *ctx = aa_sock(sock->sk); + + if (rcu_access_pointer(ctx->peer) != + rcu_access_pointer(ctx->peer_lastupdate)) + return true; + return !__aa_subj_label_is_cached(rcu_dereference(ctx->label), + label); + } + return false; +} + /** * aa_file_perm - do permission revalidation check & audit for @file * @op: operation being checked @@ -594,17 +627,18 @@ int aa_file_perm(const char *op, const struct cred *subj_cred, * delegation from unconfined tasks */ denied = request & ~fctx->allow; - if (unconfined(label) || unconfined(flabel) || - (!denied && aa_label_is_subset(flabel, label))) { + if (unconfined(label) || __file_is_delegated(flabel) || + __unix_needs_revalidation(file, label, request) || + (!denied && __aa_subj_label_is_cached(label, flabel))) { rcu_read_unlock(); goto done; } + /* slow path - revalidate access */ flabel = aa_get_newest_label(flabel); rcu_read_unlock(); - /* TODO: label cross check */ - if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry)) + if (path_mediated_fs(file->f_path.dentry)) error = __file_path_perm(op, subj_cred, label, flabel, file, request, denied, in_atomic); diff --git a/security/apparmor/include/af_unix.h b/security/apparmor/include/af_unix.h new file mode 100644 index 000000000000..4a62e600d82b --- /dev/null +++ b/security/apparmor/include/af_unix.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AppArmor security module + * + * This file contains AppArmor af_unix fine grained mediation + * + * Copyright 2023 Canonical Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + */ +#ifndef __AA_AF_UNIX_H + +#include <net/af_unix.h> + +#include "label.h" + +#define unix_addr(A) ((struct sockaddr_un *)(A)) +#define unix_addr_len(L) ((L) - sizeof(sa_family_t)) +#define unix_peer(sk) (unix_sk(sk)->peer) +#define is_unix_addr_abstract_name(B) ((B)[0] == 0) +#define is_unix_addr_anon(A, L) ((A) && unix_addr_len(L) <= 0) +#define is_unix_addr_fs(A, L) (!is_unix_addr_anon(A, L) && \ + !is_unix_addr_abstract_name(unix_addr(A)->sun_path)) + +#define is_unix_anonymous(U) (!unix_sk(U)->addr) +#define is_unix_fs(U) (!is_unix_anonymous(U) && \ + unix_sk(U)->addr->name->sun_path[0]) +#define is_unix_connected(S) ((S)->state == SS_CONNECTED) + + +struct sockaddr_un *aa_sunaddr(const struct unix_sock *u, int *addrlen); +int aa_unix_peer_perm(const struct cred *subj_cred, + struct aa_label *label, const char *op, u32 request, + struct sock *sk, struct sock *peer_sk, + struct aa_label *peer_label); +int aa_unix_sock_perm(const char *op, u32 request, struct socket *sock); +int aa_unix_create_perm(struct aa_label *label, int family, int type, + int protocol); +int aa_unix_bind_perm(struct socket *sock, struct sockaddr *address, + int addrlen); +int aa_unix_connect_perm(struct socket *sock, struct sockaddr *address, + int addrlen); +int aa_unix_listen_perm(struct socket *sock, int backlog); +int aa_unix_accept_perm(struct socket *sock, struct socket *newsock); +int aa_unix_msg_perm(const char *op, u32 request, struct socket *sock, + struct msghdr *msg, int size); +int aa_unix_opt_perm(const char *op, u32 request, struct socket *sock, int level, + int optname); +int aa_unix_file_perm(const struct cred *subj_cred, struct aa_label *label, + const char *op, u32 request, struct file *file); + +#endif /* __AA_AF_UNIX_H */ diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h index f83934913b0f..cc6e3df1bc62 100644 --- a/security/apparmor/include/apparmor.h +++ b/security/apparmor/include/apparmor.h @@ -28,6 +28,7 @@ #define AA_CLASS_SIGNAL 10 #define AA_CLASS_XMATCH 11 #define AA_CLASS_NET 14 +#define AA_CLASS_NETV9 15 #define AA_CLASS_LABEL 16 #define AA_CLASS_POSIX_MQUEUE 17 #define AA_CLASS_MODULE 19 @@ -38,12 +39,13 @@ #define AA_CLASS_X 31 #define AA_CLASS_DBUS 32 +/* NOTE: if AA_CLASS_LAST > 63 need to update label->mediates */ #define AA_CLASS_LAST AA_CLASS_DBUS /* Control parameters settable through module/boot flags */ extern enum audit_mode aa_g_audit; extern bool aa_g_audit_header; -extern bool aa_g_debug; +extern int aa_g_debug; extern bool aa_g_hash_policy; extern bool aa_g_export_binary; extern int aa_g_rawdata_compression_level; diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h index 1e94904f68d9..dd580594dfb7 100644 --- a/security/apparmor/include/apparmorfs.h +++ b/security/apparmor/include/apparmorfs.h @@ -104,6 +104,8 @@ enum aafs_prof_type { #define prof_dir(X) ((X)->dents[AAFS_PROF_DIR]) #define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS]) +int aa_create_aafs(void); + void __aa_bump_ns_revision(struct aa_ns *ns); void __aafs_profile_rmdir(struct aa_profile *profile); void __aafs_profile_migrate_dents(struct aa_profile *old, diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index e27229349abb..1a71a94ea19c 100644 --- 
a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h @@ -138,9 +138,12 @@ struct apparmor_audit_data { }; struct { int type, protocol; - struct sock *peer_sk; void *addr; int addrlen; + struct { + void *addr; + int addrlen; + } peer; } net; }; }; diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h index d6dcc604ec0c..1ddcec2d1160 100644 --- a/security/apparmor/include/capability.h +++ b/security/apparmor/include/capability.h @@ -36,6 +36,7 @@ struct aa_caps { extern struct aa_sfs_entry aa_sfs_entry_caps[]; +kernel_cap_t aa_profile_capget(struct aa_profile *profile); int aa_capable(const struct cred *subj_cred, struct aa_label *label, int cap, unsigned int opts); diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h index 7265d2f81dd5..b028e4c13b6f 100644 --- a/security/apparmor/include/cred.h +++ b/security/apparmor/include/cred.h @@ -114,10 +114,22 @@ static inline struct aa_label *aa_get_current_label(void) return aa_get_label(l); } -#define __end_current_label_crit_section(X) end_current_label_crit_section(X) +/** + * __end_current_label_crit_section - end crit section begun with __begin_... + * @label: label obtained from __begin_current_label_crit_section + * @needput: output: bool set by __begin_current_label_crit_section + * + * Returns: label to use for this crit section + */ +static inline void __end_current_label_crit_section(struct aa_label *label, + bool needput) +{ + if (unlikely(needput)) + aa_put_label(label); +} /** - * end_label_crit_section - put a reference found with begin_current_label.. + * end_current_label_crit_section - put a reference found with begin_current_label.. * @label: label reference to put * * Should only be used with a reference obtained with @@ -132,6 +144,7 @@ static inline void end_current_label_crit_section(struct aa_label *label) /** * __begin_current_label_crit_section - current's confining label + * @needput: store whether the label needs to be put when ending crit section * * Returns: up to date confining label or the ns unconfined label (NOT NULL) * @@ -142,13 +155,16 @@ static inline void end_current_label_crit_section(struct aa_label *label) * critical section between __begin_current_label_crit_section() .. 
* __end_current_label_crit_section() */ -static inline struct aa_label *__begin_current_label_crit_section(void) +static inline struct aa_label *__begin_current_label_crit_section(bool *needput) { struct aa_label *label = aa_current_raw_label(); - if (label_is_stale(label)) - label = aa_get_newest_label(label); + if (label_is_stale(label)) { + *needput = true; + return aa_get_newest_label(label); + } + *needput = false; return label; } @@ -184,10 +200,11 @@ static inline struct aa_ns *aa_get_current_ns(void) { struct aa_label *label; struct aa_ns *ns; + bool needput; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); ns = aa_get_ns(labels_ns(label)); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return ns; } diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h index 636a04e20d91..f3ffd388cc58 100644 --- a/security/apparmor/include/crypto.h +++ b/security/apparmor/include/crypto.h @@ -13,6 +13,7 @@ #include "policy.h" #ifdef CONFIG_SECURITY_APPARMOR_HASH +int init_profile_hash(void); unsigned int aa_hash_size(void); char *aa_calc_hash(void *data, size_t len); int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h index 6e8f2aa66cd6..ef60f99bc5ae 100644 --- a/security/apparmor/include/file.h +++ b/security/apparmor/include/file.h @@ -77,12 +77,17 @@ int aa_audit_file(const struct cred *cred, const char *target, struct aa_label *tlabel, kuid_t ouid, const char *info, int error); -struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules, - aa_state_t state, struct path_cond *cond); +struct aa_perms *aa_lookup_condperms(kuid_t subj_uid, + struct aa_policydb *file_rules, + aa_state_t state, struct path_cond *cond); aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start, const char *name, struct path_cond *cond, struct aa_perms *perms); +int __aa_path_perm(const char *op, const struct cred *subj_cred, + struct aa_profile *profile, const char *name, + u32 request, struct path_cond *cond, int flags, + struct aa_perms *perms); int aa_path_perm(const char *op, const struct cred *subj_cred, struct aa_label *label, const struct path *path, int flags, u32 request, struct path_cond *cond); @@ -99,7 +104,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files); /** - * aa_map_file_perms - map file flags to AppArmor permissions + * aa_map_file_to_perms - map file flags to AppArmor permissions * @file: open file to map flags to AppArmor permissions * * Returns: apparmor permission set for the file diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h index 74d17052f76b..323dd071afe9 100644 --- a/security/apparmor/include/ipc.h +++ b/security/apparmor/include/ipc.h @@ -13,6 +13,9 @@ #include <linux/sched.h> +#define SIGUNKNOWN 0 +#define MAXMAPPED_SIG 35 + int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender, const struct cred *target_cred, struct aa_label *target, int sig); diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h index 2a72e6b17d68..c0812dbc1b5b 100644 --- a/security/apparmor/include/label.h +++ b/security/apparmor/include/label.h @@ -19,6 +19,7 @@ #include "lib.h" struct aa_ns; +struct aa_ruleset; #define LOCAL_VEC_ENTRIES 8 #define DEFINE_VEC(T, V) \ @@ -109,7 +110,7 @@ struct label_it { int i, j; }; -/* struct aa_label - lazy labeling 
struct + * @count: ref count of active users * @node: rbtree position * @rcu: rcu callback struct @@ -118,7 +119,10 @@ struct label_it { * @flags: stale and other flags - values may change under label set lock * @secid: secid that references this label * @size: number of entries in @ent[] - * @ent: set of profiles for label, actual size determined by @size + * @mediates: bitmask for label_mediates + * profile: label vec when embedded in a profile FLAG_PROFILE is set + * rules: variable length rules in a profile FLAG_PROFILE is set + * vec: vector of profiles comprising the compound label */ struct aa_label { struct kref count; @@ -129,7 +133,18 @@ struct aa_label { long flags; u32 secid; int size; - struct aa_profile *vec[]; + u64 mediates; + union { + struct { + /* only used if the label is a profile, size of + * rules[] is determined by the profile + * profile[1] is poison or null as guard + */ + struct aa_profile *profile[2]; + DECLARE_FLEX_ARRAY(struct aa_ruleset *, rules); + }; + DECLARE_FLEX_ARRAY(struct aa_profile *, vec); + }; }; #define last_error(E, FN) \ @@ -160,32 +175,8 @@ int aa_label_next_confined(struct aa_label *l, int i); #define label_for_each_cont(I, L, P) \ for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i)) -#define next_comb(I, L1, L2) \ -do { \ - (I).j++; \ - if ((I).j >= (L2)->size) { \ - (I).i++; \ - (I).j = 0; \ - } \ -} while (0) -/* for each combination of P1 in L1, and P2 in L2 */ -#define label_for_each_comb(I, L1, L2, P1, P2) \ -for ((I).i = (I).j = 0; \ - ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]); \ - (I) = next_comb(I, L1, L2)) - -#define fn_for_each_comb(L1, L2, P1, P2, FN) \ -({ \ - struct label_it i; \ - int __E = 0; \ - label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \ - last_error(__E, (FN)); \ - } \ - __E; \ -}) - /* for each profile that is enforcing confinement in a label */ #define label_for_each_confined(I, L, P) \ for ((I).i = aa_label_next_confined((L), 0); \ @@ -255,20 +246,17 @@ for ((I).i = (I).j = 0; \ #define fn_for_each_not_in_set(L1, L2, P, FN) \ fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set) -#define LABEL_MEDIATES(L, C) \ -({ \ - struct aa_profile *profile; \ - struct label_it i; \ - int ret = 0; \ - label_for_each(i, (L), profile) { \ - if (RULE_MEDIATES(&profile->rules, (C))) { \ - ret = 1; \ - break; \ - } \ - } \ - ret; \ -}) +static inline bool label_mediates(struct aa_label *L, unsigned char C) +{ + return (L)->mediates & (((u64) 1) << (C)); +} +static inline bool label_mediates_safe(struct aa_label *L, unsigned char C) +{ + if (C > AA_CLASS_LAST) + return false; + return label_mediates(L, C); +} void aa_labelset_destroy(struct aa_labelset *ls); void aa_labelset_init(struct aa_labelset *ls); @@ -291,8 +279,6 @@ bool aa_label_replace(struct aa_label *old, struct aa_label *new); bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old, struct aa_label *new); -struct aa_label *aa_label_find(struct aa_label *l); - struct aa_profile *aa_label_next_in_merge(struct label_it *I, struct aa_label *a, struct aa_label *b); @@ -320,8 +306,6 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns, struct aa_label *label, int flags, gfp_t gfp); void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags, gfp_t gfp); -void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp); -void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp); void aa_label_printk(struct aa_label *label, gfp_t gfp); struct aa_label
*aa_label_strn_parse(struct aa_label *base, const char *str, @@ -445,6 +429,13 @@ static inline void aa_put_label(struct aa_label *l) kref_put(&l->count, aa_label_kref); } +/* wrapper fn to indicate semantics of the check */ +static inline bool __aa_subj_label_is_cached(struct aa_label *subj_label, + struct aa_label *obj_label) +{ + return aa_label_is_subset(obj_label, subj_label); +} + struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp); void aa_proxy_kref(struct kref *kref); diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h index d7a894b1031f..444197075fd6 100644 --- a/security/apparmor/include/lib.h +++ b/security/apparmor/include/lib.h @@ -19,22 +19,34 @@ extern struct aa_dfa *stacksplitdfa; /* - * DEBUG remains global (no per profile flag) since it is mostly used in sysctl - * which is not related to profile accesses. - */ - -#define DEBUG_ON (aa_g_debug) -/* * split individual debug cases out in preparation for finer grained * debug controls in the future. */ -#define AA_DEBUG_LABEL DEBUG_ON #define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args) -#define AA_DEBUG(fmt, args...) \ + +#define DEBUG_NONE 0 +#define DEBUG_LABEL_ABS_ROOT 1 +#define DEBUG_LABEL 2 +#define DEBUG_DOMAIN 4 +#define DEBUG_POLICY 8 +#define DEBUG_INTERFACE 0x10 + +#define DEBUG_ALL 0x1f /* update if new DEBUG_X added */ +#define DEBUG_PARSE_ERROR (-1) + +#define DEBUG_ON (aa_g_debug != DEBUG_NONE) +#define DEBUG_ABS_ROOT (aa_g_debug & DEBUG_LABEL_ABS_ROOT) + +#define AA_DEBUG(opt, fmt, args...) \ do { \ - if (DEBUG_ON) \ - pr_debug_ratelimited("AppArmor: " fmt, ##args); \ + if (aa_g_debug & opt) \ + pr_warn_ratelimited("%s: " fmt, __func__, ##args); \ } while (0) +#define AA_DEBUG_LABEL(LAB, X, fmt, args...) \ +do { \ + if ((LAB)->flags & FLAG_DEBUG1) \ + AA_DEBUG(X, fmt, args); \ +} while (0) #define AA_WARN(X) WARN((X), "APPARMOR WARN %s: %s\n", __func__, #X) @@ -48,9 +60,16 @@ extern struct aa_dfa *stacksplitdfa; #define AA_BUG_FMT(X, fmt, args...) \ WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args) #else -#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args) +#define AA_BUG_FMT(X, fmt, args...) \ + do { \ + BUILD_BUG_ON_INVALID(X); \ + no_printk(fmt, ##args); \ + } while (0) #endif +int aa_parse_debug_params(const char *str); +int aa_print_debug_params(char *buffer); + #define AA_ERROR(fmt, args...) 
\ pr_err_ratelimited("AppArmor: " fmt, ##args) @@ -59,7 +78,6 @@ extern int apparmor_initialized; /* fn's in lib */ const char *skipn_spaces(const char *str, size_t n); -char *aa_split_fqname(char *args, char **ns_name); const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, size_t *ns_len); void aa_info_message(const char *str); @@ -107,6 +125,7 @@ struct aa_str_table { }; void aa_free_str_table(struct aa_str_table *table); +bool aa_resize_str_table(struct aa_str_table *t, int newsize, gfp_t gfp); struct counted_str { struct kref count; @@ -152,7 +171,7 @@ struct aa_policy { /** * basename - find the last component of an hname - * @name: hname to find the base profile name component of (NOT NULL) + * @hname: hname to find the base profile name component of (NOT NULL) * * Returns: the tail (base profile name) name component of an hname */ @@ -282,7 +301,7 @@ __do_cleanup: \ } \ __done: \ if (!__new_) \ - AA_DEBUG("label build failed\n"); \ + AA_DEBUG(DEBUG_LABEL, "label build failed\n"); \ (__new_); \ }) diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h index 4bb0405c9190..1fbe82f5021b 100644 --- a/security/apparmor/include/match.h +++ b/security/apparmor/include/match.h @@ -17,7 +17,7 @@ #define DFA_START 1 -/** +/* * The format used for transition tables is based on the GNU flex table * file format (--tables-file option; see Table File Format in the flex * info pages and the flex sources for documentation). The magic number @@ -87,10 +87,12 @@ struct table_header { char td_data[]; }; -#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF]->td_data)) +#define TABLE_DATAU16(TABLE) ((u16 *)((TABLE)->td_data)) +#define TABLE_DATAU32(TABLE) ((u32 *)((TABLE)->td_data)) +#define DEFAULT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_DEF]->td_data)) #define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE]->td_data)) -#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT]->td_data)) -#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK]->td_data)) +#define NEXT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_NXT]->td_data)) +#define CHECK_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_CHK]->td_data)) #define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC]->td_data)) #define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT]->td_data)) #define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2]->td_data)) @@ -135,17 +137,15 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start, void aa_dfa_free_kref(struct kref *kref); -#define WB_HISTORY_SIZE 24 +/* This needs to be a power of 2 */ +#define WB_HISTORY_SIZE 32 struct match_workbuf { - unsigned int count; unsigned int pos; unsigned int len; - unsigned int size; /* power of 2, same as history size */ - unsigned int history[WB_HISTORY_SIZE]; + aa_state_t history[WB_HISTORY_SIZE]; }; #define DEFINE_MATCH_WB(N) \ struct match_workbuf N = { \ - .count = 0, \ .pos = 0, \ .len = 0, \ } diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h index c42ed8a73f1c..0d0b0ce42723 100644 --- a/security/apparmor/include/net.h +++ b/security/apparmor/include/net.h @@ -47,8 +47,9 @@ #define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \ AA_MAY_ACCEPT) struct aa_sk_ctx { - struct aa_label *label; - struct aa_label *peer; + struct aa_label __rcu *label; + struct aa_label __rcu *peer; + struct aa_label __rcu *peer_lastupdate; /* ptr cmp only, no deref */ }; static inline struct aa_sk_ctx *aa_sock(const struct sock *sk) @@ -56,7 
+57,7 @@ static inline struct aa_sk_ctx *aa_sock(const struct sock *sk) return sk->sk_security + apparmor_blob_sizes.lbs_sock; } -#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \ +#define DEFINE_AUDIT_NET(NAME, OP, CRED, SK, F, T, P) \ struct lsm_network_audit NAME ## _net = { .sk = (SK), \ .family = (F)}; \ DEFINE_AUDIT_DATA(NAME, \ @@ -65,24 +66,15 @@ static inline struct aa_sk_ctx *aa_sock(const struct sock *sk) AA_CLASS_NET, \ OP); \ NAME.common.u.net = &(NAME ## _net); \ + NAME.subj_cred = (CRED); \ NAME.net.type = (T); \ NAME.net.protocol = (P) -#define DEFINE_AUDIT_SK(NAME, OP, SK) \ - DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \ +#define DEFINE_AUDIT_SK(NAME, OP, CRED, SK) \ + DEFINE_AUDIT_NET(NAME, OP, CRED, SK, (SK)->sk_family, (SK)->sk_type, \ (SK)->sk_protocol) -#define af_select(FAMILY, FN, DEF_FN) \ -({ \ - int __e; \ - switch ((FAMILY)) { \ - default: \ - __e = DEF_FN; \ - } \ - __e; \ -}) - struct aa_secmark { u8 audit; u8 deny; @@ -91,11 +83,19 @@ struct aa_secmark { }; extern struct aa_sfs_entry aa_sfs_entry_network[]; - +extern struct aa_sfs_entry aa_sfs_entry_networkv9[]; + +int aa_do_perms(struct aa_profile *profile, struct aa_policydb *policy, + aa_state_t state, u32 request, struct aa_perms *p, + struct apparmor_audit_data *ad); +/* passing in state returned by XXX_mediates_AF() */ +aa_state_t aa_match_to_prot(struct aa_policydb *policy, aa_state_t state, + u32 request, u16 af, int type, int protocol, + struct aa_perms **p, const char **info); void audit_net_cb(struct audit_buffer *ab, void *va); int aa_profile_af_perm(struct aa_profile *profile, struct apparmor_audit_data *ad, - u32 request, u16 family, int type); + u32 request, u16 family, int type, int protocol); int aa_af_perm(const struct cred *subj_cred, struct aa_label *label, const char *op, u32 request, u16 family, int type, int protocol); @@ -105,13 +105,13 @@ static inline int aa_profile_af_sk_perm(struct aa_profile *profile, struct sock *sk) { return aa_profile_af_perm(profile, ad, request, sk->sk_family, - sk->sk_type); + sk->sk_type, sk->sk_protocol); } int aa_sk_perm(const char *op, u32 request, struct sock *sk); int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label, const char *op, u32 request, - struct socket *sock); + struct file *file); int apparmor_secmark_check(struct aa_label *label, char *op, u32 request, u32 secid, const struct sock *sk); diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h index 343189903dba..8bb915d48dc7 100644 --- a/security/apparmor/include/path.h +++ b/security/apparmor/include/path.h @@ -13,6 +13,7 @@ enum path_flags { PATH_IS_DIR = 0x1, /* path is a directory */ + PATH_SOCK_COND = 0x2, PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */ PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */ PATH_CHROOT_NSCONNECT = 0x10, /* connect paths that are at ns root */ diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h index 0f7e913c3fc2..37a3781b99a0 100644 --- a/security/apparmor/include/perms.h +++ b/security/apparmor/include/perms.h @@ -101,8 +101,8 @@ extern struct aa_perms allperms; /** * aa_perms_accum_raw - accumulate perms with out masking off overlapping perms - * @accum - perms struct to accumulate into - * @addend - perms struct to add to @accum + * @accum: perms struct to accumulate into + * @addend: perms struct to add to @accum */ static inline void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend) @@ -128,8 +128,8 @@ static 
inline void aa_perms_accum_raw(struct aa_perms *accum, /** * aa_perms_accum - accumulate perms, masking off overlapping perms - * @accum - perms struct to accumulate into - * @addend - perms struct to add to @accum + * @accum: perms struct to accumulate into + * @addend: perms struct to add to @accum */ static inline void aa_perms_accum(struct aa_perms *accum, struct aa_perms *addend) @@ -213,9 +213,6 @@ void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend); void aa_profile_match_label(struct aa_profile *profile, struct aa_ruleset *rules, struct aa_label *label, int type, u32 request, struct aa_perms *perms); -int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target, - u32 request, int type, u32 *deny, - struct apparmor_audit_data *ad); int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms, u32 request, struct apparmor_audit_data *ad, void (*cb)(struct audit_buffer *, void *)); diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index 75088cc310b6..4c50875c9d13 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -59,6 +59,11 @@ extern const char *const aa_profile_mode_names[]; #define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2) +/* flags in the dfa accept2 table */ +enum dfa_accept_flags { + ACCEPT_FLAG_OWNER = 1, +}; + /* * FIXME: currently need a clean way to replace and remove profiles as a * set. It should be done at the namespace level. @@ -124,6 +129,7 @@ static inline void aa_put_pdb(struct aa_policydb *pdb) kref_put(&pdb->count, aa_pdb_free_kref); } +/* lookup perm that doesn't have an object conditional */ static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy, aa_state_t state) { @@ -135,7 +141,6 @@ static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy, return &(policy->perms[index]); } - /* struct aa_data - generic data structure * key: name for retrieving this data * size: size of data in bytes @@ -160,8 +165,6 @@ struct aa_data { * @secmark: secmark label match info */ struct aa_ruleset { - struct list_head list; - int size; /* TODO: merge policy and file */ @@ -175,6 +178,7 @@ struct aa_ruleset { struct aa_secmark *secmark; }; + /* struct aa_attachment - data and rules for a profiles attachment * @list: * @xmatch_str: human readable attachment string @@ -193,7 +197,6 @@ struct aa_attachment { /* struct aa_profile - basic confinement data * @base - base components of the profile (name, refcount, lists, lock ...)
- * @label - label this profile is an extension of * @parent: parent of profile * @ns: namespace the profile is in * @rename: optional profile name that this profile renamed @@ -201,13 +204,20 @@ struct aa_attachment { * @audit: the auditing mode of the profile * @mode: the enforcement mode of the profile * @path_flags: flags controlling path generation behavior + * @signal: the signal that should be used when kill is used * @disconnected: what to prepend if attach_disconnected is specified * @attach: attachment rules for the profile * @rules: rules to be enforced * + * learning_cache: the accesses learned in complain mode + * raw_data: rawdata of the loaded profile policy + * hash: cryptographic hash of the profile * @dents: dentries for the profiles file entries in apparmorfs * @dirname: name of the profile dir in apparmorfs + * @dents: set of dentries associated with the profile * @data: hashtable for free-form policy aa_data + * @label - label this profile is an extension of + * @rules - label with the rule vec on its end * * The AppArmor profile contains the basic confinement data. Each profile * has a name, and exists in a namespace. The @name and @exec_match are @@ -231,16 +241,19 @@ struct aa_profile { enum audit_mode audit; long mode; u32 path_flags; + int signal; const char *disconnected; struct aa_attachment attach; - struct list_head rules; struct aa_loaddata *rawdata; unsigned char *hash; char *dirname; struct dentry *dents[AAFS_PROF_SIZEOF]; struct rhashtable *data; + + int n_rules; + /* special - variable length must be last entry in profile */ struct aa_label label; }; @@ -264,7 +277,6 @@ void aa_free_profile(struct aa_profile *profile); struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name); struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname, size_t n); -struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *name); struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, const char *fqname, size_t n); @@ -299,24 +311,38 @@ static inline aa_state_t RULE_MEDIATES(struct aa_ruleset *rules, rules->policy->start[0], &class, 1); } -static inline aa_state_t RULE_MEDIATES_AF(struct aa_ruleset *rules, u16 AF) +static inline aa_state_t RULE_MEDIATES_v9NET(struct aa_ruleset *rules) { - aa_state_t state = RULE_MEDIATES(rules, AA_CLASS_NET); - __be16 be_af = cpu_to_be16(AF); + return RULE_MEDIATES(rules, AA_CLASS_NETV9); +} + +static inline aa_state_t RULE_MEDIATES_NET(struct aa_ruleset *rules) +{ + /* can not use RULE_MEDIATE_v9AF here, because AF match fail + * can not be distinguished from class match fail, and we only + * fallback to checking older class on class match failure + */ + aa_state_t state = RULE_MEDIATES(rules, AA_CLASS_NETV9); + /* fallback and check v7/8 if v9 is NOT mediated */ if (!state) - return DFA_NOMATCH; - return aa_dfa_match_len(rules->policy->dfa, state, (char *) &be_af, 2); + state = RULE_MEDIATES(rules, AA_CLASS_NET); + + return state; } -static inline aa_state_t ANY_RULE_MEDIATES(struct list_head *head, - unsigned char class) + +void aa_compute_profile_mediates(struct aa_profile *profile); +static inline bool profile_mediates(struct aa_profile *profile, + unsigned char class) { - struct aa_ruleset *rule; + return label_mediates(&profile->label, class); +} - /* TODO: change to list walk */ - rule = list_first_entry(head, typeof(*rule), list); - return RULE_MEDIATES(rule, class); +static inline bool profile_mediates_safe(struct aa_profile *profile, + unsigned char class) +{ + return
label_mediates_safe(&profile->label, class); } /** diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h index cc6d1c9f4a47..6025d3849cf8 100644 --- a/security/apparmor/include/secid.h +++ b/security/apparmor/include/secid.h @@ -25,15 +25,13 @@ struct aa_label; extern int apparmor_display_secid_mode; struct aa_label *aa_secid_to_label(u32 secid); -int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); -int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, - u32 *seclen); +int apparmor_secid_to_secctx(u32 secid, struct lsm_context *cp); +int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp); int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); -void apparmor_release_secctx(char *secdata, u32 seclen); +void apparmor_release_secctx(struct lsm_context *cp); int aa_alloc_secid(struct aa_label *label, gfp_t gfp); void aa_free_secid(u32 secid); -void aa_secid_update(u32 secid, struct aa_label *label); #endif /* __AA_SECID_H */ diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h index cbf7a997ed84..c772668cdc62 100644 --- a/security/apparmor/include/sig_names.h +++ b/security/apparmor/include/sig_names.h @@ -1,9 +1,5 @@ #include <linux/signal.h> - -#define SIGUNKNOWN 0 -#define MAXMAPPED_SIG 35 -#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1) -#define SIGRT_BASE 128 +#include "signal.h" /* provide a mapping of arch signal to internal signal # for mediation * those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO diff --git a/security/apparmor/include/signal.h b/security/apparmor/include/signal.h new file mode 100644 index 000000000000..729763fa7ce6 --- /dev/null +++ b/security/apparmor/include/signal.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AppArmor security module + * + * This file contains AppArmor ipc mediation function definitions. + * + * Copyright 2023 Canonical Ltd. 
+ */ + +#ifndef __AA_SIGNAL_H +#define __AA_SIGNAL_H + +#define SIGUNKNOWN 0 +#define MAXMAPPED_SIG 35 + +#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1) +#define SIGRT_BASE 128 + +#endif /* __AA_SIGNAL_H */ diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 0cdf4340b02d..df5712cea685 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c @@ -80,21 +80,20 @@ static int profile_signal_perm(const struct cred *cred, struct aa_label *peer, u32 request, struct apparmor_audit_data *ad) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_perms perms; aa_state_t state; - if (profile_unconfined(profile) || - !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL)) + if (profile_unconfined(profile)) return 0; ad->subj_cred = cred; ad->peer = peer; /* TODO: secondary cache check <profile, profile, perm> */ - state = aa_dfa_next(rules->policy->dfa, - rules->policy->start[AA_CLASS_SIGNAL], - ad->signal); + state = RULE_MEDIATES(rules, AA_CLASS_SIGNAL); + if (!state) + return 0; + state = aa_dfa_next(rules->policy->dfa, state, ad->signal); aa_label_match(profile, rules, peer, state, false, request, &perms); aa_apply_modes_to_perms(profile, &perms); return aa_check_perms(profile, &perms, request, ad, audit_signal_cb); diff --git a/security/apparmor/label.c b/security/apparmor/label.c index c71e4615dd46..913678f199c3 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -198,21 +198,25 @@ static bool vec_is_stale(struct aa_profile **vec, int n) return false; } -static long accum_vec_flags(struct aa_profile **vec, int n) +static void accum_label_info(struct aa_label *new) { long u = FLAG_UNCONFINED; int i; - AA_BUG(!vec); + AA_BUG(!new); - for (i = 0; i < n; i++) { - u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 | - FLAG_STALE); - if (!(u & vec[i]->label.flags & FLAG_UNCONFINED)) + /* size == 1 is a profile and flags must be set as part of creation */ + if (new->size == 1) + return; + + for (i = 0; i < new->size; i++) { + u |= new->vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 | + FLAG_STALE); + if (!(u & new->vec[i]->label.flags & FLAG_UNCONFINED)) u &= ~FLAG_UNCONFINED; + new->mediates |= new->vec[i]->label.mediates; } - - return u; + new->flags |= u; } static int sort_cmp(const void *a, const void *b) @@ -431,7 +435,7 @@ struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp) /* + 1 for null terminator entry on vec */ new = kzalloc(struct_size(new, vec, size + 1), gfp); - AA_DEBUG("%s (%p)\n", __func__, new); + AA_DEBUG(DEBUG_LABEL, "%s (%p)\n", __func__, new); if (!new) goto fail; @@ -645,6 +649,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new) rb_replace_node(&old->node, &new->node, &ls->root); old->flags &= ~FLAG_IN_TREE; new->flags |= FLAG_IN_TREE; + accum_label_info(new); return true; } @@ -705,6 +710,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls, rb_link_node(&label->node, parent, new); rb_insert_color(&label->node, &ls->root); label->flags |= FLAG_IN_TREE; + accum_label_info(label); return aa_get_label(label); } @@ -899,23 +905,6 @@ struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len, return vec_create_and_insert_label(vec, len, gfp); } -/** - * aa_label_find - find label @label in label set - * @label: label to find (NOT NULL) - * - * Requires: caller to hold a valid ref on l - * - * Returns: refcounted @label if @label is in tree - * refcounted 
label that is equiv to @label in tree - * else NULL if @label or equiv is not in tree - */ -struct aa_label *aa_label_find(struct aa_label *label) -{ - AA_BUG(!label); - - return vec_find(label->vec, label->size); -} - /** * aa_label_insert - insert label @label into @ls or return existing label @@ -1102,7 +1091,6 @@ static struct aa_label *label_merge_insert(struct aa_label *new, else if (k == b->size) return aa_get_label(b); } - new->flags |= accum_vec_flags(new->vec, new->size); ls = labels_set(new); write_lock_irqsave(&ls->lock, flags); label = __label_insert(labels_set(new), new, false); @@ -1473,7 +1461,7 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp) /* * cached label name is present and visible - * @label->hname only exists if label is namespace hierachical + * @label->hname only exists if label is namespace hierarchical */ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label, int flags) @@ -1634,7 +1622,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns, AA_BUG(!str && size != 0); AA_BUG(!label); - if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) { + if (DEBUG_ABS_ROOT && (flags & FLAG_ABS_ROOT)) { ns = root_ns; len = snprintf(str, size, "_"); update_for_len(total, len, size, str); @@ -1748,7 +1736,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns, display_mode(ns, label, flags)) { len = aa_label_asxprint(&name, ns, label, flags, gfp); if (len < 0) { - AA_DEBUG("label print error"); + AA_DEBUG(DEBUG_LABEL, "label print error"); return; } str = name; @@ -1776,7 +1764,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns, len = aa_label_asxprint(&str, ns, label, flags, gfp); if (len < 0) { - AA_DEBUG("label print error"); + AA_DEBUG(DEBUG_LABEL, "label print error"); return; } seq_puts(f, str); @@ -1799,7 +1787,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags, len = aa_label_asxprint(&str, ns, label, flags, gfp); if (len < 0) { - AA_DEBUG("label print error"); + AA_DEBUG(DEBUG_LABEL, "label print error"); return; } pr_info("%s", str); @@ -1811,22 +1799,6 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags, pr_info("%s", label->hname); } -void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp) -{ - struct aa_ns *ns = aa_get_current_ns(); - - aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp); - aa_put_ns(ns); -} - -void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp) -{ - struct aa_ns *ns = aa_get_current_ns(); - - aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp); - aa_put_ns(ns); -} - void aa_label_printk(struct aa_label *label, gfp_t gfp) { struct aa_ns *ns = aa_get_current_ns(); @@ -1898,7 +1870,7 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str, AA_BUG(!str); str = skipn_spaces(str, n); - if (str == NULL || (AA_DEBUG_LABEL && *str == '_' && + if (str == NULL || (DEBUG_ABS_ROOT && *str == '_' && base != &root_ns->unconfined->label)) return ERR_PTR(-EINVAL); diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index cd569fbbfe36..82dbb97ad406 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -25,6 +25,120 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK, .quiet = ALL_PERMS_MASK, .hide = ALL_PERMS_MASK }; +struct val_table_ent { + const char *str; + int value; +}; + +static struct val_table_ent debug_values_table[] = { + { "N", DEBUG_NONE }, + { "none", DEBUG_NONE }, + { "n", DEBUG_NONE }, + { "0", 
DEBUG_NONE }, + { "all", DEBUG_ALL }, + { "Y", DEBUG_ALL }, + { "y", DEBUG_ALL }, + { "1", DEBUG_ALL }, + { "abs_root", DEBUG_LABEL_ABS_ROOT }, + { "label", DEBUG_LABEL }, + { "domain", DEBUG_DOMAIN }, + { "policy", DEBUG_POLICY }, + { "interface", DEBUG_INTERFACE }, + { NULL, 0 } +}; + +static struct val_table_ent *val_table_find_ent(struct val_table_ent *table, + const char *name, size_t len) +{ + struct val_table_ent *entry; + + for (entry = table; entry->str != NULL; entry++) { + if (strncmp(entry->str, name, len) == 0 && + strlen(entry->str) == len) + return entry; + } + return NULL; +} + +int aa_parse_debug_params(const char *str) +{ + struct val_table_ent *ent; + const char *next; + int val = 0; + + do { + size_t n = strcspn(str, "\r\n,"); + + next = str + n; + ent = val_table_find_ent(debug_values_table, str, next - str); + if (ent) + val |= ent->value; + else + AA_DEBUG(DEBUG_INTERFACE, "unknown debug type '%.*s'", + (int)(next - str), str); + str = next + 1; + } while (*next != 0); + return val; +} + +/** + * val_mask_to_str - convert a perm mask to its short string + * @str: character buffer to store string in (at least 10 characters) + * @size: size of the @str buffer + * @table: NUL-terminated character buffer of permission characters (NOT NULL) + * @mask: permission mask to convert + */ +static int val_mask_to_str(char *str, size_t size, + const struct val_table_ent *table, u32 mask) +{ + const struct val_table_ent *ent; + int total = 0; + + for (ent = table; ent->str; ent++) { + if (ent->value && (ent->value & mask) == ent->value) { + int len = scnprintf(str, size, "%s%s", total ? "," : "", + ent->str); + size -= len; + str += len; + total += len; + mask &= ~ent->value; + } + } + + return total; +} + +int aa_print_debug_params(char *buffer) +{ + if (!aa_g_debug) + return sprintf(buffer, "N"); + return val_mask_to_str(buffer, PAGE_SIZE, debug_values_table, + aa_g_debug); +} + +bool aa_resize_str_table(struct aa_str_table *t, int newsize, gfp_t gfp) +{ + char **n; + int i; + + if (t->size == newsize) + return true; + n = kcalloc(newsize, sizeof(*n), gfp); + if (!n) + return false; + for (i = 0; i < min(t->size, newsize); i++) + n[i] = t->table[i]; + for (; i < t->size; i++) + kfree_sensitive(t->table[i]); + if (newsize > t->size) + memset(&n[t->size], 0, (newsize-t->size)*sizeof(*n)); + kfree_sensitive(t->table); + t->table = n; + t->size = newsize; + + return true; +} + /** * aa_free_str_table - free entries str table * @t: the string table to free (MAYBE NULL) @@ -46,44 +160,6 @@ void aa_free_str_table(struct aa_str_table *t) } /** - * aa_split_fqname - split a fqname into a profile and namespace name - * @fqname: a full qualified name in namespace profile format (NOT NULL) - * @ns_name: pointer to portion of the string containing the ns name (NOT NULL) - * - * Returns: profile name or NULL if one is not specified - * - * Split a namespace name from a profile name (see policy.c for naming - * description). If a portion of the name is missing it returns NULL for - * that portion. - * - * NOTE: may modify the @fqname string. The pointers returned point - * into the @fqname string. 
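
Aside from the patch itself, the comma-separated debug option parsing added above in lib.c (aa_parse_debug_params() walking debug_values_table) boils down to splitting the string on separators and OR-ing each recognised name's value into a mask. A minimal userspace sketch of that pattern; the flag names and values are hypothetical stand-ins, not the kernel's table:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical flag values; the real set lives in debug_values_table. */
	enum { DBG_NONE = 0x0, DBG_LABEL = 0x1, DBG_DOMAIN = 0x2, DBG_POLICY = 0x4 };

	struct flag_ent { const char *str; int value; };

	static const struct flag_ent table[] = {
		{ "none",   DBG_NONE   },
		{ "label",  DBG_LABEL  },
		{ "domain", DBG_DOMAIN },
		{ "policy", DBG_POLICY },
		{ NULL, 0 }
	};

	/* Accumulate the value of every recognised, separator-delimited name. */
	static int parse_flags(const char *str)
	{
		int val = 0;

		while (*str) {
			size_t n = strcspn(str, ",\r\n");
			const struct flag_ent *ent;

			for (ent = table; ent->str; ent++)
				if (strlen(ent->str) == n && !strncmp(ent->str, str, n))
					val |= ent->value;
			str += n;
			if (*str)
				str++;	/* skip the separator */
		}
		return val;
	}

	int main(void)
	{
		printf("label,policy -> 0x%x\n", parse_flags("label,policy")); /* 0x5 */
		return 0;
	}

Unlike this sketch, the kernel helper also accepts aliases such as "Y"/"1" for all and logs unrecognised names through AA_DEBUG(DEBUG_INTERFACE, ...).
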
- */ -char *aa_split_fqname(char *fqname, char **ns_name) -{ - char *name = strim(fqname); - - *ns_name = NULL; - if (name[0] == ':') { - char *split = strchr(&name[1], ':'); - *ns_name = skip_spaces(&name[1]); - if (split) { - /* overwrite ':' with \0 */ - *split++ = 0; - if (strncmp(split, "//", 2) == 0) - split += 2; - name = skip_spaces(split); - } else - /* a ns name without a following profile is allowed */ - name = NULL; - } - if (name && *name == 0) - name = NULL; - - return name; -} - -/** * skipn_spaces - Removes leading whitespace from @str. * @str: The string to be stripped. * @n: length of str to parse, will stop at \0 if encountered before n @@ -276,33 +352,6 @@ void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, } /** - * aa_audit_perms_cb - generic callback fn for auditing perms - * @ab: audit buffer (NOT NULL) - * @va: audit struct to audit values of (NOT NULL) - */ -static void aa_audit_perms_cb(struct audit_buffer *ab, void *va) -{ - struct common_audit_data *sa = va; - struct apparmor_audit_data *ad = aad(sa); - - if (ad->request) { - audit_log_format(ab, " requested_mask="); - aa_audit_perm_mask(ab, ad->request, aa_file_perm_chrs, - PERMS_CHRS_MASK, aa_file_perm_names, - PERMS_NAMES_MASK); - } - if (ad->denied) { - audit_log_format(ab, "denied_mask="); - aa_audit_perm_mask(ab, ad->denied, aa_file_perm_chrs, - PERMS_CHRS_MASK, aa_file_perm_names, - PERMS_NAMES_MASK); - } - audit_log_format(ab, " peer="); - aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer, - FLAGS_NONE, GFP_ATOMIC); -} - -/** * aa_apply_modes_to_perms - apply namespace and profile flags to perms * @profile: that perms where computed from * @perms: perms to apply mode modifiers to @@ -349,25 +398,6 @@ void aa_profile_match_label(struct aa_profile *profile, } -/* currently unused */ -int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target, - u32 request, int type, u32 *deny, - struct apparmor_audit_data *ad) -{ - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); - struct aa_perms perms; - - ad->peer = &target->label; - ad->request = request; - - aa_profile_match_label(profile, rules, &target->label, type, request, - &perms); - aa_apply_modes_to_perms(profile, &perms); - *deny |= request & perms.deny; - return aa_check_perms(profile, &perms, request, ad, aa_audit_perms_cb); -} - /** * aa_check_perms - do audit mode selection based on perms set * @profile: profile being checked diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 1edc12862a7d..a87cd60ed206 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -26,11 +26,13 @@ #include <uapi/linux/mount.h> #include <uapi/linux/lsm.h> +#include "include/af_unix.h" #include "include/apparmor.h" #include "include/apparmorfs.h" #include "include/audit.h" #include "include/capability.h" #include "include/cred.h" +#include "include/crypto.h" #include "include/file.h" #include "include/ipc.h" #include "include/net.h" @@ -111,7 +113,7 @@ static void apparmor_task_free(struct task_struct *task) } static int apparmor_task_alloc(struct task_struct *task, - unsigned long clone_flags) + u64 clone_flags) { struct aa_task_ctx *new = task_ctx(task); @@ -126,14 +128,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child, struct aa_label *tracer, *tracee; const struct cred *cred; int error; + bool needput; cred = get_task_cred(child); tracee = cred_label(cred); /* ref count on cred */ - tracer = __begin_current_label_crit_section(); + 
tracer = __begin_current_label_crit_section(&needput); error = aa_may_ptrace(current_cred(), tracer, cred, tracee, (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ : AA_PTRACE_TRACE); - __end_current_label_crit_section(tracer); + __end_current_label_crit_section(tracer, needput); put_cred(cred); return error; @@ -144,14 +147,15 @@ static int apparmor_ptrace_traceme(struct task_struct *parent) struct aa_label *tracer, *tracee; const struct cred *cred; int error; + bool needput; - tracee = __begin_current_label_crit_section(); + tracee = __begin_current_label_crit_section(&needput); cred = get_task_cred(parent); tracer = cred_label(cred); /* ref count on cred */ error = aa_may_ptrace(cred, tracer, current_cred(), tracee, AA_PTRACE_TRACE); put_cred(cred); - __end_current_label_crit_section(tracee); + __end_current_label_crit_section(tracee, needput); return error; } @@ -176,15 +180,11 @@ static int apparmor_capget(const struct task_struct *target, kernel_cap_t *effec struct label_it i; label_for_each_confined(i, label, profile) { - struct aa_ruleset *rules; - if (COMPLAIN_MODE(profile)) - continue; - rules = list_first_entry(&profile->rules, - typeof(*rules), list); - *effective = cap_intersect(*effective, - rules->caps.allow); - *permitted = cap_intersect(*permitted, - rules->caps.allow); + kernel_cap_t allowed; + + allowed = aa_profile_capget(profile); + *effective = cap_intersect(*effective, allowed); + *permitted = cap_intersect(*permitted, allowed); } } rcu_read_unlock(); @@ -221,12 +221,13 @@ static int common_perm(const char *op, const struct path *path, u32 mask, { struct aa_label *label; int error = 0; + bool needput; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); if (!unconfined(label)) error = aa_path_perm(op, current_cred(), label, path, 0, mask, cond); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -524,14 +525,15 @@ static int common_file_perm(const char *op, struct file *file, u32 mask, { struct aa_label *label; int error = 0; + bool needput; /* don't reaudit files closed during inheritance */ - if (file->f_path.dentry == aa_null.dentry) + if (unlikely(file->f_path.dentry == aa_null.dentry)) return -EACCES; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -633,7 +635,7 @@ static int profile_uring(struct aa_profile *profile, u32 request, AA_BUG(!profile); - rules = list_first_entry(&profile->rules, typeof(*rules), list); + rules = profile->label.rules[0]; state = RULE_MEDIATES(rules, AA_CLASS_IO_URING); if (state) { struct aa_perms perms = { }; @@ -664,15 +666,16 @@ static int apparmor_uring_override_creds(const struct cred *new) struct aa_profile *profile; struct aa_label *label; int error; + bool needput; DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING, OP_URING_OVERRIDE); ad.uring.target = cred_label(new); - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); error = fn_for_each(label, profile, profile_uring(profile, AA_MAY_OVERRIDE_CRED, cred_label(new), CAP_SYS_ADMIN, &ad)); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -688,14 +691,15 @@ static int apparmor_uring_sqpoll(void) struct 
aa_profile *profile; struct aa_label *label; int error; + bool needput; DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING, OP_URING_SQPOLL); - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); error = fn_for_each(label, profile, profile_uring(profile, AA_MAY_CREATE_SQPOLL, NULL, CAP_SYS_ADMIN, &ad)); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -706,6 +710,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path, { struct aa_label *label; int error = 0; + bool needput; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) @@ -713,7 +718,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path, flags &= ~AA_MS_IGNORE_MASK; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); if (!unconfined(label)) { if (flags & MS_REMOUNT) error = aa_remount(current_cred(), label, path, flags, @@ -732,7 +737,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path, error = aa_new_mount(current_cred(), label, dev_name, path, type, flags, data); } - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -742,12 +747,13 @@ static int apparmor_move_mount(const struct path *from_path, { struct aa_label *label; int error = 0; + bool needput; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); if (!unconfined(label)) error = aa_move_mount(current_cred(), label, from_path, to_path); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -756,11 +762,12 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags) { struct aa_label *label; int error = 0; + bool needput; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); if (!unconfined(label)) error = aa_umount(current_cred(), label, mnt, flags); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -984,10 +991,12 @@ static void apparmor_bprm_committed_creds(const struct linux_binprm *bprm) static void apparmor_current_getlsmprop_subj(struct lsm_prop *prop) { - struct aa_label *label = __begin_current_label_crit_section(); + struct aa_label *label; + bool needput; + label = __begin_current_label_crit_section(&needput); prop->apparmor.label = label; - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); } static void apparmor_task_getlsmprop_obj(struct task_struct *p, @@ -1002,13 +1011,16 @@ static void apparmor_task_getlsmprop_obj(struct task_struct *p, static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { - struct aa_label *label = __begin_current_label_crit_section(); + struct aa_label *label; int error = 0; + bool needput; + + label = __begin_current_label_crit_section(&needput); if (!unconfined(label)) error = aa_task_setrlimit(current_cred(), label, task, resource, new_rlim); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return error; } @@ -1019,6 +1031,7 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo const struct cred *tc; struct aa_label *cl, *tl; int error; + bool needput; tc = get_task_cred(target); tl = aa_get_newest_cred_label(tc); @@ -1030,9 
+1043,9 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo error = aa_may_signal(cred, cl, tc, tl, sig); aa_put_label(cl); } else { - cl = __begin_current_label_crit_section(); + cl = __begin_current_label_crit_section(&needput); error = aa_may_signal(current_cred(), cl, tc, tl, sig); - __end_current_label_crit_section(cl); + __end_current_label_crit_section(cl, needput); } aa_put_label(tl); put_cred(tc); @@ -1061,12 +1074,29 @@ static int apparmor_userns_create(const struct cred *cred) return error; } +static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t gfp) +{ + struct aa_sk_ctx *ctx = aa_sock(sk); + struct aa_label *label; + bool needput; + + label = __begin_current_label_crit_section(&needput); + //spin_lock_init(&ctx->lock); + rcu_assign_pointer(ctx->label, aa_get_label(label)); + rcu_assign_pointer(ctx->peer, NULL); + rcu_assign_pointer(ctx->peer_lastupdate, NULL); + __end_current_label_crit_section(label, needput); + return 0; +} + static void apparmor_sk_free_security(struct sock *sk) { struct aa_sk_ctx *ctx = aa_sock(sk); - aa_put_label(ctx->label); - aa_put_label(ctx->peer); + /* dead these won't be updated any more */ + aa_put_label(rcu_dereference_protected(ctx->label, true)); + aa_put_label(rcu_dereference_protected(ctx->peer, true)); + aa_put_label(rcu_dereference_protected(ctx->peer_lastupdate, true)); } /** @@ -1080,13 +1110,153 @@ static void apparmor_sk_clone_security(const struct sock *sk, struct aa_sk_ctx *ctx = aa_sock(sk); struct aa_sk_ctx *new = aa_sock(newsk); - if (new->label) - aa_put_label(new->label); - new->label = aa_get_label(ctx->label); + /* not actually in use yet */ + if (rcu_access_pointer(ctx->label) != rcu_access_pointer(new->label)) { + aa_put_label(rcu_dereference_protected(new->label, true)); + rcu_assign_pointer(new->label, aa_get_label_rcu(&ctx->label)); + } - if (new->peer) - aa_put_label(new->peer); - new->peer = aa_get_label(ctx->peer); + if (rcu_access_pointer(ctx->peer) != rcu_access_pointer(new->peer)) { + aa_put_label(rcu_dereference_protected(new->peer, true)); + rcu_assign_pointer(new->peer, aa_get_label_rcu(&ctx->peer)); + } + + if (rcu_access_pointer(ctx->peer_lastupdate) != rcu_access_pointer(new->peer_lastupdate)) { + aa_put_label(rcu_dereference_protected(new->peer_lastupdate, true)); + rcu_assign_pointer(new->peer_lastupdate, + aa_get_label_rcu(&ctx->peer_lastupdate)); + } +} + +static int unix_connect_perm(const struct cred *cred, struct aa_label *label, + struct sock *sk, struct sock *peer_sk) +{ + struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk); + int error; + + error = aa_unix_peer_perm(cred, label, OP_CONNECT, + (AA_MAY_CONNECT | AA_MAY_SEND | AA_MAY_RECEIVE), + sk, peer_sk, + rcu_dereference_protected(peer_ctx->label, + lockdep_is_held(&unix_sk(peer_sk)->lock))); + if (!is_unix_fs(peer_sk)) { + last_error(error, + aa_unix_peer_perm(cred, + rcu_dereference_protected(peer_ctx->label, + lockdep_is_held(&unix_sk(peer_sk)->lock)), + OP_CONNECT, + (AA_MAY_ACCEPT | AA_MAY_SEND | AA_MAY_RECEIVE), + peer_sk, sk, label)); + } + + return error; +} + +/* lockdep check in unix_connect_perm - push sks here to check */ +static void unix_connect_peers(struct aa_sk_ctx *sk_ctx, + struct aa_sk_ctx *peer_ctx) +{ + /* Cross reference the peer labels for SO_PEERSEC */ + struct aa_label *label = rcu_dereference_protected(sk_ctx->label, true); + + aa_get_label(label); + aa_put_label(rcu_dereference_protected(peer_ctx->peer, + true)); + rcu_assign_pointer(peer_ctx->peer, label); /* transfer cnt */ + 
+ label = aa_get_label(rcu_dereference_protected(peer_ctx->label, + true)); + //spin_unlock(&peer_ctx->lock); + + //spin_lock(&sk_ctx->lock); + aa_put_label(rcu_dereference_protected(sk_ctx->peer, + true)); + aa_put_label(rcu_dereference_protected(sk_ctx->peer_lastupdate, + true)); + + rcu_assign_pointer(sk_ctx->peer, aa_get_label(label)); + rcu_assign_pointer(sk_ctx->peer_lastupdate, label); /* transfer cnt */ + //spin_unlock(&sk_ctx->lock); +} + +/** + * apparmor_unix_stream_connect - check perms before making unix domain conn + * @sk: sk attempting to connect + * @peer_sk: sk that is accepting the connection + * @newsk: new sk created for this connection + * peer is locked when this hook is called + * + * Return: + * 0 if connection is permitted + * error code on denial or failure + */ +static int apparmor_unix_stream_connect(struct sock *sk, struct sock *peer_sk, + struct sock *newsk) +{ + struct aa_sk_ctx *sk_ctx = aa_sock(sk); + struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk); + struct aa_sk_ctx *new_ctx = aa_sock(newsk); + struct aa_label *label; + int error; + bool needput; + + label = __begin_current_label_crit_section(&needput); + error = unix_connect_perm(current_cred(), label, sk, peer_sk); + __end_current_label_crit_section(label, needput); + + if (error) + return error; + + /* newsk doesn't go through post_create, but does go through + * security_sk_alloc() + */ + rcu_assign_pointer(new_ctx->label, + aa_get_label(rcu_dereference_protected(peer_ctx->label, + true))); + + /* Cross reference the peer labels for SO_PEERSEC */ + unix_connect_peers(sk_ctx, new_ctx); + + return 0; +} + +/** + * apparmor_unix_may_send - check perms before conn or sending unix dgrams + * @sock: socket sending the message + * @peer: socket message is being send to + * + * Performs bidirectional permission checks for Unix domain socket communication: + * 1. Verifies sender has AA_MAY_SEND to target socket + * 2. Verifies receiver has AA_MAY_RECEIVE from source socket + * + * sock and peer are locked when this hook is called + * called by: dgram_connect peer setup but path not copied to newsk + * + * Return: + * 0 if transmission is permitted + * error code on denial or failure + */ +static int apparmor_unix_may_send(struct socket *sock, struct socket *peer) +{ + struct aa_sk_ctx *peer_ctx = aa_sock(peer->sk); + struct aa_label *label; + int error; + bool needput; + + label = __begin_current_label_crit_section(&needput); + error = xcheck(aa_unix_peer_perm(current_cred(), + label, OP_SENDMSG, AA_MAY_SEND, + sock->sk, peer->sk, + rcu_dereference_protected(peer_ctx->label, + true)), + aa_unix_peer_perm(peer->file ? 
peer->file->f_cred : NULL, + rcu_dereference_protected(peer_ctx->label, + true), + OP_SENDMSG, AA_MAY_RECEIVE, peer->sk, + sock->sk, label)); + __end_current_label_crit_section(label, needput); + + return error; } static int apparmor_socket_create(int family, int type, int protocol, int kern) @@ -1096,13 +1266,19 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern) AA_BUG(in_interrupt()); + if (kern) + return 0; + label = begin_current_label_crit_section(); - if (!(kern || unconfined(label))) - error = af_select(family, - create_perm(label, family, type, protocol), - aa_af_perm(current_cred(), label, - OP_CREATE, AA_MAY_CREATE, - family, type, protocol)); + if (!unconfined(label)) { + if (family == PF_UNIX) + error = aa_unix_create_perm(label, family, type, + protocol); + else + error = aa_af_perm(current_cred(), label, OP_CREATE, + AA_MAY_CREATE, family, type, + protocol); + } end_current_label_crit_section(label); return error; @@ -1135,14 +1311,58 @@ static int apparmor_socket_post_create(struct socket *sock, int family, if (sock->sk) { struct aa_sk_ctx *ctx = aa_sock(sock->sk); - aa_put_label(ctx->label); - ctx->label = aa_get_label(label); + /* still not live */ + aa_put_label(rcu_dereference_protected(ctx->label, true)); + rcu_assign_pointer(ctx->label, aa_get_label(label)); } aa_put_label(label); return 0; } +static int apparmor_socket_socketpair(struct socket *socka, + struct socket *sockb) +{ + struct aa_sk_ctx *a_ctx = aa_sock(socka->sk); + struct aa_sk_ctx *b_ctx = aa_sock(sockb->sk); + struct aa_label *label; + + /* socks not live yet - initial values set in sk_alloc */ + label = begin_current_label_crit_section(); + if (rcu_access_pointer(a_ctx->label) != label) { + AA_BUG("a_ctx != label"); + aa_put_label(rcu_dereference_protected(a_ctx->label, true)); + rcu_assign_pointer(a_ctx->label, aa_get_label(label)); + } + if (rcu_access_pointer(b_ctx->label) != label) { + AA_BUG("b_ctx != label"); + aa_put_label(rcu_dereference_protected(b_ctx->label, true)); + rcu_assign_pointer(b_ctx->label, aa_get_label(label)); + } + + if (socka->sk->sk_family == PF_UNIX) { + /* unix socket pairs by-pass unix_stream_connect */ + unix_connect_peers(a_ctx, b_ctx); + } + end_current_label_crit_section(label); + + return 0; +} + +/** + * apparmor_socket_bind - check perms before bind addr to socket + * @sock: socket to bind the address to (must be non-NULL) + * @address: address that is being bound (must be non-NULL) + * @addrlen: length of @address + * + * Performs security checks before allowing a socket to bind to an address. + * Handles Unix domain sockets specially through aa_unix_bind_perm(). + * For other socket families, uses generic permission check via aa_sk_perm(). 
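
To make the bidirectional check documented above for apparmor_unix_may_send() concrete: each direction is an ordinary permission query, and the combined result reports the first failing direction. A self-contained userspace sketch of that cross-check shape, using stand-in permission bits and "profiles" rather than AppArmor's structures:

	#include <stdio.h>

	/* Stand-in permission bits and profiles for illustration only. */
	#define MAY_SEND	0x1
	#define MAY_RECEIVE	0x2

	struct prof { const char *name; int allow; };

	static int perm(const struct prof *subj, const struct prof *peer, int request)
	{
		if ((subj->allow & request) != request) {
			printf("denied: %s -> %s (0x%x)\n", subj->name, peer->name, request);
			return -13;	/* -EACCES */
		}
		return 0;
	}

	/* Cross check: evaluate both directions, report the first error. */
	static int xcheck_send(const struct prof *sender, const struct prof *receiver)
	{
		int e1 = perm(sender, receiver, MAY_SEND);	/* sender may send      */
		int e2 = perm(receiver, sender, MAY_RECEIVE);	/* receiver may receive */

		return e1 ? e1 : e2;
	}

	int main(void)
	{
		struct prof client = { "client", MAY_SEND };
		struct prof server = { "server", MAY_RECEIVE };

		printf("result: %d\n", xcheck_send(&client, &server));	/* 0 */
		return 0;
	}

The sketch runs both directions unconditionally so each side could be reported; whether the real xcheck() short-circuits or audits both sides is a detail this illustration does not try to capture.
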
+ * + * Return: + * 0 if binding is permitted + * error code on denial or invalid parameters + */ static int apparmor_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { @@ -1151,9 +1371,9 @@ static int apparmor_socket_bind(struct socket *sock, AA_BUG(!address); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - bind_perm(sock, address, addrlen), - aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk)); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_bind_perm(sock, address, addrlen); + return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk); } static int apparmor_socket_connect(struct socket *sock, @@ -1164,9 +1384,10 @@ static int apparmor_socket_connect(struct socket *sock, AA_BUG(!address); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - connect_perm(sock, address, addrlen), - aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk)); + /* PF_UNIX goes through unix_stream_connect && unix_may_send */ + if (sock->sk->sk_family == PF_UNIX) + return 0; + return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk); } static int apparmor_socket_listen(struct socket *sock, int backlog) @@ -1175,9 +1396,9 @@ static int apparmor_socket_listen(struct socket *sock, int backlog) AA_BUG(!sock->sk); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - listen_perm(sock, backlog), - aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk)); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_listen_perm(sock, backlog); + return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk); } /* @@ -1191,9 +1412,9 @@ static int apparmor_socket_accept(struct socket *sock, struct socket *newsock) AA_BUG(!newsock); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - accept_perm(sock, newsock), - aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk)); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_accept_perm(sock, newsock); + return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk); } static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock, @@ -1204,9 +1425,10 @@ static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock, AA_BUG(!msg); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - msg_perm(op, request, sock, msg, size), - aa_sk_perm(op, request, sock->sk)); + /* PF_UNIX goes through unix_may_send */ + if (sock->sk->sk_family == PF_UNIX) + return 0; + return aa_sk_perm(op, request, sock->sk); } static int apparmor_socket_sendmsg(struct socket *sock, @@ -1228,9 +1450,9 @@ static int aa_sock_perm(const char *op, u32 request, struct socket *sock) AA_BUG(!sock->sk); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - sock_perm(op, request, sock), - aa_sk_perm(op, request, sock->sk)); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_sock_perm(op, request, sock); + return aa_sk_perm(op, request, sock->sk); } static int apparmor_socket_getsockname(struct socket *sock) @@ -1251,9 +1473,9 @@ static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock, AA_BUG(!sock->sk); AA_BUG(in_interrupt()); - return af_select(sock->sk->sk_family, - opt_perm(op, request, sock, level, optname), - aa_sk_perm(op, request, sock->sk)); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_opt_perm(op, request, sock, level, optname); + return aa_sk_perm(op, request, sock->sk); } static int apparmor_socket_getsockopt(struct socket *sock, int level, @@ -1289,6 +1511,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how) static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff 
*skb) { struct aa_sk_ctx *ctx = aa_sock(sk); + int error; if (!skb->secmark) return 0; @@ -1297,23 +1520,31 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) * If reach here before socket_post_create hook is called, in which * case label is null, drop the packet. */ - if (!ctx->label) + if (!rcu_access_pointer(ctx->label)) return -EACCES; - return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE, - skb->secmark, sk); + rcu_read_lock(); + error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_RECVMSG, + AA_MAY_RECEIVE, skb->secmark, sk); + rcu_read_unlock(); + + return error; } #endif -static struct aa_label *sk_peer_label(struct sock *sk) +static struct aa_label *sk_peer_get_label(struct sock *sk) { struct aa_sk_ctx *ctx = aa_sock(sk); + struct aa_label *label = ERR_PTR(-ENOPROTOOPT); - if (ctx->peer) - return ctx->peer; + if (rcu_access_pointer(ctx->peer)) + return aa_get_label_rcu(&ctx->peer); - return ERR_PTR(-ENOPROTOOPT); + if (sk->sk_family != PF_UNIX) + return ERR_PTR(-ENOPROTOOPT); + + return label; } /** @@ -1335,19 +1566,19 @@ static int apparmor_socket_getpeersec_stream(struct socket *sock, struct aa_label *label; struct aa_label *peer; - label = begin_current_label_crit_section(); - peer = sk_peer_label(sock->sk); + peer = sk_peer_get_label(sock->sk); if (IS_ERR(peer)) { error = PTR_ERR(peer); goto done; } + label = begin_current_label_crit_section(); slen = aa_label_asxprint(&name, labels_ns(label), peer, FLAG_SHOW_MODE | FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED, GFP_KERNEL); /* don't include terminating \0 in slen, it breaks some apps */ if (slen < 0) { error = -ENOMEM; - goto done; + goto done_put; } if (slen > len) { error = -ERANGE; @@ -1359,8 +1590,11 @@ static int apparmor_socket_getpeersec_stream(struct socket *sock, done_len: if (copy_to_sockptr(optlen, &slen, sizeof(slen))) error = -EFAULT; -done: + +done_put: end_current_label_crit_section(label); + aa_put_label(peer); +done: kfree(name); return error; } @@ -1396,8 +1630,9 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent) { struct aa_sk_ctx *ctx = aa_sock(sk); - if (!ctx->label) - ctx->label = aa_get_current_label(); + /* setup - not live */ + if (!rcu_access_pointer(ctx->label)) + rcu_assign_pointer(ctx->label, aa_get_current_label()); } #ifdef CONFIG_NETWORK_SECMARK @@ -1405,12 +1640,17 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb struct request_sock *req) { struct aa_sk_ctx *ctx = aa_sock(sk); + int error; if (!skb->secmark) return 0; - return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT, - skb->secmark, sk); + rcu_read_lock(); + error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_CONNECT, + AA_MAY_CONNECT, skb->secmark, sk); + rcu_read_unlock(); + + return error; } #endif @@ -1467,11 +1707,16 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = { LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), + LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security), LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security), LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security), + LSM_HOOK_INIT(unix_stream_connect, apparmor_unix_stream_connect), + LSM_HOOK_INIT(unix_may_send, apparmor_unix_may_send), + LSM_HOOK_INIT(socket_create, apparmor_socket_create), LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create), + LSM_HOOK_INIT(socket_socketpair, apparmor_socket_socketpair), 
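
The reworked socket hooks above all share one dispatch shape: check the address family directly, give PF_UNIX its dedicated fine-grained handler (or return early when the operation is mediated by a later unix-specific hook), and otherwise fall back to the generic per-socket check. A trimmed-down illustration of that shape only, with placeholder handlers and a made-up family constant:

	#include <stdio.h>

	#define AF_UNIX_ILL	1	/* illustrative stand-in for PF_UNIX */

	static int unix_bind_perm(void)		{ puts("fine-grained unix bind check"); return 0; }
	static int generic_perm(const char *op)	{ printf("generic check: %s\n", op);    return 0; }

	/* bind-style hook: unix sockets get the dedicated check */
	static int hook_bind(int family)
	{
		if (family == AF_UNIX_ILL)
			return unix_bind_perm();
		return generic_perm("bind");
	}

	/* connect-style hook: unix connects are mediated later by other hooks */
	static int hook_connect(int family)
	{
		if (family == AF_UNIX_ILL)
			return 0;
		return generic_perm("connect");
	}

	int main(void)
	{
		hook_bind(AF_UNIX_ILL);
		hook_bind(2);		/* e.g. an inet socket */
		hook_connect(AF_UNIX_ILL);
		hook_connect(2);
		return 0;
	}
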
LSM_HOOK_INIT(socket_bind, apparmor_socket_bind), LSM_HOOK_INIT(socket_connect, apparmor_socket_connect), LSM_HOOK_INIT(socket_listen, apparmor_socket_listen), @@ -1571,6 +1816,9 @@ static const struct kernel_param_ops param_ops_aalockpolicy = { .get = param_get_aalockpolicy }; +static int param_set_debug(const char *val, const struct kernel_param *kp); +static int param_get_debug(char *buffer, const struct kernel_param *kp); + static int param_set_audit(const char *val, const struct kernel_param *kp); static int param_get_audit(char *buffer, const struct kernel_param *kp); @@ -1604,8 +1852,9 @@ module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level, aacompressionlevel, 0400); /* Debug mode */ -bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES); -module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); +int aa_g_debug; +module_param_call(debug, param_set_debug, param_get_debug, + &aa_g_debug, 0600); /* Audit mode */ enum audit_mode aa_g_audit; @@ -1798,6 +2047,34 @@ static int param_get_aacompressionlevel(char *buffer, return param_get_int(buffer, kp); } +static int param_get_debug(char *buffer, const struct kernel_param *kp) +{ + if (!apparmor_enabled) + return -EINVAL; + if (apparmor_initialized && !aa_current_policy_view_capable(NULL)) + return -EPERM; + return aa_print_debug_params(buffer); +} + +static int param_set_debug(const char *val, const struct kernel_param *kp) +{ + int i; + + if (!apparmor_enabled) + return -EINVAL; + if (!val) + return -EINVAL; + if (apparmor_initialized && !aa_current_policy_admin_capable(NULL)) + return -EPERM; + + i = aa_parse_debug_params(val); + if (i == DEBUG_PARSE_ERROR) + return -EINVAL; + + aa_g_debug = i; + return 0; +} + static int param_get_audit(char *buffer, const struct kernel_param *kp) { if (!apparmor_enabled) @@ -2006,7 +2283,7 @@ static int __init alloc_buffers(void) * two should be enough, with more CPUs it is possible that more * buffers will be used simultaneously. The preallocated pool may grow. * This preallocation has also the side-effect that AppArmor will be - * disabled early at boot if aa_g_path_max is extremly high. + * disabled early at boot if aa_g_path_max is extremely high. 
*/ if (num_online_cpus() > 1) num = 4 + RESERVE_COUNT; @@ -2038,7 +2315,7 @@ static int apparmor_dointvec(const struct ctl_table *table, int write, return proc_dointvec(table, write, buffer, lenp, ppos); } -static struct ctl_table apparmor_sysctl_table[] = { +static const struct ctl_table apparmor_sysctl_table[] = { #ifdef CONFIG_USER_NS { .procname = "unprivileged_userns_apparmor_policy", @@ -2082,6 +2359,7 @@ static unsigned int apparmor_ip_postroute(void *priv, { struct aa_sk_ctx *ctx; struct sock *sk; + int error; if (!skb->secmark) return NF_ACCEPT; @@ -2091,8 +2369,11 @@ static unsigned int apparmor_ip_postroute(void *priv, return NF_ACCEPT; ctx = aa_sock(sk); - if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND, - skb->secmark, sk)) + rcu_read_lock(); + error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_SENDMSG, + AA_MAY_SEND, skb->secmark, sk); + rcu_read_unlock(); + if (!error) return NF_ACCEPT; return NF_DROP_ERR(-ECONNREFUSED); @@ -2146,15 +2427,14 @@ static int __init apparmor_nf_ip_init(void) return 0; } -__initcall(apparmor_nf_ip_init); #endif -static char nulldfa_src[] = { +static char nulldfa_src[] __aligned(8) = { #include "nulldfa.in" }; static struct aa_dfa *nulldfa; -static char stacksplitdfa_src[] = { +static char stacksplitdfa_src[] __aligned(8) = { #include "stacksplitdfa.in" }; struct aa_dfa *stacksplitdfa; @@ -2250,6 +2530,9 @@ static int __init apparmor_init(void) security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks), &apparmor_lsmid); + /* Inform the audit system that secctx is used */ + audit_cfg_lsm(&apparmor_lsmid, AUDIT_CFG_LSM_SECCTX_SUBJECT); + /* Report that AppArmor successfully initialized */ apparmor_initialized = 1; if (aa_g_profile_mode == APPARMOR_COMPLAIN) @@ -2272,9 +2555,16 @@ alloc_out: } DEFINE_LSM(apparmor) = { - .name = "apparmor", + .id = &apparmor_lsmid, .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE, .enabled = &apparmor_enabled, .blobs = &apparmor_blob_sizes, .init = apparmor_init, + .initcall_fs = aa_create_aafs, +#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK) + .initcall_device = apparmor_nf_ip_init, +#endif +#ifdef CONFIG_SECURITY_APPARMOR_HASH + .initcall_late = init_profile_hash, +#endif }; diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 517d77d3c34c..c5a91600842a 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -247,6 +247,42 @@ void aa_dfa_free_kref(struct kref *kref) dfa_free(dfa); } + + +/** + * remap_data16_to_data32 - remap u16 @old table to a u32 based table + * @old: table to remap + * + * Returns: new table with u32 entries instead of u16. 
+ * + * Note: will free @old so caller does not have to + */ +static struct table_header *remap_data16_to_data32(struct table_header *old) +{ + struct table_header *new; + size_t tsize; + u32 i; + + tsize = table_size(old->td_lolen, YYTD_DATA32); + new = kvzalloc(tsize, GFP_KERNEL); + if (!new) { + kvfree(old); + return NULL; + } + new->td_id = old->td_id; + new->td_flags = YYTD_DATA32; + new->td_lolen = old->td_lolen; + + for (i = 0; i < old->td_lolen; i++) + TABLE_DATAU32(new)[i] = (u32) TABLE_DATAU16(old)[i]; + + kvfree(old); + if (is_vmalloc_addr(new)) + vm_unmap_aliases(); + + return new; +} + /** * aa_dfa_unpack - unpack the binary tables of a serialized dfa * @blob: aligned serialized stream of data to unpack (NOT NULL) @@ -326,8 +362,10 @@ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags) case YYTD_ID_DEF: case YYTD_ID_NXT: case YYTD_ID_CHK: - if (table->td_flags != YYTD_DATA16) + if (!(table->td_flags == YYTD_DATA16 || + table->td_flags == YYTD_DATA32)) { goto fail; + } break; case YYTD_ID_EC: if (table->td_flags != YYTD_DATA8) @@ -342,6 +380,23 @@ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags) dfa->tables[table->td_id] = table; data += table_size(table->td_lolen, table->td_flags); size -= table_size(table->td_lolen, table->td_flags); + + /* + * this remapping has to be done after incrementing data above + * for now straight remap, later have dfa support both + */ + switch (table->td_id) { + case YYTD_ID_DEF: + case YYTD_ID_NXT: + case YYTD_ID_CHK: + if (table->td_flags == YYTD_DATA16) { + table = remap_data16_to_data32(table); + if (!table) + goto fail; + } + dfa->tables[table->td_id] = table; + break; + } table = NULL; } error = verify_table_headers(dfa->tables, flags); @@ -395,10 +450,10 @@ do { \ aa_state_t aa_dfa_match_len(struct aa_dfa *dfa, aa_state_t start, const char *str, int len) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); aa_state_t state = start; if (state == DFA_NOMATCH) @@ -434,10 +489,10 @@ aa_state_t aa_dfa_match_len(struct aa_dfa *dfa, aa_state_t start, */ aa_state_t aa_dfa_match(struct aa_dfa *dfa, aa_state_t start, const char *str) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); aa_state_t state = start; if (state == DFA_NOMATCH) @@ -472,10 +527,10 @@ aa_state_t aa_dfa_match(struct aa_dfa *dfa, aa_state_t start, const char *str) */ aa_state_t aa_dfa_next(struct aa_dfa *dfa, aa_state_t state, const char c) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); /* current state is <state>, matching character *str */ if (dfa->tables[YYTD_ID_EC]) { @@ -490,10 +545,10 @@ aa_state_t aa_dfa_next(struct aa_dfa *dfa, aa_state_t state, const char c) aa_state_t aa_dfa_outofband_transition(struct aa_dfa *dfa, aa_state_t state) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); u32 b = (base)[(state)]; if (!(b & MATCH_FLAG_OOB_TRANSITION)) @@ -521,10 
+576,10 @@ aa_state_t aa_dfa_outofband_transition(struct aa_dfa *dfa, aa_state_t state) aa_state_t aa_dfa_match_until(struct aa_dfa *dfa, aa_state_t start, const char *str, const char **retpos) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); u32 *accept = ACCEPT_TABLE(dfa); aa_state_t state = start, pos; @@ -582,10 +637,10 @@ aa_state_t aa_dfa_match_until(struct aa_dfa *dfa, aa_state_t start, aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start, const char *str, int n, const char **retpos) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); u32 *accept = ACCEPT_TABLE(dfa); aa_state_t state = start, pos; @@ -624,44 +679,45 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start, return state; } -#define inc_wb_pos(wb) \ -do { \ +#define inc_wb_pos(wb) \ +do { \ + BUILD_BUG_ON_NOT_POWER_OF_2(WB_HISTORY_SIZE); \ wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \ - wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \ + wb->len = (wb->len + 1) > WB_HISTORY_SIZE ? WB_HISTORY_SIZE : \ + wb->len + 1; \ } while (0) /* For DFAs that don't support extended tagging of states */ +/* adjust is only set if is_loop returns true */ static bool is_loop(struct match_workbuf *wb, aa_state_t state, unsigned int *adjust) { - aa_state_t pos = wb->pos; - aa_state_t i; + int pos = wb->pos; + int i; if (wb->history[pos] < state) return false; - for (i = 0; i <= wb->len; i++) { + for (i = 0; i < wb->len; i++) { if (wb->history[pos] == state) { *adjust = i; return true; } - if (pos == 0) - pos = WB_HISTORY_SIZE; - pos--; + /* -1 wraps to WB_HISTORY_SIZE - 1 */ + pos = (pos - 1) & (WB_HISTORY_SIZE - 1); } - *adjust = i; - return true; + return false; } static aa_state_t leftmatch_fb(struct aa_dfa *dfa, aa_state_t start, const char *str, struct match_workbuf *wb, unsigned int *count) { - u16 *def = DEFAULT_TABLE(dfa); + u32 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); - u16 *next = NEXT_TABLE(dfa); - u16 *check = CHECK_TABLE(dfa); + u32 *next = NEXT_TABLE(dfa); + u32 *check = CHECK_TABLE(dfa); aa_state_t state = start, pos; AA_BUG(!dfa); diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index bf8863253e07..523570aa1a5a 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -311,8 +311,7 @@ static int match_mnt_path_str(const struct cred *subj_cred, { struct aa_perms perms = { }; const char *mntpnt = NULL, *info = NULL; - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; int pos, error; AA_BUG(!profile); @@ -371,8 +370,7 @@ static int match_mnt(const struct cred *subj_cred, bool binary) { const char *devname = NULL, *info = NULL; - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; int error = -EACCES; AA_BUG(!profile); @@ -604,8 +602,7 @@ static int profile_umount(const struct cred *subj_cred, struct aa_profile *profile, const struct path *path, char *buffer) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_perms perms = { 
}; const char *name = NULL, *info = NULL; aa_state_t state; @@ -668,8 +665,7 @@ static struct aa_label *build_pivotroot(const struct cred *subj_cred, const struct path *old_path, char *old_buffer) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; const char *old_name, *new_name = NULL, *info = NULL; const char *trans_name = NULL; struct aa_perms perms = { }; diff --git a/security/apparmor/net.c b/security/apparmor/net.c index 77413a519117..45cf25605c34 100644 --- a/security/apparmor/net.c +++ b/security/apparmor/net.c @@ -8,6 +8,7 @@ * Copyright 2009-2017 Canonical Ltd. */ +#include "include/af_unix.h" #include "include/apparmor.h" #include "include/audit.h" #include "include/cred.h" @@ -24,6 +25,12 @@ struct aa_sfs_entry aa_sfs_entry_network[] = { { } }; +struct aa_sfs_entry aa_sfs_entry_networkv9[] = { + AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK), + AA_SFS_FILE_BOOLEAN("af_unix", 1), + { } +}; + static const char * const net_mask_names[] = { "unknown", "send", @@ -66,6 +73,42 @@ static const char * const net_mask_names[] = { "unknown", }; +static void audit_unix_addr(struct audit_buffer *ab, const char *str, + struct sockaddr_un *addr, int addrlen) +{ + int len = unix_addr_len(addrlen); + + if (!addr || len <= 0) { + audit_log_format(ab, " %s=none", str); + } else if (addr->sun_path[0]) { + audit_log_format(ab, " %s=", str); + audit_log_untrustedstring(ab, addr->sun_path); + } else { + audit_log_format(ab, " %s=\"@", str); + if (audit_string_contains_control(&addr->sun_path[1], len - 1)) + audit_log_n_hex(ab, &addr->sun_path[1], len - 1); + else + audit_log_format(ab, "%.*s", len - 1, + &addr->sun_path[1]); + audit_log_format(ab, "\""); + } +} + +static void audit_unix_sk_addr(struct audit_buffer *ab, const char *str, + const struct sock *sk) +{ + const struct unix_sock *u = unix_sk(sk); + + if (u && u->addr) { + int addrlen; + struct sockaddr_un *addr = aa_sunaddr(u, &addrlen); + + audit_unix_addr(ab, str, addr, addrlen); + } else { + audit_unix_addr(ab, str, NULL, 0); + + } +} /* audit callback for net specific fields */ void audit_net_cb(struct audit_buffer *ab, void *va) @@ -73,12 +116,12 @@ void audit_net_cb(struct audit_buffer *ab, void *va) struct common_audit_data *sa = va; struct apparmor_audit_data *ad = aad(sa); - if (address_family_names[sa->u.net->family]) + if (address_family_names[ad->common.u.net->family]) audit_log_format(ab, " family=\"%s\"", - address_family_names[sa->u.net->family]); + address_family_names[ad->common.u.net->family]); else audit_log_format(ab, " family=\"unknown(%d)\"", - sa->u.net->family); + ad->common.u.net->family); if (sock_type_names[ad->net.type]) audit_log_format(ab, " sock_type=\"%s\"", sock_type_names[ad->net.type]); @@ -98,6 +141,19 @@ void audit_net_cb(struct audit_buffer *ab, void *va) net_mask_names, NET_PERMS_MASK); } } + if (ad->common.u.net->family == PF_UNIX) { + if (ad->net.addr || !ad->common.u.net->sk) + audit_unix_addr(ab, "addr", + unix_addr(ad->net.addr), + ad->net.addrlen); + else + audit_unix_sk_addr(ab, "addr", ad->common.u.net->sk); + if (ad->request & NET_PEER_MASK) { + audit_unix_addr(ab, "peer_addr", + unix_addr(ad->net.peer.addr), + ad->net.peer.addrlen); + } + } if (ad->peer) { audit_log_format(ab, " peer="); aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer, @@ -105,45 +161,123 @@ void audit_net_cb(struct audit_buffer *ab, void *va) } } +/* standard permission lookup pattern - supports early bailout */ +int 
aa_do_perms(struct aa_profile *profile, struct aa_policydb *policy, + aa_state_t state, u32 request, + struct aa_perms *p, struct apparmor_audit_data *ad) +{ + struct aa_perms perms; + + AA_BUG(!profile); + AA_BUG(!policy); + + + if (state || !p) + p = aa_lookup_perms(policy, state); + perms = *p; + aa_apply_modes_to_perms(profile, &perms); + return aa_check_perms(profile, &perms, request, ad, + audit_net_cb); +} + +/* only continue match if + * insufficient current perms at current state + * indicates there are more perms in later state + * Returns: perms struct if early match + */ +static struct aa_perms *early_match(struct aa_policydb *policy, + aa_state_t state, u32 request) +{ + struct aa_perms *p; + + p = aa_lookup_perms(policy, state); + if (((p->allow & request) != request) && (p->allow & AA_CONT_MATCH)) + return NULL; + return p; +} + +static aa_state_t aa_dfa_match_be16(struct aa_dfa *dfa, aa_state_t state, + u16 data) +{ + __be16 buffer = cpu_to_be16(data); + + return aa_dfa_match_len(dfa, state, (char *) &buffer, 2); +} + +/** + * aa_match_to_prot - match the af, type, protocol triplet + * @policy: policy being matched + * @state: state to start in + * @request: permissions being requested, ignored if @p == NULL + * @af: socket address family + * @type: socket type + * @protocol: socket protocol + * @p: output - pointer to permission associated with match + * @info: output - pointer to string describing failure + * + * RETURNS: state match stopped in. + * + * If @(p) is assigned a value the returned state will be the + * corresponding state. Will not set @p on failure or if match completes + * only if an early match occurs + */ +aa_state_t aa_match_to_prot(struct aa_policydb *policy, aa_state_t state, + u32 request, u16 af, int type, int protocol, + struct aa_perms **p, const char **info) +{ + state = aa_dfa_match_be16(policy->dfa, state, (u16)af); + if (!state) { + *info = "failed af match"; + return state; + } + state = aa_dfa_match_be16(policy->dfa, state, (u16)type); + if (state) { + if (p) + *p = early_match(policy, state, request); + if (!p || !*p) { + state = aa_dfa_match_be16(policy->dfa, state, (u16)protocol); + if (!state) + *info = "failed protocol match"; + } + } else { + *info = "failed type match"; + } + + return state; +} + /* Generic af perm */ int aa_profile_af_perm(struct aa_profile *profile, struct apparmor_audit_data *ad, u32 request, u16 family, - int type) + int type, int protocol) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); - struct aa_perms perms = { }; + struct aa_ruleset *rules = profile->label.rules[0]; + struct aa_perms *p = NULL; aa_state_t state; - __be16 buffer[2]; AA_BUG(family >= AF_MAX); AA_BUG(type < 0 || type >= SOCK_MAX); + AA_BUG(profile_unconfined(profile)); if (profile_unconfined(profile)) return 0; - state = RULE_MEDIATES(rules, AA_CLASS_NET); + state = RULE_MEDIATES_NET(rules); if (!state) return 0; - - buffer[0] = cpu_to_be16(family); - buffer[1] = cpu_to_be16((u16) type); - state = aa_dfa_match_len(rules->policy->dfa, state, (char *) &buffer, - 4); - perms = *aa_lookup_perms(rules->policy, state); - aa_apply_modes_to_perms(profile, &perms); - - return aa_check_perms(profile, &perms, request, ad, audit_net_cb); + state = aa_match_to_prot(rules->policy, state, request, family, type, + protocol, &p, &ad->info); + return aa_do_perms(profile, rules->policy, state, request, p, ad); } int aa_af_perm(const struct cred *subj_cred, struct aa_label *label, const char *op, u32 request, u16 family, int 
type, int protocol) { struct aa_profile *profile; - DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol); + DEFINE_AUDIT_NET(ad, op, subj_cred, NULL, family, type, protocol); return fn_for_each_confined(label, profile, aa_profile_af_perm(profile, &ad, request, family, - type)); + type, protocol)); } static int aa_label_sk_perm(const struct cred *subj_cred, @@ -157,9 +291,9 @@ static int aa_label_sk_perm(const struct cred *subj_cred, AA_BUG(!label); AA_BUG(!sk); - if (ctx->label != kernel_t && !unconfined(label)) { + if (rcu_access_pointer(ctx->label) != kernel_t && !unconfined(label)) { struct aa_profile *profile; - DEFINE_AUDIT_SK(ad, op, sk); + DEFINE_AUDIT_SK(ad, op, subj_cred, sk); ad.subj_cred = subj_cred; error = fn_for_each_confined(label, profile, @@ -187,12 +321,16 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk) int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label, - const char *op, u32 request, struct socket *sock) + const char *op, u32 request, struct file *file) { + struct socket *sock = (struct socket *) file->private_data; + AA_BUG(!label); AA_BUG(!sock); AA_BUG(!sock->sk); + if (sock->sk->sk_family == PF_UNIX) + return aa_unix_file_perm(subj_cred, label, op, request, file); return aa_label_sk_perm(subj_cred, label, op, request, sock->sk); } @@ -223,8 +361,7 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid, { int i, ret; struct aa_perms perms = { }; - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; if (rules->secmark_count == 0) return 0; @@ -257,7 +394,7 @@ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request, u32 secid, const struct sock *sk) { struct aa_profile *profile; - DEFINE_AUDIT_SK(ad, op, sk); + DEFINE_AUDIT_SK(ad, op, NULL, sk); return fn_for_each_confined(label, profile, aa_secmark_perm(profile, request, secid, diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 45ec994b558d..d6c74c357ffd 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -130,7 +130,7 @@ static int d_namespace_path(const struct path *path, char *buf, char **name, /* handle error conditions - and still allow a partial path to * be returned. 
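
Back in net.c above, aa_match_to_prot() walks the policy DFA over the (family, type, protocol) triplet, feeding each value as a big-endian 16-bit quantity and optionally stopping after the type when an early match already decides the request. A standalone sketch of just the byte-ordering step, with a dummy DFA transition standing in for aa_dfa_match_len():

	#include <stdint.h>
	#include <stdio.h>

	/* Dummy "DFA" step: show which byte is consumed at each state. */
	static unsigned int dfa_step(unsigned int state, unsigned char c)
	{
		printf("state %u consumes byte 0x%02x\n", state, c);
		return state + 1;
	}

	/* Feed a 16-bit value in big-endian byte order, two DFA steps per value. */
	static unsigned int match_be16(unsigned int state, uint16_t v)
	{
		state = dfa_step(state, (unsigned char)(v >> 8));
		return dfa_step(state, (unsigned char)(v & 0xff));
	}

	int main(void)
	{
		unsigned int state = 1;

		state = match_be16(state, 1);	/* address family */
		state = match_be16(state, 1);	/* socket type    */
		state = match_be16(state, 0);	/* protocol       */
		printf("final state %u\n", state);
		return 0;
	}

The early-match shortcut itself is omitted here; only the two-bytes-per-value encoding is shown.
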
*/ - if (!res || IS_ERR(res)) { + if (IS_ERR_OR_NULL(res)) { if (PTR_ERR(res) == -ENAMETOOLONG) { error = -ENAMETOOLONG; *name = buf; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 14df15e35695..50d5345ff5cb 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -103,8 +103,7 @@ static void aa_free_pdb(struct aa_policydb *pdb) { if (pdb) { aa_put_dfa(pdb->dfa); - if (pdb->perms) - kvfree(pdb->perms); + kvfree(pdb->perms); aa_free_str_table(&pdb->trans); kfree(pdb); } @@ -244,6 +243,9 @@ static void free_ruleset(struct aa_ruleset *rules) { int i; + if (!rules) + return; + aa_put_pdb(rules->file); aa_put_pdb(rules->policy); aa_free_cap_rules(&rules->caps); @@ -260,8 +262,6 @@ struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp) struct aa_ruleset *rules; rules = kzalloc(sizeof(*rules), gfp); - if (rules) - INIT_LIST_HEAD(&rules->list); return rules; } @@ -278,10 +278,9 @@ struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp) */ void aa_free_profile(struct aa_profile *profile) { - struct aa_ruleset *rule, *tmp; struct rhashtable *rht; - AA_DEBUG("%s(%p)\n", __func__, profile); + AA_DEBUG(DEBUG_POLICY, "%s(%p)\n", __func__, profile); if (!profile) return; @@ -300,10 +299,9 @@ void aa_free_profile(struct aa_profile *profile) * at this point there are no tasks that can have a reference * to rules */ - list_for_each_entry_safe(rule, tmp, &profile->rules, list) { - list_del_init(&rule->list); - free_ruleset(rule); - } + for (int i = 0; i < profile->n_rules; i++) + free_ruleset(profile->label.rules[i]); + kfree_sensitive(profile->dirname); if (profile->data) { @@ -332,10 +330,12 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy, gfp_t gfp) { struct aa_profile *profile; - struct aa_ruleset *rules; - /* freed by free_profile - usually through aa_put_profile */ - profile = kzalloc(struct_size(profile, label.vec, 2), gfp); + /* freed by free_profile - usually through aa_put_profile + * this adds space for a single ruleset in the rules section of the + * label + */ + profile = kzalloc(struct_size(profile, label.rules, 1), gfp); if (!profile) return NULL; @@ -344,13 +344,11 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy, if (!aa_label_init(&profile->label, 1, gfp)) goto fail; - INIT_LIST_HEAD(&profile->rules); - /* allocate the first ruleset, but leave it empty */ - rules = aa_alloc_ruleset(gfp); - if (!rules) + profile->label.rules[0] = aa_alloc_ruleset(gfp); + if (!profile->label.rules[0]) goto fail; - list_add(&rules->list, &profile->rules); + profile->n_rules = 1; /* update being set needed by fs interface */ if (!proxy) { @@ -365,6 +363,7 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy, profile->label.flags |= FLAG_PROFILE; profile->label.vec[0] = profile; + profile->signal = SIGKILL; /* refcount released by caller */ return profile; @@ -374,6 +373,41 @@ fail: return NULL; } +static inline bool ANY_RULE_MEDIATES(struct aa_profile *profile, + unsigned char class) +{ + int i; + + for (i = 0; i < profile->n_rules; i++) { + if (RULE_MEDIATES(profile->label.rules[i], class)) + return true; + } + return false; +} + +/* set of rules that are mediated by unconfined */ +static int unconfined_mediates[] = { AA_CLASS_NS, AA_CLASS_IO_URING, 0 }; + +/* must be called after profile rulesets and start information is setup */ +void aa_compute_profile_mediates(struct aa_profile *profile) +{ + int c; + + if (profile_unconfined(profile)) { + int *pos; + + for (pos = unconfined_mediates; 
*pos; pos++) { + if (ANY_RULE_MEDIATES(profile, *pos)) + profile->label.mediates |= ((u64) 1) << AA_CLASS_NS; + } + return; + } + for (c = 0; c <= AA_CLASS_LAST; c++) { + if (ANY_RULE_MEDIATES(profile, c)) + profile->label.mediates |= ((u64) 1) << c; + } +} + /* TODO: profile accounting - setup in remove */ /** @@ -464,7 +498,7 @@ static struct aa_policy *__lookup_parent(struct aa_ns *ns, } /** - * __create_missing_ancestors - create place holders for missing ancestores + * __create_missing_ancestors - create place holders for missing ancestors * @ns: namespace to lookup profile in (NOT NULL) * @hname: hierarchical profile name to find parent of (NOT NULL) * @gfp: type of allocation. @@ -580,11 +614,6 @@ struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname, return profile; } -struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *hname) -{ - return aa_lookupn_profile(ns, hname, strlen(hname)); -} - struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, const char *fqname, size_t n) { @@ -626,13 +655,16 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name, /* TODO: ideally we should inherit abi from parent */ profile->label.flags |= FLAG_NULL; - rules = list_first_entry(&profile->rules, typeof(*rules), list); + profile->attach.xmatch = aa_get_pdb(nullpdb); + rules = profile->label.rules[0]; rules->file = aa_get_pdb(nullpdb); rules->policy = aa_get_pdb(nullpdb); + aa_compute_profile_mediates(profile); if (parent) { profile->path_flags = parent->path_flags; - + /* override/inherit what is mediated from parent */ + profile->label.mediates = parent->label.mediates; /* released on free_profile */ rcu_assign_pointer(profile->parent, aa_get_profile(parent)); profile->ns = aa_get_ns(parent->ns); @@ -838,8 +870,8 @@ bool aa_policy_admin_capable(const struct cred *subj_cred, bool capable = policy_ns_capable(subj_cred, label, user_ns, CAP_MAC_ADMIN) == 0; - AA_DEBUG("cap_mac_admin? %d\n", capable); - AA_DEBUG("policy locked? %d\n", aa_g_lock_policy); + AA_DEBUG(DEBUG_POLICY, "cap_mac_admin? %d\n", capable); + AA_DEBUG(DEBUG_POLICY, "policy locked? %d\n", aa_g_lock_policy); return aa_policy_view_capable(subj_cred, label, ns) && capable && !aa_g_lock_policy; @@ -848,11 +880,11 @@ bool aa_policy_admin_capable(const struct cred *subj_cred, bool aa_current_policy_view_capable(struct aa_ns *ns) { struct aa_label *label; - bool res; + bool needput, res; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); res = aa_policy_view_capable(current_cred(), label, ns); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return res; } @@ -860,11 +892,11 @@ bool aa_current_policy_view_capable(struct aa_ns *ns) bool aa_current_policy_admin_capable(struct aa_ns *ns) { struct aa_label *label; - bool res; + bool needput, res; - label = __begin_current_label_crit_section(); + label = __begin_current_label_crit_section(&needput); res = aa_policy_admin_capable(current_cred(), label, ns); - __end_current_label_crit_section(label); + __end_current_label_crit_section(label, needput); return res; } @@ -1073,7 +1105,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label, goto out; /* ensure that profiles are all for the same ns - * TODO: update locking to remove this constaint. All profiles in + * TODO: update locking to remove this constraint. All profiles in * the load set must succeed as a set or the load will * fail. 
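
The label mediates mask introduced above (computed by aa_compute_profile_mediates() in policy.c) keeps one bit per policy class that any ruleset of the profile mediates, so later checks can test a whole label with a single mask instead of walking every ruleset. A small sketch of building and testing such a mask; the class numbers and the mediation predicate are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Made-up class numbers; the kernel's AA_CLASS_* values differ. */
	enum { CLS_FILE = 2, CLS_NET = 4, CLS_SIGNAL = 5, CLS_LAST = 7 };

	/* Stand-in predicate: pretend the ruleset covers file and signal only. */
	static int ruleset_mediates(int class)
	{
		return class == CLS_FILE || class == CLS_SIGNAL;
	}

	static uint64_t compute_mediates(void)
	{
		uint64_t mediates = 0;
		int c;

		for (c = 0; c <= CLS_LAST; c++)
			if (ruleset_mediates(c))
				mediates |= (uint64_t)1 << c;
		return mediates;
	}

	#define label_mediates(mask, class)	((mask) & ((uint64_t)1 << (class)))

	int main(void)
	{
		uint64_t m = compute_mediates();

		printf("file:   %s\n", label_mediates(m, CLS_FILE)   ? "mediated" : "not mediated");
		printf("net:    %s\n", label_mediates(m, CLS_NET)    ? "mediated" : "not mediated");
		printf("signal: %s\n", label_mediates(m, CLS_SIGNAL) ? "mediated" : "not mediated");
		return 0;
	}
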
Sort ent list and take ns locks in hierarchy order */ diff --git a/security/apparmor/policy_compat.c b/security/apparmor/policy_compat.c index 423227670e68..cfc2207e5a12 100644 --- a/security/apparmor/policy_compat.c +++ b/security/apparmor/policy_compat.c @@ -286,10 +286,10 @@ static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor) AA_BUG(!dfa); - for (state = 0; state < state_count; state++) + for (state = 0; state < state_count; state++) { ACCEPT_TABLE(dfa)[state] = state * factor; - kvfree(dfa->tables[YYTD_ID_ACCEPT2]); - dfa->tables[YYTD_ID_ACCEPT2] = NULL; + ACCEPT_TABLE2(dfa)[state] = factor > 1 ? ACCEPT_FLAG_OWNER : 0; + } } /* TODO: merge different dfa mappings into single map_policy fn */ diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index 1f02cfe1d974..64783ca3b0f2 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c @@ -107,7 +107,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) struct aa_ns *ns; ns = kzalloc(sizeof(*ns), GFP_KERNEL); - AA_DEBUG("%s(%p)\n", __func__, ns); + AA_DEBUG(DEBUG_POLICY, "%s(%p)\n", __func__, ns); if (!ns) return NULL; if (!aa_policy_init(&ns->base, prefix, name, GFP_KERNEL)) diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 3483c595f999..7523971e37d9 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -29,6 +29,7 @@ #include "include/policy.h" #include "include/policy_unpack.h" #include "include/policy_compat.h" +#include "include/signal.h" /* audit callback for unpack fields */ static void audit_cb(struct audit_buffer *ab, void *va) @@ -598,8 +599,8 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules) fail: if (rules->secmark) { for (i = 0; i < size; i++) - kfree(rules->secmark[i].label); - kfree(rules->secmark); + kfree_sensitive(rules->secmark[i].label); + kfree_sensitive(rules->secmark); rules->secmark_count = 0; rules->secmark = NULL; } @@ -645,10 +646,13 @@ fail: static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm) { + u32 reserved; + if (version != 1) return false; - return aa_unpack_u32(e, &perm->allow, NULL) && + /* reserved entry is for later expansion, discard for now */ + return aa_unpack_u32(e, &reserved, NULL) && aa_unpack_u32(e, &perm->allow, NULL) && aa_unpack_u32(e, &perm->deny, NULL) && aa_unpack_u32(e, &perm->subtree, NULL) && @@ -713,6 +717,7 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy, void *pos = e->pos; int i, flags, error = -EPROTO; ssize_t size; + u32 version = 0; pdb = aa_alloc_pdb(GFP_KERNEL); if (!pdb) @@ -730,6 +735,9 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy, if (pdb->perms) { /* perms table present accept is index */ flags = TO_ACCEPT1_FLAG(YYTD_DATA32); + if (aa_unpack_u32(e, &version, "permsv") && version > 2) + /* accept2 used for dfa flags */ + flags |= TO_ACCEPT2_FLAG(YYTD_DATA32); } else { /* packed perms in accept1 and accept2 */ flags = TO_ACCEPT1_FLAG(YYTD_DATA32) | @@ -767,6 +775,21 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy, } } + /* accept2 is in some cases being allocated, even with perms */ + if (pdb->perms && !pdb->dfa->tables[YYTD_ID_ACCEPT2]) { + /* add dfa flags table missing in v2 */ + u32 noents = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_lolen; + u16 tdflags = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_flags; + size_t tsize = table_size(noents, tdflags); + + pdb->dfa->tables[YYTD_ID_ACCEPT2] = kvzalloc(tsize, 
GFP_KERNEL); + if (!pdb->dfa->tables[YYTD_ID_ACCEPT2]) { + *info = "failed to alloc dfa flags table"; + goto out; + } + pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_lolen = noents; + pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_flags = tdflags; + } /* * Unfortunately due to a bug in earlier userspaces, a * transition table may be present even when the dfa is @@ -780,9 +803,13 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy, if (!pdb->dfa && pdb->trans.table) aa_free_str_table(&pdb->trans); - /* TODO: move compat mapping here, requires dfa merging first */ - /* TODO: move verify here, it has to be done after compat mappings */ - + /* TODO: + * - move compat mapping here, requires dfa merging first + * - move verify here, it has to be done after compat mappings + * - move free of unneeded trans table here, has to be done + * after perm mapping. + */ +out: *policy = pdb; return 0; @@ -859,7 +886,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) error = -ENOMEM; goto fail; } - rules = list_first_entry(&profile->rules, typeof(*rules), list); + rules = profile->label.rules[0]; /* profile renaming is optional */ (void) aa_unpack_str(e, &profile->rename, "rename"); @@ -895,6 +922,12 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) (void) aa_unpack_strdup(e, &disconnected, "disconnected"); profile->disconnected = disconnected; + /* optional */ + (void) aa_unpack_u32(e, &profile->signal, "kill"); + if (profile->signal < 1 || profile->signal > MAXMAPPED_SIG) { + info = "profile kill.signal invalid value"; + goto fail; + } /* per profile debug flags (complain, audit) */ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) { info = "profile missing flags"; @@ -1098,6 +1131,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) goto fail; } + aa_compute_profile_mediates(profile); + return profile; fail: @@ -1212,21 +1247,32 @@ static bool verify_perm(struct aa_perms *perm) static bool verify_perms(struct aa_policydb *pdb) { int i; + int xidx, xmax = -1; for (i = 0; i < pdb->size; i++) { if (!verify_perm(&pdb->perms[i])) return false; /* verify indexes into str table */ - if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE && - (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size) - return false; + if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE) { + xidx = pdb->perms[i].xindex & AA_X_INDEX_MASK; + if (xidx >= pdb->trans.size) + return false; + if (xmax < xidx) + xmax = xidx; + } if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size) return false; if (pdb->perms[i].label && pdb->perms[i].label >= pdb->trans.size) return false; } - + /* deal with incorrectly constructed string tables */ + if (xmax == -1) { + aa_free_str_table(&pdb->trans); + } else if (pdb->trans.size > xmax + 1) { + if (!aa_resize_str_table(&pdb->trans, xmax + 1, GFP_KERNEL)) + return false; + } return true; } @@ -1240,8 +1286,8 @@ static bool verify_perms(struct aa_policydb *pdb) */ static int verify_profile(struct aa_profile *profile) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; + if (!rules) return 0; diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c index c64733d6c98f..cf18744dafe2 100644 --- a/security/apparmor/policy_unpack_test.c +++ b/security/apparmor/policy_unpack_test.c @@ -9,6 +9,8 @@ #include "include/policy.h" #include "include/policy_unpack.h" +#include 
<linux/unaligned.h> + #define TEST_STRING_NAME "TEST_STRING" #define TEST_STRING_DATA "testing" #define TEST_STRING_BUF_OFFSET \ @@ -44,7 +46,7 @@ #define TEST_ARRAY_BUF_OFFSET \ (TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1) -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); struct policy_unpack_fixture { struct aa_ext *e; @@ -80,7 +82,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf, *(buf + 1) = strlen(TEST_U32_NAME) + 1; strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3)); *(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32; - *((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA); + put_unaligned_le32(TEST_U32_DATA, buf + 3 + strlen(TEST_U32_NAME) + 2); buf = e->start + TEST_NAMED_U64_BUF_OFFSET; *buf = AA_NAME; @@ -103,7 +105,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf, *(buf + 1) = strlen(TEST_ARRAY_NAME) + 1; strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3)); *(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY; - *((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE); + put_unaligned_le16(TEST_ARRAY_SIZE, buf + 3 + strlen(TEST_ARRAY_NAME) + 2); return e; } @@ -281,6 +283,8 @@ static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test) ((uintptr_t)puf->e->start <= (uintptr_t)string) && ((uintptr_t)string <= (uintptr_t)puf->e->end)); KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA); + + kfree(string); } static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test) @@ -296,6 +300,8 @@ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test) ((uintptr_t)puf->e->start <= (uintptr_t)string) && ((uintptr_t)string <= (uintptr_t)puf->e->end)); KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA); + + kfree(string); } static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test) @@ -313,6 +319,8 @@ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test) KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_NULL(test, string); KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start); + + kfree(string); } static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test) diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c index e3857e3d7c6c..ce40f15d4952 100644 --- a/security/apparmor/procattr.c +++ b/security/apparmor/procattr.c @@ -125,12 +125,14 @@ int aa_setprocattr_changehat(char *args, size_t size, int flags) for (count = 0; (hat < end) && count < 16; ++count) { char *next = hat + strlen(hat) + 1; hats[count] = hat; - AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d hat '%s'\n" + AA_DEBUG(DEBUG_DOMAIN, + "%s: (pid %d) Magic 0x%llx count %d hat '%s'\n" , __func__, current->pid, token, count, hat); hat = next; } } else - AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n", + AA_DEBUG(DEBUG_DOMAIN, + "%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n", __func__, current->pid, token, count, "<NULL>"); return aa_change_hat(hats, count, token, flags); diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index dcc94c3153d5..8e80db3ae21c 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c @@ -89,8 +89,7 @@ static int profile_setrlimit(const struct cred *subj_cred, struct aa_profile *profile, unsigned int resource, struct rlimit *new_rlim) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = 
profile->label.rules[0]; int e = 0; if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max > @@ -165,9 +164,7 @@ void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l) * to the lesser of the tasks hard limit and the init tasks soft limit */ label_for_each_confined(i, old_l, old) { - struct aa_ruleset *rules = list_first_entry(&old->rules, - typeof(*rules), - list); + struct aa_ruleset *rules = old->label.rules[0]; if (rules->rlimits.mask) { int j; @@ -185,9 +182,7 @@ void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l) /* set any new hard limits as dictated by the new profile */ label_for_each_confined(i, new_l, new) { - struct aa_ruleset *rules = list_first_entry(&new->rules, - typeof(*rules), - list); + struct aa_ruleset *rules = new->label.rules[0]; int j; if (!rules->rlimits.mask) diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c index 6350d107013a..28caf66b9033 100644 --- a/security/apparmor/secid.c +++ b/security/apparmor/secid.c @@ -39,20 +39,6 @@ int apparmor_display_secid_mode; * TODO: use secid_update in label replace */ -/** - * aa_secid_update - update a secid mapping to a new label - * @secid: secid to update - * @label: label the secid will now map to - */ -void aa_secid_update(u32 secid, struct aa_label *label) -{ - unsigned long flags; - - xa_lock_irqsave(&aa_secids, flags); - __xa_store(&aa_secids, secid, label, 0); - xa_unlock_irqrestore(&aa_secids, flags); -} - /* * see label for inverse aa_label_to_secid */ @@ -61,23 +47,21 @@ struct aa_label *aa_secid_to_label(u32 secid) return xa_load(&aa_secids, secid); } -static int apparmor_label_to_secctx(struct aa_label *label, char **secdata, - u32 *seclen) +static int apparmor_label_to_secctx(struct aa_label *label, + struct lsm_context *cp) { /* TODO: cache secctx and ref count so we don't have to recreate */ int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT; int len; - AA_BUG(!seclen); - if (!label) return -EINVAL; if (apparmor_display_secid_mode) flags |= FLAG_SHOW_MODE; - if (secdata) - len = aa_label_asxprint(secdata, root_ns, label, + if (cp) + len = aa_label_asxprint(&cp->context, root_ns, label, flags, GFP_ATOMIC); else len = aa_label_snxprint(NULL, 0, root_ns, label, flags); @@ -85,26 +69,28 @@ static int apparmor_label_to_secctx(struct aa_label *label, char **secdata, if (len < 0) return -ENOMEM; - *seclen = len; + if (cp) { + cp->len = len; + cp->id = LSM_ID_APPARMOR; + } - return 0; + return len; } -int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +int apparmor_secid_to_secctx(u32 secid, struct lsm_context *cp) { struct aa_label *label = aa_secid_to_label(secid); - return apparmor_label_to_secctx(label, secdata, seclen); + return apparmor_label_to_secctx(label, cp); } -int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, - u32 *seclen) +int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp) { struct aa_label *label; label = prop->apparmor.label; - return apparmor_label_to_secctx(label, secdata, seclen); + return apparmor_label_to_secctx(label, cp); } int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) @@ -120,9 +106,13 @@ int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) return 0; } -void apparmor_release_secctx(char *secdata, u32 seclen) +void apparmor_release_secctx(struct lsm_context *cp) { - kfree(secdata); + if (cp->id == LSM_ID_APPARMOR) { + kfree(cp->context); + cp->context = NULL; + cp->id = LSM_ID_UNDEF; + } } /** 
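
For context, a minimal sketch (not part of the commit) of how a hook-level caller might consume the struct lsm_context contract used by the apparmor secid hooks above. The security_secid_to_secctx()/security_release_secctx() wrapper names and the pr_info() logging are illustrative assumptions; only the context/len/id fields and the length-or-negative return convention are taken from the hunk itself.

	/*
	 * Illustrative sketch only: log the textual security context for a
	 * secid, assuming the security_*_secctx() wrappers follow the same
	 * lsm_context convention as apparmor_secid_to_secctx() above.
	 */
	static void example_log_secctx(u32 secid)
	{
		struct lsm_context ctx = { };	/* context, len, id */
		int len;

		len = security_secid_to_secctx(secid, &ctx);
		if (len < 0)
			return;		/* no context available or allocation failed */

		pr_info("subject=%.*s\n", (int)ctx.len, ctx.context);

		/* the release hook checks ctx.id, so this is safe for any LSM */
		security_release_secctx(&ctx);
	}

The release side keys off cp->id, so the same lsm_context can be handed back to whichever LSM filled it without the caller tracking which module allocated the string.
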
diff --git a/security/apparmor/task.c b/security/apparmor/task.c index c87fb9f4ac18..c9bc9cc69475 100644 --- a/security/apparmor/task.c +++ b/security/apparmor/task.c @@ -228,8 +228,7 @@ static int profile_ptrace_perm(const struct cred *cred, struct aa_label *peer, u32 request, struct apparmor_audit_data *ad) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), list); + struct aa_ruleset *rules = profile->label.rules[0]; struct aa_perms perms = { }; ad->subj_cred = cred; @@ -246,7 +245,7 @@ static int profile_tracee_perm(const struct cred *cred, struct apparmor_audit_data *ad) { if (profile_unconfined(tracee) || unconfined(tracer) || - !ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE)) + !label_mediates(&tracee->label, AA_CLASS_PTRACE)) return 0; return profile_ptrace_perm(cred, tracee, tracer, request, ad); @@ -260,7 +259,7 @@ static int profile_tracer_perm(const struct cred *cred, if (profile_unconfined(tracer)) return 0; - if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE)) + if (label_mediates(&tracer->label, AA_CLASS_PTRACE)) return profile_ptrace_perm(cred, tracer, tracee, request, ad); /* profile uses the old style capability check for ptrace */ @@ -324,9 +323,7 @@ int aa_profile_ns_perm(struct aa_profile *profile, ad->request = request; if (!profile_unconfined(profile)) { - struct aa_ruleset *rules = list_first_entry(&profile->rules, - typeof(*rules), - list); + struct aa_ruleset *rules = profile->label.rules[0]; aa_state_t state; state = RULE_MEDIATES(rules, ad->class); diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c index 3663aec7bcbd..40efde233f3a 100644 --- a/security/bpf/hooks.c +++ b/security/bpf/hooks.c @@ -13,7 +13,6 @@ static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = { #include <linux/lsm_hook_defs.h> #undef LSM_HOOK LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free), - LSM_HOOK_INIT(task_free, bpf_task_storage_free), }; static const struct lsm_id bpf_lsmid = { @@ -34,7 +33,7 @@ struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = { }; DEFINE_LSM(bpf) = { - .name = "bpf", + .id = &bpf_lsmid, .init = bpf_lsm_init, .blobs = &bpf_lsm_blob_sizes }; diff --git a/security/commoncap.c b/security/commoncap.c index cefad323a0b1..8a23dfab7fac 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -27,6 +27,9 @@ #include <linux/mnt_idmapping.h> #include <uapi/linux/lsm.h> +#define CREATE_TRACE_POINTS +#include <trace/events/capability.h> + /* * If a non-root user executes a setuid-root binary in * !secure(SECURE_NOROOT) mode, then we raise capabilities. @@ -50,24 +53,24 @@ static void warn_setuid_and_fcaps_mixed(const char *fname) } /** - * cap_capable - Determine whether a task has a particular effective capability + * cap_capable_helper - Determine whether a task has a particular effective + * capability. * @cred: The credentials to use - * @targ_ns: The user namespace in which we need the capability + * @target_ns: The user namespace of the resource being accessed + * @cred_ns: The user namespace of the credentials * @cap: The capability to check for - * @opts: Bitmask of options defined in include/linux/security.h * * Determine whether the nominated task has the specified capability amongst * its effective set, returning 0 if it does, -ve if it does not. * - * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable() - * and has_capability() functions. 
That is, it has the reverse semantics: - * cap_has_capability() returns 0 when a task has a capability, but the - * kernel's capable() and has_capability() returns 1 for this case. + * See cap_capable for more details. */ -int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, - int cap, unsigned int opts) +static inline int cap_capable_helper(const struct cred *cred, + struct user_namespace *target_ns, + const struct user_namespace *cred_ns, + int cap) { - struct user_namespace *ns = targ_ns; + struct user_namespace *ns = target_ns; /* See if cred has the capability in the target user namespace * by examining the target user namespace and all of the target @@ -75,21 +78,21 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, */ for (;;) { /* Do we have the necessary capabilities? */ - if (ns == cred->user_ns) + if (likely(ns == cred_ns)) return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM; /* * If we're already at a lower level than we're looking for, * we're done searching. */ - if (ns->level <= cred->user_ns->level) + if (ns->level <= cred_ns->level) return -EPERM; /* * The owner of the user namespace in the parent of the * user namespace has all caps. */ - if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid)) + if ((ns->parent == cred_ns) && uid_eq(ns->owner, cred->euid)) return 0; /* @@ -103,6 +106,32 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, } /** + * cap_capable - Determine whether a task has a particular effective capability + * @cred: The credentials to use + * @target_ns: The user namespace of the resource being accessed + * @cap: The capability to check for + * @opts: Bitmask of options defined in include/linux/security.h (unused) + * + * Determine whether the nominated task has the specified capability amongst + * its effective set, returning 0 if it does, -ve if it does not. + * + * NOTE WELL: cap_capable() has reverse semantics to the capable() call + * and friends. That is cap_capable() returns an int 0 when a task has + * a capability, while the kernel's capable(), has_ns_capability(), + * has_ns_capability_noaudit(), and has_capability_noaudit() return a + * bool true (1) for this case. + */ +int cap_capable(const struct cred *cred, struct user_namespace *target_ns, + int cap, unsigned int opts) +{ + const struct user_namespace *cred_ns = cred->user_ns; + int ret = cap_capable_helper(cred, target_ns, cred_ns, cap); + + trace_cap_capable(cred, target_ns, cred_ns, cap, ret); + return ret; +} + +/** * cap_settime - Determine whether the current process may set the system clock * @ts: The time to set * @tz: The timezone to set @@ -329,17 +358,17 @@ int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry) return error; } -static bool rootid_owns_currentns(vfsuid_t rootvfsuid) +/** + * kuid_root_in_ns - check whether the given kuid is root in the given ns + * @kuid: the kuid to be tested + * @ns: the user namespace to test against + * + * Returns true if @kuid represents the root user in @ns, false otherwise. 
+ */ +static bool kuid_root_in_ns(kuid_t kuid, struct user_namespace *ns) { - struct user_namespace *ns; - kuid_t kroot; - - if (!vfsuid_valid(rootvfsuid)) - return false; - - kroot = vfsuid_into_kuid(rootvfsuid); - for (ns = current_user_ns();; ns = ns->parent) { - if (from_kuid(ns, kroot) == 0) + for (;; ns = ns->parent) { + if (from_kuid(ns, kuid) == 0) return true; if (ns == &init_user_ns) break; @@ -348,6 +377,16 @@ static bool rootid_owns_currentns(vfsuid_t rootvfsuid) return false; } +static bool vfsuid_root_in_currentns(vfsuid_t vfsuid) +{ + kuid_t kuid; + + if (!vfsuid_valid(vfsuid)) + return false; + kuid = vfsuid_into_kuid(vfsuid); + return kuid_root_in_ns(kuid, current_user_ns()); +} + static __u32 sansflags(__u32 m) { return m & ~VFS_CAP_FLAGS_EFFECTIVE; @@ -452,7 +491,7 @@ int cap_inode_getsecurity(struct mnt_idmap *idmap, goto out_free; } - if (!rootid_owns_currentns(vfsroot)) { + if (!vfsuid_root_in_currentns(vfsroot)) { size = -EOVERFLOW; goto out_free; } @@ -693,7 +732,7 @@ int get_vfs_caps_from_disk(struct mnt_idmap *idmap, /* Limit the caps to the mounter of the filesystem * or the more limited uid specified in the xattr. */ - if (!rootid_owns_currentns(rootvfsuid)) + if (!vfsuid_root_in_currentns(rootvfsuid)) return -ENODATA; cpu_caps->permitted.val = le32_to_cpu(caps->data[0].permitted); @@ -827,12 +866,6 @@ static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap, #define __cap_full(field, cred) \ cap_issubset(CAP_FULL_SET, cred->cap_##field) -static inline bool __is_setuid(struct cred *new, const struct cred *old) -{ return !uid_eq(new->euid, old->uid); } - -static inline bool __is_setgid(struct cred *new, const struct cred *old) -{ return !gid_eq(new->egid, old->gid); } - /* * 1) Audit candidate if current->cap_effective is set * @@ -862,7 +895,7 @@ static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old, (root_privileged() && __is_suid(root, new) && !__cap_full(effective, new)) || - (!__is_setuid(new, old) && + (uid_eq(new->euid, old->euid) && ((has_fcap && __cap_gained(permitted, new, old)) || __cap_gained(ambient, new, old)))) @@ -888,7 +921,7 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) /* Process setpcap binaries and capabilities for uid 0 */ const struct cred *old = current_cred(); struct cred *new = bprm->cred; - bool effective = false, has_fcap = false, is_setid; + bool effective = false, has_fcap = false, id_changed; int ret; kuid_t root_uid; @@ -912,9 +945,9 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) * * In addition, if NO_NEW_PRIVS, then ensure we get no new privs. */ - is_setid = __is_setuid(new, old) || __is_setgid(new, old); + id_changed = !uid_eq(new->euid, old->euid) || !in_group_p(new->egid); - if ((is_setid || __cap_gained(permitted, new, old)) && + if ((id_changed || __cap_gained(permitted, new, old)) && ((bprm->unsafe & ~LSM_UNSAFE_PTRACE) || !ptracer_capable(current, new->user_ns))) { /* downgrade; they get no more than they had, and maybe less */ @@ -931,7 +964,7 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) new->sgid = new->fsgid = new->egid; /* File caps or setid cancels ambient. */ - if (has_fcap || is_setid) + if (has_fcap || id_changed) cap_clear(new->cap_ambient); /* @@ -964,7 +997,9 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) return -EPERM; /* Check for privilege-elevated exec. 
*/ - if (is_setid || + if (id_changed || + !uid_eq(new->euid, old->uid) || + !gid_eq(new->egid, old->gid) || (!__is_real(root_uid, new) && (effective || __cap_grew(permitted, ambient, new)))) @@ -1302,21 +1337,38 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, & (old->securebits ^ arg2)) /*[1]*/ || ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/ || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/ - || (cap_capable(current_cred(), - current_cred()->user_ns, - CAP_SETPCAP, - CAP_OPT_NONE) != 0) /*[4]*/ /* * [1] no changing of bits that are locked * [2] no unlocking of locks * [3] no setting of unsupported bits - * [4] doing anything requires privilege (go read about - * the "sendmail capabilities bug") */ ) /* cannot change a locked bit */ return -EPERM; + /* + * Doing anything requires privilege (go read about the + * "sendmail capabilities bug"), except for unprivileged bits. + * Indeed, the SECURE_ALL_UNPRIVILEGED bits are not + * restrictions enforced by the kernel but by user space on + * itself. + */ + if (cap_capable(current_cred(), current_cred()->user_ns, + CAP_SETPCAP, CAP_OPT_NONE) != 0) { + const unsigned long unpriv_and_locks = + SECURE_ALL_UNPRIVILEGED | + SECURE_ALL_UNPRIVILEGED << 1; + const unsigned long changed = old->securebits ^ arg2; + + /* For legacy reason, denies non-change. */ + if (!changed) + return -EPERM; + + /* Denies privileged changes. */ + if (changed & ~unpriv_and_locks) + return -EPERM; + } + new = prepare_creds(); if (!new) return -ENOMEM; @@ -1428,12 +1480,6 @@ int cap_mmap_addr(unsigned long addr) return ret; } -int cap_mmap_file(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags) -{ - return 0; -} - #ifdef CONFIG_SECURITY static const struct lsm_id capability_lsmid = { @@ -1453,7 +1499,6 @@ static struct security_hook_list capability_hooks[] __ro_after_init = { LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv), LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity), LSM_HOOK_INIT(mmap_addr, cap_mmap_addr), - LSM_HOOK_INIT(mmap_file, cap_mmap_file), LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid), LSM_HOOK_INIT(task_prctl, cap_task_prctl), LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler), @@ -1470,7 +1515,7 @@ static int __init capability_init(void) } DEFINE_LSM(capability) = { - .name = "capability", + .id = &capability_lsmid, .order = LSM_ORDER_FIRST, .init = capability_init, }; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index dc4df7475081..7fec575d32d6 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -244,45 +244,40 @@ static void devcgroup_css_free(struct cgroup_subsys_state *css) #define DEVCG_DENY 2 #define DEVCG_LIST 3 -#define MAJMINLEN 13 -#define ACCLEN 4 - -static void set_access(char *acc, short access) +static void seq_putaccess(struct seq_file *m, short access) { - int idx = 0; - memset(acc, 0, ACCLEN); if (access & DEVCG_ACC_READ) - acc[idx++] = 'r'; + seq_putc(m, 'r'); if (access & DEVCG_ACC_WRITE) - acc[idx++] = 'w'; + seq_putc(m, 'w'); if (access & DEVCG_ACC_MKNOD) - acc[idx++] = 'm'; + seq_putc(m, 'm'); } -static char type_to_char(short type) +static void seq_puttype(struct seq_file *m, short type) { if (type == DEVCG_DEV_ALL) - return 'a'; - if (type == DEVCG_DEV_CHAR) - return 'c'; - if (type == DEVCG_DEV_BLOCK) - return 'b'; - return 'X'; + seq_putc(m, 'a'); + else if (type == DEVCG_DEV_CHAR) + seq_putc(m, 'c'); + else if (type == DEVCG_DEV_BLOCK) + seq_putc(m, 'b'); + else + seq_putc(m, 'X'); } -static 
void set_majmin(char *str, unsigned m) +static void seq_putversion(struct seq_file *m, unsigned int version) { - if (m == ~0) - strcpy(str, "*"); + if (version == ~0) + seq_putc(m, '*'); else - sprintf(str, "%u", m); + seq_printf(m, "%u", version); } static int devcgroup_seq_show(struct seq_file *m, void *v) { struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m)); struct dev_exception_item *ex; - char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; rcu_read_lock(); /* @@ -292,18 +287,17 @@ static int devcgroup_seq_show(struct seq_file *m, void *v) * This way, the file remains as a "whitelist of devices" */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { - set_access(acc, DEVCG_ACC_MASK); - set_majmin(maj, ~0); - set_majmin(min, ~0); - seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL), - maj, min, acc); + seq_puts(m, "a *:* rwm\n"); } else { list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) { - set_access(acc, ex->access); - set_majmin(maj, ex->major); - set_majmin(min, ex->minor); - seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type), - maj, min, acc); + seq_puttype(m, ex->type); + seq_putc(m, ' '); + seq_putversion(m, ex->major); + seq_putc(m, ':'); + seq_putversion(m, ex->minor); + seq_putc(m, ' '); + seq_putaccess(m, ex->access); + seq_putc(m, '\n'); } } rcu_read_unlock(); diff --git a/security/inode.c b/security/inode.c index da3ab44c8e57..81fb5d6dd33e 100644 --- a/security/inode.c +++ b/security/inode.c @@ -22,6 +22,8 @@ #include <linux/lsm_hooks.h> #include <linux/magic.h> +#include "lsm.h" + static struct vfsmount *mount; static int mount_count; @@ -70,7 +72,7 @@ static struct file_system_type fs_type = { .owner = THIS_MODULE, .name = "securityfs", .init_fs_context = securityfs_init_fs_context, - .kill_sb = kill_litter_super, + .kill_sb = kill_anon_super, }; /** @@ -112,37 +114,34 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode, struct dentry *dentry; struct inode *dir, *inode; int error; + bool pinned = false; if (!(mode & S_IFMT)) mode = (mode & S_IALLUGO) | S_IFREG; pr_debug("securityfs: creating file '%s'\n",name); - error = simple_pin_fs(&fs_type, &mount, &mount_count); - if (error) - return ERR_PTR(error); - - if (!parent) + if (!parent) { + error = simple_pin_fs(&fs_type, &mount, &mount_count); + if (error) + return ERR_PTR(error); + pinned = true; parent = mount->mnt_root; + } - dir = d_inode(parent); - - inode_lock(dir); - dentry = lookup_one_len(name, parent, strlen(name)); - if (IS_ERR(dentry)) + inode = new_inode(parent->d_sb); + if (unlikely(!inode)) { + dentry = ERR_PTR(-ENOMEM); goto out; - - if (d_really_is_positive(dentry)) { - error = -EEXIST; - goto out1; } - inode = new_inode(dir->i_sb); - if (!inode) { - error = -ENOMEM; - goto out1; - } + dir = d_inode(parent); + dentry = simple_start_creating(parent, name); + if (IS_ERR(dentry)) { + iput(inode); + goto out; + } inode->i_ino = get_next_ino(); inode->i_mode = mode; simple_inode_init_ts(inode); @@ -158,17 +157,13 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode, } else { inode->i_fop = fops; } - d_instantiate(dentry, inode); - dget(dentry); - inode_unlock(dir); - return dentry; + d_make_persistent(dentry, inode); + simple_done_creating(dentry); + return dentry; // borrowed -out1: - dput(dentry); - dentry = ERR_PTR(error); out: - inode_unlock(dir); - simple_release_fs(&mount, &mount_count); + if (pinned) + simple_release_fs(&mount, &mount_count); return dentry; } @@ -279,6 +274,12 @@ struct dentry 
*securityfs_create_symlink(const char *name, } EXPORT_SYMBOL_GPL(securityfs_create_symlink); +static void remove_one(struct dentry *victim) +{ + if (victim->d_parent == victim->d_sb->s_root) + simple_release_fs(&mount, &mount_count); +} + /** * securityfs_remove - removes a file or directory from the securityfs filesystem * @@ -291,43 +292,11 @@ EXPORT_SYMBOL_GPL(securityfs_create_symlink); * This function is required to be called in order for the file to be * removed. No automatic cleanup of files will happen when a module is * removed; you are responsible here. - */ -void securityfs_remove(struct dentry *dentry) -{ - struct inode *dir; - - if (IS_ERR_OR_NULL(dentry)) - return; - - dir = d_inode(dentry->d_parent); - inode_lock(dir); - if (simple_positive(dentry)) { - if (d_is_dir(dentry)) - simple_rmdir(dir, dentry); - else - simple_unlink(dir, dentry); - dput(dentry); - } - inode_unlock(dir); - simple_release_fs(&mount, &mount_count); -} -EXPORT_SYMBOL_GPL(securityfs_remove); - -static void remove_one(struct dentry *victim) -{ - simple_release_fs(&mount, &mount_count); -} - -/** - * securityfs_recursive_remove - recursively removes a file or directory - * - * @dentry: a pointer to a the dentry of the file or directory to be removed. * - * This function recursively removes a file or directory in securityfs that was - * previously created with a call to another securityfs function (like - * securityfs_create_file() or variants thereof.) + * AV: when applied to directory it will take all children out; no need to call + * it for descendents if ancestor is getting killed. */ -void securityfs_recursive_remove(struct dentry *dentry) +void securityfs_remove(struct dentry *dentry) { if (IS_ERR_OR_NULL(dentry)) return; @@ -336,15 +305,52 @@ void securityfs_recursive_remove(struct dentry *dentry) simple_recursive_removal(dentry, remove_one); simple_release_fs(&mount, &mount_count); } -EXPORT_SYMBOL_GPL(securityfs_recursive_remove); +EXPORT_SYMBOL_GPL(securityfs_remove); #ifdef CONFIG_SECURITY +#include <linux/spinlock.h> + static struct dentry *lsm_dentry; + static ssize_t lsm_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { - return simple_read_from_buffer(buf, count, ppos, lsm_names, - strlen(lsm_names)); + int i; + static char *str; + static size_t len; + static DEFINE_SPINLOCK(lock); + + /* NOTE: we never free or modify the string once it is set */ + + if (unlikely(!str || !len)) { + char *str_tmp; + size_t len_tmp = 0; + + for (i = 0; i < lsm_active_cnt; i++) + /* the '+ 1' accounts for either a comma or a NUL */ + len_tmp += strlen(lsm_idlist[i]->name) + 1; + + str_tmp = kmalloc(len_tmp, GFP_KERNEL); + if (!str_tmp) + return -ENOMEM; + str_tmp[0] = '\0'; + + for (i = 0; i < lsm_active_cnt; i++) { + if (i > 0) + strcat(str_tmp, ","); + strcat(str_tmp, lsm_idlist[i]->name); + } + + spin_lock(&lock); + if (!str) { + str = str_tmp; + len = len_tmp - 1; + } else + kfree(str_tmp); + spin_unlock(&lock); + } + + return simple_read_from_buffer(buf, count, ppos, str, len); } static const struct file_operations lsm_ops = { @@ -353,7 +359,7 @@ static const struct file_operations lsm_ops = { }; #endif -static int __init securityfs_init(void) +int __init securityfs_init(void) { int retval; @@ -372,4 +378,3 @@ static int __init securityfs_init(void) #endif return 0; } -core_initcall(securityfs_init); diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig index 3c45f4f3455f..916d4f2bfc44 100644 --- a/security/integrity/Kconfig +++ b/security/integrity/Kconfig @@ -36,6 
+36,7 @@ config INTEGRITY_ASYMMETRIC_KEYS default n select ASYMMETRIC_KEY_TYPE select ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select CRYPTO select CRYPTO_RSA select X509_CERTIFICATE_PARSER help diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 7c06ffd633d2..a5e730ffda57 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, } /* - * Dump large security xattr values as a continuous ascii hexademical string. + * Dump large security xattr values as a continuous ascii hexadecimal string. * (pr_debug is limited to 64 bytes.) */ static void dump_security_xattr_l(const char *prefix, const void *src, diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 377e57e9084f..73d500a375cb 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry) * and compare it against the stored security.evm xattr. * * For performance: - * - use the previoulsy retrieved xattr value and length to calculate the + * - use the previously retrieved xattr value and length to calculate the * HMAC.) * - cache the verification result in the iint, when available. * @@ -1175,10 +1175,9 @@ struct lsm_blob_sizes evm_blob_sizes __ro_after_init = { }; DEFINE_LSM(evm) = { - .name = "evm", + .id = &evm_lsmid, .init = init_evm_lsm, .order = LSM_ORDER_LAST, .blobs = &evm_blob_sizes, + .initcall_late = init_evm, }; - -late_initcall(init_evm); diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c index 9b907c2fee60..c26724690cec 100644 --- a/security/integrity/evm/evm_secfs.c +++ b/security/integrity/evm/evm_secfs.c @@ -17,7 +17,6 @@ #include "evm.h" static struct dentry *evm_dir; -static struct dentry *evm_init_tpm; static struct dentry *evm_symlink; #ifdef CONFIG_EVM_ADD_XATTRS @@ -286,7 +285,7 @@ static int evm_init_xattrs(void) { evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL, &evm_xattr_ops); - if (!evm_xattrs || IS_ERR(evm_xattrs)) + if (IS_ERR(evm_xattrs)) return -EFAULT; return 0; @@ -301,21 +300,28 @@ static int evm_init_xattrs(void) int __init evm_init_secfs(void) { int error = 0; + struct dentry *dentry; - evm_dir = securityfs_create_dir("evm", integrity_dir); - if (!evm_dir || IS_ERR(evm_dir)) + error = integrity_fs_init(); + if (error < 0) return -EFAULT; - evm_init_tpm = securityfs_create_file("evm", 0660, - evm_dir, NULL, &evm_key_ops); - if (!evm_init_tpm || IS_ERR(evm_init_tpm)) { + evm_dir = securityfs_create_dir("evm", integrity_dir); + if (IS_ERR(evm_dir)) { + error = -EFAULT; + goto out; + } + + dentry = securityfs_create_file("evm", 0660, + evm_dir, NULL, &evm_key_ops); + if (IS_ERR(dentry)) { error = -EFAULT; goto out; } evm_symlink = securityfs_create_symlink("evm", NULL, "integrity/evm/evm", NULL); - if (!evm_symlink || IS_ERR(evm_symlink)) { + if (IS_ERR(evm_symlink)) { error = -EFAULT; goto out; } @@ -328,7 +334,7 @@ int __init evm_init_secfs(void) return 0; out: securityfs_remove(evm_symlink); - securityfs_remove(evm_init_tpm); securityfs_remove(evm_dir); + integrity_fs_fini(); return error; } diff --git a/security/integrity/iint.c b/security/integrity/iint.c index 068ac6c2ae1e..8ec1a3436a71 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c @@ -42,8 +42,11 @@ void __init integrity_load_keys(void) evm_load_x509(); } -static int __init 
integrity_fs_init(void) +int __init integrity_fs_init(void) { + if (integrity_dir) + return 0; + integrity_dir = securityfs_create_dir("integrity", NULL); if (IS_ERR(integrity_dir)) { int ret = PTR_ERR(integrity_dir); @@ -58,4 +61,11 @@ static int __init integrity_fs_init(void) return 0; } -late_initcall(integrity_fs_init) +void __init integrity_fs_fini(void) +{ + if (!integrity_dir || !simple_empty(integrity_dir)) + return; + + securityfs_remove(integrity_dir); + integrity_dir = NULL; +} diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 475c32615006..976e75f9b9ba 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -321,4 +321,15 @@ config IMA_DISABLE_HTABLE help This option disables htable to allow measurement of duplicate records. +config IMA_KEXEC_EXTRA_MEMORY_KB + int "Extra memory for IMA measurements added during kexec soft reboot" + range 0 40 + depends on IMA_KEXEC + default 0 + help + IMA_KEXEC_EXTRA_MEMORY_KB determines the extra memory to be + allocated (in kb) for IMA measurements added during kexec soft reboot. + If set to the default value of 0, an extra half page of memory for those + additional measurements will be allocated. + endif diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index c0d3b716d11f..e3d71d8d56e3 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -149,6 +149,9 @@ struct ima_kexec_hdr { #define IMA_CHECK_BLACKLIST 0x40000000 #define IMA_VERITY_REQUIRED 0x80000000 +/* Exclude non-action flags which are not rule-specific. */ +#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE) + #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ IMA_HASH | IMA_APPRAISE_SUBMASK) #define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \ @@ -178,7 +181,8 @@ struct ima_kexec_hdr { #define IMA_UPDATE_XATTR 1 #define IMA_CHANGE_ATTR 2 #define IMA_DIGSIG 3 -#define IMA_MUST_MEASURE 4 +#define IMA_MAY_EMIT_TOMTOU 4 +#define IMA_EMITTED_OPENWRITERS 5 /* IMA integrity metadata associated with an inode */ struct ima_iint_cache { @@ -240,6 +244,12 @@ void ima_post_key_create_or_update(struct key *keyring, struct key *key, unsigned long flags, bool create); #endif +#ifdef CONFIG_IMA_KEXEC +void ima_measure_kexec_event(const char *event_name); +#else +static inline void ima_measure_kexec_event(const char *event_name) {} +#endif + /* * The default binary_runtime_measurements list format is defined as the * platform native format. The canonical format is defined as little-endian. 
@@ -278,6 +288,7 @@ unsigned long ima_get_binary_runtime_size(void); int ima_init_template(void); void ima_init_template_list(void); int __init ima_init_digests(void); +void __init ima_init_reboot_notifier(void); int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event, void *lsm_data); diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 884a3533f7af..5149ff4fd50d 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/file.h> +#include <linux/binfmts.h> #include <linux/fs.h> #include <linux/xattr.h> #include <linux/magic.h> @@ -469,6 +470,17 @@ int ima_check_blacklist(struct ima_iint_cache *iint, return rc; } +static bool is_bprm_creds_for_exec(enum ima_hooks func, struct file *file) +{ + struct linux_binprm *bprm; + + if (func == BPRM_CHECK) { + bprm = container_of(&file, struct linux_binprm, file); + return bprm->is_check; + } + return false; +} + /* * ima_appraise_measurement - appraise file measurement * @@ -483,6 +495,7 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint, int xattr_len, const struct modsig *modsig) { static const char op[] = "appraise_data"; + int audit_msgno = AUDIT_INTEGRITY_DATA; const char *cause = "unknown"; struct dentry *dentry = file_dentry(file); struct inode *inode = d_backing_inode(dentry); @@ -494,6 +507,16 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint, if (!(inode->i_opflags & IOP_XATTR) && !try_modsig) return INTEGRITY_UNKNOWN; + /* + * Unlike any of the other LSM hooks where the kernel enforces file + * integrity, enforcing file integrity for the bprm_creds_for_exec() + * LSM hook with the AT_EXECVE_CHECK flag is left up to the discretion + * of the script interpreter(userspace). Differentiate kernel and + * userspace enforced integrity audit messages. + */ + if (is_bprm_creds_for_exec(func, file)) + audit_msgno = AUDIT_INTEGRITY_USERSPACE; + /* If reading the xattr failed and there's no modsig, error out. */ if (rc <= 0 && !try_modsig) { if (rc && rc != -ENODATA) @@ -569,7 +592,7 @@ out: (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) { status = INTEGRITY_FAIL; cause = "unverifiable-signature"; - integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, + integrity_audit_msg(audit_msgno, inode, filename, op, cause, rc, 0); } else if (status != INTEGRITY_PASS) { /* Fix mode, but don't replace file signatures. 
*/ @@ -589,7 +612,7 @@ out: status = INTEGRITY_PASS; } - integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, + integrity_audit_msg(audit_msgno, inode, filename, op, cause, rc, 0); } else { ima_cache_flags(iint, func); @@ -671,6 +694,15 @@ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name, return 0; } +/* + * ima_reset_appraise_flags - reset ima_iint_cache flags + * + * @digsig: whether to clear/set IMA_DIGSIG flag, tristate values + * 0: clear IMA_DIGSIG + * 1: set IMA_DIGSIG + * -1: don't change IMA_DIGSIG + * + */ static void ima_reset_appraise_flags(struct inode *inode, int digsig) { struct ima_iint_cache *iint; @@ -683,9 +715,9 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig) return; iint->measured_pcrs = 0; set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags); - if (digsig) + if (digsig == 1) set_bit(IMA_DIGSIG, &iint->atomic_flags); - else + else if (digsig == 0) clear_bit(IMA_DIGSIG, &iint->atomic_flags); } @@ -771,6 +803,8 @@ static int ima_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG); } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) { digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG); + } else { + digsig = -1; } if (result == 1 || evm_revalidate_status(xattr_name)) { ima_reset_appraise_flags(d_backing_inode(dentry), digsig); @@ -784,7 +818,7 @@ static int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (evm_revalidate_status(acl_name)) - ima_reset_appraise_flags(d_backing_inode(dentry), 0); + ima_reset_appraise_flags(d_backing_inode(dentry), -1); return 0; } @@ -792,11 +826,13 @@ static int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, static int ima_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *xattr_name) { - int result; + int result, digsig = -1; result = ima_protect_xattr(dentry, xattr_name, NULL, 0); if (result == 1 || evm_revalidate_status(xattr_name)) { - ima_reset_appraise_flags(d_backing_inode(dentry), 0); + if (!strcmp(xattr_name, XATTR_NAME_IMA)) + digsig = 0; + ima_reset_appraise_flags(d_backing_inode(dentry), digsig); if (result == 1) result = 0; } diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index e4a79a9b2d58..012a58959ff0 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -116,28 +116,6 @@ void ima_putc(struct seq_file *m, void *data, int datalen) seq_putc(m, *(char *)data++); } -static struct dentry **ascii_securityfs_measurement_lists __ro_after_init; -static struct dentry **binary_securityfs_measurement_lists __ro_after_init; -static int securityfs_measurement_list_count __ro_after_init; - -static void lookup_template_data_hash_algo(int *algo_idx, enum hash_algo *algo, - struct seq_file *m, - struct dentry **lists) -{ - struct dentry *dentry; - int i; - - dentry = file_dentry(m->file); - - for (i = 0; i < securityfs_measurement_list_count; i++) { - if (dentry == lists[i]) { - *algo_idx = i; - *algo = ima_algo_array[i].algo; - break; - } - } -} - /* print format: * 32bit-le=pcr# * char[n]=template digest @@ -160,9 +138,10 @@ int ima_measurements_show(struct seq_file *m, void *v) algo_idx = ima_sha1_idx; algo = HASH_ALGO_SHA1; - if (m->file != NULL) - lookup_template_data_hash_algo(&algo_idx, &algo, m, - binary_securityfs_measurement_lists); + if (m->file != NULL) { + algo_idx = (unsigned long)file_inode(m->file)->i_private; + algo = 
ima_algo_array[algo_idx].algo; + } /* get entry */ e = qe->entry; @@ -256,9 +235,10 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v) algo_idx = ima_sha1_idx; algo = HASH_ALGO_SHA1; - if (m->file != NULL) - lookup_template_data_hash_algo(&algo_idx, &algo, m, - ascii_securityfs_measurement_lists); + if (m->file != NULL) { + algo_idx = (unsigned long)file_inode(m->file)->i_private; + algo = ima_algo_array[algo_idx].algo; + } /* get entry */ e = qe->entry; @@ -396,11 +376,6 @@ out: static struct dentry *ima_dir; static struct dentry *ima_symlink; -static struct dentry *binary_runtime_measurements; -static struct dentry *ascii_runtime_measurements; -static struct dentry *runtime_measurements_count; -static struct dentry *violations; -static struct dentry *ima_policy; enum ima_fs_flags { IMA_FS_BUSY, @@ -417,64 +392,33 @@ static const struct seq_operations ima_policy_seqops = { }; #endif -static void __init remove_securityfs_measurement_lists(struct dentry **lists) -{ - int i; - - if (lists) { - for (i = 0; i < securityfs_measurement_list_count; i++) - securityfs_remove(lists[i]); - - kfree(lists); - } -} - static int __init create_securityfs_measurement_lists(void) { - char file_name[NAME_MAX + 1]; - struct dentry *dentry; - u16 algo; - int i; - - securityfs_measurement_list_count = NR_BANKS(ima_tpm_chip); + int count = NR_BANKS(ima_tpm_chip); if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) - securityfs_measurement_list_count++; - - ascii_securityfs_measurement_lists = - kcalloc(securityfs_measurement_list_count, sizeof(struct dentry *), - GFP_KERNEL); - if (!ascii_securityfs_measurement_lists) - return -ENOMEM; + count++; - binary_securityfs_measurement_lists = - kcalloc(securityfs_measurement_list_count, sizeof(struct dentry *), - GFP_KERNEL); - if (!binary_securityfs_measurement_lists) - return -ENOMEM; - - for (i = 0; i < securityfs_measurement_list_count; i++) { - algo = ima_algo_array[i].algo; + for (int i = 0; i < count; i++) { + u16 algo = ima_algo_array[i].algo; + char file_name[NAME_MAX + 1]; + struct dentry *dentry; sprintf(file_name, "ascii_runtime_measurements_%s", hash_algo_name[algo]); dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP, - ima_dir, NULL, + ima_dir, (void *)(uintptr_t)i, &ima_ascii_measurements_ops); if (IS_ERR(dentry)) return PTR_ERR(dentry); - ascii_securityfs_measurement_lists[i] = dentry; - sprintf(file_name, "binary_runtime_measurements_%s", hash_algo_name[algo]); dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP, - ima_dir, NULL, + ima_dir, (void *)(uintptr_t)i, &ima_measurements_ops); if (IS_ERR(dentry)) return PTR_ERR(dentry); - - binary_securityfs_measurement_lists[i] = dentry; } return 0; @@ -533,8 +477,7 @@ static int ima_release_policy(struct inode *inode, struct file *file) ima_update_policy(); #if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY) - securityfs_remove(ima_policy); - ima_policy = NULL; + securityfs_remove(file->f_path.dentry); #elif defined(CONFIG_IMA_WRITE_POLICY) clear_bit(IMA_FS_BUSY, &ima_fs_flags); #elif defined(CONFIG_IMA_READ_POLICY) @@ -553,14 +496,18 @@ static const struct file_operations ima_measure_policy_ops = { int __init ima_fs_init(void) { + struct dentry *dentry; int ret; - ascii_securityfs_measurement_lists = NULL; - binary_securityfs_measurement_lists = NULL; + ret = integrity_fs_init(); + if (ret < 0) + return ret; ima_dir = securityfs_create_dir("ima", integrity_dir); - if (IS_ERR(ima_dir)) - return PTR_ERR(ima_dir); + if (IS_ERR(ima_dir)) { + ret = 
PTR_ERR(ima_dir); + goto out; + } ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima", NULL); @@ -573,59 +520,48 @@ int __init ima_fs_init(void) if (ret != 0) goto out; - binary_runtime_measurements = - securityfs_create_symlink("binary_runtime_measurements", ima_dir, + dentry = securityfs_create_symlink("binary_runtime_measurements", ima_dir, "binary_runtime_measurements_sha1", NULL); - if (IS_ERR(binary_runtime_measurements)) { - ret = PTR_ERR(binary_runtime_measurements); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto out; } - ascii_runtime_measurements = - securityfs_create_symlink("ascii_runtime_measurements", ima_dir, + dentry = securityfs_create_symlink("ascii_runtime_measurements", ima_dir, "ascii_runtime_measurements_sha1", NULL); - if (IS_ERR(ascii_runtime_measurements)) { - ret = PTR_ERR(ascii_runtime_measurements); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto out; } - runtime_measurements_count = - securityfs_create_file("runtime_measurements_count", + dentry = securityfs_create_file("runtime_measurements_count", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_measurements_count_ops); - if (IS_ERR(runtime_measurements_count)) { - ret = PTR_ERR(runtime_measurements_count); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto out; } - violations = - securityfs_create_file("violations", S_IRUSR | S_IRGRP, + dentry = securityfs_create_file("violations", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_htable_violations_ops); - if (IS_ERR(violations)) { - ret = PTR_ERR(violations); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto out; } - ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS, + dentry = securityfs_create_file("policy", POLICY_FILE_FLAGS, ima_dir, NULL, &ima_measure_policy_ops); - if (IS_ERR(ima_policy)) { - ret = PTR_ERR(ima_policy); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto out; } return 0; out: - securityfs_remove(ima_policy); - securityfs_remove(violations); - securityfs_remove(runtime_measurements_count); - securityfs_remove(ascii_runtime_measurements); - securityfs_remove(binary_runtime_measurements); - remove_securityfs_measurement_lists(ascii_securityfs_measurement_lists); - remove_securityfs_measurement_lists(binary_securityfs_measurement_lists); - securityfs_measurement_list_count = 0; securityfs_remove(ima_symlink); securityfs_remove(ima_dir); + integrity_fs_fini(); return ret; } diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 4e208239a40e..a2f34f2d8ad7 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -152,6 +152,8 @@ int __init ima_init(void) ima_init_key_queue(); + ima_init_reboot_notifier(); + ima_measure_critical_data("kernel_info", "kernel_version", UTS_RELEASE, strlen(UTS_RELEASE), false, NULL, 0); diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 52e00332defe..7362f68f2d8b 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -12,65 +12,118 @@ #include <linux/kexec.h> #include <linux/of.h> #include <linux/ima.h> +#include <linux/reboot.h> +#include <asm/page.h> #include "ima.h" #ifdef CONFIG_IMA_KEXEC +#define IMA_KEXEC_EVENT_LEN 256 + +static bool ima_kexec_update_registered; +static struct seq_file ima_kexec_file; +static size_t kexec_segment_size; +static void *ima_kexec_buffer; + +static void ima_free_kexec_file_buf(struct seq_file *sf) +{ + vfree(sf->buf); + sf->buf = NULL; + sf->size = 0; + sf->read_pos = 0; + sf->count = 0; +} + +void 
ima_measure_kexec_event(const char *event_name) +{ + char ima_kexec_event[IMA_KEXEC_EVENT_LEN]; + size_t buf_size = 0; + long len; + int n; + + buf_size = ima_get_binary_runtime_size(); + len = atomic_long_read(&ima_htable.len); + + n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN, + "kexec_segment_size=%lu;ima_binary_runtime_size=%lu;" + "ima_runtime_measurements_count=%ld;", + kexec_segment_size, buf_size, len); + + ima_measure_critical_data("ima_kexec", event_name, ima_kexec_event, n, false, NULL, 0); +} + +static int ima_alloc_kexec_file_buf(size_t segment_size) +{ + /* + * kexec 'load' may be called multiple times. + * Free and realloc the buffer only if the segment_size is + * changed from the previous kexec 'load' call. + */ + if (ima_kexec_file.buf && ima_kexec_file.size == segment_size) + goto out; + + ima_free_kexec_file_buf(&ima_kexec_file); + + /* segment size can't change between kexec load and execute */ + ima_kexec_file.buf = vmalloc(segment_size); + if (!ima_kexec_file.buf) + return -ENOMEM; + + ima_kexec_file.size = segment_size; + +out: + ima_kexec_file.read_pos = 0; + ima_kexec_file.count = sizeof(struct ima_kexec_hdr); /* reserved space */ + ima_measure_kexec_event("kexec_load"); + + return 0; +} + static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer, unsigned long segment_size) { struct ima_queue_entry *qe; - struct seq_file file; struct ima_kexec_hdr khdr; int ret = 0; /* segment size can't change between kexec load and execute */ - file.buf = vmalloc(segment_size); - if (!file.buf) { - ret = -ENOMEM; - goto out; + if (!ima_kexec_file.buf) { + pr_err("Kexec file buf not allocated\n"); + return -EINVAL; } - file.file = NULL; - file.size = segment_size; - file.read_pos = 0; - file.count = sizeof(khdr); /* reserved space */ - memset(&khdr, 0, sizeof(khdr)); khdr.version = 1; - list_for_each_entry_rcu(qe, &ima_measurements, later) { - if (file.count < file.size) { + /* This is an append-only list, no need to hold the RCU read lock */ + list_for_each_entry_rcu(qe, &ima_measurements, later, true) { + if (ima_kexec_file.count < ima_kexec_file.size) { khdr.count++; - ima_measurements_show(&file, qe); + ima_measurements_show(&ima_kexec_file, qe); } else { ret = -EINVAL; break; } } - if (ret < 0) - goto out; - /* * fill in reserved space with some buffer details * (eg. version, buffer size, number of measurements) */ - khdr.buffer_size = file.count; + khdr.buffer_size = ima_kexec_file.count; if (ima_canonical_fmt) { khdr.version = cpu_to_le16(khdr.version); khdr.count = cpu_to_le64(khdr.count); khdr.buffer_size = cpu_to_le64(khdr.buffer_size); } - memcpy(file.buf, &khdr, sizeof(khdr)); + memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr)); print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1, - file.buf, file.count < 100 ? file.count : 100, + ima_kexec_file.buf, ima_kexec_file.count < 100 ? 
+ ima_kexec_file.count : 100, true); - *buffer_size = file.count; - *buffer = file.buf; -out: - if (ret == -EINVAL) - vfree(file.buf); + *buffer_size = ima_kexec_file.count; + *buffer = ima_kexec_file.buf; + return ret; } @@ -86,32 +139,39 @@ void ima_add_kexec_buffer(struct kimage *image) .buf_min = 0, .buf_max = ULONG_MAX, .top_down = true }; unsigned long binary_runtime_size; + unsigned long extra_memory; /* use more understandable variable names than defined in kbuf */ + size_t kexec_buffer_size = 0; void *kexec_buffer = NULL; - size_t kexec_buffer_size; - size_t kexec_segment_size; int ret; + if (image->type == KEXEC_TYPE_CRASH) + return; + /* - * Reserve an extra half page of memory for additional measurements - * added during the kexec load. + * Reserve extra memory for measurements added during kexec. */ - binary_runtime_size = ima_get_binary_runtime_size(); + if (CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB <= 0) + extra_memory = PAGE_SIZE / 2; + else + extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024; + + binary_runtime_size = ima_get_binary_runtime_size() + extra_memory; + if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE) kexec_segment_size = ULONG_MAX; else - kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + - PAGE_SIZE / 2, PAGE_SIZE); + kexec_segment_size = ALIGN(binary_runtime_size, PAGE_SIZE); + if ((kexec_segment_size == ULONG_MAX) || ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) { pr_err("Binary measurement list too large.\n"); return; } - ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer, - kexec_segment_size); - if (!kexec_buffer) { + ret = ima_alloc_kexec_file_buf(kexec_segment_size); + if (ret < 0) { pr_err("Not enough memory for the kexec measurement buffer.\n"); return; } @@ -119,6 +179,7 @@ void ima_add_kexec_buffer(struct kimage *image) kbuf.buffer = kexec_buffer; kbuf.bufsz = kexec_buffer_size; kbuf.memsz = kexec_segment_size; + image->is_ima_segment_index_set = false; ret = kexec_add_buffer(&kbuf); if (ret) { pr_err("Error passing over kexec measurement buffer.\n"); @@ -129,10 +190,80 @@ void ima_add_kexec_buffer(struct kimage *image) image->ima_buffer_addr = kbuf.mem; image->ima_buffer_size = kexec_segment_size; image->ima_buffer = kexec_buffer; + image->ima_segment_index = image->nr_segments - 1; + image->is_ima_segment_index_set = true; kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n", kbuf.mem); } + +/* + * Called during kexec execute so that IMA can update the measurement list. + */ +static int ima_update_kexec_buffer(struct notifier_block *self, + unsigned long action, void *data) +{ + size_t buf_size = 0; + int ret = NOTIFY_OK; + void *buf = NULL; + + if (!kexec_in_progress) { + pr_info("No kexec in progress.\n"); + return ret; + } + + if (!ima_kexec_buffer) { + pr_err("Kexec buffer not set.\n"); + return ret; + } + + ret = ima_dump_measurement_list(&buf_size, &buf, kexec_segment_size); + + if (ret) + pr_err("Dump measurements failed. Error:%d\n", ret); + + if (buf_size != 0) + memcpy(ima_kexec_buffer, buf, buf_size); + + kimage_unmap_segment(ima_kexec_buffer); + ima_kexec_buffer = NULL; + + return ret; +} + +static struct notifier_block update_buffer_nb = { + .notifier_call = ima_update_kexec_buffer, + .priority = INT_MIN +}; + +/* + * Create a mapping for the source pages that contain the IMA buffer + * so we can update it later. 
+ */ +void ima_kexec_post_load(struct kimage *image) +{ + if (ima_kexec_buffer) { + kimage_unmap_segment(ima_kexec_buffer); + ima_kexec_buffer = NULL; + } + + if (!image->ima_buffer_addr) + return; + + ima_kexec_buffer = kimage_map_segment(image, + image->ima_buffer_addr, + image->ima_buffer_size); + if (!ima_kexec_buffer) { + pr_err("Could not map measurements buffer.\n"); + return; + } + + if (!ima_kexec_update_registered) { + register_reboot_notifier(&update_buffer_nb); + ima_kexec_update_registered = true; + } +} + #endif /* IMA_KEXEC */ /* diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 9b87556b03a7..5770cf691912 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -27,6 +27,7 @@ #include <linux/fs.h> #include <linux/iversion.h> #include <linux/evm.h> +#include <linux/crash_dump.h> #include "ima.h" @@ -38,11 +39,30 @@ int ima_appraise; int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1; static int hash_setup_done; +static int ima_disabled __ro_after_init; static struct notifier_block ima_lsm_policy_notifier = { .notifier_call = ima_lsm_policy_change, }; +static int __init ima_setup(char *str) +{ + if (!is_kdump_kernel()) { + pr_info("Warning: ima setup option only permitted in kdump"); + return 1; + } + + if (strncmp(str, "off", 3) == 0) + ima_disabled = 1; + else if (strncmp(str, "on", 2) == 0) + ima_disabled = 0; + else + pr_err("Invalid ima setup option: \"%s\" , please specify ima=on|off.", str); + + return 1; +} +__setup("ima=", ima_setup); + static int __init hash_setup(char *str) { struct ima_template_desc *template_desc = ima_template_desc_current(); @@ -129,16 +149,22 @@ static void ima_rdwr_violation_check(struct file *file, if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { if (!iint) iint = ima_iint_find(inode); + /* IMA_MEASURE is set from reader side */ - if (iint && test_bit(IMA_MUST_MEASURE, - &iint->atomic_flags)) + if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU, + &iint->atomic_flags)) send_tomtou = true; } } else { if (must_measure) - set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); - if (inode_is_open_for_write(inode) && must_measure) - send_writers = true; + set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags); + + /* Limit number of open_writers violations */ + if (inode_is_open_for_write(inode) && must_measure) { + if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS, + &iint->atomic_flags)) + send_writers = true; + } } if (!send_tomtou && !send_writers) @@ -167,6 +193,8 @@ static void ima_check_last_writer(struct ima_iint_cache *iint, if (atomic_read(&inode->i_writecount) == 1) { struct kstat stat; + clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags); + update = test_and_clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); if ((iint->flags & IMA_NEW_FILE) || @@ -207,7 +235,8 @@ static void ima_file_free(struct file *file) static int process_measurement(struct file *file, const struct cred *cred, struct lsm_prop *prop, char *buf, loff_t size, - int mask, enum ima_hooks func) + int mask, enum ima_hooks func, + enum kernel_read_file_id read_id) { struct inode *real_inode, *inode = file_inode(file); struct ima_iint_cache *iint = NULL; @@ -237,7 +266,9 @@ static int process_measurement(struct file *file, const struct cred *cred, &allowed_algos); violation_check = ((func == FILE_CHECK || func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && - (ima_policy_flag & IMA_MEASURE)); + (ima_policy_flag & IMA_MEASURE) && + ((action & IMA_MEASURE) || + (file->f_mode & FMODE_WRITE))); if (!action && 
!violation_check) return 0; @@ -269,10 +300,13 @@ static int process_measurement(struct file *file, const struct cred *cred, mutex_lock(&iint->mutex); if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) - /* reset appraisal flags if ima_inode_post_setattr was called */ + /* + * Reset appraisal flags (action and non-action rule-specific) + * if ima_inode_post_setattr was called. + */ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | - IMA_NONACTION_FLAGS); + IMA_NONACTION_RULE_FLAGS); /* * Re-evaulate the file if either the xattr has changed or the @@ -373,6 +407,12 @@ static int process_measurement(struct file *file, const struct cred *cred, if (rc != 0 && rc != -EBADF && rc != -EINVAL) goto out_locked; + /* Defer measuring/appraising kernel modules to READING_MODULE */ + if (read_id == READING_MODULE_COMPRESSED) { + must_appraise = 0; + goto out_locked; + } + if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ pathname = ima_d_path(&file->f_path, &pathbuf, filename); @@ -453,14 +493,14 @@ static int ima_file_mmap(struct file *file, unsigned long reqprot, if (reqprot & PROT_EXEC) { ret = process_measurement(file, current_cred(), &prop, NULL, - 0, MAY_EXEC, MMAP_CHECK_REQPROT); + 0, MAY_EXEC, MMAP_CHECK_REQPROT, 0); if (ret) return ret; } if (prot & PROT_EXEC) return process_measurement(file, current_cred(), &prop, NULL, - 0, MAY_EXEC, MMAP_CHECK); + 0, MAY_EXEC, MMAP_CHECK, 0); return 0; } @@ -540,18 +580,69 @@ static int ima_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, */ static int ima_bprm_check(struct linux_binprm *bprm) { - int ret; struct lsm_prop prop; security_current_getlsmprop_subj(&prop); - ret = process_measurement(bprm->file, current_cred(), - &prop, NULL, 0, MAY_EXEC, BPRM_CHECK); - if (ret) - return ret; - - security_cred_getlsmprop(bprm->cred, &prop); - return process_measurement(bprm->file, bprm->cred, &prop, NULL, 0, - MAY_EXEC, CREDS_CHECK); + return process_measurement(bprm->file, current_cred(), + &prop, NULL, 0, MAY_EXEC, BPRM_CHECK, 0); +} + +/** + * ima_creds_check - based on policy, collect/store measurement. + * @bprm: contains the linux_binprm structure + * @file: contains the file descriptor of the binary being executed + * + * The OS protects against an executable file, already open for write, + * from being executed in deny_write_access() and an executable file, + * already open for execute, from being modified in get_write_access(). + * So we can be certain that what we verify and measure here is actually + * what is being executed. + * + * The difference from ima_bprm_check() is that ima_creds_check() is invoked + * only after determining the final binary to be executed without interpreter, + * and not when searching for intermediate binaries. The reason is that since + * commit 56305aa9b6fab ("exec: Compute file based creds only once"), the + * credentials to be applied to the process are calculated only at that stage + * (bprm_creds_from_file security hook instead of bprm_check_security). + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. + */ +static int ima_creds_check(struct linux_binprm *bprm, const struct file *file) +{ + struct lsm_prop prop; + + security_current_getlsmprop_subj(&prop); + return process_measurement((struct file *)file, bprm->cred, &prop, NULL, + 0, MAY_EXEC, CREDS_CHECK, 0); +} + +/** + * ima_bprm_creds_for_exec - collect/store/appraise measurement. 
+ * @bprm: contains the linux_binprm structure + * + * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure + * and appraise the integrity of a file to be executed by script interpreters. + * Unlike any of the other LSM hooks where the kernel enforces file integrity, + * enforcing file integrity is left up to the discretion of the script + * interpreter (userspace). + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. + */ +static int ima_bprm_creds_for_exec(struct linux_binprm *bprm) +{ + /* + * As security_bprm_check() is called multiple times, both + * the script and the shebang interpreter are measured, appraised, + * and audited. Limit usage of this LSM hook to just measuring, + * appraising, and auditing the indirect script execution + * (e.g. ./sh example.sh). + */ + if (!bprm->is_check) + return 0; + + return ima_bprm_check(bprm); } /** @@ -571,7 +662,7 @@ static int ima_file_check(struct file *file, int mask) security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, mask & (MAY_READ | MAY_WRITE | MAY_EXEC | - MAY_APPEND), FILE_CHECK); + MAY_APPEND), FILE_CHECK, 0); } static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf, @@ -790,12 +881,13 @@ static int ima_read_file(struct file *file, enum kernel_read_file_id read_id, func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, - MAY_READ, func); + MAY_READ, func, 0); } const int read_idmap[READING_MAX_ID] = { [READING_FIRMWARE] = FIRMWARE_CHECK, [READING_MODULE] = MODULE_CHECK, + [READING_MODULE_COMPRESSED] = MODULE_CHECK, [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK, [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK, [READING_POLICY] = POLICY_CHECK @@ -833,7 +925,7 @@ static int ima_post_read_file(struct file *file, char *buf, loff_t size, func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, buf, size, - MAY_READ, func); + MAY_READ, func, read_id); } /** @@ -983,9 +1075,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap, } /* - * Both LSM hooks and auxilary based buffer measurements are - * based on policy. To avoid code duplication, differentiate - * between the LSM hooks and auxilary buffer measurements, + * Both LSM hooks and auxiliary based buffer measurements are + * based on policy. To avoid code duplication, differentiate + * between the LSM hooks and auxiliary buffer measurements, * retrieving the policy rule information only for the LSM hook * buffer measurements. 
*/ @@ -1145,6 +1237,12 @@ static int __init init_ima(void) { int error; + /*Note that turning IMA off is intentionally limited to kdump kernel.*/ + if (ima_disabled && is_kdump_kernel()) { + pr_info("IMA functionality is disabled"); + return 0; + } + ima_appraise_parse_cmdline(); ima_init_template_list(); hash_setup(CONFIG_IMA_DEFAULT_HASH); @@ -1174,6 +1272,8 @@ static int __init init_ima(void) static struct security_hook_list ima_hooks[] __ro_after_init = { LSM_HOOK_INIT(bprm_check_security, ima_bprm_check), + LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec), + LSM_HOOK_INIT(bprm_creds_from_file, ima_creds_check), LSM_HOOK_INIT(file_post_open, ima_file_check), LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile), LSM_HOOK_INIT(file_release, ima_file_free), @@ -1211,10 +1311,10 @@ struct lsm_blob_sizes ima_blob_sizes __ro_after_init = { }; DEFINE_LSM(ima) = { - .name = "ima", + .id = &ima_lsmid, .init = init_ima_lsm, .order = LSM_ORDER_LAST, .blobs = &ima_blob_sizes, + /* Start IMA after the TPM is available */ + .initcall_late = init_ima, }; - -late_initcall(init_ima); /* Start IMA after the TPM is available */ diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index dbfd554b4624..8fbd8755f5bc 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -38,6 +38,7 @@ #define IMA_GID 0x2000 #define IMA_EGID 0x4000 #define IMA_FGROUP 0x8000 +#define IMA_FS_SUBTYPE 0x10000 #define UNKNOWN 0 #define MEASURE 0x0001 /* same as IMA_MEASURE */ @@ -45,6 +46,7 @@ #define APPRAISE 0x0004 /* same as IMA_APPRAISE */ #define DONT_APPRAISE 0x0008 #define AUDIT 0x0040 +#define DONT_AUDIT 0x0080 #define HASH 0x0100 #define DONT_HASH 0x0200 @@ -119,6 +121,7 @@ struct ima_rule_entry { int type; /* audit type */ } lsm[MAX_LSM_RULES]; char *fsname; + char *fs_subtype; struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */ struct ima_rule_opt_list *label; /* Measure data grouped under this label */ struct ima_template_desc *template; @@ -148,7 +151,8 @@ static struct ima_rule_entry dont_measure_rules[] __ro_after_init = { {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .func = FILE_CHECK, + .flags = IMA_FSMAGIC | IMA_FUNC}, {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, @@ -240,7 +244,8 @@ static struct ima_rule_entry build_appraise_rules[] __ro_after_init = { static struct ima_rule_entry secure_boot_rules[] __ro_after_init = { {.action = APPRAISE, .func = MODULE_CHECK, - .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED | IMA_MODSIG_ALLOWED | + IMA_CHECK_BLACKLIST}, {.action = APPRAISE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK, @@ -396,6 +401,7 @@ static void ima_free_rule(struct ima_rule_entry *entry) * the defined_templates list and cannot be freed here */ kfree(entry->fsname); + kfree(entry->fs_subtype); ima_free_rule_opt_list(entry->keyrings); ima_lsm_free_rule(entry); kfree(entry); @@ -600,6 +606,12 @@ 
static bool ima_match_rules(struct ima_rule_entry *rule, if ((rule->flags & IMA_FSNAME) && strcmp(rule->fsname, inode->i_sb->s_type->name)) return false; + if (rule->flags & IMA_FS_SUBTYPE) { + if (!inode->i_sb->s_subtype) + return false; + if (strcmp(rule->fs_subtype, inode->i_sb->s_subtype)) + return false; + } if ((rule->flags & IMA_FSUUID) && !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid)) return false; @@ -635,7 +647,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule, return false; for (i = 0; i < MAX_LSM_RULES; i++) { int rc = 0; - struct lsm_prop prop = { }; + struct lsm_prop inode_prop = { }; if (!lsm_rule->lsm[i].rule) { if (!lsm_rule->lsm[i].args_p) @@ -649,15 +661,16 @@ retry: case LSM_OBJ_USER: case LSM_OBJ_ROLE: case LSM_OBJ_TYPE: - security_inode_getlsmprop(inode, &prop); - rc = ima_filter_rule_match(&prop, lsm_rule->lsm[i].type, + security_inode_getlsmprop(inode, &inode_prop); + rc = ima_filter_rule_match(&inode_prop, + lsm_rule->lsm[i].type, Audit_equal, lsm_rule->lsm[i].rule); break; case LSM_SUBJ_USER: case LSM_SUBJ_ROLE: case LSM_SUBJ_TYPE: - rc = ima_filter_rule_match(&prop, lsm_rule->lsm[i].type, + rc = ima_filter_rule_match(prop, lsm_rule->lsm[i].type, Audit_equal, lsm_rule->lsm[i].rule); break; @@ -672,7 +685,7 @@ retry: goto retry; } } - if (!rc) { + if (rc <= 0) { result = false; goto out; } @@ -1062,10 +1075,10 @@ void ima_update_policy(void) enum policy_opt { Opt_measure, Opt_dont_measure, Opt_appraise, Opt_dont_appraise, - Opt_audit, Opt_hash, Opt_dont_hash, + Opt_audit, Opt_dont_audit, Opt_hash, Opt_dont_hash, Opt_obj_user, Opt_obj_role, Opt_obj_type, Opt_subj_user, Opt_subj_role, Opt_subj_type, - Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname, Opt_fsuuid, + Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname, Opt_fs_subtype, Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_gid_eq, Opt_egid_eq, Opt_fowner_eq, Opt_fgroup_eq, Opt_uid_gt, Opt_euid_gt, Opt_gid_gt, Opt_egid_gt, @@ -1084,6 +1097,7 @@ static const match_table_t policy_tokens = { {Opt_appraise, "appraise"}, {Opt_dont_appraise, "dont_appraise"}, {Opt_audit, "audit"}, + {Opt_dont_audit, "dont_audit"}, {Opt_hash, "hash"}, {Opt_dont_hash, "dont_hash"}, {Opt_obj_user, "obj_user=%s"}, @@ -1096,6 +1110,7 @@ static const match_table_t policy_tokens = { {Opt_mask, "mask=%s"}, {Opt_fsmagic, "fsmagic=%s"}, {Opt_fsname, "fsname=%s"}, + {Opt_fs_subtype, "fs_subtype=%s"}, {Opt_fsuuid, "fsuuid=%s"}, {Opt_uid_eq, "uid=%s"}, {Opt_euid_eq, "euid=%s"}, @@ -1280,7 +1295,8 @@ static bool ima_validate_rule(struct ima_rule_entry *entry) if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC | IMA_UID | IMA_FOWNER | IMA_FSUUID | IMA_INMASK | IMA_EUID | IMA_PCR | - IMA_FSNAME | IMA_GID | IMA_EGID | + IMA_FSNAME | IMA_FS_SUBTYPE | + IMA_GID | IMA_EGID | IMA_FGROUP | IMA_DIGSIG_REQUIRED | IMA_PERMIT_DIRECTIO | IMA_VALIDATE_ALGOS | IMA_CHECK_BLACKLIST | IMA_VERITY_REQUIRED)) @@ -1293,7 +1309,8 @@ static bool ima_validate_rule(struct ima_rule_entry *entry) if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC | IMA_UID | IMA_FOWNER | IMA_FSUUID | IMA_INMASK | IMA_EUID | IMA_PCR | - IMA_FSNAME | IMA_GID | IMA_EGID | + IMA_FSNAME | IMA_FS_SUBTYPE | + IMA_GID | IMA_EGID | IMA_FGROUP | IMA_DIGSIG_REQUIRED | IMA_PERMIT_DIRECTIO | IMA_MODSIG_ALLOWED | IMA_CHECK_BLACKLIST | IMA_VALIDATE_ALGOS)) @@ -1306,7 +1323,8 @@ static bool ima_validate_rule(struct ima_rule_entry *entry) if (entry->flags & ~(IMA_FUNC | IMA_FSMAGIC | IMA_UID | IMA_FOWNER | IMA_FSUUID | IMA_EUID | - IMA_PCR | IMA_FSNAME | IMA_GID | IMA_EGID | + IMA_PCR | IMA_FSNAME | 
IMA_FS_SUBTYPE | + IMA_GID | IMA_EGID | IMA_FGROUP)) return false; @@ -1430,7 +1448,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) int token; unsigned long lnum; - if (result < 0) + if (result < 0 || *p == '#') /* ignore suffixed comment */ break; if ((*p == '\0') || (*p == ' ') || (*p == '\t')) continue; @@ -1476,6 +1494,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) entry->action = AUDIT; break; + case Opt_dont_audit: + ima_log_string(ab, "action", "dont_audit"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = DONT_AUDIT; + break; case Opt_hash: ima_log_string(ab, "action", "hash"); @@ -1585,6 +1611,22 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) result = 0; entry->flags |= IMA_FSNAME; break; + case Opt_fs_subtype: + ima_log_string(ab, "fs_subtype", args[0].from); + + if (entry->fs_subtype) { + result = -EINVAL; + break; + } + + entry->fs_subtype = kstrdup(args[0].from, GFP_KERNEL); + if (!entry->fs_subtype) { + result = -ENOMEM; + break; + } + result = 0; + entry->flags |= IMA_FS_SUBTYPE; + break; case Opt_keyrings: ima_log_string(ab, "keyrings", args[0].from); @@ -2095,6 +2137,8 @@ int ima_policy_show(struct seq_file *m, void *v) seq_puts(m, pt(Opt_dont_appraise)); if (entry->action & AUDIT) seq_puts(m, pt(Opt_audit)); + if (entry->action & DONT_AUDIT) + seq_puts(m, pt(Opt_dont_audit)); if (entry->action & HASH) seq_puts(m, pt(Opt_hash)); if (entry->action & DONT_HASH) @@ -2131,6 +2175,12 @@ int ima_policy_show(struct seq_file *m, void *v) seq_puts(m, " "); } + if (entry->flags & IMA_FS_SUBTYPE) { + snprintf(tbuf, sizeof(tbuf), "%s", entry->fs_subtype); + seq_printf(m, pt(Opt_fs_subtype), tbuf); + seq_puts(m, " "); + } + if (entry->flags & IMA_KEYRINGS) { seq_puts(m, "keyrings="); ima_show_rule_opt_list(m, entry->keyrings); diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index 532da87ce519..590637e81ad1 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c @@ -16,6 +16,7 @@ */ #include <linux/rculist.h> +#include <linux/reboot.h> #include <linux/slab.h> #include "ima.h" @@ -44,6 +45,12 @@ struct ima_h_table ima_htable = { */ static DEFINE_MUTEX(ima_extend_list_mutex); +/* + * Used internally by the kernel to suspend measurements. + * Protected by ima_extend_list_mutex. + */ +static bool ima_measurements_suspended; + /* lookup up the digest value in the hash table, and return the entry */ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value, int pcr) @@ -168,6 +175,18 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, int result = 0, tpmresult = 0; mutex_lock(&ima_extend_list_mutex); + + /* + * Avoid appending to the measurement log when the TPM subsystem has + * been shut down while preparing for system reboot. 
+ */ + if (ima_measurements_suspended) { + audit_cause = "measurements_suspended"; + audit_info = 0; + result = -ENODEV; + goto out; + } + if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) { if (ima_lookup_digest_entry(digest, entry->pcr)) { audit_cause = "hash_exists"; @@ -211,6 +230,36 @@ int ima_restore_measurement_entry(struct ima_template_entry *entry) return result; } +static void ima_measurements_suspend(void) +{ + mutex_lock(&ima_extend_list_mutex); + ima_measurements_suspended = true; + mutex_unlock(&ima_extend_list_mutex); +} + +static int ima_reboot_notifier(struct notifier_block *nb, + unsigned long action, + void *data) +{ +#ifdef CONFIG_IMA_KEXEC + if (action == SYS_RESTART && data && !strcmp(data, "kexec reboot")) + ima_measure_kexec_event("kexec_execute"); +#endif + + ima_measurements_suspend(); + + return NOTIFY_DONE; +} + +static struct notifier_block ima_reboot_nb = { + .notifier_call = ima_reboot_notifier, +}; + +void __init ima_init_reboot_notifier(void) +{ + register_reboot_notifier(&ima_reboot_nb); +} + int __init ima_init_digests(void) { u16 digest_size; diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index c2c2da691123..7b388b66cf80 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h @@ -114,6 +114,8 @@ struct ima_file_id { int integrity_kernel_read(struct file *file, loff_t offset, void *addr, unsigned long count); +int __init integrity_fs_init(void); +void __init integrity_fs_fini(void); #define INTEGRITY_KEYRING_EVM 0 #define INTEGRITY_KEYRING_IMA 1 diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c index c85febca3343..714c961a00f5 100644 --- a/security/integrity/platform_certs/load_powerpc.c +++ b/security/integrity/platform_certs/load_powerpc.c @@ -75,12 +75,13 @@ static int __init load_powerpc_certs(void) return -ENODEV; // Check for known secure boot implementations from OPAL or PLPKS - if (strcmp("ibm,edk2-compat-v1", buf) && strcmp("ibm,plpks-sb-v1", buf)) { + if (strcmp("ibm,edk2-compat-v1", buf) && strcmp("ibm,plpks-sb-v1", buf) && + strcmp("ibm,plpks-sb-v0", buf)) { pr_err("Unsupported secvar implementation \"%s\", not loading certs\n", buf); return -ENODEV; } - if (strcmp("ibm,plpks-sb-v1", buf) == 0) + if (strcmp("ibm,plpks-sb-v1", buf) == 0 || strcmp("ibm,plpks-sb-v0", buf) == 0) /* PLPKS authenticated variables ESL data is prefixed with 8 bytes of timestamp */ offset = 8; diff --git a/security/ipe/Kconfig b/security/ipe/Kconfig index 3c75bf267da4..a110a6cd848b 100644 --- a/security/ipe/Kconfig +++ b/security/ipe/Kconfig @@ -6,6 +6,7 @@ menuconfig SECURITY_IPE bool "Integrity Policy Enforcement (IPE)" depends on SECURITY && SECURITYFS && AUDIT && AUDITSYSCALL + select CRYPTO_LIB_SHA256 select PKCS7_MESSAGE_PARSER select SYSTEM_DATA_VERIFICATION select IPE_PROP_DM_VERITY if DM_VERITY diff --git a/security/ipe/audit.c b/security/ipe/audit.c index f05f0caa4850..3f0deeb54912 100644 --- a/security/ipe/audit.c +++ b/security/ipe/audit.c @@ -6,7 +6,7 @@ #include <linux/slab.h> #include <linux/audit.h> #include <linux/types.h> -#include <crypto/hash.h> +#include <crypto/sha2.h> #include "ipe.h" #include "eval.h" @@ -17,10 +17,12 @@ #define ACTSTR(x) ((x) == IPE_ACTION_ALLOW ? 
"ALLOW" : "DENY") -#define IPE_AUDIT_HASH_ALG "sha256" +#define IPE_AUDIT_HASH_ALG "sha256" /* keep in sync with audit_policy() */ #define AUDIT_POLICY_LOAD_FMT "policy_name=\"%s\" policy_version=%hu.%hu.%hu "\ "policy_digest=" IPE_AUDIT_HASH_ALG ":" +#define AUDIT_POLICY_LOAD_NULL_FMT "policy_name=? policy_version=? "\ + "policy_digest=?" #define AUDIT_OLD_ACTIVE_POLICY_FMT "old_active_pol_name=\"%s\" "\ "old_active_pol_version=%hu.%hu.%hu "\ "old_policy_digest=" IPE_AUDIT_HASH_ALG ":" @@ -44,6 +46,7 @@ static const char *const audit_op_names[__IPE_OP_MAX + 1] = { static const char *const audit_hook_names[__IPE_HOOK_MAX] = { "BPRM_CHECK", + "BPRM_CREDS_FOR_EXEC", "MMAP", "MPROTECT", "KERNEL_READ", @@ -180,37 +183,14 @@ static void audit_policy(struct audit_buffer *ab, const char *audit_format, const struct ipe_policy *const p) { - SHASH_DESC_ON_STACK(desc, tfm); - struct crypto_shash *tfm; - u8 *digest = NULL; + u8 digest[SHA256_DIGEST_SIZE]; - tfm = crypto_alloc_shash(IPE_AUDIT_HASH_ALG, 0, 0); - if (IS_ERR(tfm)) - return; - - desc->tfm = tfm; - - digest = kzalloc(crypto_shash_digestsize(tfm), GFP_KERNEL); - if (!digest) - goto out; - - if (crypto_shash_init(desc)) - goto out; - - if (crypto_shash_update(desc, p->pkcs7, p->pkcs7len)) - goto out; - - if (crypto_shash_final(desc, digest)) - goto out; + sha256(p->pkcs7, p->pkcs7len, digest); audit_log_format(ab, audit_format, p->parsed->name, p->parsed->version.major, p->parsed->version.minor, p->parsed->version.rev); - audit_log_n_hex(ab, digest, crypto_shash_digestsize(tfm)); - -out: - kfree(digest); - crypto_free_shash(tfm); + audit_log_n_hex(ab, digest, sizeof(digest)); } /** @@ -248,22 +228,29 @@ void ipe_audit_policy_activation(const struct ipe_policy *const op, } /** - * ipe_audit_policy_load() - Audit a policy being loaded into the kernel. - * @p: Supplies a pointer to the policy to audit. + * ipe_audit_policy_load() - Audit a policy loading event. + * @p: Supplies a pointer to the policy to audit or an error pointer. 
*/ void ipe_audit_policy_load(const struct ipe_policy *const p) { struct audit_buffer *ab; + int err = 0; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_IPE_POLICY_LOAD); if (!ab) return; - audit_policy(ab, AUDIT_POLICY_LOAD_FMT, p); - audit_log_format(ab, " auid=%u ses=%u lsm=ipe res=1", + if (!IS_ERR(p)) { + audit_policy(ab, AUDIT_POLICY_LOAD_FMT, p); + } else { + audit_log_format(ab, AUDIT_POLICY_LOAD_NULL_FMT); + err = PTR_ERR(p); + } + + audit_log_format(ab, " auid=%u ses=%u lsm=ipe res=%d errno=%d", from_kuid(&init_user_ns, audit_get_loginuid(current)), - audit_get_sessionid(current)); + audit_get_sessionid(current), !err, err); audit_log_end(ab); } diff --git a/security/ipe/fs.c b/security/ipe/fs.c index 5b6d19fb844a..076c111c85c8 100644 --- a/security/ipe/fs.c +++ b/security/ipe/fs.c @@ -12,11 +12,8 @@ #include "policy.h" #include "audit.h" -static struct dentry *np __ro_after_init; static struct dentry *root __ro_after_init; struct dentry *policy_root __ro_after_init; -static struct dentry *audit_node __ro_after_init; -static struct dentry *enforce_node __ro_after_init; /** * setaudit() - Write handler for the securityfs node, "ipe/success_audit" @@ -133,6 +130,8 @@ static ssize_t getenforce(struct file *f, char __user *data, * * %-ERANGE - Policy version number overflow * * %-EINVAL - Policy version parsing error * * %-EEXIST - Same name policy already deployed + * * %-ENOKEY - Policy signing key not found + * * %-EKEYREJECTED - Policy signature verification failed */ static ssize_t new_policy(struct file *f, const char __user *data, size_t len, loff_t *offset) @@ -141,12 +140,17 @@ static ssize_t new_policy(struct file *f, const char __user *data, char *copy = NULL; int rc = 0; - if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) - return -EPERM; + if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) { + rc = -EPERM; + goto out; + } copy = memdup_user_nul(data, len); - if (IS_ERR(copy)) - return PTR_ERR(copy); + if (IS_ERR(copy)) { + rc = PTR_ERR(copy); + copy = NULL; + goto out; + } p = ipe_new_policy(NULL, 0, copy, len); if (IS_ERR(p)) { @@ -158,12 +162,14 @@ static ssize_t new_policy(struct file *f, const char __user *data, if (rc) goto out; - ipe_audit_policy_load(p); - out: - if (rc < 0) - ipe_free_policy(p); kfree(copy); + if (rc < 0) { + ipe_free_policy(p); + ipe_audit_policy_load(ERR_PTR(rc)); + } else { + ipe_audit_policy_load(p); + } return (rc < 0) ? rc : len; } @@ -187,31 +193,30 @@ static const struct file_operations enforce_fops = { * Return: %0 on success. If an error occurs, the function will return * the -errno. 
*/ -static int __init ipe_init_securityfs(void) +int __init ipe_init_securityfs(void) { int rc = 0; struct ipe_policy *ap; + struct dentry *dentry; if (!ipe_enabled) return -EOPNOTSUPP; root = securityfs_create_dir("ipe", NULL); - if (IS_ERR(root)) { - rc = PTR_ERR(root); - goto err; - } + if (IS_ERR(root)) + return PTR_ERR(root); - audit_node = securityfs_create_file("success_audit", 0600, root, + dentry = securityfs_create_file("success_audit", 0600, root, NULL, &audit_fops); - if (IS_ERR(audit_node)) { - rc = PTR_ERR(audit_node); + if (IS_ERR(dentry)) { + rc = PTR_ERR(dentry); goto err; } - enforce_node = securityfs_create_file("enforce", 0600, root, NULL, + dentry = securityfs_create_file("enforce", 0600, root, NULL, &enforce_fops); - if (IS_ERR(enforce_node)) { - rc = PTR_ERR(enforce_node); + if (IS_ERR(dentry)) { + rc = PTR_ERR(dentry); goto err; } @@ -228,20 +233,14 @@ static int __init ipe_init_securityfs(void) goto err; } - np = securityfs_create_file("new_policy", 0200, root, NULL, &np_fops); - if (IS_ERR(np)) { - rc = PTR_ERR(np); + dentry = securityfs_create_file("new_policy", 0200, root, NULL, &np_fops); + if (IS_ERR(dentry)) { + rc = PTR_ERR(dentry); goto err; } return 0; err: - securityfs_remove(np); - securityfs_remove(policy_root); - securityfs_remove(enforce_node); - securityfs_remove(audit_node); securityfs_remove(root); return rc; } - -fs_initcall(ipe_init_securityfs); diff --git a/security/ipe/hooks.c b/security/ipe/hooks.c index d0323b81cd8f..603abdc9ce3b 100644 --- a/security/ipe/hooks.c +++ b/security/ipe/hooks.c @@ -36,6 +36,33 @@ int ipe_bprm_check_security(struct linux_binprm *bprm) } /** + * ipe_bprm_creds_for_exec() - ipe security hook function for bprm creds check. + * @bprm: Supplies a pointer to a linux_binprm structure to source the file + * being evaluated. + * + * This LSM hook is called when userspace signals the kernel to check a file + * for execution through the execveat syscall with the AT_EXECVE_CHECK flag. + * The hook triggers IPE policy evaluation on the script file and returns + * the policy decision to userspace. The userspace program receives the + * return code and can decide whether to proceed with script execution. + * + * Return: + * * %0 - Success + * * %-EACCES - Did not pass IPE policy + */ +int ipe_bprm_creds_for_exec(struct linux_binprm *bprm) +{ + struct ipe_eval_ctx ctx = IPE_EVAL_CTX_INIT; + + if (!bprm->is_check) + return 0; + + ipe_build_eval_ctx(&ctx, bprm->file, IPE_OP_EXEC, + IPE_HOOK_BPRM_CREDS_FOR_EXEC); + return ipe_evaluate_event(&ctx); +} + +/** * ipe_mmap_file() - ipe security hook function for mmap check. * @f: File being mmap'd. Can be NULL in the case of anonymous memory. * @reqprot: The requested protection on the mmap, passed from usermode. 
@@ -118,6 +145,7 @@ int ipe_kernel_read_file(struct file *file, enum kernel_read_file_id id, op = IPE_OP_FIRMWARE; break; case READING_MODULE: + case READING_MODULE_COMPRESSED: op = IPE_OP_KERNEL_MODULE; break; case READING_KEXEC_INITRAMFS: @@ -311,4 +339,4 @@ int ipe_inode_setintegrity(const struct inode *inode, return -EINVAL; } -#endif /* CONFIG_CONFIG_IPE_PROP_FS_VERITY_BUILTIN_SIG */ +#endif /* CONFIG_IPE_PROP_FS_VERITY_BUILTIN_SIG */ diff --git a/security/ipe/hooks.h b/security/ipe/hooks.h index 38d4a387d039..07db37332740 100644 --- a/security/ipe/hooks.h +++ b/security/ipe/hooks.h @@ -13,6 +13,7 @@ enum ipe_hook_type { IPE_HOOK_BPRM_CHECK = 0, + IPE_HOOK_BPRM_CREDS_FOR_EXEC, IPE_HOOK_MMAP, IPE_HOOK_MPROTECT, IPE_HOOK_KERNEL_READ, @@ -24,6 +25,8 @@ enum ipe_hook_type { int ipe_bprm_check_security(struct linux_binprm *bprm); +int ipe_bprm_creds_for_exec(struct linux_binprm *bprm); + int ipe_mmap_file(struct file *f, unsigned long reqprot, unsigned long prot, unsigned long flags); diff --git a/security/ipe/ipe.c b/security/ipe/ipe.c index 4317134cb0da..495bb765de1b 100644 --- a/security/ipe/ipe.c +++ b/security/ipe/ipe.c @@ -47,6 +47,7 @@ struct ipe_inode *ipe_inode(const struct inode *inode) static struct security_hook_list ipe_hooks[] __ro_after_init = { LSM_HOOK_INIT(bprm_check_security, ipe_bprm_check_security), + LSM_HOOK_INIT(bprm_creds_for_exec, ipe_bprm_creds_for_exec), LSM_HOOK_INIT(mmap_file, ipe_mmap_file), LSM_HOOK_INIT(file_mprotect, ipe_file_mprotect), LSM_HOOK_INIT(kernel_read_file, ipe_kernel_read_file), @@ -92,7 +93,8 @@ static int __init ipe_init(void) } DEFINE_LSM(ipe) = { - .name = "ipe", + .id = &ipe_lsmid, .init = ipe_init, .blobs = &ipe_blobs, + .initcall_fs = ipe_init_securityfs, }; diff --git a/security/ipe/ipe.h b/security/ipe/ipe.h index fb37513812dd..25cfdb8f0c20 100644 --- a/security/ipe/ipe.h +++ b/security/ipe/ipe.h @@ -23,4 +23,6 @@ struct ipe_bdev *ipe_bdev(struct block_device *b); struct ipe_inode *ipe_inode(const struct inode *inode); #endif /* CONFIG_IPE_PROP_FS_VERITY_BUILTIN_SIG */ +int ipe_init_securityfs(void); + #endif /* _IPE_H */ diff --git a/security/ipe/policy.c b/security/ipe/policy.c index b628f696e32b..1c58c29886e8 100644 --- a/security/ipe/policy.c +++ b/security/ipe/policy.c @@ -84,8 +84,11 @@ static int set_pkcs7_data(void *ctx, const void *data, size_t len, * ipe_new_policy. * * Context: Requires root->i_rwsem to be held. - * Return: %0 on success. If an error occurs, the function will return - * the -errno. 
+ * Return: + * * %0 - Success + * * %-ENOENT - Policy was deleted while updating + * * %-EINVAL - Policy name mismatch + * * %-ESTALE - Policy version too old */ int ipe_update_policy(struct inode *root, const char *text, size_t textlen, const char *pkcs7, size_t pkcs7len) @@ -146,10 +149,12 @@ err: * * Return: * * a pointer to the ipe_policy structure - Success - * * %-EBADMSG - Policy is invalid - * * %-ENOMEM - Out of memory (OOM) - * * %-ERANGE - Policy version number overflow - * * %-EINVAL - Policy version parsing error + * * %-EBADMSG - Policy is invalid + * * %-ENOMEM - Out of memory (OOM) + * * %-ERANGE - Policy version number overflow + * * %-EINVAL - Policy version parsing error + * * %-ENOKEY - Policy signing key not found + * * %-EKEYREJECTED - Policy signature verification failed */ struct ipe_policy *ipe_new_policy(const char *text, size_t textlen, const char *pkcs7, size_t pkcs7len) diff --git a/security/ipe/policy_fs.c b/security/ipe/policy_fs.c index 3bcd8cbd09df..9d92d8a14b13 100644 --- a/security/ipe/policy_fs.c +++ b/security/ipe/policy_fs.c @@ -12,11 +12,16 @@ #include "policy.h" #include "eval.h" #include "fs.h" +#include "audit.h" #define MAX_VERSION_SIZE ARRAY_SIZE("65535.65535.65535") /** - * ipefs_file - defines a file in securityfs. + * struct ipefs_file - defines a file in securityfs. + * + * @name: file name inside the policy subdirectory + * @access: file permissions + * @fops: &file_operations specific to this file */ struct ipefs_file { const char *name; @@ -282,8 +287,13 @@ static ssize_t getactive(struct file *f, char __user *data, * On success this updates the policy represented by $name, * in-place. * - * Return: Length of buffer written on success. If an error occurs, - * the function will return the -errno. + * Return: + * * Length of buffer written - Success + * * %-EPERM - Insufficient permission + * * %-ENOMEM - Out of memory (OOM) + * * %-ENOENT - Policy was deleted while updating + * * %-EINVAL - Policy name mismatch + * * %-ESTALE - Policy version too old */ static ssize_t update_policy(struct file *f, const char __user *data, size_t len, loff_t *offset) @@ -292,21 +302,29 @@ static ssize_t update_policy(struct file *f, const char __user *data, char *copy = NULL; int rc = 0; - if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) - return -EPERM; + if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) { + rc = -EPERM; + goto out; + } copy = memdup_user(data, len); - if (IS_ERR(copy)) - return PTR_ERR(copy); + if (IS_ERR(copy)) { + rc = PTR_ERR(copy); + copy = NULL; + goto out; + } root = d_inode(f->f_path.dentry->d_parent); inode_lock(root); rc = ipe_update_policy(root, NULL, 0, copy, len); inode_unlock(root); +out: kfree(copy); - if (rc) + if (rc) { + ipe_audit_policy_load(ERR_PTR(rc)); return rc; + } return len; } @@ -401,7 +419,7 @@ static const struct file_operations delete_fops = { .write = delete_policy, }; -/** +/* * policy_subdir - files under a policy subdirectory */ static const struct ipefs_file policy_subdir[] = { @@ -420,7 +438,7 @@ static const struct ipefs_file policy_subdir[] = { */ void ipe_del_policyfs_node(struct ipe_policy *p) { - securityfs_recursive_remove(p->policyfs); + securityfs_remove(p->policyfs); p->policyfs = NULL; } @@ -467,6 +485,6 @@ int ipe_new_policyfs_node(struct ipe_policy *p) return 0; err: - securityfs_recursive_remove(policyfs); + securityfs_remove(policyfs); return rc; } diff --git a/security/keys/Kconfig b/security/keys/Kconfig index abb03a1b2a5c..84f39e50ca36 100644 --- a/security/keys/Kconfig +++ 
b/security/keys/Kconfig @@ -3,7 +3,7 @@ # Key management configuration # -config KEYS +menuconfig KEYS bool "Enable access key retention support" select ASSOCIATIVE_ARRAY help @@ -21,9 +21,10 @@ config KEYS If you are unsure as to whether this is required, answer N. +if KEYS + config KEYS_REQUEST_CACHE bool "Enable temporary caching of the last request_key() result" - depends on KEYS help This option causes the result of the last successful request_key() call that didn't upcall to the kernel to be cached temporarily in the @@ -41,7 +42,6 @@ config KEYS_REQUEST_CACHE config PERSISTENT_KEYRINGS bool "Enable register of persistent per-UID keyrings" - depends on KEYS help This option provides a register of persistent per-UID keyrings, primarily aimed at Kerberos key storage. The keyrings are persistent @@ -58,9 +58,8 @@ config PERSISTENT_KEYRINGS config BIG_KEYS bool "Large payload keys" - depends on KEYS depends on TMPFS - depends on CRYPTO_LIB_CHACHA20POLY1305 = y + select CRYPTO_LIB_CHACHA20POLY1305 help This option provides support for holding large keys within the kernel (for example Kerberos ticket caches). The data may be stored out to @@ -70,7 +69,6 @@ config BIG_KEYS config TRUSTED_KEYS tristate "TRUSTED KEYS" - depends on KEYS help This option provides support for creating, sealing, and unsealing keys in the kernel. Trusted keys are random number symmetric keys, @@ -85,12 +83,10 @@ endif config ENCRYPTED_KEYS tristate "ENCRYPTED KEYS" - depends on KEYS select CRYPTO - select CRYPTO_HMAC select CRYPTO_AES select CRYPTO_CBC - select CRYPTO_SHA256 + select CRYPTO_LIB_SHA256 select CRYPTO_RNG help This option provides support for create/encrypting/decrypting keys @@ -114,7 +110,6 @@ config USER_DECRYPTED_DATA config KEY_DH_OPERATIONS bool "Diffie-Hellman operations on retained keys" - depends on KEYS select CRYPTO select CRYPTO_KDF800108_CTR select CRYPTO_DH @@ -127,9 +122,11 @@ config KEY_DH_OPERATIONS config KEY_NOTIFICATIONS bool "Provide key/keyring change notifications" - depends on KEYS && WATCH_QUEUE + depends on WATCH_QUEUE help This option provides support for getting change notifications on keys and keyrings on which the caller has View permission. This makes use of pipes to handle the notification buffer and provides KEYCTL_WATCH_KEY to enable/disable watches. 
+ +endif # KEYS diff --git a/security/keys/big_key.c b/security/keys/big_key.c index c3367622c683..d46862ab90d6 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -66,7 +66,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) BUILD_BUG_ON(sizeof(*payload) != sizeof(prep->payload.data)); - if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) + if (datalen == 0 || datalen > 1024 * 1024 || !prep->data) return -EINVAL; /* Set an arbitrary quota */ diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c index 8fdd76105ce3..2fc6f3a66135 100644 --- a/security/keys/encrypted-keys/ecryptfs_format.c +++ b/security/keys/encrypted-keys/ecryptfs_format.c @@ -54,8 +54,7 @@ int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok, auth_tok->version = (((uint16_t)(major << 8) & 0xFF00) | ((uint16_t)minor & 0x00FF)); auth_tok->token_type = ECRYPTFS_PASSWORD; - strncpy((char *)auth_tok->token.password.signature, key_desc, - ECRYPTFS_PASSWORD_SIG_SIZE); + strscpy_pad(auth_tok->token.password.signature, key_desc); auth_tok->token.password.session_key_encryption_key_bytes = ECRYPTFS_MAX_KEY_BYTES; /* diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 831cb84fd75a..596e7a30bd3c 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -27,7 +27,6 @@ #include <linux/scatterlist.h> #include <linux/ctype.h> #include <crypto/aes.h> -#include <crypto/hash.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> #include <crypto/utils.h> @@ -37,8 +36,6 @@ static const char KEY_TRUSTED_PREFIX[] = "trusted:"; static const char KEY_USER_PREFIX[] = "user:"; -static const char hash_alg[] = "sha256"; -static const char hmac_alg[] = "hmac(sha256)"; static const char blkcipher_alg[] = "cbc(aes)"; static const char key_format_default[] = "default"; static const char key_format_ecryptfs[] = "ecryptfs"; @@ -54,8 +51,6 @@ static int blksize; #define MIN_DATA_SIZE 20 #define KEY_ENC32_PAYLOAD_LEN 32 -static struct crypto_shash *hash_tfm; - enum { Opt_new, Opt_load, Opt_update, Opt_err }; @@ -329,26 +324,6 @@ error: return ukey; } -static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, - const u8 *buf, unsigned int buflen) -{ - struct crypto_shash *tfm; - int err; - - tfm = crypto_alloc_shash(hmac_alg, 0, 0); - if (IS_ERR(tfm)) { - pr_err("encrypted_key: can't alloc %s transform: %ld\n", - hmac_alg, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = crypto_shash_setkey(tfm, key, keylen); - if (!err) - err = crypto_shash_tfm_digest(tfm, buf, buflen, digest); - crypto_free_shash(tfm); - return err; -} - enum derived_key_type { ENC_KEY, AUTH_KEY }; /* Derive authentication/encryption key from trusted key */ @@ -357,7 +332,6 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, { u8 *derived_buf; unsigned int derived_buf_len; - int ret; derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen; if (derived_buf_len < HASH_SIZE) @@ -374,10 +348,9 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, memcpy(derived_buf + strlen(derived_buf) + 1, master_key, master_keylen); - ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len, - derived_key); + sha256(derived_buf, derived_buf_len, derived_key); kfree_sensitive(derived_buf); - return ret; + return 0; } static struct skcipher_request *init_skcipher_req(const u8 *key, @@ -503,10 +476,10 @@ static int 
datablob_hmac_append(struct encrypted_key_payload *epayload, goto out; digest = epayload->format + epayload->datablob_len; - ret = calc_hmac(digest, derived_key, sizeof derived_key, - epayload->format, epayload->datablob_len); - if (!ret) - dump_hmac(NULL, digest, HASH_SIZE); + hmac_sha256_usingrawkey(derived_key, sizeof(derived_key), + epayload->format, epayload->datablob_len, + digest); + dump_hmac(NULL, digest, HASH_SIZE); out: memzero_explicit(derived_key, sizeof(derived_key)); return ret; @@ -534,9 +507,8 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload, } else p = epayload->format; - ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); - if (ret < 0) - goto out; + hmac_sha256_usingrawkey(derived_key, sizeof(derived_key), p, len, + digest); ret = crypto_memneq(digest, epayload->format + epayload->datablob_len, sizeof(digest)); if (ret) { @@ -823,7 +795,7 @@ static int encrypted_instantiate(struct key *key, size_t datalen = prep->datalen; int ret; - if (datalen <= 0 || datalen > 32767 || !prep->data) + if (datalen == 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); @@ -884,7 +856,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) if (key_is_negative(key)) return -ENOKEY; - if (datalen <= 0 || datalen > 32767 || !prep->data) + if (datalen == 0 || datalen > 32767 || !prep->data) return -EINVAL; buf = kmalloc(datalen + 1, GFP_KERNEL); @@ -1011,29 +983,14 @@ static int __init init_encrypted(void) { int ret; - hash_tfm = crypto_alloc_shash(hash_alg, 0, 0); - if (IS_ERR(hash_tfm)) { - pr_err("encrypted_key: can't allocate %s transform: %ld\n", - hash_alg, PTR_ERR(hash_tfm)); - return PTR_ERR(hash_tfm); - } - ret = aes_get_sizes(); if (ret < 0) - goto out; - ret = register_key_type(&key_type_encrypted); - if (ret < 0) - goto out; - return 0; -out: - crypto_free_shash(hash_tfm); - return ret; - + return ret; + return register_key_type(&key_type_encrypted); } static void __exit cleanup_encrypted(void) { - crypto_free_shash(hash_tfm); unregister_key_type(&key_type_encrypted); } diff --git a/security/keys/gc.c b/security/keys/gc.c index 7d687b0962b1..748e83818a76 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -218,8 +218,10 @@ continue_scanning: key = rb_entry(cursor, struct key, serial_node); cursor = rb_next(cursor); - if (refcount_read(&key->usage) == 0) + if (!test_bit_acquire(KEY_FLAG_USER_ALIVE, &key->flags)) { + /* Clobber key->user after final put seen. */ goto found_unreferenced_key; + } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) { if (key->type == key_gc_dead_keytype) { diff --git a/security/keys/key.c b/security/keys/key.c index 3d7d185019d3..3bbdde778631 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -298,6 +298,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->restrict_link = restrict_link; key->last_used_at = ktime_get_real_seconds(); + key->flags |= 1 << KEY_FLAG_USER_ALIVE; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_BUILT_IN) @@ -658,6 +659,8 @@ void key_put(struct key *key) key->user->qnbytes -= key->quotalen; spin_unlock_irqrestore(&key->user->lock, flags); } + /* Mark key as safe for GC after key->user done. 
*/ + clear_bit_unlock(KEY_FLAG_USER_ALIVE, &key->flags); schedule_work(&key_gc_work); } } diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index b5d5333ab330..a63c46bb2d14 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -51,7 +51,7 @@ static struct key *get_user_register(struct user_namespace *user_ns) if (!reg_keyring) { reg_keyring = keyring_alloc(".user_reg", user_ns->owner, INVALID_GID, - &init_cred, + kernel_cred(), KEY_POS_WRITE | KEY_POS_SEARCH | KEY_USR_VIEW | KEY_USR_READ, 0, diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index 91f000eef3ad..cde08c478f32 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -9,7 +9,7 @@ #include <linux/sysctl.h> #include "internal.h" -static struct ctl_table key_sysctls[] = { +static const struct ctl_table key_sysctls[] = { { .procname = "maxkeys", .data = &key_quota_maxkeys, diff --git a/security/keys/trusted-keys/Kconfig b/security/keys/trusted-keys/Kconfig index 1fb8aa001995..204a68c1429d 100644 --- a/security/keys/trusted-keys/Kconfig +++ b/security/keys/trusted-keys/Kconfig @@ -5,10 +5,9 @@ config TRUSTED_KEYS_TPM bool "TPM-based trusted keys" depends on TCG_TPM >= TRUSTED_KEYS default y - select CRYPTO - select CRYPTO_HMAC - select CRYPTO_SHA1 select CRYPTO_HASH_INFO + select CRYPTO_LIB_SHA1 + select CRYPTO_LIB_UTILS select ASN1_ENCODER select OID_REGISTRY select ASN1 diff --git a/security/keys/trusted-keys/trusted_caam.c b/security/keys/trusted-keys/trusted_caam.c index e3415c520c0a..601943ce0d60 100644 --- a/security/keys/trusted-keys/trusted_caam.c +++ b/security/keys/trusted-keys/trusted_caam.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de> + * Copyright 2025 NXP */ #include <keys/trusted_caam.h> #include <keys/trusted-type.h> #include <linux/build_bug.h> #include <linux/key-type.h> +#include <linux/parser.h> #include <soc/fsl/caam-blob.h> static struct caam_blob_priv *blobifier; @@ -16,6 +18,77 @@ static struct caam_blob_priv *blobifier; static_assert(MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD <= CAAM_BLOB_MAX_LEN); static_assert(MAX_BLOB_SIZE <= CAAM_BLOB_MAX_LEN); +enum { + opt_err, + opt_key_enc_algo, +}; + +static const match_table_t key_tokens = { + {opt_key_enc_algo, "key_enc_algo=%s"}, + {opt_err, NULL} +}; + +#ifdef CAAM_DEBUG +static inline void dump_options(const struct caam_pkey_info *pkey_info) +{ + pr_info("key encryption algo %d\n", pkey_info->key_enc_algo); +} +#else +static inline void dump_options(const struct caam_pkey_info *pkey_info) +{ +} +#endif + +static int get_pkey_options(char *c, + struct caam_pkey_info *pkey_info) +{ + substring_t args[MAX_OPT_ARGS]; + unsigned long token_mask = 0; + u16 key_enc_algo; + char *p = c; + int token; + int res; + + if (!c) + return 0; + + while ((p = strsep(&c, " \t"))) { + if (*p == '\0' || *p == ' ' || *p == '\t') + continue; + token = match_token(p, key_tokens, args); + if (test_and_set_bit(token, &token_mask)) + return -EINVAL; + + switch (token) { + case opt_key_enc_algo: + res = kstrtou16(args[0].from, 16, &key_enc_algo); + if (res < 0) + return -EINVAL; + pkey_info->key_enc_algo = key_enc_algo; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static bool is_key_pkey(char **datablob) +{ + char *c = NULL; + + do { + /* Second argument onwards, + * determine if tied to HW + */ + c = strsep(datablob, " \t"); + if (c && (strcmp(c, "pk") == 0)) + return true; + } while (c); + + return false; +} + static int 
trusted_caam_seal(struct trusted_key_payload *p, char *datablob) { int ret; @@ -25,11 +98,30 @@ static int trusted_caam_seal(struct trusted_key_payload *p, char *datablob) .key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1, }; + /* + * If it is to be treated as protected key, + * read next arguments too. + */ + if (is_key_pkey(&datablob)) { + info.pkey_info.plain_key_sz = p->key_len; + info.pkey_info.is_pkey = 1; + ret = get_pkey_options(datablob, &info.pkey_info); + if (ret < 0) + return 0; + dump_options(&info.pkey_info); + } + ret = caam_encap_blob(blobifier, &info); if (ret) return ret; p->blob_len = info.output_len; + if (info.pkey_info.is_pkey) { + p->key_len = p->blob_len + sizeof(struct caam_pkey_info); + memcpy(p->key, &info.pkey_info, sizeof(struct caam_pkey_info)); + memcpy(p->key + sizeof(struct caam_pkey_info), p->blob, p->blob_len); + } + return 0; } @@ -42,11 +134,27 @@ static int trusted_caam_unseal(struct trusted_key_payload *p, char *datablob) .key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1, }; + if (is_key_pkey(&datablob)) { + info.pkey_info.plain_key_sz = p->blob_len - CAAM_BLOB_OVERHEAD; + info.pkey_info.is_pkey = 1; + ret = get_pkey_options(datablob, &info.pkey_info); + if (ret < 0) + return 0; + dump_options(&info.pkey_info); + + p->key_len = p->blob_len + sizeof(struct caam_pkey_info); + memcpy(p->key, &info.pkey_info, sizeof(struct caam_pkey_info)); + memcpy(p->key + sizeof(struct caam_pkey_info), p->blob, p->blob_len); + + return 0; + } + ret = caam_decap_blob(blobifier, &info); if (ret) return ret; p->key_len = info.output_len; + return 0; } diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c index e2d9644efde1..b1680ee53f86 100644 --- a/security/keys/trusted-keys/trusted_core.c +++ b/security/keys/trusted-keys/trusted_core.c @@ -157,7 +157,7 @@ static int trusted_instantiate(struct key *key, int key_cmd; size_t key_len; - if (datalen <= 0 || datalen > 32767 || !prep->data) + if (datalen == 0 || datalen > 32767 || !prep->data) return -EINVAL; orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL); @@ -240,7 +240,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) p = key->payload.data[0]; if (!p->migratable) return -EPERM; - if (datalen <= 0 || datalen > 32767 || !prep->data) + if (datalen == 0 || datalen > 32767 || !prep->data) return -EINVAL; orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL); diff --git a/security/keys/trusted-keys/trusted_dcp.c b/security/keys/trusted-keys/trusted_dcp.c index e908c53a803c..7b6eb655df0c 100644 --- a/security/keys/trusted-keys/trusted_dcp.c +++ b/security/keys/trusted-keys/trusted_dcp.c @@ -201,12 +201,16 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob) { struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob; int blen, ret; - u8 plain_blob_key[AES_KEYSIZE_128]; + u8 *plain_blob_key; blen = calc_blob_len(p->key_len); if (blen > MAX_BLOB_SIZE) return -E2BIG; + plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL); + if (!plain_blob_key) + return -ENOMEM; + b->fmt_version = DCP_BLOB_VERSION; get_random_bytes(b->nonce, AES_KEYSIZE_128); get_random_bytes(plain_blob_key, AES_KEYSIZE_128); @@ -229,7 +233,8 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob) ret = 0; out: - memzero_explicit(plain_blob_key, sizeof(plain_blob_key)); + memzero_explicit(plain_blob_key, AES_KEYSIZE_128); + kfree(plain_blob_key); return ret; } @@ -238,7 +243,7 @@ static int trusted_dcp_unseal(struct 
trusted_key_payload *p, char *datablob) { struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob; int blen, ret; - u8 plain_blob_key[AES_KEYSIZE_128]; + u8 *plain_blob_key = NULL; if (b->fmt_version != DCP_BLOB_VERSION) { pr_err("DCP blob has bad version: %i, expected %i\n", @@ -256,6 +261,12 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob) goto out; } + plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL); + if (!plain_blob_key) { + ret = -ENOMEM; + goto out; + } + ret = decrypt_blob_key(b->blob_key, plain_blob_key); if (ret) { pr_err("Unable to decrypt blob key: %i\n", ret); @@ -271,7 +282,10 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob) ret = 0; out: - memzero_explicit(plain_blob_key, sizeof(plain_blob_key)); + if (plain_blob_key) { + memzero_explicit(plain_blob_key, AES_KEYSIZE_128); + kfree(plain_blob_key); + } return ret; } diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c index 89c9798d1800..636acb66a4f6 100644 --- a/security/keys/trusted-keys/trusted_tpm1.c +++ b/security/keys/trusted-keys/trusted_tpm1.c @@ -7,6 +7,8 @@ */ #include <crypto/hash_info.h> +#include <crypto/sha1.h> +#include <crypto/utils.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> @@ -14,78 +16,92 @@ #include <linux/err.h> #include <keys/trusted-type.h> #include <linux/key-type.h> -#include <linux/crypto.h> -#include <crypto/hash.h> -#include <crypto/sha1.h> #include <linux/tpm.h> #include <linux/tpm_command.h> #include <keys/trusted_tpm.h> -static const char hmac_alg[] = "hmac(sha1)"; -static const char hash_alg[] = "sha1"; static struct tpm_chip *chip; static struct tpm_digest *digests; -struct sdesc { - struct shash_desc shash; - char ctx[]; +/* implementation specific TPM constants */ +#define TPM_SIZE_OFFSET 2 +#define TPM_RETURN_OFFSET 6 +#define TPM_DATA_OFFSET 10 + +#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset])) +#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset]) +#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset])) + +struct osapsess { + uint32_t handle; + unsigned char secret[SHA1_DIGEST_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; }; -static struct crypto_shash *hashalg; -static struct crypto_shash *hmacalg; +/* discrete values, but have to store in uint16_t for TPM use */ +enum { + SEAL_keytype = 1, + SRK_keytype = 4 +}; -static struct sdesc *init_sdesc(struct crypto_shash *alg) +#define TPM_DEBUG 0 + +#if TPM_DEBUG +static inline void dump_options(struct trusted_key_options *o) { - struct sdesc *sdesc; - int size; - - size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); - sdesc = kmalloc(size, GFP_KERNEL); - if (!sdesc) - return ERR_PTR(-ENOMEM); - sdesc->shash.tfm = alg; - return sdesc; + pr_info("sealing key type %d\n", o->keytype); + pr_info("sealing key handle %0X\n", o->keyhandle); + pr_info("pcrlock %d\n", o->pcrlock); + pr_info("pcrinfo %d\n", o->pcrinfo_len); + print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE, + 16, 1, o->pcrinfo, o->pcrinfo_len, 0); } -static int TSS_sha1(const unsigned char *data, unsigned int datalen, - unsigned char *digest) +static inline void dump_sess(struct osapsess *s) { - struct sdesc *sdesc; - int ret; + print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE, + 16, 1, &s->handle, 4, 0); + pr_info("secret:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0); + pr_info("trusted-key: enonce:\n"); + 
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0); +} - sdesc = init_sdesc(hashalg); - if (IS_ERR(sdesc)) { - pr_info("can't alloc %s\n", hash_alg); - return PTR_ERR(sdesc); - } +static inline void dump_tpm_buf(unsigned char *buf) +{ + int len; - ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); - kfree_sensitive(sdesc); - return ret; + pr_info("\ntpm buffer\n"); + len = LOAD32(buf, TPM_SIZE_OFFSET); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0); +} +#else +static inline void dump_options(struct trusted_key_options *o) +{ } +static inline void dump_sess(struct osapsess *s) +{ +} + +static inline void dump_tpm_buf(unsigned char *buf) +{ +} +#endif + static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, ...) { - struct sdesc *sdesc; + struct hmac_sha1_ctx hmac_ctx; va_list argp; unsigned int dlen; unsigned char *data; - int ret; + int ret = 0; - sdesc = init_sdesc(hmacalg); - if (IS_ERR(sdesc)) { - pr_info("can't alloc %s\n", hmac_alg); - return PTR_ERR(sdesc); - } - - ret = crypto_shash_setkey(hmacalg, key, keylen); - if (ret < 0) - goto out; - ret = crypto_shash_init(&sdesc->shash); - if (ret < 0) - goto out; + hmac_sha1_init_usingrawkey(&hmac_ctx, key, keylen); va_start(argp, keylen); for (;;) { @@ -97,46 +113,34 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, ret = -EINVAL; break; } - ret = crypto_shash_update(&sdesc->shash, data, dlen); - if (ret < 0) - break; + hmac_sha1_update(&hmac_ctx, data, dlen); } va_end(argp); if (!ret) - ret = crypto_shash_final(&sdesc->shash, digest); -out: - kfree_sensitive(sdesc); + hmac_sha1_final(&hmac_ctx, digest); return ret; } /* * calculate authorization info fields to send to TPM */ -int TSS_authhmac(unsigned char *digest, const unsigned char *key, +static int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned int h3, ...) 
{ unsigned char paramdigest[SHA1_DIGEST_SIZE]; - struct sdesc *sdesc; + struct sha1_ctx sha_ctx; unsigned int dlen; unsigned char *data; unsigned char c; - int ret; + int ret = 0; va_list argp; if (!chip) return -ENODEV; - sdesc = init_sdesc(hashalg); - if (IS_ERR(sdesc)) { - pr_info("can't alloc %s\n", hash_alg); - return PTR_ERR(sdesc); - } - c = !!h3; - ret = crypto_shash_init(&sdesc->shash); - if (ret < 0) - goto out; + sha1_init(&sha_ctx); va_start(argp, h3); for (;;) { dlen = va_arg(argp, unsigned int); @@ -147,27 +151,22 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key, ret = -EINVAL; break; } - ret = crypto_shash_update(&sdesc->shash, data, dlen); - if (ret < 0) - break; + sha1_update(&sha_ctx, data, dlen); } va_end(argp); if (!ret) - ret = crypto_shash_final(&sdesc->shash, paramdigest); + sha1_final(&sha_ctx, paramdigest); if (!ret) ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); -out: - kfree_sensitive(sdesc); return ret; } -EXPORT_SYMBOL_GPL(TSS_authhmac); /* * verify the AUTH1_COMMAND (Seal) result from TPM */ -int TSS_checkhmac1(unsigned char *buffer, +static int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, @@ -182,7 +181,7 @@ int TSS_checkhmac1(unsigned char *buffer, unsigned char *authdata; unsigned char testhmac[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; - struct sdesc *sdesc; + struct sha1_ctx sha_ctx; unsigned int dlen; unsigned int dpos; va_list argp; @@ -203,51 +202,30 @@ int TSS_checkhmac1(unsigned char *buffer, continueflag = authdata - 1; enonce = continueflag - TPM_NONCE_SIZE; - sdesc = init_sdesc(hashalg); - if (IS_ERR(sdesc)) { - pr_info("can't alloc %s\n", hash_alg); - return PTR_ERR(sdesc); - } - ret = crypto_shash_init(&sdesc->shash); - if (ret < 0) - goto out; - ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, - sizeof result); - if (ret < 0) - goto out; - ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, - sizeof ordinal); - if (ret < 0) - goto out; + sha1_init(&sha_ctx); + sha1_update(&sha_ctx, (const u8 *)&result, sizeof(result)); + sha1_update(&sha_ctx, (const u8 *)&ordinal, sizeof(ordinal)); va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); - ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); - if (ret < 0) - break; + sha1_update(&sha_ctx, buffer + dpos, dlen); } va_end(argp); - if (!ret) - ret = crypto_shash_final(&sdesc->shash, paramdigest); - if (ret < 0) - goto out; + sha1_final(&sha_ctx, paramdigest); ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 1, continueflag, 0, 0); if (ret < 0) - goto out; + return ret; - if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) - ret = -EINVAL; -out: - kfree_sensitive(sdesc); - return ret; + if (crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE)) + return -EINVAL; + return 0; } -EXPORT_SYMBOL_GPL(TSS_checkhmac1); /* * verify the AUTH2_COMMAND (unseal) result from TPM @@ -273,7 +251,7 @@ static int TSS_checkhmac2(unsigned char *buffer, unsigned char testhmac1[SHA1_DIGEST_SIZE]; unsigned char testhmac2[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; - struct sdesc *sdesc; + struct sha1_ctx sha_ctx; unsigned int dlen; unsigned int dpos; va_list argp; @@ -296,22 +274,9 @@ static int TSS_checkhmac2(unsigned char *buffer, enonce1 = 
continueflag1 - TPM_NONCE_SIZE; enonce2 = continueflag2 - TPM_NONCE_SIZE; - sdesc = init_sdesc(hashalg); - if (IS_ERR(sdesc)) { - pr_info("can't alloc %s\n", hash_alg); - return PTR_ERR(sdesc); - } - ret = crypto_shash_init(&sdesc->shash); - if (ret < 0) - goto out; - ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, - sizeof result); - if (ret < 0) - goto out; - ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, - sizeof ordinal); - if (ret < 0) - goto out; + sha1_init(&sha_ctx); + sha1_update(&sha_ctx, (const u8 *)&result, sizeof(result)); + sha1_update(&sha_ctx, (const u8 *)&ordinal, sizeof(ordinal)); va_start(argp, keylen2); for (;;) { @@ -319,42 +284,33 @@ static int TSS_checkhmac2(unsigned char *buffer, if (dlen == 0) break; dpos = va_arg(argp, unsigned int); - ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); - if (ret < 0) - break; + sha1_update(&sha_ctx, buffer + dpos, dlen); } va_end(argp); - if (!ret) - ret = crypto_shash_final(&sdesc->shash, paramdigest); - if (ret < 0) - goto out; + sha1_final(&sha_ctx, paramdigest); ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce1, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) - goto out; - if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { - ret = -EINVAL; - goto out; - } + return ret; + if (crypto_memneq(testhmac1, authdata1, SHA1_DIGEST_SIZE)) + return -EINVAL; ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce2, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) - goto out; - if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) - ret = -EINVAL; -out: - kfree_sensitive(sdesc); - return ret; + return ret; + if (crypto_memneq(testhmac2, authdata2, SHA1_DIGEST_SIZE)) + return -EINVAL; + return 0; } /* * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ -int trusted_tpm_send(unsigned char *cmd, size_t buflen) +static int trusted_tpm_send(unsigned char *cmd, size_t buflen) { struct tpm_buf buf; int rc; @@ -380,7 +336,6 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen) tpm_put_ops(chip); return rc; } -EXPORT_SYMBOL_GPL(trusted_tpm_send); /* * Lock a trusted key, by extending a selected PCR. 
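/*
 * Editorial note -- illustrative sketch, not part of the patch above: the
 * TPM 1.2 authorization check that TSS_checkhmac1()/TSS_checkhmac2() perform,
 * expressed with the library SHA-1/HMAC-SHA1 API this patch converts to
 * (<crypto/sha1.h>) and the constant-time comparison from <crypto/utils.h>.
 * Parameter names mirror the functions above; "params"/"params_len" stand in
 * for the caller-selected response fields.
 */
static int example_tpm1_check_auth(const u8 *key, unsigned int keylen,
				   u32 result, u32 ordinal,
				   const u8 *params, unsigned int params_len,
				   const u8 *enonce, const u8 *ononce,
				   u8 continueflag, const u8 *authdata)
{
	u8 paramdigest[SHA1_DIGEST_SIZE];
	u8 testhmac[SHA1_DIGEST_SIZE];
	struct sha1_ctx sha_ctx;
	struct hmac_sha1_ctx hmac_ctx;

	/* paramdigest = SHA-1(returnCode || ordinal || response parameters) */
	sha1_init(&sha_ctx);
	sha1_update(&sha_ctx, (const u8 *)&result, sizeof(result));
	sha1_update(&sha_ctx, (const u8 *)&ordinal, sizeof(ordinal));
	sha1_update(&sha_ctx, params, params_len);
	sha1_final(&sha_ctx, paramdigest);

	/* authdata = HMAC-SHA1(key, paramdigest || enonce || ononce || flag) */
	hmac_sha1_init_usingrawkey(&hmac_ctx, key, keylen);
	hmac_sha1_update(&hmac_ctx, paramdigest, SHA1_DIGEST_SIZE);
	hmac_sha1_update(&hmac_ctx, enonce, TPM_NONCE_SIZE);
	hmac_sha1_update(&hmac_ctx, ononce, TPM_NONCE_SIZE);
	hmac_sha1_update(&hmac_ctx, &continueflag, 1);
	hmac_sha1_final(&hmac_ctx, testhmac);

	/* crypto_memneq() avoids the timing side channel of memcmp(). */
	return crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE) ? -EINVAL : 0;
}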
@@ -434,7 +389,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s, /* * Create an object independent authorisation protocol (oiap) session */ -int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) +static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; @@ -451,7 +406,6 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) TPM_NONCE_SIZE); return 0; } -EXPORT_SYMBOL_GPL(oiap); struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; @@ -498,9 +452,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, /* calculate encrypted authorization value */ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); - ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); - if (ret < 0) - goto out; + sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE); if (ret < 0) @@ -989,40 +941,6 @@ static int trusted_tpm_get_random(unsigned char *key, size_t key_len) return tpm_get_random(chip, key, key_len); } -static void trusted_shash_release(void) -{ - if (hashalg) - crypto_free_shash(hashalg); - if (hmacalg) - crypto_free_shash(hmacalg); -} - -static int __init trusted_shash_alloc(void) -{ - int ret; - - hmacalg = crypto_alloc_shash(hmac_alg, 0, 0); - if (IS_ERR(hmacalg)) { - pr_info("could not allocate crypto %s\n", - hmac_alg); - return PTR_ERR(hmacalg); - } - - hashalg = crypto_alloc_shash(hash_alg, 0, 0); - if (IS_ERR(hashalg)) { - pr_info("could not allocate crypto %s\n", - hash_alg); - ret = PTR_ERR(hashalg); - goto hashalg_fail; - } - - return 0; - -hashalg_fail: - crypto_free_shash(hmacalg); - return ret; -} - static int __init init_digests(void) { int i; @@ -1049,15 +967,10 @@ static int __init trusted_tpm_init(void) ret = init_digests(); if (ret < 0) goto err_put; - ret = trusted_shash_alloc(); - if (ret < 0) - goto err_free; ret = register_key_type(&key_type_trusted); if (ret < 0) - goto err_release; + goto err_free; return 0; -err_release: - trusted_shash_release(); err_free: kfree(digests); err_put: @@ -1070,7 +983,6 @@ static void trusted_tpm_exit(void) if (chip) { put_device(&chip->dev); kfree(digests); - trusted_shash_release(); unregister_key_type(&key_type_trusted); } } diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index 024be262702f..a7ea4a1c3bed 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -18,14 +18,6 @@ #include "tpm2key.asn1.h" -static struct tpm2_hash tpm2_hash_map[] = { - {HASH_ALGO_SHA1, TPM_ALG_SHA1}, - {HASH_ALGO_SHA256, TPM_ALG_SHA256}, - {HASH_ALGO_SHA384, TPM_ALG_SHA384}, - {HASH_ALGO_SHA512, TPM_ALG_SHA512}, - {HASH_ALGO_SM3_256, TPM_ALG_SM3_256}, -}; - static u32 tpm2key_oid[] = { 2, 23, 133, 10, 1, 5 }; static int tpm2_key_encode(struct trusted_key_payload *payload, @@ -244,20 +236,13 @@ int tpm2_seal_trusted(struct tpm_chip *chip, off_t offset = TPM_HEADER_SIZE; struct tpm_buf buf, sized; int blob_len = 0; - u32 hash; + int hash; u32 flags; - int i; int rc; - for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) { - if (options->hash == tpm2_hash_map[i].crypto_id) { - hash = tpm2_hash_map[i].tpm_id; - break; - } - } - - if (i == ARRAY_SIZE(tpm2_hash_map)) - return -EINVAL; + hash = tpm2_find_hash_alg(options->hash); + if (hash < 0) + return hash; if (!options->keyhandle) return -EINVAL; @@ -283,7 +268,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip, 
goto out_put; } - tpm_buf_append_name(chip, &buf, options->keyhandle, NULL); + rc = tpm_buf_append_name(chip, &buf, options->keyhandle, NULL); + if (rc) + goto out; + tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_DECRYPT, options->keyauth, TPM_DIGEST_SIZE); @@ -331,7 +319,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip, goto out; } - tpm_buf_fill_hmac_session(chip, &buf); + rc = tpm_buf_fill_hmac_session(chip, &buf); + if (rc) + goto out; + rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data"); rc = tpm_buf_check_hmac_response(chip, &buf, rc); if (rc) @@ -348,25 +339,19 @@ int tpm2_seal_trusted(struct tpm_chip *chip, } blob_len = tpm2_key_encode(payload, options, &buf.data[offset], blob_len); + if (blob_len < 0) + rc = blob_len; out: tpm_buf_destroy(&sized); tpm_buf_destroy(&buf); - if (rc > 0) { - if (tpm2_rc_value(rc) == TPM2_RC_HASH) - rc = -EINVAL; - else - rc = -EPERM; - } - if (blob_len < 0) - rc = blob_len; - else + if (!rc) payload->blob_len = blob_len; out_put: tpm_put_ops(chip); - return rc; + return tpm_ret_to_err(rc); } /** @@ -387,6 +372,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip, struct trusted_key_options *options, u32 *blob_handle) { + u8 *blob_ref __free(kfree) = NULL; struct tpm_buf buf; unsigned int private_len; unsigned int public_len; @@ -400,6 +386,9 @@ static int tpm2_load_cmd(struct tpm_chip *chip, /* old form */ blob = payload->blob; payload->old_format = 1; + } else { + /* Bind for cleanup: */ + blob_ref = blob; } /* new format carries keyhandle but old format doesn't */ @@ -444,7 +433,10 @@ static int tpm2_load_cmd(struct tpm_chip *chip, return rc; } - tpm_buf_append_name(chip, &buf, options->keyhandle, NULL); + rc = tpm_buf_append_name(chip, &buf, options->keyhandle, NULL); + if (rc) + goto out; + tpm_buf_append_hmac_session(chip, &buf, 0, options->keyauth, TPM_DIGEST_SIZE); @@ -456,7 +448,10 @@ static int tpm2_load_cmd(struct tpm_chip *chip, goto out; } - tpm_buf_fill_hmac_session(chip, &buf); + rc = tpm_buf_fill_hmac_session(chip, &buf); + if (rc) + goto out; + rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob"); rc = tpm_buf_check_hmac_response(chip, &buf, rc); if (!rc) @@ -464,14 +459,9 @@ static int tpm2_load_cmd(struct tpm_chip *chip, (__be32 *) &buf.data[TPM_HEADER_SIZE]); out: - if (blob != payload->blob) - kfree(blob); tpm_buf_destroy(&buf); - if (rc > 0) - rc = -EPERM; - - return rc; + return tpm_ret_to_err(rc); } /** @@ -491,8 +481,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, struct trusted_key_options *options, u32 blob_handle) { + struct tpm_header *head; struct tpm_buf buf; u16 data_len; + int offset; u8 *data; int rc; @@ -506,7 +498,9 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, return rc; } - tpm_buf_append_name(chip, &buf, blob_handle, NULL); + rc = tpm_buf_append_name(chip, &buf, options->keyhandle, NULL); + if (rc) + goto out; if (!options->policyhandle) { tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT, @@ -527,15 +521,22 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, tpm2_buf_append_auth(&buf, options->policyhandle, NULL /* nonce */, 0, 0, options->blobauth, options->blobauth_len); - tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT, - NULL, 0); + if (tpm2_chip_auth(chip)) { + tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT, NULL, 0); + } else { + offset = buf.handles * 4 + TPM_HEADER_SIZE; + head = (struct tpm_header *)buf.data; + if (tpm_buf_length(&buf) == offset) + head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + } } - tpm_buf_fill_hmac_session(chip, &buf); + rc = 
tpm_buf_fill_hmac_session(chip, &buf); + if (rc) + goto out; + rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing"); rc = tpm_buf_check_hmac_response(chip, &buf, rc); - if (rc > 0) - rc = -EPERM; if (!rc) { data_len = be16_to_cpup( @@ -568,7 +569,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, out: tpm_buf_destroy(&buf); - return rc; + return tpm_ret_to_err(rc); } /** @@ -600,6 +601,5 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, out: tpm_put_ops(chip); - - return rc; + return tpm_ret_to_err(rc); } diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 749e2a4dcb13..686d56e4cc85 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -61,7 +61,7 @@ int user_preparse(struct key_preparsed_payload *prep) struct user_key_payload *upayload; size_t datalen = prep->datalen; - if (datalen <= 0 || datalen > 32767 || !prep->data) + if (datalen == 0 || datalen > 32767 || !prep->data) return -EINVAL; upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL); diff --git a/security/landlock/.kunitconfig b/security/landlock/.kunitconfig index 03e119466604..f9423f01ac5b 100644 --- a/security/landlock/.kunitconfig +++ b/security/landlock/.kunitconfig @@ -1,4 +1,6 @@ +CONFIG_AUDIT=y CONFIG_KUNIT=y +CONFIG_NET=y CONFIG_SECURITY=y CONFIG_SECURITY_LANDLOCK=y CONFIG_SECURITY_LANDLOCK_KUNIT_TEST=y diff --git a/security/landlock/Makefile b/security/landlock/Makefile index b4538b7cf7d2..3160c2bdac1d 100644 --- a/security/landlock/Makefile +++ b/security/landlock/Makefile @@ -4,3 +4,8 @@ landlock-y := setup.o syscalls.o object.o ruleset.o \ cred.o task.o fs.o landlock-$(CONFIG_INET) += net.o + +landlock-$(CONFIG_AUDIT) += \ + id.o \ + audit.o \ + domain.o diff --git a/security/landlock/access.h b/security/landlock/access.h new file mode 100644 index 000000000000..7961c6630a2d --- /dev/null +++ b/security/landlock/access.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Access types and helpers + * + * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ACCESS_H +#define _SECURITY_LANDLOCK_ACCESS_H + +#include <linux/bitops.h> +#include <linux/build_bug.h> +#include <linux/kernel.h> +#include <uapi/linux/landlock.h> + +#include "limits.h" + +/* + * All access rights that are denied by default whether they are handled or not + * by a ruleset/layer. This must be ORed with all ruleset->access_masks[] + * entries when we need to get the absolute handled access masks, see + * landlock_upgrade_handled_access_masks(). + */ +/* clang-format off */ +#define _LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \ + LANDLOCK_ACCESS_FS_REFER) +/* clang-format on */ + +/* clang-format off */ +#define _LANDLOCK_ACCESS_FS_OPTIONAL ( \ + LANDLOCK_ACCESS_FS_TRUNCATE | \ + LANDLOCK_ACCESS_FS_IOCTL_DEV) +/* clang-format on */ + +typedef u16 access_mask_t; + +/* Makes sure all filesystem access rights can be stored. */ +static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS); +/* Makes sure all network access rights can be stored. */ +static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET); +/* Makes sure all scoped rights can be stored. */ +static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE); +/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */ +static_assert(sizeof(unsigned long) >= sizeof(access_mask_t)); + +/* Ruleset access masks. 
*/ +struct access_masks { + access_mask_t fs : LANDLOCK_NUM_ACCESS_FS; + access_mask_t net : LANDLOCK_NUM_ACCESS_NET; + access_mask_t scope : LANDLOCK_NUM_SCOPE; +}; + +union access_masks_all { + struct access_masks masks; + u32 all; +}; + +/* Makes sure all fields are covered. */ +static_assert(sizeof(typeof_member(union access_masks_all, masks)) == + sizeof(typeof_member(union access_masks_all, all))); + +typedef u16 layer_mask_t; + +/* Makes sure all layers can be checked. */ +static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS); + +/* + * Tracks domains responsible of a denied access. This is required to avoid + * storing in each object the full layer_masks[] required by update_request(). + */ +typedef u8 deny_masks_t; + +/* + * Makes sure all optional access rights can be tied to a layer index (cf. + * get_deny_mask). + */ +static_assert(BITS_PER_TYPE(deny_masks_t) >= + (HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1) * + HWEIGHT(_LANDLOCK_ACCESS_FS_OPTIONAL))); + +/* LANDLOCK_MAX_NUM_LAYERS must be a power of two (cf. deny_masks_t assert). */ +static_assert(HWEIGHT(LANDLOCK_MAX_NUM_LAYERS) == 1); + +/* Upgrades with all initially denied by default access rights. */ +static inline struct access_masks +landlock_upgrade_handled_access_masks(struct access_masks access_masks) +{ + /* + * All access rights that are denied by default whether they are + * explicitly handled or not. + */ + if (access_masks.fs) + access_masks.fs |= _LANDLOCK_ACCESS_FS_INITIALLY_DENIED; + + return access_masks; +} + +#endif /* _SECURITY_LANDLOCK_ACCESS_H */ diff --git a/security/landlock/audit.c b/security/landlock/audit.c new file mode 100644 index 000000000000..c52d079cdb77 --- /dev/null +++ b/security/landlock/audit.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Audit helpers + * + * Copyright © 2023-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/audit.h> +#include <linux/bitops.h> +#include <linux/lsm_audit.h> +#include <linux/pid.h> +#include <uapi/linux/landlock.h> + +#include "access.h" +#include "audit.h" +#include "common.h" +#include "cred.h" +#include "domain.h" +#include "limits.h" +#include "ruleset.h" + +static const char *const fs_access_strings[] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = "fs.execute", + [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = "fs.write_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = "fs.read_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = "fs.read_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = "fs.remove_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_FILE)] = "fs.remove_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_CHAR)] = "fs.make_char", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_DIR)] = "fs.make_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = "fs.make_reg", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SOCK)] = "fs.make_sock", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_FIFO)] = "fs.make_fifo", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_BLOCK)] = "fs.make_block", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SYM)] = "fs.make_sym", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = "fs.refer", + [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = "fs.truncate", + [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = "fs.ioctl_dev", +}; + +static_assert(ARRAY_SIZE(fs_access_strings) == LANDLOCK_NUM_ACCESS_FS); + +static const char *const net_access_strings[] = { + [BIT_INDEX(LANDLOCK_ACCESS_NET_BIND_TCP)] = "net.bind_tcp", + [BIT_INDEX(LANDLOCK_ACCESS_NET_CONNECT_TCP)] = "net.connect_tcp", +}; + +static_assert(ARRAY_SIZE(net_access_strings) == 
LANDLOCK_NUM_ACCESS_NET); + +static __attribute_const__ const char * +get_blocker(const enum landlock_request_type type, + const unsigned long access_bit) +{ + switch (type) { + case LANDLOCK_REQUEST_PTRACE: + WARN_ON_ONCE(access_bit != -1); + return "ptrace"; + + case LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY: + WARN_ON_ONCE(access_bit != -1); + return "fs.change_topology"; + + case LANDLOCK_REQUEST_FS_ACCESS: + if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(fs_access_strings))) + return "unknown"; + return fs_access_strings[access_bit]; + + case LANDLOCK_REQUEST_NET_ACCESS: + if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(net_access_strings))) + return "unknown"; + return net_access_strings[access_bit]; + + case LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET: + WARN_ON_ONCE(access_bit != -1); + return "scope.abstract_unix_socket"; + + case LANDLOCK_REQUEST_SCOPE_SIGNAL: + WARN_ON_ONCE(access_bit != -1); + return "scope.signal"; + } + + WARN_ON_ONCE(1); + return "unknown"; +} + +static void log_blockers(struct audit_buffer *const ab, + const enum landlock_request_type type, + const access_mask_t access) +{ + const unsigned long access_mask = access; + unsigned long access_bit; + bool is_first = true; + + for_each_set_bit(access_bit, &access_mask, BITS_PER_TYPE(access)) { + audit_log_format(ab, "%s%s", is_first ? "" : ",", + get_blocker(type, access_bit)); + is_first = false; + } + if (is_first) + audit_log_format(ab, "%s", get_blocker(type, -1)); +} + +static void log_domain(struct landlock_hierarchy *const hierarchy) +{ + struct audit_buffer *ab; + + /* Ignores already logged domains. */ + if (READ_ONCE(hierarchy->log_status) == LANDLOCK_LOG_RECORDED) + return; + + /* Uses consistent allocation flags wrt common_lsm_audit(). */ + ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN, + AUDIT_LANDLOCK_DOMAIN); + if (!ab) + return; + + WARN_ON_ONCE(hierarchy->id == 0); + audit_log_format( + ab, + "domain=%llx status=allocated mode=enforcing pid=%d uid=%u exe=", + hierarchy->id, pid_nr(hierarchy->details->pid), + hierarchy->details->uid); + audit_log_untrustedstring(ab, hierarchy->details->exe_path); + audit_log_format(ab, " comm="); + audit_log_untrustedstring(ab, hierarchy->details->comm); + audit_log_end(ab); + + /* + * There may be race condition leading to logging of the same domain + * several times but that is OK. 
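/*
 * Editorial note -- worked example, not part of the patch above: with
 * access = LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_READ_FILE
 * (bits 0 and 2), the for_each_set_bit() loop in log_blockers() emits the
 * comma-separated list "fs.execute,fs.read_file" for the record's blockers
 * field.  Request types without a configurable access mask (e.g.
 * LANDLOCK_REQUEST_PTRACE) reach get_blocker() with access_bit == -1 and log
 * a single fixed string such as "ptrace".
 */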
+ */ + WRITE_ONCE(hierarchy->log_status, LANDLOCK_LOG_RECORDED); +} + +static struct landlock_hierarchy * +get_hierarchy(const struct landlock_ruleset *const domain, const size_t layer) +{ + struct landlock_hierarchy *hierarchy = domain->hierarchy; + ssize_t i; + + if (WARN_ON_ONCE(layer >= domain->num_layers)) + return hierarchy; + + for (i = domain->num_layers - 1; i > layer; i--) { + if (WARN_ON_ONCE(!hierarchy->parent)) + break; + + hierarchy = hierarchy->parent; + } + + return hierarchy; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_hierarchy(struct kunit *const test) +{ + struct landlock_hierarchy dom0_hierarchy = { + .id = 10, + }; + struct landlock_hierarchy dom1_hierarchy = { + .parent = &dom0_hierarchy, + .id = 20, + }; + struct landlock_hierarchy dom2_hierarchy = { + .parent = &dom1_hierarchy, + .id = 30, + }; + struct landlock_ruleset dom2 = { + .hierarchy = &dom2_hierarchy, + .num_layers = 3, + }; + + KUNIT_EXPECT_EQ(test, 10, get_hierarchy(&dom2, 0)->id); + KUNIT_EXPECT_EQ(test, 20, get_hierarchy(&dom2, 1)->id); + KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, 2)->id); + /* KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, -1)->id); */ +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static size_t get_denied_layer(const struct landlock_ruleset *const domain, + access_mask_t *const access_request, + const layer_mask_t (*const layer_masks)[], + const size_t layer_masks_size) +{ + const unsigned long access_req = *access_request; + unsigned long access_bit; + access_mask_t missing = 0; + long youngest_layer = -1; + + for_each_set_bit(access_bit, &access_req, layer_masks_size) { + const access_mask_t mask = (*layer_masks)[access_bit]; + long layer; + + if (!mask) + continue; + + /* __fls(1) == 0 */ + layer = __fls(mask); + if (layer > youngest_layer) { + youngest_layer = layer; + missing = BIT(access_bit); + } else if (layer == youngest_layer) { + missing |= BIT(access_bit); + } + } + + *access_request = missing; + if (youngest_layer == -1) + return domain->num_layers - 1; + + return youngest_layer; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_denied_layer(struct kunit *const test) +{ + const struct landlock_ruleset dom = { + .num_layers = 5, + }; + const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT(0), + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT(1), + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = BIT(1) | BIT(0), + [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = BIT(2), + }; + access_mask_t access; + + access = LANDLOCK_ACCESS_FS_EXECUTE; + KUNIT_EXPECT_EQ(test, 0, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_EXECUTE); + + access = LANDLOCK_ACCESS_FS_READ_FILE; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_FILE); + + access = LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + 
get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_WRITE_FILE; + KUNIT_EXPECT_EQ(test, 4, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, 0); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static size_t +get_layer_from_deny_masks(access_mask_t *const access_request, + const access_mask_t all_existing_optional_access, + const deny_masks_t deny_masks) +{ + const unsigned long access_opt = all_existing_optional_access; + const unsigned long access_req = *access_request; + access_mask_t missing = 0; + size_t youngest_layer = 0; + size_t access_index = 0; + unsigned long access_bit; + + /* This will require change with new object types. */ + WARN_ON_ONCE(access_opt != _LANDLOCK_ACCESS_FS_OPTIONAL); + + for_each_set_bit(access_bit, &access_opt, + BITS_PER_TYPE(access_mask_t)) { + if (access_req & BIT(access_bit)) { + const size_t layer = + (deny_masks >> (access_index * 4)) & + (LANDLOCK_MAX_NUM_LAYERS - 1); + + if (layer > youngest_layer) { + youngest_layer = layer; + missing = BIT(access_bit); + } else if (layer == youngest_layer) { + missing |= BIT(access_bit); + } + } + access_index++; + } + + *access_request = missing; + return youngest_layer; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_layer_from_deny_masks(struct kunit *const test) +{ + deny_masks_t deny_mask; + access_mask_t access; + + /* truncate:0 ioctl_dev:2 */ + deny_mask = 0x20; + + access = LANDLOCK_ACCESS_FS_TRUNCATE; + KUNIT_EXPECT_EQ(test, 0, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE); + + access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV; + KUNIT_EXPECT_EQ(test, 2, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_IOCTL_DEV); + + /* truncate:15 ioctl_dev:15 */ + deny_mask = 0xff; + + access = LANDLOCK_ACCESS_FS_TRUNCATE; + KUNIT_EXPECT_EQ(test, 15, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE); + + access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV; + KUNIT_EXPECT_EQ(test, 15, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, + LANDLOCK_ACCESS_FS_TRUNCATE | + LANDLOCK_ACCESS_FS_IOCTL_DEV); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static bool is_valid_request(const struct landlock_request *const request) +{ + if (WARN_ON_ONCE(request->layer_plus_one > LANDLOCK_MAX_NUM_LAYERS)) + return false; + + if (WARN_ON_ONCE(!(!!request->layer_plus_one ^ !!request->access))) + return false; + + if (request->access) { + if (WARN_ON_ONCE(!(!!request->layer_masks ^ + !!request->all_existing_optional_access))) + return false; + } else { + if (WARN_ON_ONCE(request->layer_masks || + request->all_existing_optional_access)) + return false; + } + + if (WARN_ON_ONCE(!!request->layer_masks ^ !!request->layer_masks_size)) + return false; + + if (request->deny_masks) { + if (WARN_ON_ONCE(!request->all_existing_optional_access)) + return false; + } + + return true; +} + +/** + * landlock_log_denial - Create audit records related to a denial + * + * @subject: The Landlock subject's credential denying an action. 
+ * @request: Detail of the user space request. + */ +void landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request) +{ + struct audit_buffer *ab; + struct landlock_hierarchy *youngest_denied; + size_t youngest_layer; + access_mask_t missing; + + if (WARN_ON_ONCE(!subject || !subject->domain || + !subject->domain->hierarchy || !request)) + return; + + if (!is_valid_request(request)) + return; + + missing = request->access; + if (missing) { + /* Gets the nearest domain that denies the request. */ + if (request->layer_masks) { + youngest_layer = get_denied_layer( + subject->domain, &missing, request->layer_masks, + request->layer_masks_size); + } else { + youngest_layer = get_layer_from_deny_masks( + &missing, request->all_existing_optional_access, + request->deny_masks); + } + youngest_denied = + get_hierarchy(subject->domain, youngest_layer); + } else { + youngest_layer = request->layer_plus_one - 1; + youngest_denied = + get_hierarchy(subject->domain, youngest_layer); + } + + if (READ_ONCE(youngest_denied->log_status) == LANDLOCK_LOG_DISABLED) + return; + + /* + * Consistently keeps track of the number of denied access requests + * even if audit is currently disabled, or if audit rules currently + * exclude this record type, or if landlock_restrict_self(2)'s flags + * quiet logs. + */ + atomic64_inc(&youngest_denied->num_denials); + + if (!audit_enabled) + return; + + /* Checks if the current exec was restricting itself. */ + if (subject->domain_exec & BIT(youngest_layer)) { + /* Ignores denials for the same execution. */ + if (!youngest_denied->log_same_exec) + return; + } else { + /* Ignores denials after a new execution. */ + if (!youngest_denied->log_new_exec) + return; + } + + /* Uses consistent allocation flags wrt common_lsm_audit(). */ + ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN, + AUDIT_LANDLOCK_ACCESS); + if (!ab) + return; + + audit_log_format(ab, "domain=%llx blockers=", youngest_denied->id); + log_blockers(ab, request->type, missing); + audit_log_lsm_data(ab, &request->audit); + audit_log_end(ab); + + /* Logs this domain the first time it shows in log. */ + log_domain(youngest_denied); +} + +/** + * landlock_log_drop_domain - Create an audit record on domain deallocation + * + * @hierarchy: The domain's hierarchy being deallocated. + * + * Only domains which previously appeared in the audit logs are logged again. + * This is useful to know when a domain will never show again in the audit log. + * + * Called in a work queue scheduled by landlock_put_ruleset_deferred() called + * by hook_cred_free(). + */ +void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy) +{ + struct audit_buffer *ab; + + if (WARN_ON_ONCE(!hierarchy)) + return; + + if (!audit_enabled) + return; + + /* Ignores domains that were not logged. */ + if (READ_ONCE(hierarchy->log_status) != LANDLOCK_LOG_RECORDED) + return; + + /* + * If logging of domain allocation succeeded, warns about failure to log + * domain deallocation to highlight unbalanced domain lifetime logs. 
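/*
 * Editorial note -- illustrative sketch, not part of the patch above: how a
 * caller (e.g. a filesystem hook) might fill a struct landlock_request (see
 * audit.h later in this diff) before handing it to landlock_log_denial().
 * landlock_log_denial() attributes the denial to the youngest layer that
 * denies the request, and drops the record when audit is disabled or the
 * domain is configured not to log it.  The helper name and parameters below
 * are hypothetical.
 */
static void example_log_fs_denial(
	const struct landlock_cred_security *subject, const struct path *path,
	access_mask_t denied_accesses,
	const layer_mask_t (*layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	struct landlock_request request = {
		.type = LANDLOCK_REQUEST_FS_ACCESS,
		.audit = {
			.type = LSM_AUDIT_DATA_PATH,
			.u.path = *path,
		},
		.access = denied_accesses,
		.layer_masks = layer_masks,
		.layer_masks_size = sizeof(*layer_masks),
	};

	landlock_log_denial(subject, &request);
}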
+ */ + ab = audit_log_start(audit_context(), GFP_KERNEL, + AUDIT_LANDLOCK_DOMAIN); + if (!ab) + return; + + audit_log_format(ab, "domain=%llx status=deallocated denials=%llu", + hierarchy->id, atomic64_read(&hierarchy->num_denials)); + audit_log_end(ab); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_get_hierarchy), + KUNIT_CASE(test_get_denied_layer), + KUNIT_CASE(test_get_layer_from_deny_masks), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_audit", + .test_cases = test_cases, +}; + +kunit_test_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ diff --git a/security/landlock/audit.h b/security/landlock/audit.h new file mode 100644 index 000000000000..92428b7fc4d8 --- /dev/null +++ b/security/landlock/audit.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Audit helpers + * + * Copyright © 2023-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_AUDIT_H +#define _SECURITY_LANDLOCK_AUDIT_H + +#include <linux/audit.h> +#include <linux/lsm_audit.h> + +#include "access.h" +#include "cred.h" + +enum landlock_request_type { + LANDLOCK_REQUEST_PTRACE = 1, + LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + LANDLOCK_REQUEST_FS_ACCESS, + LANDLOCK_REQUEST_NET_ACCESS, + LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + LANDLOCK_REQUEST_SCOPE_SIGNAL, +}; + +/* + * We should be careful to only use a variable of this type for + * landlock_log_denial(). This way, the compiler can remove it entirely if + * CONFIG_AUDIT is not set. + */ +struct landlock_request { + /* Mandatory fields. */ + enum landlock_request_type type; + struct common_audit_data audit; + + /** + * layer_plus_one: First layer level that denies the request + 1. The + * extra one is useful to detect uninitialized field. + */ + size_t layer_plus_one; + + /* Required field for configurable access control. */ + access_mask_t access; + + /* Required fields for requests with layer masks. */ + const layer_mask_t (*layer_masks)[]; + size_t layer_masks_size; + + /* Required fields for requests with deny masks. 
*/ + const access_mask_t all_existing_optional_access; + deny_masks_t deny_masks; +}; + +#ifdef CONFIG_AUDIT + +void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy); + +void landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request); + +#else /* CONFIG_AUDIT */ + +static inline void +landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy) +{ +} + +static inline void +landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request) +{ +} + +#endif /* CONFIG_AUDIT */ + +#endif /* _SECURITY_LANDLOCK_AUDIT_H */ diff --git a/security/landlock/cred.c b/security/landlock/cred.c index db9fe7d906ba..0cb3edde4d18 100644 --- a/security/landlock/cred.c +++ b/security/landlock/cred.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Credential hooks + * Landlock - Credential hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ +#include <linux/binfmts.h> #include <linux/cred.h> #include <linux/lsm_hooks.h> @@ -17,11 +19,12 @@ static void hook_cred_transfer(struct cred *const new, const struct cred *const old) { - struct landlock_ruleset *const old_dom = landlock_cred(old)->domain; + const struct landlock_cred_security *const old_llcred = + landlock_cred(old); - if (old_dom) { - landlock_get_ruleset(old_dom); - landlock_cred(new)->domain = old_dom; + if (old_llcred->domain) { + landlock_get_ruleset(old_llcred->domain); + *landlock_cred(new) = *old_llcred; } } @@ -40,10 +43,25 @@ static void hook_cred_free(struct cred *const cred) landlock_put_ruleset_deferred(dom); } +#ifdef CONFIG_AUDIT + +static int hook_bprm_creds_for_exec(struct linux_binprm *const bprm) +{ + /* Resets for each execution. */ + landlock_cred(bprm->cred)->domain_exec = 0; + return 0; +} + +#endif /* CONFIG_AUDIT */ + static struct security_hook_list landlock_hooks[] __ro_after_init = { LSM_HOOK_INIT(cred_prepare, hook_cred_prepare), LSM_HOOK_INIT(cred_transfer, hook_cred_transfer), LSM_HOOK_INIT(cred_free, hook_cred_free), + +#ifdef CONFIG_AUDIT + LSM_HOOK_INIT(bprm_creds_for_exec, hook_bprm_creds_for_exec), +#endif /* CONFIG_AUDIT */ }; __init void landlock_add_cred_hooks(void) diff --git a/security/landlock/cred.h b/security/landlock/cred.h index bf755459838a..c82fe63ec598 100644 --- a/security/landlock/cred.h +++ b/security/landlock/cred.h @@ -1,24 +1,63 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Credential hooks + * Landlock - Credential hooks * * Copyright © 2019-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2019-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_CRED_H #define _SECURITY_LANDLOCK_CRED_H +#include <linux/container_of.h> #include <linux/cred.h> #include <linux/init.h> #include <linux/rcupdate.h> +#include "access.h" +#include "limits.h" #include "ruleset.h" #include "setup.h" +/** + * struct landlock_cred_security - Credential security blob + * + * This structure is packed to minimize the size of struct + * landlock_file_security. However, it is always aligned in the LSM cred blob, + * see lsm_set_blob_size(). + */ struct landlock_cred_security { + /** + * @domain: Immutable ruleset enforced on a task. 
+ */ struct landlock_ruleset *domain; -}; + +#ifdef CONFIG_AUDIT + /** + * @domain_exec: Bitmask identifying the domain layers that were enforced by + * the current task's executed file (i.e. no new execve(2) since + * landlock_restrict_self(2)). + */ + u16 domain_exec; + /** + * @log_subdomains_off: Set if the domain descendants's log_status should be + * set to %LANDLOCK_LOG_DISABLED. This is not a landlock_hierarchy + * configuration because it applies to future descendant domains and it does + * not require a current domain. + */ + u8 log_subdomains_off : 1; +#endif /* CONFIG_AUDIT */ +} __packed; + +#ifdef CONFIG_AUDIT + +/* Makes sure all layer executions can be stored. */ +static_assert(BITS_PER_TYPE(typeof_member(struct landlock_cred_security, + domain_exec)) >= + LANDLOCK_MAX_NUM_LAYERS); + +#endif /* CONFIG_AUDIT */ static inline struct landlock_cred_security * landlock_cred(const struct cred *cred) @@ -53,6 +92,55 @@ static inline bool landlocked(const struct task_struct *const task) return has_dom; } +/** + * landlock_get_applicable_subject - Return the subject's Landlock credential + * if its enforced domain applies to (i.e. + * handles) at least one of the access rights + * specified in @masks + * + * @cred: credential + * @masks: access masks + * @handle_layer: returned youngest layer handling a subset of @masks. Not set + * if the function returns NULL. + * + * Returns: landlock_cred(@cred) if any access rights specified in @masks is + * handled, or NULL otherwise. + */ +static inline const struct landlock_cred_security * +landlock_get_applicable_subject(const struct cred *const cred, + const struct access_masks masks, + size_t *const handle_layer) +{ + const union access_masks_all masks_all = { + .masks = masks, + }; + const struct landlock_ruleset *domain; + ssize_t layer_level; + + if (!cred) + return NULL; + + domain = landlock_cred(cred)->domain; + if (!domain) + return NULL; + + for (layer_level = domain->num_layers - 1; layer_level >= 0; + layer_level--) { + union access_masks_all layer = { + .masks = domain->access_masks[layer_level], + }; + + if (layer.all & masks_all.all) { + if (handle_layer) + *handle_layer = layer_level; + + return landlock_cred(cred); + } + } + + return NULL; +} + __init void landlock_add_cred_hooks(void); #endif /* _SECURITY_LANDLOCK_CRED_H */ diff --git a/security/landlock/domain.c b/security/landlock/domain.c new file mode 100644 index 000000000000..a647b68e8d06 --- /dev/null +++ b/security/landlock/domain.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Domain management + * + * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/cred.h> +#include <linux/file.h> +#include <linux/mm.h> +#include <linux/path.h> +#include <linux/pid.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/uidgid.h> + +#include "access.h" +#include "common.h" +#include "domain.h" +#include "id.h" + +#ifdef CONFIG_AUDIT + +/** + * get_current_exe - Get the current's executable path, if any + * + * @exe_str: Returned pointer to a path string with a lifetime tied to the + * returned buffer, if any. + * @exe_size: Returned size of @exe_str (including the trailing null + * character), if any. + * + * Returns: A pointer to an allocated buffer where @exe_str point to, %NULL if + * there is no executable path, or an error otherwise. 
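/*
 * Editorial note -- worked example, not part of the patch above, of the
 * @domain_exec bookkeeping in struct landlock_cred_security:
 * hook_bprm_creds_for_exec() clears the bitmask on every execve(2), and
 * (outside the hunks shown here) the landlock_restrict_self(2) path is
 * expected to set BIT(layer) for each layer enforced by the current image.
 * So if a task enforces layers 0 and 1, executes a new program, then enforces
 * layer 2, domain_exec == BIT(2): denials attributed to layer 2 follow the
 * domain's log_same_exec setting, while denials from layers 0 and 1 follow
 * log_new_exec (see landlock_log_denial() above).
 */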
+ */ +static const void *get_current_exe(const char **const exe_str, + size_t *const exe_size) +{ + const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE; + struct mm_struct *mm = current->mm; + struct file *file __free(fput) = NULL; + char *buffer __free(kfree) = NULL; + const char *exe; + ssize_t size; + + if (!mm) + return NULL; + + file = get_mm_exe_file(mm); + if (!file) + return NULL; + + buffer = kmalloc(buffer_size, GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + exe = d_path(&file->f_path, buffer, buffer_size); + if (WARN_ON_ONCE(IS_ERR(exe))) + /* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */ + return ERR_CAST(exe); + + size = buffer + buffer_size - exe; + if (WARN_ON_ONCE(size <= 0)) + return ERR_PTR(-ENAMETOOLONG); + + *exe_size = size; + *exe_str = exe; + return no_free_ptr(buffer); +} + +/* + * Returns: A newly allocated object describing a domain, or an error + * otherwise. + */ +static struct landlock_details *get_current_details(void) +{ + /* Cf. audit_log_d_path_exe() */ + static const char null_path[] = "(null)"; + const char *path_str = null_path; + size_t path_size = sizeof(null_path); + const void *buffer __free(kfree) = NULL; + struct landlock_details *details; + + buffer = get_current_exe(&path_str, &path_size); + if (IS_ERR(buffer)) + return ERR_CAST(buffer); + + /* + * Create the new details according to the path's length. Do not + * allocate with GFP_KERNEL_ACCOUNT because it is independent from the + * caller. + */ + details = + kzalloc(struct_size(details, exe_path, path_size), GFP_KERNEL); + if (!details) + return ERR_PTR(-ENOMEM); + + memcpy(details->exe_path, path_str, path_size); + details->pid = get_pid(task_tgid(current)); + details->uid = from_kuid(&init_user_ns, current_uid()); + get_task_comm(details->comm, current); + return details; +} + +/** + * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy + * + * @hierarchy: The hierarchy to initialize. + * + * The current task is referenced as the domain that is enforcing the + * restriction. The subjective credentials must not be in an overridden state. + * + * @hierarchy->parent and @hierarchy->usage should already be set. + */ +int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy) +{ + struct landlock_details *details; + + details = get_current_details(); + if (IS_ERR(details)) + return PTR_ERR(details); + + hierarchy->details = details; + hierarchy->id = landlock_get_id_range(1); + hierarchy->log_status = LANDLOCK_LOG_PENDING; + hierarchy->log_same_exec = true; + hierarchy->log_new_exec = false; + atomic64_set(&hierarchy->num_denials, 0); + return 0; +} + +static deny_masks_t +get_layer_deny_mask(const access_mask_t all_existing_optional_access, + const unsigned long access_bit, const size_t layer) +{ + unsigned long access_weight; + + /* This may require change with new object types. 
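/*
 * Editorial note -- illustrative sketch, not part of the patch above: the
 * scope-based cleanup pattern from <linux/cleanup.h> used by
 * get_current_exe() above and by tpm2_load_cmd()'s blob_ref earlier in this
 * diff.  A pointer declared with __free(kfree) is passed to kfree() when it
 * goes out of scope, unless ownership is handed over with no_free_ptr().  In
 * get_current_exe() this matters because d_path() returns a pointer into the
 * tail of the buffer, so the whole allocation must outlive *exe_str.
 */
static void *example_alloc_handoff(size_t size, bool keep)
{
	void *buffer __free(kfree) = kmalloc(size, GFP_KERNEL);

	if (!buffer)
		return ERR_PTR(-ENOMEM);

	if (!keep)
		return NULL;		/* buffer is freed automatically here. */

	return no_free_ptr(buffer);	/* Caller now owns the allocation. */
}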
*/ + WARN_ON_ONCE(all_existing_optional_access != + _LANDLOCK_ACCESS_FS_OPTIONAL); + + if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS)) + return 0; + + access_weight = hweight_long(all_existing_optional_access & + GENMASK(access_bit, 0)); + if (WARN_ON_ONCE(access_weight < 1)) + return 0; + + return layer + << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1)); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_layer_deny_mask(struct kunit *const test) +{ + const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE); + const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV); + + KUNIT_EXPECT_EQ(test, 0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + truncate, 0)); + KUNIT_EXPECT_EQ(test, 0x3, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + truncate, 3)); + + KUNIT_EXPECT_EQ(test, 0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + ioctl_dev, 0)); + KUNIT_EXPECT_EQ(test, 0xf0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + ioctl_dev, 15)); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +deny_masks_t +landlock_get_deny_masks(const access_mask_t all_existing_optional_access, + const access_mask_t optional_access, + const layer_mask_t (*const layer_masks)[], + const size_t layer_masks_size) +{ + const unsigned long access_opt = optional_access; + unsigned long access_bit; + deny_masks_t deny_masks = 0; + + /* This may require change with new object types. */ + WARN_ON_ONCE(access_opt != + (optional_access & all_existing_optional_access)); + + if (WARN_ON_ONCE(!layer_masks)) + return 0; + + if (WARN_ON_ONCE(!access_opt)) + return 0; + + for_each_set_bit(access_bit, &access_opt, layer_masks_size) { + const layer_mask_t mask = (*layer_masks)[access_bit]; + + if (!mask) + continue; + + /* __fls(1) == 0 */ + deny_masks |= get_layer_deny_mask(all_existing_optional_access, + access_bit, __fls(mask)); + } + return deny_masks; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_landlock_get_deny_masks(struct kunit *const test) +{ + const layer_mask_t layers1[BITS_PER_TYPE(access_mask_t)] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) | + BIT_ULL(9), + [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = BIT_ULL(1), + [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = BIT_ULL(2) | + BIT_ULL(0), + }; + + KUNIT_EXPECT_EQ(test, 0x1, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_TRUNCATE, + &layers1, ARRAY_SIZE(layers1))); + KUNIT_EXPECT_EQ(test, 0x20, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_IOCTL_DEV, + &layers1, ARRAY_SIZE(layers1))); + KUNIT_EXPECT_EQ( + test, 0x21, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_TRUNCATE | + LANDLOCK_ACCESS_FS_IOCTL_DEV, + &layers1, ARRAY_SIZE(layers1))); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_get_layer_deny_mask), + KUNIT_CASE(test_landlock_get_deny_masks), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_domain", + .test_cases = test_cases, +}; + +kunit_test_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +#endif /* CONFIG_AUDIT */ diff --git a/security/landlock/domain.h b/security/landlock/domain.h new file mode 100644 index 000000000000..7fb70b25f85a --- /dev/null +++ b/security/landlock/domain.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Landlock - Domain management + * + * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_DOMAIN_H +#define _SECURITY_LANDLOCK_DOMAIN_H + +#include <linux/limits.h> +#include <linux/mm.h> +#include <linux/path.h> +#include <linux/pid.h> +#include <linux/refcount.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "access.h" +#include "audit.h" + +enum landlock_log_status { + LANDLOCK_LOG_PENDING = 0, + LANDLOCK_LOG_RECORDED, + LANDLOCK_LOG_DISABLED, +}; + +/** + * struct landlock_details - Domain's creation information + * + * Rarely accessed, mainly when logging the first domain's denial. + * + * The contained pointers are initialized at the domain creation time and never + * changed again. Contrary to most other Landlock object types, this one is + * not allocated with GFP_KERNEL_ACCOUNT because its size may not be under the + * caller's control (e.g. unknown exe_path) and the data is not explicitly + * requested nor used by tasks. + */ +struct landlock_details { + /** + * @pid: PID of the task that initially restricted itself. It still + * identifies the same task. Keeping a reference to this PID ensures that + * it will not be recycled. + */ + struct pid *pid; + /** + * @uid: UID of the task that initially restricted itself, at creation time. + */ + uid_t uid; + /** + * @comm: Command line of the task that initially restricted itself, at + * creation time. Always NULL terminated. + */ + char comm[TASK_COMM_LEN]; + /** + * @exe_path: Executable path of the task that initially restricted + * itself, at creation time. Always NULL terminated, and never greater + * than LANDLOCK_PATH_MAX_SIZE. + */ + char exe_path[]; +}; + +/* Adds 11 extra characters for the potential " (deleted)" suffix. */ +#define LANDLOCK_PATH_MAX_SIZE (PATH_MAX + 11) + +/* Makes sure the greatest landlock_details can be allocated. */ +static_assert(struct_size_t(struct landlock_details, exe_path, + LANDLOCK_PATH_MAX_SIZE) <= KMALLOC_MAX_SIZE); + +/** + * struct landlock_hierarchy - Node in a domain hierarchy + */ +struct landlock_hierarchy { + /** + * @parent: Pointer to the parent node, or NULL if it is a root + * Landlock domain. + */ + struct landlock_hierarchy *parent; + /** + * @usage: Number of potential children domains plus their parent + * domain. + */ + refcount_t usage; + +#ifdef CONFIG_AUDIT + /** + * @log_status: Whether this domain should be logged or not. Because + * concurrent log entries may be created at the same time, it is still + * possible to have several domain records of the same domain. + */ + enum landlock_log_status log_status; + /** + * @num_denials: Number of access requests denied by this domain. + * Masked (i.e. never logged) denials are still counted. + */ + atomic64_t num_denials; + /** + * @id: Landlock domain ID, sets once at domain creation time. + */ + u64 id; + /** + * @details: Information about the related domain. + */ + const struct landlock_details *details; + /** + * @log_same_exec: Set if the domain is *not* configured with + * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF. Set to true by default. + */ + u32 log_same_exec : 1, + /** + * @log_new_exec: Set if the domain is configured with + * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON. Set to false by default. 
+ */ + log_new_exec : 1; +#endif /* CONFIG_AUDIT */ +}; + +#ifdef CONFIG_AUDIT + +deny_masks_t +landlock_get_deny_masks(const access_mask_t all_existing_optional_access, + const access_mask_t optional_access, + const layer_mask_t (*const layer_masks)[], + size_t layer_masks_size); + +int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy); + +static inline void +landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy) +{ + if (!hierarchy || !hierarchy->details) + return; + + put_pid(hierarchy->details->pid); + kfree(hierarchy->details); +} + +#else /* CONFIG_AUDIT */ + +static inline int +landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy) +{ + return 0; +} + +static inline void +landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy) +{ +} + +#endif /* CONFIG_AUDIT */ + +static inline void +landlock_get_hierarchy(struct landlock_hierarchy *const hierarchy) +{ + if (hierarchy) + refcount_inc(&hierarchy->usage); +} + +static inline void landlock_put_hierarchy(struct landlock_hierarchy *hierarchy) +{ + while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { + const struct landlock_hierarchy *const freeme = hierarchy; + + landlock_log_drop_domain(hierarchy); + landlock_free_hierarchy_details(hierarchy); + hierarchy = hierarchy->parent; + kfree(freeme); + } +} + +#endif /* _SECURITY_LANDLOCK_DOMAIN_H */ diff --git a/security/landlock/errata.h b/security/landlock/errata.h new file mode 100644 index 000000000000..8e626accac10 --- /dev/null +++ b/security/landlock/errata.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Errata information + * + * Copyright © 2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ERRATA_H +#define _SECURITY_LANDLOCK_ERRATA_H + +#include <linux/init.h> + +struct landlock_erratum { + const int abi; + const u8 number; +}; + +/* clang-format off */ +#define LANDLOCK_ERRATUM(NUMBER) \ + { \ + .abi = LANDLOCK_ERRATA_ABI, \ + .number = NUMBER, \ + }, +/* clang-format on */ + +/* + * Some fixes may require user space to check if they are applied on the running + * kernel before using a specific feature. For instance, this applies when a + * restriction was previously too restrictive and is now getting relaxed (for + * compatibility or semantic reasons). However, non-visible changes for + * legitimate use (e.g. security fixes) do not require an erratum. + */ +static const struct landlock_erratum landlock_errata_init[] __initconst = { + +/* + * Only Sparse may not implement __has_include. If a compiler does not + * implement __has_include, a warning will be printed at boot time (see + * setup.c). 
+ */ +#ifdef __has_include + +#define LANDLOCK_ERRATA_ABI 1 +#if __has_include("errata/abi-1.h") +#include "errata/abi-1.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 2 +#if __has_include("errata/abi-2.h") +#include "errata/abi-2.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 3 +#if __has_include("errata/abi-3.h") +#include "errata/abi-3.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 4 +#if __has_include("errata/abi-4.h") +#include "errata/abi-4.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 5 +#if __has_include("errata/abi-5.h") +#include "errata/abi-5.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 6 +#if __has_include("errata/abi-6.h") +#include "errata/abi-6.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +/* + * For each new erratum, we need to include all the ABI files up to the impacted + * ABI to make all potential future intermediate errata easy to backport. + * + * If such change involves more than one ABI addition, then it must be in a + * dedicated commit with the same Fixes tag as used for the actual fix. + * + * Each commit creating a new security/landlock/errata/abi-*.h file must have a + * Depends-on tag to reference the commit that previously added the line to + * include this new file, except if the original Fixes tag is enough. + * + * Each erratum must be documented in its related ABI file, and a dedicated + * commit must update Documentation/userspace-api/landlock.rst to include this + * erratum. This commit will not be backported. + */ + +#endif + + {} +}; + +#endif /* _SECURITY_LANDLOCK_ERRATA_H */ diff --git a/security/landlock/errata/abi-1.h b/security/landlock/errata/abi-1.h new file mode 100644 index 000000000000..e8a2bff2e5b6 --- /dev/null +++ b/security/landlock/errata/abi-1.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_3 + * + * Erratum 3: Disconnected directory handling + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue with disconnected directories that occur when a + * directory is moved outside the scope of a bind mount. The change ensures + * that evaluated access rights include both those from the disconnected file + * hierarchy down to its filesystem root and those from the related mount point + * hierarchy. This prevents access right widening through rename or link + * actions. + */ +LANDLOCK_ERRATUM(3) diff --git a/security/landlock/errata/abi-4.h b/security/landlock/errata/abi-4.h new file mode 100644 index 000000000000..c052ee54f89f --- /dev/null +++ b/security/landlock/errata/abi-4.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_1 + * + * Erratum 1: TCP socket identification + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC, + * MPTCP, or SCTP) were incorrectly restricted by TCP access rights during + * :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures + * that only TCP sockets are subject to TCP access rights, allowing other + * protocols to operate without unnecessary restrictions. 
+ */ +LANDLOCK_ERRATUM(1) diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h new file mode 100644 index 000000000000..df7bc0e1fdf4 --- /dev/null +++ b/security/landlock/errata/abi-6.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_2 + * + * Erratum 2: Scoped signal handling + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where signal scoping was overly restrictive, + * preventing sandboxed threads from signaling other threads within the same + * process if they belonged to different domains. Because threads are not + * security boundaries, user space might assume that any thread within the same + * process can send signals between themselves (see :manpage:`nptl(7)` and + * :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct + * interaction between threads of the same process should always be allowed. + * This change ensures that any thread is allowed to send signals to any other + * thread within the same process, regardless of their domain. + */ +LANDLOCK_ERRATUM(2) diff --git a/security/landlock/fs.c b/security/landlock/fs.c index e31b97a9f175..fe794875ad46 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Filesystem management and hooks + * Landlock - Filesystem management and hooks * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI - * Copyright © 2021-2022 Microsoft Corporation + * Copyright © 2021-2025 Microsoft Corporation * Copyright © 2022 Günther Noack <gnoack3000@gmail.com> * Copyright © 2023-2024 Google LLC */ @@ -23,11 +23,14 @@ #include <linux/kernel.h> #include <linux/limits.h> #include <linux/list.h> +#include <linux/lsm_audit.h> #include <linux/lsm_hooks.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/path.h> +#include <linux/pid.h> #include <linux/rcupdate.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/types.h> @@ -36,8 +39,11 @@ #include <uapi/linux/fiemap.h> #include <uapi/linux/landlock.h> +#include "access.h" +#include "audit.h" #include "common.h" #include "cred.h" +#include "domain.h" #include "fs.h" #include "limits.h" #include "object.h" @@ -388,24 +394,10 @@ static bool is_nouser_or_private(const struct dentry *dentry) unlikely(IS_PRIVATE(d_backing_inode(dentry)))); } -static access_mask_t -get_handled_fs_accesses(const struct landlock_ruleset *const domain) -{ - /* Handles all initially denied by default access rights. */ - return landlock_union_access_masks(domain).fs | - LANDLOCK_ACCESS_FS_INITIALLY_DENIED; -} - static const struct access_masks any_fs = { .fs = ~0, }; -static const struct landlock_ruleset *get_current_fs_domain(void) -{ - return landlock_get_applicable_domain(landlock_get_current_domain(), - any_fs); -} - /* * Check that a destination file hierarchy has more restrictions than a source * file hierarchy. This is only used for link and rename actions. @@ -572,6 +564,12 @@ static void test_no_more_access(struct kunit *const test) #undef NMA_TRUE #undef NMA_FALSE +static bool is_layer_masks_allowed( + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +{ + return !memchr_inv(layer_masks, 0, sizeof(*layer_masks)); +} + /* * Removes @layer_masks accesses that are not requested. 
* @@ -589,7 +587,8 @@ scope_to_request(const access_mask_t access_request, for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks)) (*layer_masks)[access_bit] = 0; - return !memchr_inv(layer_masks, 0, sizeof(*layer_masks)); + + return is_layer_masks_allowed(layer_masks); } #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST @@ -715,7 +714,8 @@ static void test_is_eacces_with_write(struct kunit *const test) * is_access_to_paths_allowed - Check accesses for requests with a common path * * @domain: Domain to check against. - * @path: File hierarchy to walk through. + * @path: File hierarchy to walk through. For refer checks, this would be + * the common mountpoint. * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is * equal to @layer_masks_parent2 (if any). This is tied to the unique * requested path for most actions, or the source in case of a refer action @@ -728,6 +728,7 @@ static void test_is_eacces_with_write(struct kunit *const test) * those identified by @access_request_parent1). This matrix can * initially refer to domain layer masks and, when the accesses for the * destination and source are the same, to requested layer masks. + * @log_request_parent1: Audit request to fill if the related access is denied. * @dentry_child1: Dentry to the initial child of the parent1 path. This * pointer must be NULL for non-refer actions (i.e. not link nor rename). * @access_request_parent2: Similar to @access_request_parent1 but for a @@ -736,6 +737,7 @@ static void test_is_eacces_with_write(struct kunit *const test) * the source. Must be set to 0 when using a simple path request. * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer * action. This must be NULL otherwise. + * @log_request_parent2: Audit request to fill if the related access is denied. * @dentry_child2: Dentry to the initial child of the parent2 path. This * pointer is only set for RENAME_EXCHANGE actions and must be NULL * otherwise. @@ -755,10 +757,12 @@ static bool is_access_to_paths_allowed( const struct path *const path, const access_mask_t access_request_parent1, layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], - const struct dentry *const dentry_child1, + struct landlock_request *const log_request_parent1, + struct dentry *const dentry_child1, const access_mask_t access_request_parent2, layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], - const struct dentry *const dentry_child2) + struct landlock_request *const log_request_parent2, + struct dentry *const dentry_child2) { bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check, child1_is_directory = true, child2_is_directory = true; @@ -771,23 +775,31 @@ static bool is_access_to_paths_allowed( if (!access_request_parent1 && !access_request_parent2) return true; - if (WARN_ON_ONCE(!domain || !path)) + + if (WARN_ON_ONCE(!path)) return true; + if (is_nouser_or_private(path->dentry)) return true; - if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1)) + + if (WARN_ON_ONCE(!layer_masks_parent1)) return false; + allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1); + if (unlikely(layer_masks_parent2)) { if (WARN_ON_ONCE(!dentry_child1)) return false; + + allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2); + /* * For a double request, first check for potential privilege * escalation by looking at domain handled accesses (which are * a superset of the meaningful requested accesses). 
*/ access_masked_parent1 = access_masked_parent2 = - get_handled_fs_accesses(domain); + landlock_union_access_masks(domain).fs; is_dom_check = true; } else { if (WARN_ON_ONCE(dentry_child1 || dentry_child2)) @@ -826,7 +838,6 @@ static bool is_access_to_paths_allowed( * restriction. */ while (true) { - struct dentry *parent_dentry; const struct landlock_rule *rule; /* @@ -847,15 +858,6 @@ static bool is_access_to_paths_allowed( child1_is_directory, layer_masks_parent2, layer_masks_child2, child2_is_directory))) { - allowed_parent1 = scope_to_request( - access_request_parent1, layer_masks_parent1); - allowed_parent2 = scope_to_request( - access_request_parent2, layer_masks_parent2); - - /* Stops when all accesses are granted. */ - if (allowed_parent1 && allowed_parent2) - break; - /* * Now, downgrades the remaining checks from domain * handled accesses to requested accesses. @@ -863,19 +865,37 @@ static bool is_access_to_paths_allowed( is_dom_check = false; access_masked_parent1 = access_request_parent1; access_masked_parent2 = access_request_parent2; + + allowed_parent1 = + allowed_parent1 || + scope_to_request(access_masked_parent1, + layer_masks_parent1); + allowed_parent2 = + allowed_parent2 || + scope_to_request(access_masked_parent2, + layer_masks_parent2); + + /* Stops when all accesses are granted. */ + if (allowed_parent1 && allowed_parent2) + break; } rule = find_rule(domain, walker_path.dentry); - allowed_parent1 = landlock_unmask_layers( - rule, access_masked_parent1, layer_masks_parent1, - ARRAY_SIZE(*layer_masks_parent1)); - allowed_parent2 = landlock_unmask_layers( - rule, access_masked_parent2, layer_masks_parent2, - ARRAY_SIZE(*layer_masks_parent2)); + allowed_parent1 = allowed_parent1 || + landlock_unmask_layers( + rule, access_masked_parent1, + layer_masks_parent1, + ARRAY_SIZE(*layer_masks_parent1)); + allowed_parent2 = allowed_parent2 || + landlock_unmask_layers( + rule, access_masked_parent2, + layer_masks_parent2, + ARRAY_SIZE(*layer_masks_parent2)); /* Stops when a rule from each layer grants access. */ if (allowed_parent1 && allowed_parent2) break; + jump_up: if (walker_path.dentry == walker_path.mnt->mnt_root) { if (follow_up(&walker_path)) { @@ -889,58 +909,89 @@ jump_up: break; } } + if (unlikely(IS_ROOT(walker_path.dentry))) { + if (likely(walker_path.mnt->mnt_flags & MNT_INTERNAL)) { + /* + * Stops and allows access when reaching disconnected root + * directories that are part of internal filesystems (e.g. nsfs, + * which is reachable through /proc/<pid>/ns/<namespace>). + */ + allowed_parent1 = true; + allowed_parent2 = true; + break; + } + /* - * Stops at disconnected root directories. Only allows - * access to internal filesystems (e.g. nsfs, which is - * reachable through /proc/<pid>/ns/<namespace>). + * We reached a disconnected root directory from a bind mount. + * Let's continue the walk with the mount point we missed. 
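
The walk above relies on one bitmask of not-yet-satisfied layers per access right: landlock_init_layer_masks() seeds each mask with the layers that handle that access, landlock_unmask_layers() clears a layer's bit when one of its rules grants the access, and the request is allowed once every mask is empty (is_layer_masks_allowed()). A toy userspace model of that bookkeeping, with made-up sizes and helper names:

#include <stdio.h>
#include <string.h>

#define NUM_ACCESS 4 /* illustrative size, not LANDLOCK_NUM_ACCESS_FS */
typedef unsigned short layer_mask_t;

/* A rule at @layer grants @granted access rights: clear that layer's bit. */
static void unmask_layer(layer_mask_t masks[NUM_ACCESS], const int layer,
			 const unsigned int granted)
{
	for (int bit = 0; bit < NUM_ACCESS; bit++)
		if (granted & (1U << bit))
			masks[bit] &= ~(layer_mask_t)(1U << layer);
}

/* Same idea as is_layer_masks_allowed(): no layer still denies anything. */
static int layer_masks_allowed(const layer_mask_t masks[NUM_ACCESS])
{
	static const layer_mask_t zero[NUM_ACCESS];

	return !memcmp(masks, zero, sizeof(zero));
}

int main(void)
{
	/* Two stacked layers (bits 0 and 1) handle access rights 0 and 1. */
	layer_mask_t masks[NUM_ACCESS] = { 0x3, 0x3, 0, 0 };

	unmask_layer(masks, 0, 0x3); /* a layer-0 rule grants both rights */
	printf("after layer 0: %s\n",
	       layer_masks_allowed(masks) ? "allowed" : "denied");
	unmask_layer(masks, 1, 0x3); /* a layer-1 rule grants both rights */
	printf("after layer 1: %s\n",
	       layer_masks_allowed(masks) ? "allowed" : "denied");
	return 0;
}
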
*/ - allowed_parent1 = allowed_parent2 = - !!(walker_path.mnt->mnt_flags & MNT_INTERNAL); - break; + dput(walker_path.dentry); + walker_path.dentry = walker_path.mnt->mnt_root; + dget(walker_path.dentry); + } else { + struct dentry *const parent_dentry = + dget_parent(walker_path.dentry); + + dput(walker_path.dentry); + walker_path.dentry = parent_dentry; } - parent_dentry = dget_parent(walker_path.dentry); - dput(walker_path.dentry); - walker_path.dentry = parent_dentry; } path_put(&walker_path); + if (!allowed_parent1) { + log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS; + log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH; + log_request_parent1->audit.u.path = *path; + log_request_parent1->access = access_masked_parent1; + log_request_parent1->layer_masks = layer_masks_parent1; + log_request_parent1->layer_masks_size = + ARRAY_SIZE(*layer_masks_parent1); + } + + if (!allowed_parent2) { + log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS; + log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH; + log_request_parent2->audit.u.path = *path; + log_request_parent2->access = access_masked_parent2; + log_request_parent2->layer_masks = layer_masks_parent2; + log_request_parent2->layer_masks_size = + ARRAY_SIZE(*layer_masks_parent2); + } return allowed_parent1 && allowed_parent2; } -static int check_access_path(const struct landlock_ruleset *const domain, - const struct path *const path, - access_mask_t access_request) +static int current_check_access_path(const struct path *const path, + access_mask_t access_request) { + const struct access_masks masks = { + .fs = access_request, + }; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), masks, NULL); layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; + struct landlock_request request = {}; - access_request = landlock_init_layer_masks( - domain, access_request, &layer_masks, LANDLOCK_KEY_INODE); - if (is_access_to_paths_allowed(domain, path, access_request, - &layer_masks, NULL, 0, NULL, NULL)) + if (!subject) return 0; - return -EACCES; -} -static int current_check_access_path(const struct path *const path, - const access_mask_t access_request) -{ - const struct landlock_ruleset *const dom = get_current_fs_domain(); - - if (!dom) + access_request = landlock_init_layer_masks(subject->domain, + access_request, &layer_masks, + LANDLOCK_KEY_INODE); + if (is_access_to_paths_allowed(subject->domain, path, access_request, + &layer_masks, &request, NULL, 0, NULL, + NULL, NULL)) return 0; - return check_access_path(dom, path, access_request); + + landlock_log_denial(subject, &request); + return -EACCES; } -static access_mask_t get_mode_access(const umode_t mode) +static __attribute_const__ access_mask_t get_mode_access(const umode_t mode) { switch (mode & S_IFMT) { case S_IFLNK: return LANDLOCK_ACCESS_FS_MAKE_SYM; - case 0: - /* A zero mode translates to S_IFREG. */ - case S_IFREG: - return LANDLOCK_ACCESS_FS_MAKE_REG; case S_IFDIR: return LANDLOCK_ACCESS_FS_MAKE_DIR; case S_IFCHR: @@ -951,9 +1002,12 @@ static access_mask_t get_mode_access(const umode_t mode) return LANDLOCK_ACCESS_FS_MAKE_FIFO; case S_IFSOCK: return LANDLOCK_ACCESS_FS_MAKE_SOCK; + case S_IFREG: + case 0: + /* A zero mode translates to S_IFREG. */ default: - WARN_ON_ONCE(1); - return 0; + /* Treats weird files as regular files. */ + return LANDLOCK_ACCESS_FS_MAKE_REG; } } @@ -979,6 +1033,9 @@ static access_mask_t maybe_remove(const struct dentry *const dentry) * file. 
While walking from @dir to @mnt_root, we record all the domain's * allowed accesses in @layer_masks_dom. * + * Because of disconnected directories, this walk may not reach @mnt_dir. In + * this case, the walk will continue to @mnt_dir after this call. + * * This is similar to is_access_to_paths_allowed() but much simpler because it * only handles walking on the same mount point and only checks one set of * accesses. @@ -1020,8 +1077,11 @@ static bool collect_domain_accesses( break; } - /* We should not reach a root other than @mnt_root. */ - if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir))) + /* + * Stops at the mount point or the filesystem root for a disconnected + * directory. + */ + if (dir == mnt_root || unlikely(IS_ROOT(dir))) break; parent_dentry = dget_parent(dir); @@ -1090,18 +1150,19 @@ static int current_check_refer_path(struct dentry *const old_dentry, struct dentry *const new_dentry, const bool removable, const bool exchange) { - const struct landlock_ruleset *const dom = get_current_fs_domain(); + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, NULL); bool allow_parent1, allow_parent2; access_mask_t access_request_parent1, access_request_parent2; struct path mnt_dir; struct dentry *old_parent; layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {}, layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {}; + struct landlock_request request1 = {}, request2 = {}; - if (!dom) + if (!subject) return 0; - if (WARN_ON_ONCE(dom->num_layers < 1)) - return -EACCES; + if (unlikely(d_is_negative(old_dentry))) return -ENOENT; if (exchange) { @@ -1126,12 +1187,16 @@ static int current_check_refer_path(struct dentry *const old_dentry, * for same-directory referer (i.e. no reparenting). */ access_request_parent1 = landlock_init_layer_masks( - dom, access_request_parent1 | access_request_parent2, + subject->domain, + access_request_parent1 | access_request_parent2, &layer_masks_parent1, LANDLOCK_KEY_INODE); - if (is_access_to_paths_allowed( - dom, new_dir, access_request_parent1, - &layer_masks_parent1, NULL, 0, NULL, NULL)) + if (is_access_to_paths_allowed(subject->domain, new_dir, + access_request_parent1, + &layer_masks_parent1, &request1, + NULL, 0, NULL, NULL, NULL)) return 0; + + landlock_log_denial(subject, &request1); return -EACCES; } @@ -1152,10 +1217,12 @@ static int current_check_refer_path(struct dentry *const old_dentry, old_dentry->d_parent; /* new_dir->dentry is equal to new_dentry->d_parent */ - allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent, + allow_parent1 = collect_domain_accesses(subject->domain, mnt_dir.dentry, + old_parent, &layer_masks_parent1); - allow_parent2 = collect_domain_accesses( - dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2); + allow_parent2 = collect_domain_accesses(subject->domain, mnt_dir.dentry, + new_dir->dentry, + &layer_masks_parent2); if (allow_parent1 && allow_parent2) return 0; @@ -1167,11 +1234,21 @@ static int current_check_refer_path(struct dentry *const old_dentry, * destination parent access rights. */ if (is_access_to_paths_allowed( - dom, &mnt_dir, access_request_parent1, &layer_masks_parent1, - old_dentry, access_request_parent2, &layer_masks_parent2, + subject->domain, &mnt_dir, access_request_parent1, + &layer_masks_parent1, &request1, old_dentry, + access_request_parent2, &layer_masks_parent2, &request2, exchange ? 
new_dentry : NULL)) return 0; + if (request1.access) { + request1.audit.u.path.dentry = old_parent; + landlock_log_denial(subject, &request1); + } + if (request2.access) { + request2.audit.u.path.dentry = new_dir->dentry; + landlock_log_denial(subject, &request2); + } + /* * This prioritizes EACCES over EXDEV for all actions, including * renames with RENAME_EXCHANGE. @@ -1208,7 +1285,7 @@ static void hook_inode_free_security_rcu(void *inode_security) /* * Release the inodes used in a security policy. * - * Cf. fsnotify_unmount_inodes() and invalidate_inodes() + * Cf. fsnotify_unmount_inodes() and evict_inodes() */ static void hook_sb_delete(struct super_block *const sb) { @@ -1222,7 +1299,7 @@ static void hook_sb_delete(struct super_block *const sb) struct landlock_object *object; /* Only handles referenced inodes. */ - if (!atomic_read(&inode->i_count)) + if (!icount_read(inode)) continue; /* @@ -1237,7 +1314,7 @@ static void hook_sb_delete(struct super_block *const sb) * second call to iput() for the same Landlock object. Also * checks I_NEW because such inode cannot be tied to an object. */ - if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { spin_unlock(&inode->i_lock); continue; } @@ -1276,11 +1353,10 @@ static void hook_sb_delete(struct super_block *const sb) * At this point, we own the ihold() reference that was * originally set up by get_inode_object() and the * __iget() reference that we just set in this loop - * walk. Therefore the following call to iput() will - * not sleep nor drop the inode because there is now at - * least two references to it. + * walk. Therefore there are at least two references + * on the inode. */ - iput(inode); + iput_not_last(inode); } else { spin_unlock(&object->lock); rcu_read_unlock(); @@ -1314,6 +1390,34 @@ static void hook_sb_delete(struct super_block *const sb) !atomic_long_read(&landlock_superblock(sb)->inode_refs)); } +static void +log_fs_change_topology_path(const struct landlock_cred_security *const subject, + size_t handle_layer, const struct path *const path) +{ + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + .audit = { + .type = LSM_AUDIT_DATA_PATH, + .u.path = *path, + }, + .layer_plus_one = handle_layer + 1, + }); +} + +static void log_fs_change_topology_dentry( + const struct landlock_cred_security *const subject, size_t handle_layer, + struct dentry *const dentry) +{ + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + .audit = { + .type = LSM_AUDIT_DATA_DENTRY, + .u.dentry = dentry, + }, + .layer_plus_one = handle_layer + 1, + }); +} + /* * Because a Landlock security policy is defined according to the filesystem * topology (i.e. 
the mount namespace), changing it may grant access to files @@ -1336,16 +1440,30 @@ static int hook_sb_mount(const char *const dev_name, const struct path *const path, const char *const type, const unsigned long flags, void *const data) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, path); return -EPERM; } static int hook_move_mount(const struct path *const from_path, const struct path *const to_path) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, to_path); return -EPERM; } @@ -1355,15 +1473,29 @@ static int hook_move_mount(const struct path *const from_path, */ static int hook_sb_umount(struct vfsmount *const mnt, const int flags) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_dentry(subject, handle_layer, mnt->mnt_root); return -EPERM; } static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_dentry(subject, handle_layer, sb->s_root); return -EPERM; } @@ -1378,8 +1510,15 @@ static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts) static int hook_sb_pivotroot(const struct path *const old_path, const struct path *const new_path) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, new_path); return -EPERM; } @@ -1414,11 +1553,7 @@ static int hook_path_mknod(const struct path *const dir, struct dentry *const dentry, const umode_t mode, const unsigned int dev) { - const struct landlock_ruleset *const dom = get_current_fs_domain(); - - if (!dom) - return 0; - return check_access_path(dom, dir, get_mode_access(mode)); + return current_check_access_path(dir, get_mode_access(mode)); } static int hook_path_symlink(const struct path *const dir, @@ -1500,11 +1635,11 @@ static int hook_file_open(struct file *const file) layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; access_mask_t open_access_request, full_access_request, allowed_access, optional_access; - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain( - landlock_cred(file->f_cred)->domain, any_fs); + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(file->f_cred, any_fs, NULL); + struct landlock_request request = {}; - if (!dom) + if (!subject) return 0; /* @@ -1525,10 +1660,11 @@ static int hook_file_open(struct file *const file) full_access_request = open_access_request | optional_access; if (is_access_to_paths_allowed( - dom, &file->f_path, - landlock_init_layer_masks(dom, full_access_request, - &layer_masks, LANDLOCK_KEY_INODE), - &layer_masks, 
NULL, 0, NULL, NULL)) { + subject->domain, &file->f_path, + landlock_init_layer_masks(subject->domain, + full_access_request, &layer_masks, + LANDLOCK_KEY_INODE), + &layer_masks, &request, NULL, 0, NULL, NULL, NULL)) { allowed_access = full_access_request; } else { unsigned long access_bit; @@ -1554,10 +1690,18 @@ static int hook_file_open(struct file *const file) * file access rights in the opened struct file. */ landlock_file(file)->allowed_access = allowed_access; +#ifdef CONFIG_AUDIT + landlock_file(file)->deny_masks = landlock_get_deny_masks( + _LANDLOCK_ACCESS_FS_OPTIONAL, optional_access, &layer_masks, + ARRAY_SIZE(layer_masks)); +#endif /* CONFIG_AUDIT */ if ((open_access_request & allowed_access) == open_access_request) return 0; + /* Sets access to reflect the actual request. */ + request.access = open_access_request; + landlock_log_denial(subject, &request); return -EACCES; } @@ -1575,11 +1719,24 @@ static int hook_file_truncate(struct file *const file) */ if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE) return 0; + + landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_ACCESS, + .audit = { + .type = LSM_AUDIT_DATA_FILE, + .u.file = file, + }, + .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL, + .access = LANDLOCK_ACCESS_FS_TRUNCATE, +#ifdef CONFIG_AUDIT + .deny_masks = landlock_file(file)->deny_masks, +#endif /* CONFIG_AUDIT */ + }); return -EACCES; } -static int hook_file_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static int hook_file_ioctl_common(const struct file *const file, + const unsigned int cmd, const bool is_compat) { access_mask_t allowed_access = landlock_file(file)->allowed_access; @@ -1595,56 +1752,98 @@ static int hook_file_ioctl(struct file *file, unsigned int cmd, if (!is_device(file)) return 0; - if (is_masked_device_ioctl(cmd)) + if (unlikely(is_compat) ? is_masked_device_ioctl_compat(cmd) : + is_masked_device_ioctl(cmd)) return 0; + landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_ACCESS, + .audit = { + .type = LSM_AUDIT_DATA_IOCTL_OP, + .u.op = &(struct lsm_ioctlop_audit) { + .path = file->f_path, + .cmd = cmd, + }, + }, + .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL, + .access = LANDLOCK_ACCESS_FS_IOCTL_DEV, +#ifdef CONFIG_AUDIT + .deny_masks = landlock_file(file)->deny_masks, +#endif /* CONFIG_AUDIT */ + }); return -EACCES; } +static int hook_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return hook_file_ioctl_common(file, cmd, false); +} + static int hook_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { - access_mask_t allowed_access = landlock_file(file)->allowed_access; + return hook_file_ioctl_common(file, cmd, true); +} + +/* + * Always allow sending signals between threads of the same process. This + * ensures consistency with hook_task_kill(). + */ +static bool control_current_fowner(struct fown_struct *const fown) +{ + struct task_struct *p; /* - * It is the access rights at the time of opening the file which - * determine whether IOCTL can be used on the opened file later. - * - * The access right is attached to the opened file in hook_file_open(). + * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix + * file_set_fowner LSM hook inconsistencies"). 
*/ - if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV) - return 0; - - if (!is_device(file)) - return 0; + lockdep_assert_held(&fown->lock); - if (is_masked_device_ioctl_compat(cmd)) - return 0; + /* + * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side + * critical section. + */ + guard(rcu)(); + p = pid_task(fown->pid, fown->pid_type); + if (!p) + return true; - return -EACCES; + return !same_thread_group(p, current); } static void hook_file_set_fowner(struct file *file) { - struct landlock_ruleset *new_dom, *prev_dom; + struct landlock_ruleset *prev_dom; + struct landlock_cred_security fown_subject = {}; + size_t fown_layer = 0; + + if (control_current_fowner(file_f_owner(file))) { + static const struct access_masks signal_scope = { + .scope = LANDLOCK_SCOPE_SIGNAL, + }; + const struct landlock_cred_security *new_subject = + landlock_get_applicable_subject( + current_cred(), signal_scope, &fown_layer); + if (new_subject) { + landlock_get_ruleset(new_subject->domain); + fown_subject = *new_subject; + } + } - /* - * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix - * file_set_fowner LSM hook inconsistencies"). - */ - lockdep_assert_held(&file_f_owner(file)->lock); - new_dom = landlock_get_current_domain(); - landlock_get_ruleset(new_dom); - prev_dom = landlock_file(file)->fown_domain; - landlock_file(file)->fown_domain = new_dom; + prev_dom = landlock_file(file)->fown_subject.domain; + landlock_file(file)->fown_subject = fown_subject; +#ifdef CONFIG_AUDIT + landlock_file(file)->fown_layer = fown_layer; +#endif /* CONFIG_AUDIT*/ - /* Called in an RCU read-side critical section. */ + /* May be called in an RCU read-side critical section. */ landlock_put_ruleset_deferred(prev_dom); } static void hook_file_free_security(struct file *file) { - landlock_put_ruleset_deferred(landlock_file(file)->fown_domain); + landlock_put_ruleset_deferred(landlock_file(file)->fown_subject.domain); } static struct security_hook_list landlock_hooks[] __ro_after_init = { diff --git a/security/landlock/fs.h b/security/landlock/fs.h index 1487e1f023a1..bf9948941f2f 100644 --- a/security/landlock/fs.h +++ b/security/landlock/fs.h @@ -1,18 +1,22 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Filesystem management and hooks + * Landlock - Filesystem management and hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_FS_H #define _SECURITY_LANDLOCK_FS_H +#include <linux/build_bug.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/rcupdate.h> +#include "access.h" +#include "cred.h" #include "ruleset.h" #include "setup.h" @@ -52,15 +56,40 @@ struct landlock_file_security { * needed to authorize later operations on the open file. */ access_mask_t allowed_access; + +#ifdef CONFIG_AUDIT + /** + * @deny_masks: Domain layer levels that deny an optional access (see + * _LANDLOCK_ACCESS_FS_OPTIONAL). + */ + deny_masks_t deny_masks; /** - * @fown_domain: Domain of the task that set the PID that may receive a - * signal e.g., SIGURG when writing MSG_OOB to the related socket. - * This pointer is protected by the related file->f_owner->lock, as for - * fown_struct's members: pid, uid, and euid. + * @fown_layer: Layer level of @fown_subject->domain with + * LANDLOCK_SCOPE_SIGNAL. 
*/ - struct landlock_ruleset *fown_domain; + u8 fown_layer; +#endif /* CONFIG_AUDIT */ + + /** + * @fown_subject: Landlock credential of the task that set the PID that + * may receive a signal e.g., SIGURG when writing MSG_OOB to the + * related socket. This pointer is protected by the related + * file->f_owner->lock, as for fown_struct's members: pid, uid, and + * euid. + */ + struct landlock_cred_security fown_subject; }; +#ifdef CONFIG_AUDIT + +/* Makes sure all layers can be identified. */ +/* clang-format off */ +static_assert((typeof_member(struct landlock_file_security, fown_layer))~0 >= + LANDLOCK_MAX_NUM_LAYERS); +/* clang-format off */ + +#endif /* CONFIG_AUDIT */ + /** * struct landlock_superblock_security - Superblock security blob * diff --git a/security/landlock/id.c b/security/landlock/id.c new file mode 100644 index 000000000000..838c3ed7bb82 --- /dev/null +++ b/security/landlock/id.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Unique identification number generator + * + * Copyright © 2024-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/random.h> +#include <linux/spinlock.h> + +#include "common.h" +#include "id.h" + +#define COUNTER_PRE_INIT 0 + +static atomic64_t next_id = ATOMIC64_INIT(COUNTER_PRE_INIT); + +static void __init init_id(atomic64_t *const counter, const u32 random_32bits) +{ + u64 init; + + /* + * Ensures sure 64-bit values are always used by user space (or may + * fail with -EOVERFLOW), and makes this testable. + */ + init = BIT_ULL(32); + + /* + * Makes a large (2^32) boot-time value to limit ID collision in logs + * from different boots, and to limit info leak about the number of + * initially (relative to the reader) created elements (e.g. domains). + */ + init += random_32bits; + + /* Sets first or ignores. This will be the first ID. */ + atomic64_cmpxchg(counter, COUNTER_PRE_INIT, init); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void __init test_init_min(struct kunit *const test) +{ + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, 0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1ULL + U32_MAX); +} + +static void __init test_init_max(struct kunit *const test) +{ + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, ~0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1 + (2ULL * U32_MAX)); +} + +static void __init test_init_once(struct kunit *const test) +{ + const u64 first_init = 1ULL + U32_MAX; + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, 0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), first_init); + + init_id(&counter, ~0); + KUNIT_EXPECT_EQ_MSG( + test, atomic64_read(&counter), first_init, + "Should still have the same value after the subsequent init_id()"); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +void __init landlock_init_id(void) +{ + return init_id(&next_id, get_random_u32()); +} + +/* + * It's not worth it to try to hide the monotonic counter because it can still + * be inferred (with N counter ranges), and if we are allowed to read the inode + * number we should also be allowed to read the time creation anyway, and it + * can be handy to store and sort domain IDs for user space. + * + * Returns the value of next_id and increment it to let some space for the next + * one. 
+ */ +static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter, + u8 random_4bits) +{ + u64 id, step; + + /* + * We should return at least 1 ID, and we may need a set of consecutive + * ones (e.g. to generate a set of inodes). + */ + if (WARN_ON_ONCE(number_of_ids <= 0)) + number_of_ids = 1; + + /* + * Blurs the next ID guess with 1/16 ratio. We get 2^(64 - 4) - + * (2 * 2^32), so a bit less than 2^60 available IDs, which should be + * much more than enough considering the number of CPU cycles required + * to get a new ID (e.g. a full landlock_restrict_self() call), and the + * cost of draining all available IDs during the system's uptime. + */ + random_4bits &= 0b1111; + step = number_of_ids + random_4bits; + + /* It is safe to cast a signed atomic to an unsigned value. */ + id = atomic64_fetch_add(step, counter); + + /* Warns if landlock_init_id() was not called. */ + WARN_ON_ONCE(id == COUNTER_PRE_INIT); + return id; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static u8 get_random_u8_positive(void) +{ + /* max() evaluates its arguments once. */ + return max(1, get_random_u8()); +} + +static void test_range1_rand0(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 0), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 1); +} + +static void test_range1_rand1(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 1), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 2); +} + +static void test_range1_rand15(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 15), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 16); +} + +static void test_range1_rand16(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 16), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 1); +} + +static void test_range2_rand0(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 0), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 2); +} + +static void test_range2_rand1(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 1), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 3); +} + +static void test_range2_rand2(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 2), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 4); +} + +static void test_range2_rand15(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = 
get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 15), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 17); +} + +static void test_range2_rand16(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 16), init); + KUNIT_EXPECT_EQ(test, + get_id_range(get_random_u8_positive(), &counter, + get_random_u8()), + init + 2); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +/** + * landlock_get_id_range - Get a range of unique IDs + * + * @number_of_ids: Number of IDs to hold. Must be greater than one. + * + * Returns: The first ID in the range. + */ +u64 landlock_get_id_range(size_t number_of_ids) +{ + return get_id_range(number_of_ids, &next_id, get_random_u8()); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case __refdata test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_init_min), + KUNIT_CASE(test_init_max), + KUNIT_CASE(test_init_once), + KUNIT_CASE(test_range1_rand0), + KUNIT_CASE(test_range1_rand1), + KUNIT_CASE(test_range1_rand15), + KUNIT_CASE(test_range1_rand16), + KUNIT_CASE(test_range2_rand0), + KUNIT_CASE(test_range2_rand1), + KUNIT_CASE(test_range2_rand2), + KUNIT_CASE(test_range2_rand15), + KUNIT_CASE(test_range2_rand16), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_id", + .test_cases = test_cases, +}; + +kunit_test_init_section_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ diff --git a/security/landlock/id.h b/security/landlock/id.h new file mode 100644 index 000000000000..45dcfb9e9a8b --- /dev/null +++ b/security/landlock/id.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Unique identification number generator + * + * Copyright © 2024-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ID_H +#define _SECURITY_LANDLOCK_ID_H + +#ifdef CONFIG_AUDIT + +void __init landlock_init_id(void); + +u64 landlock_get_id_range(size_t number_of_ids); + +#else /* CONFIG_AUDIT */ + +static inline void __init landlock_init_id(void) +{ +} + +#endif /* CONFIG_AUDIT */ + +#endif /* _SECURITY_LANDLOCK_ID_H */ diff --git a/security/landlock/limits.h b/security/landlock/limits.h index 15f7606066c8..65b5ff051674 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -1,9 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Limits for different components + * Landlock - Limits for different components * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_LIMITS_H @@ -29,6 +30,10 @@ #define LANDLOCK_LAST_SCOPE LANDLOCK_SCOPE_SIGNAL #define LANDLOCK_MASK_SCOPE ((LANDLOCK_LAST_SCOPE << 1) - 1) #define LANDLOCK_NUM_SCOPE __const_hweight64(LANDLOCK_MASK_SCOPE) + +#define LANDLOCK_LAST_RESTRICT_SELF LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF +#define LANDLOCK_MASK_RESTRICT_SELF ((LANDLOCK_LAST_RESTRICT_SELF << 1) - 1) + /* clang-format on */ #endif /* _SECURITY_LANDLOCK_LIMITS_H */ diff --git a/security/landlock/net.c b/security/landlock/net.c index d5dcc4407a19..1f3915a90a80 100644 --- a/security/landlock/net.c +++ b/security/landlock/net.c @@ -1,16 +1,18 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Network management and hooks + * Landlock - 
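
get_id_range() reserves number_of_ids consecutive values and then pads the step with a 0-15 random offset, so two observed IDs do not reveal exactly how many objects were created in between; the KUnit cases above check those step sizes. A standalone sketch of the same arithmetic (the next_id/get_id_range names are reused here purely for illustration, outside the kernel, and the boot-time randomization is omitted):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint64_t next_id = (1ULL << 32); /* boot-time init, no random */

/* Reserve @number_of_ids consecutive IDs, then skip 0-15 extra values. */
static uint64_t get_id_range(size_t number_of_ids, uint8_t random_4bits)
{
	const uint64_t step = number_of_ids + (random_4bits & 0xf);

	return atomic_fetch_add(&next_id, step);
}

int main(void)
{
	const uint64_t first = get_id_range(2, rand() & 0xf);
	const uint64_t second = get_id_range(1, rand() & 0xf);

	/*
	 * The two reserved IDs are first and first + 1; the next caller gets
	 * an ID at least 2 and at most 17 greater than first.
	 */
	printf("first=%llu second=%llu gap=%llu\n",
	       (unsigned long long)first, (unsigned long long)second,
	       (unsigned long long)(second - first));
	return 0;
}
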
Network management and hooks * * Copyright © 2022-2023 Huawei Tech. Co., Ltd. - * Copyright © 2022-2023 Microsoft Corporation + * Copyright © 2022-2025 Microsoft Corporation */ #include <linux/in.h> +#include <linux/lsm_audit.h> #include <linux/net.h> #include <linux/socket.h> #include <net/ipv6.h> +#include "audit.h" #include "common.h" #include "cred.h" #include "limits.h" @@ -39,10 +41,6 @@ int landlock_append_net_rule(struct landlock_ruleset *const ruleset, return err; } -static const struct access_masks any_net = { - .net = ~0, -}; - static int current_check_access_socket(struct socket *const sock, struct sockaddr *const address, const int addrlen, @@ -54,17 +52,17 @@ static int current_check_access_socket(struct socket *const sock, struct landlock_id id = { .type = LANDLOCK_KEY_NET_PORT, }; - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - any_net); + const struct access_masks masks = { + .net = access_request, + }; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), masks, NULL); + struct lsm_network_audit audit_net = {}; - if (!dom) + if (!subject) return 0; - if (WARN_ON_ONCE(dom->num_layers < 1)) - return -EACCES; - /* Checks if it's a (potential) TCP socket. */ - if (sock->type != SOCK_STREAM) + if (!sk_is_tcp(sock->sk)) return 0; /* Checks for minimal header length to safely read sa_family. */ @@ -73,18 +71,48 @@ static int current_check_access_socket(struct socket *const sock, switch (address->sa_family) { case AF_UNSPEC: - case AF_INET: + case AF_INET: { + const struct sockaddr_in *addr4; + if (addrlen < sizeof(struct sockaddr_in)) return -EINVAL; - port = ((struct sockaddr_in *)address)->sin_port; + + addr4 = (struct sockaddr_in *)address; + port = addr4->sin_port; + + if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) { + audit_net.dport = port; + audit_net.v4info.daddr = addr4->sin_addr.s_addr; + } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) { + audit_net.sport = port; + audit_net.v4info.saddr = addr4->sin_addr.s_addr; + } else { + WARN_ON_ONCE(1); + } break; + } #if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: + case AF_INET6: { + const struct sockaddr_in6 *addr6; + if (addrlen < SIN6_LEN_RFC2133) return -EINVAL; - port = ((struct sockaddr_in6 *)address)->sin6_port; + + addr6 = (struct sockaddr_in6 *)address; + port = addr6->sin6_port; + + if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) { + audit_net.dport = port; + audit_net.v6info.daddr = addr6->sin6_addr; + } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) { + audit_net.sport = port; + audit_net.v6info.saddr = addr6->sin6_addr; + } else { + WARN_ON_ONCE(1); + } break; + } #endif /* IS_ENABLED(CONFIG_IPV6) */ default: @@ -146,13 +174,24 @@ static int current_check_access_socket(struct socket *const sock, id.key.data = (__force uintptr_t)port; BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data)); - rule = landlock_find_rule(dom, id); - access_request = landlock_init_layer_masks( - dom, access_request, &layer_masks, LANDLOCK_KEY_NET_PORT); + rule = landlock_find_rule(subject->domain, id); + access_request = landlock_init_layer_masks(subject->domain, + access_request, &layer_masks, + LANDLOCK_KEY_NET_PORT); if (landlock_unmask_layers(rule, access_request, &layer_masks, ARRAY_SIZE(layer_masks))) return 0; + audit_net.family = address->sa_family; + landlock_log_denial(subject, + &(struct landlock_request){ + .type = LANDLOCK_REQUEST_NET_ACCESS, + .audit.type = LSM_AUDIT_DATA_NET, + 
.audit.u.net = &audit_net, + .access = access_request, + .layer_masks = &layer_masks, + .layer_masks_size = ARRAY_SIZE(layer_masks), + }); return -EACCES; } diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index a93bdbf52fff..dfcdc19ea268 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -8,11 +8,13 @@ #include <linux/bits.h> #include <linux/bug.h> +#include <linux/cleanup.h> #include <linux/compiler_types.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/lockdep.h> +#include <linux/mutex.h> #include <linux/overflow.h> #include <linux/rbtree.h> #include <linux/refcount.h> @@ -20,6 +22,9 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> +#include "access.h" +#include "audit.h" +#include "domain.h" #include "limits.h" #include "object.h" #include "ruleset.h" @@ -78,6 +83,10 @@ static void build_check_rule(void) .num_layers = ~0, }; + /* + * Checks that .num_layers is large enough for at least + * LANDLOCK_MAX_NUM_LAYERS layers. + */ BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS); } @@ -121,7 +130,7 @@ create_rule(const struct landlock_id id, return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); if (is_object_pointer(id.type)) { - /* This should be catched by insert_rule(). */ + /* This should have been caught by insert_rule(). */ WARN_ON_ONCE(!id.key.object); landlock_get_object(id.key.object); } @@ -285,6 +294,10 @@ static void build_check_layer(void) .access = ~0, }; + /* + * Checks that .level and .access are large enough to contain their expected + * maximum values. + */ BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS); BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS); } @@ -304,22 +317,6 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset, return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers)); } -static void get_hierarchy(struct landlock_hierarchy *const hierarchy) -{ - if (hierarchy) - refcount_inc(&hierarchy->usage); -} - -static void put_hierarchy(struct landlock_hierarchy *hierarchy) -{ - while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { - const struct landlock_hierarchy *const freeme = hierarchy; - - hierarchy = hierarchy->parent; - kfree(freeme); - } -} - static int merge_tree(struct landlock_ruleset *const dst, struct landlock_ruleset *const src, const enum landlock_key_type key_type) @@ -384,7 +381,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst, err = -EINVAL; goto out_unlock; } - dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; + dst->access_masks[dst->num_layers - 1] = + landlock_upgrade_handled_access_masks(src->access_masks[0]); /* Merges the @src inode tree. */ err = merge_tree(dst, src, LANDLOCK_KEY_INODE); @@ -473,7 +471,7 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, err = -EINVAL; goto out_unlock; } - get_hierarchy(parent->hierarchy); + landlock_get_hierarchy(parent->hierarchy); child->hierarchy->parent = parent->hierarchy; out_unlock: @@ -497,7 +495,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) free_rule(freeme, LANDLOCK_KEY_NET_PORT); #endif /* IS_ENABLED(CONFIG_INET) */ - put_hierarchy(ruleset->hierarchy); + landlock_put_hierarchy(ruleset->hierarchy); kfree(ruleset); } @@ -516,6 +514,7 @@ static void free_ruleset_work(struct work_struct *const work) free_ruleset(ruleset); } +/* Only called by hook_cred_free(). 
*/ void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) { if (ruleset && refcount_dec_and_test(&ruleset->usage)) { @@ -530,6 +529,9 @@ void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) * @parent: Parent domain. * @ruleset: New ruleset to be merged. * + * The current task is requesting to be restricted. The subjective credentials + * must not be in an overridden state. cf. landlock_init_hierarchy_log(). + * * Returns the intersection of @parent and @ruleset, or returns @parent if * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty. */ @@ -537,7 +539,7 @@ struct landlock_ruleset * landlock_merge_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const ruleset) { - struct landlock_ruleset *new_dom; + struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL; u32 num_layers; int err; @@ -557,29 +559,29 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent, new_dom = create_ruleset(num_layers); if (IS_ERR(new_dom)) return new_dom; + new_dom->hierarchy = kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT); - if (!new_dom->hierarchy) { - err = -ENOMEM; - goto out_put_dom; - } + if (!new_dom->hierarchy) + return ERR_PTR(-ENOMEM); + refcount_set(&new_dom->hierarchy->usage, 1); /* ...as a child of @parent... */ err = inherit_ruleset(parent, new_dom); if (err) - goto out_put_dom; + return ERR_PTR(err); /* ...and including @ruleset. */ err = merge_ruleset(new_dom, ruleset); if (err) - goto out_put_dom; + return ERR_PTR(err); - return new_dom; + err = landlock_init_hierarchy_log(new_dom->hierarchy); + if (err) + return ERR_PTR(err); -out_put_dom: - landlock_put_ruleset(new_dom); - return ERR_PTR(err); + return no_free_ptr(new_dom); } /* @@ -650,8 +652,8 @@ bool landlock_unmask_layers(const struct landlock_rule *const rule, bool is_empty; /* - * Records in @layer_masks which layer grants access to each - * requested access. + * Records in @layer_masks which layer grants access to each requested + * access: bit cleared if the related layer grants access. */ is_empty = true; for_each_set_bit(access_bit, &access_req, masks_array_size) { diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 631e24d4ffe9..1a78cba662b2 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -9,64 +9,25 @@ #ifndef _SECURITY_LANDLOCK_RULESET_H #define _SECURITY_LANDLOCK_RULESET_H -#include <linux/bitops.h> -#include <linux/build_bug.h> -#include <linux/kernel.h> +#include <linux/cleanup.h> +#include <linux/err.h> #include <linux/mutex.h> #include <linux/rbtree.h> #include <linux/refcount.h> #include <linux/workqueue.h> -#include <uapi/linux/landlock.h> +#include "access.h" #include "limits.h" #include "object.h" -/* - * All access rights that are denied by default whether they are handled or not - * by a ruleset/layer. This must be ORed with all ruleset->access_masks[] - * entries when we need to get the absolute handled access masks. - */ -/* clang-format off */ -#define LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \ - LANDLOCK_ACCESS_FS_REFER) -/* clang-format on */ - -typedef u16 access_mask_t; -/* Makes sure all filesystem access rights can be stored. */ -static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS); -/* Makes sure all network access rights can be stored. */ -static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET); -/* Makes sure all scoped rights can be stored. 
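
The reworked landlock_merge_ruleset() above (and the syscall paths later in this series) drop their manual out_put_dom-style error labels in favor of scope-based cleanup: the ruleset tagged __free(landlock_put_ruleset) is released automatically on every early return, and no_free_ptr() hands ownership to the caller on success. A userspace analog of that pattern using the compiler's cleanup attribute; buf_free() and make_greeting() are made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void buf_free(char **buf)
{
	free(*buf);
}

static char *make_greeting(const char *name)
{
	/* Released on every return path, like __free(landlock_put_ruleset). */
	char *buf __attribute__((cleanup(buf_free))) = malloc(64);

	if (!buf)
		return NULL;
	if (strlen(name) > 32)
		return NULL; /* freed by buf_free(), no goto needed */

	snprintf(buf, 64, "Hello, %s!", name);

	/* Transfer ownership to the caller, like no_free_ptr(). */
	char *ret = buf;

	buf = NULL;
	return ret;
}

int main(void)
{
	char *greeting = make_greeting("Landlock");

	if (greeting)
		puts(greeting);
	free(greeting);
	return 0;
}
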
*/ -static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE); -/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */ -static_assert(sizeof(unsigned long) >= sizeof(access_mask_t)); - -/* Ruleset access masks. */ -struct access_masks { - access_mask_t fs : LANDLOCK_NUM_ACCESS_FS; - access_mask_t net : LANDLOCK_NUM_ACCESS_NET; - access_mask_t scope : LANDLOCK_NUM_SCOPE; -}; - -union access_masks_all { - struct access_masks masks; - u32 all; -}; - -/* Makes sure all fields are covered. */ -static_assert(sizeof(typeof_member(union access_masks_all, masks)) == - sizeof(typeof_member(union access_masks_all, all))); - -typedef u16 layer_mask_t; -/* Makes sure all layers can be checked. */ -static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS); +struct landlock_hierarchy; /** * struct landlock_layer - Access rights for a given layer */ struct landlock_layer { /** - * @level: Position of this layer in the layer stack. + * @level: Position of this layer in the layer stack. Starts from 1. */ u16 level; /** @@ -150,22 +111,6 @@ struct landlock_rule { }; /** - * struct landlock_hierarchy - Node in a ruleset hierarchy - */ -struct landlock_hierarchy { - /** - * @parent: Pointer to the parent node, or NULL if it is a root - * Landlock domain. - */ - struct landlock_hierarchy *parent; - /** - * @usage: Number of potential children domains plus their parent - * domain. - */ - refcount_t usage; -}; - -/** * struct landlock_ruleset - Landlock ruleset * * This data structure must contain unique entries, be updatable, and quick to @@ -252,6 +197,9 @@ landlock_create_ruleset(const access_mask_t access_mask_fs, void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); +DEFINE_FREE(landlock_put_ruleset, struct landlock_ruleset *, + if (!IS_ERR_OR_NULL(_T)) landlock_put_ruleset(_T)) + int landlock_insert_rule(struct landlock_ruleset *const ruleset, const struct landlock_id id, const access_mask_t access); @@ -295,36 +243,6 @@ landlock_union_access_masks(const struct landlock_ruleset *const domain) return matches.masks; } -/** - * landlock_get_applicable_domain - Return @domain if it applies to (handles) - * at least one of the access rights specified - * in @masks - * - * @domain: Landlock ruleset (used as a domain) - * @masks: access masks - * - * Returns: @domain if any access rights specified in @masks is handled, or - * NULL otherwise. - */ -static inline const struct landlock_ruleset * -landlock_get_applicable_domain(const struct landlock_ruleset *const domain, - const struct access_masks masks) -{ - const union access_masks_all masks_all = { - .masks = masks, - }; - union access_masks_all merge = {}; - - if (!domain) - return NULL; - - merge.masks = landlock_union_access_masks(domain); - if (merge.all & masks_all.all) - return domain; - - return NULL; -} - static inline void landlock_add_fs_access_mask(struct landlock_ruleset *const ruleset, const access_mask_t fs_access_mask, @@ -366,7 +284,7 @@ landlock_get_fs_access_mask(const struct landlock_ruleset *const ruleset, { /* Handles all initially denied by default access rights. 
*/ return ruleset->access_masks[layer_level].fs | - LANDLOCK_ACCESS_FS_INITIALLY_DENIED; + _LANDLOCK_ACCESS_FS_INITIALLY_DENIED; } static inline access_mask_t diff --git a/security/landlock/setup.c b/security/landlock/setup.c index 28519a45b11f..47dac1736f10 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -6,19 +6,27 @@ * Copyright © 2018-2020 ANSSI */ +#include <linux/bits.h> #include <linux/init.h> #include <linux/lsm_hooks.h> #include <uapi/linux/lsm.h> #include "common.h" #include "cred.h" +#include "errata.h" #include "fs.h" +#include "id.h" #include "net.h" #include "setup.h" #include "task.h" bool landlock_initialized __ro_after_init = false; +const struct lsm_id landlock_lsmid = { + .name = LANDLOCK_NAME, + .id = LSM_ID_LANDLOCK, +}; + struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_cred = sizeof(struct landlock_cred_security), .lbs_file = sizeof(struct landlock_file_security), @@ -26,24 +34,48 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_superblock = sizeof(struct landlock_superblock_security), }; -const struct lsm_id landlock_lsmid = { - .name = LANDLOCK_NAME, - .id = LSM_ID_LANDLOCK, -}; +int landlock_errata __ro_after_init; + +static void __init compute_errata(void) +{ + size_t i; + +#ifndef __has_include + /* + * This is a safeguard to make sure the compiler implements + * __has_include (see errata.h). + */ + WARN_ON_ONCE(1); + return; +#endif + + for (i = 0; landlock_errata_init[i].number; i++) { + const int prev_errata = landlock_errata; + + if (WARN_ON_ONCE(landlock_errata_init[i].abi > + landlock_abi_version)) + continue; + + landlock_errata |= BIT(landlock_errata_init[i].number - 1); + WARN_ON_ONCE(prev_errata == landlock_errata); + } +} static int __init landlock_init(void) { + compute_errata(); landlock_add_cred_hooks(); landlock_add_task_hooks(); landlock_add_fs_hooks(); landlock_add_net_hooks(); + landlock_init_id(); landlock_initialized = true; pr_info("Up and running.\n"); return 0; } DEFINE_LSM(LANDLOCK_NAME) = { - .name = LANDLOCK_NAME, + .id = &landlock_lsmid, .init = landlock_init, .blobs = &landlock_blob_sizes, }; diff --git a/security/landlock/setup.h b/security/landlock/setup.h index c4252d46d49d..fca307c35fee 100644 --- a/security/landlock/setup.h +++ b/security/landlock/setup.h @@ -11,7 +11,10 @@ #include <linux/lsm_hooks.h> +extern const int landlock_abi_version; + extern bool landlock_initialized; +extern int landlock_errata; extern struct lsm_blob_sizes landlock_blob_sizes; extern const struct lsm_id landlock_lsmid; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 4ed8e70c25ed..0116e9f93ffe 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -1,15 +1,18 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - System call implementations and user space interfaces + * Landlock - System call implementations and user space interfaces * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #include <asm/current.h> #include <linux/anon_inodes.h> +#include <linux/bitops.h> #include <linux/build_bug.h> #include <linux/capability.h> +#include <linux/cleanup.h> #include <linux/compiler_types.h> #include <linux/dcache.h> #include <linux/err.h> @@ -27,6 +30,7 @@ #include <uapi/linux/landlock.h> #include "cred.h" +#include "domain.h" #include "fs.h" #include "limits.h" #include "net.h" @@ -150,7 +154,14 @@ static const struct file_operations 
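
compute_errata() turns the erratum numbers gathered in landlock_errata_init[] into a bitmask by setting bit number - 1 for each applied fix, so erratum 1 maps to bit 0, erratum 2 to bit 1, and so on. With the three errata shipped in this series (1 in abi-4.h, 2 in abi-6.h, 3 in abi-1.h), landlock_errata ends up as 0b111 == 7. A minimal sketch of that loop, with a hand-written table standing in for the generated one:

#include <stdio.h>

struct erratum { int abi; unsigned char number; };

static const struct erratum errata_init[] = {
	{ .abi = 4, .number = 1 }, /* TCP socket identification */
	{ .abi = 6, .number = 2 }, /* scoped signal handling */
	{ .abi = 1, .number = 3 }, /* disconnected directory handling */
	{ 0 }
};

int main(void)
{
	int errata = 0;

	for (size_t i = 0; errata_init[i].number; i++)
		errata |= 1 << (errata_init[i].number - 1);

	printf("errata bitmask: %d\n", errata); /* prints 7 */
	return 0;
}
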
ruleset_fops = { .write = fop_dummy_write, }; -#define LANDLOCK_ABI_VERSION 6 +/* + * The Landlock ABI version should be incremented for each new Landlock-related + * user space visible change (e.g. Landlock syscalls). This version should + * only be incremented once per Linux release, and the date in + * Documentation/userspace-api/landlock.rst should be updated to reflect the + * UAPI change. + */ +const int landlock_abi_version = 7; /** * sys_landlock_create_ruleset - Create a new ruleset @@ -159,14 +170,16 @@ static const struct file_operations ruleset_fops = { * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). - * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. + * @flags: Supported values: + * + * - %LANDLOCK_CREATE_RULESET_VERSION + * - %LANDLOCK_CREATE_RULESET_ERRATA * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. * - * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is - * 0, then the returned value is the highest supported Landlock ABI version - * (starting at 1). + * If %LANDLOCK_CREATE_RULESET_VERSION or %LANDLOCK_CREATE_RULESET_ERRATA is + * set, then @attr must be NULL and @size must be 0. * * Possible returned errors are: * @@ -175,6 +188,9 @@ static const struct file_operations ruleset_fops = { * - %E2BIG: @attr or @size inconsistencies; * - %EFAULT: @attr or @size inconsistencies; * - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs. + * + * .. kernel-doc:: include/uapi/linux/landlock.h + * :identifiers: landlock_create_ruleset_flags */ SYSCALL_DEFINE3(landlock_create_ruleset, const struct landlock_ruleset_attr __user *const, attr, @@ -191,9 +207,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return -EOPNOTSUPP; if (flags) { - if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && - !size) - return LANDLOCK_ABI_VERSION; + if (attr || size) + return -EINVAL; + + if (flags == LANDLOCK_CREATE_RULESET_VERSION) + return landlock_abi_version; + + if (flags == LANDLOCK_CREATE_RULESET_ERRATA) + return landlock_errata; + return -EINVAL; } @@ -281,7 +303,6 @@ static int get_path_from_fd(const s32 fd, struct path *const path) if ((fd_file(f)->f_op == &ruleset_fops) || (fd_file(f)->f_path.mnt->mnt_flags & MNT_INTERNAL) || (fd_file(f)->f_path.dentry->d_sb->s_flags & SB_NOUSER) || - d_is_negative(fd_file(f)->f_path.dentry) || IS_PRIVATE(d_backing_inode(fd_file(f)->f_path.dentry))) return -EBADFD; @@ -398,8 +419,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, const enum landlock_rule_type, rule_type, const void __user *const, rule_attr, const __u32, flags) { - struct landlock_ruleset *ruleset; - int err; + struct landlock_ruleset *ruleset __free(landlock_put_ruleset) = NULL; if (!is_initialized()) return -EOPNOTSUPP; @@ -415,17 +435,12 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: - err = add_rule_path_beneath(ruleset, rule_attr); - break; + return add_rule_path_beneath(ruleset, rule_attr); case LANDLOCK_RULE_NET_PORT: - err = add_rule_net_port(ruleset, rule_attr); - break; + return add_rule_net_port(ruleset, rule_attr); default: - err = -EINVAL; - break; + return -EINVAL; } - landlock_put_ruleset(ruleset); - return err; } /* Enforcement */ @@ -434,7 +449,11 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, * sys_landlock_restrict_self - Enforce a ruleset on the calling thread * * @ruleset_fd: File 
descriptor tied to the ruleset to merge with the target. - * @flags: Must be 0. + * @flags: Supported values: + * + * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF + * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON + * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF * * This system call enables to enforce a Landlock ruleset on the current * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its @@ -444,7 +463,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; - * - %EINVAL: @flags is not 0. + * - %EINVAL: @flags contains an unknown bit. * - %EBADF: @ruleset_fd is not a file descriptor for the current thread; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor; * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the @@ -452,14 +471,19 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, * %CAP_SYS_ADMIN in its namespace. * - %E2BIG: The maximum number of stacked rulesets is reached for the current * thread. + * + * .. kernel-doc:: include/uapi/linux/landlock.h + * :identifiers: landlock_restrict_self_flags */ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, flags) { - struct landlock_ruleset *new_dom, *ruleset; + struct landlock_ruleset *new_dom, + *ruleset __free(landlock_put_ruleset) = NULL; struct cred *new_cred; struct landlock_cred_security *new_llcred; - int err; + bool __maybe_unused log_same_exec, log_new_exec, log_subdomains, + prev_log_subdomains; if (!is_initialized()) return -EOPNOTSUPP; @@ -472,44 +496,75 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; - /* No flag for now. */ - if (flags) + if ((flags | LANDLOCK_MASK_RESTRICT_SELF) != + LANDLOCK_MASK_RESTRICT_SELF) return -EINVAL; - /* Gets and checks the ruleset. */ - ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); - if (IS_ERR(ruleset)) - return PTR_ERR(ruleset); + /* Translates "off" flag to boolean. */ + log_same_exec = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF); + /* Translates "on" flag to boolean. */ + log_new_exec = !!(flags & LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON); + /* Translates "off" flag to boolean. */ + log_subdomains = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF); + + /* + * It is allowed to set LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF with + * -1 as ruleset_fd, but no other flag must be set. + */ + if (!(ruleset_fd == -1 && + flags == LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF)) { + /* Gets and checks the ruleset. */ + ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + } /* Prepares new credentials. */ new_cred = prepare_creds(); - if (!new_cred) { - err = -ENOMEM; - goto out_put_ruleset; - } + if (!new_cred) + return -ENOMEM; + new_llcred = landlock_cred(new_cred); +#ifdef CONFIG_AUDIT + prev_log_subdomains = !new_llcred->log_subdomains_off; + new_llcred->log_subdomains_off = !prev_log_subdomains || + !log_subdomains; +#endif /* CONFIG_AUDIT */ + + /* + * The only case when a ruleset may not be set is if + * LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF is set and ruleset_fd is -1. + * We could optimize this case by not calling commit_creds() if this flag + * was already set, but it is not worth the complexity. 
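A minimal user-space sketch of the feature probing described above, assuming only the UAPI names added by this series (the probe_landlock() helper is illustrative and not part of the patch): with a NULL attr and a zero size, the flag selects whether the ABI version or the errata bitmask is returned.

#include <linux/landlock.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void probe_landlock(void)
{
	/* With a NULL attr and a zero size, the flag selects the returned value. */
	long abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
			   LANDLOCK_CREATE_RULESET_VERSION);
	long errata;

	if (abi < 0) {
		printf("Landlock is unsupported or disabled at boot time\n");
		return;
	}
	printf("Landlock ABI version: %ld\n", abi);

	/* Kernels without this change reject the errata flag with EINVAL. */
	errata = syscall(__NR_landlock_create_ruleset, NULL, 0,
			 LANDLOCK_CREATE_RULESET_ERRATA);
	if (errata >= 0)
		/* Erratum N is reported as bit N - 1 of the returned mask. */
		printf("Landlock errata: %#lx\n", (unsigned long)errata);
}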
+ */ + if (!ruleset) + return commit_creds(new_cred); + /* * There is no possible race condition while copying and manipulating * the current credentials because they are dedicated per thread. */ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset); if (IS_ERR(new_dom)) { - err = PTR_ERR(new_dom); - goto out_put_creds; + abort_creds(new_cred); + return PTR_ERR(new_dom); } +#ifdef CONFIG_AUDIT + new_dom->hierarchy->log_same_exec = log_same_exec; + new_dom->hierarchy->log_new_exec = log_new_exec; + if ((!log_same_exec && !log_new_exec) || !prev_log_subdomains) + new_dom->hierarchy->log_status = LANDLOCK_LOG_DISABLED; +#endif /* CONFIG_AUDIT */ + /* Replaces the old (prepared) domain. */ landlock_put_ruleset(new_llcred->domain); new_llcred->domain = new_dom; - landlock_put_ruleset(ruleset); - return commit_creds(new_cred); +#ifdef CONFIG_AUDIT + new_llcred->domain_exec |= BIT(new_dom->num_layers - 1); +#endif /* CONFIG_AUDIT */ -out_put_creds: - abort_creds(new_cred); - -out_put_ruleset: - landlock_put_ruleset(ruleset); - return err; + return commit_creds(new_cred); } diff --git a/security/landlock/task.c b/security/landlock/task.c index dc7dab78392e..2385017418ca 100644 --- a/security/landlock/task.c +++ b/security/landlock/task.c @@ -1,23 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Ptrace hooks + * Landlock - Ptrace and scope hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2019-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ #include <asm/current.h> +#include <linux/cleanup.h> #include <linux/cred.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/lsm_audit.h> #include <linux/lsm_hooks.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/sched/signal.h> #include <net/af_unix.h> #include <net/sock.h> +#include "audit.h" #include "common.h" #include "cred.h" +#include "domain.h" #include "fs.h" #include "ruleset.h" #include "setup.h" @@ -37,41 +43,29 @@ static bool domain_scope_le(const struct landlock_ruleset *const parent, { const struct landlock_hierarchy *walker; + /* Quick return for non-landlocked tasks. */ if (!parent) return true; + if (!child) return false; + for (walker = child->hierarchy; walker; walker = walker->parent) { if (walker == parent->hierarchy) /* @parent is in the scoped hierarchy of @child. */ return true; } + /* There is no relationship between @parent and @child. */ return false; } -static bool task_is_scoped(const struct task_struct *const parent, - const struct task_struct *const child) -{ - bool is_scoped; - const struct landlock_ruleset *dom_parent, *dom_child; - - rcu_read_lock(); - dom_parent = landlock_get_task_domain(parent); - dom_child = landlock_get_task_domain(child); - is_scoped = domain_scope_le(dom_parent, dom_child); - rcu_read_unlock(); - return is_scoped; -} - -static int task_ptrace(const struct task_struct *const parent, - const struct task_struct *const child) +static int domain_ptrace(const struct landlock_ruleset *const parent, + const struct landlock_ruleset *const child) { - /* Quick return for non-landlocked tasks. 
*/ - if (!landlocked(parent)) - return 0; - if (task_is_scoped(parent, child)) + if (domain_scope_le(parent, child)) return 0; + return -EPERM; } @@ -91,7 +85,39 @@ static int task_ptrace(const struct task_struct *const parent, static int hook_ptrace_access_check(struct task_struct *const child, const unsigned int mode) { - return task_ptrace(current, child); + const struct landlock_cred_security *parent_subject; + const struct landlock_ruleset *child_dom; + int err; + + /* Quick return for non-landlocked tasks. */ + parent_subject = landlock_cred(current_cred()); + if (!parent_subject) + return 0; + + scoped_guard(rcu) + { + child_dom = landlock_get_task_domain(child); + err = domain_ptrace(parent_subject->domain, child_dom); + } + + if (!err) + return 0; + + /* + * For the ptrace_access_check case, we log the current/parent domain + * and the child task. + */ + if (!(mode & PTRACE_MODE_NOAUDIT)) + landlock_log_denial(parent_subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_PTRACE, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = child, + }, + .layer_plus_one = parent_subject->domain->num_layers, + }); + + return err; } /** @@ -108,7 +134,35 @@ static int hook_ptrace_access_check(struct task_struct *const child, */ static int hook_ptrace_traceme(struct task_struct *const parent) { - return task_ptrace(parent, current); + const struct landlock_cred_security *parent_subject; + const struct landlock_ruleset *child_dom; + int err; + + child_dom = landlock_get_current_domain(); + + guard(rcu)(); + parent_subject = landlock_cred(__task_cred(parent)); + err = domain_ptrace(parent_subject->domain, child_dom); + + if (!err) + return 0; + + /* + * For the ptrace_traceme case, we log the domain which is the cause of + * the denial, which means the parent domain instead of the current + * domain. This may look unusual because the ptrace_traceme action is a + * request to be traced, but the semantic is consistent with + * hook_ptrace_access_check(). + */ + landlock_log_denial(parent_subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_PTRACE, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = current, + }, + .layer_plus_one = parent_subject->domain->num_layers, + }); + return err; } /** @@ -127,7 +181,7 @@ static bool domain_is_scoped(const struct landlock_ruleset *const client, access_mask_t scope) { int client_layer, server_layer; - struct landlock_hierarchy *client_walker, *server_walker; + const struct landlock_hierarchy *client_walker, *server_walker; /* Quick return if client has no domain */ if (WARN_ON_ONCE(!client)) @@ -212,28 +266,43 @@ static int hook_unix_stream_connect(struct sock *const sock, struct sock *const other, struct sock *const newsk) { - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - unix_scope); + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), unix_scope, + &handle_layer); /* Quick return for non-landlocked tasks. 
*/ - if (!dom) + if (!subject) + return 0; + + if (!is_abstract_socket(other)) return 0; - if (is_abstract_socket(other) && sock_is_scoped(other, dom)) - return -EPERM; + if (!sock_is_scoped(other, subject->domain)) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + .audit = { + .type = LSM_AUDIT_DATA_NET, + .u.net = &(struct lsm_network_audit) { + .sk = other, + }, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static int hook_unix_may_send(struct socket *const sock, struct socket *const other) { - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - unix_scope); + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), unix_scope, + &handle_layer); - if (!dom) + if (!subject) return 0; /* @@ -243,10 +312,23 @@ static int hook_unix_may_send(struct socket *const sock, if (unix_peer(sock->sk) == other->sk) return 0; - if (is_abstract_socket(other->sk) && sock_is_scoped(other->sk, dom)) - return -EPERM; + if (!is_abstract_socket(other->sk)) + return 0; + + if (!sock_is_scoped(other->sk, subject->domain)) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + .audit = { + .type = LSM_AUDIT_DATA_NET, + .u.net = &(struct lsm_network_audit) { + .sk = other->sk, + }, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static const struct access_masks signal_scope = { @@ -255,56 +337,97 @@ static const struct access_masks signal_scope = { static int hook_task_kill(struct task_struct *const p, struct kernel_siginfo *const info, const int sig, - const struct cred *const cred) + const struct cred *cred) { bool is_scoped; - const struct landlock_ruleset *dom; - - if (cred) { - /* Dealing with USB IO. */ - dom = landlock_cred(cred)->domain; - } else { - dom = landlock_get_current_domain(); + size_t handle_layer; + const struct landlock_cred_security *subject; + + if (!cred) { + /* + * Always allow sending signals between threads of the same process. + * This is required for process credential changes by the Native POSIX + * Threads Library and implemented by the set*id(2) wrappers and + * libcap(3) with tgkill(2). See nptl(7) and libpsx(3). + * + * This exception is similar to the __ptrace_may_access() one. + */ + if (same_thread_group(p, current)) + return 0; + + /* Not dealing with USB IO. */ + cred = current_cred(); } - dom = landlock_get_applicable_domain(dom, signal_scope); + + subject = landlock_get_applicable_subject(cred, signal_scope, + &handle_layer); /* Quick return for non-landlocked tasks. 
*/ - if (!dom) + if (!subject) return 0; - rcu_read_lock(); - is_scoped = domain_is_scoped(dom, landlock_get_task_domain(p), - LANDLOCK_SCOPE_SIGNAL); - rcu_read_unlock(); - if (is_scoped) - return -EPERM; + scoped_guard(rcu) + { + is_scoped = domain_is_scoped(subject->domain, + landlock_get_task_domain(p), + signal_scope.scope); + } + + if (!is_scoped) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_SIGNAL, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = p, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static int hook_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { - const struct landlock_ruleset *dom; + const struct landlock_cred_security *subject; bool is_scoped = false; /* Lock already held by send_sigio() and send_sigurg(). */ lockdep_assert_held(&fown->lock); - dom = landlock_get_applicable_domain( - landlock_file(fown->file)->fown_domain, signal_scope); + subject = &landlock_file(fown->file)->fown_subject; - /* Quick return for unowned socket. */ - if (!dom) + /* + * Quick return for unowned socket. + * + * subject->domain has already been filtered when saved by + * hook_file_set_fowner(), so there is no need to call + * landlock_get_applicable_subject() here. + */ + if (!subject->domain) return 0; - rcu_read_lock(); - is_scoped = domain_is_scoped(dom, landlock_get_task_domain(tsk), - LANDLOCK_SCOPE_SIGNAL); - rcu_read_unlock(); - if (is_scoped) - return -EPERM; + scoped_guard(rcu) + { + is_scoped = domain_is_scoped(subject->domain, + landlock_get_task_domain(tsk), + signal_scope.scope); + } + + if (!is_scoped) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_SIGNAL, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = tsk, + }, +#ifdef CONFIG_AUDIT + .layer_plus_one = landlock_file(fown->file)->fown_layer + 1, +#endif /* CONFIG_AUDIT */ + }); + return -EPERM; } static struct security_hook_list landlock_hooks[] __ro_after_init = { diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig index 848f8b4a6019..aef63d3e30df 100644 --- a/security/loadpin/Kconfig +++ b/security/loadpin/Kconfig @@ -16,7 +16,7 @@ config SECURITY_LOADPIN_ENFORCE depends on SECURITY_LOADPIN # Module compression breaks LoadPin unless modules are decompressed in # the kernel. - depends on !MODULES || (MODULE_COMPRESS_NONE || MODULE_DECOMPRESS) + depends on !MODULE_COMPRESS || MODULE_DECOMPRESS help If selected, LoadPin will enforce pinning at boot. If not selected, it can be enabled at boot with the kernel parameter diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index 68252452b66c..273ffbd6defe 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -270,11 +270,6 @@ static int __init loadpin_init(void) return 0; } -DEFINE_LSM(loadpin) = { - .name = "loadpin", - .init = loadpin_init, -}; - #ifdef CONFIG_SECURITY_LOADPIN_VERITY enum loadpin_securityfs_interface_index { @@ -434,9 +429,15 @@ static int __init init_loadpin_securityfs(void) return 0; } -fs_initcall(init_loadpin_securityfs); +#endif /* CONFIG_SECURITY_LOADPIN_VERITY */ +DEFINE_LSM(loadpin) = { + .id = &loadpin_lsmid, + .init = loadpin_init, +#ifdef CONFIG_SECURITY_LOADPIN_VERITY + .initcall_fs = init_loadpin_securityfs, #endif /* CONFIG_SECURITY_LOADPIN_VERITY */ +}; /* Should not be mutable after boot, so not listed in sysfs (perm == 0). 
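The LoadPin hunk above shows the registration pattern this series moves the LSMs to: later-stage setup is no longer wired up with a private fs_initcall() but handed to the framework through DEFINE_LSM(), and lsm_init.c runs it from the matching security_initcall_*() stage. A hedged sketch for a hypothetical LSM (all example_* names are illustrative):

static const struct lsm_id example_lsmid = {
	.name	= "example",
	.id	= LSM_ID_UNDEF,	/* a real LSM uses its own LSM_ID_* value */
};

static int __init example_lsm_init(void)
{
	/* Hook registration, as before. */
	return 0;
}

static int __init example_init_securityfs(void)
{
	/* Now invoked from security_initcall_fs() via lsm_initcall(fs). */
	return 0;
}

DEFINE_LSM(example) = {
	.id		= &example_lsmid,
	.init		= example_lsm_init,
	.initcall_fs	= example_init_securityfs,
};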
*/ module_param(enforce, int, 0); diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c index f2bdbd55aa2b..8d46886d2cca 100644 --- a/security/lockdown/lockdown.c +++ b/security/lockdown/lockdown.c @@ -96,7 +96,7 @@ static int __init lockdown_lsm_init(void) static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { - char temp[80]; + char temp[80] = ""; int i, offset = 0; for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) { @@ -161,13 +161,12 @@ static int __init lockdown_secfs_init(void) return PTR_ERR_OR_ZERO(dentry); } -core_initcall(lockdown_secfs_init); - #ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY DEFINE_EARLY_LSM(lockdown) = { #else DEFINE_LSM(lockdown) = { #endif - .name = "lockdown", + .id = &lockdown_lsmid, .init = lockdown_lsm_init, + .initcall_core = lockdown_secfs_init, }; diff --git a/security/lsm.h b/security/lsm.h new file mode 100644 index 000000000000..81aadbc61685 --- /dev/null +++ b/security/lsm.h @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LSM functions + */ + +#ifndef _LSM_H_ +#define _LSM_H_ + +#include <linux/printk.h> +#include <linux/lsm_hooks.h> +#include <linux/lsm_count.h> + +/* LSM debugging */ +extern bool lsm_debug; +#define lsm_pr(...) pr_info(__VA_ARGS__) +#define lsm_pr_cont(...) pr_cont(__VA_ARGS__) +#define lsm_pr_dbg(...) \ + do { \ + if (lsm_debug) \ + pr_info(__VA_ARGS__); \ + } while (0) + +/* List of configured LSMs */ +extern unsigned int lsm_active_cnt; +extern const struct lsm_id *lsm_idlist[]; + +/* LSM blob configuration */ +extern struct lsm_blob_sizes blob_sizes; + +/* LSM blob caches */ +extern struct kmem_cache *lsm_file_cache; +extern struct kmem_cache *lsm_inode_cache; + +/* LSM blob allocators */ +int lsm_cred_alloc(struct cred *cred, gfp_t gfp); +int lsm_task_alloc(struct task_struct *task); + +/* LSM framework initializers */ + +#ifdef CONFIG_MMU +int min_addr_init(void); +#else +static inline int min_addr_init(void) +{ + return 0; +} +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_SECURITYFS +int securityfs_init(void); +#else +static inline int securityfs_init(void) +{ + return 0; +} +#endif /* CONFIG_SECURITYFS */ + +#endif /* _LSM_H_ */ diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 9a8352972086..7d623b00495c 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -3,7 +3,7 @@ * common LSM auditing functions * * Based on code written for SELinux by : - * Stephen Smalley, <sds@tycho.nsa.gov> + * Stephen Smalley * James Morris <jmorris@redhat.com> * Author : Etienne Basset, <etienne.basset@ensta.org> */ @@ -24,7 +24,6 @@ #include <net/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> -#include <linux/dccp.h> #include <linux/sctp.h> #include <linux/lsm_audit.h> #include <linux/security.h> @@ -68,13 +67,6 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, ad->u.net->dport = uh->dest; break; } - case IPPROTO_DCCP: { - struct dccp_hdr *dh = dccp_hdr(skb); - - ad->u.net->sport = dh->dccph_sport; - ad->u.net->dport = dh->dccph_dport; - break; - } case IPPROTO_SCTP: { struct sctphdr *sh = sctp_hdr(skb); @@ -140,17 +132,6 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, ad->u.net->dport = uh->dest; break; } - case IPPROTO_DCCP: { - struct dccp_hdr _dccph, *dh; - - dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); - if (dh == NULL) - break; - - ad->u.net->sport = dh->dccph_sport; - ad->u.net->dport = dh->dccph_dport; - break; - } case IPPROTO_SCTP: { struct sctphdr _sctph, *sh; @@ -171,7 +152,7 @@ int ipv6_skb_to_auditdata(struct 
sk_buff *skb, static inline void print_ipv6_addr(struct audit_buffer *ab, const struct in6_addr *addr, __be16 port, - char *name1, char *name2) + const char *name1, const char *name2) { if (!ipv6_addr_any(addr)) audit_log_format(ab, " %s=%pI6c", name1, addr); @@ -180,7 +161,7 @@ static inline void print_ipv6_addr(struct audit_buffer *ab, } static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr, - __be16 port, char *name1, char *name2) + __be16 port, const char *name1, const char *name2) { if (addr) audit_log_format(ab, " %s=%pI4", name1, &addr); @@ -189,16 +170,13 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr, } /** - * dump_common_audit_data - helper to dump common audit data + * audit_log_lsm_data - helper to log common LSM audit data * @ab : the audit buffer * @a : common audit data - * */ -static void dump_common_audit_data(struct audit_buffer *ab, - struct common_audit_data *a) +void audit_log_lsm_data(struct audit_buffer *ab, + const struct common_audit_data *a) { - char comm[sizeof(current->comm)]; - /* * To keep stack sizes in check force programmers to notice if they * start making this union too large! See struct lsm_network_audit @@ -206,9 +184,6 @@ static void dump_common_audit_data(struct audit_buffer *ab, */ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2); - audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); - audit_log_untrustedstring(ab, get_task_comm(comm, current)); - switch (a->type) { case LSM_AUDIT_DATA_NONE: return; @@ -299,10 +274,10 @@ static void dump_common_audit_data(struct audit_buffer *ab, if (tsk) { pid_t pid = task_tgid_nr(tsk); if (pid) { - char comm[sizeof(tsk->comm)]; + char tskcomm[sizeof(tsk->comm)]; audit_log_format(ab, " opid=%d ocomm=", pid); audit_log_untrustedstring(ab, - get_task_comm(comm, tsk)); + get_task_comm(tskcomm, tsk)); } } break; @@ -425,10 +400,28 @@ static void dump_common_audit_data(struct audit_buffer *ab, case LSM_AUDIT_DATA_ANONINODE: audit_log_format(ab, " anonclass=%s", a->u.anonclass); break; + case LSM_AUDIT_DATA_NLMSGTYPE: + audit_log_format(ab, " nl-msgtype=%hu", a->u.nlmsg_type); + break; } /* switch (a->type) */ } /** + * dump_common_audit_data - helper to dump common audit data + * @ab : the audit buffer + * @a : common audit data + */ +static void dump_common_audit_data(struct audit_buffer *ab, + const struct common_audit_data *a) +{ + char comm[sizeof(current->comm)]; + + audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); + audit_log_untrustedstring(ab, get_task_comm(comm, current)); + audit_log_lsm_data(ab, a); +} + +/** * common_lsm_audit - generic LSM auditing function * @a: auxiliary audit data * @pre_audit: lsm-specific pre-audit callback diff --git a/security/lsm_init.c b/security/lsm_init.c new file mode 100644 index 000000000000..05bd52e6b1f2 --- /dev/null +++ b/security/lsm_init.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LSM initialization functions + */ + +#define pr_fmt(fmt) "LSM: " fmt + +#include <linux/init.h> +#include <linux/lsm_hooks.h> + +#include "lsm.h" + +/* LSM enabled constants. */ +static __initdata int lsm_enabled_true = 1; +static __initdata int lsm_enabled_false = 0; + +/* Pointers to LSM sections defined in include/asm-generic/vmlinux.lds.h */ +extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; +extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; + +/* Number of "early" LSMs */ +static __initdata unsigned int lsm_count_early; + +/* Build and boot-time LSM ordering. 
*/ +static __initconst const char *const lsm_order_builtin = CONFIG_LSM; +static __initdata const char *lsm_order_cmdline; +static __initdata const char *lsm_order_legacy; + +/* Ordered list of LSMs to initialize. */ +static __initdata struct lsm_info *lsm_order[MAX_LSM_COUNT + 1]; +static __initdata struct lsm_info *lsm_exclusive; + +#define lsm_order_for_each(iter) \ + for ((iter) = lsm_order; *(iter); (iter)++) +#define lsm_for_each_raw(iter) \ + for ((iter) = __start_lsm_info; \ + (iter) < __end_lsm_info; (iter)++) +#define lsm_early_for_each_raw(iter) \ + for ((iter) = __start_early_lsm_info; \ + (iter) < __end_early_lsm_info; (iter)++) + +#define lsm_initcall(level) \ + ({ \ + int _r, _rc = 0; \ + struct lsm_info **_lp, *_l; \ + lsm_order_for_each(_lp) { \ + _l = *_lp; \ + if (!_l->initcall_##level) \ + continue; \ + lsm_pr_dbg("running %s %s initcall", \ + _l->id->name, #level); \ + _r = _l->initcall_##level(); \ + if (_r) { \ + pr_warn("failed LSM %s %s initcall with errno %d\n", \ + _l->id->name, #level, _r); \ + if (!_rc) \ + _rc = _r; \ + } \ + } \ + _rc; \ + }) + +/** + * lsm_choose_security - Legacy "major" LSM selection + * @str: kernel command line parameter + */ +static int __init lsm_choose_security(char *str) +{ + lsm_order_legacy = str; + return 1; +} +__setup("security=", lsm_choose_security); + +/** + * lsm_choose_lsm - Modern LSM selection + * @str: kernel command line parameter + */ +static int __init lsm_choose_lsm(char *str) +{ + lsm_order_cmdline = str; + return 1; +} +__setup("lsm=", lsm_choose_lsm); + +/** + * lsm_debug_enable - Enable LSM framework debugging + * @str: kernel command line parameter + * + * Currently we only provide debug info during LSM initialization, but we may + * want to expand this in the future. + */ +static int __init lsm_debug_enable(char *str) +{ + lsm_debug = true; + return 1; +} +__setup("lsm.debug", lsm_debug_enable); + +/** + * lsm_enabled_set - Mark a LSM as enabled + * @lsm: LSM definition + * @enabled: enabled flag + */ +static void __init lsm_enabled_set(struct lsm_info *lsm, bool enabled) +{ + /* + * When an LSM hasn't configured an enable variable, we can use + * a hard-coded location for storing the default enabled state. + */ + if (!lsm->enabled || + lsm->enabled == &lsm_enabled_true || + lsm->enabled == &lsm_enabled_false) { + lsm->enabled = enabled ? &lsm_enabled_true : &lsm_enabled_false; + } else { + *lsm->enabled = enabled; + } +} + +/** + * lsm_is_enabled - Determine if a LSM is enabled + * @lsm: LSM definition + */ +static inline bool lsm_is_enabled(struct lsm_info *lsm) +{ + return (lsm->enabled ? *lsm->enabled : false); +} + +/** + * lsm_order_exists - Determine if a LSM exists in the ordered list + * @lsm: LSM definition + */ +static bool __init lsm_order_exists(struct lsm_info *lsm) +{ + struct lsm_info **check; + + lsm_order_for_each(check) { + if (*check == lsm) + return true; + } + + return false; +} + +/** + * lsm_order_append - Append a LSM to the ordered list + * @lsm: LSM definition + * @src: source of the addition + * + * Append @lsm to the enabled LSM array after ensuring that it hasn't been + * explicitly disabled, is a duplicate entry, or would run afoul of the + * LSM_FLAG_EXCLUSIVE logic. + */ +static void __init lsm_order_append(struct lsm_info *lsm, const char *src) +{ + /* Ignore duplicate selections. */ + if (lsm_order_exists(lsm)) + return; + + /* Skip explicitly disabled LSMs. 
*/ + if (lsm->enabled && !lsm_is_enabled(lsm)) { + lsm_pr_dbg("skip previously disabled LSM %s:%s\n", + src, lsm->id->name); + return; + } + + if (lsm_active_cnt == MAX_LSM_COUNT) { + pr_warn("exceeded maximum LSM count on %s:%s\n", + src, lsm->id->name); + lsm_enabled_set(lsm, false); + return; + } + + if (lsm->flags & LSM_FLAG_EXCLUSIVE) { + if (lsm_exclusive) { + lsm_pr_dbg("skip exclusive LSM conflict %s:%s\n", + src, lsm->id->name); + lsm_enabled_set(lsm, false); + return; + } else { + lsm_pr_dbg("select exclusive LSM %s:%s\n", + src, lsm->id->name); + lsm_exclusive = lsm; + } + } + + lsm_enabled_set(lsm, true); + lsm_order[lsm_active_cnt] = lsm; + lsm_idlist[lsm_active_cnt++] = lsm->id; + + lsm_pr_dbg("enabling LSM %s:%s\n", src, lsm->id->name); +} + +/** + * lsm_order_parse - Parse the comma delimited LSM list + * @list: LSM list + * @src: source of the list + */ +static void __init lsm_order_parse(const char *list, const char *src) +{ + struct lsm_info *lsm; + char *sep, *name, *next; + + /* Handle any Legacy LSM exclusions if one was specified. */ + if (lsm_order_legacy) { + /* + * To match the original "security=" behavior, this explicitly + * does NOT fallback to another Legacy Major if the selected + * one was separately disabled: disable all non-matching + * Legacy Major LSMs. + */ + lsm_for_each_raw(lsm) { + if ((lsm->flags & LSM_FLAG_LEGACY_MAJOR) && + strcmp(lsm->id->name, lsm_order_legacy)) { + lsm_enabled_set(lsm, false); + lsm_pr_dbg("skip legacy LSM conflict %s:%s\n", + src, lsm->id->name); + } + } + } + + /* LSM_ORDER_FIRST */ + lsm_for_each_raw(lsm) { + if (lsm->order == LSM_ORDER_FIRST) + lsm_order_append(lsm, "first"); + } + + /* Normal or "mutable" LSMs */ + sep = kstrdup(list, GFP_KERNEL); + next = sep; + /* Walk the list, looking for matching LSMs. */ + while ((name = strsep(&next, ",")) != NULL) { + lsm_for_each_raw(lsm) { + if (!strcmp(lsm->id->name, name) && + lsm->order == LSM_ORDER_MUTABLE) + lsm_order_append(lsm, src); + } + } + kfree(sep); + + /* Legacy LSM if specified. */ + if (lsm_order_legacy) { + lsm_for_each_raw(lsm) { + if (!strcmp(lsm->id->name, lsm_order_legacy)) + lsm_order_append(lsm, src); + } + } + + /* LSM_ORDER_LAST */ + lsm_for_each_raw(lsm) { + if (lsm->order == LSM_ORDER_LAST) + lsm_order_append(lsm, "last"); + } + + /* Disable all LSMs not previously enabled. */ + lsm_for_each_raw(lsm) { + if (lsm_order_exists(lsm)) + continue; + lsm_enabled_set(lsm, false); + lsm_pr_dbg("skip disabled LSM %s:%s\n", src, lsm->id->name); + } +} + +/** + * lsm_blob_size_update - Update the LSM blob size and offset information + * @sz_req: the requested additional blob size + * @sz_cur: the existing blob size + */ +static void __init lsm_blob_size_update(unsigned int *sz_req, + unsigned int *sz_cur) +{ + unsigned int offset; + + if (*sz_req == 0) + return; + + offset = ALIGN(*sz_cur, sizeof(void *)); + *sz_cur = offset + *sz_req; + *sz_req = offset; +} + +/** + * lsm_prepare - Prepare the LSM framework for a new LSM + * @lsm: LSM definition + */ +static void __init lsm_prepare(struct lsm_info *lsm) +{ + struct lsm_blob_sizes *blobs = lsm->blobs; + + if (!blobs) + return; + + /* Register the LSM blob sizes. */ + blobs = lsm->blobs; + lsm_blob_size_update(&blobs->lbs_cred, &blob_sizes.lbs_cred); + lsm_blob_size_update(&blobs->lbs_file, &blob_sizes.lbs_file); + lsm_blob_size_update(&blobs->lbs_ib, &blob_sizes.lbs_ib); + /* inode blob gets an rcu_head in addition to LSM blobs. 
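For reference, the arithmetic in lsm_blob_size_update() is the same offset scheme the old lsm_set_blob_size() used: the per-LSM request is replaced by a pointer-aligned offset into the shared blob, and the running total becomes the final allocation size. A worked example with hypothetical sizes (8-byte pointers assumed):

	/*
	 * blob_sizes.lbs_cred starts at 0.
	 * LSM A asks for 12 bytes: offset = ALIGN(0, 8)  = 0,  total = 12,
	 *                          A's lbs_cred is rewritten to 0.
	 * LSM B asks for 8 bytes:  offset = ALIGN(12, 8) = 16, total = 24,
	 *                          B's lbs_cred is rewritten to 16.
	 * cred->security is then allocated with 24 bytes, and each LSM indexes
	 * into it using the offset recorded in its own struct lsm_blob_sizes.
	 */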
*/ + if (blobs->lbs_inode && blob_sizes.lbs_inode == 0) + blob_sizes.lbs_inode = sizeof(struct rcu_head); + lsm_blob_size_update(&blobs->lbs_inode, &blob_sizes.lbs_inode); + lsm_blob_size_update(&blobs->lbs_ipc, &blob_sizes.lbs_ipc); + lsm_blob_size_update(&blobs->lbs_key, &blob_sizes.lbs_key); + lsm_blob_size_update(&blobs->lbs_msg_msg, &blob_sizes.lbs_msg_msg); + lsm_blob_size_update(&blobs->lbs_perf_event, + &blob_sizes.lbs_perf_event); + lsm_blob_size_update(&blobs->lbs_sock, &blob_sizes.lbs_sock); + lsm_blob_size_update(&blobs->lbs_superblock, + &blob_sizes.lbs_superblock); + lsm_blob_size_update(&blobs->lbs_task, &blob_sizes.lbs_task); + lsm_blob_size_update(&blobs->lbs_tun_dev, &blob_sizes.lbs_tun_dev); + lsm_blob_size_update(&blobs->lbs_xattr_count, + &blob_sizes.lbs_xattr_count); + lsm_blob_size_update(&blobs->lbs_bdev, &blob_sizes.lbs_bdev); + lsm_blob_size_update(&blobs->lbs_bpf_map, &blob_sizes.lbs_bpf_map); + lsm_blob_size_update(&blobs->lbs_bpf_prog, &blob_sizes.lbs_bpf_prog); + lsm_blob_size_update(&blobs->lbs_bpf_token, &blob_sizes.lbs_bpf_token); +} + +/** + * lsm_init_single - Initialize a given LSM + * @lsm: LSM definition + */ +static void __init lsm_init_single(struct lsm_info *lsm) +{ + int ret; + + if (!lsm_is_enabled(lsm)) + return; + + lsm_pr_dbg("initializing %s\n", lsm->id->name); + ret = lsm->init(); + WARN(ret, "%s failed to initialize: %d\n", lsm->id->name, ret); +} + +/** + * lsm_static_call_init - Initialize a LSM's static calls + * @hl: LSM hook list + */ +static int __init lsm_static_call_init(struct security_hook_list *hl) +{ + struct lsm_static_call *scall = hl->scalls; + int i; + + for (i = 0; i < MAX_LSM_COUNT; i++) { + /* Update the first static call that is not used yet */ + if (!scall->hl) { + __static_call_update(scall->key, scall->trampoline, + hl->hook.lsm_func_addr); + scall->hl = hl; + static_branch_enable(scall->active); + return 0; + } + scall++; + } + + return -ENOSPC; +} + +/** + * security_add_hooks - Add a LSM's hooks to the LSM framework's hook lists + * @hooks: LSM hooks to add + * @count: number of hooks to add + * @lsmid: identification information for the LSM + * + * Each LSM has to register its hooks with the LSM framework. + */ +void __init security_add_hooks(struct security_hook_list *hooks, int count, + const struct lsm_id *lsmid) +{ + int i; + + for (i = 0; i < count; i++) { + hooks[i].lsmid = lsmid; + if (lsm_static_call_init(&hooks[i])) + panic("exhausted LSM callback slots with LSM %s\n", + lsmid->name); + } +} + +/** + * early_security_init - Initialize the early LSMs + */ +int __init early_security_init(void) +{ + struct lsm_info *lsm; + + /* NOTE: lsm_pr_dbg() doesn't work here as lsm_debug is not yet set */ + + lsm_early_for_each_raw(lsm) { + lsm_enabled_set(lsm, true); + lsm_order_append(lsm, "early"); + lsm_prepare(lsm); + lsm_init_single(lsm); + lsm_count_early++; + } + + return 0; +} + +/** + * security_init - Initializes the LSM framework + * + * This should be called early in the kernel initialization sequence. + */ +int __init security_init(void) +{ + unsigned int cnt; + struct lsm_info **lsm; + + if (lsm_debug) { + struct lsm_info *i; + + cnt = 0; + lsm_pr("available LSMs: "); + lsm_early_for_each_raw(i) + lsm_pr_cont("%s%s(E)", (cnt++ ? "," : ""), i->id->name); + lsm_for_each_raw(i) + lsm_pr_cont("%s%s", (cnt++ ? 
"," : ""), i->id->name); + lsm_pr_cont("\n"); + + lsm_pr("built-in LSM config: %s\n", lsm_order_builtin); + + lsm_pr("legacy LSM parameter: %s\n", lsm_order_legacy); + lsm_pr("boot LSM parameter: %s\n", lsm_order_cmdline); + + /* see the note about lsm_pr_dbg() in early_security_init() */ + lsm_early_for_each_raw(i) + lsm_pr("enabled LSM early:%s\n", i->id->name); + } + + if (lsm_order_cmdline) { + if (lsm_order_legacy) + lsm_order_legacy = NULL; + lsm_order_parse(lsm_order_cmdline, "cmdline"); + } else + lsm_order_parse(lsm_order_builtin, "builtin"); + + lsm_order_for_each(lsm) + lsm_prepare(*lsm); + + if (lsm_debug) { + lsm_pr("blob(cred) size %d\n", blob_sizes.lbs_cred); + lsm_pr("blob(file) size %d\n", blob_sizes.lbs_file); + lsm_pr("blob(ib) size %d\n", blob_sizes.lbs_ib); + lsm_pr("blob(inode) size %d\n", blob_sizes.lbs_inode); + lsm_pr("blob(ipc) size %d\n", blob_sizes.lbs_ipc); + lsm_pr("blob(key) size %d\n", blob_sizes.lbs_key); + lsm_pr("blob(msg_msg)_size %d\n", blob_sizes.lbs_msg_msg); + lsm_pr("blob(sock) size %d\n", blob_sizes.lbs_sock); + lsm_pr("blob(superblock) size %d\n", blob_sizes.lbs_superblock); + lsm_pr("blob(perf_event) size %d\n", blob_sizes.lbs_perf_event); + lsm_pr("blob(task) size %d\n", blob_sizes.lbs_task); + lsm_pr("blob(tun_dev) size %d\n", blob_sizes.lbs_tun_dev); + lsm_pr("blob(xattr) count %d\n", blob_sizes.lbs_xattr_count); + lsm_pr("blob(bdev) size %d\n", blob_sizes.lbs_bdev); + lsm_pr("blob(bpf_map) size %d\n", blob_sizes.lbs_bpf_map); + lsm_pr("blob(bpf_prog) size %d\n", blob_sizes.lbs_bpf_prog); + lsm_pr("blob(bpf_token) size %d\n", blob_sizes.lbs_bpf_token); + } + + if (blob_sizes.lbs_file) + lsm_file_cache = kmem_cache_create("lsm_file_cache", + blob_sizes.lbs_file, 0, + SLAB_PANIC, NULL); + if (blob_sizes.lbs_inode) + lsm_inode_cache = kmem_cache_create("lsm_inode_cache", + blob_sizes.lbs_inode, 0, + SLAB_PANIC, NULL); + + if (lsm_cred_alloc((struct cred *)unrcu_pointer(current->cred), + GFP_KERNEL)) + panic("early LSM cred alloc failed\n"); + if (lsm_task_alloc(current)) + panic("early LSM task alloc failed\n"); + + cnt = 0; + lsm_order_for_each(lsm) { + /* skip the "early" LSMs as they have already been setup */ + if (cnt++ < lsm_count_early) + continue; + lsm_init_single(*lsm); + } + + return 0; +} + +/** + * security_initcall_pure - Run the LSM pure initcalls + */ +static int __init security_initcall_pure(void) +{ + int rc_adr, rc_lsm; + + rc_adr = min_addr_init(); + rc_lsm = lsm_initcall(pure); + + return (rc_adr ? rc_adr : rc_lsm); +} +pure_initcall(security_initcall_pure); + +/** + * security_initcall_early - Run the LSM early initcalls + */ +static int __init security_initcall_early(void) +{ + return lsm_initcall(early); +} +early_initcall(security_initcall_early); + +/** + * security_initcall_core - Run the LSM core initcalls + */ +static int __init security_initcall_core(void) +{ + int rc_sfs, rc_lsm; + + rc_sfs = securityfs_init(); + rc_lsm = lsm_initcall(core); + + return (rc_sfs ? 
rc_sfs : rc_lsm); +} +core_initcall(security_initcall_core); + +/** + * security_initcall_subsys - Run the LSM subsys initcalls + */ +static int __init security_initcall_subsys(void) +{ + return lsm_initcall(subsys); +} +subsys_initcall(security_initcall_subsys); + +/** + * security_initcall_fs - Run the LSM fs initcalls + */ +static int __init security_initcall_fs(void) +{ + return lsm_initcall(fs); +} +fs_initcall(security_initcall_fs); + +/** + * security_initcall_device - Run the LSM device initcalls + */ +static int __init security_initcall_device(void) +{ + return lsm_initcall(device); +} +device_initcall(security_initcall_device); + +/** + * security_initcall_late - Run the LSM late initcalls + */ +static int __init security_initcall_late(void) +{ + int rc; + + rc = lsm_initcall(late); + lsm_pr_dbg("all enabled LSMs fully activated\n"); + call_blocking_lsm_notifier(LSM_STARTED_ALL, NULL); + + return rc; +} +late_initcall(security_initcall_late); diff --git a/security/lsm_notifier.c b/security/lsm_notifier.c new file mode 100644 index 000000000000..c92fad5d57d4 --- /dev/null +++ b/security/lsm_notifier.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LSM notifier functions + * + */ + +#include <linux/notifier.h> +#include <linux/security.h> + +static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain); + +int call_blocking_lsm_notifier(enum lsm_event event, void *data) +{ + return blocking_notifier_call_chain(&blocking_lsm_notifier_chain, + event, data); +} +EXPORT_SYMBOL(call_blocking_lsm_notifier); + +int register_blocking_lsm_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&blocking_lsm_notifier_chain, + nb); +} +EXPORT_SYMBOL(register_blocking_lsm_notifier); + +int unregister_blocking_lsm_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain, + nb); +} +EXPORT_SYMBOL(unregister_blocking_lsm_notifier); diff --git a/security/lsm_syscalls.c b/security/lsm_syscalls.c index 8440948a690c..5648b1f0ce9c 100644 --- a/security/lsm_syscalls.c +++ b/security/lsm_syscalls.c @@ -17,6 +17,8 @@ #include <linux/lsm_hooks.h> #include <uapi/linux/lsm.h> +#include "lsm.h" + /** * lsm_name_to_attr - map an LSM attribute name to its ID * @name: name of the attribute diff --git a/security/min_addr.c b/security/min_addr.c index 0ce267c041ab..0fde5ec9abc8 100644 --- a/security/min_addr.c +++ b/security/min_addr.c @@ -3,6 +3,9 @@ #include <linux/mm.h> #include <linux/security.h> #include <linux/sysctl.h> +#include <linux/minmax.h> + +#include "lsm.h" /* amount of vm to protect from userspace access by both DAC and the LSM*/ unsigned long mmap_min_addr; @@ -16,10 +19,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; static void update_mmap_min_addr(void) { #ifdef CONFIG_LSM_MMAP_MIN_ADDR - if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) - mmap_min_addr = dac_mmap_min_addr; - else - mmap_min_addr = CONFIG_LSM_MMAP_MIN_ADDR; + mmap_min_addr = umax(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR); #else mmap_min_addr = dac_mmap_min_addr; #endif @@ -44,10 +44,20 @@ int mmap_min_addr_handler(const struct ctl_table *table, int write, return ret; } -static int __init init_mmap_min_addr(void) +static const struct ctl_table min_addr_sysctl_table[] = { + { + .procname = "mmap_min_addr", + .data = &dac_mmap_min_addr, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = mmap_min_addr_handler, + }, +}; + +int __init min_addr_init(void) { + register_sysctl_init("vm", 
min_addr_sysctl_table); update_mmap_min_addr(); return 0; } -pure_initcall(init_mmap_min_addr); diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c index 1ba564f097f5..d5fb949050dd 100644 --- a/security/safesetid/lsm.c +++ b/security/safesetid/lsm.c @@ -287,6 +287,7 @@ static int __init safesetid_security_init(void) } DEFINE_LSM(safesetid_security_init) = { + .id = &safesetid_lsmid, .init = safesetid_security_init, - .name = "safesetid", + .initcall_fs = safesetid_init_securityfs, }; diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h index d346f4849cea..bf5172e2c3f7 100644 --- a/security/safesetid/lsm.h +++ b/security/safesetid/lsm.h @@ -70,4 +70,6 @@ enum sid_policy_type _setid_policy_lookup(struct setid_ruleset *policy, extern struct setid_ruleset __rcu *safesetid_setuid_rules; extern struct setid_ruleset __rcu *safesetid_setgid_rules; +int safesetid_init_securityfs(void); + #endif /* _SAFESETID_H */ diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c index 25310468bcdd..ece259f75b0d 100644 --- a/security/safesetid/securityfs.c +++ b/security/safesetid/securityfs.c @@ -143,6 +143,9 @@ static ssize_t handle_policy_update(struct file *file, char *buf, *p, *end; int err; + if (len >= KMALLOC_MAX_SIZE) + return -EINVAL; + pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL); if (!pol) return -ENOMEM; @@ -305,7 +308,7 @@ static const struct file_operations safesetid_gid_file_fops = { .write = safesetid_gid_file_write, }; -static int __init safesetid_init_securityfs(void) +int __init safesetid_init_securityfs(void) { int ret; struct dentry *policy_dir; @@ -342,4 +345,3 @@ error: securityfs_remove(policy_dir); return ret; } -fs_initcall(safesetid_init_securityfs); diff --git a/security/security.c b/security/security.c index 09664e09fec9..31a688650601 100644 --- a/security/security.c +++ b/security/security.c @@ -32,24 +32,7 @@ #include <net/flow.h> #include <net/sock.h> -#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX - -/* - * Identifier for the LSM static calls. - * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h - * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT - */ -#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX - -/* - * Call the macro M for each LSM hook MAX_LSM_COUNT times. - */ -#define LSM_LOOP_UNROLL(M, ...) \ -do { \ - UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \ -} while (0) - -#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) +#include "lsm.h" /* * These are descriptions of the reasons that can be passed to the @@ -90,23 +73,34 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = { [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", }; -static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain); +bool lsm_debug __ro_after_init; -static struct kmem_cache *lsm_file_cache; -static struct kmem_cache *lsm_inode_cache; +unsigned int lsm_active_cnt __ro_after_init; +const struct lsm_id *lsm_idlist[MAX_LSM_COUNT]; + +struct lsm_blob_sizes blob_sizes; -char *lsm_names; -static struct lsm_blob_sizes blob_sizes __ro_after_init; +struct kmem_cache *lsm_file_cache; +struct kmem_cache *lsm_inode_cache; -/* Boot-time LSM user choice */ -static __initdata const char *chosen_lsm_order; -static __initdata const char *chosen_major_lsm; +#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX + +/* + * Identifier for the LSM static calls. 
+ * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h + * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT + */ +#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX -static __initconst const char *const builtin_lsm_order = CONFIG_LSM; +/* + * Call the macro M for each LSM hook MAX_LSM_COUNT times. + */ +#define LSM_LOOP_UNROLL(M, ...) \ +do { \ + UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \ +} while (0) -/* Ordered list of LSMs to initialize. */ -static __initdata struct lsm_info *ordered_lsms[MAX_LSM_COUNT + 1]; -static __initdata struct lsm_info *exclusive; +#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) #ifdef CONFIG_HAVE_STATIC_CALL #define LSM_HOOK_TRAMP(NAME, NUM) \ @@ -157,512 +151,26 @@ struct lsm_static_calls_table #undef INIT_LSM_STATIC_CALL }; -static __initdata bool debug; -#define init_debug(...) \ - do { \ - if (debug) \ - pr_info(__VA_ARGS__); \ - } while (0) - -static bool __init is_enabled(struct lsm_info *lsm) -{ - if (!lsm->enabled) - return false; - - return *lsm->enabled; -} - -/* Mark an LSM's enabled flag. */ -static int lsm_enabled_true __initdata = 1; -static int lsm_enabled_false __initdata = 0; -static void __init set_enabled(struct lsm_info *lsm, bool enabled) -{ - /* - * When an LSM hasn't configured an enable variable, we can use - * a hard-coded location for storing the default enabled state. - */ - if (!lsm->enabled) { - if (enabled) - lsm->enabled = &lsm_enabled_true; - else - lsm->enabled = &lsm_enabled_false; - } else if (lsm->enabled == &lsm_enabled_true) { - if (!enabled) - lsm->enabled = &lsm_enabled_false; - } else if (lsm->enabled == &lsm_enabled_false) { - if (enabled) - lsm->enabled = &lsm_enabled_true; - } else { - *lsm->enabled = enabled; - } -} - -/* Is an LSM already listed in the ordered LSMs list? */ -static bool __init exists_ordered_lsm(struct lsm_info *lsm) -{ - struct lsm_info **check; - - for (check = ordered_lsms; *check; check++) - if (*check == lsm) - return true; - - return false; -} - -/* Append an LSM to the list of ordered LSMs to initialize. */ -static int last_lsm __initdata; -static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from) -{ - /* Ignore duplicate selections. */ - if (exists_ordered_lsm(lsm)) - return; - - if (WARN(last_lsm == MAX_LSM_COUNT, "%s: out of LSM static calls!?\n", from)) - return; - - /* Enable this LSM, if it is not already set. */ - if (!lsm->enabled) - lsm->enabled = &lsm_enabled_true; - ordered_lsms[last_lsm++] = lsm; - - init_debug("%s ordered: %s (%s)\n", from, lsm->name, - is_enabled(lsm) ? "enabled" : "disabled"); -} - -/* Is an LSM allowed to be initialized? */ -static bool __init lsm_allowed(struct lsm_info *lsm) -{ - /* Skip if the LSM is disabled. */ - if (!is_enabled(lsm)) - return false; - - /* Not allowed if another exclusive LSM already initialized. 
*/ - if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) { - init_debug("exclusive disabled: %s\n", lsm->name); - return false; - } - - return true; -} - -static void __init lsm_set_blob_size(int *need, int *lbs) -{ - int offset; - - if (*need <= 0) - return; - - offset = ALIGN(*lbs, sizeof(void *)); - *lbs = offset + *need; - *need = offset; -} - -static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed) -{ - if (!needed) - return; - - lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred); - lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file); - lsm_set_blob_size(&needed->lbs_ib, &blob_sizes.lbs_ib); - /* - * The inode blob gets an rcu_head in addition to - * what the modules might need. - */ - if (needed->lbs_inode && blob_sizes.lbs_inode == 0) - blob_sizes.lbs_inode = sizeof(struct rcu_head); - lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode); - lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc); - lsm_set_blob_size(&needed->lbs_key, &blob_sizes.lbs_key); - lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg); - lsm_set_blob_size(&needed->lbs_perf_event, &blob_sizes.lbs_perf_event); - lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock); - lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock); - lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task); - lsm_set_blob_size(&needed->lbs_tun_dev, &blob_sizes.lbs_tun_dev); - lsm_set_blob_size(&needed->lbs_xattr_count, - &blob_sizes.lbs_xattr_count); - lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev); -} - -/* Prepare LSM for initialization. */ -static void __init prepare_lsm(struct lsm_info *lsm) -{ - int enabled = lsm_allowed(lsm); - - /* Record enablement (to handle any following exclusive LSMs). */ - set_enabled(lsm, enabled); - - /* If enabled, do pre-initialization work. */ - if (enabled) { - if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) { - exclusive = lsm; - init_debug("exclusive chosen: %s\n", lsm->name); - } - - lsm_set_blob_sizes(lsm->blobs); - } -} - -/* Initialize a given LSM, if it is enabled. */ -static void __init initialize_lsm(struct lsm_info *lsm) -{ - if (is_enabled(lsm)) { - int ret; - - init_debug("initializing %s\n", lsm->name); - ret = lsm->init(); - WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret); - } -} - -/* - * Current index to use while initializing the lsm id list. - */ -u32 lsm_active_cnt __ro_after_init; -const struct lsm_id *lsm_idlist[MAX_LSM_COUNT]; - -/* Populate ordered LSMs list from comma-separated LSM name list. */ -static void __init ordered_lsm_parse(const char *order, const char *origin) -{ - struct lsm_info *lsm; - char *sep, *name, *next; - - /* LSM_ORDER_FIRST is always first. */ - for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { - if (lsm->order == LSM_ORDER_FIRST) - append_ordered_lsm(lsm, " first"); - } - - /* Process "security=", if given. */ - if (chosen_major_lsm) { - struct lsm_info *major; - - /* - * To match the original "security=" behavior, this - * explicitly does NOT fallback to another Legacy Major - * if the selected one was separately disabled: disable - * all non-matching Legacy Major LSMs. 
- */ - for (major = __start_lsm_info; major < __end_lsm_info; - major++) { - if ((major->flags & LSM_FLAG_LEGACY_MAJOR) && - strcmp(major->name, chosen_major_lsm) != 0) { - set_enabled(major, false); - init_debug("security=%s disabled: %s (only one legacy major LSM)\n", - chosen_major_lsm, major->name); - } - } - } - - sep = kstrdup(order, GFP_KERNEL); - next = sep; - /* Walk the list, looking for matching LSMs. */ - while ((name = strsep(&next, ",")) != NULL) { - bool found = false; - - for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { - if (strcmp(lsm->name, name) == 0) { - if (lsm->order == LSM_ORDER_MUTABLE) - append_ordered_lsm(lsm, origin); - found = true; - } - } - - if (!found) - init_debug("%s ignored: %s (not built into kernel)\n", - origin, name); - } - - /* Process "security=", if given. */ - if (chosen_major_lsm) { - for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { - if (exists_ordered_lsm(lsm)) - continue; - if (strcmp(lsm->name, chosen_major_lsm) == 0) - append_ordered_lsm(lsm, "security="); - } - } - - /* LSM_ORDER_LAST is always last. */ - for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { - if (lsm->order == LSM_ORDER_LAST) - append_ordered_lsm(lsm, " last"); - } - - /* Disable all LSMs not in the ordered list. */ - for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { - if (exists_ordered_lsm(lsm)) - continue; - set_enabled(lsm, false); - init_debug("%s skipped: %s (not in requested order)\n", - origin, lsm->name); - } - - kfree(sep); -} - -static void __init lsm_static_call_init(struct security_hook_list *hl) -{ - struct lsm_static_call *scall = hl->scalls; - int i; - - for (i = 0; i < MAX_LSM_COUNT; i++) { - /* Update the first static call that is not used yet */ - if (!scall->hl) { - __static_call_update(scall->key, scall->trampoline, - hl->hook.lsm_func_addr); - scall->hl = hl; - static_branch_enable(scall->active); - return; - } - scall++; - } - panic("%s - Ran out of static slots.\n", __func__); -} - -static void __init lsm_early_cred(struct cred *cred); -static void __init lsm_early_task(struct task_struct *task); - -static int lsm_append(const char *new, char **result); - -static void __init report_lsm_order(void) -{ - struct lsm_info **lsm, *early; - int first = 0; - - pr_info("initializing lsm="); - - /* Report each enabled LSM name, comma separated. */ - for (early = __start_early_lsm_info; - early < __end_early_lsm_info; early++) - if (is_enabled(early)) - pr_cont("%s%s", first++ == 0 ? "" : ",", early->name); - for (lsm = ordered_lsms; *lsm; lsm++) - if (is_enabled(*lsm)) - pr_cont("%s%s", first++ == 0 ? 
"" : ",", (*lsm)->name); - - pr_cont("\n"); -} - -static void __init ordered_lsm_init(void) -{ - struct lsm_info **lsm; - - if (chosen_lsm_order) { - if (chosen_major_lsm) { - pr_warn("security=%s is ignored because it is superseded by lsm=%s\n", - chosen_major_lsm, chosen_lsm_order); - chosen_major_lsm = NULL; - } - ordered_lsm_parse(chosen_lsm_order, "cmdline"); - } else - ordered_lsm_parse(builtin_lsm_order, "builtin"); - - for (lsm = ordered_lsms; *lsm; lsm++) - prepare_lsm(*lsm); - - report_lsm_order(); - - init_debug("cred blob size = %d\n", blob_sizes.lbs_cred); - init_debug("file blob size = %d\n", blob_sizes.lbs_file); - init_debug("ib blob size = %d\n", blob_sizes.lbs_ib); - init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); - init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); -#ifdef CONFIG_KEYS - init_debug("key blob size = %d\n", blob_sizes.lbs_key); -#endif /* CONFIG_KEYS */ - init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); - init_debug("sock blob size = %d\n", blob_sizes.lbs_sock); - init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock); - init_debug("perf event blob size = %d\n", blob_sizes.lbs_perf_event); - init_debug("task blob size = %d\n", blob_sizes.lbs_task); - init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev); - init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count); - init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev); - - /* - * Create any kmem_caches needed for blobs - */ - if (blob_sizes.lbs_file) - lsm_file_cache = kmem_cache_create("lsm_file_cache", - blob_sizes.lbs_file, 0, - SLAB_PANIC, NULL); - if (blob_sizes.lbs_inode) - lsm_inode_cache = kmem_cache_create("lsm_inode_cache", - blob_sizes.lbs_inode, 0, - SLAB_PANIC, NULL); - - lsm_early_cred((struct cred *) current->cred); - lsm_early_task(current); - for (lsm = ordered_lsms; *lsm; lsm++) - initialize_lsm(*lsm); -} - -int __init early_security_init(void) -{ - struct lsm_info *lsm; - - for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { - if (!lsm->enabled) - lsm->enabled = &lsm_enabled_true; - prepare_lsm(lsm); - initialize_lsm(lsm); - } - - return 0; -} - /** - * security_init - initializes the security framework + * lsm_file_alloc - allocate a composite file blob + * @file: the file that needs a blob * - * This should be called early in the kernel initialization sequence. - */ -int __init security_init(void) -{ - struct lsm_info *lsm; - - init_debug("legacy security=%s\n", chosen_major_lsm ? : " *unspecified*"); - init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order); - init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*"); - - /* - * Append the names of the early LSM modules now that kmalloc() is - * available - */ - for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { - init_debug(" early started: %s (%s)\n", lsm->name, - is_enabled(lsm) ? "enabled" : "disabled"); - if (lsm->enabled) - lsm_append(lsm->name, &lsm_names); - } - - /* Load LSMs in specified order. */ - ordered_lsm_init(); - - return 0; -} - -/* Save user chosen LSM */ -static int __init choose_major_lsm(char *str) -{ - chosen_major_lsm = str; - return 1; -} -__setup("security=", choose_major_lsm); - -/* Explicitly choose LSM initialization order. */ -static int __init choose_lsm_order(char *str) -{ - chosen_lsm_order = str; - return 1; -} -__setup("lsm=", choose_lsm_order); - -/* Enable LSM order debugging. 
*/ -static int __init enable_debug(char *str) -{ - debug = true; - return 1; -} -__setup("lsm.debug", enable_debug); - -static bool match_last_lsm(const char *list, const char *lsm) -{ - const char *last; - - if (WARN_ON(!list || !lsm)) - return false; - last = strrchr(list, ','); - if (last) - /* Pass the comma, strcmp() will check for '\0' */ - last++; - else - last = list; - return !strcmp(last, lsm); -} - -static int lsm_append(const char *new, char **result) -{ - char *cp; - - if (*result == NULL) { - *result = kstrdup(new, GFP_KERNEL); - if (*result == NULL) - return -ENOMEM; - } else { - /* Check if it is the last registered name */ - if (match_last_lsm(*result, new)) - return 0; - cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new); - if (cp == NULL) - return -ENOMEM; - kfree(*result); - *result = cp; - } - return 0; -} - -/** - * security_add_hooks - Add a modules hooks to the hook lists. - * @hooks: the hooks to add - * @count: the number of hooks to add - * @lsmid: the identification information for the security module + * Allocate the file blob for all the modules * - * Each LSM has to register its hooks with the infrastructure. + * Returns 0, or -ENOMEM if memory can't be allocated. */ -void __init security_add_hooks(struct security_hook_list *hooks, int count, - const struct lsm_id *lsmid) +static int lsm_file_alloc(struct file *file) { - int i; - - /* - * A security module may call security_add_hooks() more - * than once during initialization, and LSM initialization - * is serialized. Landlock is one such case. - * Look at the previous entry, if there is one, for duplication. - */ - if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) { - if (lsm_active_cnt >= MAX_LSM_COUNT) - panic("%s Too many LSMs registered.\n", __func__); - lsm_idlist[lsm_active_cnt++] = lsmid; - } - - for (i = 0; i < count; i++) { - hooks[i].lsmid = lsmid; - lsm_static_call_init(&hooks[i]); - } - - /* - * Don't try to append during early_security_init(), we'll come back - * and fix this up afterwards. - */ - if (slab_is_available()) { - if (lsm_append(lsmid->name, &lsm_names) < 0) - panic("%s - Cannot get early memory.\n", __func__); + if (!lsm_file_cache) { + file->f_security = NULL; + return 0; } -} - -int call_blocking_lsm_notifier(enum lsm_event event, void *data) -{ - return blocking_notifier_call_chain(&blocking_lsm_notifier_chain, - event, data); -} -EXPORT_SYMBOL(call_blocking_lsm_notifier); -int register_blocking_lsm_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&blocking_lsm_notifier_chain, - nb); -} -EXPORT_SYMBOL(register_blocking_lsm_notifier); - -int unregister_blocking_lsm_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain, - nb); + file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL); + if (file->f_security == NULL) + return -ENOMEM; + return 0; } -EXPORT_SYMBOL(unregister_blocking_lsm_notifier); /** * lsm_blob_alloc - allocate a composite blob @@ -696,47 +204,12 @@ static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp) * * Returns 0, or -ENOMEM if memory can't be allocated. 
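The lsm_file_alloc()/lsm_blob_alloc() pattern above hands every module a slice of one shared per-object blob; at init time the framework turns each module's requested lbs_* size into an offset into that blob. A minimal sketch of the module-side accessor this enables — the example_* names are illustrative stand-ins (modeled on SELinux's selinux_file() helper), not symbols introduced by this diff:

/*
 * Sketch only: recovering a module's slice of the shared per-file blob.
 * example_blob_sizes.lbs_file holds this module's offset, assigned by the
 * framework when all registered blob sizes were summed during init.
 */
struct example_file_security {
	u32 sid;	/* whatever per-file state the module keeps */
};

static inline struct example_file_security *example_file(const struct file *file)
{
	return file->f_security + example_blob_sizes.lbs_file;
}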
*/ -static int lsm_cred_alloc(struct cred *cred, gfp_t gfp) +int lsm_cred_alloc(struct cred *cred, gfp_t gfp) { return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp); } /** - * lsm_early_cred - during initialization allocate a composite cred blob - * @cred: the cred that needs a blob - * - * Allocate the cred blob for all the modules - */ -static void __init lsm_early_cred(struct cred *cred) -{ - int rc = lsm_cred_alloc(cred, GFP_KERNEL); - - if (rc) - panic("%s: Early cred alloc failed.\n", __func__); -} - -/** - * lsm_file_alloc - allocate a composite file blob - * @file: the file that needs a blob - * - * Allocate the file blob for all the modules - * - * Returns 0, or -ENOMEM if memory can't be allocated. - */ -static int lsm_file_alloc(struct file *file) -{ - if (!lsm_file_cache) { - file->f_security = NULL; - return 0; - } - - file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL); - if (file->f_security == NULL) - return -ENOMEM; - return 0; -} - -/** * lsm_inode_alloc - allocate a composite inode blob * @inode: the inode that needs a blob * @gfp: allocation flags @@ -766,7 +239,7 @@ static int lsm_inode_alloc(struct inode *inode, gfp_t gfp) * * Returns 0, or -ENOMEM if memory can't be allocated. */ -static int lsm_task_alloc(struct task_struct *task) +int lsm_task_alloc(struct task_struct *task) { return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL); } @@ -823,31 +296,50 @@ static int lsm_msg_msg_alloc(struct msg_msg *mp) */ static int lsm_bdev_alloc(struct block_device *bdev) { - if (blob_sizes.lbs_bdev == 0) { - bdev->bd_security = NULL; - return 0; - } - - bdev->bd_security = kzalloc(blob_sizes.lbs_bdev, GFP_KERNEL); - if (!bdev->bd_security) - return -ENOMEM; + return lsm_blob_alloc(&bdev->bd_security, blob_sizes.lbs_bdev, + GFP_KERNEL); +} - return 0; +#ifdef CONFIG_BPF_SYSCALL +/** + * lsm_bpf_map_alloc - allocate a composite bpf_map blob + * @map: the bpf_map that needs a blob + * + * Allocate the bpf_map blob for all the modules + * + * Returns 0, or -ENOMEM if memory can't be allocated. + */ +static int lsm_bpf_map_alloc(struct bpf_map *map) +{ + return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL); } /** - * lsm_early_task - during initialization allocate a composite task blob - * @task: the task that needs a blob + * lsm_bpf_prog_alloc - allocate a composite bpf_prog blob + * @prog: the bpf_prog that needs a blob * - * Allocate the task blob for all the modules + * Allocate the bpf_prog blob for all the modules + * + * Returns 0, or -ENOMEM if memory can't be allocated. */ -static void __init lsm_early_task(struct task_struct *task) +static int lsm_bpf_prog_alloc(struct bpf_prog *prog) { - int rc = lsm_task_alloc(task); + return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL); +} - if (rc) - panic("%s: Early task alloc failed.\n", __func__); +/** + * lsm_bpf_token_alloc - allocate a composite bpf_token blob + * @token: the bpf_token that needs a blob + * + * Allocate the bpf_token blob for all the modules + * + * Returns 0, or -ENOMEM if memory can't be allocated. + */ +static int lsm_bpf_token_alloc(struct bpf_token *token) +{ + return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL); } +#endif /* CONFIG_BPF_SYSCALL */ /** * lsm_superblock_alloc - allocate a composite superblock blob @@ -1248,6 +740,12 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) * to 1 if AT_SECURE should be set to request libc enable secure mode. 
@bprm * contains the linux_binprm structure. * + * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is + * set. The result must be the same as without this flag even if the execution + * will never really happen and @bprm will always be dropped. + * + * This hook must not change current->cred, only @bprm->cred. + * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_for_exec(struct linux_binprm *bprm) @@ -1735,8 +1233,7 @@ void security_inode_free(struct inode *inode) * @mode: mode used to determine resource type * @name: name of the last path component * @xattr_name: name of the security/LSM xattr - * @ctx: pointer to the resulting LSM context - * @ctxlen: length of @ctx + * @lsmctx: pointer to the resulting LSM context * * Compute a context for a dentry as the inode is not yet available since NFSv4 * has no label backed by an EA anyway. It is important to note that @@ -1746,11 +1243,11 @@ void security_inode_free(struct inode *inode) */ int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, - const char **xattr_name, void **ctx, - u32 *ctxlen) + const char **xattr_name, + struct lsm_context *lsmctx) { return call_int_hook(dentry_init_security, dentry, mode, name, - xattr_name, ctx, ctxlen); + xattr_name, lsmctx); } EXPORT_SYMBOL(security_dentry_init_security); @@ -1770,7 +1267,7 @@ EXPORT_SYMBOL(security_dentry_init_security); * Return: Returns 0 on success, error on failure. */ int security_dentry_create_files_as(struct dentry *dentry, int mode, - struct qstr *name, + const struct qstr *name, const struct cred *old, struct cred *new) { return call_int_hook(dentry_create_files_as, dentry, mode, @@ -2176,7 +1673,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry, } /** - * security_inode_mkdir() - Check if creation a new director is allowed + * security_inode_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode @@ -2618,6 +2115,36 @@ void security_inode_post_removexattr(struct dentry *dentry, const char *name) } /** + * security_inode_file_setattr() - check if setting fsxattr is allowed + * @dentry: file to set filesystem extended attributes on + * @fa: extended attributes to set on the inode + * + * Called when file_setattr() syscall or FS_IOC_FSSETXATTR ioctl() is called on + * inode + * + * Return: Returns 0 if permission is granted. + */ +int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa) +{ + return call_int_hook(inode_file_setattr, dentry, fa); +} + +/** + * security_inode_file_getattr() - check if retrieving fsxattr is allowed + * @dentry: file to retrieve filesystem extended attributes from + * @fa: extended attributes to get + * + * Called when file_getattr() syscall or FS_IOC_FSGETXATTR ioctl() is called on + * inode + * + * Return: Returns 0 if permission is granted. + */ +int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa) +{ + return call_int_hook(inode_file_getattr, dentry, fa); +} + +/** * security_inode_need_killpriv() - Check if security_inode_killpriv() required * @dentry: associated dentry * @@ -3098,6 +2625,10 @@ int security_file_receive(struct file *file) * Save open-time permission checking state for later use upon file_permission, * and recheck access if anything has changed since inode_permission. * + * We can check if a file is opened for execution (e.g. 
execve(2) call), either + * directly or indirectly (e.g. ELF's ld.so) by checking file->f_flags & + * __FMODE_EXEC . + * * Return: Returns 0 if permission is granted. */ int security_file_open(struct file *file) @@ -3146,7 +2677,7 @@ int security_file_truncate(struct file *file) * * Return: Returns a zero on success, negative values on failure. */ -int security_task_alloc(struct task_struct *task, unsigned long clone_flags) +int security_task_alloc(struct task_struct *task, u64 clone_flags) { int rc = lsm_task_alloc(task); @@ -4139,10 +3670,8 @@ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx, if (base) uctx = (struct lsm_ctx __user *)(base + total); rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags); - if (rc == -EOPNOTSUPP) { - rc = 0; + if (rc == -EOPNOTSUPP) continue; - } if (rc == -E2BIG) { rc = 0; left = 0; @@ -4270,24 +3799,6 @@ int security_setprocattr(int lsmid, const char *name, void *value, size_t size) } /** - * security_netlink_send() - Save info and check if netlink sending is allowed - * @sk: sending socket - * @skb: netlink message - * - * Save security information for a netlink message so that permission checking - * can be performed when the message is processed. The security information - * can be saved using the eff_cap field of the netlink_skb_parms structure. - * Also may be used to provide fine grained control over message transmission. - * - * Return: Returns 0 if the information was successfully saved and message is - * allowed to be transmitted. - */ -int security_netlink_send(struct sock *sk, struct sk_buff *skb) -{ - return call_int_hook(netlink_send, sk, skb); -} - -/** * security_ismaclabel() - Check if the named attribute is a MAC label * @name: full extended attribute name * @@ -4304,40 +3815,50 @@ EXPORT_SYMBOL(security_ismaclabel); /** * security_secid_to_secctx() - Convert a secid to a secctx * @secid: secid - * @secdata: secctx - * @seclen: secctx length + * @cp: the LSM context * - * Convert secid to security context. If @secdata is NULL the length of the - * result will be returned in @seclen, but no @secdata will be returned. This + * Convert secid to security context. If @cp is NULL the length of the + * result will be returned, but no data will be returned. This * does mean that the length could change between calls to check the length and - * the next call which actually allocates and returns the @secdata. + * the next call which actually allocates and returns the data. * - * Return: Return 0 on success, error on failure. + * Return: Return length of data on success, error on failure. */ -int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +int security_secid_to_secctx(u32 secid, struct lsm_context *cp) { - return call_int_hook(secid_to_secctx, secid, secdata, seclen); + return call_int_hook(secid_to_secctx, secid, cp); } EXPORT_SYMBOL(security_secid_to_secctx); /** * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx * @prop: lsm specific information - * @secdata: secctx - * @seclen: secctx length + * @cp: the LSM context + * @lsmid: which security module to report * - * Convert a @prop entry to security context. If @secdata is NULL the - * length of the result will be returned in @seclen, but no @secdata - * will be returned. This does mean that the length could change between - * calls to check the length and the next call which actually allocates - * and returns the @secdata. + * Convert a @prop entry to security context. 
If @cp is NULL the + * length of the result will be returned. This does mean that the + * length could change between calls to check the length and the + * next call which actually allocates and returns the @cp. * - * Return: Return 0 on success, error on failure. + * @lsmid identifies which LSM should supply the context. + * A value of LSM_ID_UNDEF indicates that the first LSM suppling + * the hook should be used. This is used in cases where the + * ID of the supplying LSM is unambiguous. + * + * Return: Return length of data on success, error on failure. */ -int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, - u32 *seclen) +int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp, + int lsmid) { - return call_int_hook(lsmprop_to_secctx, prop, secdata, seclen); + struct lsm_static_call *scall; + + lsm_for_each_hook(scall, lsmprop_to_secctx) { + if (lsmid != LSM_ID_UNDEF && lsmid != scall->hl->lsmid->id) + continue; + return scall->hl->hook.lsmprop_to_secctx(prop, cp); + } + return LSM_RET_DEFAULT(lsmprop_to_secctx); } EXPORT_SYMBOL(security_lsmprop_to_secctx); @@ -4360,14 +3881,14 @@ EXPORT_SYMBOL(security_secctx_to_secid); /** * security_release_secctx() - Free a secctx buffer - * @secdata: secctx - * @seclen: length of secctx + * @cp: the security context * * Release the security context. */ -void security_release_secctx(char *secdata, u32 seclen) +void security_release_secctx(struct lsm_context *cp) { - call_void_hook(release_secctx, secdata, seclen); + call_void_hook(release_secctx, cp); + memset(cp, 0, sizeof(*cp)); } EXPORT_SYMBOL(security_release_secctx); @@ -4430,17 +3951,17 @@ EXPORT_SYMBOL(security_inode_setsecctx); /** * security_inode_getsecctx() - Get the security label of an inode * @inode: inode - * @ctx: secctx - * @ctxlen: length of secctx + * @cp: security context * - * On success, returns 0 and fills out @ctx and @ctxlen with the security - * context for the given @inode. + * On success, returns 0 and fills out @cp with the security context + * for the given @inode. * * Return: Returns 0 on success, error on failure. */ -int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { - return call_int_hook(inode_getsecctx, inode, ctx, ctxlen); + memset(cp, 0, sizeof(*cp)); + return call_int_hook(inode_getsecctx, inode, cp); } EXPORT_SYMBOL(security_inode_getsecctx); @@ -4481,6 +4002,24 @@ int security_watch_key(struct key *key) #ifdef CONFIG_SECURITY_NETWORK /** + * security_netlink_send() - Save info and check if netlink sending is allowed + * @sk: sending socket + * @skb: netlink message + * + * Save security information for a netlink message so that permission checking + * can be performed when the message is processed. The security information + * can be saved using the eff_cap field of the netlink_skb_parms structure. + * Also may be used to provide fine grained control over message transmission. + * + * Return: Returns 0 if the information was successfully saved and message is + * allowed to be transmitted. 
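The conversions above replace the old secdata/seclen argument pairs with a single struct lsm_context, and security_secid_to_secctx() now reports the context length in its return value. A rough caller-side sketch of the new pattern, assuming only the context, len and id members these hooks fill in (the audit-log consumer is just an example use, not taken from this diff):

/*
 * Sketch only: typical flow with the reworked secctx API. A negative return
 * is an error; on success cp.context/cp.len describe the buffer, and
 * security_release_secctx() both frees it and zeroes cp.
 */
static void example_log_secctx(struct audit_buffer *ab, u32 secid)
{
	struct lsm_context cp;

	if (security_secid_to_secctx(secid, &cp) < 0)
		return;
	audit_log_format(ab, " obj=%s", cp.context);
	security_release_secctx(&cp);
}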
+ */ +int security_netlink_send(struct sock *sk, struct sk_buff *skb) +{ + return call_int_hook(netlink_send, sk, skb); +} + +/** * security_unix_stream_connect() - Check if a AF_UNIX stream is allowed * @sock: originating sock * @other: peer sock @@ -5624,6 +5163,7 @@ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, * @cmd: command * @attr: bpf attribute * @size: size + * @kernel: whether or not call originated from kernel * * Do a initial check for all bpf syscalls after the attribute is copied into * the kernel. The actual security module can implement their own rules to @@ -5631,9 +5171,9 @@ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, * * Return: Returns 0 if permission is granted. */ -int security_bpf(int cmd, union bpf_attr *attr, unsigned int size) +int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { - return call_int_hook(bpf, cmd, attr, size); + return call_int_hook(bpf, cmd, attr, size, kernel); } /** @@ -5670,6 +5210,7 @@ int security_bpf_prog(struct bpf_prog *prog) * @map: BPF map object * @attr: BPF syscall attributes used to create BPF map * @token: BPF token used to grant user access + * @kernel: whether or not call originated from kernel * * Do a check when the kernel creates a new BPF map. This is also the * point where LSM blob is allocated for LSMs that need them. @@ -5677,9 +5218,18 @@ int security_bpf_prog(struct bpf_prog *prog) * Return: Returns 0 on success, error on failure. */ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, - struct bpf_token *token) + struct bpf_token *token, bool kernel) { - return call_int_hook(bpf_map_create, map, attr, token); + int rc; + + rc = lsm_bpf_map_alloc(map); + if (unlikely(rc)) + return rc; + + rc = call_int_hook(bpf_map_create, map, attr, token, kernel); + if (unlikely(rc)) + security_bpf_map_free(map); + return rc; } /** @@ -5687,6 +5237,7 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, * @prog: BPF program object * @attr: BPF syscall attributes used to create BPF program * @token: BPF token used to grant user access to BPF subsystem + * @kernel: whether or not call originated from kernel * * Perform an access control check when the kernel loads a BPF program and * allocates associated BPF program object. This hook is also responsible for @@ -5695,9 +5246,18 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, * Return: Returns 0 on success, error on failure. 
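With security_bpf_map_create() (and the prog/token variants above and below) now allocating the object's security blob before running the hooks and freeing it again if any hook fails, an individual module can assume its slice of map->security exists when its hook runs. A small sketch of what that can look like on the module side — example_bpf_map(), example_blob_sizes and example_current_sid() are assumptions for illustration, not definitions from this series:

/*
 * Sketch only: a module-side bpf_map_create hook relying on the blob that
 * the framework pre-allocated. The hook arguments mirror
 * call_int_hook(bpf_map_create, map, attr, token, kernel) in this diff.
 */
struct example_bpf_map_security {
	u32 sid;
};

static struct example_bpf_map_security *example_bpf_map(const struct bpf_map *map)
{
	return map->security + example_blob_sizes.lbs_bpf_map;
}

static int example_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
				  struct bpf_token *token, bool kernel)
{
	example_bpf_map(map)->sid = example_current_sid();	/* record the creator */
	return 0;
}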
*/ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, - struct bpf_token *token) + struct bpf_token *token, bool kernel) { - return call_int_hook(bpf_prog_load, prog, attr, token); + int rc; + + rc = lsm_bpf_prog_alloc(prog); + if (unlikely(rc)) + return rc; + + rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel); + if (unlikely(rc)) + security_bpf_prog_free(prog); + return rc; } /** @@ -5714,7 +5274,16 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, const struct path *path) { - return call_int_hook(bpf_token_create, token, attr, path); + int rc; + + rc = lsm_bpf_token_alloc(token); + if (unlikely(rc)) + return rc; + + rc = call_int_hook(bpf_token_create, token, attr, path); + if (unlikely(rc)) + security_bpf_token_free(token); + return rc; } /** @@ -5758,6 +5327,8 @@ int security_bpf_token_capable(const struct bpf_token *token, int cap) void security_bpf_map_free(struct bpf_map *map) { call_void_hook(bpf_map_free, map); + kfree(map->security); + map->security = NULL; } /** @@ -5769,6 +5340,8 @@ void security_bpf_map_free(struct bpf_map *map) void security_bpf_prog_free(struct bpf_prog *prog) { call_void_hook(bpf_prog_free, prog); + kfree(prog->aux->security); + prog->aux->security = NULL; } /** @@ -5780,6 +5353,8 @@ void security_bpf_prog_free(struct bpf_prog *prog) void security_bpf_token_free(struct bpf_token *token) { call_void_hook(bpf_token_free, token); + kfree(token->security); + token->security = NULL; } #endif /* CONFIG_BPF_SYSCALL */ @@ -5880,16 +5455,15 @@ EXPORT_SYMBOL(security_bdev_setintegrity); #ifdef CONFIG_PERF_EVENTS /** * security_perf_event_open() - Check if a perf event open is allowed - * @attr: perf event attribute * @type: type of event * * Check whether the @type of perf_event_open syscall is allowed. * * Return: Returns 0 if permission is granted. */ -int security_perf_event_open(struct perf_event_attr *attr, int type) +int security_perf_event_open(int type) { - return call_int_hook(perf_event_open, attr, type); + return call_int_hook(perf_event_open, type); } /** @@ -5996,6 +5570,18 @@ int security_uring_cmd(struct io_uring_cmd *ioucmd) { return call_int_hook(uring_cmd, ioucmd); } + +/** + * security_uring_allowed() - Check if io_uring_setup() is allowed + * + * Check whether the current task is allowed to call io_uring_setup(). + * + * Return: Returns 0 if permission is granted. + */ +int security_uring_allowed(void) +{ + return call_int_hook(uring_allowed); +} #endif /* CONFIG_IO_URING */ /** diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 61abc1e094a8..5588c4d573f6 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -69,6 +69,17 @@ config SECURITY_SELINUX_SID2STR_CACHE_SIZE If unsure, keep the default value. +config SECURITY_SELINUX_AVC_HASH_BITS + int "SELinux avc hashtable size" + depends on SECURITY_SELINUX + range 9 14 + default 9 + help + This option sets the number of buckets used in the AVC hash table + to 2^SECURITY_SELINUX_AVC_HASH_BITS. A higher value helps maintain + shorter chain lengths especially when expanding AVC nodes via + /sys/fs/selinux/avc/cache_threshold. 
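The SECURITY_SELINUX_AVC_HASH_BITS help text above defines the table size as a power of two of the configured bit count, and the avc.c hunk further down replaces the previously hard-coded 512-slot table with that value. Spelling out the arithmetic for the stated 9..14 range:

/*
 * Sketch only: bucket counts implied by CONFIG_SECURITY_SELINUX_AVC_HASH_BITS.
 *   bits =  9  ->  1 << 9  =   512 buckets (the default, matching the size
 *                                the AVC table had before it was configurable)
 *   bits = 14  ->  1 << 14 = 16384 buckets
 */
#define EXAMPLE_AVC_SLOTS(bits)	(1U << (bits))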
+ config SECURITY_SELINUX_DEBUG bool "SELinux kernel debugging support" depends on SECURITY_SELINUX diff --git a/security/selinux/Makefile b/security/selinux/Makefile index 86f0575f670d..72d3baf7900c 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile @@ -15,7 +15,7 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include ccflags-$(CONFIG_SECURITY_SELINUX_DEBUG) += -DDEBUG selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \ - netnode.o netport.o status.o \ + netnode.o netport.o status.o initcalls.o \ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/context.o @@ -33,11 +33,10 @@ $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h quiet_cmd_genhdrs = GEN $(addprefix $(obj)/,$(genhdrs)) cmd_genhdrs = $< $(addprefix $(obj)/,$(genhdrs)) -# see the note above, replace the $targets and 'flask.h' rule with the lines -# below: -# targets += $(genhdrs) +targets += $(genhdrs) + +# see the note above, replace the 'flask.h' rule with the line below: # $(addprefix $(obj)/,$(genhdrs)) &: $(obj)/genheaders FORCE -targets += flask.h $(obj)/flask.h: $(obj)/genheaders FORCE $(call if_changed,genhdrs) diff --git a/security/selinux/avc.c b/security/selinux/avc.c index cc0b0af20296..8f77b9a732e1 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -30,13 +30,14 @@ #include "avc.h" #include "avc_ss.h" #include "classmap.h" +#include "hash.h" #define CREATE_TRACE_POINTS #include <trace/events/avc.h> -#define AVC_CACHE_SLOTS 512 -#define AVC_DEF_CACHE_THRESHOLD 512 -#define AVC_CACHE_RECLAIM 16 +#define AVC_CACHE_SLOTS (1 << CONFIG_SECURITY_SELINUX_AVC_HASH_BITS) +#define AVC_DEF_CACHE_THRESHOLD AVC_CACHE_SLOTS +#define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) @@ -124,7 +125,7 @@ static struct kmem_cache *avc_xperms_cachep __ro_after_init; static inline u32 avc_hash(u32 ssid, u32 tsid, u16 tclass) { - return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); + return av_hash(ssid, tsid, (u32)tclass, (u32)(AVC_CACHE_SLOTS - 1)); } /** @@ -174,13 +175,15 @@ int avc_get_hash_stats(char *page) * using a linked list for extended_perms_decision lookup because the list is * always small. i.e. 
less than 5, typically 1 */ -static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver, - struct avc_xperms_node *xp_node) +static struct extended_perms_decision * +avc_xperms_decision_lookup(u8 driver, u8 base_perm, + struct avc_xperms_node *xp_node) { struct avc_xperms_decision_node *xpd_node; list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) { - if (xpd_node->xpd.driver == driver) + if (xpd_node->xpd.driver == driver && + xpd_node->xpd.base_perm == base_perm) return &xpd_node->xpd; } return NULL; @@ -205,11 +208,12 @@ avc_xperms_has_perm(struct extended_perms_decision *xpd, } static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node, - u8 driver, u8 perm) + u8 driver, u8 base_perm, u8 perm) { struct extended_perms_decision *xpd; security_xperm_set(xp_node->xp.drivers.p, driver); - xpd = avc_xperms_decision_lookup(driver, xp_node); + xp_node->xp.base_perms |= base_perm; + xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node); if (xpd && xpd->allowed) security_xperm_set(xpd->allowed->p, perm); } @@ -245,6 +249,7 @@ static void avc_xperms_free(struct avc_xperms_node *xp_node) static void avc_copy_xperms_decision(struct extended_perms_decision *dest, struct extended_perms_decision *src) { + dest->base_perm = src->base_perm; dest->driver = src->driver; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) @@ -272,6 +277,7 @@ static inline void avc_quick_copy_xperms_decision(u8 perm, */ u8 i = perm >> 5; + dest->base_perm = src->base_perm; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) dest->allowed->p[i] = src->allowed->p[i]; @@ -287,27 +293,26 @@ static struct avc_xperms_decision_node struct avc_xperms_decision_node *xpd_node; struct extended_perms_decision *xpd; - xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, - GFP_NOWAIT | __GFP_NOWARN); + xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT); if (!xpd_node) return NULL; xpd = &xpd_node->xpd; if (which & XPERMS_ALLOWED) { xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->allowed) goto error; } if (which & XPERMS_AUDITALLOW) { xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->auditallow) goto error; } if (which & XPERMS_DONTAUDIT) { xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->dontaudit) goto error; } @@ -335,7 +340,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void) { struct avc_xperms_node *xp_node; - xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN); + xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT); if (!xp_node) return xp_node; INIT_LIST_HEAD(&xp_node->xpd_head); @@ -357,6 +362,7 @@ static int avc_xperms_populate(struct avc_node *node, memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p)); dest->xp.len = src->xp.len; + dest->xp.base_perms = src->xp.base_perms; /* for each source xpd allocate a destination xpd and copy */ list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) { @@ -489,7 +495,7 @@ static struct avc_node *avc_alloc_node(void) { struct avc_node *node; - node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN); + node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT); if (!node) goto out; @@ -807,6 +813,7 @@ out: * @event : Updating event * @perms : Permission mask bits * @driver: xperm driver information + * @base_perm: the base permission associated with the extended 
permission * @xperm: xperm permissions * @ssid: AVC entry source sid * @tsid: AVC entry target sid @@ -820,10 +827,9 @@ out: * otherwise, this function updates the AVC entry. The original AVC-entry object * will release later by RCU. */ -static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, - u32 tsid, u16 tclass, u32 seqno, - struct extended_perms_decision *xpd, - u32 flags) +static int avc_update_node(u32 event, u32 perms, u8 driver, u8 base_perm, + u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno, + struct extended_perms_decision *xpd, u32 flags) { u32 hvalue; int rc = 0; @@ -880,7 +886,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, case AVC_CALLBACK_GRANT: node->ae.avd.allowed |= perms; if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS)) - avc_xperms_allow_perm(node->ae.xp_node, driver, xperm); + avc_xperms_allow_perm(node->ae.xp_node, driver, base_perm, xperm); break; case AVC_CALLBACK_TRY_REVOKE: case AVC_CALLBACK_REVOKE: @@ -930,7 +936,7 @@ static void avc_flush(void) spin_lock_irqsave(lock, flag); /* - * With preemptable RCU, the outer spinlock does not + * With preemptible RCU, the outer spinlock does not * prevent RCU grace periods from ending. */ rcu_read_lock(); @@ -987,10 +993,9 @@ static noinline void avc_compute_av(u32 ssid, u32 tsid, u16 tclass, avc_insert(ssid, tsid, tclass, avd, xp_node); } -static noinline int avc_denied(u32 ssid, u32 tsid, - u16 tclass, u32 requested, - u8 driver, u8 xperm, unsigned int flags, - struct av_decision *avd) +static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested, + u8 driver, u8 base_perm, u8 xperm, + unsigned int flags, struct av_decision *avd) { if (flags & AVC_STRICT) return -EACCES; @@ -999,7 +1004,7 @@ static noinline int avc_denied(u32 ssid, u32 tsid, !(avd->flags & AVD_FLAGS_PERMISSIVE)) return -EACCES; - avc_update_node(AVC_CALLBACK_GRANT, requested, driver, + avc_update_node(AVC_CALLBACK_GRANT, requested, driver, base_perm, xperm, ssid, tsid, tclass, avd->seqno, NULL, flags); return 0; } @@ -1012,7 +1017,8 @@ static noinline int avc_denied(u32 ssid, u32 tsid, * driver field is used to specify which set contains the permission. */ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, - u8 driver, u8 xperm, struct common_audit_data *ad) + u8 driver, u8 base_perm, u8 xperm, + struct common_audit_data *ad) { struct avc_node *node; struct av_decision avd; @@ -1047,22 +1053,23 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, local_xpd.auditallow = &auditallow; local_xpd.dontaudit = &dontaudit; - xpd = avc_xperms_decision_lookup(driver, xp_node); + xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node); if (unlikely(!xpd)) { /* * Compute the extended_perms_decision only if the driver - * is flagged + * is flagged and the base permission is known. 
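avc_has_extended_perms() and the update path above now carry a base permission next to the driver byte, so a cached extended-permission decision is only reused when both match. A condensed caller-side sketch, modeled on the ioctl_has_perm() change later in this diff (ssid, isec, requested and ad as used there):

/*
 * Sketch only: an ioctl check now names which base permission the extended
 * permission refines (AVC_EXT_IOCTL) in addition to the driver and function
 * bytes of the ioctl command.
 */
u8 driver = cmd >> 8;
u8 xperm = cmd & 0xff;

rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass, requested,
			    driver, AVC_EXT_IOCTL, xperm, &ad);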
*/ - if (!security_xperm_test(xp_node->xp.drivers.p, driver)) { + if (!security_xperm_test(xp_node->xp.drivers.p, driver) || + !(xp_node->xp.base_perms & base_perm)) { avd.allowed &= ~requested; goto decision; } rcu_read_unlock(); - security_compute_xperms_decision(ssid, tsid, tclass, - driver, &local_xpd); + security_compute_xperms_decision(ssid, tsid, tclass, driver, + base_perm, &local_xpd); rcu_read_lock(); - avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, - driver, xperm, ssid, tsid, tclass, avd.seqno, + avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, + base_perm, xperm, ssid, tsid, tclass, avd.seqno, &local_xpd, 0); } else { avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd); @@ -1075,8 +1082,8 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, decision: denied = requested & ~(avd.allowed); if (unlikely(denied)) - rc = avc_denied(ssid, tsid, tclass, requested, - driver, xperm, AVC_EXTENDED_PERMS, &avd); + rc = avc_denied(ssid, tsid, tclass, requested, driver, + base_perm, xperm, AVC_EXTENDED_PERMS, &avd); rcu_read_unlock(); @@ -1110,7 +1117,7 @@ static noinline int avc_perm_nonode(u32 ssid, u32 tsid, u16 tclass, avc_compute_av(ssid, tsid, tclass, avd, &xp_node); denied = requested & ~(avd->allowed); if (unlikely(denied)) - return avc_denied(ssid, tsid, tclass, requested, 0, 0, + return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0, flags, avd); return 0; } @@ -1158,7 +1165,7 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, rcu_read_unlock(); if (unlikely(denied)) - return avc_denied(ssid, tsid, tclass, requested, 0, 0, + return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0, flags, avd); return 0; } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f5a08f94e094..d053ce562370 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -65,7 +65,6 @@ #include <net/netlink.h> #include <linux/tcp.h> #include <linux/udp.h> -#include <linux/dccp.h> #include <linux/sctp.h> #include <net/sctp/structs.h> #include <linux/quota.h> @@ -94,7 +93,9 @@ #include <linux/fanotify.h> #include <linux/io_uring/cmd.h> #include <uapi/linux/lsm.h> +#include <linux/memfd.h> +#include "initcalls.h" #include "avc.h" #include "objsec.h" #include "netif.h" @@ -211,10 +212,12 @@ static int selinux_lsm_notifier_avc_callback(u32 event) */ static void cred_init_security(void) { - struct task_security_struct *tsec; + struct cred_security_struct *crsec; + + /* NOTE: the lsm framework zeros out the buffer on allocation */ - tsec = selinux_cred(unrcu_pointer(current->real_cred)); - tsec->osid = tsec->sid = SECINITSID_KERNEL; + crsec = selinux_cred(unrcu_pointer(current->real_cred)); + crsec->osid = crsec->sid = SECINITSID_KERNEL; } /* @@ -222,10 +225,10 @@ static void cred_init_security(void) */ static inline u32 cred_sid(const struct cred *cred) { - const struct task_security_struct *tsec; + const struct cred_security_struct *crsec; - tsec = selinux_cred(cred); - return tsec->sid; + crsec = selinux_cred(cred); + return crsec->sid; } static void __ad_net_init(struct common_audit_data *ad, @@ -278,27 +281,21 @@ static int __inode_security_revalidate(struct inode *inode, struct dentry *dentry, bool may_sleep) { - struct inode_security_struct *isec = selinux_inode(inode); + if (!selinux_initialized()) + return 0; - might_sleep_if(may_sleep); + if (may_sleep) + might_sleep(); + else + return -ECHILD; /* - * The check of isec->initialized below is racy but - * inode_doinit_with_dentry() will recheck with - * isec->lock held. 
+ * Check to ensure that an inode's SELinux state is valid and try + * reloading the inode security label if necessary. This will fail if + * @dentry is NULL and no dentry for this inode can be found; in that + * case, continue using the old label. */ - if (selinux_initialized() && - data_race(isec->initialized != LABEL_INITIALIZED)) { - if (!may_sleep) - return -ECHILD; - - /* - * Try reloading the inode security label. This will fail if - * @opt_dentry is NULL and no dentry for this inode can be - * found; in that case, continue using the old label. - */ - inode_doinit_with_dentry(inode, dentry); - } + inode_doinit_with_dentry(inode, dentry); return 0; } @@ -307,41 +304,53 @@ static struct inode_security_struct *inode_security_novalidate(struct inode *ino return selinux_inode(inode); } -static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu) +static inline struct inode_security_struct *inode_security_rcu(struct inode *inode, + bool rcu) { - int error; + int rc; + struct inode_security_struct *isec = selinux_inode(inode); - error = __inode_security_revalidate(inode, NULL, !rcu); - if (error) - return ERR_PTR(error); - return selinux_inode(inode); + /* check below is racy, but revalidate will recheck with lock held */ + if (data_race(likely(isec->initialized == LABEL_INITIALIZED))) + return isec; + rc = __inode_security_revalidate(inode, NULL, !rcu); + if (rc) + return ERR_PTR(rc); + return isec; } /* * Get the security label of an inode. */ -static struct inode_security_struct *inode_security(struct inode *inode) +static inline struct inode_security_struct *inode_security(struct inode *inode) { + struct inode_security_struct *isec = selinux_inode(inode); + + /* check below is racy, but revalidate will recheck with lock held */ + if (data_race(likely(isec->initialized == LABEL_INITIALIZED))) + return isec; __inode_security_revalidate(inode, NULL, true); - return selinux_inode(inode); + return isec; } -static struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry) +static inline struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry) { - struct inode *inode = d_backing_inode(dentry); - - return selinux_inode(inode); + return selinux_inode(d_backing_inode(dentry)); } /* * Get the security label of a dentry's backing inode. 
*/ -static struct inode_security_struct *backing_inode_security(struct dentry *dentry) +static inline struct inode_security_struct *backing_inode_security(struct dentry *dentry) { struct inode *inode = d_backing_inode(dentry); + struct inode_security_struct *isec = selinux_inode(inode); + /* check below is racy, but revalidate will recheck with lock held */ + if (data_race(likely(isec->initialized == LABEL_INITIALIZED))) + return isec; __inode_security_revalidate(inode, dentry, true); - return selinux_inode(inode); + return isec; } static void inode_free_security(struct inode *inode) @@ -407,7 +416,7 @@ static const struct { static int match_opt_prefix(char *s, int l, char **arg) { - int i; + unsigned int i; for (i = 0; i < ARRAY_SIZE(tokens); i++) { size_t len = tokens[i].len; @@ -430,15 +439,15 @@ static int may_context_mount_sb_relabel(u32 sid, struct superblock_security_struct *sbsec, const struct cred *cred) { - const struct task_security_struct *tsec = selinux_cred(cred); + const struct cred_security_struct *crsec = selinux_cred(cred); int rc; - rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM, + rc = avc_has_perm(crsec->sid, sbsec->sid, SECCLASS_FILESYSTEM, FILESYSTEM__RELABELFROM, NULL); if (rc) return rc; - rc = avc_has_perm(tsec->sid, sid, SECCLASS_FILESYSTEM, + rc = avc_has_perm(crsec->sid, sid, SECCLASS_FILESYSTEM, FILESYSTEM__RELABELTO, NULL); return rc; } @@ -447,9 +456,9 @@ static int may_context_mount_inode_relabel(u32 sid, struct superblock_security_struct *sbsec, const struct cred *cred) { - const struct task_security_struct *tsec = selinux_cred(cred); + const struct cred_security_struct *crsec = selinux_cred(cred); int rc; - rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM, + rc = avc_has_perm(crsec->sid, sbsec->sid, SECCLASS_FILESYSTEM, FILESYSTEM__RELABELFROM, NULL); if (rc) return rc; @@ -469,7 +478,9 @@ static int selinux_is_genfs_special_handling(struct super_block *sb) !strcmp(sb->s_type->name, "rootfs") || (selinux_policycap_cgroupseclabel() && (!strcmp(sb->s_type->name, "cgroup") || - !strcmp(sb->s_type->name, "cgroup2"))); + !strcmp(sb->s_type->name, "cgroup2"))) || + (selinux_policycap_functionfs_seclabel() && + !strcmp(sb->s_type->name, "functionfs")); } static int selinux_is_sblabel_mnt(struct super_block *sb) @@ -734,7 +745,9 @@ static int selinux_set_mnt_opts(struct super_block *sb, !strcmp(sb->s_type->name, "binder") || !strcmp(sb->s_type->name, "bpf") || !strcmp(sb->s_type->name, "pstore") || - !strcmp(sb->s_type->name, "securityfs")) + !strcmp(sb->s_type->name, "securityfs") || + (selinux_policycap_functionfs_seclabel() && + !strcmp(sb->s_type->name, "functionfs"))) sbsec->flags |= SE_SBGENFS; if (!strcmp(sb->s_type->name, "sysfs") || @@ -1191,8 +1204,6 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc return SECCLASS_ICMP_SOCKET; else return SECCLASS_RAWIP_SOCKET; - case SOCK_DCCP: - return SECCLASS_DCCP_SOCKET; default: return SECCLASS_RAWIP_SOCKET; } @@ -1683,12 +1694,15 @@ static inline int dentry_has_perm(const struct cred *cred, struct dentry *dentry, u32 av) { - struct inode *inode = d_backing_inode(dentry); struct common_audit_data ad; + struct inode *inode = d_backing_inode(dentry); + struct inode_security_struct *isec = selinux_inode(inode); ad.type = LSM_AUDIT_DATA_DENTRY; ad.u.dentry = dentry; - __inode_security_revalidate(inode, dentry, true); + /* check below is racy, but revalidate will recheck with lock held */ + if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED))) + 
__inode_security_revalidate(inode, dentry, true); return inode_has_perm(cred, inode, av, &ad); } @@ -1699,12 +1713,15 @@ static inline int path_has_perm(const struct cred *cred, const struct path *path, u32 av) { - struct inode *inode = d_backing_inode(path->dentry); struct common_audit_data ad; + struct inode *inode = d_backing_inode(path->dentry); + struct inode_security_struct *isec = selinux_inode(inode); ad.type = LSM_AUDIT_DATA_PATH; ad.u.path = *path; - __inode_security_revalidate(inode, path->dentry, true); + /* check below is racy, but revalidate will recheck with lock held */ + if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED))) + __inode_security_revalidate(inode, path->dentry, true); return inode_has_perm(cred, inode, av, &ad); } @@ -1773,7 +1790,7 @@ out: * Determine the label for an inode that might be unioned. */ static int -selinux_determine_inode_label(const struct task_security_struct *tsec, +selinux_determine_inode_label(const struct cred_security_struct *crsec, struct inode *dir, const struct qstr *name, u16 tclass, u32 *_new_isid) @@ -1785,11 +1802,11 @@ selinux_determine_inode_label(const struct task_security_struct *tsec, (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) { *_new_isid = sbsec->mntpoint_sid; } else if ((sbsec->flags & SBLABEL_MNT) && - tsec->create_sid) { - *_new_isid = tsec->create_sid; + crsec->create_sid) { + *_new_isid = crsec->create_sid; } else { const struct inode_security_struct *dsec = inode_security(dir); - return security_transition_sid(tsec->sid, + return security_transition_sid(crsec->sid, dsec->sid, tclass, name, _new_isid); } @@ -1802,7 +1819,7 @@ static int may_create(struct inode *dir, struct dentry *dentry, u16 tclass) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); struct inode_security_struct *dsec; struct superblock_security_struct *sbsec; u32 sid, newsid; @@ -1812,7 +1829,7 @@ static int may_create(struct inode *dir, dsec = inode_security(dir); sbsec = selinux_superblock(dir->i_sb); - sid = tsec->sid; + sid = crsec->sid; ad.type = LSM_AUDIT_DATA_DENTRY; ad.u.dentry = dentry; @@ -1823,7 +1840,7 @@ static int may_create(struct inode *dir, if (rc) return rc; - rc = selinux_determine_inode_label(tsec, dir, &dentry->d_name, tclass, + rc = selinux_determine_inode_label(crsec, dir, &dentry->d_name, tclass, &newsid); if (rc) return rc; @@ -2236,8 +2253,8 @@ static u32 ptrace_parent_sid(void) } static int check_nnp_nosuid(const struct linux_binprm *bprm, - const struct task_security_struct *old_tsec, - const struct task_security_struct *new_tsec) + const struct cred_security_struct *old_crsec, + const struct cred_security_struct *new_crsec) { int nnp = (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS); int nosuid = !mnt_may_suid(bprm->file->f_path.mnt); @@ -2247,7 +2264,7 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm, if (!nnp && !nosuid) return 0; /* neither NNP nor nosuid */ - if (new_tsec->sid == old_tsec->sid) + if (new_crsec->sid == old_crsec->sid) return 0; /* No change in credentials */ /* @@ -2262,7 +2279,7 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm, av |= PROCESS2__NNP_TRANSITION; if (nosuid) av |= PROCESS2__NOSUID_TRANSITION; - rc = avc_has_perm(old_tsec->sid, new_tsec->sid, + rc = avc_has_perm(old_crsec->sid, new_crsec->sid, SECCLASS_PROCESS2, av, NULL); if (!rc) return 0; @@ -2273,8 +2290,8 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm, * i.e. 
SIDs that are guaranteed to only be allowed a subset * of the permissions of the current SID. */ - rc = security_bounded_transition(old_tsec->sid, - new_tsec->sid); + rc = security_bounded_transition(old_crsec->sid, + new_crsec->sid); if (!rc) return 0; @@ -2290,8 +2307,8 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm, static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) { - const struct task_security_struct *old_tsec; - struct task_security_struct *new_tsec; + const struct cred_security_struct *old_crsec; + struct cred_security_struct *new_crsec; struct inode_security_struct *isec; struct common_audit_data ad; struct inode *inode = file_inode(bprm->file); @@ -2300,18 +2317,22 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) /* SELinux context only depends on initial program or script and not * the script interpreter */ - old_tsec = selinux_cred(current_cred()); - new_tsec = selinux_cred(bprm->cred); + old_crsec = selinux_cred(current_cred()); + new_crsec = selinux_cred(bprm->cred); isec = inode_security(inode); + if (WARN_ON(isec->sclass != SECCLASS_FILE && + isec->sclass != SECCLASS_MEMFD_FILE)) + return -EACCES; + /* Default to the current task SID. */ - new_tsec->sid = old_tsec->sid; - new_tsec->osid = old_tsec->sid; + new_crsec->sid = old_crsec->sid; + new_crsec->osid = old_crsec->sid; /* Reset fs, key, and sock SIDs on execve. */ - new_tsec->create_sid = 0; - new_tsec->keycreate_sid = 0; - new_tsec->sockcreate_sid = 0; + new_crsec->create_sid = 0; + new_crsec->keycreate_sid = 0; + new_crsec->sockcreate_sid = 0; /* * Before policy is loaded, label any task outside kernel space @@ -2320,26 +2341,26 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) * (if the policy chooses to set SECINITSID_INIT != SECINITSID_KERNEL). */ if (!selinux_initialized()) { - new_tsec->sid = SECINITSID_INIT; + new_crsec->sid = SECINITSID_INIT; /* also clear the exec_sid just in case */ - new_tsec->exec_sid = 0; + new_crsec->exec_sid = 0; return 0; } - if (old_tsec->exec_sid) { - new_tsec->sid = old_tsec->exec_sid; + if (old_crsec->exec_sid) { + new_crsec->sid = old_crsec->exec_sid; /* Reset exec SID on execve. */ - new_tsec->exec_sid = 0; + new_crsec->exec_sid = 0; /* Fail on NNP or nosuid if not an allowed transition. */ - rc = check_nnp_nosuid(bprm, old_tsec, new_tsec); + rc = check_nnp_nosuid(bprm, old_crsec, new_crsec); if (rc) return rc; } else { /* Check for a default transition on this program. */ - rc = security_transition_sid(old_tsec->sid, + rc = security_transition_sid(old_crsec->sid, isec->sid, SECCLASS_PROCESS, NULL, - &new_tsec->sid); + &new_crsec->sid); if (rc) return rc; @@ -2347,34 +2368,34 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) * Fallback to old SID on NNP or nosuid if not an allowed * transition. */ - rc = check_nnp_nosuid(bprm, old_tsec, new_tsec); + rc = check_nnp_nosuid(bprm, old_crsec, new_crsec); if (rc) - new_tsec->sid = old_tsec->sid; + new_crsec->sid = old_crsec->sid; } ad.type = LSM_AUDIT_DATA_FILE; ad.u.file = bprm->file; - if (new_tsec->sid == old_tsec->sid) { - rc = avc_has_perm(old_tsec->sid, isec->sid, - SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad); + if (new_crsec->sid == old_crsec->sid) { + rc = avc_has_perm(old_crsec->sid, isec->sid, isec->sclass, + FILE__EXECUTE_NO_TRANS, &ad); if (rc) return rc; } else { /* Check permissions for the transition. 
*/ - rc = avc_has_perm(old_tsec->sid, new_tsec->sid, + rc = avc_has_perm(old_crsec->sid, new_crsec->sid, SECCLASS_PROCESS, PROCESS__TRANSITION, &ad); if (rc) return rc; - rc = avc_has_perm(new_tsec->sid, isec->sid, - SECCLASS_FILE, FILE__ENTRYPOINT, &ad); + rc = avc_has_perm(new_crsec->sid, isec->sid, isec->sclass, + FILE__ENTRYPOINT, &ad); if (rc) return rc; /* Check for shared state */ if (bprm->unsafe & LSM_UNSAFE_SHARE) { - rc = avc_has_perm(old_tsec->sid, new_tsec->sid, + rc = avc_has_perm(old_crsec->sid, new_crsec->sid, SECCLASS_PROCESS, PROCESS__SHARE, NULL); if (rc) @@ -2386,7 +2407,7 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) if (bprm->unsafe & LSM_UNSAFE_PTRACE) { u32 ptsid = ptrace_parent_sid(); if (ptsid != 0) { - rc = avc_has_perm(ptsid, new_tsec->sid, + rc = avc_has_perm(ptsid, new_crsec->sid, SECCLASS_PROCESS, PROCESS__PTRACE, NULL); if (rc) @@ -2400,7 +2421,7 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) /* Enable secure mode for SIDs transitions unless the noatsecure permission is granted between the two SIDs, i.e. ahp returns 0. */ - rc = avc_has_perm(old_tsec->sid, new_tsec->sid, + rc = avc_has_perm(old_crsec->sid, new_crsec->sid, SECCLASS_PROCESS, PROCESS__NOATSECURE, NULL); bprm->secureexec |= !!rc; @@ -2468,12 +2489,12 @@ static inline void flush_unauthorized_files(const struct cred *cred, */ static void selinux_bprm_committing_creds(const struct linux_binprm *bprm) { - struct task_security_struct *new_tsec; + struct cred_security_struct *new_crsec; struct rlimit *rlim, *initrlim; int rc, i; - new_tsec = selinux_cred(bprm->cred); - if (new_tsec->sid == new_tsec->osid) + new_crsec = selinux_cred(bprm->cred); + if (new_crsec->sid == new_crsec->osid) return; /* Close files for which the new task SID is not authorized. */ @@ -2492,7 +2513,7 @@ static void selinux_bprm_committing_creds(const struct linux_binprm *bprm) * higher than the default soft limit for cases where the default is * lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK. 
*/ - rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS, + rc = avc_has_perm(new_crsec->osid, new_crsec->sid, SECCLASS_PROCESS, PROCESS__RLIMITINH, NULL); if (rc) { /* protect against do_prlimit() */ @@ -2514,12 +2535,12 @@ static void selinux_bprm_committing_creds(const struct linux_binprm *bprm) */ static void selinux_bprm_committed_creds(const struct linux_binprm *bprm) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); u32 osid, sid; int rc; - osid = tsec->osid; - sid = tsec->sid; + osid = crsec->osid; + sid = crsec->sid; if (sid == osid) return; @@ -2869,8 +2890,8 @@ static void selinux_inode_free_security(struct inode *inode) static int selinux_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, - const char **xattr_name, void **ctx, - u32 *ctxlen) + const char **xattr_name, + struct lsm_context *cp) { u32 newsid; int rc; @@ -2885,18 +2906,18 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode, if (xattr_name) *xattr_name = XATTR_NAME_SELINUX; - return security_sid_to_context(newsid, (char **)ctx, - ctxlen); + cp->id = LSM_ID_SELINUX; + return security_sid_to_context(newsid, &cp->context, &cp->len); } static int selinux_dentry_create_files_as(struct dentry *dentry, int mode, - struct qstr *name, + const struct qstr *name, const struct cred *old, struct cred *new) { u32 newsid; int rc; - struct task_security_struct *tsec; + struct cred_security_struct *crsec; rc = selinux_determine_inode_label(selinux_cred(old), d_inode(dentry->d_parent), name, @@ -2905,8 +2926,8 @@ static int selinux_dentry_create_files_as(struct dentry *dentry, int mode, if (rc) return rc; - tsec = selinux_cred(new); - tsec->create_sid = newsid; + crsec = selinux_cred(new); + crsec->create_sid = newsid; return 0; } @@ -2914,7 +2935,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); struct superblock_security_struct *sbsec; struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count); u32 newsid, clen; @@ -2924,9 +2945,9 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, sbsec = selinux_superblock(dir->i_sb); - newsid = tsec->create_sid; + newsid = crsec->create_sid; newsclass = inode_mode_to_security_class(inode->i_mode); - rc = selinux_determine_inode_label(tsec, dir, qstr, newsclass, &newsid); + rc = selinux_determine_inode_label(crsec, dir, qstr, newsclass, &newsid); if (rc) return rc; @@ -2963,10 +2984,18 @@ static int selinux_inode_init_security_anon(struct inode *inode, struct common_audit_data ad; struct inode_security_struct *isec; int rc; + bool is_memfd = false; if (unlikely(!selinux_initialized())) return 0; + if (name != NULL && name->name != NULL && + !strcmp(name->name, MEMFD_ANON_NAME)) { + if (!selinux_policycap_memfd_class()) + return 0; + is_memfd = true; + } + isec = selinux_inode(inode); /* @@ -2986,7 +3015,10 @@ static int selinux_inode_init_security_anon(struct inode *inode, isec->sclass = context_isec->sclass; isec->sid = context_isec->sid; } else { - isec->sclass = SECCLASS_ANON_INODE; + if (is_memfd) + isec->sclass = SECCLASS_MEMFD_FILE; + else + isec->sclass = SECCLASS_ANON_INODE; rc = security_transition_sid( sid, sid, isec->sclass, name, &isec->sid); 
@@ -3088,44 +3120,153 @@ static noinline int audit_inode_permission(struct inode *inode, audited, denied, result, &ad); } -static int selinux_inode_permission(struct inode *inode, int mask) +/** + * task_avdcache_reset - Reset the task's AVD cache + * @tsec: the task's security state + * + * Clear the task's AVD cache in @tsec and reset it to the current policy's + * and task's info. + */ +static inline void task_avdcache_reset(struct task_security_struct *tsec) +{ + memset(&tsec->avdcache.dir, 0, sizeof(tsec->avdcache.dir)); + tsec->avdcache.sid = current_sid(); + tsec->avdcache.seqno = avc_policy_seqno(); + tsec->avdcache.dir_spot = TSEC_AVDC_DIR_SIZE - 1; +} + +/** + * task_avdcache_search - Search the task's AVD cache + * @tsec: the task's security state + * @isec: the inode to search for in the cache + * @avdc: matching avd cache entry returned to the caller + * + * Search @tsec for a AVD cache entry that matches @isec and return it to the + * caller via @avdc. Returns 0 if a match is found, negative values otherwise. + */ +static inline int task_avdcache_search(struct task_security_struct *tsec, + struct inode_security_struct *isec, + struct avdc_entry **avdc) +{ + int orig, iter; + + /* focused on path walk optimization, only cache directories */ + if (isec->sclass != SECCLASS_DIR) + return -ENOENT; + + if (unlikely(current_sid() != tsec->avdcache.sid || + tsec->avdcache.seqno != avc_policy_seqno())) { + task_avdcache_reset(tsec); + return -ENOENT; + } + + orig = iter = tsec->avdcache.dir_spot; + do { + if (tsec->avdcache.dir[iter].isid == isec->sid) { + /* cache hit */ + tsec->avdcache.dir_spot = iter; + *avdc = &tsec->avdcache.dir[iter]; + return 0; + } + iter = (iter - 1) & (TSEC_AVDC_DIR_SIZE - 1); + } while (iter != orig); + + return -ENOENT; +} + +/** + * task_avdcache_update - Update the task's AVD cache + * @tsec: the task's security state + * @isec: the inode associated with the cache entry + * @avd: the AVD to cache + * @audited: the permission audit bitmask to cache + * + * Update the AVD cache in @tsec with the @avdc and @audited info associated + * with @isec. + */ +static inline void task_avdcache_update(struct task_security_struct *tsec, + struct inode_security_struct *isec, + struct av_decision *avd, + u32 audited) { + int spot; + + /* focused on path walk optimization, only cache directories */ + if (isec->sclass != SECCLASS_DIR) + return; + + /* update cache */ + spot = (tsec->avdcache.dir_spot + 1) & (TSEC_AVDC_DIR_SIZE - 1); + tsec->avdcache.dir_spot = spot; + tsec->avdcache.dir[spot].isid = isec->sid; + tsec->avdcache.dir[spot].audited = audited; + tsec->avdcache.dir[spot].allowed = avd->allowed; + tsec->avdcache.dir[spot].permissive = avd->flags & AVD_FLAGS_PERMISSIVE; + tsec->avdcache.permissive_neveraudit = + (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT)); +} + +/** + * selinux_inode_permission - Check if the current task can access an inode + * @inode: the inode that is being accessed + * @requested: the accesses being requested + * + * Check if the current task is allowed to access @inode according to + * @requested. Returns 0 if allowed, negative values otherwise. 
+ */ +static int selinux_inode_permission(struct inode *inode, int requested) +{ + int mask; u32 perms; - bool from_access; - bool no_block = mask & MAY_NOT_BLOCK; - struct inode_security_struct *isec; u32 sid = current_sid(); - struct av_decision avd; + struct task_security_struct *tsec; + struct inode_security_struct *isec; + struct avdc_entry *avdc; int rc, rc2; u32 audited, denied; - from_access = mask & MAY_ACCESS; - mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); + mask = requested & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); /* No permission to check. Existence test. */ if (!mask) return 0; - if (unlikely(IS_PRIVATE(inode))) + tsec = selinux_task(current); + if (task_avdcache_permnoaudit(tsec, sid)) return 0; - perms = file_mask_to_av(inode->i_mode, mask); - - isec = inode_security_rcu(inode, no_block); + isec = inode_security_rcu(inode, requested & MAY_NOT_BLOCK); if (IS_ERR(isec)) return PTR_ERR(isec); + perms = file_mask_to_av(inode->i_mode, mask); + + rc = task_avdcache_search(tsec, isec, &avdc); + if (likely(!rc)) { + /* Cache hit. */ + audited = perms & avdc->audited; + denied = perms & ~avdc->allowed; + if (unlikely(denied && enforcing_enabled() && + !avdc->permissive)) + rc = -EACCES; + } else { + struct av_decision avd; + + /* Cache miss. */ + rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, + perms, 0, &avd); + audited = avc_audit_required(perms, &avd, rc, + (requested & MAY_ACCESS) ? FILE__AUDIT_ACCESS : 0, + &denied); + task_avdcache_update(tsec, isec, &avd, audited); + } - rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, - &avd); - audited = avc_audit_required(perms, &avd, rc, - from_access ? FILE__AUDIT_ACCESS : 0, - &denied); if (likely(!audited)) return rc; rc2 = audit_inode_permission(inode, perms, audited, denied, rc); if (rc2) return rc2; + return rc; } @@ -3135,7 +3276,7 @@ static int selinux_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, const struct cred *cred = current_cred(); struct inode *inode = d_backing_inode(dentry); unsigned int ia_valid = iattr->ia_valid; - __u32 av = FILE__WRITE; + u32 av = FILE__WRITE; /* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. 
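The task_avdcache_* helpers and the rewritten selinux_inode_permission() above keep a tiny per-task ring of recent directory decisions, keyed by the inode SID and invalidated whenever the task SID or policy sequence number changes. The lookup logic, pulled out into a self-contained sketch (plain C, with AVDC_DIR_SIZE standing in for TSEC_AVDC_DIR_SIZE):

/*
 * Sketch only: the per-task directory AVD cache as a small power-of-two ring.
 * Lookups walk backwards from the last hit; on a miss the caller falls back
 * to avc_has_perm_noaudit() and overwrites the slot after the last hit.
 */
#include <stdint.h>
#include <stddef.h>

#define AVDC_DIR_SIZE 4

struct avdc_entry {
	uint32_t isid;		/* inode SID this entry caches */
	uint32_t allowed;	/* cached allowed vector */
	uint32_t audited;	/* cached audit-required vector */
};

struct avd_cache {
	struct avdc_entry dir[AVDC_DIR_SIZE];
	unsigned int spot;	/* slot of the most recent hit */
};

static struct avdc_entry *avdc_search(struct avd_cache *c, uint32_t isid)
{
	unsigned int orig = c->spot, iter = c->spot;

	do {
		if (c->dir[iter].isid == isid) {
			c->spot = iter;
			return &c->dir[iter];
		}
		iter = (iter - 1) & (AVDC_DIR_SIZE - 1);
	} while (iter != orig);

	return NULL;
}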
*/ if (ia_valid & ATTR_FORCE) { @@ -3160,6 +3301,13 @@ static int selinux_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, static int selinux_inode_getattr(const struct path *path) { + struct task_security_struct *tsec; + + tsec = selinux_task(current); + + if (task_avdcache_permnoaudit(tsec, current_sid())) + return 0; + return path_has_perm(current_cred(), path, FILE__GETATTR); } @@ -3366,6 +3514,18 @@ static int selinux_inode_removexattr(struct mnt_idmap *idmap, return -EACCES; } +static int selinux_inode_file_setattr(struct dentry *dentry, + struct file_kattr *fa) +{ + return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); +} + +static int selinux_inode_file_getattr(struct dentry *dentry, + struct file_kattr *fa) +{ + return dentry_has_perm(current_cred(), dentry, FILE__GETATTR); +} + static int selinux_path_notify(const struct path *path, u64 mask, unsigned int obj_type) { @@ -3395,6 +3555,9 @@ static int selinux_path_notify(const struct path *path, u64 mask, case FSNOTIFY_OBJ_TYPE_INODE: perm = FILE__WATCH; break; + case FSNOTIFY_OBJ_TYPE_MNTNS: + perm = FILE__WATCH_MOUNTNS; + break; default: return -EINVAL; } @@ -3404,7 +3567,8 @@ static int selinux_path_notify(const struct path *path, u64 mask, perm |= FILE__WATCH_WITH_PERM; /* watches on read-like events need the file:watch_reads permission */ - if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE)) + if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_PRE_ACCESS | + FS_CLOSE_NOWRITE)) perm |= FILE__WATCH_READS; return path_has_perm(current_cred(), path, perm); @@ -3513,7 +3677,7 @@ static void selinux_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop) static int selinux_inode_copy_up(struct dentry *src, struct cred **new) { struct lsm_prop prop; - struct task_security_struct *tsec; + struct cred_security_struct *crsec; struct cred *new_creds = *new; if (new_creds == NULL) { @@ -3522,10 +3686,10 @@ static int selinux_inode_copy_up(struct dentry *src, struct cred **new) return -ENOMEM; } - tsec = selinux_cred(new_creds); + crsec = selinux_cred(new_creds); /* Get label from overlay inode and set it in create_sid */ selinux_inode_getlsmprop(d_inode(src), &prop); - tsec->create_sid = prop.selinux.secid; + crsec->create_sid = prop.selinux.secid; *new = new_creds; return 0; } @@ -3551,7 +3715,7 @@ static int selinux_inode_copy_up_xattr(struct dentry *dentry, const char *name) static int selinux_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); u32 parent_sid, newsid, clen; int rc; char *context; @@ -3579,16 +3743,19 @@ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir, if (rc) return rc; - if (tsec->create_sid) { - newsid = tsec->create_sid; + if (crsec->create_sid) { + newsid = crsec->create_sid; } else { u16 secclass = inode_mode_to_security_class(kn->mode); + const char *kn_name; struct qstr q; - q.name = kn->name; - q.hash_len = hashlen_string(kn_dir, kn->name); + /* kn is fresh, can't be renamed, name goes not away */ + kn_name = rcu_dereference_check(kn->name, true); + q.name = kn_name; + q.hash_len = hashlen_string(kn_dir, kn_name); - rc = security_transition_sid(tsec->sid, + rc = security_transition_sid(crsec->sid, parent_sid, secclass, &q, &newsid); if (rc) @@ -3688,8 +3855,8 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file, return 0; isec = inode_security(inode); - rc = 
avc_has_extended_perms(ssid, isec->sid, isec->sclass, - requested, driver, xperm, &ad); + rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass, requested, + driver, AVC_EXT_IOCTL, xperm, &ad); out: return rc; } @@ -3999,10 +4166,13 @@ static int selinux_file_open(struct file *file) /* task security operations */ static int selinux_task_alloc(struct task_struct *task, - unsigned long clone_flags) + u64 clone_flags) { u32 sid = current_sid(); + struct task_security_struct *old_tsec = selinux_task(current); + struct task_security_struct *new_tsec = selinux_task(task); + *new_tsec = *old_tsec; return avc_has_perm(sid, sid, SECCLASS_PROCESS, PROCESS__FORK, NULL); } @@ -4012,10 +4182,10 @@ static int selinux_task_alloc(struct task_struct *task, static int selinux_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { - const struct task_security_struct *old_tsec = selinux_cred(old); - struct task_security_struct *tsec = selinux_cred(new); + const struct cred_security_struct *old_crsec = selinux_cred(old); + struct cred_security_struct *crsec = selinux_cred(new); - *tsec = *old_tsec; + *crsec = *old_crsec; return 0; } @@ -4024,10 +4194,10 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old, */ static void selinux_cred_transfer(struct cred *new, const struct cred *old) { - const struct task_security_struct *old_tsec = selinux_cred(old); - struct task_security_struct *tsec = selinux_cred(new); + const struct cred_security_struct *old_crsec = selinux_cred(old); + struct cred_security_struct *crsec = selinux_cred(new); - *tsec = *old_tsec; + *crsec = *old_crsec; } static void selinux_cred_getsecid(const struct cred *c, u32 *secid) @@ -4046,7 +4216,7 @@ static void selinux_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop) */ static int selinux_kernel_act_as(struct cred *new, u32 secid) { - struct task_security_struct *tsec = selinux_cred(new); + struct cred_security_struct *crsec = selinux_cred(new); u32 sid = current_sid(); int ret; @@ -4055,10 +4225,10 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid) KERNEL_SERVICE__USE_AS_OVERRIDE, NULL); if (ret == 0) { - tsec->sid = secid; - tsec->create_sid = 0; - tsec->keycreate_sid = 0; - tsec->sockcreate_sid = 0; + crsec->sid = secid; + crsec->create_sid = 0; + crsec->keycreate_sid = 0; + crsec->sockcreate_sid = 0; } return ret; } @@ -4070,7 +4240,7 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid) static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode) { struct inode_security_struct *isec = inode_security(inode); - struct task_security_struct *tsec = selinux_cred(new); + struct cred_security_struct *crsec = selinux_cred(new); u32 sid = current_sid(); int ret; @@ -4080,7 +4250,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode) NULL); if (ret == 0) - tsec->create_sid = isec->sid; + crsec->create_sid = isec->sid; return ret; } @@ -4095,7 +4265,7 @@ static int selinux_kernel_module_request(char *kmod_name) SYSTEM__MODULE_REQUEST, &ad); } -static int selinux_kernel_module_from_file(struct file *file) +static int selinux_kernel_load_from_file(struct file *file, u32 requested) { struct common_audit_data ad; struct inode_security_struct *isec; @@ -4103,12 +4273,8 @@ static int selinux_kernel_module_from_file(struct file *file) u32 sid = current_sid(); int rc; - /* init_module */ if (file == NULL) - return avc_has_perm(sid, sid, SECCLASS_SYSTEM, - SYSTEM__MODULE_LOAD, NULL); - - /* finit_module */ + return avc_has_perm(sid, 
sid, SECCLASS_SYSTEM, requested, NULL); ad.type = LSM_AUDIT_DATA_FILE; ad.u.file = file; @@ -4121,8 +4287,7 @@ static int selinux_kernel_module_from_file(struct file *file) } isec = inode_security(file_inode(file)); - return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM, - SYSTEM__MODULE_LOAD, &ad); + return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM, requested, &ad); } static int selinux_kernel_read_file(struct file *file, @@ -4131,9 +4296,31 @@ static int selinux_kernel_read_file(struct file *file, { int rc = 0; + BUILD_BUG_ON_MSG(READING_MAX_ID > 8, + "New kernel_read_file_id introduced; update SELinux!"); + switch (id) { + case READING_FIRMWARE: + rc = selinux_kernel_load_from_file(file, SYSTEM__FIRMWARE_LOAD); + break; case READING_MODULE: - rc = selinux_kernel_module_from_file(contents ? file : NULL); + case READING_MODULE_COMPRESSED: + rc = selinux_kernel_load_from_file(file, SYSTEM__MODULE_LOAD); + break; + case READING_KEXEC_IMAGE: + rc = selinux_kernel_load_from_file(file, + SYSTEM__KEXEC_IMAGE_LOAD); + break; + case READING_KEXEC_INITRAMFS: + rc = selinux_kernel_load_from_file(file, + SYSTEM__KEXEC_INITRAMFS_LOAD); + break; + case READING_POLICY: + rc = selinux_kernel_load_from_file(file, SYSTEM__POLICY_LOAD); + break; + case READING_X509_CERTIFICATE: + rc = selinux_kernel_load_from_file(file, + SYSTEM__X509_CERTIFICATE_LOAD); break; default: break; @@ -4146,9 +4333,31 @@ static int selinux_kernel_load_data(enum kernel_load_data_id id, bool contents) { int rc = 0; + BUILD_BUG_ON_MSG(LOADING_MAX_ID > 8, + "New kernel_load_data_id introduced; update SELinux!"); + switch (id) { + case LOADING_FIRMWARE: + rc = selinux_kernel_load_from_file(NULL, SYSTEM__FIRMWARE_LOAD); + break; case LOADING_MODULE: - rc = selinux_kernel_module_from_file(NULL); + rc = selinux_kernel_load_from_file(NULL, SYSTEM__MODULE_LOAD); + break; + case LOADING_KEXEC_IMAGE: + rc = selinux_kernel_load_from_file(NULL, + SYSTEM__KEXEC_IMAGE_LOAD); + break; + case LOADING_KEXEC_INITRAMFS: + rc = selinux_kernel_load_from_file(NULL, + SYSTEM__KEXEC_INITRAMFS_LOAD); + break; + case LOADING_POLICY: + rc = selinux_kernel_load_from_file(NULL, + SYSTEM__POLICY_LOAD); + break; + case LOADING_X509_CERTIFICATE: + rc = selinux_kernel_load_from_file(NULL, + SYSTEM__X509_CERTIFICATE_LOAD); break; default: break; @@ -4347,22 +4556,6 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, break; } - case IPPROTO_DCCP: { - struct dccp_hdr _dccph, *dh; - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - offset += ihlen; - dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); - if (dh == NULL) - break; - - ad->u.net->sport = dh->dccph_sport; - ad->u.net->dport = dh->dccph_dport; - break; - } - #if IS_ENABLED(CONFIG_IP_SCTP) case IPPROTO_SCTP: { struct sctphdr _sctph, *sh; @@ -4441,18 +4634,6 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, break; } - case IPPROTO_DCCP: { - struct dccp_hdr _dccph, *dh; - - dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); - if (dh == NULL) - break; - - ad->u.net->sport = dh->dccph_sport; - ad->u.net->dport = dh->dccph_dport; - break; - } - #if IS_ENABLED(CONFIG_IP_SCTP) case IPPROTO_SCTP: { struct sctphdr _sctph, *sh; @@ -4585,15 +4766,15 @@ static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid) /* socket security operations */ -static int socket_sockcreate_sid(const struct task_security_struct *tsec, +static int socket_sockcreate_sid(const struct cred_security_struct *crsec, u16 secclass, u32 *socksid) { - if (tsec->sockcreate_sid > SECSID_NULL) { - 
*socksid = tsec->sockcreate_sid; + if (crsec->sockcreate_sid > SECSID_NULL) { + *socksid = crsec->sockcreate_sid; return 0; } - return security_transition_sid(tsec->sid, tsec->sid, + return security_transition_sid(crsec->sid, crsec->sid, secclass, NULL, socksid); } @@ -4638,7 +4819,7 @@ static int sock_has_perm(struct sock *sk, u32 perms) static int selinux_socket_create(int family, int type, int protocol, int kern) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); u32 newsid; u16 secclass; int rc; @@ -4647,17 +4828,17 @@ static int selinux_socket_create(int family, int type, return 0; secclass = socket_type_to_security_class(family, type, protocol); - rc = socket_sockcreate_sid(tsec, secclass, &newsid); + rc = socket_sockcreate_sid(crsec, secclass, &newsid); if (rc) return rc; - return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL); + return avc_has_perm(crsec->sid, newsid, secclass, SOCKET__CREATE, NULL); } static int selinux_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock)); struct sk_security_struct *sksec; u16 sclass = socket_type_to_security_class(family, type, protocol); @@ -4665,7 +4846,7 @@ static int selinux_socket_post_create(struct socket *sock, int family, int err = 0; if (!kern) { - err = socket_sockcreate_sid(tsec, sclass, &sid); + err = socket_sockcreate_sid(crsec, sclass, &sid); if (err) return err; } @@ -4804,10 +4985,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in node_perm = UDP_SOCKET__NODE_BIND; break; - case SECCLASS_DCCP_SOCKET: - node_perm = DCCP_SOCKET__NODE_BIND; - break; - case SECCLASS_SCTP_SOCKET: node_perm = SCTP_SOCKET__NODE_BIND; break; @@ -4835,7 +5012,7 @@ out: return err; err_af: /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */ - if (sksec->sclass == SECCLASS_SCTP_SOCKET) + if (sk->sk_protocol == IPPROTO_SCTP) return -EINVAL; return -EAFNOSUPPORT; } @@ -4863,11 +5040,10 @@ static int selinux_socket_connect_helper(struct socket *sock, return 0; /* - * If a TCP, DCCP or SCTP socket, check name_connect permission + * If a TCP or SCTP socket, check name_connect permission * for the port. 
*/ if (sksec->sclass == SECCLASS_TCP_SOCKET || - sksec->sclass == SECCLASS_DCCP_SOCKET || sksec->sclass == SECCLASS_SCTP_SOCKET) { struct common_audit_data ad; struct lsm_network_audit net = {0,}; @@ -4912,9 +5088,6 @@ static int selinux_socket_connect_helper(struct socket *sock, case SECCLASS_TCP_SOCKET: perm = TCP_SOCKET__NAME_CONNECT; break; - case SECCLASS_DCCP_SOCKET: - perm = DCCP_SOCKET__NAME_CONNECT; - break; case SECCLASS_SCTP_SOCKET: perm = SCTP_SOCKET__NAME_CONNECT; break; @@ -5738,7 +5911,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb, /* we do this in the LOCAL_OUT path and not the POST_ROUTING path * because we want to make sure we apply the necessary labeling * before IPsec is applied so we can leverage AH protection */ - sk = skb->sk; + sk = skb_to_full_sk(skb); if (sk) { struct sk_security_struct *sksec; @@ -5939,20 +6112,20 @@ static int nlmsg_sock_has_extended_perms(struct sock *sk, u32 perms, u16 nlmsg_t { struct sk_security_struct *sksec = sk->sk_security; struct common_audit_data ad; - struct lsm_network_audit net; u8 driver; u8 xperm; if (sock_skip_has_perm(sksec->sid)) return 0; - ad_net_init_from_sk(&ad, &net, sk); + ad.type = LSM_AUDIT_DATA_NLMSGTYPE; + ad.u.nlmsg_type = nlmsg_type; driver = nlmsg_type >> 8; xperm = nlmsg_type & 0xff; return avc_has_extended_perms(current_sid(), sksec->sid, sksec->sclass, - perms, driver, xperm, &ad); + perms, driver, AVC_EXT_NLMSG, xperm, &ad); } static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) @@ -6375,37 +6548,37 @@ static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode) static int selinux_lsm_getattr(unsigned int attr, struct task_struct *p, char **value) { - const struct task_security_struct *tsec; + const struct cred_security_struct *crsec; int error; u32 sid; u32 len; rcu_read_lock(); - tsec = selinux_cred(__task_cred(p)); + crsec = selinux_cred(__task_cred(p)); if (p != current) { - error = avc_has_perm(current_sid(), tsec->sid, + error = avc_has_perm(current_sid(), crsec->sid, SECCLASS_PROCESS, PROCESS__GETATTR, NULL); if (error) goto err_unlock; } switch (attr) { case LSM_ATTR_CURRENT: - sid = tsec->sid; + sid = crsec->sid; break; case LSM_ATTR_PREV: - sid = tsec->osid; + sid = crsec->osid; break; case LSM_ATTR_EXEC: - sid = tsec->exec_sid; + sid = crsec->exec_sid; break; case LSM_ATTR_FSCREATE: - sid = tsec->create_sid; + sid = crsec->create_sid; break; case LSM_ATTR_KEYCREATE: - sid = tsec->keycreate_sid; + sid = crsec->keycreate_sid; break; case LSM_ATTR_SOCKCREATE: - sid = tsec->sockcreate_sid; + sid = crsec->sockcreate_sid; break; default: error = -EOPNOTSUPP; @@ -6430,7 +6603,7 @@ err_unlock: static int selinux_lsm_setattr(u64 attr, void *value, size_t size) { - struct task_security_struct *tsec; + struct cred_security_struct *crsec; struct cred *new; u32 mysid = current_sid(), sid = 0, ptsid; int error; @@ -6516,11 +6689,11 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size) operation. See selinux_bprm_creds_for_exec for the execve checks and may_create for the file creation checks. The operation will then fail if the context is not permitted. 
*/ - tsec = selinux_cred(new); + crsec = selinux_cred(new); if (attr == LSM_ATTR_EXEC) { - tsec->exec_sid = sid; + crsec->exec_sid = sid; } else if (attr == LSM_ATTR_FSCREATE) { - tsec->create_sid = sid; + crsec->create_sid = sid; } else if (attr == LSM_ATTR_KEYCREATE) { if (sid) { error = avc_has_perm(mysid, sid, @@ -6528,22 +6701,22 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size) if (error) goto abort_change; } - tsec->keycreate_sid = sid; + crsec->keycreate_sid = sid; } else if (attr == LSM_ATTR_SOCKCREATE) { - tsec->sockcreate_sid = sid; + crsec->sockcreate_sid = sid; } else if (attr == LSM_ATTR_CURRENT) { error = -EINVAL; if (sid == 0) goto abort_change; if (!current_is_single_threaded()) { - error = security_bounded_transition(tsec->sid, sid); + error = security_bounded_transition(crsec->sid, sid); if (error) goto abort_change; } /* Check permissions for the transition. */ - error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS, + error = avc_has_perm(crsec->sid, sid, SECCLASS_PROCESS, PROCESS__DYNTRANSITION, NULL); if (error) goto abort_change; @@ -6558,7 +6731,7 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size) goto abort_change; } - tsec->sid = sid; + crsec->sid = sid; } else { error = -EINVAL; goto abort_change; @@ -6640,15 +6813,28 @@ static int selinux_ismaclabel(const char *name) return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0); } -static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +static int selinux_secid_to_secctx(u32 secid, struct lsm_context *cp) { - return security_sid_to_context(secid, secdata, seclen); + u32 seclen; + int ret; + + if (cp) { + cp->id = LSM_ID_SELINUX; + ret = security_sid_to_context(secid, &cp->context, &cp->len); + if (ret < 0) + return ret; + return cp->len; + } + ret = security_sid_to_context(secid, NULL, &seclen); + if (ret < 0) + return ret; + return seclen; } -static int selinux_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, - u32 *seclen) +static int selinux_lsmprop_to_secctx(struct lsm_prop *prop, + struct lsm_context *cp) { - return selinux_secid_to_secctx(prop->selinux.secid, secdata, seclen); + return selinux_secid_to_secctx(prop->selinux.secid, cp); } static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) @@ -6657,9 +6843,13 @@ static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) secid, GFP_KERNEL); } -static void selinux_release_secctx(char *secdata, u32 seclen) +static void selinux_release_secctx(struct lsm_context *cp) { - kfree(secdata); + if (cp->id == LSM_ID_SELINUX) { + kfree(cp->context); + cp->context = NULL; + cp->id = LSM_ID_UNDEF; + } } static void selinux_inode_invalidate_secctx(struct inode *inode) @@ -6691,14 +6881,16 @@ static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) ctx, ctxlen, 0, NULL); } -static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +static int selinux_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { - int len = 0; + int len; len = selinux_inode_getsecurity(&nop_mnt_idmap, inode, - XATTR_SELINUX_SUFFIX, ctx, true); + XATTR_SELINUX_SUFFIX, + (void **)&cp->context, true); if (len < 0) return len; - *ctxlen = len; + cp->len = len; + cp->id = LSM_ID_SELINUX; return 0; } #ifdef CONFIG_KEYS @@ -6706,14 +6898,14 @@ static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) static int selinux_key_alloc(struct key *k, const struct cred *cred, unsigned long flags) { - const struct task_security_struct 
*tsec; + const struct cred_security_struct *crsec; struct key_security_struct *ksec = selinux_key(k); - tsec = selinux_cred(cred); - if (tsec->keycreate_sid) - ksec->sid = tsec->keycreate_sid; + crsec = selinux_cred(cred); + if (crsec->keycreate_sid) + ksec->sid = crsec->keycreate_sid; else - ksec->sid = tsec->sid; + ksec->sid = crsec->sid; return 0; } @@ -6846,7 +7038,7 @@ static int selinux_ib_alloc_security(void *ib_sec) #ifdef CONFIG_BPF_SYSCALL static int selinux_bpf(int cmd, union bpf_attr *attr, - unsigned int size) + unsigned int size, bool kernel) { u32 sid = current_sid(); int ret; @@ -6896,14 +7088,14 @@ static int bpf_fd_pass(const struct file *file, u32 sid) if (file->f_op == &bpf_map_fops) { map = file->private_data; - bpfsec = map->security; + bpfsec = selinux_bpf_map_security(map); ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, bpf_map_fmode_to_av(file->f_mode), NULL); if (ret) return ret; } else if (file->f_op == &bpf_prog_fops) { prog = file->private_data; - bpfsec = prog->aux->security; + bpfsec = selinux_bpf_prog_security(prog); ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, BPF__PROG_RUN, NULL); if (ret) @@ -6917,7 +7109,7 @@ static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode) u32 sid = current_sid(); struct bpf_security_struct *bpfsec; - bpfsec = map->security; + bpfsec = selinux_bpf_map_security(map); return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, bpf_map_fmode_to_av(fmode), NULL); } @@ -6927,83 +7119,48 @@ static int selinux_bpf_prog(struct bpf_prog *prog) u32 sid = current_sid(); struct bpf_security_struct *bpfsec; - bpfsec = prog->aux->security; + bpfsec = selinux_bpf_prog_security(prog); return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, BPF__PROG_RUN, NULL); } static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, - struct bpf_token *token) + struct bpf_token *token, bool kernel) { struct bpf_security_struct *bpfsec; - bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL); - if (!bpfsec) - return -ENOMEM; - + bpfsec = selinux_bpf_map_security(map); bpfsec->sid = current_sid(); - map->security = bpfsec; return 0; } -static void selinux_bpf_map_free(struct bpf_map *map) -{ - struct bpf_security_struct *bpfsec = map->security; - - map->security = NULL; - kfree(bpfsec); -} - static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, - struct bpf_token *token) + struct bpf_token *token, bool kernel) { struct bpf_security_struct *bpfsec; - bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL); - if (!bpfsec) - return -ENOMEM; - + bpfsec = selinux_bpf_prog_security(prog); bpfsec->sid = current_sid(); - prog->aux->security = bpfsec; return 0; } -static void selinux_bpf_prog_free(struct bpf_prog *prog) -{ - struct bpf_security_struct *bpfsec = prog->aux->security; - - prog->aux->security = NULL; - kfree(bpfsec); -} - static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, const struct path *path) { struct bpf_security_struct *bpfsec; - bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL); - if (!bpfsec) - return -ENOMEM; - + bpfsec = selinux_bpf_token_security(token); bpfsec->sid = current_sid(); - token->security = bpfsec; return 0; } - -static void selinux_bpf_token_free(struct bpf_token *token) -{ - struct bpf_security_struct *bpfsec = token->security; - - token->security = NULL; - kfree(bpfsec); -} #endif struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = { - .lbs_cred = sizeof(struct task_security_struct), + .lbs_cred = sizeof(struct cred_security_struct), + .lbs_task = sizeof(struct 
task_security_struct), .lbs_file = sizeof(struct file_security_struct), .lbs_inode = sizeof(struct inode_security_struct), .lbs_ipc = sizeof(struct ipc_security_struct), @@ -7017,10 +7174,13 @@ struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = { .lbs_xattr_count = SELINUX_INODE_INIT_XATTRS, .lbs_tun_dev = sizeof(struct tun_security_struct), .lbs_ib = sizeof(struct ib_security_struct), + .lbs_bpf_map = sizeof(struct bpf_security_struct), + .lbs_bpf_prog = sizeof(struct bpf_security_struct), + .lbs_bpf_token = sizeof(struct bpf_security_struct), }; #ifdef CONFIG_PERF_EVENTS -static int selinux_perf_event_open(struct perf_event_attr *attr, int type) +static int selinux_perf_event_open(int type) { u32 requested, sid = current_sid(); @@ -7117,6 +7277,19 @@ static int selinux_uring_cmd(struct io_uring_cmd *ioucmd) return avc_has_perm(current_sid(), isec->sid, SECCLASS_IO_URING, IO_URING__CMD, &ad); } + +/** + * selinux_uring_allowed - check if io_uring_setup() can be called + * + * Check to see if the current task is allowed to call io_uring_setup(). + */ +static int selinux_uring_allowed(void) +{ + u32 sid = current_sid(); + + return avc_has_perm(sid, sid, SECCLASS_IO_URING, IO_URING__ALLOWED, + NULL); +} #endif /* CONFIG_IO_URING */ static const struct lsm_id selinux_lsmid = { @@ -7195,6 +7368,8 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = { LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr), LSM_HOOK_INIT(inode_listxattr, selinux_inode_listxattr), LSM_HOOK_INIT(inode_removexattr, selinux_inode_removexattr), + LSM_HOOK_INIT(inode_file_getattr, selinux_inode_file_getattr), + LSM_HOOK_INIT(inode_file_setattr, selinux_inode_file_setattr), LSM_HOOK_INIT(inode_set_acl, selinux_inode_set_acl), LSM_HOOK_INIT(inode_get_acl, selinux_inode_get_acl), LSM_HOOK_INIT(inode_remove_acl, selinux_inode_remove_acl), @@ -7355,9 +7530,6 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = { LSM_HOOK_INIT(bpf, selinux_bpf), LSM_HOOK_INIT(bpf_map, selinux_bpf_map), LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog), - LSM_HOOK_INIT(bpf_map_free, selinux_bpf_map_free), - LSM_HOOK_INIT(bpf_prog_free, selinux_bpf_prog_free), - LSM_HOOK_INIT(bpf_token_free, selinux_bpf_token_free), #endif #ifdef CONFIG_PERF_EVENTS @@ -7370,6 +7542,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = { LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds), LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll), LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd), + LSM_HOOK_INIT(uring_allowed, selinux_uring_allowed), #endif /* @@ -7436,6 +7609,11 @@ static __init int selinux_init(void) /* Set the security state for the initial task. 
*/ cred_init_security(); + /* Inform the audit system that secctx is used */ + audit_cfg_lsm(&selinux_lsmid, + AUDIT_CFG_LSM_SECCTX_SUBJECT | + AUDIT_CFG_LSM_SECCTX_OBJECT); + default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC); if (!default_noexec) pr_notice("SELinux: virtual memory is executable by default\n"); @@ -7457,6 +7635,10 @@ static __init int selinux_init(void) if (avc_add_callback(selinux_lsm_notifier_avc_callback, AVC_CALLBACK_RESET)) panic("SELinux: Unable to register AVC LSM notifier callback\n"); + if (avc_add_callback(selinux_audit_rule_avc_callback, + AVC_CALLBACK_RESET)) + panic("SELinux: Unable to register AVC audit callback\n"); + if (selinux_enforcing_boot) pr_debug("SELinux: Starting in enforcing mode\n"); else @@ -7484,11 +7666,12 @@ void selinux_complete_init(void) /* SELinux requires early initialization in order to label all processes and objects when they are created. */ DEFINE_LSM(selinux) = { - .name = "selinux", + .id = &selinux_lsmid, .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE, .enabled = &selinux_enabled_boot, .blobs = &selinux_blob_sizes, .init = selinux_init, + .initcall_device = selinux_initcall, }; #if defined(CONFIG_NETFILTER) @@ -7550,7 +7733,7 @@ static struct pernet_operations selinux_net_ops = { .exit = selinux_nf_unregister, }; -static int __init selinux_nf_ip_init(void) +int __init selinux_nf_ip_init(void) { int err; @@ -7565,5 +7748,4 @@ static int __init selinux_nf_ip_init(void) return 0; } -__initcall(selinux_nf_ip_init); #endif /* CONFIG_NETFILTER */ diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c index 48f537b41c58..ea1d9b2c7d2b 100644 --- a/security/selinux/ibpkey.c +++ b/security/selinux/ibpkey.c @@ -23,6 +23,7 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include "initcalls.h" #include "ibpkey.h" #include "objsec.h" @@ -130,7 +131,7 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid) { int ret; struct sel_ib_pkey *pkey; - struct sel_ib_pkey *new = NULL; + struct sel_ib_pkey *new; unsigned long flags; spin_lock_irqsave(&sel_ib_pkey_lock, flags); @@ -146,12 +147,11 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid) if (ret) goto out; - /* If this memory allocation fails still return 0. The SID - * is valid, it just won't be added to the cache. - */ - new = kzalloc(sizeof(*new), GFP_ATOMIC); + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (!new) { - ret = -ENOMEM; + /* If this memory allocation fails still return 0. The SID + * is valid, it just won't be added to the cache. + */ goto out; } @@ -184,7 +184,7 @@ int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *sid) rcu_read_lock(); pkey = sel_ib_pkey_find(subnet_prefix, pkey_num); - if (pkey) { + if (likely(pkey)) { *sid = pkey->psec.sid; rcu_read_unlock(); return 0; @@ -219,7 +219,7 @@ void sel_ib_pkey_flush(void) spin_unlock_irqrestore(&sel_ib_pkey_lock, flags); } -static __init int sel_ib_pkey_init(void) +int __init sel_ib_pkey_init(void) { int iter; @@ -233,5 +233,3 @@ static __init int sel_ib_pkey_init(void) return 0; } - -subsys_initcall(sel_ib_pkey_init); diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h index d5b0425055e4..85a531ac737b 100644 --- a/security/selinux/include/audit.h +++ b/security/selinux/include/audit.h @@ -16,6 +16,15 @@ #include <linux/types.h> /** + * selinux_audit_rule_avc_callback - update the audit LSM rules on AVC events. + * @event: the AVC event + * + * Update any audit LSM rules based on the AVC event specified in @event. 
+ * Returns 0 on success, negative values otherwise. + */ +int selinux_audit_rule_avc_callback(u32 event); + +/** * selinux_audit_rule_init - alloc/init an selinux audit rule structure. * @field: the field this rule refers to * @op: the operator the rule uses diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index 96a614d47df8..01b5167fee1a 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -65,6 +65,10 @@ static inline u32 avc_audit_required(u32 requested, struct av_decision *avd, int result, u32 auditdeny, u32 *deniedp) { u32 denied, audited; + + if (avd->flags & AVD_FLAGS_NEVERAUDIT) + return 0; + denied = requested & ~avd->allowed; if (unlikely(denied)) { audited = denied & avd->auditdeny; @@ -136,8 +140,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata); +#define AVC_EXT_IOCTL (1 << 0) /* Cache entry for an ioctl extended permission */ +#define AVC_EXT_NLMSG (1 << 1) /* Cache entry for an nlmsg extended permission */ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, - u8 driver, u8 perm, struct common_audit_data *ad); + u8 driver, u8 base_perm, u8 perm, + struct common_audit_data *ad); u32 avc_policy_seqno(void); diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 2bc20135324a..3ec85142771f 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -8,7 +8,7 @@ COMMON_FILE_SOCK_PERMS, "unlink", "link", "rename", "execute", \ "quotaon", "mounton", "audit_access", "open", "execmod", \ "watch", "watch_mount", "watch_sb", "watch_with_perm", \ - "watch_reads" + "watch_reads", "watch_mountns" #define COMMON_SOCK_PERMS \ COMMON_FILE_SOCK_PERMS, "bind", "connect", "listen", "accept", \ @@ -63,7 +63,9 @@ const struct security_class_mapping secclass_map[] = { { "process2", { "nnp_transition", "nosuid_transition", NULL } }, { "system", { "ipc_info", "syslog_read", "syslog_mod", "syslog_console", - "module_request", "module_load", NULL } }, + "module_request", "module_load", "firmware_load", + "kexec_image_load", "kexec_initramfs_load", "policy_load", + "x509_certificate_load", NULL } }, { "capability", { COMMON_CAP_PERMS, NULL } }, { "filesystem", { "mount", "remount", "unmount", "getattr", "relabelfrom", @@ -125,8 +127,6 @@ const struct security_class_mapping secclass_map[] = { { "key", { "view", "read", "write", "search", "link", "setattr", "create", NULL } }, - { "dccp_socket", - { COMMON_SOCK_PERMS, "node_bind", "name_connect", NULL } }, { "memprotect", { "mmap_zero", NULL } }, { "peer", { "recv", NULL } }, { "capability2", { COMMON_CAP2_PERMS, NULL } }, @@ -177,9 +177,11 @@ const struct security_class_mapping secclass_map[] = { { "perf_event", { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } }, { "anon_inode", { COMMON_FILE_PERMS, NULL } }, - { "io_uring", { "override_creds", "sqpoll", "cmd", NULL } }, + { "io_uring", { "override_creds", "sqpoll", "cmd", "allowed", NULL } }, { "user_namespace", { "create", NULL } }, - { NULL } + { "memfd_file", + { COMMON_FILE_PERMS, "execute_no_trans", "entrypoint", NULL } }, + /* last one */ { NULL, {} } }; #ifdef __KERNEL__ /* avoid this check when building host programs */ diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h index 5910bb7c2eca..060833e2dba2 100644 --- a/security/selinux/include/conditional.h +++ 
b/security/selinux/include/conditional.h @@ -16,7 +16,7 @@ int security_get_bools(struct selinux_policy *policy, u32 *len, char ***names, int **values); -int security_set_bools(u32 len, int *values); +int security_set_bools(u32 len, const int *values); int security_get_bool_value(u32 index); diff --git a/security/selinux/include/hash.h b/security/selinux/include/hash.h new file mode 100644 index 000000000000..18956dbef8ff --- /dev/null +++ b/security/selinux/include/hash.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _SELINUX_HASH_H_ +#define _SELINUX_HASH_H_ + +/* + * Based on MurmurHash3, written by Austin Appleby and placed in the + * public domain. + */ +static inline u32 av_hash(u32 key1, u32 key2, u32 key3, u32 mask) +{ + static const u32 c1 = 0xcc9e2d51; + static const u32 c2 = 0x1b873593; + static const u32 r1 = 15; + static const u32 r2 = 13; + static const u32 m = 5; + static const u32 n = 0xe6546b64; + + u32 hash = 0; + +#define mix(input) \ + do { \ + u32 v = input; \ + v *= c1; \ + v = (v << r1) | (v >> (32 - r1)); \ + v *= c2; \ + hash ^= v; \ + hash = (hash << r2) | (hash >> (32 - r2)); \ + hash = hash * m + n; \ + } while (0) + + mix(key1); + mix(key2); + mix(key3); + +#undef mix + + hash ^= hash >> 16; + hash *= 0x85ebca6b; + hash ^= hash >> 13; + hash *= 0xc2b2ae35; + hash ^= hash >> 16; + + return hash & mask; +} + +#endif /* _SELINUX_HASH_H_ */ diff --git a/security/selinux/include/initcalls.h b/security/selinux/include/initcalls.h new file mode 100644 index 000000000000..6674cf489473 --- /dev/null +++ b/security/selinux/include/initcalls.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SELinux initcalls + */ + +#ifndef _SELINUX_INITCALLS_H +#define _SELINUX_INITCALLS_H + +int init_sel_fs(void); +int sel_netport_init(void); +int sel_netnode_init(void); +int sel_netif_init(void); +int sel_netlink_init(void); +int sel_ib_pkey_init(void); +int selinux_nf_ip_init(void); + +int selinux_initcall(void); + +#endif diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h index 9b8b655a8cd3..e4dc904c3585 100644 --- a/security/selinux/include/netnode.h +++ b/security/selinux/include/netnode.h @@ -21,6 +21,6 @@ void sel_netnode_flush(void); -int sel_netnode_sid(void *addr, u16 family, u32 *sid); +int sel_netnode_sid(const void *addr, u16 family, u32 *sid); #endif diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index c88cae81ee4c..8fc3de5234ac 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -26,10 +26,18 @@ #include <linux/lsm_hooks.h> #include <linux/msg.h> #include <net/net_namespace.h> +#include <linux/bpf.h> #include "flask.h" #include "avc.h" -struct task_security_struct { +struct avdc_entry { + u32 isid; /* inode SID */ + u32 allowed; /* allowed permission bitmask */ + u32 audited; /* audited permission bitmask */ + bool permissive; /* AVC permissive flag */ +}; + +struct cred_security_struct { u32 osid; /* SID prior to last execve */ u32 sid; /* current SID */ u32 exec_sid; /* exec SID */ @@ -38,6 +46,25 @@ struct task_security_struct { u32 sockcreate_sid; /* fscreate SID */ } __randomize_layout; +struct task_security_struct { +#define TSEC_AVDC_DIR_SIZE (1 << 2) + struct { + u32 sid; /* current SID for cached entries */ + u32 seqno; /* AVC sequence number */ + unsigned int dir_spot; /* dir cache index to check first */ + struct avdc_entry dir[TSEC_AVDC_DIR_SIZE]; /* dir entries */ + bool permissive_neveraudit; /* permissive 
and neveraudit */ + } avdcache; +} __randomize_layout; + +static inline bool task_avdcache_permnoaudit(struct task_security_struct *tsec, + u32 sid) +{ + return (tsec->avdcache.permissive_neveraudit && + sid == tsec->avdcache.sid && + tsec->avdcache.seqno == avc_policy_seqno()); +} + enum label_initialized { LABEL_INVALID, /* invalid or not initialized */ LABEL_INITIALIZED, /* initialized */ @@ -82,7 +109,7 @@ struct ipc_security_struct { }; struct netif_security_struct { - struct net *ns; /* network namespace */ + const struct net *ns; /* network namespace */ int ifindex; /* device index */ u32 sid; /* SID for this interface */ }; @@ -149,11 +176,17 @@ struct perf_event_security_struct { }; extern struct lsm_blob_sizes selinux_blob_sizes; -static inline struct task_security_struct *selinux_cred(const struct cred *cred) +static inline struct cred_security_struct *selinux_cred(const struct cred *cred) { return cred->security + selinux_blob_sizes.lbs_cred; } +static inline struct task_security_struct * +selinux_task(const struct task_struct *task) +{ + return task->security + selinux_blob_sizes.lbs_task; +} + static inline struct file_security_struct *selinux_file(const struct file *file) { return file->f_security + selinux_blob_sizes.lbs_file; @@ -184,9 +217,9 @@ selinux_ipc(const struct kern_ipc_perm *ipc) */ static inline u32 current_sid(void) { - const struct task_security_struct *tsec = selinux_cred(current_cred()); + const struct cred_security_struct *crsec = selinux_cred(current_cred()); - return tsec->sid; + return crsec->sid; } static inline struct superblock_security_struct * @@ -223,4 +256,23 @@ selinux_perf_event(void *perf_event) return perf_event + selinux_blob_sizes.lbs_perf_event; } +#ifdef CONFIG_BPF_SYSCALL +static inline struct bpf_security_struct * +selinux_bpf_map_security(struct bpf_map *map) +{ + return map->security + selinux_blob_sizes.lbs_bpf_map; +} + +static inline struct bpf_security_struct * +selinux_bpf_prog_security(struct bpf_prog *prog) +{ + return prog->aux->security + selinux_blob_sizes.lbs_bpf_prog; +} + +static inline struct bpf_security_struct * +selinux_bpf_token_security(struct bpf_token *token) +{ + return token->security + selinux_blob_sizes.lbs_bpf_token; +} +#endif /* CONFIG_BPF_SYSCALL */ #endif /* _SELINUX_OBJSEC_H_ */ diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h index 079679fe7254..231d02227e59 100644 --- a/security/selinux/include/policycap.h +++ b/security/selinux/include/policycap.h @@ -15,6 +15,10 @@ enum { POLICYDB_CAP_IOCTL_SKIP_CLOEXEC, POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT, POLICYDB_CAP_NETLINK_XPERM, + POLICYDB_CAP_NETIF_WILDCARD, + POLICYDB_CAP_GENFS_SECLABEL_WILDCARD, + POLICYDB_CAP_FUNCTIONFS_SECLABEL, + POLICYDB_CAP_MEMFD_CLASS, __POLICYDB_CAP_MAX }; #define POLICYDB_CAP_MAX (__POLICYDB_CAP_MAX - 1) diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h index e080827408c4..454dab37bda3 100644 --- a/security/selinux/include/policycap_names.h +++ b/security/selinux/include/policycap_names.h @@ -18,6 +18,10 @@ const char *const selinux_policycap_names[__POLICYDB_CAP_MAX] = { "ioctl_skip_cloexec", "userspace_initial_context", "netlink_xperm", + "netif_wildcard", + "genfs_seclabel_wildcard", + "functionfs_seclabel", + "memfd_class", }; /* clang-format on */ diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index c7f2731abd03..5d1dad8058b1 100644 --- a/security/selinux/include/security.h +++ 
b/security/selinux/include/security.h @@ -46,10 +46,12 @@ #define POLICYDB_VERSION_INFINIBAND 31 #define POLICYDB_VERSION_GLBLUB 32 #define POLICYDB_VERSION_COMP_FTRANS 33 /* compressed filename transitions */ +#define POLICYDB_VERSION_COND_XPERMS 34 /* extended permissions in conditional policies */ +#define POLICYDB_VERSION_NEVERAUDIT 35 /* neveraudit types */ /* Range of policy versions we understand*/ #define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE -#define POLICYDB_VERSION_MAX POLICYDB_VERSION_COMP_FTRANS +#define POLICYDB_VERSION_MAX POLICYDB_VERSION_NEVERAUDIT /* Mask for just the mount related flags */ #define SE_MNTMASK 0x0f @@ -201,6 +203,17 @@ static inline bool selinux_policycap_netlink_xperm(void) selinux_state.policycap[POLICYDB_CAP_NETLINK_XPERM]); } +static inline bool selinux_policycap_functionfs_seclabel(void) +{ + return READ_ONCE( + selinux_state.policycap[POLICYDB_CAP_FUNCTIONFS_SECLABEL]); +} + +static inline bool selinux_policycap_memfd_class(void) +{ + return READ_ONCE(selinux_state.policycap[POLICYDB_CAP_MEMFD_CLASS]); +} + struct selinux_policy_convert_data; struct selinux_load_state { @@ -239,6 +252,7 @@ struct extended_perms_data { struct extended_perms_decision { u8 used; u8 driver; + u8 base_perm; struct extended_perms_data *allowed; struct extended_perms_data *auditallow; struct extended_perms_data *dontaudit; @@ -246,17 +260,20 @@ struct extended_perms_decision { struct extended_perms { u16 len; /* length associated decision chain */ + u8 base_perms; /* which base permissions are covered */ struct extended_perms_data drivers; /* flag drivers that are used */ }; /* definitions of av_decision.flags */ #define AVD_FLAGS_PERMISSIVE 0x0001 +#define AVD_FLAGS_NEVERAUDIT 0x0002 void security_compute_av(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd, struct extended_perms *xperms); void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass, u8 driver, + u8 base_perm, struct extended_perms_decision *xpermd); void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass, @@ -289,7 +306,7 @@ int security_context_to_sid_default(const char *scontext, u32 scontext_len, int security_context_to_sid_force(const char *scontext, u32 scontext_len, u32 *sid); -int security_get_user_sids(u32 callsid, char *username, u32 **sids, u32 *nel); +int security_get_user_sids(u32 fromsid, const char *username, u32 **sids, u32 *nel); int security_port_sid(u8 protocol, u16 port, u32 *out_sid); @@ -297,9 +314,9 @@ int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid); int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid); -int security_netif_sid(char *name, u32 *if_sid); +int security_netif_sid(const char *name, u32 *if_sid); -int security_node_sid(u16 domain, void *addr, u32 addrlen, u32 *out_sid); +int security_node_sid(u16 domain, const void *addr, u32 addrlen, u32 *out_sid); int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, u16 tclass); @@ -307,7 +324,7 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid, u16 tclass); -int security_bounded_transition(u32 oldsid, u32 newsid); +int security_bounded_transition(u32 old_sid, u32 new_sid); int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid); diff --git a/security/selinux/initcalls.c b/security/selinux/initcalls.c new file mode 100644 index 000000000000..f6716a1d38c1 --- /dev/null +++ b/security/selinux/initcalls.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: 
GPL-2.0-only +/* + * SELinux initcalls + */ + +#include <linux/init.h> + +#include "initcalls.h" + +/** + * selinux_initcall - Perform the SELinux initcalls + * + * Used as a device initcall in the SELinux LSM definition. + */ +int __init selinux_initcall(void) +{ + int rc = 0, rc_tmp = 0; + + rc_tmp = init_sel_fs(); + if (!rc && rc_tmp) + rc = rc_tmp; + + rc_tmp = sel_netport_init(); + if (!rc && rc_tmp) + rc = rc_tmp; + + rc_tmp = sel_netnode_init(); + if (!rc && rc_tmp) + rc = rc_tmp; + + rc_tmp = sel_netif_init(); + if (!rc && rc_tmp) + rc = rc_tmp; + + rc_tmp = sel_netlink_init(); + if (!rc && rc_tmp) + rc = rc_tmp; + +#if defined(CONFIG_SECURITY_INFINIBAND) + rc_tmp = sel_ib_pkey_init(); + if (!rc && rc_tmp) + rc = rc_tmp; +#endif + +#if defined(CONFIG_NETFILTER) + rc_tmp = selinux_nf_ip_init(); + if (!rc && rc_tmp) + rc = rc_tmp; +#endif + + return rc; +} diff --git a/security/selinux/netif.c b/security/selinux/netif.c index 43a0d3594b72..e24b2cba28ea 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c @@ -22,6 +22,7 @@ #include <linux/rcupdate.h> #include <net/net_namespace.h> +#include "initcalls.h" #include "security.h" #include "objsec.h" #include "netif.h" @@ -156,7 +157,11 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid) ret = security_netif_sid(dev->name, sid); if (ret != 0) goto out; - new = kzalloc(sizeof(*new), GFP_ATOMIC); + + /* If this memory allocation fails still return 0. The SID + * is valid, it just won't be added to the cache. + */ + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (new) { new->nsec.ns = ns; new->nsec.ifindex = ifindex; @@ -261,7 +266,7 @@ static struct notifier_block sel_netif_netdev_notifier = { .notifier_call = sel_netif_netdev_notifier_handler, }; -static __init int sel_netif_init(void) +int __init sel_netif_init(void) { int i; @@ -276,5 +281,3 @@ static __init int sel_netif_init(void) return 0; } -__initcall(sel_netif_init); - diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c index 1760aee712fd..eb40e4603475 100644 --- a/security/selinux/netlink.c +++ b/security/selinux/netlink.c @@ -17,6 +17,7 @@ #include <net/net_namespace.h> #include <net/netlink.h> +#include "initcalls.h" #include "security.h" static struct sock *selnl __ro_after_init; @@ -105,7 +106,7 @@ void selnl_notify_policyload(u32 seqno) selnl_notify(SELNL_MSG_POLICYLOAD, &seqno); } -static int __init selnl_init(void) +int __init sel_netlink_init(void) { struct netlink_kernel_cfg cfg = { .groups = SELNLGRP_MAX, @@ -117,5 +118,3 @@ static int __init selnl_init(void) panic("SELinux: Cannot create netlink socket."); return 0; } - -__initcall(selnl_init); diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index 5c8c77e50aad..9b3da5ce8d39 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c @@ -30,6 +30,7 @@ #include <net/ip.h> #include <net/ipv6.h> +#include "initcalls.h" #include "netnode.h" #include "objsec.h" @@ -187,7 +188,7 @@ static void sel_netnode_insert(struct sel_netnode *node) * failure. * */ -static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) +static int sel_netnode_sid_slow(const void *addr, u16 family, u32 *sid) { int ret; struct sel_netnode *node; @@ -201,19 +202,22 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) return 0; } - new = kzalloc(sizeof(*new), GFP_ATOMIC); + /* If this memory allocation fails still return 0. The SID + * is valid, it just won't be added to the cache. 
+ */ + new = kmalloc(sizeof(*new), GFP_ATOMIC); switch (family) { case PF_INET: ret = security_node_sid(PF_INET, addr, sizeof(struct in_addr), sid); if (new) - new->nsec.addr.ipv4 = *(__be32 *)addr; + new->nsec.addr.ipv4 = *(const __be32 *)addr; break; case PF_INET6: ret = security_node_sid(PF_INET6, addr, sizeof(struct in6_addr), sid); if (new) - new->nsec.addr.ipv6 = *(struct in6_addr *)addr; + new->nsec.addr.ipv6 = *(const struct in6_addr *)addr; break; default: BUG(); @@ -247,13 +251,13 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) * on failure. * */ -int sel_netnode_sid(void *addr, u16 family, u32 *sid) +int sel_netnode_sid(const void *addr, u16 family, u32 *sid) { struct sel_netnode *node; rcu_read_lock(); node = sel_netnode_find(addr, family); - if (node != NULL) { + if (likely(node != NULL)) { *sid = node->nsec.sid; rcu_read_unlock(); return 0; @@ -287,7 +291,7 @@ void sel_netnode_flush(void) spin_unlock_bh(&sel_netnode_lock); } -static __init int sel_netnode_init(void) +int __init sel_netnode_init(void) { int iter; @@ -301,5 +305,3 @@ static __init int sel_netnode_init(void) return 0; } - -__initcall(sel_netnode_init); diff --git a/security/selinux/netport.c b/security/selinux/netport.c index 2e22ad9c2bd0..9e62f7285e81 100644 --- a/security/selinux/netport.c +++ b/security/selinux/netport.c @@ -29,6 +29,7 @@ #include <net/ip.h> #include <net/ipv6.h> +#include "initcalls.h" #include "netport.h" #include "objsec.h" @@ -47,12 +48,6 @@ struct sel_netport { struct rcu_head rcu; }; -/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason - * for this is that I suspect most users will not make heavy use of both - * address families at the same time so one table will usually end up wasted, - * if this becomes a problem we can always add a hash table for each address - * family later */ - static DEFINE_SPINLOCK(sel_netport_lock); static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE]; @@ -151,7 +146,11 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid) ret = security_port_sid(protocol, pnum, sid); if (ret != 0) goto out; - new = kzalloc(sizeof(*new), GFP_ATOMIC); + + /* If this memory allocation fails still return 0. The SID + * is valid, it just won't be added to the cache. 
+ */ + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (new) { new->psec.port = pnum; new->psec.protocol = protocol; @@ -186,7 +185,7 @@ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid) rcu_read_lock(); port = sel_netport_find(protocol, pnum); - if (port != NULL) { + if (likely(port != NULL)) { *sid = port->psec.sid; rcu_read_unlock(); return 0; @@ -220,7 +219,7 @@ void sel_netport_flush(void) spin_unlock_bh(&sel_netport_lock); } -static __init int sel_netport_init(void) +int __init sel_netport_init(void) { int iter; @@ -234,5 +233,3 @@ static __init int sel_netport_init(void) return 0; } - -__initcall(sel_netport_init); diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 3a95986b134f..2c0b07f9fbbd 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -98,7 +98,6 @@ static const struct nlmsg_perm nlmsg_route_perms[] = { static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = { { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, - { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE }, }; diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 6cd5bb0ba380..896acad1f5f7 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -35,6 +35,7 @@ /* selinuxfs pseudo filesystem for exporting the security policy API. Based on the proc code and the fs/nfsd/nfsctl.c code. */ +#include "initcalls.h" #include "flask.h" #include "avc.h" #include "avc_ss.h" @@ -75,7 +76,6 @@ struct selinux_fs_info { struct dentry *class_dir; unsigned long last_class_ino; bool policy_opened; - struct dentry *policycap_dir; unsigned long last_ino; struct super_block *sb; }; @@ -117,7 +117,6 @@ static void selinux_fs_info_free(struct super_block *sb) #define BOOL_DIR_NAME "booleans" #define CLASS_DIR_NAME "class" -#define POLICYCAP_DIR_NAME "policy_capabilities" #define TMPBUFLEN 12 static ssize_t sel_read_enforce(struct file *filp, char __user *buf, @@ -506,6 +505,7 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi, { int ret = 0; struct dentry *tmp_parent, *tmp_bool_dir, *tmp_class_dir; + struct renamedata rd = {}; unsigned int bool_num = 0; char **bool_names = NULL; int *bool_values = NULL; @@ -539,9 +539,14 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi, if (ret) goto out; - lock_rename(tmp_parent, fsi->sb->s_root); + rd.old_parent = tmp_parent; + rd.new_parent = fsi->sb->s_root; /* booleans */ + ret = start_renaming_two_dentries(&rd, tmp_bool_dir, fsi->bool_dir); + if (ret) + goto out; + d_exchange(tmp_bool_dir, fsi->bool_dir); swap(fsi->bool_num, bool_num); @@ -549,12 +554,17 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi, swap(fsi->bool_pending_values, bool_values); fsi->bool_dir = tmp_bool_dir; + end_renaming(&rd); /* classes */ + ret = start_renaming_two_dentries(&rd, tmp_class_dir, fsi->class_dir); + if (ret) + goto out; + d_exchange(tmp_class_dir, fsi->class_dir); fsi->class_dir = tmp_class_dir; - unlock_rename(tmp_parent, fsi->sb->s_root); + end_renaming(&rd); out: sel_remove_old_bool_data(bool_num, bool_names, bool_values); @@ -1072,6 +1082,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) pr_warn_ratelimited("SELinux: %s (%d) wrote to /sys/fs/selinux/user!" 
" This will not be supported in the future; please update your" " userspace.\n", current->comm, current->pid); + ssleep(5); length = avc_has_perm(current_sid(), SECINITSID_SECURITY, SECCLASS_SECURITY, SECURITY__COMPUTE_USER, @@ -1198,11 +1209,31 @@ static struct inode *sel_make_inode(struct super_block *sb, umode_t mode) return ret; } +static struct dentry *sel_attach(struct dentry *parent, const char *name, + struct inode *inode) +{ + struct dentry *dentry = d_alloc_name(parent, name); + if (unlikely(!dentry)) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + d_make_persistent(dentry, inode); + dput(dentry); + return dentry; +} + +static int sel_attach_file(struct dentry *parent, const char *name, + struct inode *inode) +{ + struct dentry *dentry = sel_attach(parent, name, inode); + return PTR_ERR_OR_ZERO(dentry); +} + static ssize_t sel_read_bool(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info; - char *page = NULL; + char buffer[4]; ssize_t length; ssize_t ret; int cur_enforcing; @@ -1216,27 +1247,19 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf, fsi->bool_pending_names[index])) goto out_unlock; - ret = -ENOMEM; - page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) - goto out_unlock; - cur_enforcing = security_get_bool_value(index); if (cur_enforcing < 0) { ret = cur_enforcing; goto out_unlock; } - length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing, - fsi->bool_pending_values[index]); + length = scnprintf(buffer, sizeof(buffer), "%d %d", !!cur_enforcing, + !!fsi->bool_pending_values[index]); mutex_unlock(&selinux_state.policy_mutex); - ret = simple_read_from_buffer(buf, count, ppos, page, length); -out_free: - free_page((unsigned long)page); - return ret; + return simple_read_from_buffer(buf, count, ppos, buffer, length); out_unlock: mutex_unlock(&selinux_state.policy_mutex); - goto out_free; + return ret; } static ssize_t sel_write_bool(struct file *filep, const char __user *buf, @@ -1365,8 +1388,7 @@ static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_ *bool_num = num; *bool_pending_names = names; - for (i = 0; i < num; i++) { - struct dentry *dentry; + for (i = 0; !ret && i < num; i++) { struct inode *inode; struct inode_security_struct *isec; ssize_t len; @@ -1377,15 +1399,9 @@ static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_ ret = -ENAMETOOLONG; break; } - dentry = d_alloc_name(bool_dir, names[i]); - if (!dentry) { - ret = -ENOMEM; - break; - } inode = sel_make_inode(bool_dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR); if (!inode) { - dput(dentry); ret = -ENOMEM; break; } @@ -1403,7 +1419,8 @@ static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_ isec->initialized = LABEL_INITIALIZED; inode->i_fop = &sel_bool_ops; inode->i_ino = i|SEL_BOOL_INO_OFFSET; - d_add(dentry, inode); + + ret = sel_attach_file(bool_dir, names[i], inode); } out: free_page((unsigned long)page); @@ -1515,7 +1532,7 @@ static const struct file_operations sel_avc_hash_stats_ops = { #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) { - int cpu; + loff_t cpu; for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) @@ -1588,6 +1605,7 @@ static int sel_make_avc_files(struct dentry *dir) struct super_block *sb = dir->d_sb; struct selinux_fs_info *fsi = sb->s_fs_info; unsigned int i; + int err = 0; static const struct tree_descr files[] = { { 
"cache_threshold", &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR }, @@ -1597,26 +1615,20 @@ static int sel_make_avc_files(struct dentry *dir) #endif }; - for (i = 0; i < ARRAY_SIZE(files); i++) { + for (i = 0; !err && i < ARRAY_SIZE(files); i++) { struct inode *inode; - struct dentry *dentry; - - dentry = d_alloc_name(dir, files[i].name); - if (!dentry) - return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode); - if (!inode) { - dput(dentry); + if (!inode) return -ENOMEM; - } inode->i_fop = files[i].ops; inode->i_ino = ++fsi->last_ino; - d_add(dentry, inode); + + err = sel_attach_file(dir, files[i].name, inode); } - return 0; + return err; } static int sel_make_ss_files(struct dentry *dir) @@ -1624,30 +1636,25 @@ static int sel_make_ss_files(struct dentry *dir) struct super_block *sb = dir->d_sb; struct selinux_fs_info *fsi = sb->s_fs_info; unsigned int i; + int err = 0; static const struct tree_descr files[] = { { "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO }, }; - for (i = 0; i < ARRAY_SIZE(files); i++) { + for (i = 0; !err && i < ARRAY_SIZE(files); i++) { struct inode *inode; - struct dentry *dentry; - - dentry = d_alloc_name(dir, files[i].name); - if (!dentry) - return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode); - if (!inode) { - dput(dentry); + if (!inode) return -ENOMEM; - } inode->i_fop = files[i].ops; inode->i_ino = ++fsi->last_ino; - d_add(dentry, inode); + + err = sel_attach_file(dir, files[i].name, inode); } - return 0; + return err; } static ssize_t sel_read_initcon(struct file *file, char __user *buf, @@ -1675,30 +1682,25 @@ static const struct file_operations sel_initcon_ops = { static int sel_make_initcon_files(struct dentry *dir) { unsigned int i; + int err = 0; - for (i = 1; i <= SECINITSID_NUM; i++) { - struct inode *inode; - struct dentry *dentry; + for (i = 1; !err && i <= SECINITSID_NUM; i++) { const char *s = security_get_initial_sid_context(i); + struct inode *inode; if (!s) continue; - dentry = d_alloc_name(dir, s); - if (!dentry) - return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); - if (!inode) { - dput(dentry); + if (!inode) return -ENOMEM; - } inode->i_fop = &sel_initcon_ops; inode->i_ino = i|SEL_INITCON_INO_OFFSET; - d_add(dentry, inode); + err = sel_attach_file(dir, s, inode); } - return 0; + return err; } static inline unsigned long sel_class_to_ino(u16 class) @@ -1780,29 +1782,21 @@ static int sel_make_perm_files(struct selinux_policy *newpolicy, if (rc) return rc; - for (i = 0; i < nperms; i++) { + for (i = 0; !rc && i < nperms; i++) { struct inode *inode; - struct dentry *dentry; - - rc = -ENOMEM; - dentry = d_alloc_name(dir, perms[i]); - if (!dentry) - goto out; - rc = -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); if (!inode) { - dput(dentry); - goto out; + rc = -ENOMEM; + break; } inode->i_fop = &sel_perm_ops; /* i+1 since perm values are 1-indexed */ inode->i_ino = sel_perm_to_ino(classvalue, i + 1); - d_add(dentry, inode); + + rc = sel_attach_file(dir, perms[i], inode); } - rc = 0; -out: for (i = 0; i < nperms; i++) kfree(perms[i]); kfree(perms); @@ -1817,20 +1811,18 @@ static int sel_make_class_dir_entries(struct selinux_policy *newpolicy, struct selinux_fs_info *fsi = sb->s_fs_info; struct dentry *dentry = NULL; struct inode *inode = NULL; - - dentry = d_alloc_name(dir, "index"); - if (!dentry) - return -ENOMEM; + int err; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); - if (!inode) { - dput(dentry); + if (!inode) return -ENOMEM; - } inode->i_fop = 
&sel_class_ops; inode->i_ino = sel_class_to_ino(index); - d_add(dentry, inode); + + err = sel_attach_file(dir, "index", inode); + if (err) + return err; dentry = sel_make_dir(dir, "perms", &fsi->last_class_ino); if (IS_ERR(dentry)) @@ -1878,61 +1870,51 @@ out: return rc; } -static int sel_make_policycap(struct selinux_fs_info *fsi) +static int sel_make_policycap(struct dentry *dir) { + struct super_block *sb = dir->d_sb; unsigned int iter; - struct dentry *dentry = NULL; struct inode *inode = NULL; + int err = 0; + + for (iter = 0; !err && iter <= POLICYDB_CAP_MAX; iter++) { + const char *name; - for (iter = 0; iter <= POLICYDB_CAP_MAX; iter++) { if (iter < ARRAY_SIZE(selinux_policycap_names)) - dentry = d_alloc_name(fsi->policycap_dir, - selinux_policycap_names[iter]); + name = selinux_policycap_names[iter]; else - dentry = d_alloc_name(fsi->policycap_dir, "unknown"); + name = "unknown"; - if (dentry == NULL) + inode = sel_make_inode(sb, S_IFREG | 0444); + if (!inode) return -ENOMEM; - inode = sel_make_inode(fsi->sb, S_IFREG | 0444); - if (inode == NULL) { - dput(dentry); - return -ENOMEM; - } - inode->i_fop = &sel_policycap_ops; inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET; - d_add(dentry, inode); + err = sel_attach_file(dir, name, inode); } - return 0; + return err; } static struct dentry *sel_make_dir(struct dentry *dir, const char *name, unsigned long *ino) { - struct dentry *dentry = d_alloc_name(dir, name); struct inode *inode; - if (!dentry) - return ERR_PTR(-ENOMEM); - inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO); - if (!inode) { - dput(dentry); + if (!inode) return ERR_PTR(-ENOMEM); - } inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_ino = ++(*ino); /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); - d_add(dentry, inode); /* bump link count on parent directory, too */ inc_nlink(d_inode(dir)); - return dentry; + return sel_attach(dir, name, inode); } static int reject_all(struct mnt_idmap *idmap, struct inode *inode, int mask) @@ -1965,10 +1947,11 @@ static struct dentry *sel_make_swapover_dir(struct super_block *sb, /* directory inodes start off with i_nlink == 2 (for "." 
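The selinuxfs creation loops in the hunks above (sel_make_bools, sel_make_avc_files, sel_make_ss_files, sel_make_initcon_files, sel_make_perm_files, sel_make_policycap) are all converted from "return on first failure" to a "for (i = 0; !err && i < n; i++)" shape so the cleanup after the loop always runs. A standalone sketch of that pattern, with a hypothetical make_one() standing in for sel_make_inode()/sel_attach_file():

#include <stdio.h>

/* hypothetical stand-in for the per-entry creation step */
static int make_one(unsigned int i)
{
	return (i == 3) ? -12 /* -ENOMEM */ : 0;
}

int main(void)
{
	int err = 0;
	unsigned int i;

	/* stop creating entries as soon as one fails, but still fall
	 * through to whatever cleanup follows the loop */
	for (i = 0; !err && i < 6; i++)
		err = make_one(i);

	printf("stopped at i=%u, err=%d\n", i, err);
	return 0;
}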
entry) */ inc_nlink(inode); inode_lock(sb->s_root->d_inode); - d_add(dentry, inode); + d_make_persistent(dentry, inode); inc_nlink(sb->s_root->d_inode); inode_unlock(sb->s_root->d_inode); - return dentry; + dput(dentry); + return dentry; // borrowed } #define NULL_FILE_NAME "null" @@ -2001,7 +1984,7 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc) [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO}, [SEL_VALIDATE_TRANS] = {"validatetrans", &sel_transition_ops, S_IWUGO}, - /* last one */ {""} + /* last one */ {"", NULL, 0} }; ret = selinux_fs_info_create(sb); @@ -2021,16 +2004,9 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc) } ret = -ENOMEM; - dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME); - if (!dentry) - goto err; - - ret = -ENOMEM; inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO); - if (!inode) { - dput(dentry); + if (!inode) goto err; - } inode->i_ino = ++fsi->last_ino; isec = selinux_inode(inode); @@ -2039,7 +2015,9 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc) isec->initialized = LABEL_INITIALIZED; init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3)); - d_add(dentry, inode); + ret = sel_attach_file(sb->s_root, NULL_FILE_NAME, inode); + if (ret) + goto err; dentry = sel_make_dir(sb->s_root, "avc", &fsi->last_ino); if (IS_ERR(dentry)) { @@ -2078,15 +2056,13 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc) goto err; } - fsi->policycap_dir = sel_make_dir(sb->s_root, POLICYCAP_DIR_NAME, - &fsi->last_ino); - if (IS_ERR(fsi->policycap_dir)) { - ret = PTR_ERR(fsi->policycap_dir); - fsi->policycap_dir = NULL; + dentry = sel_make_dir(sb->s_root, "policy_capabilities", &fsi->last_ino); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto err; } - ret = sel_make_policycap(fsi); + ret = sel_make_policycap(dentry); if (ret) { pr_err("SELinux: failed to load policy capabilities\n"); goto err; @@ -2097,8 +2073,6 @@ err: pr_err("SELinux: %s: failed while creating inodes\n", __func__); - selinux_fs_info_free(sb); - return ret; } @@ -2120,7 +2094,7 @@ static int sel_init_fs_context(struct fs_context *fc) static void sel_kill_sb(struct super_block *sb) { selinux_fs_info_free(sb); - kill_litter_super(sb); + kill_anon_super(sb); } static struct file_system_type sel_fs_type = { @@ -2131,7 +2105,7 @@ static struct file_system_type sel_fs_type = { struct path selinux_null __ro_after_init; -static int __init init_sel_fs(void) +int __init init_sel_fs(void) { struct qstr null_name = QSTR_INIT(NULL_FILE_NAME, sizeof(NULL_FILE_NAME)-1); @@ -2158,8 +2132,8 @@ static int __init init_sel_fs(void) return err; } - selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root, - &null_name); + selinux_null.dentry = try_lookup_noperm(&null_name, + selinux_null.mnt->mnt_root); if (IS_ERR(selinux_null.dentry)) { pr_err("selinuxfs: could not lookup null!\n"); err = PTR_ERR(selinux_null.dentry); @@ -2175,5 +2149,3 @@ static int __init init_sel_fs(void) return err; } - -__initcall(init_sel_fs); diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c index 8e400dd736b7..d12ca337e649 100644 --- a/security/selinux/ss/avtab.c +++ b/security/selinux/ss/avtab.c @@ -20,48 +20,15 @@ #include <linux/errno.h> #include "avtab.h" #include "policydb.h" +#include "hash.h" static struct kmem_cache *avtab_node_cachep __ro_after_init; static struct kmem_cache *avtab_xperms_cachep __ro_after_init; -/* Based on MurmurHash3, written by Austin Appleby and placed in the - * public 
domain. - */ static inline u32 avtab_hash(const struct avtab_key *keyp, u32 mask) { - static const u32 c1 = 0xcc9e2d51; - static const u32 c2 = 0x1b873593; - static const u32 r1 = 15; - static const u32 r2 = 13; - static const u32 m = 5; - static const u32 n = 0xe6546b64; - - u32 hash = 0; - -#define mix(input) \ - do { \ - u32 v = input; \ - v *= c1; \ - v = (v << r1) | (v >> (32 - r1)); \ - v *= c2; \ - hash ^= v; \ - hash = (hash << r2) | (hash >> (32 - r2)); \ - hash = hash * m + n; \ - } while (0) - - mix(keyp->target_class); - mix(keyp->target_type); - mix(keyp->source_type); - -#undef mix - - hash ^= hash >> 16; - hash *= 0x85ebca6b; - hash ^= hash >> 13; - hash *= 0xc2b2ae35; - hash ^= hash >> 16; - - return hash & mask; + return av_hash((u32)keyp->target_class, (u32)keyp->target_type, + (u32)keyp->source_type, mask); } static struct avtab_node *avtab_insert_node(struct avtab *h, @@ -336,10 +303,10 @@ static const uint16_t spec_order[] = { }; /* clang-format on */ -int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, +int avtab_read_item(struct avtab *a, struct policy_file *fp, struct policydb *pol, int (*insertf)(struct avtab *a, const struct avtab_key *k, const struct avtab_datum *d, void *p), - void *p) + void *p, bool conditional) { __le16 buf16[4]; u16 enabled; @@ -457,6 +424,13 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, "was specified\n", vers); return -EINVAL; + } else if ((vers < POLICYDB_VERSION_COND_XPERMS) && + (key.specified & AVTAB_XPERMS) && conditional) { + pr_err("SELinux: avtab: policy version %u does not " + "support extended permissions rules in conditional " + "policies and one was specified\n", + vers); + return -EINVAL; } else if (key.specified & AVTAB_XPERMS) { memset(&xperms, 0, sizeof(struct avtab_extended_perms)); rc = next_entry(&xperms.specified, fp, sizeof(u8)); @@ -500,7 +474,7 @@ static int avtab_insertf(struct avtab *a, const struct avtab_key *k, return avtab_insert(a, k, d); } -int avtab_read(struct avtab *a, void *fp, struct policydb *pol) +int avtab_read(struct avtab *a, struct policy_file *fp, struct policydb *pol) { int rc; __le32 buf[1]; @@ -523,7 +497,7 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol) goto bad; for (i = 0; i < nel; i++) { - rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL); + rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL, false); if (rc) { if (rc == -ENOMEM) pr_err("SELinux: avtab: out of memory\n"); @@ -543,7 +517,7 @@ bad: goto out; } -int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp) +int avtab_write_item(struct policydb *p, const struct avtab_node *cur, struct policy_file *fp) { __le16 buf16[4]; __le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)]; @@ -579,7 +553,7 @@ int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp) return 0; } -int avtab_write(struct policydb *p, struct avtab *a, void *fp) +int avtab_write(struct policydb *p, struct avtab *a, struct policy_file *fp) { u32 i; int rc = 0; diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index f4407185401c..850b3453f259 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h @@ -89,7 +89,7 @@ struct avtab { }; void avtab_init(struct avtab *h); -int avtab_alloc(struct avtab *, u32); +int avtab_alloc(struct avtab *h, u32 nrules); int avtab_alloc_dup(struct avtab *new, const struct avtab *orig); void avtab_destroy(struct avtab *h); @@ -105,15 +105,16 @@ static inline void avtab_hash_eval(struct avtab 
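The deleted avtab_hash() body above was a MurmurHash3-derived mix of the three avtab key fields; the replacement simply forwards to av_hash() from the new ss/hash.h, whose contents are not part of this hunk. The sketch below is a userspace reconstruction of the removed mixing code, presumably what av_hash() now centralizes; it is not taken from hash.h itself:

#include <stdint.h>
#include <stdio.h>

static uint32_t mix_one(uint32_t hash, uint32_t v)
{
	v *= 0xcc9e2d51;			/* c1 */
	v = (v << 15) | (v >> 17);		/* rotl32(v, r1 = 15) */
	v *= 0x1b873593;			/* c2 */
	hash ^= v;
	hash = (hash << 13) | (hash >> 19);	/* rotl32(hash, r2 = 13) */
	return hash * 5 + 0xe6546b64;		/* m, n */
}

static uint32_t av_hash_sketch(uint32_t tclass, uint32_t ttype,
			       uint32_t stype, uint32_t mask)
{
	uint32_t hash = 0;

	hash = mix_one(hash, tclass);
	hash = mix_one(hash, ttype);
	hash = mix_one(hash, stype);

	/* final avalanche */
	hash ^= hash >> 16;
	hash *= 0x85ebca6b;
	hash ^= hash >> 13;
	hash *= 0xc2b2ae35;
	hash ^= hash >> 16;

	return hash & mask;
}

int main(void)
{
	printf("%u\n", av_hash_sketch(1, 2, 3, 0x1ff));
	return 0;
}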
*h, const char *tag) #endif struct policydb; -int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, +struct policy_file; +int avtab_read_item(struct avtab *a, struct policy_file *fp, struct policydb *pol, int (*insert)(struct avtab *a, const struct avtab_key *k, const struct avtab_datum *d, void *p), - void *p); + void *p, bool conditional); -int avtab_read(struct avtab *a, void *fp, struct policydb *pol); +int avtab_read(struct avtab *a, struct policy_file *fp, struct policydb *pol); int avtab_write_item(struct policydb *p, const struct avtab_node *cur, - void *fp); -int avtab_write(struct policydb *p, struct avtab *a, void *fp); + struct policy_file *fp); +int avtab_write(struct policydb *p, struct avtab *a, struct policy_file *fp); struct avtab_node *avtab_insert_nonunique(struct avtab *h, const struct avtab_key *key, diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 64ba95e40a6f..1bebfcb9c6a1 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -206,7 +206,7 @@ static int bool_isvalid(struct cond_bool_datum *b) return 1; } -int cond_read_bool(struct policydb *p, struct symtab *s, void *fp) +int cond_read_bool(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct cond_bool_datum *booldatum; @@ -230,17 +230,11 @@ int cond_read_bool(struct policydb *p, struct symtab *s, void *fp) goto err; len = le32_to_cpu(buf[2]); - if (((len == 0) || (len == (u32)-1))) - goto err; - rc = -ENOMEM; - key = kmalloc(len + 1, GFP_KERNEL); - if (!key) - goto err; - rc = next_entry(key, fp, len); + rc = str_read(&key, GFP_KERNEL, fp, len); if (rc) goto err; - key[len] = '\0'; + rc = symtab_insert(s, key, booldatum); if (rc) goto err; @@ -323,7 +317,7 @@ static int cond_insertf(struct avtab *a, const struct avtab_key *k, return 0; } -static int cond_read_av_list(struct policydb *p, void *fp, +static int cond_read_av_list(struct policydb *p, struct policy_file *fp, struct cond_av_list *list, struct cond_av_list *other) { @@ -349,7 +343,7 @@ static int cond_read_av_list(struct policydb *p, void *fp, for (i = 0; i < len; i++) { data.dst = &list->nodes[i]; rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf, - &data); + &data, true); if (rc) { kfree(list->nodes); list->nodes = NULL; @@ -375,7 +369,7 @@ static int expr_node_isvalid(struct policydb *p, struct cond_expr_node *expr) return 1; } -static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) +static int cond_read_node(struct policydb *p, struct cond_node *node, struct policy_file *fp) { __le32 buf[2]; u32 i, len; @@ -415,7 +409,7 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) return cond_read_av_list(p, fp, &node->false_list, &node->true_list); } -int cond_read_list(struct policydb *p, void *fp) +int cond_read_list(struct policydb *p, struct policy_file *fp) { __le32 buf[1]; u32 i, len; @@ -453,7 +447,7 @@ int cond_write_bool(void *vkey, void *datum, void *ptr) char *key = vkey; struct cond_bool_datum *booldatum = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[3]; u32 len; int rc; @@ -536,7 +530,7 @@ static int cond_write_node(struct policydb *p, struct cond_node *node, return 0; } -int cond_write_list(struct policydb *p, void *fp) +int cond_write_list(struct policydb *p, struct policy_file *fp) { u32 i; __le32 buf[1]; diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h index 
8827715bad75..468e98ad3ea1 100644 --- a/security/selinux/ss/conditional.h +++ b/security/selinux/ss/conditional.h @@ -68,10 +68,10 @@ int cond_destroy_bool(void *key, void *datum, void *p); int cond_index_bool(void *key, void *datum, void *datap); -int cond_read_bool(struct policydb *p, struct symtab *s, void *fp); -int cond_read_list(struct policydb *p, void *fp); +int cond_read_bool(struct policydb *p, struct symtab *s, struct policy_file *fp); +int cond_read_list(struct policydb *p, struct policy_file *fp); int cond_write_bool(void *key, void *datum, void *ptr); -int cond_write_list(struct policydb *p, void *fp); +int cond_write_list(struct policydb *p, struct policy_file *fp); void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd, struct extended_perms *xperms); diff --git a/security/selinux/ss/context.c b/security/selinux/ss/context.c index e39990f494dd..a528b7f76280 100644 --- a/security/selinux/ss/context.c +++ b/security/selinux/ss/context.c @@ -20,7 +20,7 @@ u32 context_compute_hash(const struct context *c) * context struct with only the len & str set (and vice versa) * under a given policy. Since context structs from different * policies should never meet, it is safe to hash valid and - * invalid contexts differently. The context_cmp() function + * invalid contexts differently. The context_equal() function * already operates under the same assumption. */ if (c->len) diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h index 7ccab2e6965f..dd3b9b5b588e 100644 --- a/security/selinux/ss/context.h +++ b/security/selinux/ss/context.h @@ -132,13 +132,13 @@ out: return rc; } -static inline int mls_context_cmp(const struct context *c1, - const struct context *c2) +static inline bool mls_context_equal(const struct context *c1, + const struct context *c2) { return ((c1->range.level[0].sens == c2->range.level[0].sens) && - ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) && + ebitmap_equal(&c1->range.level[0].cat, &c2->range.level[0].cat) && (c1->range.level[1].sens == c2->range.level[1].sens) && - ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat)); + ebitmap_equal(&c1->range.level[1].cat, &c2->range.level[1].cat)); } static inline void mls_context_destroy(struct context *c) @@ -188,15 +188,15 @@ static inline void context_destroy(struct context *c) mls_context_destroy(c); } -static inline int context_cmp(const struct context *c1, - const struct context *c2) +static inline bool context_equal(const struct context *c1, + const struct context *c2) { if (c1->len && c2->len) return (c1->len == c2->len && !strcmp(c1->str, c2->str)); if (c1->len || c2->len) return 0; return ((c1->user == c2->user) && (c1->role == c2->role) && - (c1->type == c2->type) && mls_context_cmp(c1, c2)); + (c1->type == c2->type) && mls_context_equal(c1, c2)); } u32 context_compute_hash(const struct context *c); diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 99c01be15115..43bc19e21960 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -25,12 +25,12 @@ static struct kmem_cache *ebitmap_node_cachep __ro_after_init; -int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2) +bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2) { const struct ebitmap_node *n1, *n2; if (e1->highbit != e2->highbit) - return 0; + return false; n1 = e1->node; n2 = e2->node; @@ -41,9 +41,9 @@ int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2) } if (n1 || n2) - 
return 0; + return false; - return 1; + return true; } int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src) @@ -360,7 +360,7 @@ void ebitmap_destroy(struct ebitmap *e) e->node = NULL; } -int ebitmap_read(struct ebitmap *e, void *fp) +int ebitmap_read(struct ebitmap *e, struct policy_file *fp) { struct ebitmap_node *n = NULL; u32 mapunit, count, startbit, index, i; @@ -478,7 +478,7 @@ bad: goto out; } -int ebitmap_write(const struct ebitmap *e, void *fp) +int ebitmap_write(const struct ebitmap *e, struct policy_file *fp) { struct ebitmap_node *n; u32 bit, count, last_bit, last_startbit; diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h index ba2ac3da1153..c9569998f287 100644 --- a/security/selinux/ss/ebitmap.h +++ b/security/selinux/ss/ebitmap.h @@ -120,7 +120,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, u32 bit) (bit) < ebitmap_length(e); \ (bit) = ebitmap_next_positive(e, &(n), bit)) -int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2); +bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2); int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src); int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1, const struct ebitmap *e2); @@ -129,8 +129,9 @@ int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2, int ebitmap_get_bit(const struct ebitmap *e, u32 bit); int ebitmap_set_bit(struct ebitmap *e, u32 bit, int value); void ebitmap_destroy(struct ebitmap *e); -int ebitmap_read(struct ebitmap *e, void *fp); -int ebitmap_write(const struct ebitmap *e, void *fp); +struct policy_file; +int ebitmap_read(struct ebitmap *e, struct policy_file *fp); +int ebitmap_write(const struct ebitmap *e, struct policy_file *fp); u32 ebitmap_hash(const struct ebitmap *e, u32 hash); #ifdef CONFIG_NETLABEL diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c index 383fd2d70878..1382eb3bfde1 100644 --- a/security/selinux/ss/hashtab.c +++ b/security/selinux/ss/hashtab.c @@ -40,7 +40,8 @@ int hashtab_init(struct hashtab *h, u32 nel_hint) h->htable = NULL; if (size) { - h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL); + h->htable = kcalloc(size, sizeof(*h->htable), + GFP_KERNEL | __GFP_NOWARN); if (!h->htable) return -ENOMEM; h->size = size; diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index 989c809d310d..a6e49269f535 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c @@ -171,7 +171,7 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l) * levdatum->level->cat and no bit in l->cat is larger than * p->p_cats.nprim. */ - return ebitmap_contains(&levdatum->level->cat, &l->cat, + return ebitmap_contains(&levdatum->level.cat, &l->cat, p->p_cats.nprim); } @@ -289,7 +289,7 @@ int mls_context_to_sid(struct policydb *pol, char oldc, char *scontext, levdatum = symtab_search(&pol->p_levels, sensitivity); if (!levdatum) return -EINVAL; - context->range.level[l].sens = levdatum->level->sens; + context->range.level[l].sens = levdatum->level.sens; /* Extract category set. 
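ebitmap_cmp() is renamed to ebitmap_equal() and now returns bool; the algorithm is unchanged: compare highbit, walk both node lists in lockstep, and require them to end together. The per-node comparison is not visible in the hunk, so the sketch below fills it in as an assumption, with the node trimmed to a single map word (the real ebitmap_node carries a startbit plus an array of map words):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	uint32_t startbit;
	uint64_t maps;		/* single word here; an array in the kernel */
	struct node *next;
};

struct bitmap {
	uint32_t highbit;
	struct node *node;
};

static bool bitmap_equal(const struct bitmap *e1, const struct bitmap *e2)
{
	const struct node *n1 = e1->node, *n2 = e2->node;

	if (e1->highbit != e2->highbit)
		return false;

	/* lockstep walk: every node must match its counterpart */
	while (n1 && n2) {
		if (n1->startbit != n2->startbit || n1->maps != n2->maps)
			return false;
		n1 = n1->next;
		n2 = n2->next;
	}

	/* one list longer than the other means not equal */
	return !n1 && !n2;
}

int main(void)
{
	struct node a = { 0, 0x5, NULL }, b = { 0, 0x5, NULL };
	struct bitmap e1 = { 64, &a }, e2 = { 64, &b };

	printf("%s\n", bitmap_equal(&e1, &e2) ? "equal" : "different");
	return 0;
}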
*/ while (next_cat != NULL) { @@ -456,7 +456,7 @@ int mls_convert_context(struct policydb *oldp, struct policydb *newp, if (!levdatum) return -EINVAL; - newc->range.level[l].sens = levdatum->level->sens; + newc->range.level[l].sens = levdatum->level.sens; ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, node, i) diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h index 7ef6e8cb0cf4..51df2ebd1211 100644 --- a/security/selinux/ss/mls_types.h +++ b/security/selinux/ss/mls_types.h @@ -29,7 +29,7 @@ struct mls_range { static inline int mls_level_eq(const struct mls_level *l1, const struct mls_level *l2) { - return ((l1->sens == l2->sens) && ebitmap_cmp(&l1->cat, &l2->cat)); + return ((l1->sens == l2->sens) && ebitmap_equal(&l1->cat, &l2->cat)); } static inline int mls_level_dom(const struct mls_level *l1, diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 383f3ae82a73..91df3db6a88c 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -155,6 +155,16 @@ static const struct policydb_compat_info policydb_compat[] = { .sym_num = SYM_NUM, .ocon_num = OCON_NUM, }, + { + .version = POLICYDB_VERSION_COND_XPERMS, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_NEVERAUDIT, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, }; static const struct policydb_compat_info * @@ -296,9 +306,7 @@ static int sens_destroy(void *key, void *datum, void *p) kfree(key); if (datum) { levdatum = datum; - if (levdatum->level) - ebitmap_destroy(&levdatum->level->cat); - kfree(levdatum->level); + ebitmap_destroy(&levdatum->level.cat); } kfree(datum); return 0; @@ -528,6 +536,7 @@ static void policydb_init(struct policydb *p) ebitmap_init(&p->filename_trans_ttypes); ebitmap_init(&p->policycaps); ebitmap_init(&p->permissive_map); + ebitmap_init(&p->neveraudit_map); } /* @@ -630,11 +639,11 @@ static int sens_index(void *key, void *datum, void *datap) p = datap; if (!levdatum->isalias) { - if (!levdatum->level->sens || - levdatum->level->sens > p->p_levels.nprim) + if (!levdatum->level.sens || + levdatum->level.sens > p->p_levels.nprim) return -EINVAL; - p->sym_val_to_name[SYM_LEVELS][levdatum->level->sens - 1] = key; + p->sym_val_to_name[SYM_LEVELS][levdatum->level.sens - 1] = key; } return 0; @@ -849,6 +858,7 @@ void policydb_destroy(struct policydb *p) ebitmap_destroy(&p->filename_trans_ttypes); ebitmap_destroy(&p->policycaps); ebitmap_destroy(&p->permissive_map); + ebitmap_destroy(&p->neveraudit_map); } /* @@ -992,7 +1002,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c) * Read a MLS range structure from a policydb binary * representation file. */ -static int mls_read_range_helper(struct mls_range *r, void *fp) +static int mls_read_range_helper(struct mls_range *r, struct policy_file *fp) { __le32 buf[2]; u32 items; @@ -1052,7 +1062,7 @@ out: * from a policydb binary representation file. */ static int context_read_and_validate(struct context *c, struct policydb *p, - void *fp) + struct policy_file *fp) { __le32 buf[3]; int rc; @@ -1090,7 +1100,7 @@ out: * binary representation file. 
*/ -static int str_read(char **strp, gfp_t flags, void *fp, u32 len) +int str_read(char **strp, gfp_t flags, struct policy_file *fp, u32 len) { int rc; char *str; @@ -1113,7 +1123,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len) return 0; } -static int perm_read(struct policydb *p, struct symtab *s, void *fp) +static int perm_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct perm_datum *perdatum; @@ -1146,7 +1156,7 @@ bad: return rc; } -static int common_read(struct policydb *p, struct symtab *s, void *fp) +static int common_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct common_datum *comdatum; @@ -1198,7 +1208,7 @@ static void type_set_init(struct type_set *t) ebitmap_init(&t->negset); } -static int type_set_read(struct type_set *t, void *fp) +static int type_set_read(struct type_set *t, struct policy_file *fp) { __le32 buf[1]; int rc; @@ -1217,7 +1227,7 @@ static int type_set_read(struct type_set *t, void *fp) } static int read_cons_helper(struct policydb *p, struct constraint_node **nodep, - u32 ncons, int allowxtarget, void *fp) + u32 ncons, int allowxtarget, struct policy_file *fp) { struct constraint_node *c, *lc; struct constraint_expr *e, *le; @@ -1311,7 +1321,7 @@ static int read_cons_helper(struct policydb *p, struct constraint_node **nodep, return 0; } -static int class_read(struct policydb *p, struct symtab *s, void *fp) +static int class_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct class_datum *cladatum; @@ -1408,7 +1418,7 @@ bad: return rc; } -static int role_read(struct policydb *p, struct symtab *s, void *fp) +static int role_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct role_datum *role; @@ -1465,7 +1475,7 @@ bad: return rc; } -static int type_read(struct policydb *p, struct symtab *s, void *fp) +static int type_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct type_datum *typdatum; @@ -1517,7 +1527,7 @@ bad: * Read a MLS level structure from a policydb binary * representation file. 
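cond_read_bool() earlier in this diff, and the policydb string identifier read further down, drop the open-coded kmalloc()/next_entry()/terminate sequence in favor of str_read(), which this hunk turns from a static helper into an exported one. Its body is not shown here; the userspace sketch below captures the expected behaviour (bounded, NUL-terminated read from an in-memory policy image), using an analogue of struct policy_file {data, len}:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct policy_file {
	const char *data;
	size_t len;
};

/* sketch of str_read(): length check, bounded copy, forced NUL */
static int str_read_sketch(char **strp, struct policy_file *fp, unsigned int len)
{
	char *str;

	if (len == 0 || len == (unsigned int)-1 || len > fp->len)
		return -22;			/* -EINVAL */

	str = malloc(len + 1);
	if (!str)
		return -12;			/* -ENOMEM */

	memcpy(str, fp->data, len);
	str[len] = '\0';
	fp->data += len;
	fp->len -= len;

	*strp = str;
	return 0;
}

int main(void)
{
	struct policy_file fp = { "secure_t-extra", 14 };
	char *key = NULL;

	if (!str_read_sketch(&key, &fp, 8))
		printf("read \"%s\", %zu bytes left\n", key, fp.len);
	free(key);
	return 0;
}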
*/ -static int mls_read_level(struct mls_level *lp, void *fp) +static int mls_read_level(struct mls_level *lp, struct policy_file *fp) { __le32 buf[1]; int rc; @@ -1539,7 +1549,7 @@ static int mls_read_level(struct mls_level *lp, void *fp) return 0; } -static int user_read(struct policydb *p, struct symtab *s, void *fp) +static int user_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct user_datum *usrdatum; @@ -1590,7 +1600,7 @@ bad: return rc; } -static int sens_read(struct policydb *p, struct symtab *s, void *fp) +static int sens_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct level_datum *levdatum; @@ -1613,12 +1623,7 @@ static int sens_read(struct policydb *p, struct symtab *s, void *fp) if (rc) goto bad; - rc = -ENOMEM; - levdatum->level = kmalloc(sizeof(*levdatum->level), GFP_KERNEL); - if (!levdatum->level) - goto bad; - - rc = mls_read_level(levdatum->level, fp); + rc = mls_read_level(&levdatum->level, fp); if (rc) goto bad; @@ -1631,7 +1636,7 @@ bad: return rc; } -static int cat_read(struct policydb *p, struct symtab *s, void *fp) +static int cat_read(struct policydb *p, struct symtab *s, struct policy_file *fp) { char *key = NULL; struct cat_datum *catdatum; @@ -1666,7 +1671,7 @@ bad: /* clang-format off */ static int (*const read_f[SYM_NUM])(struct policydb *p, struct symtab *s, - void *fp) = { + struct policy_file *fp) = { common_read, class_read, role_read, @@ -1836,7 +1841,7 @@ u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name) return 1U << (perdatum->value - 1); } -static int range_read(struct policydb *p, void *fp) +static int range_read(struct policydb *p, struct policy_file *fp) { struct range_trans *rt = NULL; struct mls_range *r = NULL; @@ -1913,7 +1918,7 @@ out: return rc; } -static int filename_trans_read_helper_compat(struct policydb *p, void *fp) +static int filename_trans_read_helper_compat(struct policydb *p, struct policy_file *fp) { struct filename_trans_key key, *ft = NULL; struct filename_trans_datum *last, *datum = NULL; @@ -1998,7 +2003,7 @@ out: return rc; } -static int filename_trans_read_helper(struct policydb *p, void *fp) +static int filename_trans_read_helper(struct policydb *p, struct policy_file *fp) { struct filename_trans_key *ft = NULL; struct filename_trans_datum **dst, *datum, *first = NULL; @@ -2087,7 +2092,7 @@ out: return rc; } -static int filename_trans_read(struct policydb *p, void *fp) +static int filename_trans_read(struct policydb *p, struct policy_file *fp) { u32 nel, i; __le32 buf[1]; @@ -2128,7 +2133,7 @@ static int filename_trans_read(struct policydb *p, void *fp) return 0; } -static int genfs_read(struct policydb *p, void *fp) +static int genfs_read(struct policydb *p, struct policy_file *fp) { int rc; u32 i, j, nel, nel2, len, len2; @@ -2242,7 +2247,7 @@ out: } static int ocontext_read(struct policydb *p, - const struct policydb_compat_info *info, void *fp) + const struct policydb_compat_info *info, struct policy_file *fp) { int rc; unsigned int i; @@ -2439,7 +2444,7 @@ out: * Read the configuration data from a policy database binary * representation file into a policy database structure. 
*/ -int policydb_read(struct policydb *p, void *fp) +int policydb_read(struct policydb *p, struct policy_file *fp) { struct role_allow *ra, *lra; struct role_trans_key *rtk = NULL; @@ -2475,24 +2480,18 @@ int policydb_read(struct policydb *p, void *fp) goto bad; } - rc = -ENOMEM; - policydb_str = kmalloc(len + 1, GFP_KERNEL); - if (!policydb_str) { - pr_err("SELinux: unable to allocate memory for policydb " - "string of length %d\n", - len); - goto bad; - } - - rc = next_entry(policydb_str, fp, len); + rc = str_read(&policydb_str, GFP_KERNEL, fp, len); if (rc) { - pr_err("SELinux: truncated policydb string identifier\n"); - kfree(policydb_str); + if (rc == -ENOMEM) { + pr_err("SELinux: unable to allocate memory for policydb string of length %d\n", + len); + } else { + pr_err("SELinux: truncated policydb string identifier\n"); + } goto bad; } rc = -EINVAL; - policydb_str[len] = '\0'; if (strcmp(policydb_str, POLICYDB_STRING)) { pr_err("SELinux: policydb string %s does not match " "my string %s\n", @@ -2546,6 +2545,12 @@ int policydb_read(struct policydb *p, void *fp) goto bad; } + if (p->policyvers >= POLICYDB_VERSION_NEVERAUDIT) { + rc = ebitmap_read(&p->neveraudit_map, fp); + if (rc) + goto bad; + } + rc = -EINVAL; info = policydb_lookup_compat(p->policyvers); if (!info) { @@ -2762,7 +2767,7 @@ bad: * Write a MLS level structure to a policydb binary * representation file. */ -static int mls_write_level(struct mls_level *l, void *fp) +static int mls_write_level(struct mls_level *l, struct policy_file *fp) { __le32 buf[1]; int rc; @@ -2783,7 +2788,7 @@ static int mls_write_level(struct mls_level *l, void *fp) * Write a MLS range structure to a policydb binary * representation file. */ -static int mls_write_range_helper(struct mls_range *r, void *fp) +static int mls_write_range_helper(struct mls_range *r, struct policy_file *fp) { __le32 buf[3]; size_t items; @@ -2823,7 +2828,7 @@ static int sens_write(void *vkey, void *datum, void *ptr) char *key = vkey; struct level_datum *levdatum = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[2]; size_t len; int rc; @@ -2839,7 +2844,7 @@ static int sens_write(void *vkey, void *datum, void *ptr) if (rc) return rc; - rc = mls_write_level(levdatum->level, fp); + rc = mls_write_level(&levdatum->level, fp); if (rc) return rc; @@ -2851,7 +2856,7 @@ static int cat_write(void *vkey, void *datum, void *ptr) char *key = vkey; struct cat_datum *catdatum = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[3]; size_t len; int rc; @@ -2876,7 +2881,7 @@ static int role_trans_write_one(void *key, void *datum, void *ptr) struct role_trans_key *rtk = key; struct role_trans_datum *rtd = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; struct policydb *p = pd->p; __le32 buf[3]; int rc; @@ -2896,7 +2901,7 @@ static int role_trans_write_one(void *key, void *datum, void *ptr) return 0; } -static int role_trans_write(struct policydb *p, void *fp) +static int role_trans_write(struct policydb *p, struct policy_file *fp) { struct policy_data pd = { .p = p, .fp = fp }; __le32 buf[1]; @@ -2910,7 +2915,7 @@ static int role_trans_write(struct policydb *p, void *fp) return hashtab_map(&p->role_tr, role_trans_write_one, &pd); } -static int role_allow_write(struct role_allow *r, void *fp) +static int role_allow_write(struct role_allow *r, struct policy_file *fp) { struct role_allow *ra; __le32 buf[2]; @@ -2938,7 +2943,7 @@ static int 
role_allow_write(struct role_allow *r, void *fp) * Write a security context structure * to a policydb binary representation file. */ -static int context_write(struct policydb *p, struct context *c, void *fp) +static int context_write(struct policydb *p, struct context *c, struct policy_file *fp) { int rc; __le32 buf[3]; @@ -2991,7 +2996,7 @@ static int common_write(void *vkey, void *datum, void *ptr) char *key = vkey; struct common_datum *comdatum = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[4]; size_t len; int rc; @@ -3016,7 +3021,7 @@ static int common_write(void *vkey, void *datum, void *ptr) return 0; } -static int type_set_write(struct type_set *t, void *fp) +static int type_set_write(struct type_set *t, struct policy_file *fp) { int rc; __le32 buf[1]; @@ -3035,7 +3040,7 @@ static int type_set_write(struct type_set *t, void *fp) } static int write_cons_helper(struct policydb *p, struct constraint_node *node, - void *fp) + struct policy_file *fp) { struct constraint_node *c; struct constraint_expr *e; @@ -3086,7 +3091,7 @@ static int class_write(void *vkey, void *datum, void *ptr) char *key = vkey; struct class_datum *cladatum = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; struct policydb *p = pd->p; struct constraint_node *c; __le32 buf[6]; @@ -3171,7 +3176,7 @@ static int role_write(void *vkey, void *datum, void *ptr) char *key = vkey; struct role_datum *role = datum; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; struct policydb *p = pd->p; __le32 buf[3]; size_t items, len; @@ -3211,7 +3216,7 @@ static int type_write(void *vkey, void *datum, void *ptr) struct type_datum *typdatum = datum; struct policy_data *pd = ptr; struct policydb *p = pd->p; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[4]; int rc; size_t items, len; @@ -3252,7 +3257,7 @@ static int user_write(void *vkey, void *datum, void *ptr) struct user_datum *usrdatum = datum; struct policy_data *pd = ptr; struct policydb *p = pd->p; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; __le32 buf[3]; size_t items, len; int rc; @@ -3301,7 +3306,8 @@ static int (*const write_f[SYM_NUM])(void *key, void *datum, void *datap) = { /* clang-format on */ static int ocontext_write(struct policydb *p, - const struct policydb_compat_info *info, void *fp) + const struct policydb_compat_info *info, + struct policy_file *fp) { unsigned int i, j; int rc; @@ -3437,7 +3443,7 @@ static int ocontext_write(struct policydb *p, return 0; } -static int genfs_write(struct policydb *p, void *fp) +static int genfs_write(struct policydb *p, struct policy_file *fp) { struct genfs *genfs; struct ocontext *c; @@ -3495,7 +3501,7 @@ static int range_write_helper(void *key, void *data, void *ptr) struct range_trans *rt = key; struct mls_range *r = data; struct policy_data *pd = ptr; - void *fp = pd->fp; + struct policy_file *fp = pd->fp; struct policydb *p = pd->p; int rc; @@ -3517,7 +3523,7 @@ static int range_write_helper(void *key, void *data, void *ptr) return 0; } -static int range_write(struct policydb *p, void *fp) +static int range_write(struct policydb *p, struct policy_file *fp) { __le32 buf[1]; int rc; @@ -3544,7 +3550,7 @@ static int filename_write_helper_compat(void *key, void *data, void *ptr) struct filename_trans_key *ft = key; struct filename_trans_datum *datum = data; struct ebitmap_node *node; - void *fp = ptr; + struct policy_file *fp = ptr; __le32 buf[4]; int rc; u32 
bit, len = strlen(ft->name); @@ -3581,7 +3587,7 @@ static int filename_write_helper(void *key, void *data, void *ptr) { struct filename_trans_key *ft = key; struct filename_trans_datum *datum; - void *fp = ptr; + struct policy_file *fp = ptr; __le32 buf[3]; int rc; u32 ndatum, len = strlen(ft->name); @@ -3626,7 +3632,7 @@ static int filename_write_helper(void *key, void *data, void *ptr) return 0; } -static int filename_trans_write(struct policydb *p, void *fp) +static int filename_trans_write(struct policydb *p, struct policy_file *fp) { __le32 buf[1]; int rc; @@ -3658,7 +3664,7 @@ static int filename_trans_write(struct policydb *p, void *fp) * structure to a policy database binary representation * file. */ -int policydb_write(struct policydb *p, void *fp) +int policydb_write(struct policydb *p, struct policy_file *fp) { unsigned int num_syms; int rc; @@ -3730,6 +3736,12 @@ int policydb_write(struct policydb *p, void *fp) return rc; } + if (p->policyvers >= POLICYDB_VERSION_NEVERAUDIT) { + rc = ebitmap_write(&p->neveraudit_map, fp); + if (rc) + return rc; + } + num_syms = info->sym_num; for (i = 0; i < num_syms; i++) { struct policy_data pd; diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 4bba386264a3..89a180b1742f 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h @@ -126,7 +126,7 @@ struct user_datum { /* Sensitivity attributes */ struct level_datum { - struct mls_level *level; /* sensitivity and associated categories */ + struct mls_level level; /* sensitivity and associated categories */ unsigned char isalias; /* is this sensitivity an alias for another? */ }; @@ -144,7 +144,7 @@ struct range_trans { /* Boolean data type */ struct cond_bool_datum { - __u32 value; /* internal type value */ + u32 value; /* internal type value */ int state; }; @@ -300,6 +300,8 @@ struct policydb { struct ebitmap permissive_map; + struct ebitmap neveraudit_map; + /* length of this policy when it was loaded */ size_t len; @@ -312,14 +314,19 @@ struct policydb { u32 process_trans_perms; } __randomize_layout; +struct policy_file { + char *data; + size_t len; +}; + extern void policydb_destroy(struct policydb *p); extern int policydb_load_isids(struct policydb *p, struct sidtab *s); extern int policydb_context_isvalid(struct policydb *p, struct context *c); extern int policydb_class_isvalid(struct policydb *p, unsigned int class); extern int policydb_type_isvalid(struct policydb *p, unsigned int type); extern int policydb_role_isvalid(struct policydb *p, unsigned int role); -extern int policydb_read(struct policydb *p, void *fp); -extern int policydb_write(struct policydb *p, void *fp); +extern int policydb_read(struct policydb *p, struct policy_file *fp); +extern int policydb_write(struct policydb *p, struct policy_file *fp); extern struct filename_trans_datum * policydb_filenametr_search(struct policydb *p, struct filename_trans_key *key); @@ -342,14 +349,9 @@ policydb_roletr_search(struct policydb *p, struct role_trans_key *key); #define POLICYDB_MAGIC SELINUX_MAGIC #define POLICYDB_STRING "SE Linux" -struct policy_file { - char *data; - size_t len; -}; - struct policy_data { struct policydb *p; - void *fp; + struct policy_file *fp; }; static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) @@ -386,6 +388,8 @@ static inline char *sym_name(struct policydb *p, unsigned int sym_num, return p->sym_val_to_name[sym_num][element_nr]; } +extern int str_read(char **strp, gfp_t flags, struct policy_file *fp, u32 len); + extern u16 
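The policydb.h hunk above changes struct level_datum to embed struct mls_level by value instead of holding a pointer, which is what lets the earlier sens_read()/sens_destroy() hunks drop a kmalloc()/kfree() pair per sensitivity and turn levdatum->level->sens into levdatum->level.sens. A trimmed sketch of the before/after shape (ebitmap reduced to a placeholder):

#include <stdio.h>

struct ebitmap { void *node; unsigned int highbit; };
struct mls_level { unsigned int sens; struct ebitmap cat; };

/* old layout: the level was a separately allocated object */
struct level_datum_old {
	struct mls_level *level;
	unsigned char isalias;
};

/* new layout: the level is embedded, so sens_read() needs no extra
 * allocation and sens_destroy() only tears down the category bitmap */
struct level_datum {
	struct mls_level level;
	unsigned char isalias;
};

int main(void)
{
	struct level_datum d = { { 1, { NULL, 0 } }, 0 };

	/* levdatum->level->sens becomes levdatum->level.sens */
	printf("sens=%u\n", d.level.sens);
	return 0;
}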
string_to_security_class(struct policydb *p, const char *name); extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 971c45d576ba..13fc712d5923 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -46,6 +46,7 @@ #include <linux/in.h> #include <linux/sched.h> #include <linux/audit.h> +#include <linux/parser.h> #include <linux/vmalloc.h> #include <linux/lsm_hooks.h> #include <net/netlabel.h> @@ -582,7 +583,7 @@ static void type_attribute_bounds_av(struct policydb *policydb, } /* - * Flag which drivers have permissions. + * Flag which drivers have permissions and which base permissions are covered. */ void services_compute_xperms_drivers( struct extended_perms *xperms, @@ -592,12 +593,19 @@ void services_compute_xperms_drivers( switch (node->datum.u.xperms->specified) { case AVTAB_XPERMS_IOCTLDRIVER: + xperms->base_perms |= AVC_EXT_IOCTL; /* if one or more driver has all permissions allowed */ for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++) xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i]; break; case AVTAB_XPERMS_IOCTLFUNCTION: + xperms->base_perms |= AVC_EXT_IOCTL; + /* if allowing permissions within a driver */ + security_xperm_set(xperms->drivers.p, + node->datum.u.xperms->driver); + break; case AVTAB_XPERMS_NLMSG: + xperms->base_perms |= AVC_EXT_NLMSG; /* if allowing permissions within a driver */ security_xperm_set(xperms->drivers.p, node->datum.u.xperms->driver); @@ -631,8 +639,7 @@ static void context_struct_compute_av(struct policydb *policydb, avd->auditallow = 0; avd->auditdeny = 0xffffffff; if (xperms) { - memset(&xperms->drivers, 0, sizeof(xperms->drivers)); - xperms->len = 0; + memset(xperms, 0, sizeof(*xperms)); } if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) { @@ -946,7 +953,7 @@ static void avd_init(struct selinux_policy *policy, struct av_decision *avd) } static void update_xperms_extended_data(u8 specified, - struct extended_perms_data *from, + const struct extended_perms_data *from, struct extended_perms_data *xp_data) { unsigned int i; @@ -967,38 +974,52 @@ static void update_xperms_extended_data(u8 specified, void services_compute_xperms_decision(struct extended_perms_decision *xpermd, struct avtab_node *node) { + u16 specified; + switch (node->datum.u.xperms->specified) { case AVTAB_XPERMS_IOCTLFUNCTION: - case AVTAB_XPERMS_NLMSG: - if (xpermd->driver != node->datum.u.xperms->driver) + if (xpermd->base_perm != AVC_EXT_IOCTL || + xpermd->driver != node->datum.u.xperms->driver) return; break; case AVTAB_XPERMS_IOCTLDRIVER: - if (!security_xperm_test(node->datum.u.xperms->perms.p, - xpermd->driver)) + if (xpermd->base_perm != AVC_EXT_IOCTL || + !security_xperm_test(node->datum.u.xperms->perms.p, + xpermd->driver)) + return; + break; + case AVTAB_XPERMS_NLMSG: + if (xpermd->base_perm != AVC_EXT_NLMSG || + xpermd->driver != node->datum.u.xperms->driver) return; break; default: - BUG(); + pr_warn_once( + "SELinux: unknown extended permission (%u) will be ignored\n", + node->datum.u.xperms->specified); + return; } - if (node->key.specified == AVTAB_XPERMS_ALLOWED) { + specified = node->key.specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); + + if (specified == AVTAB_XPERMS_ALLOWED) { xpermd->used |= XPERMS_ALLOWED; update_xperms_extended_data(node->datum.u.xperms->specified, &node->datum.u.xperms->perms, xpermd->allowed); - } else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) { + } else if (specified == 
AVTAB_XPERMS_AUDITALLOW) { xpermd->used |= XPERMS_AUDITALLOW; update_xperms_extended_data(node->datum.u.xperms->specified, &node->datum.u.xperms->perms, xpermd->auditallow); - } else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) { + } else if (specified == AVTAB_XPERMS_DONTAUDIT) { xpermd->used |= XPERMS_DONTAUDIT; update_xperms_extended_data(node->datum.u.xperms->specified, &node->datum.u.xperms->perms, xpermd->dontaudit); } else { - BUG(); + pr_warn_once("SELinux: unknown specified key (%u)\n", + node->key.specified); } } @@ -1006,6 +1027,7 @@ void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 orig_tclass, u8 driver, + u8 base_perm, struct extended_perms_decision *xpermd) { struct selinux_policy *policy; @@ -1019,6 +1041,7 @@ void security_compute_xperms_decision(u32 ssid, struct ebitmap_node *snode, *tnode; unsigned int i, j; + xpermd->base_perm = base_perm; xpermd->driver = driver; xpermd->used = 0; memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p)); @@ -1130,6 +1153,14 @@ void security_compute_av(u32 ssid, if (ebitmap_get_bit(&policydb->permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; + /* neveraudit domain? */ + if (ebitmap_get_bit(&policydb->neveraudit_map, scontext->type)) + avd->flags |= AVD_FLAGS_NEVERAUDIT; + + /* both permissive and neveraudit => allow */ + if (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT)) + goto allow; + tcontext = sidtab_search(sidtab, tsid); if (!tcontext) { pr_err("SELinux: %s: unrecognized SID %d\n", @@ -1149,6 +1180,8 @@ void security_compute_av(u32 ssid, policydb->allow_unknown); out: rcu_read_unlock(); + if (avd->flags & AVD_FLAGS_NEVERAUDIT) + avd->auditallow = avd->auditdeny = 0; return; allow: avd->allowed = 0xffffffff; @@ -1185,6 +1218,14 @@ void security_compute_av_user(u32 ssid, if (ebitmap_get_bit(&policydb->permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; + /* neveraudit domain? */ + if (ebitmap_get_bit(&policydb->neveraudit_map, scontext->type)) + avd->flags |= AVD_FLAGS_NEVERAUDIT; + + /* both permissive and neveraudit => allow */ + if (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT)) + goto allow; + tcontext = sidtab_search(sidtab, tsid); if (!tcontext) { pr_err("SELinux: %s: unrecognized SID %d\n", @@ -1202,6 +1243,8 @@ void security_compute_av_user(u32 ssid, NULL); out: rcu_read_unlock(); + if (avd->flags & AVD_FLAGS_NEVERAUDIT) + avd->auditallow = avd->auditdeny = 0; return; allow: avd->allowed = 0xffffffff; @@ -1886,11 +1929,17 @@ retry: goto out_unlock; } /* Obtain the sid for the context. 
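security_compute_av() and security_compute_av_user() gain the neveraudit_map checks shown above: the source type sets AVD_FLAGS_NEVERAUDIT, a domain that is both permissive and neveraudit short-circuits to the allow-everything path, and on exit a neveraudit domain has auditallow/auditdeny cleared so no audit records are produced. A standalone sketch of just that flag logic (flag values and the trimmed decision struct are assumptions of the sketch):

#include <stdbool.h>
#include <stdio.h>

#define AVD_FLAGS_PERMISSIVE	0x0001
#define AVD_FLAGS_NEVERAUDIT	0x0002	/* value assumed for the sketch */

struct av_decision {
	unsigned int allowed;
	unsigned int auditallow;
	unsigned int auditdeny;
	unsigned int flags;
};

static void compute_av_sketch(struct av_decision *avd,
			      bool permissive, bool neveraudit)
{
	avd->allowed = 0;
	avd->auditallow = 0;
	avd->auditdeny = 0xffffffff;
	avd->flags = 0;

	if (permissive)
		avd->flags |= AVD_FLAGS_PERMISSIVE;
	if (neveraudit)
		avd->flags |= AVD_FLAGS_NEVERAUDIT;

	/* both permissive and neveraudit: allow everything outright */
	if (avd->flags == (AVD_FLAGS_PERMISSIVE | AVD_FLAGS_NEVERAUDIT))
		avd->allowed = 0xffffffff;

	/* neveraudit domains never generate audit records */
	if (avd->flags & AVD_FLAGS_NEVERAUDIT)
		avd->auditallow = avd->auditdeny = 0;
}

int main(void)
{
	struct av_decision avd;

	compute_av_sketch(&avd, true, true);
	printf("allowed=%#x auditdeny=%#x\n", avd.allowed, avd.auditdeny);
	return 0;
}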
*/ - rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid); - if (rc == -ESTALE) { - rcu_read_unlock(); - context_destroy(&newcontext); - goto retry; + if (context_equal(scontext, &newcontext)) + *out_sid = ssid; + else if (context_equal(tcontext, &newcontext)) + *out_sid = tsid; + else { + rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + context_destroy(&newcontext); + goto retry; + } } out_unlock: rcu_read_unlock(); @@ -2550,13 +2599,14 @@ out: * @name: interface name * @if_sid: interface SID */ -int security_netif_sid(char *name, u32 *if_sid) +int security_netif_sid(const char *name, u32 *if_sid) { struct selinux_policy *policy; struct policydb *policydb; struct sidtab *sidtab; int rc; struct ocontext *c; + bool wildcard_support; if (!selinux_initialized()) { *if_sid = SECINITSID_NETIF; @@ -2569,11 +2619,18 @@ retry: policy = rcu_dereference(selinux_state.policy); policydb = &policy->policydb; sidtab = policy->sidtab; + wildcard_support = ebitmap_get_bit(&policydb->policycaps, POLICYDB_CAP_NETIF_WILDCARD); c = policydb->ocontexts[OCON_NETIF]; while (c) { - if (strcmp(name, c->u.name) == 0) - break; + if (wildcard_support) { + if (match_wildcard(c->u.name, name)) + break; + } else { + if (strcmp(c->u.name, name) == 0) + break; + } + c = c->next; } @@ -2593,17 +2650,15 @@ out: return rc; } -static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask) +static bool match_ipv6_addrmask(const u32 input[4], const u32 addr[4], const u32 mask[4]) { - int i, fail = 0; + int i; for (i = 0; i < 4; i++) - if (addr[i] != (input[i] & mask[i])) { - fail = 1; - break; - } + if (addr[i] != (input[i] & mask[i])) + return false; - return !fail; + return true; } /** @@ -2614,7 +2669,7 @@ static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask) * @out_sid: security identifier */ int security_node_sid(u16 domain, - void *addrp, + const void *addrp, u32 addrlen, u32 *out_sid) { @@ -2643,7 +2698,7 @@ retry: if (addrlen != sizeof(u32)) goto out; - addr = *((u32 *)addrp); + addr = *((const u32 *)addrp); c = policydb->ocontexts[OCON_NODE]; while (c) { @@ -2708,7 +2763,7 @@ out: */ int security_get_user_sids(u32 fromsid, - char *username, + const char *username, u32 **sids, u32 *nel) { @@ -2843,6 +2898,7 @@ static inline int __security_genfs_sid(struct selinux_policy *policy, struct genfs *genfs; struct ocontext *c; int cmp = 0; + bool wildcard; while (path[0] == '/' && path[1] == '/') path++; @@ -2859,11 +2915,20 @@ static inline int __security_genfs_sid(struct selinux_policy *policy, if (!genfs || cmp) return -ENOENT; + wildcard = ebitmap_get_bit(&policy->policydb.policycaps, + POLICYDB_CAP_GENFS_SECLABEL_WILDCARD); for (c = genfs->head; c; c = c->next) { - size_t len = strlen(c->u.name); - if ((!c->v.sclass || sclass == c->v.sclass) && - (strncmp(c->u.name, path, len) == 0)) - break; + if (!c->v.sclass || sclass == c->v.sclass) { + if (wildcard) { + if (match_wildcard(c->u.name, path)) + break; + } else { + size_t len = strlen(c->u.name); + + if ((strncmp(c->u.name, path, len)) == 0) + break; + } + } } if (!c) @@ -3030,7 +3095,7 @@ err: } -int security_set_bools(u32 len, int *values) +int security_set_bools(u32 len, const int *values) { struct selinux_state *state = &selinux_state; struct selinux_policy *newpolicy, *oldpolicy; @@ -3329,7 +3394,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type, __func__, xfrm_sid); goto out; } - rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 
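With the POLICYDB_CAP_NETIF_WILDCARD and genfs seclabel wildcard capabilities above, interface and genfs path contexts are matched with match_wildcard() from <linux/parser.h> (hence the new include in services.c) instead of strcmp()/strncmp(). That helper handles '*' and '?'; the sketch below is a simplified recursive analogue for illustration, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

/* simplified '*' / '?' matcher; the real match_wildcard() lives in
 * lib/parser.c and is iterative */
static bool wildcard_match(const char *pattern, const char *str)
{
	if (*pattern == '\0')
		return *str == '\0';
	if (*pattern == '*')
		return wildcard_match(pattern + 1, str) ||
		       (*str && wildcard_match(pattern, str + 1));
	if (*str && (*pattern == '?' || *pattern == *str))
		return wildcard_match(pattern + 1, str + 1);
	return false;
}

int main(void)
{
	/* e.g. a netifcon rule written for "eth*" now covers eth0, eth1, ... */
	printf("%d\n", wildcard_match("eth*", "eth0"));		/* 1 */
	printf("%d\n", wildcard_match("eth*", "wlan0"));	/* 0 */
	return 0;
}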
0 : -EACCES); + rc = (mls_context_equal(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES); if (rc) goto out; @@ -3505,6 +3570,13 @@ struct selinux_audit_rule { struct context au_ctxt; }; +int selinux_audit_rule_avc_callback(u32 event) +{ + if (event == AVC_CALLBACK_RESET) + return audit_update_lsm_rules(); + return 0; +} + void selinux_audit_rule_free(void *vrule) { struct selinux_audit_rule *rule = vrule; @@ -3755,25 +3827,6 @@ out: return match; } -static int aurule_avc_callback(u32 event) -{ - if (event == AVC_CALLBACK_RESET) - return audit_update_lsm_rules(); - return 0; -} - -static int __init aurule_init(void) -{ - int err; - - err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET); - if (err) - panic("avc_add_callback() failed, error %d\n", err); - - return err; -} -__initcall(aurule_init); - #ifdef CONFIG_NETLABEL /** * security_netlbl_cache_add - Add an entry to the NetLabel cache diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index c8848cbba81f..59f8c09158ef 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -66,7 +66,7 @@ static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash) hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) { if (entry->hash != hash) continue; - if (context_cmp(&entry->context, context)) { + if (context_equal(&entry->context, context)) { sid = entry->sid; break; } @@ -114,12 +114,12 @@ int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context) int sidtab_hash_stats(struct sidtab *sidtab, char *page) { - int i; + unsigned int i; int chain_len = 0; int slots_used = 0; int entries = 0; int max_chain_len = 0; - int cur_bucket = 0; + unsigned int cur_bucket = 0; struct sidtab_entry *entry; rcu_read_lock(); diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 90ec4ef1b082..61d56b0c2be1 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -94,7 +94,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, ctx->ctx_doi = XFRM_SC_DOI_LSM; ctx->ctx_alg = XFRM_SC_ALG_SELINUX; - ctx->ctx_len = str_len; + ctx->ctx_len = str_len + 1; memcpy(ctx->ctx_str, &uctx[1], str_len); ctx->ctx_str[str_len] = '\0'; rc = security_context_to_sid(ctx->ctx_str, str_len, diff --git a/security/smack/smack.h b/security/smack/smack.h index dbf8d7226eb5..9b9eb262fe33 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -42,7 +42,7 @@ /* * This is the repository for labels seen so that it is - * not necessary to keep allocating tiny chuncks of memory + * not necessary to keep allocating tiny chunks of memory * and so that they can be shared. * * Labels are never modified in place. Anytime a label @@ -152,6 +152,7 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ }; +#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -162,7 +163,9 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; +#endif /* CONFIG_IPV6 */ +#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. 
*/ @@ -175,6 +178,7 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; +#endif /* SMACK_IPV6_PORT_LABELING */ struct smack_known_list_elem { struct list_head list; @@ -272,6 +276,20 @@ struct smk_audit_info { }; /* + * Initialization + */ +#if defined(CONFIG_SECURITY_SMACK_NETFILTER) +int smack_nf_ip_init(void); +#else +static inline int smack_nf_ip_init(void) +{ + return 0; +} +#endif +int init_smk_fs(void); +int smack_initcall(void); + +/* * These functions are in smack_access.c */ int smk_access_entry(char *, char *, struct list_head *); @@ -280,10 +298,14 @@ int smk_access(struct smack_known *, struct smack_known *, int smk_tskacc(struct task_smack *, struct smack_known *, u32, struct smk_audit_info *); int smk_curacc(struct smack_known *, u32, struct smk_audit_info *); +int smack_str_from_perm(char *string, int access); struct smack_known *smack_from_secid(const u32); +int smk_parse_label_len(const char *string, int len); char *smk_parse_smack(const char *string, int len); int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int); struct smack_known *smk_import_entry(const char *, int); +struct smack_known *smk_import_valid_label(const char *label, int label_len, + gfp_t gfp); void smk_insert_entry(struct smack_known *skp); struct smack_known *smk_find_entry(const char *); bool smack_privileged(int cap); @@ -314,7 +336,9 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; +#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; +#endif /* CONFIG_IPV6 */ extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; @@ -425,6 +449,12 @@ static inline struct smack_known *smk_of_current(void) return smk_of_task(smack_cred(current_cred())); } +void smack_log(char *subject_label, char *object_label, + int request, + int result, struct smk_audit_info *auditdata); + +#ifdef CONFIG_AUDIT + /* * logging functions */ @@ -432,12 +462,6 @@ static inline struct smack_known *smk_of_current(void) #define SMACK_AUDIT_ACCEPT 0x2 extern int log_policy; -void smack_log(char *subject_label, char *object_label, - int request, - int result, struct smk_audit_info *auditdata); - -#ifdef CONFIG_AUDIT - /* * some inline functions to set up audit data * they do nothing if CONFIG_AUDIT is not set diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index 585e5e35710b..fc507dcc7ea5 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -45,11 +45,13 @@ LIST_HEAD(smack_known_list); */ static u32 smack_next_secid = 10; +#ifdef CONFIG_AUDIT /* * what events do we log * can be overwritten at run-time by /smack/logging */ int log_policy = SMACK_AUDIT_DENIED; +#endif /* CONFIG_AUDIT */ /** * smk_access_entry - look up matching access rule @@ -242,7 +244,7 @@ int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known, } /* - * Allow for priviliged to override policy. + * Allow for privileged to override policy. 
*/ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE)) rc = 0; @@ -275,15 +277,14 @@ int smk_curacc(struct smack_known *obj_known, return smk_tskacc(tsp, obj_known, mode, a); } -#ifdef CONFIG_AUDIT /** - * smack_str_from_perm : helper to transalate an int to a + * smack_str_from_perm : helper to translate an int to a * readable string * @string : the string to fill * @access : the int * */ -static inline void smack_str_from_perm(char *string, int access) +int smack_str_from_perm(char *string, int access) { int i = 0; @@ -299,8 +300,15 @@ static inline void smack_str_from_perm(char *string, int access) string[i++] = 't'; if (access & MAY_LOCK) string[i++] = 'l'; + if (access & MAY_BRINGUP) + string[i++] = 'b'; + if (i == 0) + string[i++] = '-'; string[i] = '\0'; + return i; } + +#ifdef CONFIG_AUDIT /** * smack_log_callback - SMACK specific information * will be called by generic audit code @@ -435,19 +443,19 @@ struct smack_known *smk_find_entry(const char *string) } /** - * smk_parse_smack - parse smack label from a text string - * @string: a text string that might contain a Smack label - * @len: the maximum size, or zero if it is NULL terminated. + * smk_parse_label_len - calculate the length of the starting segment + * in the string that constitutes a valid smack label + * @string: a text string that might contain a Smack label at the beginning + * @len: the maximum size to look into, may be zero if string is null-terminated * - * Returns a pointer to the clean label or an error code. + * Returns the length of the segment (0 < L < SMK_LONGLABEL) or an error code. */ -char *smk_parse_smack(const char *string, int len) +int smk_parse_label_len(const char *string, int len) { - char *smack; int i; - if (len <= 0) - len = strlen(string) + 1; + if (len <= 0 || len > SMK_LONGLABEL) + len = SMK_LONGLABEL; /* * Reserve a leading '-' as an indicator that @@ -455,7 +463,7 @@ char *smk_parse_smack(const char *string, int len) * including /smack/cipso and /smack/cipso2 */ if (string[0] == '-') - return ERR_PTR(-EINVAL); + return -EINVAL; for (i = 0; i < len; i++) if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' || @@ -463,6 +471,25 @@ char *smk_parse_smack(const char *string, int len) break; if (i == 0 || i >= SMK_LONGLABEL) + return -EINVAL; + + return i; +} + +/** + * smk_parse_smack - copy the starting segment in the string + * that constitutes a valid smack label + * @string: a text string that might contain a Smack label at the beginning + * @len: the maximum size to look into, may be zero if string is null-terminated + * + * Returns a pointer to the copy of the label or an error code. + */ +char *smk_parse_smack(const char *string, int len) +{ + char *smack; + int i = smk_parse_label_len(string, len); + + if (i < 0) return ERR_PTR(-EINVAL); smack = kstrndup(string, i, GFP_NOFS); @@ -546,31 +573,26 @@ int smack_populate_secattr(struct smack_known *skp) } /** - * smk_import_entry - import a label, return the list entry - * @string: a text string that might be a Smack label - * @len: the maximum size, or zero if it is NULL terminated. + * smk_import_valid_allocated_label - import a label, return the list entry + * @smack: a text string that is a valid Smack label and may be kfree()ed. + * It is consumed: either becomes a part of the entry or kfree'ed. + * @gfp: Allocation type * - * Returns a pointer to the entry in the label list that - * matches the passed string, adding it if necessary, - * or an error code. 
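smk_parse_smack() is split above so that smk_parse_label_len() performs the validation on its own: reject a leading '-', scan until a character outside the printable range or a '/' (the kernel condition continues with a few more rejected characters not visible in this excerpt), and require the result to be non-empty and shorter than SMK_LONGLABEL. A userspace sketch of that scan, with SMK_LONGLABEL taken as 256 as an assumption:

#include <stdio.h>

#define SMK_LONGLABEL 256	/* value assumed for the sketch */

/* sketch of smk_parse_label_len(): length of the leading valid label */
static int parse_label_len(const char *string, int len)
{
	int i;

	if (len <= 0 || len > SMK_LONGLABEL)
		len = SMK_LONGLABEL;

	/* leading '-' is reserved, e.g. for /smack/cipso interfaces */
	if (string[0] == '-')
		return -22;			/* -EINVAL */

	for (i = 0; i < len; i++)
		if (string[i] > '~' || string[i] <= ' ' || string[i] == '/')
			/* the kernel rejects a few more characters here,
			 * omitted in this sketch */
			break;

	if (i == 0 || i >= SMK_LONGLABEL)
		return -22;

	return i;
}

int main(void)
{
	printf("%d\n", parse_label_len("System/foo", 0));	/* 6: stops at '/' */
	printf("%d\n", parse_label_len("-reserved", 0));	/* -22 */
	return 0;
}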
+ * Returns: see description of smk_import_entry() */ -struct smack_known *smk_import_entry(const char *string, int len) +static struct smack_known * +smk_import_allocated_label(char *smack, gfp_t gfp) { struct smack_known *skp; - char *smack; int rc; - smack = smk_parse_smack(string, len); - if (IS_ERR(smack)) - return ERR_CAST(smack); - mutex_lock(&smack_known_lock); skp = smk_find_entry(smack); if (skp != NULL) goto freeout; - skp = kzalloc(sizeof(*skp), GFP_NOFS); + skp = kzalloc(sizeof(*skp), gfp); if (skp == NULL) { skp = ERR_PTR(-ENOMEM); goto freeout; @@ -601,6 +623,44 @@ unlockout: } /** + * smk_import_entry - import a label, return the list entry + * @string: a text string that might contain a Smack label at the beginning + * @len: the maximum size to look into, may be zero if string is null-terminated + * + * Returns a pointer to the entry in the label list that + * matches the passed string, adding it if necessary, + * or an error code. + */ +struct smack_known *smk_import_entry(const char *string, int len) +{ + char *smack = smk_parse_smack(string, len); + + if (IS_ERR(smack)) + return ERR_CAST(smack); + + return smk_import_allocated_label(smack, GFP_NOFS); +} + +/** + * smk_import_valid_label - import a label, return the list entry + * @label: a text string that is a valid Smack label, not null-terminated + * @label_len: the length of the text string in the @label + * @gfp: the GFP mask used for allocating memory for the @label text string copy + * + * Return: see description of smk_import_entry() + */ +struct smack_known * +smk_import_valid_label(const char *label, int label_len, gfp_t gfp) +{ + char *smack = kstrndup(label, label_len, gfp); + + if (!smack) + return ERR_PTR(-ENOMEM); + + return smk_import_allocated_label(smack, gfp); +} + +/** * smack_from_secid - find the Smack label associated with a secid * @secid: an integer that might be associated with a Smack label * diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 0c476282e279..a0bd4919a9d9 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -24,7 +24,6 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> -#include <linux/dccp.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/mutex.h> @@ -107,23 +106,7 @@ static char *smk_bu_mess[] = { static void smk_bu_mode(int mode, char *s) { - int i = 0; - - if (mode & MAY_READ) - s[i++] = 'r'; - if (mode & MAY_WRITE) - s[i++] = 'w'; - if (mode & MAY_EXEC) - s[i++] = 'x'; - if (mode & MAY_APPEND) - s[i++] = 'a'; - if (mode & MAY_TRANSMUTE) - s[i++] = 't'; - if (mode & MAY_LOCK) - s[i++] = 'l'; - if (i == 0) - s[i++] = '-'; - s[i] = '\0'; + smack_str_from_perm(s, mode); } #endif @@ -980,6 +963,42 @@ static int smack_inode_alloc_security(struct inode *inode) } /** + * smk_rule_transmutes - does access rule for (subject,object) contain 't'? 
+ * @subject: a pointer to the subject's Smack label entry + * @object: a pointer to the object's Smack label entry + */ +static bool +smk_rule_transmutes(struct smack_known *subject, + const struct smack_known *object) +{ + int may; + + rcu_read_lock(); + may = smk_access_entry(subject->smk_known, object->smk_known, + &subject->smk_rules); + rcu_read_unlock(); + return (may > 0) && (may & MAY_TRANSMUTE); +} + +static int +xattr_dupval(struct xattr *xattrs, int *xattr_count, + const char *name, const void *value, unsigned int vallen) +{ + struct xattr * const xattr = lsm_get_xattr_slot(xattrs, xattr_count); + + if (!xattr) + return 0; + + xattr->value = kmemdup(value, vallen, GFP_NOFS); + if (!xattr->value) + return -ENOMEM; + + xattr->value_len = vallen; + xattr->name = name; + return 0; +} + +/** * smack_inode_init_security - copy out the smack from an inode * @inode: the newly created inode * @dir: containing directory object @@ -994,23 +1013,30 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir, struct xattr *xattrs, int *xattr_count) { struct task_smack *tsp = smack_cred(current_cred()); - struct inode_smack *issp = smack_inode(inode); - struct smack_known *skp = smk_of_task(tsp); - struct smack_known *isp = smk_of_inode(inode); + struct inode_smack * const issp = smack_inode(inode); struct smack_known *dsp = smk_of_inode(dir); - struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count); - int may; + int rc = 0; + int transflag = 0; + bool trans_cred; + bool trans_rule; /* + * UNIX domain sockets use lower level socket data. Let + * UDS inode have fixed * label to keep smack_inode_permission() calm + * when called from unix_find_bsd() + */ + if (S_ISSOCK(inode->i_mode)) { + /* forced label, no need to save to xattrs */ + issp->smk_inode = &smack_known_star; + goto instant_inode; + } + /* * If equal, transmuting already occurred in * smack_dentry_create_files_as(). No need to check again. */ - if (tsp->smk_task != tsp->smk_transmuted) { - rcu_read_lock(); - may = smk_access_entry(skp->smk_known, dsp->smk_known, - &skp->smk_rules); - rcu_read_unlock(); - } + trans_cred = (tsp->smk_task == tsp->smk_transmuted); + if (!trans_cred) + trans_rule = smk_rule_transmutes(smk_of_task(tsp), dsp); /* * In addition to having smk_task equal to smk_transmuted, @@ -1018,47 +1044,38 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir, * requests transmutation then by all means transmute. * Mark the inode as changed. */ - if ((tsp->smk_task == tsp->smk_transmuted) || - (may > 0 && ((may & MAY_TRANSMUTE) != 0) && - smk_inode_transmutable(dir))) { - struct xattr *xattr_transmute; - + if (trans_cred || (trans_rule && smk_inode_transmutable(dir))) { /* * The caller of smack_dentry_create_files_as() * should have overridden the current cred, so the * inode label was already set correctly in * smack_inode_alloc_security(). 
*/ - if (tsp->smk_task != tsp->smk_transmuted) - isp = issp->smk_inode = dsp; - - issp->smk_flags |= SMK_INODE_TRANSMUTE; - xattr_transmute = lsm_get_xattr_slot(xattrs, - xattr_count); - if (xattr_transmute) { - xattr_transmute->value = kmemdup(TRANS_TRUE, - TRANS_TRUE_SIZE, - GFP_NOFS); - if (!xattr_transmute->value) - return -ENOMEM; + if (!trans_cred) + issp->smk_inode = dsp; - xattr_transmute->value_len = TRANS_TRUE_SIZE; - xattr_transmute->name = XATTR_SMACK_TRANSMUTE; + if (S_ISDIR(inode->i_mode)) { + transflag = SMK_INODE_TRANSMUTE; + + if (xattr_dupval(xattrs, xattr_count, + XATTR_SMACK_TRANSMUTE, + TRANS_TRUE, + TRANS_TRUE_SIZE + )) + rc = -ENOMEM; } } - issp->smk_flags |= SMK_INODE_INSTANT; - - if (xattr) { - xattr->value = kstrdup(isp->smk_known, GFP_NOFS); - if (!xattr->value) - return -ENOMEM; - - xattr->value_len = strlen(isp->smk_known); - xattr->name = XATTR_SMACK_SUFFIX; - } - - return 0; + if (rc == 0) + if (xattr_dupval(xattrs, xattr_count, + XATTR_SMACK_SUFFIX, + issp->smk_inode->smk_known, + strlen(issp->smk_inode->smk_known) + )) + rc = -ENOMEM; +instant_inode: + issp->smk_flags |= (SMK_INODE_INSTANT | transflag); + return rc; } /** @@ -1332,13 +1349,23 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap, int check_import = 0; int check_star = 0; int rc = 0; + umode_t const i_mode = d_backing_inode(dentry)->i_mode; /* * Check label validity here so import won't fail in post_setxattr */ - if (strcmp(name, XATTR_NAME_SMACK) == 0 || - strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || - strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { + if (strcmp(name, XATTR_NAME_SMACK) == 0) { + /* + * UDS inode has fixed label + */ + if (S_ISSOCK(i_mode)) { + rc = -EINVAL; + } else { + check_priv = 1; + check_import = 1; + } + } else if (strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || + strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { check_priv = 1; check_import = 1; } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || @@ -1348,7 +1375,7 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap, check_star = 1; } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { check_priv = 1; - if (!S_ISDIR(d_backing_inode(dentry)->i_mode) || + if (!S_ISDIR(i_mode) || size != TRANS_TRUE_SIZE || strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; @@ -1479,12 +1506,15 @@ static int smack_inode_removexattr(struct mnt_idmap *idmap, * Don't do anything special for these. 
* XATTR_NAME_SMACKIPIN * XATTR_NAME_SMACKIPOUT + * XATTR_NAME_SMACK if S_ISSOCK (UDS inode has fixed label) */ if (strcmp(name, XATTR_NAME_SMACK) == 0) { - struct super_block *sbp = dentry->d_sb; - struct superblock_smack *sbsp = smack_superblock(sbp); + if (!S_ISSOCK(d_backing_inode(dentry)->i_mode)) { + struct super_block *sbp = dentry->d_sb; + struct superblock_smack *sbsp = smack_superblock(sbp); - isp->smk_inode = sbsp->smk_default; + isp->smk_inode = sbsp->smk_default; + } } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) isp->smk_task = NULL; else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) @@ -1950,7 +1980,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk, */ file = fown->file; - /* we don't log here as rc can be overriden */ + /* we don't log here as rc can be overridden */ blob = smack_file(file); skp = *blob; rc = smk_access(skp, tkp, MAY_DELIVER, NULL); @@ -2508,6 +2538,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; } +#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2575,6 +2606,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) return NULL; } +#endif /* CONFIG_IPV6 */ /** * smack_netlbl_add - Set the secattr on a socket @@ -2679,6 +2711,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) return rc; } +#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2711,6 +2744,7 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } +#endif /* CONFIG_IPV6 */ #ifdef SMACK_IPV6_PORT_LABELING /** @@ -3043,7 +3077,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; - if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + +#if IS_ENABLED(CONFIG_IPV6) + if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL; @@ -3063,6 +3099,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return rc; } +#endif /* CONFIG_IPV6 */ + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); @@ -3594,7 +3632,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) */ /* - * UNIX domain sockets use lower level socket data. + * UDS inode has fixed label (*) */ if (S_ISSOCK(inode->i_mode)) { final = &smack_known_star; @@ -3672,7 +3710,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * @attr: which attribute to fetch * @ctx: buffer to receive the result * @size: available size in, actual size out - * @flags: unused + * @flags: reserved, currently zero * * Fill the passed user space @ctx with the details of the requested * attribute. @@ -3733,47 +3771,55 @@ static int smack_getprocattr(struct task_struct *p, const char *name, char **val * Sets the Smack value of the task. 
Only setting self * is permitted and only with privilege * - * Returns the length of the smack label or an error code + * Returns zero on success or an error code */ -static int do_setattr(u64 attr, void *value, size_t size) +static int do_setattr(unsigned int attr, void *value, size_t size) { struct task_smack *tsp = smack_cred(current_cred()); struct cred *new; struct smack_known *skp; - struct smack_known_list_elem *sklep; - int rc; - - if (!smack_privileged(CAP_MAC_ADMIN) && list_empty(&tsp->smk_relabel)) - return -EPERM; + int label_len; + /* + * let unprivileged user validate input, check permissions later + */ if (value == NULL || size == 0 || size >= SMK_LONGLABEL) return -EINVAL; - if (attr != LSM_ATTR_CURRENT) - return -EOPNOTSUPP; - - skp = smk_import_entry(value, size); - if (IS_ERR(skp)) - return PTR_ERR(skp); + label_len = smk_parse_label_len(value, size); + if (label_len < 0 || label_len != size) + return -EINVAL; /* * No process is ever allowed the web ("@") label * and the star ("*") label. */ - if (skp == &smack_known_web || skp == &smack_known_star) - return -EINVAL; + if (label_len == 1 /* '@', '*' */) { + const char c = *(const char *)value; + + if (c == *smack_known_web.smk_known || + c == *smack_known_star.smk_known) + return -EPERM; + } if (!smack_privileged(CAP_MAC_ADMIN)) { - rc = -EPERM; - list_for_each_entry(sklep, &tsp->smk_relabel, list) - if (sklep->smk_label == skp) { - rc = 0; - break; - } - if (rc) - return rc; + const struct smack_known_list_elem *sklep; + list_for_each_entry(sklep, &tsp->smk_relabel, list) { + const char *cp = sklep->smk_label->smk_known; + + if (strlen(cp) == label_len && + strncmp(cp, value, label_len) == 0) + goto in_relabel; + } + return -EPERM; +in_relabel: + ; } + skp = smk_import_valid_label(value, label_len, GFP_KERNEL); + if (IS_ERR(skp)) + return PTR_ERR(skp); + new = prepare_creds(); if (new == NULL) return -ENOMEM; @@ -3786,7 +3832,7 @@ static int do_setattr(u64 attr, void *value, size_t size) smk_destroy_label_list(&tsp->smk_relabel); commit_creds(new); - return size; + return 0; } /** @@ -3794,7 +3840,7 @@ static int do_setattr(u64 attr, void *value, size_t size) * @attr: which attribute to set * @ctx: buffer containing the data * @size: size of @ctx - * @flags: unused + * @flags: reserved, must be zero * * Fill the passed user space @ctx with the details of the requested * attribute. @@ -3804,12 +3850,26 @@ static int do_setattr(u64 attr, void *value, size_t size) static int smack_setselfattr(unsigned int attr, struct lsm_ctx *ctx, u32 size, u32 flags) { - int rc; + if (attr != LSM_ATTR_CURRENT) + return -EOPNOTSUPP; - rc = do_setattr(attr, ctx->ctx, ctx->ctx_len); - if (rc > 0) - return 0; - return rc; + if (ctx->flags) + return -EINVAL; + /* + * string must have \0 terminator, included in ctx->ctx + * (see description of struct lsm_ctx) + */ + if (ctx->ctx_len == 0) + return -EINVAL; + + if (ctx->ctx[ctx->ctx_len - 1] != '\0') + return -EINVAL; + /* + * other do_setattr() caller, smack_setprocattr(), + * does not count \0 into size, so + * decreasing length by 1 to accommodate the divergence. + */ + return do_setattr(attr, ctx->ctx, ctx->ctx_len - 1); } /** @@ -3821,15 +3881,39 @@ static int smack_setselfattr(unsigned int attr, struct lsm_ctx *ctx, * Sets the Smack value of the task. 
Only setting self * is permitted and only with privilege * - * Returns the length of the smack label or an error code + * Returns the size of the input value or an error code */ static int smack_setprocattr(const char *name, void *value, size_t size) { - int attr = lsm_name_to_attr(name); + size_t realsize = size; + unsigned int attr = lsm_name_to_attr(name); - if (attr != LSM_ATTR_UNDEF) - return do_setattr(attr, value, size); - return -EINVAL; + switch (attr) { + case LSM_ATTR_UNDEF: return -EINVAL; + default: return -EOPNOTSUPP; + case LSM_ATTR_CURRENT: + ; + } + + /* + * The value for the "current" attribute is the label + * followed by one of the 4 trailers: none, \0, \n, \n\0 + * + * I.e. following inputs are accepted as 3-characters long label "foo": + * + * "foo" (3 characters) + * "foo\0" (4 characters) + * "foo\n" (4 characters) + * "foo\n\0" (5 characters) + */ + + if (realsize && (((const char *)value)[realsize - 1] == '\0')) + --realsize; + + if (realsize && (((const char *)value)[realsize - 1] == '\n')) + --realsize; + + return do_setattr(attr, value, realsize) ? : size; } /** @@ -4069,7 +4153,6 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip) __be16 frag_off; struct tcphdr _tcph, *th; struct udphdr _udph, *uh; - struct dccp_hdr _dccph, *dh; sip->sin6_port = 0; @@ -4098,11 +4181,6 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip) if (uh != NULL) sip->sin6_port = uh->source; break; - case IPPROTO_DCCP: - dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); - if (dh != NULL) - sip->sin6_port = dh->dccph_sport; - break; } return proto; } @@ -4211,7 +4289,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) /* * Receiving a packet requires that the other end * be able to write here. Read access is not required. - * This is the simplist possible security model + * This is the simplest possible security model * for networking. */ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad); @@ -4224,7 +4302,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) case PF_INET6: proto = smk_skb_to_addr_ipv6(skb, &sadd); if (proto != IPPROTO_UDP && proto != IPPROTO_UDPLITE && - proto != IPPROTO_TCP && proto != IPPROTO_DCCP) + proto != IPPROTO_TCP) break; #ifdef SMACK_IPV6_SECMARK_LABELING skp = smack_from_skb(skb); @@ -4359,29 +4437,6 @@ static int smack_socket_getpeersec_dgram(struct socket *sock, } /** - * smack_sock_graft - Initialize a newly created socket with an existing sock - * @sk: child sock - * @parent: parent socket - * - * Set the smk_{in,out} state of an existing sock based on the process that - * is creating the new socket. - */ -static void smack_sock_graft(struct sock *sk, struct socket *parent) -{ - struct socket_smack *ssp; - struct smack_known *skp = smk_of_current(); - - if (sk == NULL || - (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) - return; - - ssp = smack_sock(sk); - ssp->smk_in = skp; - ssp->smk_out = skp; - /* cssp->smk_packet is already set in smack_inet_csk_clone() */ -} - -/** * smack_inet_conn_request - Smack access check on connect * @sk: socket involved * @skb: packet @@ -4717,7 +4772,7 @@ static int smack_post_notification(const struct cred *w_cred, * @gfp: type of the memory for the allocation * * Prepare to audit cases where (@field @op @rulestr) is true. - * The label to be audited is created if necessay. + * The label to be audited is created if necessary. 
*/ static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp) @@ -4818,40 +4873,47 @@ static int smack_ismaclabel(const char *name) } /** + * smack_to_secctx - fill a lsm_context + * @skp: Smack label + * @cp: destination + * + * Fill the passed @cp and return the length of the string + */ +static int smack_to_secctx(struct smack_known *skp, struct lsm_context *cp) +{ + int len = strlen(skp->smk_known); + + if (cp) { + cp->context = skp->smk_known; + cp->len = len; + cp->id = LSM_ID_SMACK; + } + return len; +} + +/** * smack_secid_to_secctx - return the smack label for a secid * @secid: incoming integer - * @secdata: destination - * @seclen: how long it is + * @cp: destination * * Exists for networking code. */ -static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +static int smack_secid_to_secctx(u32 secid, struct lsm_context *cp) { - struct smack_known *skp = smack_from_secid(secid); - - if (secdata) - *secdata = skp->smk_known; - *seclen = strlen(skp->smk_known); - return 0; + return smack_to_secctx(smack_from_secid(secid), cp); } /** * smack_lsmprop_to_secctx - return the smack label * @prop: includes incoming Smack data - * @secdata: destination - * @seclen: how long it is + * @cp: destination * * Exists for audit code. */ -static int smack_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, - u32 *seclen) +static int smack_lsmprop_to_secctx(struct lsm_prop *prop, + struct lsm_context *cp) { - struct smack_known *skp = prop->smack.skp; - - if (secdata) - *secdata = skp->smk_known; - *seclen = strlen(skp->smk_known); - return 0; + return smack_to_secctx(prop->smack.skp, cp); } /** @@ -4881,6 +4943,11 @@ static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { + /* + * UDS inode has fixed label. Ignore nfs label. + */ + if (S_ISSOCK(inode->i_mode)) + return 0; return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0); } @@ -4891,12 +4958,13 @@ static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) ctx, ctxlen, 0, NULL); } -static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +static int smack_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { struct smack_known *skp = smk_of_inode(inode); - *ctx = skp->smk_known; - *ctxlen = strlen(skp->smk_known); + cp->context = skp->smk_known; + cp->len = strlen(skp->smk_known); + cp->id = LSM_ID_SMACK; return 0; } @@ -4938,14 +5006,13 @@ static int smack_inode_copy_up_xattr(struct dentry *src, const char *name) } static int smack_dentry_create_files_as(struct dentry *dentry, int mode, - struct qstr *name, + const struct qstr *name, const struct cred *old, struct cred *new) { struct task_smack *otsp = smack_cred(old); struct task_smack *ntsp = smack_cred(new); struct inode_smack *isp; - int may; /* * Use the process credential unless all of @@ -4959,18 +5026,12 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode, isp = smack_inode(d_inode(dentry->d_parent)); if (isp->smk_flags & SMK_INODE_TRANSMUTE) { - rcu_read_lock(); - may = smk_access_entry(otsp->smk_task->smk_known, - isp->smk_inode->smk_known, - &otsp->smk_task->smk_rules); - rcu_read_unlock(); - /* * If the directory is transmuting and the rule * providing access is transmuting use the containing * directory label instead of the process label. 
*/ - if (may > 0 && (may & MAY_TRANSMUTE)) { + if (smk_rule_transmutes(otsp->smk_task, isp->smk_inode)) { ntsp->smk_task = isp->smk_inode; ntsp->smk_transmuted = ntsp->smk_task; } @@ -5187,7 +5248,6 @@ static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(sk_free_security, smack_sk_free_security), #endif LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security), - LSM_HOOK_INIT(sock_graft, smack_sock_graft), LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request), LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone), @@ -5298,16 +5358,30 @@ static __init int smack_init(void) /* initialize the smack_known_list */ init_smack_known_list(); + /* Inform the audit system that secctx is used */ + audit_cfg_lsm(&smack_lsmid, + AUDIT_CFG_LSM_SECCTX_SUBJECT | + AUDIT_CFG_LSM_SECCTX_OBJECT); + return 0; } +int __init smack_initcall(void) +{ + int rc_fs = init_smk_fs(); + int rc_nf = smack_nf_ip_init(); + + return rc_fs ? rc_fs : rc_nf; +} + /* * Smack requires early initialization in order to label * all processes and objects when they are created. */ DEFINE_LSM(smack) = { - .name = "smack", + .id = &smack_lsmid, .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE, .blobs = &smack_blob_sizes, .init = smack_init, + .initcall_device = smack_initcall, }; diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c index 8fd747b3653a..17ba578b1308 100644 --- a/security/smack/smack_netfilter.c +++ b/security/smack/smack_netfilter.c @@ -68,7 +68,7 @@ static struct pernet_operations smack_net_ops = { .exit = smack_nf_unregister, }; -static int __init smack_nf_ip_init(void) +int __init smack_nf_ip_init(void) { if (smack_enabled == 0) return 0; @@ -76,5 +76,3 @@ static int __init smack_nf_ip_init(void) printk(KERN_DEBUG "Smack: Registering netfilter hooks\n"); return register_pernet_subsys(&smack_net_ops); } - -__initcall(smack_nf_ip_init); diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 1401412fd794..2a9d3f2ebbe1 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -41,7 +41,9 @@ enum smk_inos { SMK_AMBIENT = 7, /* internet ambient label */ SMK_NET4ADDR = 8, /* single label hosts */ SMK_ONLYCAP = 9, /* the only "capable" label */ +#ifdef CONFIG_AUDIT SMK_LOGGING = 10, /* logging */ +#endif /* CONFIG_AUDIT */ SMK_LOAD_SELF = 11, /* task specific rules */ SMK_ACCESSES = 12, /* access policy */ SMK_MAPPED = 13, /* CIPSO level indicating mapped label */ @@ -165,7 +167,7 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT; #define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN) /* - * Stricly for CIPSO level manipulation. + * Strictly for CIPSO level manipulation. * Set the category bit number in a smack label sized buffer. 
*/ static inline void smack_catset_bit(unsigned int cat, char *catsetp) @@ -562,6 +564,7 @@ static void smk_seq_stop(struct seq_file *s, void *v) static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max) { + char acc[SMK_NUM_ACCESS_TYPE + 1]; /* * Don't show any rules with label names too long for * interface file (/smack/load or /smack/load2) @@ -575,28 +578,11 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max) if (srp->smk_access == 0) return; - seq_printf(s, "%s %s", + smack_str_from_perm(acc, srp->smk_access); + seq_printf(s, "%s %s %s\n", srp->smk_subject->smk_known, - srp->smk_object->smk_known); - - seq_putc(s, ' '); - - if (srp->smk_access & MAY_READ) - seq_putc(s, 'r'); - if (srp->smk_access & MAY_WRITE) - seq_putc(s, 'w'); - if (srp->smk_access & MAY_EXEC) - seq_putc(s, 'x'); - if (srp->smk_access & MAY_APPEND) - seq_putc(s, 'a'); - if (srp->smk_access & MAY_TRANSMUTE) - seq_putc(s, 't'); - if (srp->smk_access & MAY_LOCK) - seq_putc(s, 'l'); - if (srp->smk_access & MAY_BRINGUP) - seq_putc(s, 'b'); - - seq_putc(s, '\n'); + srp->smk_object->smk_known, + acc); } /* @@ -828,7 +814,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file) static ssize_t smk_set_cipso(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int format) { - struct netlbl_lsm_catmap *old_cat, *new_cat = NULL; + struct netlbl_lsm_catmap *old_cat; struct smack_known *skp; struct netlbl_lsm_secattr ncats; char mapcatset[SMK_CIPSOLEN]; @@ -915,22 +901,15 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, smack_catset_bit(cat, mapcatset); } - ncats.flags = 0; - if (catlen == 0) { - ncats.attr.mls.cat = NULL; - ncats.attr.mls.lvl = maplevel; - new_cat = netlbl_catmap_alloc(GFP_ATOMIC); - if (new_cat) - new_cat->next = ncats.attr.mls.cat; - ncats.attr.mls.cat = new_cat; - skp->smk_netlabel.flags &= ~(1U << 3); - rc = 0; - } else { - rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); - } + + rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); if (rc >= 0) { old_cat = skp->smk_netlabel.attr.mls.cat; rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); + if (ncats.attr.mls.cat) + skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT; + else + skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT; skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; synchronize_rcu(); netlbl_catmap_free(old_cat); @@ -1098,13 +1077,12 @@ static int smk_open_net4addr(struct inode *inode, struct file *file) } /** - * smk_net4addr_insert + * smk_net4addr_insert - insert a new entry into the net4addrs list * @new : netlabel to insert * - * This helper insert netlabel in the smack_net4addrs list + * This helper inserts netlabel in the smack_net4addrs list * sorted by netmask length (longest to smallest) - * locked by &smk_net4addr_lock in smk_write_net4addr - * + * locked by &smk_net4addr_lock in smk_write_net4addr. */ static void smk_net4addr_insert(struct smk_net4addr *new) { @@ -1361,13 +1339,12 @@ static int smk_open_net6addr(struct inode *inode, struct file *file) } /** - * smk_net6addr_insert + * smk_net6addr_insert - insert a new entry into the net6addrs list * @new : entry to insert * * This inserts an entry in the smack_net6addrs list * sorted by netmask length (longest to smallest) - * locked by &smk_net6addr_lock in smk_write_net6addr - * + * locked by &smk_net6addr_lock in smk_write_net6addr. 
*/ static void smk_net6addr_insert(struct smk_net6addr *new) { @@ -2149,6 +2126,7 @@ static const struct file_operations smk_unconfined_ops = { }; #endif /* CONFIG_SECURITY_SMACK_BRINGUP */ +#ifdef CONFIG_AUDIT /** * smk_read_logging - read() for /smack/logging * @filp: file pointer, not actually used @@ -2213,6 +2191,7 @@ static const struct file_operations smk_logging_ops = { .write = smk_write_logging, .llseek = default_llseek, }; +#endif /* CONFIG_AUDIT */ /* * Seq_file read operations for /smack/load-self @@ -2899,8 +2878,10 @@ static int smk_fill_super(struct super_block *sb, struct fs_context *fc) "netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR}, [SMK_ONLYCAP] = { "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR}, +#ifdef CONFIG_AUDIT [SMK_LOGGING] = { "logging", &smk_logging_ops, S_IRUGO|S_IWUSR}, +#endif /* CONFIG_AUDIT */ [SMK_LOAD_SELF] = { "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO}, [SMK_ACCESSES] = { @@ -2979,7 +2960,7 @@ static int smk_init_fs_context(struct fs_context *fc) static struct file_system_type smk_fs_type = { .name = "smackfs", .init_fs_context = smk_init_fs_context, - .kill_sb = kill_litter_super, + .kill_sb = kill_anon_super, }; static struct vfsmount *smackfs_mount; @@ -2997,7 +2978,7 @@ static struct vfsmount *smackfs_mount; * Returns true if we were not chosen on boot or if * we were chosen and filesystem registration succeeded. */ -static int __init init_smk_fs(void) +int __init init_smk_fs(void) { int err; int rc; @@ -3040,5 +3021,3 @@ static int __init init_smk_fs(void) return err; } - -__initcall(init_smk_fs); diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 5c7b059a332a..0f78898bce09 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1981,6 +1981,114 @@ static int tomoyo_truncate(char *str) } /** + * tomoyo_numscan - sscanf() which stores the length of a decimal integer value. + * + * @str: String to scan. + * @head: Leading string that must start with. + * @width: Pointer to "int" for storing length of a decimal integer value after @head. + * @tail: Optional character that must match after a decimal integer value. + * + * Returns whether @str starts with @head and a decimal value follows @head. + */ +static bool tomoyo_numscan(const char *str, const char *head, int *width, const char tail) +{ + const char *cp; + const int n = strlen(head); + + if (!strncmp(str, head, n)) { + cp = str + n; + while (*cp && *cp >= '0' && *cp <= '9') + cp++; + if (*cp == tail || !tail) { + *width = cp - (str + n); + return *width != 0; + } + } + *width = 0; + return 0; +} + +/** + * tomoyo_patternize_path - Make patterns for file path. Used by learning mode. + * + * @buffer: Destination buffer. + * @len: Size of @buffer. + * @entry: Original line. + * + * Returns nothing. + */ +static void tomoyo_patternize_path(char *buffer, const int len, char *entry) +{ + int width; + char *cp = entry; + + /* Nothing to do if this line is not for "file" related entry. */ + if (strncmp(entry, "file ", 5)) + goto flush; + /* + * Nothing to do if there is no colon in this line, for this rewriting + * applies to only filesystems where numeric values in the path are volatile. + */ + cp = strchr(entry + 5, ':'); + if (!cp) { + cp = entry; + goto flush; + } + /* Flush e.g. "file ioctl" part. */ + while (*cp != ' ') + cp--; + *cp++ = '\0'; + tomoyo_addprintf(buffer, len, "%s ", entry); + /* e.g. 
file ioctl pipe:[$INO] $CMD */ + if (tomoyo_numscan(cp, "pipe:[", &width, ']')) { + cp += width + 7; + tomoyo_addprintf(buffer, len, "pipe:[\\$]"); + goto flush; + } + /* e.g. file ioctl socket:[$INO] $CMD */ + if (tomoyo_numscan(cp, "socket:[", &width, ']')) { + cp += width + 9; + tomoyo_addprintf(buffer, len, "socket:[\\$]"); + goto flush; + } + if (!strncmp(cp, "proc:/self", 10)) { + /* e.g. file read proc:/self/task/$TID/fdinfo/$FD */ + cp += 10; + tomoyo_addprintf(buffer, len, "proc:/self"); + } else if (tomoyo_numscan(cp, "proc:/", &width, 0)) { + /* e.g. file read proc:/$PID/task/$TID/fdinfo/$FD */ + /* + * Don't patternize $PID part if $PID == 1, for several + * programs access only files in /proc/1/ directory. + */ + cp += width + 6; + if (width == 1 && *(cp - 1) == '1') + tomoyo_addprintf(buffer, len, "proc:/1"); + else + tomoyo_addprintf(buffer, len, "proc:/\\$"); + } else { + goto flush; + } + /* Patternize $TID part if "/task/" follows. */ + if (tomoyo_numscan(cp, "/task/", &width, 0)) { + cp += width + 6; + tomoyo_addprintf(buffer, len, "/task/\\$"); + } + /* Patternize $FD part if "/fd/" or "/fdinfo/" follows. */ + if (tomoyo_numscan(cp, "/fd/", &width, 0)) { + cp += width + 4; + tomoyo_addprintf(buffer, len, "/fd/\\$"); + } else if (tomoyo_numscan(cp, "/fdinfo/", &width, 0)) { + cp += width + 8; + tomoyo_addprintf(buffer, len, "/fdinfo/\\$"); + } +flush: + /* Flush remaining part if any. */ + if (*cp) + tomoyo_addprintf(buffer, len, "%s", cp); +} + +/** * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode. * * @domain: Pointer to "struct tomoyo_domain_info". @@ -2003,7 +2111,8 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) if (!cp) return; *cp++ = '\0'; - len = strlen(cp) + 1; + /* Reserve some space for potentially using patterns. */ + len = strlen(cp) + 16; /* strstr() will return NULL if ordering is wrong. */ if (*cp == 'f') { argv0 = strstr(header, " argv[]={ \""); @@ -2020,10 +2129,10 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) if (symlink) len += tomoyo_truncate(symlink + 1) + 1; } - buffer = kmalloc(len, GFP_NOFS); + buffer = kmalloc(len, GFP_NOFS | __GFP_ZERO); if (!buffer) return; - snprintf(buffer, len - 1, "%s", cp); + tomoyo_patternize_path(buffer, len, cp); if (realpath) tomoyo_addprintf(buffer, len, " exec.%s", realpath); if (argv0) @@ -2665,7 +2774,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, if (head->w.avail >= head->writebuf_size - 1) { const int len = head->writebuf_size * 2; - char *cp = kzalloc(len, GFP_NOFS); + char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN); if (!cp) { error = -ENOMEM; diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 0e8e2e959aef..3b2a97d10a5d 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -924,6 +924,8 @@ struct tomoyo_task { /********** Function prototypes. **********/ +int tomoyo_interface_init(void); + bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, const struct tomoyo_group *group); bool tomoyo_compare_number_union(const unsigned long value, diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index aed9e3ef2c9e..5f9ccab26e9a 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -722,10 +722,17 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) ee->bprm = bprm; ee->r.obj = &ee->obj; ee->obj.path1 = bprm->file->f_path; - /* Get symlink's pathname of program. 
*/ + /* + * Get symlink's pathname of program, but fallback to realpath if + * symlink's pathname does not exist or symlink's pathname refers + * to proc filesystem (e.g. /dev/fd/<num> or /proc/self/fd/<num> ). + */ exename.name = tomoyo_realpath_nofollow(original_name); + if (exename.name && !strncmp(exename.name, "proc:/", 6)) { + kfree(exename.name); + exename.name = NULL; + } if (!exename.name) { - /* Fallback to realpath if symlink's pathname does not exist. */ exename.name = tomoyo_realpath_from_path(&bprm->file->f_path); if (!exename.name) goto out; @@ -913,7 +920,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, #ifdef CONFIG_MMU /* * This is called at execve() time in order to dig around - * in the argv/environment of the new proceess + * in the argv/environment of the new process * (represented by bprm). */ mmap_read_lock(bprm->mm); diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index a2705798476f..33933645f5b9 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -229,11 +229,11 @@ static void __init tomoyo_create_entry(const char *name, const umode_t mode, } /** - * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface. + * tomoyo_interface_init - Initialize /sys/kernel/security/tomoyo/ interface. * * Returns 0. */ -static int __init tomoyo_initerface_init(void) +int __init tomoyo_interface_init(void) { struct tomoyo_domain_info *domain; struct dentry *tomoyo_dir; @@ -269,5 +269,3 @@ static int __init tomoyo_initerface_init(void) tomoyo_load_builtin_policy(); return 0; } - -fs_initcall(tomoyo_initerface_init); diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 04a92c3d65d4..c66e02ed8ee3 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -514,7 +514,7 @@ struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = { * Returns 0. */ static int tomoyo_task_alloc(struct task_struct *task, - unsigned long clone_flags) + u64 clone_flags) { struct tomoyo_task *old = tomoyo_task(current); struct tomoyo_task *new = tomoyo_task(task); @@ -549,10 +549,7 @@ static const struct lsm_id tomoyo_lsmid = { .id = LSM_ID_TOMOYO, }; -/* - * tomoyo_security_ops is a "struct security_operations" which is used for - * registering TOMOYO. - */ +/* tomoyo_hooks is used for registering TOMOYO. 
*/ static struct security_hook_list tomoyo_hooks[] __ro_after_init = { LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare), LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds), @@ -615,9 +612,10 @@ static int __init tomoyo_init(void) } DEFINE_LSM(tomoyo) = { - .name = "tomoyo", + .id = &tomoyo_lsmid, .enabled = &tomoyo_enabled, .flags = LSM_FLAG_LEGACY_MAJOR, .blobs = &tomoyo_blob_sizes, .init = tomoyo_init, + .initcall_fs = tomoyo_interface_init, }; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index e1a5e13ea269..38b21ee0c560 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -76,7 +76,6 @@ static void report_access(const char *access, struct task_struct *target, struct task_struct *agent) { struct access_report_info *info; - char agent_comm[sizeof(agent->comm)]; assert_spin_locked(&target->alloc_lock); /* for target->comm */ @@ -86,8 +85,7 @@ static void report_access(const char *access, struct task_struct *target, */ pr_notice_ratelimited( "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n", - access, target->comm, target->pid, - get_task_comm(agent_comm, agent), agent->pid); + access, target->comm, target->pid, agent->comm, agent->pid); return; } @@ -224,7 +222,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int rc = -ENOSYS; - struct task_struct *myself = current; + struct task_struct *myself; switch (option) { case PR_SET_PTRACER: @@ -234,11 +232,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, * leader checking is handled later when walking the ancestry * at the time of PTRACE_ATTACH check. */ - rcu_read_lock(); - if (!thread_group_leader(myself)) - myself = rcu_dereference(myself->group_leader); - get_task_struct(myself); - rcu_read_unlock(); + myself = current->group_leader; if (arg2 == 0) { yama_ptracer_del(NULL, myself); @@ -257,7 +251,6 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, } } - put_task_struct(myself); break; } @@ -454,7 +447,7 @@ static int yama_dointvec_minmax(const struct ctl_table *table, int write, static int max_scope = YAMA_SCOPE_NO_ATTACH; -static struct ctl_table yama_sysctl_table[] = { +static const struct ctl_table yama_sysctl_table[] = { { .procname = "ptrace_scope", .data = &ptrace_scope, @@ -483,6 +476,6 @@ static int __init yama_init(void) } DEFINE_LSM(yama) = { - .name = "yama", + .id = &yama_lsmid, .init = yama_init, }; |
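As an illustration of the smack_setprocattr() trailer handling changed above, the following user-space sketch relabels the calling process through /proc/self/attr/current. The procfs attr interface and the privilege rules (CAP_MAC_ADMIN or a matching entry in the caller's relabel list) come from the hunks above; the helper name set_current_smack_label() is invented for this sketch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Write a Smack label to /proc/self/attr/current. With the parser above,
 * "foo", "foo\n" and either form with a trailing NUL all set the
 * three-character label "foo".
 */
static int set_current_smack_label(const char *label)
{
	int fd = open("/proc/self/attr/current", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, label, strlen(label));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (set_current_smack_label("foo"))
		perror("relabel failed");
	return 0;
}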
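The smack_setselfattr() hunk above adds the requirement that the lsm_ctx payload be NUL-terminated, with the terminator counted in ctx_len, and that ctx->flags be zero. A minimal sketch of the same relabel done through the lsm_set_self_attr() syscall follows; it assumes the uapi <linux/lsm.h> definitions (struct lsm_ctx, LSM_ID_SMACK, LSM_ATTR_CURRENT) and a libc that exposes __NR_lsm_set_self_attr, and the function name smack_set_self() is invented here.

#include <linux/lsm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int smack_set_self(const char *label)
{
	size_t ctx_len = strlen(label) + 1;	/* terminating NUL is counted */
	size_t total = sizeof(struct lsm_ctx) + ctx_len;
	struct lsm_ctx *ctx = calloc(1, total);
	long rc;

	if (!ctx)
		return -1;
	ctx->id = LSM_ID_SMACK;
	ctx->flags = 0;			/* non-zero is rejected with -EINVAL */
	ctx->len = total;
	ctx->ctx_len = ctx_len;
	memcpy(ctx->ctx, label, ctx_len);
	rc = syscall(__NR_lsm_set_self_attr, LSM_ATTR_CURRENT, ctx, total, 0);
	free(ctx);
	return rc < 0 ? -1 : 0;
}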
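To make the effect of tomoyo_patternize_path() above concrete, here is how a few hypothetical learned lines would be rewritten before being added to the domain policy (the PIDs, inode numbers and file descriptors are made up; \$ is TOMOYO's pattern for a run of decimal digits):

	file read proc:/2476/task/2476/fdinfo/5   =>  file read proc:/\$/task/\$/fdinfo/\$
	file read proc:/self/task/2476/fd/3       =>  file read proc:/self/task/\$/fd/\$
	file ioctl socket:[409021] 0x8933         =>  file ioctl socket:[\$] 0x8933
	file read /etc/fstab                      =>  file read /etc/fstab (no colon, left as is)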
