Diffstat (limited to 'security/security.c')
-rw-r--r--  security/security.c  1502
1 file changed, 709 insertions(+), 793 deletions(-)
diff --git a/security/security.c b/security/security.c
index e5ca08789f74..31a688650601 100644
--- a/security/security.c
+++ b/security/security.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
-#include <linux/fsnotify.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
@@ -28,30 +27,12 @@
#include <linux/xattr.h>
#include <linux/msg.h>
#include <linux/overflow.h>
+#include <linux/perf_event.h>
+#include <linux/fs.h>
#include <net/flow.h>
+#include <net/sock.h>
-/* How many LSMs were built into the kernel? */
-#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
-
-/*
- * How many LSMs are built into the kernel as determined at
- * build time. Used to determine fixed array sizes.
- * The capability module is accounted for by CONFIG_SECURITY
- */
-#define LSM_CONFIG_COUNT ( \
- (IS_ENABLED(CONFIG_SECURITY) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_SELINUX) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_SMACK) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_TOMOYO) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_APPARMOR) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_YAMA) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_LOADPIN) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_SAFESETID) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_BPF_LSM) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_SECURITY_LANDLOCK) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_IMA) ? 1 : 0) + \
- (IS_ENABLED(CONFIG_EVM) ? 1 : 0))
+#include "lsm.h"
/*
* These are descriptions of the reasons that can be passed to the
@@ -92,582 +73,159 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = {
[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
-struct security_hook_heads security_hook_heads __ro_after_init;
-static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
-
-static struct kmem_cache *lsm_file_cache;
-static struct kmem_cache *lsm_inode_cache;
-
-char *lsm_names;
-static struct lsm_blob_sizes blob_sizes __ro_after_init;
+bool lsm_debug __ro_after_init;
-/* Boot-time LSM user choice */
-static __initdata const char *chosen_lsm_order;
-static __initdata const char *chosen_major_lsm;
-
-static __initconst const char *const builtin_lsm_order = CONFIG_LSM;
-
-/* Ordered list of LSMs to initialize. */
-static __initdata struct lsm_info **ordered_lsms;
-static __initdata struct lsm_info *exclusive;
-
-static __initdata bool debug;
-#define init_debug(...) \
- do { \
- if (debug) \
- pr_info(__VA_ARGS__); \
- } while (0)
+unsigned int lsm_active_cnt __ro_after_init;
+const struct lsm_id *lsm_idlist[MAX_LSM_COUNT];
-static bool __init is_enabled(struct lsm_info *lsm)
-{
- if (!lsm->enabled)
- return false;
+struct lsm_blob_sizes blob_sizes;
- return *lsm->enabled;
-}
+struct kmem_cache *lsm_file_cache;
+struct kmem_cache *lsm_inode_cache;
-/* Mark an LSM's enabled flag. */
-static int lsm_enabled_true __initdata = 1;
-static int lsm_enabled_false __initdata = 0;
-static void __init set_enabled(struct lsm_info *lsm, bool enabled)
-{
- /*
- * When an LSM hasn't configured an enable variable, we can use
- * a hard-coded location for storing the default enabled state.
- */
- if (!lsm->enabled) {
- if (enabled)
- lsm->enabled = &lsm_enabled_true;
- else
- lsm->enabled = &lsm_enabled_false;
- } else if (lsm->enabled == &lsm_enabled_true) {
- if (!enabled)
- lsm->enabled = &lsm_enabled_false;
- } else if (lsm->enabled == &lsm_enabled_false) {
- if (enabled)
- lsm->enabled = &lsm_enabled_true;
- } else {
- *lsm->enabled = enabled;
- }
-}
-
-/* Is an LSM already listed in the ordered LSMs list? */
-static bool __init exists_ordered_lsm(struct lsm_info *lsm)
-{
- struct lsm_info **check;
-
- for (check = ordered_lsms; *check; check++)
- if (*check == lsm)
- return true;
-
- return false;
-}
-
-/* Append an LSM to the list of ordered LSMs to initialize. */
-static int last_lsm __initdata;
-static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
-{
- /* Ignore duplicate selections. */
- if (exists_ordered_lsm(lsm))
- return;
-
- if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
- return;
-
- /* Enable this LSM, if it is not already set. */
- if (!lsm->enabled)
- lsm->enabled = &lsm_enabled_true;
- ordered_lsms[last_lsm++] = lsm;
-
- init_debug("%s ordered: %s (%s)\n", from, lsm->name,
- is_enabled(lsm) ? "enabled" : "disabled");
-}
-
-/* Is an LSM allowed to be initialized? */
-static bool __init lsm_allowed(struct lsm_info *lsm)
-{
- /* Skip if the LSM is disabled. */
- if (!is_enabled(lsm))
- return false;
-
- /* Not allowed if another exclusive LSM already initialized. */
- if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
- init_debug("exclusive disabled: %s\n", lsm->name);
- return false;
- }
-
- return true;
-}
-
-static void __init lsm_set_blob_size(int *need, int *lbs)
-{
- int offset;
-
- if (*need <= 0)
- return;
-
- offset = ALIGN(*lbs, sizeof(void *));
- *lbs = offset + *need;
- *need = offset;
-}
-
-static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
-{
- if (!needed)
- return;
-
- lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
- lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
- /*
- * The inode blob gets an rcu_head in addition to
- * what the modules might need.
- */
- if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
- blob_sizes.lbs_inode = sizeof(struct rcu_head);
- lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
- lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
- lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
- lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
- lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
- lsm_set_blob_size(&needed->lbs_xattr_count,
- &blob_sizes.lbs_xattr_count);
-}
-
-/* Prepare LSM for initialization. */
-static void __init prepare_lsm(struct lsm_info *lsm)
-{
- int enabled = lsm_allowed(lsm);
-
- /* Record enablement (to handle any following exclusive LSMs). */
- set_enabled(lsm, enabled);
-
- /* If enabled, do pre-initialization work. */
- if (enabled) {
- if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
- exclusive = lsm;
- init_debug("exclusive chosen: %s\n", lsm->name);
- }
-
- lsm_set_blob_sizes(lsm->blobs);
- }
-}
-
-/* Initialize a given LSM, if it is enabled. */
-static void __init initialize_lsm(struct lsm_info *lsm)
-{
- if (is_enabled(lsm)) {
- int ret;
-
- init_debug("initializing %s\n", lsm->name);
- ret = lsm->init();
- WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
- }
-}
+#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX
/*
- * Current index to use while initializing the lsm id list.
+ * Identifier for the LSM static calls.
+ * HOOK is an LSM hook as defined in linux/lsm_hook_defs.h
+ * IDX is the index of the static call. 0 <= IDX < MAX_LSM_COUNT
*/
-u32 lsm_active_cnt __ro_after_init;
-const struct lsm_id *lsm_idlist[LSM_CONFIG_COUNT];
-
-/* Populate ordered LSMs list from comma-separated LSM name list. */
-static void __init ordered_lsm_parse(const char *order, const char *origin)
-{
- struct lsm_info *lsm;
- char *sep, *name, *next;
-
- /* LSM_ORDER_FIRST is always first. */
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (lsm->order == LSM_ORDER_FIRST)
- append_ordered_lsm(lsm, " first");
- }
-
- /* Process "security=", if given. */
- if (chosen_major_lsm) {
- struct lsm_info *major;
+#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX
- /*
- * To match the original "security=" behavior, this
- * explicitly does NOT fallback to another Legacy Major
- * if the selected one was separately disabled: disable
- * all non-matching Legacy Major LSMs.
- */
- for (major = __start_lsm_info; major < __end_lsm_info;
- major++) {
- if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
- strcmp(major->name, chosen_major_lsm) != 0) {
- set_enabled(major, false);
- init_debug("security=%s disabled: %s (only one legacy major LSM)\n",
- chosen_major_lsm, major->name);
- }
- }
- }
-
- sep = kstrdup(order, GFP_KERNEL);
- next = sep;
- /* Walk the list, looking for matching LSMs. */
- while ((name = strsep(&next, ",")) != NULL) {
- bool found = false;
-
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (strcmp(lsm->name, name) == 0) {
- if (lsm->order == LSM_ORDER_MUTABLE)
- append_ordered_lsm(lsm, origin);
- found = true;
- }
- }
-
- if (!found)
- init_debug("%s ignored: %s (not built into kernel)\n",
- origin, name);
- }
-
- /* Process "security=", if given. */
- if (chosen_major_lsm) {
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (exists_ordered_lsm(lsm))
- continue;
- if (strcmp(lsm->name, chosen_major_lsm) == 0)
- append_ordered_lsm(lsm, "security=");
- }
- }
-
- /* LSM_ORDER_LAST is always last. */
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (lsm->order == LSM_ORDER_LAST)
- append_ordered_lsm(lsm, " last");
- }
-
- /* Disable all LSMs not in the ordered list. */
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (exists_ordered_lsm(lsm))
- continue;
- set_enabled(lsm, false);
- init_debug("%s skipped: %s (not in requested order)\n",
- origin, lsm->name);
- }
-
- kfree(sep);
-}
-
-static void __init lsm_early_cred(struct cred *cred);
-static void __init lsm_early_task(struct task_struct *task);
-
-static int lsm_append(const char *new, char **result);
-
-static void __init report_lsm_order(void)
-{
- struct lsm_info **lsm, *early;
- int first = 0;
-
- pr_info("initializing lsm=");
-
- /* Report each enabled LSM name, comma separated. */
- for (early = __start_early_lsm_info;
- early < __end_early_lsm_info; early++)
- if (is_enabled(early))
- pr_cont("%s%s", first++ == 0 ? "" : ",", early->name);
- for (lsm = ordered_lsms; *lsm; lsm++)
- if (is_enabled(*lsm))
- pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name);
-
- pr_cont("\n");
-}
-
-static void __init ordered_lsm_init(void)
-{
- struct lsm_info **lsm;
-
- ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
- GFP_KERNEL);
-
- if (chosen_lsm_order) {
- if (chosen_major_lsm) {
- pr_warn("security=%s is ignored because it is superseded by lsm=%s\n",
- chosen_major_lsm, chosen_lsm_order);
- chosen_major_lsm = NULL;
- }
- ordered_lsm_parse(chosen_lsm_order, "cmdline");
- } else
- ordered_lsm_parse(builtin_lsm_order, "builtin");
-
- for (lsm = ordered_lsms; *lsm; lsm++)
- prepare_lsm(*lsm);
-
- report_lsm_order();
-
- init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
- init_debug("file blob size = %d\n", blob_sizes.lbs_file);
- init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
- init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
- init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
- init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
- init_debug("task blob size = %d\n", blob_sizes.lbs_task);
- init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count);
-
- /*
- * Create any kmem_caches needed for blobs
- */
- if (blob_sizes.lbs_file)
- lsm_file_cache = kmem_cache_create("lsm_file_cache",
- blob_sizes.lbs_file, 0,
- SLAB_PANIC, NULL);
- if (blob_sizes.lbs_inode)
- lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
- blob_sizes.lbs_inode, 0,
- SLAB_PANIC, NULL);
+/*
+ * Invoke the macro M MAX_LSM_COUNT times, once per static call slot of an LSM hook.
+ */
+#define LSM_LOOP_UNROLL(M, ...) \
+do { \
+ UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \
+} while (0)
- lsm_early_cred((struct cred *) current->cred);
- lsm_early_task(current);
- for (lsm = ordered_lsms; *lsm; lsm++)
- initialize_lsm(*lsm);
+#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)
- kfree(ordered_lsms);
-}
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define LSM_HOOK_TRAMP(NAME, NUM) \
+ &STATIC_CALL_TRAMP(LSM_STATIC_CALL(NAME, NUM))
+#else
+#define LSM_HOOK_TRAMP(NAME, NUM) NULL
+#endif
-int __init early_security_init(void)
-{
- struct lsm_info *lsm;
+/*
+ * Define static calls and static keys for each LSM hook.
+ */
+#define DEFINE_LSM_STATIC_CALL(NUM, NAME, RET, ...) \
+ DEFINE_STATIC_CALL_NULL(LSM_STATIC_CALL(NAME, NUM), \
+ *((RET(*)(__VA_ARGS__))NULL)); \
+ DEFINE_STATIC_KEY_FALSE(SECURITY_HOOK_ACTIVE_KEY(NAME, NUM));
-#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
- INIT_HLIST_HEAD(&security_hook_heads.NAME);
-#include "linux/lsm_hook_defs.h"
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ LSM_DEFINE_UNROLL(DEFINE_LSM_STATIC_CALL, NAME, RET, __VA_ARGS__)
+#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
+#undef DEFINE_LSM_STATIC_CALL
- for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
- if (!lsm->enabled)
- lsm->enabled = &lsm_enabled_true;
- prepare_lsm(lsm);
- initialize_lsm(lsm);
- }
-
- return 0;
-}
+/*
+ * Initialise a table of static calls for each LSM hook.
+ * The DEFINE_STATIC_CALL_NULL invocations above generate a key
+ * (STATIC_CALL_KEY) and a trampoline (STATIC_CALL_TRAMP) for each slot,
+ * which are used to call __static_call_update when updating the static call.
+ *
+ * The static calls table is used by early LSMs; some architectures can fault
+ * on unaligned accesses and the fault handling code may not be ready by then.
+ * Thus, the static calls table should be aligned to avoid any unhandled
+ * faults in early init.
+ */
+struct lsm_static_calls_table
+ static_calls_table __ro_after_init __aligned(sizeof(u64)) = {
+#define INIT_LSM_STATIC_CALL(NUM, NAME) \
+ (struct lsm_static_call) { \
+ .key = &STATIC_CALL_KEY(LSM_STATIC_CALL(NAME, NUM)), \
+ .trampoline = LSM_HOOK_TRAMP(NAME, NUM), \
+ .active = &SECURITY_HOOK_ACTIVE_KEY(NAME, NUM), \
+ },
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ .NAME = { \
+ LSM_DEFINE_UNROLL(INIT_LSM_STATIC_CALL, NAME) \
+ },
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+#undef INIT_LSM_STATIC_CALL
+ };
/**
- * security_init - initializes the security framework
+ * lsm_file_alloc - allocate a composite file blob
+ * @file: the file that needs a blob
*
- * This should be called early in the kernel initialization sequence.
- */
-int __init security_init(void)
-{
- struct lsm_info *lsm;
-
- init_debug("legacy security=%s\n", chosen_major_lsm ? : " *unspecified*");
- init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order);
- init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*");
-
- /*
- * Append the names of the early LSM modules now that kmalloc() is
- * available
- */
- for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
- init_debug(" early started: %s (%s)\n", lsm->name,
- is_enabled(lsm) ? "enabled" : "disabled");
- if (lsm->enabled)
- lsm_append(lsm->name, &lsm_names);
- }
-
- /* Load LSMs in specified order. */
- ordered_lsm_init();
-
- return 0;
-}
-
-/* Save user chosen LSM */
-static int __init choose_major_lsm(char *str)
-{
- chosen_major_lsm = str;
- return 1;
-}
-__setup("security=", choose_major_lsm);
-
-/* Explicitly choose LSM initialization order. */
-static int __init choose_lsm_order(char *str)
-{
- chosen_lsm_order = str;
- return 1;
-}
-__setup("lsm=", choose_lsm_order);
-
-/* Enable LSM order debugging. */
-static int __init enable_debug(char *str)
-{
- debug = true;
- return 1;
-}
-__setup("lsm.debug", enable_debug);
-
-static bool match_last_lsm(const char *list, const char *lsm)
-{
- const char *last;
-
- if (WARN_ON(!list || !lsm))
- return false;
- last = strrchr(list, ',');
- if (last)
- /* Pass the comma, strcmp() will check for '\0' */
- last++;
- else
- last = list;
- return !strcmp(last, lsm);
-}
-
-static int lsm_append(const char *new, char **result)
-{
- char *cp;
-
- if (*result == NULL) {
- *result = kstrdup(new, GFP_KERNEL);
- if (*result == NULL)
- return -ENOMEM;
- } else {
- /* Check if it is the last registered name */
- if (match_last_lsm(*result, new))
- return 0;
- cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
- if (cp == NULL)
- return -ENOMEM;
- kfree(*result);
- *result = cp;
- }
- return 0;
-}
-
-/**
- * security_add_hooks - Add a modules hooks to the hook lists.
- * @hooks: the hooks to add
- * @count: the number of hooks to add
- * @lsmid: the identification information for the security module
+ * Allocate the file blob for all the modules
*
- * Each LSM has to register its hooks with the infrastructure.
+ * Returns 0, or -ENOMEM if memory can't be allocated.
*/
-void __init security_add_hooks(struct security_hook_list *hooks, int count,
- const struct lsm_id *lsmid)
+static int lsm_file_alloc(struct file *file)
{
- int i;
-
- /*
- * A security module may call security_add_hooks() more
- * than once during initialization, and LSM initialization
- * is serialized. Landlock is one such case.
- * Look at the previous entry, if there is one, for duplication.
- */
- if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) {
- if (lsm_active_cnt >= LSM_CONFIG_COUNT)
- panic("%s Too many LSMs registered.\n", __func__);
- lsm_idlist[lsm_active_cnt++] = lsmid;
- }
-
- for (i = 0; i < count; i++) {
- hooks[i].lsmid = lsmid;
- hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
- }
-
- /*
- * Don't try to append during early_security_init(), we'll come back
- * and fix this up afterwards.
- */
- if (slab_is_available()) {
- if (lsm_append(lsmid->name, &lsm_names) < 0)
- panic("%s - Cannot get early memory.\n", __func__);
+ if (!lsm_file_cache) {
+ file->f_security = NULL;
+ return 0;
}
-}
-
-int call_blocking_lsm_notifier(enum lsm_event event, void *data)
-{
- return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
- event, data);
-}
-EXPORT_SYMBOL(call_blocking_lsm_notifier);
-
-int register_blocking_lsm_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
- nb);
-}
-EXPORT_SYMBOL(register_blocking_lsm_notifier);
-int unregister_blocking_lsm_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
- nb);
+ file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
+ if (file->f_security == NULL)
+ return -ENOMEM;
+ return 0;
}
-EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
/**
- * lsm_cred_alloc - allocate a composite cred blob
- * @cred: the cred that needs a blob
+ * lsm_blob_alloc - allocate a composite blob
+ * @dest: the destination for the blob
+ * @size: the size of the blob
* @gfp: allocation type
*
- * Allocate the cred blob for all the modules
+ * Allocate a blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
-static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
+static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp)
{
- if (blob_sizes.lbs_cred == 0) {
- cred->security = NULL;
+ if (size == 0) {
+ *dest = NULL;
return 0;
}
- cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
- if (cred->security == NULL)
+ *dest = kzalloc(size, gfp);
+ if (*dest == NULL)
return -ENOMEM;
return 0;
}
/**
- * lsm_early_cred - during initialization allocate a composite cred blob
+ * lsm_cred_alloc - allocate a composite cred blob
* @cred: the cred that needs a blob
+ * @gfp: allocation type
*
* Allocate the cred blob for all the modules
- */
-static void __init lsm_early_cred(struct cred *cred)
-{
- int rc = lsm_cred_alloc(cred, GFP_KERNEL);
-
- if (rc)
- panic("%s: Early cred alloc failed.\n", __func__);
-}
-
-/**
- * lsm_file_alloc - allocate a composite file blob
- * @file: the file that needs a blob
- *
- * Allocate the file blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
-static int lsm_file_alloc(struct file *file)
+int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
{
- if (!lsm_file_cache) {
- file->f_security = NULL;
- return 0;
- }
-
- file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
- if (file->f_security == NULL)
- return -ENOMEM;
- return 0;
+ return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp);
}
/**
* lsm_inode_alloc - allocate a composite inode blob
* @inode: the inode that needs a blob
+ * @gfp: allocation flags
*
* Allocate the inode blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
-int lsm_inode_alloc(struct inode *inode)
+static int lsm_inode_alloc(struct inode *inode, gfp_t gfp)
{
if (!lsm_inode_cache) {
inode->i_security = NULL;
return 0;
}
- inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
+ inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
@@ -681,17 +239,9 @@ int lsm_inode_alloc(struct inode *inode)
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
-static int lsm_task_alloc(struct task_struct *task)
+int lsm_task_alloc(struct task_struct *task)
{
- if (blob_sizes.lbs_task == 0) {
- task->security = NULL;
- return 0;
- }
-
- task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
- if (task->security == NULL)
- return -ENOMEM;
- return 0;
+ return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL);
}
/**
@@ -704,16 +254,23 @@ static int lsm_task_alloc(struct task_struct *task)
*/
static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
{
- if (blob_sizes.lbs_ipc == 0) {
- kip->security = NULL;
- return 0;
- }
+ return lsm_blob_alloc(&kip->security, blob_sizes.lbs_ipc, GFP_KERNEL);
+}
- kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
- if (kip->security == NULL)
- return -ENOMEM;
- return 0;
+#ifdef CONFIG_KEYS
+/**
+ * lsm_key_alloc - allocate a composite key blob
+ * @key: the key that needs a blob
+ *
+ * Allocate the key blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_key_alloc(struct key *key)
+{
+ return lsm_blob_alloc(&key->security, blob_sizes.lbs_key, GFP_KERNEL);
}
+#endif /* CONFIG_KEYS */
/**
* lsm_msg_msg_alloc - allocate a composite msg_msg blob
@@ -725,30 +282,64 @@ static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
*/
static int lsm_msg_msg_alloc(struct msg_msg *mp)
{
- if (blob_sizes.lbs_msg_msg == 0) {
- mp->security = NULL;
- return 0;
- }
+ return lsm_blob_alloc(&mp->security, blob_sizes.lbs_msg_msg,
+ GFP_KERNEL);
+}
- mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
- if (mp->security == NULL)
- return -ENOMEM;
- return 0;
+/**
+ * lsm_bdev_alloc - allocate a composite block_device blob
+ * @bdev: the block_device that needs a blob
+ *
+ * Allocate the block_device blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bdev_alloc(struct block_device *bdev)
+{
+ return lsm_blob_alloc(&bdev->bd_security, blob_sizes.lbs_bdev,
+ GFP_KERNEL);
}
+#ifdef CONFIG_BPF_SYSCALL
/**
- * lsm_early_task - during initialization allocate a composite task blob
- * @task: the task that needs a blob
+ * lsm_bpf_map_alloc - allocate a composite bpf_map blob
+ * @map: the bpf_map that needs a blob
*
- * Allocate the task blob for all the modules
+ * Allocate the bpf_map blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
*/
-static void __init lsm_early_task(struct task_struct *task)
+static int lsm_bpf_map_alloc(struct bpf_map *map)
{
- int rc = lsm_task_alloc(task);
+ return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL);
+}
- if (rc)
- panic("%s: Early task alloc failed.\n", __func__);
+/**
+ * lsm_bpf_prog_alloc - allocate a composite bpf_prog blob
+ * @prog: the bpf_prog that needs a blob
+ *
+ * Allocate the bpf_prog blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_prog_alloc(struct bpf_prog *prog)
+{
+ return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL);
+}
+
+/**
+ * lsm_bpf_token_alloc - allocate a composite bpf_token blob
+ * @token: the bpf_token that needs a blob
+ *
+ * Allocate the bpf_token blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_token_alloc(struct bpf_token *token)
+{
+ return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL);
}
+#endif /* CONFIG_BPF_SYSCALL */
/**
* lsm_superblock_alloc - allocate a composite superblock blob
@@ -760,15 +351,8 @@ static void __init lsm_early_task(struct task_struct *task)
*/
static int lsm_superblock_alloc(struct super_block *sb)
{
- if (blob_sizes.lbs_superblock == 0) {
- sb->s_security = NULL;
- return 0;
- }
-
- sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
- if (sb->s_security == NULL)
- return -ENOMEM;
- return 0;
+ return lsm_blob_alloc(&sb->s_security, blob_sizes.lbs_superblock,
+ GFP_KERNEL);
}
/**
@@ -853,29 +437,43 @@ out:
* call_int_hook:
* This is a hook that returns a value.
*/
+#define __CALL_STATIC_VOID(NUM, HOOK, ...) \
+do { \
+ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \
+ static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
+ } \
+} while (0);
-#define call_void_hook(FUNC, ...) \
- do { \
- struct security_hook_list *P; \
- \
- hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
- P->hook.FUNC(__VA_ARGS__); \
+#define call_void_hook(HOOK, ...) \
+ do { \
+ LSM_LOOP_UNROLL(__CALL_STATIC_VOID, HOOK, __VA_ARGS__); \
} while (0)
-#define call_int_hook(FUNC, ...) ({ \
- int RC = LSM_RET_DEFAULT(FUNC); \
- do { \
- struct security_hook_list *P; \
- \
- hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
- RC = P->hook.FUNC(__VA_ARGS__); \
- if (RC != LSM_RET_DEFAULT(FUNC)) \
- break; \
- } \
- } while (0); \
- RC; \
+
+#define __CALL_STATIC_INT(NUM, R, HOOK, LABEL, ...) \
+do { \
+ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \
+ R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
+ if (R != LSM_RET_DEFAULT(HOOK)) \
+ goto LABEL; \
+ } \
+} while (0);
+
+#define call_int_hook(HOOK, ...) \
+({ \
+ __label__ OUT; \
+ int RC = LSM_RET_DEFAULT(HOOK); \
+ \
+ LSM_LOOP_UNROLL(__CALL_STATIC_INT, RC, HOOK, OUT, __VA_ARGS__); \
+OUT: \
+ RC; \
})
+#define lsm_for_each_hook(scall, NAME) \
+ for (scall = static_calls_table.NAME; \
+ scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++) \
+ if (static_key_enabled(&scall->active->key))
+
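/*
 * Illustrative expansion (not part of this patch): assuming MAX_LSM_COUNT
 * is 2, call_int_hook(file_open, file) unrolls into roughly the function
 * below.  The security_hook_active_file_open_N and lsm_static_call_file_open_N
 * names come from the SECURITY_HOOK_ACTIVE_KEY() and LSM_STATIC_CALL()
 * macros above; the wrapper function itself is only for illustration.
 */
static inline int call_int_hook_file_open_expanded(struct file *file)
{
	int RC = LSM_RET_DEFAULT(file_open);

	if (static_branch_unlikely(&security_hook_active_file_open_0)) {
		RC = static_call(lsm_static_call_file_open_0)(file);
		if (RC != LSM_RET_DEFAULT(file_open))
			goto out;
	}
	if (static_branch_unlikely(&security_hook_active_file_open_1)) {
		RC = static_call(lsm_static_call_file_open_1)(file);
		if (RC != LSM_RET_DEFAULT(file_open))
			goto out;
	}
out:
	return RC;
}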
/* Security operations */
/**
@@ -1110,20 +708,19 @@ int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
*/
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
int cap_sys_admin = 1;
int rc;
/*
- * The module will respond with a positive value if
- * it thinks the __vm_enough_memory() call should be
- * made with the cap_sys_admin set. If all of the modules
- * agree that it should be set it will. If any module
- * thinks it should not be set it won't.
+ * The module will respond with 0 if it thinks the __vm_enough_memory()
+ * call should be made with the cap_sys_admin set. If all of the modules
+ * agree that it should be set it will. If any module thinks it should
+ * not be set it won't.
*/
- hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
- rc = hp->hook.vm_enough_memory(mm, pages);
- if (rc <= 0) {
+ lsm_for_each_hook(scall, vm_enough_memory) {
+ rc = scall->hl->hook.vm_enough_memory(mm, pages);
+ if (rc < 0) {
cap_sys_admin = 0;
break;
}
@@ -1143,6 +740,12 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
* to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm
* contains the linux_binprm structure.
*
+ * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is
+ * set. The result must be the same as without this flag even if the execution
+ * will never really happen and @bprm will always be dropped.
+ *
+ * This hook must not change current->cred, only @bprm->cred.
+ *
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -1269,13 +872,12 @@ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
int security_fs_context_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
int trc;
int rc = -ENOPARAM;
- hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param,
- list) {
- trc = hp->hook.fs_context_parse_param(fc, param);
+ lsm_for_each_hook(scall, fs_context_parse_param) {
+ trc = scall->hl->hook.fs_context_parse_param(fc, param);
if (trc == 0)
rc = 0;
else if (trc != -ENOPARAM)
@@ -1505,12 +1107,11 @@ int security_sb_set_mnt_opts(struct super_block *sb,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
int rc = mnt_opts ? -EOPNOTSUPP : LSM_RET_DEFAULT(sb_set_mnt_opts);
- hlist_for_each_entry(hp, &security_hook_heads.sb_set_mnt_opts,
- list) {
- rc = hp->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags,
+ lsm_for_each_hook(scall, sb_set_mnt_opts) {
+ rc = scall->hl->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags,
set_kern_flags);
if (rc != LSM_RET_DEFAULT(sb_set_mnt_opts))
break;
@@ -1575,6 +1176,7 @@ int security_path_notify(const struct path *path, u64 mask,
/**
* security_inode_alloc() - Allocate an inode LSM blob
* @inode: the inode
+ * @gfp: allocation flags
*
* Allocate and attach a security structure to @inode->i_security. The
* i_security field is initialized to NULL when the inode structure is
@@ -1582,9 +1184,9 @@ int security_path_notify(const struct path *path, u64 mask,
*
* Return: Return 0 if operation was successful.
*/
-int security_inode_alloc(struct inode *inode)
+int security_inode_alloc(struct inode *inode, gfp_t gfp)
{
- int rc = lsm_inode_alloc(inode);
+ int rc = lsm_inode_alloc(inode, gfp);
if (unlikely(rc))
return rc;
@@ -1596,9 +1198,8 @@ int security_inode_alloc(struct inode *inode)
static void inode_free_by_rcu(struct rcu_head *head)
{
- /*
- * The rcu head is at the start of the inode blob
- */
+ /* The rcu head is at the start of the inode blob */
+ call_void_hook(inode_free_security_rcu, head);
kmem_cache_free(lsm_inode_cache, head);
}
@@ -1606,23 +1207,24 @@ static void inode_free_by_rcu(struct rcu_head *head)
* security_inode_free() - Free an inode's LSM blob
* @inode: the inode
*
- * Deallocate the inode security structure and set @inode->i_security to NULL.
+ * Release any LSM resources associated with @inode, although due to the
+ * inode's RCU protections it is possible that the resources will not be
+ * fully released until after the current RCU grace period has elapsed.
+ *
+ * It is important for LSMs to note that despite being present in a call to
+ * security_inode_free(), @inode may still be referenced in a VFS path walk
+ * and calls to security_inode_permission() may be made during, or after,
+ * a call to security_inode_free(). For this reason the inode->i_security
+ * field is released via a call_rcu() callback and any LSMs which need to
+ * retain inode state for use in security_inode_permission() should only
+ * release that state in the inode_free_security_rcu() LSM hook callback.
*/
void security_inode_free(struct inode *inode)
{
call_void_hook(inode_free_security, inode);
- /*
- * The inode may still be referenced in a path walk and
- * a call to security_inode_permission() can be made
- * after inode_free_security() is called. Ideally, the VFS
- * wouldn't do this, but fixing that is a much harder
- * job. For now, simply free the i_security via RCU, and
- * leave the current inode->i_security pointer intact.
- * The inode will be freed after the RCU grace period too.
- */
- if (inode->i_security)
- call_rcu((struct rcu_head *)inode->i_security,
- inode_free_by_rcu);
+ if (!inode->i_security)
+ return;
+ call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu);
}
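/*
 * Illustrative sketch (not part of this patch): an LSM that must keep its
 * inode state usable by security_inode_permission() until the RCU grace
 * period has elapsed releases that state from the inode_free_security_rcu()
 * hook, which receives the start of the inode blob.  The "example" names,
 * blob layout, and label helper below are hypothetical.
 */
static void example_inode_free_security_rcu(void *inode_security)
{
	struct example_inode_sec *isec = inode_security +
					 example_blob_sizes.lbs_inode;

	/* No path walk can still reach this blob; drop the label reference. */
	example_label_put(isec->label);
}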
/**
@@ -1631,8 +1233,7 @@ void security_inode_free(struct inode *inode)
* @mode: mode used to determine resource type
* @name: name of the last path component
* @xattr_name: name of the security/LSM xattr
- * @ctx: pointer to the resulting LSM context
- * @ctxlen: length of @ctx
+ * @lsmctx: pointer to the resulting LSM context
*
* Compute a context for a dentry as the inode is not yet available since NFSv4
* has no label backed by an EA anyway. It is important to note that
@@ -1642,11 +1243,11 @@ void security_inode_free(struct inode *inode)
*/
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
- const char **xattr_name, void **ctx,
- u32 *ctxlen)
+ const char **xattr_name,
+ struct lsm_context *lsmctx)
{
return call_int_hook(dentry_init_security, dentry, mode, name,
- xattr_name, ctx, ctxlen);
+ xattr_name, lsmctx);
}
EXPORT_SYMBOL(security_dentry_init_security);
@@ -1666,7 +1267,7 @@ EXPORT_SYMBOL(security_dentry_init_security);
* Return: Returns 0 on success, error on failure.
*/
int security_dentry_create_files_as(struct dentry *dentry, int mode,
- struct qstr *name,
+ const struct qstr *name,
const struct cred *old, struct cred *new)
{
return call_int_hook(dentry_create_files_as, dentry, mode,
@@ -1705,7 +1306,7 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const initxattrs initxattrs, void *fs_data)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
struct xattr *new_xattrs = NULL;
int ret = -EOPNOTSUPP, xattr_count = 0;
@@ -1723,9 +1324,8 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
return -ENOMEM;
}
- hlist_for_each_entry(hp, &security_hook_heads.inode_init_security,
- list) {
- ret = hp->hook.inode_init_security(inode, dir, qstr, new_xattrs,
+ lsm_for_each_hook(scall, inode_init_security) {
+ ret = scall->hl->hook.inode_init_security(inode, dir, qstr, new_xattrs,
&xattr_count);
if (ret && ret != -EOPNOTSUPP)
goto out;
@@ -2073,7 +1673,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
}
/**
- * security_inode_mkdir() - Check if creation a new director is allowed
+ * security_inode_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
@@ -2278,7 +1878,20 @@ int security_inode_getattr(const struct path *path)
* @size: size of xattr value
* @flags: flags
*
- * Check permission before setting the extended attributes.
+ * This hook performs the desired permission checks before setting the extended
+ * attributes (xattrs) on @dentry. It is important to note that we have some
+ * additional logic before the main LSM implementation calls to detect if we
+ * need to perform an additional capability check at the LSM layer.
+ *
+ * Normally we enforce a capability check prior to executing the various LSM
+ * hook implementations, but if an LSM wants to avoid this capability check,
+ * it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
+ * xattrs that it wants to avoid the capability check, leaving the LSM fully
+ * responsible for enforcing the access control for the specific xattr. If
+ * all of the enabled LSMs refrain from registering an 'inode_xattr_skipcap'
+ * hook, or return 0 (the default return value), the capability check is
+ * still performed.
*
* Return: Returns 0 if permission is granted.
*/
@@ -2286,20 +1899,20 @@ int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- int ret;
+ int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
- /*
- * SELinux and Smack integrate the cap call,
- * so assume that all LSMs supplying this call do so.
- */
- ret = call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
- flags);
- if (ret == 1)
- ret = cap_inode_setxattr(dentry, name, value, size, flags);
- return ret;
+ /* enforce the capability checks at the lsm layer, if needed */
+ if (!call_int_hook(inode_xattr_skipcap, name)) {
+ rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (rc)
+ return rc;
+ }
+
+ return call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
+ flags);
}
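/*
 * Illustrative sketch (not part of this patch): an LSM that wants sole
 * responsibility for access control on its own xattrs can opt out of the
 * generic capability check by returning 1 from an inode_xattr_skipcap hook.
 * The "security.example" prefix and function name are hypothetical.
 */
static int example_inode_xattr_skipcap(const char *name)
{
	/* 1: skip cap_inode_setxattr()/cap_inode_removexattr() for this name */
	if (!strncmp(name, "security.example", strlen("security.example")))
		return 1;
	/* 0 (the default): let the capability check run as usual */
	return 0;
}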
/**
@@ -2452,26 +2065,39 @@ int security_inode_listxattr(struct dentry *dentry)
* @dentry: file
* @name: xattr name
*
- * Check permission before removing the extended attribute identified by @name
- * for @dentry.
+ * This hook performs the desired permission checks before removing the
+ * extended attribute identified by @name for @dentry. It is important to
+ * note that we have some additional logic before the main LSM implementation
+ * calls to detect if we need to perform an additional capability check at
+ * the LSM layer.
+ *
+ * Normally we enforce a capability check prior to executing the various LSM
+ * hook implementations, but if an LSM wants to avoid this capability check,
+ * it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
+ * xattrs that it wants to avoid the capability check, leaving the LSM fully
+ * responsible for enforcing the access control for the specific xattr. If
+ * all of the enabled LSMs refrain from registering an 'inode_xattr_skipcap'
+ * hook, or return 0 (the default return value), the capability check is
+ * still performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
- int ret;
+ int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
- /*
- * SELinux and Smack integrate the cap call,
- * so assume that all LSMs supplying this call do so.
- */
- ret = call_int_hook(inode_removexattr, idmap, dentry, name);
- if (ret == 1)
- ret = cap_inode_removexattr(idmap, dentry, name);
- return ret;
+
+ /* enforce the capability checks at the lsm layer, if needed */
+ if (!call_int_hook(inode_xattr_skipcap, name)) {
+ rc = cap_inode_removexattr(idmap, dentry, name);
+ if (rc)
+ return rc;
+ }
+
+ return call_int_hook(inode_removexattr, idmap, dentry, name);
}
/**
@@ -2489,6 +2115,36 @@ void security_inode_post_removexattr(struct dentry *dentry, const char *name)
}
/**
+ * security_inode_file_setattr() - check if setting fsxattr is allowed
+ * @dentry: file to set filesystem extended attributes on
+ * @fa: extended attributes to set on the inode
+ *
+ * Called when file_setattr() syscall or FS_IOC_FSSETXATTR ioctl() is called on
+ * inode
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa)
+{
+ return call_int_hook(inode_file_setattr, dentry, fa);
+}
+
+/**
+ * security_inode_file_getattr() - check if retrieving fsxattr is allowed
+ * @dentry: file to retrieve filesystem extended attributes from
+ * @fa: extended attributes to get
+ *
+ * Called when file_getattr() syscall or FS_IOC_FSGETXATTR ioctl() is called on
+ * inode
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa)
+{
+ return call_int_hook(inode_file_getattr, dentry, fa);
+}
+
+/**
* security_inode_need_killpriv() - Check if security_inode_killpriv() required
* @dentry: associated dentry
*
@@ -2596,16 +2252,15 @@ int security_inode_listsecurity(struct inode *inode,
EXPORT_SYMBOL(security_inode_listsecurity);
/**
- * security_inode_getsecid() - Get an inode's secid
+ * security_inode_getlsmprop() - Get an inode's LSM data
* @inode: inode
- * @secid: secid to return
+ * @prop: lsm specific information to return
*
- * Get the secid associated with the node. In case of failure, @secid will be
- * set to zero.
+ * Get the lsm specific information associated with the node.
*/
-void security_inode_getsecid(struct inode *inode, u32 *secid)
+void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop)
{
- call_void_hook(inode_getsecid, inode, secid);
+ call_void_hook(inode_getlsmprop, inode, prop);
}
/**
@@ -2635,19 +2290,14 @@ EXPORT_SYMBOL(security_inode_copy_up);
* lower layer to the union/overlay layer. The caller is responsible for
* reading and writing the xattrs, this hook is merely a filter.
*
- * Return: Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP
- * if the security module does not know about attribute, or a negative
- * error code to abort the copy up.
+ * Return: Returns 0 to accept the xattr, -ECANCELED to discard the xattr,
+ * -EOPNOTSUPP if the security module does not know about the attribute,
+ * or a negative error code to abort the copy up.
*/
int security_inode_copy_up_xattr(struct dentry *src, const char *name)
{
int rc;
- /*
- * The implementation can return 0 (accept the xattr), 1 (discard the
- * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
- * any other error code in case of an error.
- */
rc = call_int_hook(inode_copy_up_xattr, src, name);
if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
return rc;
@@ -2657,6 +2307,26 @@ int security_inode_copy_up_xattr(struct dentry *src, const char *name)
EXPORT_SYMBOL(security_inode_copy_up_xattr);
/**
+ * security_inode_setintegrity() - Set the inode's integrity data
+ * @inode: inode
+ * @type: type of integrity, e.g. hash digest, signature, etc
+ * @value: the integrity value
+ * @size: size of the integrity value
+ *
+ * Register a verified integrity measurement of an inode with LSMs.
+ * LSMs should free the previously saved data if @value is NULL.
+ *
+ * Return: Returns 0 on success, negative values on failure.
+ */
+int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type, const void *value,
+ size_t size)
+{
+ return call_int_hook(inode_setintegrity, inode, type, value, size);
+}
+EXPORT_SYMBOL(security_inode_setintegrity);
+
+/**
* security_kernfs_init_security() - Init LSM context for a kernfs node
* @kn_dir: parent kernfs node
* @kn: the kernfs node to initialize
@@ -2905,6 +2575,8 @@ int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
* Save owner security information (typically from current->security) in
* file->f_security for later use by the send_sigiotask hook.
*
+ * This hook is called with file->f_owner.lock held.
+ *
* Return: Returns 0 on success.
*/
void security_file_set_fowner(struct file *file)
@@ -2953,17 +2625,15 @@ int security_file_receive(struct file *file)
* Save open-time permission checking state for later use upon file_permission,
* and recheck access if anything has changed since inode_permission.
*
+ * We can check if a file is opened for execution (e.g. an execve(2) call),
+ * either directly or indirectly (e.g. ELF's ld.so), by checking
+ * file->f_flags & __FMODE_EXEC.
+ *
* Return: Returns 0 if permission is granted.
*/
int security_file_open(struct file *file)
{
- int ret;
-
- ret = call_int_hook(file_open, file);
- if (ret)
- return ret;
-
- return fsnotify_open_perm(file);
+ return call_int_hook(file_open, file);
}
/**
@@ -3007,7 +2677,7 @@ int security_file_truncate(struct file *file)
*
* Return: Returns a zero on success, negative values on failure.
*/
-int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
+int security_task_alloc(struct task_struct *task, u64 clone_flags)
{
int rc = lsm_task_alloc(task);
@@ -3129,6 +2799,21 @@ void security_cred_getsecid(const struct cred *c, u32 *secid)
EXPORT_SYMBOL(security_cred_getsecid);
/**
+ * security_cred_getlsmprop() - Get the LSM data from a set of credentials
+ * @c: credentials
+ * @prop: destination for the LSM data
+ *
+ * Retrieve the security data of the cred structure @c. In case of
+ * failure, @prop will be cleared.
+ */
+void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop)
+{
+ lsmprop_init(prop);
+ call_void_hook(cred_getlsmprop, c, prop);
+}
+EXPORT_SYMBOL(security_cred_getlsmprop);
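/*
 * Illustrative sketch (not part of this patch): struct lsm_prop replaces a
 * bare u32 secid wherever more than one LSM may need to contribute data, so
 * a caller that used to fetch a secid now fetches a prop and passes it on.
 * The wrapper below is hypothetical.
 */
static int example_audit_match_cred(const struct cred *cred, u32 field,
				    u32 op, void *lsmrule)
{
	struct lsm_prop prop;

	security_cred_getlsmprop(cred, &prop);
	return security_audit_rule_match(&prop, field, op, lsmrule);
}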
+
+/**
* security_kernel_act_as() - Set the kernel credentials to act as secid
* @new: credentials
* @secid: secid
@@ -3347,33 +3032,33 @@ int security_task_getsid(struct task_struct *p)
}
/**
- * security_current_getsecid_subj() - Get the current task's subjective secid
- * @secid: secid value
+ * security_current_getlsmprop_subj() - Current task's subjective LSM data
+ * @prop: lsm specific information
*
* Retrieve the subjective security identifier of the current task and return
- * it in @secid. In case of failure, @secid will be set to zero.
+ * it in @prop.
*/
-void security_current_getsecid_subj(u32 *secid)
+void security_current_getlsmprop_subj(struct lsm_prop *prop)
{
- *secid = 0;
- call_void_hook(current_getsecid_subj, secid);
+ lsmprop_init(prop);
+ call_void_hook(current_getlsmprop_subj, prop);
}
-EXPORT_SYMBOL(security_current_getsecid_subj);
+EXPORT_SYMBOL(security_current_getlsmprop_subj);
/**
- * security_task_getsecid_obj() - Get a task's objective secid
+ * security_task_getlsmprop_obj() - Get a task's objective LSM data
* @p: target task
- * @secid: secid value
+ * @prop: lsm specific information
*
* Retrieve the objective security identifier of the task_struct in @p and
- * return it in @secid. In case of failure, @secid will be set to zero.
+ * return it in @prop.
*/
-void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
+void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop)
{
- *secid = 0;
- call_void_hook(task_getsecid_obj, p, secid);
+ lsmprop_init(prop);
+ call_void_hook(task_getlsmprop_obj, p, prop);
}
-EXPORT_SYMBOL(security_task_getsecid_obj);
+EXPORT_SYMBOL(security_task_getlsmprop_obj);
/**
* security_task_setnice() - Check if setting a task's nice value is allowed
@@ -3531,10 +3216,10 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
{
int thisrc;
int rc = LSM_RET_DEFAULT(task_prctl);
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
- hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
- thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
+ lsm_for_each_hook(scall, task_prctl) {
+ thisrc = scall->hl->hook.task_prctl(option, arg2, arg3, arg4, arg5);
if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
rc = thisrc;
if (thisrc != 0)
@@ -3585,17 +3270,17 @@ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
}
/**
- * security_ipc_getsecid() - Get the sysv ipc object's secid
+ * security_ipc_getlsmprop() - Get the sysv ipc object LSM data
* @ipcp: ipc permission structure
- * @secid: secid pointer
+ * @prop: pointer to lsm information
*
- * Get the secid associated with the ipc object. In case of failure, @secid
- * will be set to zero.
+ * Get the lsm information associated with the ipc object.
*/
-void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+
+void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop)
{
- *secid = 0;
- call_void_hook(ipc_getsecid, ipcp, secid);
+ lsmprop_init(prop);
+ call_void_hook(ipc_getlsmprop, ipcp, prop);
}
/**
@@ -3940,7 +3625,7 @@ EXPORT_SYMBOL(security_d_instantiate);
int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
u32 __user *size, u32 flags)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, };
u8 __user *base = (u8 __user *)uctx;
u32 entrysize;
@@ -3978,17 +3663,15 @@ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
* In the usual case gather all the data from the LSMs.
* In the single case only get the data from the LSM specified.
*/
- hlist_for_each_entry(hp, &security_hook_heads.getselfattr, list) {
- if (single && lctx.id != hp->lsmid->id)
+ lsm_for_each_hook(scall, getselfattr) {
+ if (single && lctx.id != scall->hl->lsmid->id)
continue;
entrysize = left;
if (base)
uctx = (struct lsm_ctx __user *)(base + total);
- rc = hp->hook.getselfattr(attr, uctx, &entrysize, flags);
- if (rc == -EOPNOTSUPP) {
- rc = 0;
+ rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags);
+ if (rc == -EOPNOTSUPP)
continue;
- }
if (rc == -E2BIG) {
rc = 0;
left = 0;
@@ -4033,7 +3716,7 @@ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
u32 size, u32 flags)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
struct lsm_ctx *lctx;
int rc = LSM_RET_DEFAULT(setselfattr);
u64 required_len;
@@ -4056,9 +3739,9 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
goto free_out;
}
- hlist_for_each_entry(hp, &security_hook_heads.setselfattr, list)
- if ((hp->lsmid->id) == lctx->id) {
- rc = hp->hook.setselfattr(attr, lctx, size, flags);
+ lsm_for_each_hook(scall, setselfattr)
+ if ((scall->hl->lsmid->id) == lctx->id) {
+ rc = scall->hl->hook.setselfattr(attr, lctx, size, flags);
break;
}
@@ -4081,12 +3764,12 @@ free_out:
int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
- hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
- if (lsmid != 0 && lsmid != hp->lsmid->id)
+ lsm_for_each_hook(scall, getprocattr) {
+ if (lsmid != 0 && lsmid != scall->hl->lsmid->id)
continue;
- return hp->hook.getprocattr(p, name, value);
+ return scall->hl->hook.getprocattr(p, name, value);
}
return LSM_RET_DEFAULT(getprocattr);
}
@@ -4105,35 +3788,17 @@ int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
*/
int security_setprocattr(int lsmid, const char *name, void *value, size_t size)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
- hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
- if (lsmid != 0 && lsmid != hp->lsmid->id)
+ lsm_for_each_hook(scall, setprocattr) {
+ if (lsmid != 0 && lsmid != scall->hl->lsmid->id)
continue;
- return hp->hook.setprocattr(name, value, size);
+ return scall->hl->hook.setprocattr(name, value, size);
}
return LSM_RET_DEFAULT(setprocattr);
}
/**
- * security_netlink_send() - Save info and check if netlink sending is allowed
- * @sk: sending socket
- * @skb: netlink message
- *
- * Save security information for a netlink message so that permission checking
- * can be performed when the message is processed. The security information
- * can be saved using the eff_cap field of the netlink_skb_parms structure.
- * Also may be used to provide fine grained control over message transmission.
- *
- * Return: Returns 0 if the information was successfully saved and message is
- * allowed to be transmitted.
- */
-int security_netlink_send(struct sock *sk, struct sk_buff *skb)
-{
- return call_int_hook(netlink_send, sk, skb);
-}
-
-/**
* security_ismaclabel() - Check if the named attribute is a MAC label
* @name: full extended attribute name
*
@@ -4150,23 +3815,54 @@ EXPORT_SYMBOL(security_ismaclabel);
/**
* security_secid_to_secctx() - Convert a secid to a secctx
* @secid: secid
- * @secdata: secctx
- * @seclen: secctx length
+ * @cp: the LSM context
*
- * Convert secid to security context. If @secdata is NULL the length of the
- * result will be returned in @seclen, but no @secdata will be returned. This
+ * Convert secid to security context. If @cp is NULL the length of the
+ * result will be returned, but no data will be returned. This
* does mean that the length could change between calls to check the length and
- * the next call which actually allocates and returns the @secdata.
+ * the next call which actually allocates and returns the data.
*
- * Return: Return 0 on success, error on failure.
+ * Return: Return length of data on success, error on failure.
*/
-int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
- return call_int_hook(secid_to_secctx, secid, secdata, seclen);
+ return call_int_hook(secid_to_secctx, secid, cp);
}
EXPORT_SYMBOL(security_secid_to_secctx);
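/*
 * Illustrative caller sketch (not part of this patch): with struct
 * lsm_context the buffer and its length travel together and a negative
 * return signals failure.  The function name and the pr_info() consumer
 * are only for illustration.
 */
static int example_print_secctx(u32 secid)
{
	struct lsm_context ctx;
	int rc;

	rc = security_secid_to_secctx(secid, &ctx);
	if (rc < 0)
		return rc;
	pr_info("secctx: %.*s\n", (int)ctx.len, ctx.context);
	security_release_secctx(&ctx);
	return 0;
}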
/**
+ * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx
+ * @prop: lsm specific information
+ * @cp: the LSM context
+ * @lsmid: which security module to report
+ *
+ * Convert a @prop entry to security context. If @cp is NULL the
+ * length of the result will be returned. This does mean that the
+ * length could change between calls to check the length and the
+ * next call which actually allocates and returns the @cp.
+ *
+ * @lsmid identifies which LSM should supply the context.
+ * A value of LSM_ID_UNDEF indicates that the first LSM supplying
+ * the hook should be used. This is used in cases where the
+ * ID of the supplying LSM is unambiguous.
+ *
+ * Return: Return length of data on success, error on failure.
+ */
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+ int lsmid)
+{
+ struct lsm_static_call *scall;
+
+ lsm_for_each_hook(scall, lsmprop_to_secctx) {
+ if (lsmid != LSM_ID_UNDEF && lsmid != scall->hl->lsmid->id)
+ continue;
+ return scall->hl->hook.lsmprop_to_secctx(prop, cp);
+ }
+ return LSM_RET_DEFAULT(lsmprop_to_secctx);
+}
+EXPORT_SYMBOL(security_lsmprop_to_secctx);
+
+/**
* security_secctx_to_secid() - Convert a secctx to a secid
* @secdata: secctx
* @seclen: length of secctx
@@ -4185,14 +3881,14 @@ EXPORT_SYMBOL(security_secctx_to_secid);
/**
* security_release_secctx() - Free a secctx buffer
- * @secdata: secctx
- * @seclen: length of secctx
+ * @cp: the security context
*
* Release the security context.
*/
-void security_release_secctx(char *secdata, u32 seclen)
+void security_release_secctx(struct lsm_context *cp)
{
- call_void_hook(release_secctx, secdata, seclen);
+ call_void_hook(release_secctx, cp);
+ memset(cp, 0, sizeof(*cp));
}
EXPORT_SYMBOL(security_release_secctx);
@@ -4255,17 +3951,17 @@ EXPORT_SYMBOL(security_inode_setsecctx);
/**
* security_inode_getsecctx() - Get the security label of an inode
* @inode: inode
- * @ctx: secctx
- * @ctxlen: length of secctx
+ * @cp: security context
*
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
+ * On success, returns 0 and fills out @cp with the security context
+ * for the given @inode.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
{
- return call_int_hook(inode_getsecctx, inode, ctx, ctxlen);
+ memset(cp, 0, sizeof(*cp));
+ return call_int_hook(inode_getsecctx, inode, cp);
}
EXPORT_SYMBOL(security_inode_getsecctx);
@@ -4306,6 +4002,24 @@ int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
/**
+ * security_netlink_send() - Save info and check if netlink sending is allowed
+ * @sk: sending socket
+ * @skb: netlink message
+ *
+ * Save security information for a netlink message so that permission checking
+ * can be performed when the message is processed. The security information
+ * can be saved using the eff_cap field of the netlink_skb_parms structure.
+ * Also may be used to provide fine grained control over message transmission.
+ *
+ * Return: Returns 0 if the information was successfully saved and message is
+ * allowed to be transmitted.
+ */
+int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return call_int_hook(netlink_send, sk, skb);
+}
+
+/**
* security_unix_stream_connect() - Check if a AF_UNIX stream is allowed
* @sock: originating sock
* @other: peer sock
@@ -4648,6 +4362,20 @@ int security_socket_getpeersec_dgram(struct socket *sock,
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
/**
+ * lsm_sock_alloc - allocate a composite sock blob
+ * @sock: the sock that needs a blob
+ * @gfp: allocation mode
+ *
+ * Allocate the sock blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_sock_alloc(struct sock *sock, gfp_t gfp)
+{
+ return lsm_blob_alloc(&sock->sk_security, blob_sizes.lbs_sock, gfp);
+}
+
+/**
* security_sk_alloc() - Allocate and initialize a sock's LSM blob
* @sk: sock
* @family: protocol family
@@ -4660,7 +4388,14 @@ EXPORT_SYMBOL(security_socket_getpeersec_dgram);
*/
int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
- return call_int_hook(sk_alloc_security, sk, family, priority);
+ int rc = lsm_sock_alloc(sk, priority);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sk_alloc_security, sk, family, priority);
+ if (unlikely(rc))
+ security_sk_free(sk);
+ return rc;
}
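/*
 * Illustrative sketch (not part of this patch): with the framework now
 * allocating sk->sk_security through lsm_sock_alloc() and freeing it in
 * security_sk_free(), an LSM's sk_alloc_security() hook only initialises
 * its own slice of the blob.  All "example" names are hypothetical.
 */
static int example_sk_alloc_security(struct sock *sk, int family, gfp_t gfp)
{
	struct example_sock_sec *ssec = sk->sk_security +
					example_blob_sizes.lbs_sock;

	ssec->peer_sid = EXAMPLE_SID_UNLABELED;
	return 0;
}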
/**
@@ -4672,6 +4407,8 @@ int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
void security_sk_free(struct sock *sk)
{
call_void_hook(sk_free_security, sk);
+ kfree(sk->sk_security);
+ sk->sk_security = NULL;
}
/**
@@ -4819,7 +4556,18 @@ EXPORT_SYMBOL(security_secmark_refcount_dec);
*/
int security_tun_dev_alloc_security(void **security)
{
- return call_int_hook(tun_dev_alloc_security, security);
+ int rc;
+
+ rc = lsm_blob_alloc(security, blob_sizes.lbs_tun_dev, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(tun_dev_alloc_security, *security);
+ if (rc) {
+ kfree(*security);
+ *security = NULL;
+ }
+ return rc;
}
EXPORT_SYMBOL(security_tun_dev_alloc_security);
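Note the hook signature change implied above: tun_dev_alloc_security() now
receives the blob already allocated from lbs_tun_dev rather than a void ** it
must fill in, so a module only initializes its part of it. A module-side
sketch with illustrative names (a real module would add its blob offset, as in
the sock sketch earlier):

struct example_tun_sec {
	u32 owner;	/* illustrative */
};

static int example_tun_dev_alloc_security(void *security)
{
	struct example_tun_sec *tun_sec = security;

	tun_sec->owner = 0;
	return 0;
}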
@@ -4831,7 +4579,7 @@ EXPORT_SYMBOL(security_tun_dev_alloc_security);
*/
void security_tun_dev_free_security(void *security)
{
- call_void_hook(tun_dev_free_security, security);
+ kfree(security);
}
EXPORT_SYMBOL(security_tun_dev_free_security);
@@ -5027,7 +4775,18 @@ EXPORT_SYMBOL(security_ib_endport_manage_subnet);
*/
int security_ib_alloc_security(void **sec)
{
- return call_int_hook(ib_alloc_security, sec);
+ int rc;
+
+ rc = lsm_blob_alloc(sec, blob_sizes.lbs_ib, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(ib_alloc_security, *sec);
+ if (rc) {
+ kfree(*sec);
+ *sec = NULL;
+ }
+ return rc;
}
EXPORT_SYMBOL(security_ib_alloc_security);
@@ -5039,7 +4798,7 @@ EXPORT_SYMBOL(security_ib_alloc_security);
*/
void security_ib_free_security(void *sec)
{
- call_void_hook(ib_free_security, sec);
+ kfree(sec);
}
EXPORT_SYMBOL(security_ib_free_security);
#endif /* CONFIG_SECURITY_INFINIBAND */
@@ -5197,7 +4956,7 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic)
{
- struct security_hook_list *hp;
+ struct lsm_static_call *scall;
int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
/*
@@ -5209,9 +4968,8 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
* For speed optimization, we explicitly break the loop rather than
* using the macro
*/
- hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
- list) {
- rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
+ lsm_for_each_hook(scall, xfrm_state_pol_flow_match) {
+ rc = scall->hl->hook.xfrm_state_pol_flow_match(x, xp, flic);
break;
}
return rc;
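The lsm_for_each_hook() iterator used above comes from the static-call
conversion and is declared in the new lsm.h rather than in this hunk.
Conceptually it walks a fixed-size per-hook slot table and skips slots whose
static key is disabled, roughly as sketched below; the table and field names
here are assumptions for illustration, not the definitive macro.

#define example_for_each_hook(scall, NAME)				\
	for (scall = static_calls_table.NAME;				\
	     scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++)	\
		if (static_key_enabled(&scall->active->key))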
@@ -5256,7 +5014,14 @@ EXPORT_SYMBOL(security_skb_classify_flow);
int security_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
- return call_int_hook(key_alloc, key, cred, flags);
+ int rc = lsm_key_alloc(key);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(key_alloc, key, cred, flags);
+ if (unlikely(rc))
+ security_key_free(key);
+ return rc;
}
/**
@@ -5267,7 +5032,8 @@ int security_key_alloc(struct key *key, const struct cred *cred,
*/
void security_key_free(struct key *key)
{
- call_void_hook(key_free, key);
+ kfree(key->security);
+ key->security = NULL;
}
/**
@@ -5373,7 +5139,7 @@ void security_audit_rule_free(void *lsmrule)
/**
* security_audit_rule_match() - Check if a label matches an audit rule
- * @secid: security label
+ * @prop: security label
* @field: LSM audit field
* @op: matching operator
* @lsmrule: audit rule
@@ -5384,9 +5150,10 @@ void security_audit_rule_free(void *lsmrule)
 * Return: Returns 1 if the label matches the rule, 0 if it does not, -ERRNO on
* failure.
*/
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
+int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
{
- return call_int_hook(audit_rule_match, secid, field, op, lsmrule);
+ return call_int_hook(audit_rule_match, prop, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */
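On the caller side, audit code now carries a struct lsm_prop (one entry per
LSM) instead of a single u32 secid. A sketch of a subject-label filter; the
getter named below comes from the same lsm_prop conversion and is treated here
as an assumption, as is the surrounding function.

static int example_audit_filter_subj(u32 field, u32 op, void *lsm_rule)
{
	struct lsm_prop prop = { };

	security_current_getlsmprop_subj(&prop);
	return security_audit_rule_match(&prop, field, op, lsm_rule);
}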
@@ -5396,6 +5163,7 @@ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
* @cmd: command
* @attr: bpf attribute
* @size: size
+ * @kernel: whether or not the call originated from the kernel
*
 * Do an initial check for all bpf syscalls after the attribute is copied into
 * the kernel. The actual security modules can implement their own rules to
@@ -5403,9 +5171,9 @@ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
*
* Return: Returns 0 if permission is granted.
*/
-int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
- return call_int_hook(bpf, cmd, attr, size);
+ return call_int_hook(bpf, cmd, attr, size, kernel);
}
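The new @kernel flag lets LSMs tell user-initiated bpf(2) calls apart from
kernel-internal ones (for example, light-skeleton loaders). A hypothetical
call-site fragment; deriving the flag from a bpfptr_t as shown is an
assumption about how the syscall path supplies it.

static int example_bpf_precheck(int cmd, union bpf_attr *attr,
				unsigned int size, bpfptr_t uattr)
{
	/* A kernel bpfptr marks the request as originating in the kernel. */
	return security_bpf(cmd, attr, size, uattr.is_kernel);
}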
/**
@@ -5442,6 +5210,7 @@ int security_bpf_prog(struct bpf_prog *prog)
* @map: BPF map object
* @attr: BPF syscall attributes used to create BPF map
* @token: BPF token used to grant user access
+ * @kernel: whether or not the call originated from the kernel
*
* Do a check when the kernel creates a new BPF map. This is also the
* point where LSM blob is allocated for LSMs that need them.
@@ -5449,9 +5218,18 @@ int security_bpf_prog(struct bpf_prog *prog)
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_map_create, map, attr, token);
+ int rc;
+
+ rc = lsm_bpf_map_alloc(map);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_map_create, map, attr, token, kernel);
+ if (unlikely(rc))
+ security_bpf_map_free(map);
+ return rc;
}
/**
@@ -5459,6 +5237,7 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
* @prog: BPF program object
* @attr: BPF syscall attributes used to create BPF program
* @token: BPF token used to grant user access to BPF subsystem
+ * @kernel: whether or not the call originated from the kernel
*
* Perform an access control check when the kernel loads a BPF program and
* allocates associated BPF program object. This hook is also responsible for
@@ -5467,9 +5246,18 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_prog_load, prog, attr, token);
+ int rc;
+
+ rc = lsm_bpf_prog_alloc(prog);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel);
+ if (unlikely(rc))
+ security_bpf_prog_free(prog);
+ return rc;
}
/**
@@ -5484,9 +5272,18 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
- return call_int_hook(bpf_token_create, token, attr, path);
+ int rc;
+
+ rc = lsm_bpf_token_alloc(token);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_token_create, token, attr, path);
+ if (unlikely(rc))
+ security_bpf_token_free(token);
+ return rc;
}
/**
@@ -5530,6 +5327,8 @@ int security_bpf_token_capable(const struct bpf_token *token, int cap)
void security_bpf_map_free(struct bpf_map *map)
{
call_void_hook(bpf_map_free, map);
+ kfree(map->security);
+ map->security = NULL;
}
/**
@@ -5541,6 +5340,8 @@ void security_bpf_map_free(struct bpf_map *map)
void security_bpf_prog_free(struct bpf_prog *prog)
{
call_void_hook(bpf_prog_free, prog);
+ kfree(prog->aux->security);
+ prog->aux->security = NULL;
}
/**
@@ -5552,6 +5353,8 @@ void security_bpf_prog_free(struct bpf_prog *prog)
void security_bpf_token_free(struct bpf_token *token)
{
call_void_hook(bpf_token_free, token);
+ kfree(token->security);
+ token->security = NULL;
}
#endif /* CONFIG_BPF_SYSCALL */
@@ -5570,19 +5373,97 @@ int security_locked_down(enum lockdown_reason what)
}
EXPORT_SYMBOL(security_locked_down);
+/**
+ * security_bdev_alloc() - Allocate a block device LSM blob
+ * @bdev: block device
+ *
+ * Allocate and attach a security structure to @bdev->bd_security. The
+ * security field is initialized to NULL when the bdev structure is
+ * allocated.
+ *
+ * Return: Returns 0 if the operation was successful.
+ */
+int security_bdev_alloc(struct block_device *bdev)
+{
+	int rc;
+
+ rc = lsm_bdev_alloc(bdev);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bdev_alloc_security, bdev);
+ if (unlikely(rc))
+ security_bdev_free(bdev);
+
+ return rc;
+}
+EXPORT_SYMBOL(security_bdev_alloc);
+
+/**
+ * security_bdev_free() - Free a block device's LSM blob
+ * @bdev: block device
+ *
+ * Deallocate the bdev security structure and set @bdev->bd_security to NULL.
+ */
+void security_bdev_free(struct block_device *bdev)
+{
+ if (!bdev->bd_security)
+ return;
+
+ call_void_hook(bdev_free_security, bdev);
+
+ kfree(bdev->bd_security);
+ bdev->bd_security = NULL;
+}
+EXPORT_SYMBOL(security_bdev_free);
+
+/**
+ * security_bdev_setintegrity() - Set the device's integrity data
+ * @bdev: block device
+ * @type: type of integrity, e.g. hash digest, signature, etc
+ * @value: the integrity value
+ * @size: size of the integrity value
+ *
+ * Register a verified integrity measurement of a bdev with LSMs.
+ * LSMs should free the previously saved data if @value is NULL.
+ * The hook must be invoked every time the security information is updated
+ * so that the stored data stays current. For example, in dm-verity, if the
+ * mapping table is reloaded and configured to use a different dm-verity
+ * target with a new roothash and signing information, the previously stored
+ * data in the LSM blob becomes obsolete and must be refreshed through this
+ * hook. This follows from the design of device-mapper: a device-mapper
+ * device is created first, and its targets are loaded (and possibly
+ * replaced) later, multiple times over the device's lifetime. So while the
+ * LSM blob is allocated when the block device is created, its contents are
+ * not initialized at that point and can change substantially over time,
+ * including from data the LSMs trust to data they do not. Failing to
+ * handle these updates correctly could allow LSM checks to be bypassed.
+ *
+ * Return: Returns 0 on success, negative values on failure.
+ */
+int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value,
+ size_t size)
+{
+ return call_int_hook(bdev_setintegrity, bdev, type, value, size);
+}
+EXPORT_SYMBOL(security_bdev_setintegrity);
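As the comment above describes, dm-verity is the motivating caller: whenever a
verity target (re)loads its root hash, it pushes the new value down so the
LSM-held copy never goes stale. A sketch of such a caller; the integrity-type
enumerator and function name below are assumptions drawn from that use case.

static int example_verity_publish_roothash(struct block_device *bdev,
					   const u8 *root_digest,
					   size_t digest_size)
{
	return security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH,
					  root_digest, digest_size);
}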
+
#ifdef CONFIG_PERF_EVENTS
/**
* security_perf_event_open() - Check if a perf event open is allowed
- * @attr: perf event attribute
* @type: type of event
*
* Check whether the @type of perf_event_open syscall is allowed.
*
* Return: Returns 0 if permission is granted.
*/
-int security_perf_event_open(struct perf_event_attr *attr, int type)
+int security_perf_event_open(int type)
{
- return call_int_hook(perf_event_open, attr, type);
+ return call_int_hook(perf_event_open, type);
}
/**
@@ -5595,7 +5476,19 @@ int security_perf_event_open(struct perf_event_attr *attr, int type)
*/
int security_perf_event_alloc(struct perf_event *event)
{
- return call_int_hook(perf_event_alloc, event);
+ int rc;
+
+ rc = lsm_blob_alloc(&event->security, blob_sizes.lbs_perf_event,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(perf_event_alloc, event);
+ if (rc) {
+ kfree(event->security);
+ event->security = NULL;
+ }
+ return rc;
}
/**
@@ -5606,7 +5499,8 @@ int security_perf_event_alloc(struct perf_event *event)
*/
void security_perf_event_free(struct perf_event *event)
{
- call_void_hook(perf_event_free, event);
+ kfree(event->security);
+ event->security = NULL;
}
/**
@@ -5676,4 +5570,26 @@ int security_uring_cmd(struct io_uring_cmd *ioucmd)
{
return call_int_hook(uring_cmd, ioucmd);
}
+
+/**
+ * security_uring_allowed() - Check if io_uring_setup() is allowed
+ *
+ * Check whether the current task is allowed to call io_uring_setup().
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_uring_allowed(void)
+{
+ return call_int_hook(uring_allowed);
+}
#endif /* CONFIG_IO_URING */
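A sketch of the intended call site: io_uring_setup() asks the LSM before
creating a ring and fails the syscall on denial. The surrounding function is
illustrative only.

static long example_io_uring_setup(u32 entries, struct io_uring_params *p)
{
	long ret;

	ret = security_uring_allowed();
	if (ret)
		return ret;
	/* ... proceed with ring creation ... */
	return 0;
}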
+
+/**
+ * security_initramfs_populated() - Notify LSMs that initramfs has been loaded
+ *
+ * Tells the LSMs the initramfs has been unpacked into the rootfs.
+ */
+void security_initramfs_populated(void)
+{
+ call_void_hook(initramfs_populated);
+}
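Since this is a pure notification with no return value, the intended caller is
simply the initramfs unpacking path, roughly as sketched below; the
surrounding function name is illustrative.

static void example_populate_rootfs_done(void)
{
	/* ... unpack the initramfs into the rootfs ... */
	security_initramfs_populated();
}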