Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig | 4
-rw-r--r--  security/Kconfig.hardening | 36
-rw-r--r--  security/apparmor/apparmorfs.c | 20
-rw-r--r--  security/apparmor/label.c | 8
-rw-r--r--  security/commoncap.c | 6
-rw-r--r--  security/device_cgroup.c | 2
-rw-r--r--  security/inode.c | 21
-rw-r--r--  security/integrity/Kconfig | 2
-rw-r--r--  security/integrity/digsig.c | 48
-rw-r--r--  security/integrity/digsig_asymmetric.c | 4
-rw-r--r--  security/integrity/evm/evm_main.c | 8
-rw-r--r--  security/integrity/ima/Kconfig | 16
-rw-r--r--  security/integrity/ima/Makefile | 1
-rw-r--r--  security/integrity/ima/ima.h | 79
-rw-r--r--  security/integrity/ima/ima_api.c | 61
-rw-r--r--  security/integrity/ima/ima_appraise.c | 201
-rw-r--r--  security/integrity/ima/ima_crypto.c | 10
-rw-r--r--  security/integrity/ima/ima_init.c | 6
-rw-r--r--  security/integrity/ima/ima_main.c | 145
-rw-r--r--  security/integrity/ima/ima_modsig.c | 168
-rw-r--r--  security/integrity/ima/ima_policy.c | 230
-rw-r--r--  security/integrity/ima/ima_template.c | 52
-rw-r--r--  security/integrity/ima/ima_template_lib.c | 85
-rw-r--r--  security/integrity/ima/ima_template_lib.h | 8
-rw-r--r--  security/integrity/integrity.h | 26
-rw-r--r--  security/keys/Kconfig | 18
-rw-r--r--  security/keys/compat.c | 6
-rw-r--r--  security/keys/gc.c | 2
-rw-r--r--  security/keys/internal.h | 23
-rw-r--r--  security/keys/key.c | 36
-rw-r--r--  security/keys/keyctl.c | 96
-rw-r--r--  security/keys/keyring.c | 557
-rw-r--r--  security/keys/persistent.c | 10
-rw-r--r--  security/keys/proc.c | 7
-rw-r--r--  security/keys/process_keys.c | 327
-rw-r--r--  security/keys/request_key.c | 206
-rw-r--r--  security/keys/request_key_auth.c | 73
-rw-r--r--  security/keys/sysctl.c | 26
-rw-r--r--  security/keys/trusted.c | 10
-rw-r--r--  security/loadpin/loadpin.c | 54
-rw-r--r--  security/safesetid/lsm.c | 276
-rw-r--r--  security/safesetid/lsm.h | 34
-rw-r--r--  security/safesetid/securityfs.c | 308
-rw-r--r--  security/security.c | 29
-rw-r--r--  security/selinux/hooks.c | 62
-rw-r--r--  security/selinux/include/classmap.h | 5
-rw-r--r--  security/selinux/include/objsec.h | 20
-rw-r--r--  security/selinux/netif.c | 31
-rw-r--r--  security/selinux/netnode.c | 30
-rw-r--r--  security/selinux/netport.c | 24
-rw-r--r--  security/selinux/nlmsgtab.c | 5
-rw-r--r--  security/selinux/selinuxfs.c | 22
-rw-r--r--  security/selinux/ss/ebitmap.c | 10
-rw-r--r--  security/selinux/ss/policydb.c | 404
-rw-r--r--  security/selinux/ss/policydb.h | 2
-rw-r--r--  security/selinux/ss/services.c | 39
-rw-r--r--  security/selinux/ss/sidtab.c | 53
-rw-r--r--  security/selinux/ss/sidtab.h | 19
-rw-r--r--  security/smack/smack_access.c | 6
-rw-r--r--  security/smack/smack_lsm.c | 40
-rw-r--r--  security/smack/smackfs.c | 34
-rw-r--r--  security/yama/yama_lsm.c | 3
62 files changed, 2827 insertions, 1327 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 7c62d446e209..2a1a2d396228 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -63,7 +63,7 @@ config PAGE_TABLE_ISOLATION
ensuring that the majority of kernel addresses are not mapped
into userspace.
- See Documentation/x86/pti.txt for more details.
+ See Documentation/x86/pti.rst for more details.
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
@@ -121,7 +121,7 @@ config INTEL_TXT
See <http://www.intel.com/technology/security/> for more information
about Intel(R) TXT.
See <http://tboot.sourceforge.net> for more information about tboot.
- See Documentation/intel_txt.txt for a description of how to enable
+ See Documentation/x86/intel_txt.rst for a description of how to enable
Intel TXT support in a kernel boot.
If you are unsure as to whether this is required, answer N.
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c6cb2d9b2905..af4c979b38ee 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -61,6 +61,7 @@ choice
config GCC_PLUGIN_STRUCTLEAK_BYREF
bool "zero-init structs passed by reference (strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any structures on the stack that may
@@ -70,9 +71,15 @@ choice
exposures, like CVE-2017-1000410:
https://git.kernel.org/linus/06e7e776ca4d3654
+ As a side-effect, this keeps a lot of variables on the
+ stack that can otherwise be optimized out, so combining
+ this with CONFIG_KASAN_STACK can lead to a stack overflow
+ and is disallowed.
+
config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
bool "zero-init anything passed by reference (very strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any stack variables that may be passed
@@ -160,6 +167,35 @@ config STACKLEAK_RUNTIME_DISABLE
runtime to control kernel stack erasing for kernels built with
CONFIG_GCC_PLUGIN_STACKLEAK.
+config INIT_ON_ALLOC_DEFAULT_ON
+ bool "Enable heap memory zeroing on allocation by default"
+ help
+ This has the effect of setting "init_on_alloc=1" on the kernel
+ command line. This can be disabled with "init_on_alloc=0".
+ When "init_on_alloc" is enabled, all page allocator and slab
+ allocator memory will be zeroed when allocated, eliminating
+ many kinds of "uninitialized heap memory" flaws, especially
+ heap content exposures. The performance impact varies by
+ workload, but most cases see <1% impact. Some synthetic
+ workloads have measured as high as 7%.
+
+config INIT_ON_FREE_DEFAULT_ON
+ bool "Enable heap memory zeroing on free by default"
+ help
+ This has the effect of setting "init_on_free=1" on the kernel
+ command line. This can be disabled with "init_on_free=0".
+ Similar to "init_on_alloc", when "init_on_free" is enabled,
+ all page allocator and slab allocator memory will be zeroed
+ when freed, eliminating many kinds of "uninitialized heap memory"
+ flaws, especially heap content exposures. The primary difference
+ with "init_on_free" is that data lifetime in memory is reduced,
+ as anything freed is wiped immediately, making live forensics or
+ cold boot memory attacks unable to recover freed memory contents.
+ The performance impact varies by workload, but is more expensive
+ than "init_on_alloc" due to the negative cache effects of
+ touching "cold" memory areas. Most cases see 3-5% impact. Some
+ synthetic workloads have measured as high as 8%.
+
endmenu
endmenu
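For illustration, the bug class that the two INIT_ON_*_DEFAULT_ON options (and the STRUCTLEAK plugins above) mitigate — a sketch with assumed names, not taken from this patch. A caller forgets to initialize part of an allocation and later exposes it; with init_on_alloc=1 the page and slab allocators hand back zeroed memory, so the unset field reads as zero instead of leaking stale heap contents:

#include <linux/slab.h>
#include <linux/types.h>

struct sess_info {
	u64	token;
	char	comm[32];		/* never written below */
};

static struct sess_info *sess_alloc(void)
{
	/* Plain kmalloc(): contents are stale unless init_on_alloc=1. */
	struct sess_info *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		s->token = 0;		/* 'comm' left uninitialized by mistake */
	return s;
}

The proper per-call fix is kzalloc(); the config options provide the same guarantee tree-wide as a hardening default, at the performance cost described in the help text above.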
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 66d0b4245ef6..45d13b6462aa 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -19,6 +19,7 @@
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/poll.h>
#include <uapi/linux/major.h>
#include <uapi/linux/magic.h>
@@ -132,7 +133,7 @@ static const struct super_operations aafs_super_ops = {
.show_path = aafs_show_path,
};
-static int fill_super(struct super_block *sb, void *data, int silent)
+static int apparmorfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static struct tree_descr files[] = { {""} };
int error;
@@ -145,16 +146,25 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct dentry *aafs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int apparmorfs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, fill_super);
+ return get_tree_single(fc, apparmorfs_fill_super);
+}
+
+static const struct fs_context_operations apparmorfs_context_ops = {
+ .get_tree = apparmorfs_get_tree,
+};
+
+static int apparmorfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &apparmorfs_context_ops;
+ return 0;
}
static struct file_system_type aafs_ops = {
.owner = THIS_MODULE,
.name = AAFS_NAME,
- .mount = aafs_mount,
+ .init_fs_context = apparmorfs_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 068e93c5d29c..59f1cc2557a7 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -76,7 +76,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
AA_BUG(!orig);
AA_BUG(!new);
- lockdep_assert_held_exclusive(&labels_set(orig)->lock);
+ lockdep_assert_held_write(&labels_set(orig)->lock);
tmp = rcu_dereference_protected(orig->proxy->label,
&labels_ns(orig)->lock);
@@ -566,7 +566,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!label);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
if (new)
__aa_proxy_redirect(label, new);
@@ -603,7 +603,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!old);
AA_BUG(!new);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
AA_BUG(new->flags & FLAG_IN_TREE);
if (!label_is_stale(old))
@@ -640,7 +640,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
AA_BUG(!ls);
AA_BUG(!label);
AA_BUG(labels_set(label) != ls);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
AA_BUG(label->flags & FLAG_IN_TREE);
/* Figure out where to put new node */
diff --git a/security/commoncap.c b/security/commoncap.c
index c0b9664ee49e..f4ee0ae106b2 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -915,7 +915,7 @@ int cap_inode_setxattr(struct dentry *dentry, const char *name,
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) != 0)
+ XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
/*
@@ -947,7 +947,7 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) != 0)
+ XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
if (strcmp(name, XATTR_NAME_CAPS) == 0) {
@@ -1339,7 +1339,7 @@ int cap_mmap_file(struct file *file, unsigned long reqprot,
#ifdef CONFIG_SECURITY
-struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
+static struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index dc28914fa72e..725674f3276d 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -509,7 +509,7 @@ static inline int may_allow_all(struct dev_cgroup *parent)
* This is one of the three key functions for hierarchy implementation.
* This function is responsible for re-evaluating all the cgroup's active
* exceptions due to a parent's exception change.
- * Refer to Documentation/cgroup-v1/devices.txt for more details.
+ * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
*/
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
diff --git a/security/inode.c b/security/inode.c
index fcff7f08bb1c..6c326939750d 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -13,6 +13,7 @@
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
@@ -36,7 +37,7 @@ static const struct super_operations securityfs_super_operations = {
.free_inode = securityfs_free_inode,
};
-static int fill_super(struct super_block *sb, void *data, int silent)
+static int securityfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr files[] = {{""}};
int error;
@@ -50,17 +51,25 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct dentry *get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int securityfs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, fill_super);
+ return get_tree_single(fc, securityfs_fill_super);
+}
+
+static const struct fs_context_operations securityfs_context_ops = {
+ .get_tree = securityfs_get_tree,
+};
+
+static int securityfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &securityfs_context_ops;
+ return 0;
}
static struct file_system_type fs_type = {
.owner = THIS_MODULE,
.name = "securityfs",
- .mount = get_sb,
+ .init_fs_context = securityfs_init_fs_context,
.kill_sb = kill_litter_super,
};
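Both conversions above follow the same mount-API pattern. Put together in one piece for a hypothetical "examplefs" (assumed name and magic constant; get_tree_single(), fs_context_operations and simple_fill_super() are the real APIs used by the hunks above):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>

#define EXAMPLEFS_MAGIC	0x45584653	/* hypothetical magic number */

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr files[] = { {""} };

	return simple_fill_super(sb, EXAMPLEFS_MAGIC, files);
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.init_fs_context = examplefs_init_fs_context,
	.kill_sb	= kill_litter_super,
};

The legacy .mount/mount_single() pair goes away: the VFS now builds an fs_context first and calls .get_tree on it, which is what allows mounts to be configured and instantiated as separate steps under the new mount API.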
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index c352532b8f84..0bae6adb63a9 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -18,8 +18,8 @@ if INTEGRITY
config INTEGRITY_SIGNATURE
bool "Digital signature verification using multiple keyrings"
- depends on KEYS
default n
+ select KEYS
select SIGNATURE
help
This option enables digital signature verification support
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4582bc26770a..ea1aae3d07b3 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -39,11 +39,10 @@ static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
#define restrict_link_to_ima restrict_link_by_builtin_trusted
#endif
-int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen)
+static struct key *integrity_keyring_from_id(const unsigned int id)
{
- if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)
- return -EINVAL;
+ if (id >= INTEGRITY_KEYRING_MAX)
+ return ERR_PTR(-EINVAL);
if (!keyring[id]) {
keyring[id] =
@@ -52,25 +51,52 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
int err = PTR_ERR(keyring[id]);
pr_err("no %s keyring: %d\n", keyring_name[id], err);
keyring[id] = NULL;
- return err;
+ return ERR_PTR(err);
}
}
+ return keyring[id];
+}
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ struct key *keyring;
+
+ if (siglen < 2)
+ return -EINVAL;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
switch (sig[1]) {
case 1:
/* v1 API expect signature without xattr type */
- return digsig_verify(keyring[id], sig + 1, siglen - 1,
- digest, digestlen);
+ return digsig_verify(keyring, sig + 1, siglen - 1, digest,
+ digestlen);
case 2:
- return asymmetric_verify(keyring[id], sig, siglen,
- digest, digestlen);
+ return asymmetric_verify(keyring, sig, siglen, digest,
+ digestlen);
}
return -EOPNOTSUPP;
}
-static int __integrity_init_keyring(const unsigned int id, key_perm_t perm,
- struct key_restriction *restriction)
+int integrity_modsig_verify(const unsigned int id, const struct modsig *modsig)
+{
+ struct key *keyring;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ return ima_modsig_verify(keyring, modsig);
+}
+
+static int __init __integrity_init_keyring(const unsigned int id,
+ key_perm_t perm,
+ struct key_restriction *restriction)
{
const struct cred *cred = current_cred();
int err = 0;
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index ad4b323ecea1..55aec161d0e1 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -35,7 +35,7 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
key_ref_t kref;
kref = keyring_search(make_key_ref(key, 1),
- &key_type_asymmetric, name);
+ &key_type_asymmetric, name, true);
if (!IS_ERR(kref)) {
pr_err("Key '%s' is in ima_blacklist_keyring\n", name);
return ERR_PTR(-EKEYREJECTED);
@@ -47,7 +47,7 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1),
- &key_type_asymmetric, name);
+ &key_type_asymmetric, name, true);
if (IS_ERR(kref))
key = ERR_CAST(kref);
else
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 5bbd8b4dc29a..f9a81b187fae 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -166,7 +166,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
/* check value type */
switch (xattr_data->type) {
case EVM_XATTR_HMAC:
- if (xattr_len != sizeof(struct evm_ima_xattr_data)) {
+ if (xattr_len != sizeof(struct evm_xattr)) {
evm_status = INTEGRITY_FAIL;
goto out;
}
@@ -176,7 +176,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
xattr_value_len, &digest);
if (rc)
break;
- rc = crypto_memneq(xattr_data->digest, digest.digest,
+ rc = crypto_memneq(xattr_data->data, digest.digest,
SHA1_DIGEST_SIZE);
if (rc)
rc = -EINVAL;
@@ -520,7 +520,7 @@ int evm_inode_init_security(struct inode *inode,
const struct xattr *lsm_xattr,
struct xattr *evm_xattr)
{
- struct evm_ima_xattr_data *xattr_data;
+ struct evm_xattr *xattr_data;
int rc;
if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
@@ -530,7 +530,7 @@ int evm_inode_init_security(struct inode *inode,
if (!xattr_data)
return -ENOMEM;
- xattr_data->type = EVM_XATTR_HMAC;
+ xattr_data->data.type = EVM_XATTR_HMAC;
rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
if (rc < 0)
goto out;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 32cd25fa44a5..838476d780e5 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -160,7 +160,8 @@ config IMA_APPRAISE
config IMA_ARCH_POLICY
bool "Enable loading an IMA architecture specific policy"
- depends on KEXEC_SIG || IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ depends on (KEXEC_SIG && IMA) || IMA_APPRAISE \
+ && INTEGRITY_ASYMMETRIC_KEYS
default n
help
This option enables loading an IMA architecture specific policy
@@ -232,6 +233,19 @@ config IMA_APPRAISE_BOOTPARAM
This option enables the different "ima_appraise=" modes
(eg. fix, log) from the boot command line.
+config IMA_APPRAISE_MODSIG
+ bool "Support module-style signatures for appraisal"
+ depends on IMA_APPRAISE
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select PKCS7_MESSAGE_PARSER
+ select MODULE_SIG_FORMAT
+ default n
+ help
+ Adds support for signatures appended to files. The format of the
+ appended signature is the same used for signed kernel modules.
+ The modsig keyword can be used in the IMA policy to allow a hook
+ to accept such signatures.
+
config IMA_TRUSTED_KEYRING
bool "Require all keys on the .ima keyring be signed (deprecated)"
depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
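As an illustration of the new policy keyword (syntax taken from the ima_policy.c changes later in this diff, not from separate documentation), a rule such as "appraise func=MODULE_CHECK appraise_type=imasig|modsig" accepts either a security.ima signature or an appended module-style signature for that hook, and a measure rule can add "template=" naming a template whose format carries the d-modsig and modsig fields so the appended signature is also recorded in the measurement list.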
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index d921dc4f9eb0..31d57cdf2421 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o
ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 874bd77d3b91..3689081aaf38 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -60,7 +60,10 @@ struct ima_event_data {
const unsigned char *filename;
struct evm_ima_xattr_data *xattr_value;
int xattr_len;
+ const struct modsig *modsig;
const char *violation;
+ const void *buf;
+ int buf_len;
};
/* IMA template field data definition */
@@ -144,7 +147,12 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
int ima_init_crypto(void);
void ima_putc(struct seq_file *m, void *data, int datalen);
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields);
struct ima_template_desc *ima_template_desc_current(void);
+struct ima_template_desc *lookup_template_desc(const char *name);
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
@@ -152,6 +160,8 @@ unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
int __init ima_init_digests(void);
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data);
/*
* used to protect h_table and sha_table
@@ -182,6 +192,7 @@ static inline unsigned long ima_hash_key(u8 *digest)
hook(KEXEC_KERNEL_CHECK) \
hook(KEXEC_INITRAMFS_CHECK) \
hook(POLICY_CHECK) \
+ hook(KEXEC_CMDLINE) \
hook(MAX_CHECK)
#define __ima_hook_enumify(ENUM) ENUM,
@@ -189,21 +200,28 @@ enum ima_hooks {
__ima_hooks(__ima_hook_enumify)
};
+extern const char *const func_tokens[];
+
+struct modsig;
+
/* LIM API function definitions */
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
- int mask, enum ima_hooks func, int *pcr);
+ int mask, enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc);
int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file, void *buf, loff_t size,
- enum hash_algo algo);
+ enum hash_algo algo, struct modsig *modsig);
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr);
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
int ima_alloc_init_template(struct ima_event_data *event_data,
- struct ima_template_entry **entry);
+ struct ima_template_entry **entry,
+ struct ima_template_desc *template_desc);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode,
const unsigned char *filename, int pcr);
@@ -212,7 +230,8 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
/* IMA policy related functions */
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
- enum ima_hooks func, int mask, int flags, int *pcr);
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc);
void ima_init_policy(void);
void ima_update_policy(void);
void ima_update_policy_flag(void);
@@ -238,7 +257,7 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len);
+ int xattr_len, const struct modsig *modsig);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -254,7 +273,8 @@ static inline int ima_appraise_measurement(enum ima_hooks func,
struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len,
+ const struct modsig *modsig)
{
return INTEGRITY_UNKNOWN;
}
@@ -291,6 +311,51 @@ static inline int ima_read_xattr(struct dentry *dentry,
#endif /* CONFIG_IMA_APPRAISE */
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+bool ima_hook_supports_modsig(enum ima_hooks func);
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig);
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size);
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size);
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len);
+void ima_free_modsig(struct modsig *modsig);
+#else
+static inline bool ima_hook_supports_modsig(enum ima_hooks func)
+{
+ return false;
+}
+
+static inline int ima_read_modsig(enum ima_hooks func, const void *buf,
+ loff_t buf_len, struct modsig **modsig)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_collect_modsig(struct modsig *modsig, const void *buf,
+ loff_t size)
+{
+}
+
+static inline int ima_get_modsig_digest(const struct modsig *modsig,
+ enum hash_algo *algo, const u8 **digest,
+ u32 *digest_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ima_get_raw_modsig(const struct modsig *modsig,
+ const void **data, u32 *data_len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_free_modsig(struct modsig *modsig)
+{
+}
+#endif /* CONFIG_IMA_APPRAISE_MODSIG */
+
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 35c129cbb7e9..610759fe63b8 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -34,13 +34,19 @@ void ima_free_template_entry(struct ima_template_entry *entry)
* ima_alloc_init_template - create and initialize a new template entry
*/
int ima_alloc_init_template(struct ima_event_data *event_data,
- struct ima_template_entry **entry)
+ struct ima_template_entry **entry,
+ struct ima_template_desc *desc)
{
- struct ima_template_desc *template_desc = ima_template_desc_current();
+ struct ima_template_desc *template_desc;
int i, result = 0;
- *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
- sizeof(struct ima_field_data), GFP_NOFS);
+ if (desc)
+ template_desc = desc;
+ else
+ template_desc = ima_template_desc_current();
+
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
if (!*entry)
return -ENOMEM;
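The allocation above switches to the struct_size() helper from <linux/overflow.h>, which computes the size of a structure ending in a flexible array member plus a given number of trailing elements, saturating on overflow instead of wrapping. A minimal sketch of the idiom with assumed names (not part of this patch):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_entry {
	int			num_fields;
	struct ima_field_data	template_data[];	/* flexible array member */
};

static struct example_entry *example_alloc(int num_fields)
{
	struct example_entry *e;

	/* sizeof(*e) + num_fields * sizeof(e->template_data[0]), overflow-checked */
	e = kzalloc(struct_size(e, template_data, num_fields), GFP_NOFS);
	if (e)
		e->num_fields = num_fields;
	return e;
}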
@@ -129,15 +135,17 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
{
struct ima_template_entry *entry;
struct inode *inode = file_inode(file);
- struct ima_event_data event_data = {iint, file, filename, NULL, 0,
- cause};
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .violation = cause };
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
if (result < 0) {
result = -ENOMEM;
goto err_out;
@@ -160,11 +168,13 @@ err_out:
* MAY_APPEND)
* @func: caller identifier
* @pcr: pointer filled in if matched measure policy sets pcr=
+ * @template_desc: pointer filled in if matched measure policy sets template=
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
* func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
+ * | KEXEC_CMDLINE
* mask: contains the permission mask
* fsmagic: hex value
*
@@ -172,13 +182,15 @@ err_out:
*
*/
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
- int mask, enum ima_hooks func, int *pcr)
+ int mask, enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc)
{
int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
flags &= ima_policy_flag;
- return ima_match_policy(inode, cred, secid, func, mask, flags, pcr);
+ return ima_match_policy(inode, cred, secid, func, mask, flags, pcr,
+ template_desc);
}
/*
@@ -193,7 +205,7 @@ int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
*/
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file, void *buf, loff_t size,
- enum hash_algo algo)
+ enum hash_algo algo, struct modsig *modsig)
{
const char *audit_cause = "failed";
struct inode *inode = file_inode(file);
@@ -207,6 +219,14 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
char digest[IMA_MAX_DIGEST_SIZE];
} hash;
+ /*
+ * Always collect the modsig, because IMA might have already collected
+ * the file digest without collecting the modsig in a previous
+ * measurement rule.
+ */
+ if (modsig)
+ ima_collect_modsig(modsig, buf, size);
+
if (iint->flags & IMA_COLLECTED)
goto out;
@@ -273,21 +293,32 @@ out:
void ima_store_measurement(struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr)
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc)
{
static const char op[] = "add_template_measure";
static const char audit_cause[] = "ENOMEM";
int result = -ENOMEM;
struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
- struct ima_event_data event_data = {iint, file, filename, xattr_value,
- xattr_len, NULL};
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .xattr_value = xattr_value,
+ .xattr_len = xattr_len,
+ .modsig = modsig };
int violation = 0;
- if (iint->measured_pcrs & (0x1 << pcr))
+ /*
+ * We still need to store the measurement in the case of MODSIG because
+ * we only have its contents to put in the list at the time of
+ * appraisal, but a file measurement from earlier might already exist in
+ * the measurement list.
+ */
+ if (iint->measured_pcrs & (0x1 << pcr) && !modsig)
return;
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, template_desc);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index f0cd67cab6aa..136ae4e0ee92 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -54,7 +54,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
security_task_getsecid(current, &secid);
return ima_match_policy(inode, current_cred(), secid, func, mask,
- IMA_APPRAISE | IMA_HASH, NULL);
+ IMA_APPRAISE | IMA_HASH, NULL, NULL);
}
static int ima_fix_xattr(struct dentry *dentry,
@@ -165,7 +165,8 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
return sig->hash_algo;
break;
case IMA_XATTR_DIGEST_NG:
- ret = xattr_value->digest[0];
+ /* first byte contains algorithm id */
+ ret = xattr_value->data[0];
if (ret < HASH_ALGO__LAST)
return ret;
break;
@@ -173,7 +174,7 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
/* this is for backward compatibility */
if (xattr_len == 21) {
unsigned int zero = 0;
- if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ if (!memcmp(&xattr_value->data[16], &zero, 4))
return HASH_ALGO_MD5;
else
return HASH_ALGO_SHA1;
@@ -199,6 +200,110 @@ int ima_read_xattr(struct dentry *dentry,
}
/*
+ * xattr_verify - verify xattr digest or signature
+ *
+ * Verify whether the hash or signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ enum integrity_status *status, const char **cause)
+{
+ int rc = -EINVAL, hash_start = 0;
+
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ /* fall through */
+ case IMA_XATTR_DIGEST:
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /*
+ * xattr length may be longer. md5 hash in previous
+ * version occupied 20 bytes in xattr, instead of 16
+ */
+ rc = memcmp(&xattr_value->data[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ *cause = "invalid-hash";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ *status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ *status = INTEGRITY_UNKNOWN;
+ break;
+ }
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+ break;
+ default:
+ *status = INTEGRITY_UNKNOWN;
+ *cause = "unknown-ima-data";
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * modsig_verify - verify modsig signature
+ *
+ * Verify whether the signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int modsig_verify(enum ima_hooks func, const struct modsig *modsig,
+ enum integrity_status *status, const char **cause)
+{
+ int rc;
+
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_IMA, modsig);
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ modsig);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ return rc;
+}
+
+/*
* ima_appraise_measurement - appraise file measurement
*
* Call evm_verifyxattr() to verify the integrity of 'security.ima'.
@@ -210,19 +315,22 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len, const struct modsig *modsig)
{
static const char op[] = "appraise_data";
const char *cause = "unknown";
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_backing_inode(dentry);
enum integrity_status status = INTEGRITY_UNKNOWN;
- int rc = xattr_len, hash_start = 0;
+ int rc = xattr_len;
+ bool try_modsig = iint->flags & IMA_MODSIG_ALLOWED && modsig;
- if (!(inode->i_opflags & IOP_XATTR))
+ /* If not appraising a modsig, we need an xattr. */
+ if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
return INTEGRITY_UNKNOWN;
- if (rc <= 0) {
+ /* If reading the xattr failed and there's no modsig, error out. */
+ if (rc <= 0 && !try_modsig) {
if (rc && rc != -ENODATA)
goto out;
@@ -245,6 +353,10 @@ int ima_appraise_measurement(enum ima_hooks func,
case INTEGRITY_UNKNOWN:
break;
case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
+ /* It's fine not to have xattrs when using a modsig. */
+ if (try_modsig)
+ break;
+ /* fall through */
case INTEGRITY_NOLABEL: /* No security.evm xattr. */
cause = "missing-HMAC";
goto out;
@@ -255,65 +367,18 @@ int ima_appraise_measurement(enum ima_hooks func,
WARN_ONCE(true, "Unexpected integrity status %d\n", status);
}
- switch (xattr_value->type) {
- case IMA_XATTR_DIGEST_NG:
- /* first byte contains algorithm id */
- hash_start = 1;
- /* fall through */
- case IMA_XATTR_DIGEST:
- if (iint->flags & IMA_DIGSIG_REQUIRED) {
- cause = "IMA-signature-required";
- status = INTEGRITY_FAIL;
- break;
- }
- clear_bit(IMA_DIGSIG, &iint->atomic_flags);
- if (xattr_len - sizeof(xattr_value->type) - hash_start >=
- iint->ima_hash->length)
- /* xattr length may be longer. md5 hash in previous
- version occupied 20 bytes in xattr, instead of 16
- */
- rc = memcmp(&xattr_value->digest[hash_start],
- iint->ima_hash->digest,
- iint->ima_hash->length);
- else
- rc = -EINVAL;
- if (rc) {
- cause = "invalid-hash";
- status = INTEGRITY_FAIL;
- break;
- }
- status = INTEGRITY_PASS;
- break;
- case EVM_IMA_XATTR_DIGSIG:
- set_bit(IMA_DIGSIG, &iint->atomic_flags);
- rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
- (const char *)xattr_value,
- xattr_len,
- iint->ima_hash->digest,
- iint->ima_hash->length);
- if (rc == -EOPNOTSUPP) {
- status = INTEGRITY_UNKNOWN;
- break;
- }
- if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
- func == KEXEC_KERNEL_CHECK)
- rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
- (const char *)xattr_value,
- xattr_len,
- iint->ima_hash->digest,
- iint->ima_hash->length);
- if (rc) {
- cause = "invalid-signature";
- status = INTEGRITY_FAIL;
- } else {
- status = INTEGRITY_PASS;
- }
- break;
- default:
- status = INTEGRITY_UNKNOWN;
- cause = "unknown-ima-data";
- break;
- }
+ if (xattr_value)
+ rc = xattr_verify(func, iint, xattr_value, xattr_len, &status,
+ &cause);
+
+ /*
+ * If we have a modsig and either no imasig or the imasig's key isn't
+ * known, then try verifying the modsig.
+ */
+ if (try_modsig &&
+ (!xattr_value || xattr_value->type == IMA_XATTR_DIGEST_NG ||
+ rc == -ENOKEY))
+ rc = modsig_verify(func, modsig, &status, &cause);
out:
/*
@@ -331,7 +396,7 @@ out:
op, cause, rc, 0);
} else if (status != INTEGRITY_PASS) {
/* Fix mode, but don't replace file signatures. */
- if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ if ((ima_appraise & IMA_APPRAISE_FIX) && !try_modsig &&
(!xattr_value ||
xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
if (!ima_fix_xattr(dentry, iint))
@@ -370,7 +435,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
!(iint->flags & IMA_HASH))
return;
- rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo, NULL);
if (rc < 0)
return;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index d4c7b8e1b083..73044fc6a952 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -268,8 +268,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
rc = integrity_kernel_read(file, offset, rbuf[active],
rbuf_len);
- if (rc != rbuf_len)
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
goto out3;
+ }
if (rbuf[1] && offset) {
/* Using two buffers, and it is not the first
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 1e47c1026471..5d55ade5f3b9 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -45,8 +45,8 @@ static int __init ima_add_boot_aggregate(void)
const char *audit_cause = "ENOMEM";
struct ima_template_entry *entry;
struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
- struct ima_event_data event_data = {iint, NULL, boot_aggregate_name,
- NULL, 0, NULL};
+ struct ima_event_data event_data = { .iint = iint,
+ .filename = boot_aggregate_name };
int result = -ENOMEM;
int violation = 0;
struct {
@@ -68,7 +68,7 @@ static int __init ima_add_boot_aggregate(void)
}
}
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
if (result < 0) {
audit_cause = "alloc_entry";
goto err_out;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 1747bc7bcb60..60027c643ecd 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -39,6 +39,10 @@ int ima_appraise;
int ima_hash_algo = HASH_ALGO_SHA1;
static int hash_setup_done;
+static struct notifier_block ima_lsm_policy_notifier = {
+ .notifier_call = ima_lsm_policy_change,
+};
+
static int __init hash_setup(char *str)
{
struct ima_template_desc *template_desc = ima_template_desc_current();
@@ -68,6 +72,27 @@ out:
}
__setup("ima_hash=", hash_setup);
+/* Prevent mmap'ing a file execute that is already mmap'ed write */
+static int mmap_violation_check(enum ima_hooks func, struct file *file,
+ char **pathbuf, const char **pathname,
+ char *filename)
+{
+ struct inode *inode;
+ int rc = 0;
+
+ if ((func == MMAP_CHECK) && mapping_writably_mapped(file->f_mapping)) {
+ rc = -ETXTBSY;
+ inode = file_inode(file);
+
+ if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ *pathname = ima_d_path(&file->f_path, pathbuf,
+ filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname,
+ "mmap_file", "mmapped_writers", rc, 0);
+ }
+ return rc;
+}
+
/*
* ima_rdwr_violation_check
*
@@ -170,13 +195,14 @@ static int process_measurement(struct file *file, const struct cred *cred,
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint = NULL;
- struct ima_template_desc *template_desc;
+ struct ima_template_desc *template_desc = NULL;
char *pathbuf = NULL;
char filename[NAME_MAX];
const char *pathname = NULL;
int rc = 0, action, must_appraise = 0;
int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
struct evm_ima_xattr_data *xattr_value = NULL;
+ struct modsig *modsig = NULL;
int xattr_len = 0;
bool violation_check;
enum hash_algo hash_algo;
@@ -188,7 +214,8 @@ static int process_measurement(struct file *file, const struct cred *cred,
* bitmask based on the appraise/audit/measurement policy.
* Included is the appraise submask.
*/
- action = ima_get_action(inode, cred, secid, mask, func, &pcr);
+ action = ima_get_action(inode, cred, secid, mask, func, &pcr,
+ &template_desc);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
(ima_policy_flag & IMA_MEASURE));
if (!action && !violation_check)
@@ -266,20 +293,37 @@ static int process_measurement(struct file *file, const struct cred *cred,
/* Nothing to do, just return existing appraised status */
if (!action) {
- if (must_appraise)
- rc = ima_get_cache_status(iint, func);
+ if (must_appraise) {
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ if (!rc)
+ rc = ima_get_cache_status(iint, func);
+ }
goto out_locked;
}
- template_desc = ima_template_desc_current();
if ((action & IMA_APPRAISE_SUBMASK) ||
- strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) {
/* read 'security.ima' */
xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ /*
+ * Read the appended modsig if allowed by the policy, and allow
+ * an additional measurement list entry, if needed, based on the
+ * template format and whether the file was already measured.
+ */
+ if (iint->flags & IMA_MODSIG_ALLOWED) {
+ rc = ima_read_modsig(func, buf, size, &modsig);
+
+ if (!rc && ima_template_has_modsig(template_desc) &&
+ iint->flags & IMA_MEASURED)
+ action |= IMA_MEASURE;
+ }
+ }
+
hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
- rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
if (rc != 0 && rc != -EBADF && rc != -EINVAL)
goto out_locked;
@@ -288,12 +332,16 @@ static int process_measurement(struct file *file, const struct cred *cred,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
- xattr_value, xattr_len, pcr);
+ xattr_value, xattr_len, modsig, pcr,
+ template_desc);
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
- xattr_value, xattr_len);
+ xattr_value, xattr_len, modsig);
inode_unlock(inode);
+ if (!rc)
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
}
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
@@ -306,6 +354,7 @@ out_locked:
rc = -EACCES;
mutex_unlock(&iint->mutex);
kfree(xattr_value);
+ ima_free_modsig(modsig);
out:
if (pathbuf)
__putname(pathbuf);
@@ -572,6 +621,80 @@ int ima_load_data(enum kernel_load_data_id id)
return 0;
}
+/*
+ * process_buffer_measurement - Measure the buffer to ima log.
+ * @buf: pointer to the buffer that needs to be added to the log.
+ * @size: size of buffer(in bytes).
+ * @eventname: event name to be used for the buffer entry.
+ * @cred: a pointer to a credentials structure for user validation.
+ * @secid: the secid of the task to be validated.
+ *
+ * Based on policy, the buffer is measured into the ima log.
+ */
+static void process_buffer_measurement(const void *buf, int size,
+ const char *eventname,
+ const struct cred *cred, u32 secid)
+{
+ int ret = 0;
+ struct ima_template_entry *entry = NULL;
+ struct integrity_iint_cache iint = {};
+ struct ima_event_data event_data = {.iint = &iint,
+ .filename = eventname,
+ .buf = buf,
+ .buf_len = size};
+ struct ima_template_desc *template_desc = NULL;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash = {};
+ int violation = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ int action = 0;
+
+ action = ima_get_action(NULL, cred, secid, 0, KEXEC_CMDLINE, &pcr,
+ &template_desc);
+ if (!(action & IMA_MEASURE))
+ return;
+
+ iint.ima_hash = &hash.hdr;
+ iint.ima_hash->algo = ima_hash_algo;
+ iint.ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ ret = ima_calc_buffer_hash(buf, size, iint.ima_hash);
+ if (ret < 0)
+ goto out;
+
+ ret = ima_alloc_init_template(&event_data, &entry, template_desc);
+ if (ret < 0)
+ goto out;
+
+ ret = ima_store_template(entry, violation, NULL, buf, pcr);
+
+ if (ret < 0)
+ ima_free_template_entry(entry);
+
+out:
+ return;
+}
+
+/**
+ * ima_kexec_cmdline - measure kexec cmdline boot args
+ * @buf: pointer to buffer
+ * @size: size of buffer
+ *
+ * Buffers can only be measured, not appraised.
+ */
+void ima_kexec_cmdline(const void *buf, int size)
+{
+ u32 secid;
+
+ if (buf && size != 0) {
+ security_task_getsecid(current, &secid);
+ process_buffer_measurement(buf, size, "kexec-cmdline",
+ current_cred(), secid);
+ }
+}
+
static int __init init_ima(void)
{
int error;
@@ -589,6 +712,10 @@ static int __init init_ima(void)
error = ima_init();
}
+ error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
+ if (error)
+ pr_warn("Couldn't register LSM notifier, error %d\n", error);
+
if (!error)
ima_update_policy_flag();
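ima_kexec_cmdline() is the caller-facing entry point for the new KEXEC_CMDLINE hook; measurement happens only when a policy rule such as "measure func=KEXEC_CMDLINE" matches (rule parsing is in the ima_policy.c changes below). A hypothetical call-site sketch — the actual kexec_file_load() wiring is a separate change, not shown in this diff:

/* Hand the next kernel's command line to IMA before it is staged for kexec. */
static void example_measure_kexec_cmdline(const char *cmdline, int len)
{
	ima_kexec_cmdline(cmdline, len);	/* no-op unless a KEXEC_CMDLINE rule matches */
}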
diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
new file mode 100644
index 000000000000..d106885cc495
--- /dev/null
+++ b/security/integrity/ima/ima_modsig.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IMA support for appraising module-style appended signatures.
+ *
+ * Copyright (C) 2019 IBM Corporation
+ *
+ * Author:
+ * Thiago Jung Bauermann <bauerman@linux.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module_signature.h>
+#include <keys/asymmetric-type.h>
+#include <crypto/pkcs7.h>
+
+#include "ima.h"
+
+struct modsig {
+ struct pkcs7_message *pkcs7_msg;
+
+ enum hash_algo hash_algo;
+
+ /* This digest will go in the 'd-modsig' field of the IMA template. */
+ const u8 *digest;
+ u32 digest_size;
+
+ /*
+ * This is what will go to the measurement list if the template requires
+ * storing the signature.
+ */
+ int raw_pkcs7_len;
+ u8 raw_pkcs7[];
+};
+
+/**
+ * ima_hook_supports_modsig - can the policy allow modsig for this hook?
+ *
+ * modsig is only supported by hooks using ima_post_read_file(), because only
+ * they preload the contents of the file in a buffer. FILE_CHECK does that in
+ * some cases, but not when reached from vfs_open(). POLICY_CHECK can support
+ * it, but it's not useful in practice because it's a text file so deny.
+ */
+bool ima_hook_supports_modsig(enum ima_hooks func)
+{
+ switch (func) {
+ case KEXEC_KERNEL_CHECK:
+ case KEXEC_INITRAMFS_CHECK:
+ case MODULE_CHECK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * ima_read_modsig - Read modsig from buf.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig)
+{
+ const size_t marker_len = strlen(MODULE_SIG_STRING);
+ const struct module_signature *sig;
+ struct modsig *hdr;
+ size_t sig_len;
+ const void *p;
+ int rc;
+
+ if (buf_len <= marker_len + sizeof(*sig))
+ return -ENOENT;
+
+ p = buf + buf_len - marker_len;
+ if (memcmp(p, MODULE_SIG_STRING, marker_len))
+ return -ENOENT;
+
+ buf_len -= marker_len;
+ sig = (const struct module_signature *)(p - sizeof(*sig));
+
+ rc = mod_check_sig(sig, buf_len, func_tokens[func]);
+ if (rc)
+ return rc;
+
+ sig_len = be32_to_cpu(sig->sig_len);
+ buf_len -= sig_len + sizeof(*sig);
+
+ /* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
+ hdr = kzalloc(sizeof(*hdr) + sig_len, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
+ if (IS_ERR(hdr->pkcs7_msg)) {
+ rc = PTR_ERR(hdr->pkcs7_msg);
+ kfree(hdr);
+ return rc;
+ }
+
+ memcpy(hdr->raw_pkcs7, buf + buf_len, sig_len);
+ hdr->raw_pkcs7_len = sig_len;
+
+ /* We don't know the hash algorithm yet. */
+ hdr->hash_algo = HASH_ALGO__LAST;
+
+ *modsig = hdr;
+
+ return 0;
+}
+
+/**
+ * ima_collect_modsig - Calculate the file hash without the appended signature.
+ *
+ * Since the modsig is part of the file contents, the hash used in its signature
+ * isn't the same one ordinarily calculated by IMA. Therefore PKCS7 code
+ * calculates a separate one for signature verification.
+ */
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size)
+{
+ int rc;
+
+ /*
+ * Provide the file contents (minus the appended sig) so that the PKCS7
+ * code can calculate the file hash.
+ */
+ size -= modsig->raw_pkcs7_len + strlen(MODULE_SIG_STRING) +
+ sizeof(struct module_signature);
+ rc = pkcs7_supply_detached_data(modsig->pkcs7_msg, buf, size);
+ if (rc)
+ return;
+
+ /* Ask the PKCS7 code to calculate the file hash. */
+ rc = pkcs7_get_digest(modsig->pkcs7_msg, &modsig->digest,
+ &modsig->digest_size, &modsig->hash_algo);
+}
+
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig)
+{
+ return verify_pkcs7_message_sig(NULL, 0, modsig->pkcs7_msg, keyring,
+ VERIFYING_MODULE_SIGNATURE, NULL, NULL);
+}
+
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size)
+{
+ *algo = modsig->hash_algo;
+ *digest = modsig->digest;
+ *digest_size = modsig->digest_size;
+
+ return 0;
+}
+
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len)
+{
+ *data = &modsig->raw_pkcs7;
+ *data_len = modsig->raw_pkcs7_len;
+
+ return 0;
+}
+
+void ima_free_modsig(struct modsig *modsig)
+{
+ if (!modsig)
+ return;
+
+ pkcs7_free_message(modsig->pkcs7_msg);
+ kfree(modsig);
+}
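For reference, the on-file layout that ima_read_modsig() above expects — a sketch, not part of the patch:

/*
 *   [ original file contents          ]
 *   [ PKCS#7 signature, sig_len bytes ]
 *   [ struct module_signature         ]   <- carries sig_len (big endian)
 *   [ "~Module signature appended~\n" ]   <- MODULE_SIG_STRING marker
 *
 * The parser works backwards from the end of the buffer: find the marker,
 * step back over struct module_signature, read sig_len, and what remains in
 * front of the PKCS#7 blob is the data whose hash the signature covers
 * (supplied to the PKCS#7 code by ima_collect_modsig()).
 */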
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index b8773f05f9da..5380aca2b351 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -6,6 +6,9 @@
* ima_policy.c
* - initialize default measure policy rules
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -76,6 +79,7 @@ struct ima_rule_entry {
int type; /* audit type */
} lsm[MAX_LSM_RULES];
char *fsname;
+ struct ima_template_desc *template;
};
/*
@@ -195,7 +199,7 @@ static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
};
/* An array of architecture specific rules */
-struct ima_rule_entry *arch_policy_entry __ro_after_init;
+static struct ima_rule_entry *arch_policy_entry __ro_after_init;
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
@@ -245,31 +249,113 @@ static int __init default_appraise_policy_setup(char *str)
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
+static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ kfree(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+ kfree(entry);
+}
+
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+{
+ struct ima_rule_entry *nentry;
+ int i, result;
+
+ nentry = kmalloc(sizeof(*nentry), GFP_KERNEL);
+ if (!nentry)
+ return NULL;
+
+ /*
+ * Immutable elements are copied over as pointers and data; only
+ * lsm rules can change
+ */
+ memcpy(nentry, entry, sizeof(*nentry));
+ memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].rule)
+ continue;
+
+ nentry->lsm[i].type = entry->lsm[i].type;
+ nentry->lsm[i].args_p = kstrdup(entry->lsm[i].args_p,
+ GFP_KERNEL);
+ if (!nentry->lsm[i].args_p)
+ goto out_err;
+
+ result = security_filter_rule_init(nentry->lsm[i].type,
+ Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule);
+ if (result == -EINVAL)
+ pr_warn("ima: rule for LSM \'%d\' is undefined\n",
+ entry->lsm[i].type);
+ }
+ return nentry;
+
+out_err:
+ ima_lsm_free_rule(nentry);
+ return NULL;
+}
+
+static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+{
+ struct ima_rule_entry *nentry;
+
+ nentry = ima_lsm_copy_rule(entry);
+ if (!nentry)
+ return -ENOMEM;
+
+ list_replace_rcu(&entry->list, &nentry->list);
+ synchronize_rcu();
+ ima_lsm_free_rule(entry);
+
+ return 0;
+}
+
/*
* The LSM policy can be reloaded, leaving the IMA LSM based rules referring
* to the old, stale LSM policy. Update the IMA LSM based rules to reflect
- * the reloaded LSM policy. We assume the rules still exist; and BUG_ON() if
- * they don't.
+ * the reloaded LSM policy.
*/
static void ima_lsm_update_rules(void)
{
- struct ima_rule_entry *entry;
- int result;
- int i;
+ struct ima_rule_entry *entry, *e;
+ int i, result, needs_update;
- list_for_each_entry(entry, &ima_policy_rules, list) {
+ list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
+ needs_update = 0;
for (i = 0; i < MAX_LSM_RULES; i++) {
- if (!entry->lsm[i].rule)
- continue;
- result = security_filter_rule_init(entry->lsm[i].type,
- Audit_equal,
- entry->lsm[i].args_p,
- &entry->lsm[i].rule);
- BUG_ON(!entry->lsm[i].rule);
+ if (entry->lsm[i].rule) {
+ needs_update = 1;
+ break;
+ }
+ }
+ if (!needs_update)
+ continue;
+
+ result = ima_lsm_update_rule(entry);
+ if (result) {
+ pr_err("ima: lsm rule update error %d\n",
+ result);
+ return;
}
}
}
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data)
+{
+ if (event != LSM_POLICY_CHANGE)
+ return NOTIFY_DONE;
+
+ ima_lsm_update_rules();
+ return NOTIFY_OK;
+}
+
/**
* ima_match_rules - determine whether an inode matches the measure rule.
* @rule: a pointer to a rule
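The copy-and-replace sequence above (copy the rule, list_replace_rcu(), synchronize_rcu(), free the old entry) exists because rule matching runs under RCU: a reader may still hold a pointer to the old entry when the LSM policy-change notifier fires. A generic reader-side sketch with assumed names, not taken from this patch:

static bool example_any_rule_for_func(struct list_head *rules, enum ima_hooks func)
{
	struct ima_rule_entry *entry;
	bool matched = false;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, rules, list) {
		if ((entry->flags & IMA_FUNC) && entry->func == func) {
			matched = true;
			break;
		}
	}
	rcu_read_unlock();

	/* entries seen here must stay valid until after synchronize_rcu() */
	return matched;
}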
@@ -287,6 +373,11 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
{
int i;
+ if (func == KEXEC_CMDLINE) {
+ if ((rule->flags & IMA_FUNC) && (rule->func == func))
+ return true;
+ return false;
+ }
if ((rule->flags & IMA_FUNC) &&
(rule->func != func && func != POST_SETATTR))
return false;
@@ -323,11 +414,10 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc = 0;
u32 osid;
- int retried = 0;
if (!rule->lsm[i].rule)
continue;
-retry:
+
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
@@ -348,11 +438,6 @@ retry:
default:
break;
}
- if ((rc < 0) && (!retried)) {
- retried = 1;
- ima_lsm_update_rules();
- goto retry;
- }
if (!rc)
return false;
}
@@ -393,6 +478,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
* @pcr: set the pcr to extend
+ * @template_desc: the template that should be used for this rule
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
@@ -402,11 +488,15 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* than writes so ima_match_policy() is classical RCU candidate.
*/
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
- enum ima_hooks func, int mask, int flags, int *pcr)
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc)
{
struct ima_rule_entry *entry;
int action = 0, actmask = flags | (flags << 1);
+ if (template_desc)
+ *template_desc = ima_template_desc_current();
+
rcu_read_lock();
list_for_each_entry_rcu(entry, ima_rules, list) {
@@ -426,6 +516,7 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
action |= IMA_FAIL_UNVERIFIABLE_SIGS;
}
+
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
else
@@ -434,6 +525,9 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
if ((pcr) && (entry->flags & IMA_PCR))
*pcr = entry->pcr;
+ if (template_desc && entry->template)
+ *template_desc = entry->template;
+
if (!actmask)
break;
}
@@ -672,7 +766,7 @@ enum {
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
Opt_appraise_type, Opt_permit_directio,
- Opt_pcr, Opt_err
+ Opt_pcr, Opt_template, Opt_err
};
static const match_table_t policy_tokens = {
@@ -706,6 +800,7 @@ static const match_table_t policy_tokens = {
{Opt_appraise_type, "appraise_type=%s"},
{Opt_permit_directio, "permit_directio"},
{Opt_pcr, "pcr=%s"},
+ {Opt_template, "template=%s"},
{Opt_err, NULL}
};
@@ -753,12 +848,45 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
ima_log_string_op(ab, key, value, NULL);
}
+/*
+ * Validating the appended signature included in the measurement list requires
+ * the file hash calculated without the appended signature (i.e., the 'd-modsig'
+ * field). Therefore, notify the user if they have the 'modsig' field but not
+ * the 'd-modsig' field in the template.
+ */
+static void check_template_modsig(const struct ima_template_desc *template)
+{
+#define MSG "template with 'modsig' field also needs 'd-modsig' field\n"
+ bool has_modsig, has_dmodsig;
+ static bool checked;
+ int i;
+
+ /* We only need to notify the user once. */
+ if (checked)
+ return;
+
+ has_modsig = has_dmodsig = false;
+ for (i = 0; i < template->num_fields; i++) {
+ if (!strcmp(template->fields[i]->field_id, "modsig"))
+ has_modsig = true;
+ else if (!strcmp(template->fields[i]->field_id, "d-modsig"))
+ has_dmodsig = true;
+ }
+
+ if (has_modsig && !has_dmodsig)
+ pr_notice(MSG);
+
+ checked = true;
+#undef MSG
+}
+
static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
{
struct audit_buffer *ab;
char *from;
char *p;
bool uid_token;
+ struct ima_template_desc *template_desc;
int result = 0;
ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
@@ -866,6 +994,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->func = KEXEC_INITRAMFS_CHECK;
else if (strcmp(args[0].from, "POLICY_CHECK") == 0)
entry->func = POLICY_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ entry->func = KEXEC_CMDLINE;
else
result = -EINVAL;
if (!result)
@@ -1035,6 +1165,10 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
ima_log_string(ab, "appraise_type", args[0].from);
if ((strcmp(args[0].from, "imasig")) == 0)
entry->flags |= IMA_DIGSIG_REQUIRED;
+ else if (ima_hook_supports_modsig(entry->func) &&
+ strcmp(args[0].from, "imasig|modsig") == 0)
+ entry->flags |= IMA_DIGSIG_REQUIRED |
+ IMA_MODSIG_ALLOWED;
else
result = -EINVAL;
break;
@@ -1055,6 +1189,28 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->flags |= IMA_PCR;
break;
+ case Opt_template:
+ ima_log_string(ab, "template", args[0].from);
+ if (entry->action != MEASURE) {
+ result = -EINVAL;
+ break;
+ }
+ template_desc = lookup_template_desc(args[0].from);
+ if (!template_desc || entry->template) {
+ result = -EINVAL;
+ break;
+ }
+
+ /*
+ * template_desc_init_fields() does nothing if
+ * the template is already initialised, so
+ * it's safe to do this unconditionally
+ */
+ template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ entry->template = template_desc;
+ break;
case Opt_err:
ima_log_string(ab, "UNKNOWN", p);
result = -EINVAL;
@@ -1066,6 +1222,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
else if (entry->action == APPRAISE)
temp_ima_appraise |= ima_appraise_flag(entry->func);
+ if (!result && entry->flags & IMA_MODSIG_ALLOWED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_modsig(template_desc);
+ }
+
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@@ -1137,6 +1299,12 @@ void ima_delete_rules(void)
}
}
+#define __ima_hook_stringify(str) (#str),
+
+const char *const func_tokens[] = {
+ __ima_hooks(__ima_hook_stringify)
+};
+
#ifdef CONFIG_IMA_READ_POLICY
enum {
mask_exec = 0, mask_write, mask_read, mask_append
@@ -1149,12 +1317,6 @@ static const char *const mask_tokens[] = {
"^MAY_APPEND"
};
-#define __ima_hook_stringify(str) (#str),
-
-static const char *const func_tokens[] = {
- __ima_hooks(__ima_hook_stringify)
-};
-
void *ima_policy_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
@@ -1330,8 +1492,14 @@ int ima_policy_show(struct seq_file *m, void *v)
}
}
}
- if (entry->flags & IMA_DIGSIG_REQUIRED)
- seq_puts(m, "appraise_type=imasig ");
+ if (entry->template)
+ seq_printf(m, "template=%s ", entry->template->name);
+ if (entry->flags & IMA_DIGSIG_REQUIRED) {
+ if (entry->flags & IMA_MODSIG_ALLOWED)
+ seq_puts(m, "appraise_type=imasig|modsig ");
+ else
+ seq_puts(m, "appraise_type=imasig ");
+ }
if (entry->flags & IMA_PERMIT_DIRECTIO)
seq_puts(m, "permit_directio ");
rcu_read_unlock();
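
Taken together, the ima_policy.c changes above add a KEXEC_CMDLINE hook, a per-rule "template=" option (valid only for MEASURE rules) and an "appraise_type=imasig|modsig" variant. As a rough userspace sketch (not part of this patch), the rules below exercise both options through the usual securityfs policy interface; the path, the ability to write more than one rule (CONFIG_IMA_WRITE_POLICY) and root privileges are assumptions about the running system.

/*
 * Userspace sketch, not part of this patch: load one rule using the new
 * KEXEC_CMDLINE hook with a per-rule template, and one rule allowing an
 * appended module signature.  Assumes securityfs is mounted at
 * /sys/kernel/security and that the kernel accepts multiple policy writes
 * (CONFIG_IMA_WRITE_POLICY); must be run as root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char *rules[] = {
		"measure func=KEXEC_CMDLINE template=ima-buf\n",
		"appraise func=MODULE_CHECK appraise_type=imasig|modsig\n",
	};
	int fd;
	size_t i;

	fd = open("/sys/kernel/security/ima/policy", O_WRONLY);
	if (fd < 0) {
		perror("open ima/policy");
		return 1;
	}
	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		/* Each rule must be submitted as a single write(). */
		if (write(fd, rules[i], strlen(rules[i])) < 0)
			perror("write rule");
	}
	close(fd);
	return 0;
}
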
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index f4354c267396..6aa6408603e3 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -22,6 +22,8 @@ static struct ima_template_desc builtin_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
+ {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
@@ -39,14 +41,41 @@ static const struct ima_template_field supported_fields[] = {
.field_show = ima_show_template_string},
{.field_id = "sig", .field_init = ima_eventsig_init,
.field_show = ima_show_template_sig},
+ {.field_id = "buf", .field_init = ima_eventbuf_init,
+ .field_show = ima_show_template_buf},
+ {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "modsig", .field_init = ima_eventmodsig_init,
+ .field_show = ima_show_template_sig},
};
-#define MAX_TEMPLATE_NAME_LEN 15
+
+/*
+ * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't
+ * need to be accounted for since they shouldn't be defined in the same template
+ * description as 'd-ng' and 'n-ng' respectively.
+ */
+#define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf|d-modsig|modsig")
static struct ima_template_desc *ima_template;
-static struct ima_template_desc *lookup_template_desc(const char *name);
-static int template_desc_init_fields(const char *template_fmt,
- const struct ima_template_field ***fields,
- int *num_fields);
+
+/**
+ * ima_template_has_modsig - Check whether template has modsig-related fields.
+ * @ima_template: IMA template to check.
+ *
+ * Tells whether the given template has fields referencing a file's appended
+ * signature.
+ */
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template)
+{
+ int i;
+
+ for (i = 0; i < ima_template->num_fields; i++)
+ if (!strcmp(ima_template->fields[i]->field_id, "modsig") ||
+ !strcmp(ima_template->fields[i]->field_id, "d-modsig"))
+ return true;
+
+ return false;
+}
static int __init ima_template_setup(char *str)
{
@@ -104,7 +133,7 @@ static int __init ima_template_fmt_setup(char *str)
}
__setup("ima_template_fmt=", ima_template_fmt_setup);
-static struct ima_template_desc *lookup_template_desc(const char *name)
+struct ima_template_desc *lookup_template_desc(const char *name)
{
struct ima_template_desc *template_desc;
int found = 0;
@@ -149,9 +178,9 @@ static int template_fmt_size(const char *template_fmt)
return j + 1;
}
-static int template_desc_init_fields(const char *template_fmt,
- const struct ima_template_field ***fields,
- int *num_fields)
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields)
{
const char *template_fmt_ptr;
const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
@@ -277,9 +306,8 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
int ret = 0;
int i;
- *entry = kzalloc(sizeof(**entry) +
- template_desc->num_fields * sizeof(struct ima_field_data),
- GFP_NOFS);
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
if (!*entry)
return -ENOMEM;
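
The kzalloc() hunk above replaces the open-coded "sizeof(**entry) + num_fields * sizeof(struct ima_field_data)" with the kernel's struct_size() helper, which computes the same flexible-array allocation size but saturates if the arithmetic overflows. The standalone userspace sketch below is illustration only; the struct names are invented stand-ins for the kernel types.

/*
 * Userspace illustration only; the struct names are invented stand-ins.
 * struct_size(*entry, template_data, num_fields) in the hunk above computes
 * exactly this size, with overflow saturation added on top.
 */
#include <stdio.h>
#include <stdlib.h>

struct field_data {
	void *data;
	unsigned int len;
};

struct entry {
	int num_fields;
	struct field_data template_data[];	/* flexible array member */
};

int main(void)
{
	int num_fields = 5;
	size_t sz = sizeof(struct entry) +
		    (size_t)num_fields * sizeof(struct field_data);
	struct entry *e = calloc(1, sz);

	if (!e)
		return 1;
	e->num_fields = num_fields;
	printf("allocated %zu bytes for %d fields\n", sz, e->num_fields);
	free(e);
	return 0;
}
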
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 9fe0ef7f91e2..32ae05d88257 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -158,6 +158,12 @@ void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
}
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
/**
* ima_parse_buf() - Parses lengths and data from an input buffer
* @bufstartp: Buffer start address.
@@ -219,7 +225,8 @@ int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
return 0;
}
-static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
+ u8 hash_algo,
struct ima_field_data *field_data)
{
/*
@@ -322,6 +329,41 @@ out:
hash_algo, field_data);
}
+/*
+ * This function writes the digest of the file which is expected to match the
+ * digest contained in the file's appended signature.
+ */
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ enum hash_algo hash_algo;
+ const u8 *cur_digest;
+ u32 cur_digestsize;
+
+ if (!event_data->modsig)
+ return 0;
+
+ if (event_data->violation) {
+ /* Recording a violation. */
+ hash_algo = HASH_ALGO_SHA1;
+ cur_digest = NULL;
+ cur_digestsize = 0;
+ } else {
+ int rc;
+
+ rc = ima_get_modsig_digest(event_data->modsig, &hash_algo,
+ &cur_digest, &cur_digestsize);
+ if (rc)
+ return rc;
+ else if (hash_algo == HASH_ALGO__LAST || cur_digestsize == 0)
+ /* There was some error collecting the digest. */
+ return -EINVAL;
+ }
+
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data);
+}
+
static int ima_eventname_init_common(struct ima_event_data *event_data,
struct ima_field_data *field_data,
bool size_limit)
@@ -385,3 +427,44 @@ int ima_eventsig_init(struct ima_event_data *event_data,
return ima_write_template_field_data(xattr_value, event_data->xattr_len,
DATA_FMT_HEX, field_data);
}
+
+/*
+ * ima_eventbuf_init - include the buffer (e.g. the kexec command line) as
+ * part of the template data.
+ */
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ if ((!event_data->buf) || (event_data->buf_len == 0))
+ return 0;
+
+ return ima_write_template_field_data(event_data->buf,
+ event_data->buf_len, DATA_FMT_HEX,
+ field_data);
+}
+
+/*
+ * ima_eventmodsig_init - include the appended file signature as part of the
+ * template data
+ */
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ const void *data;
+ u32 data_len;
+ int rc;
+
+ if (!event_data->modsig)
+ return 0;
+
+ /*
+ * modsig is a runtime structure containing pointers. Get its raw data
+ * instead.
+ */
+ rc = ima_get_raw_modsig(event_data->modsig, &data, &data_len);
+ if (rc)
+ return rc;
+
+ return ima_write_template_field_data(data, data_len, DATA_FMT_HEX,
+ field_data);
+}
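
All three new field initialisers above hand their data to ima_write_template_field_data() with DATA_FMT_HEX, so in the ASCII measurement list the 'buf', 'd-modsig' and 'modsig' columns appear hex-encoded. The helper below is a hedged sketch, not from the patch, for turning the hex 'buf' column of an ima-buf record back into the recorded string (for KEXEC_CMDLINE, the kexec command line); reading it from /sys/kernel/security/ima/ascii_runtime_measurements assumes the usual securityfs mount point.

/*
 * Hedged helper sketch, not from the patch: decode a hex-encoded 'buf'
 * field back into the recorded string.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *hex;
	size_t i, len;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <hex-buf-field>\n", argv[0]);
		return 1;
	}
	hex = argv[1];
	len = strlen(hex);
	if (len % 2)
		return 1;
	for (i = 0; i < len; i += 2) {
		unsigned int byte;

		if (sscanf(hex + i, "%2x", &byte) != 1)
			return 1;
		putchar(byte);
	}
	putchar('\n');
	return 0;
}
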
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
index e515955456a3..9a88c79a7a61 100644
--- a/security/integrity/ima/ima_template_lib.h
+++ b/security/integrity/ima/ima_template_lib.h
@@ -25,6 +25,8 @@ void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
int maxfields, struct ima_field_data *fields, int *curfields,
unsigned long *len_mask, int enforce_mask, char *bufname);
@@ -34,8 +36,14 @@ int ima_eventname_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventdigest_ng_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
int ima_eventname_ng_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventsig_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 65377848fbc5..d9323d31a3a8 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -31,6 +31,7 @@
#define IMA_NEW_FILE 0x04000000
#define EVM_IMMUTABLE_DIGSIG 0x08000000
#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
+#define IMA_MODSIG_ALLOWED 0x20000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
@@ -74,6 +75,12 @@ enum evm_ima_xattr_type {
struct evm_ima_xattr_data {
u8 type;
+ u8 data[];
+} __packed;
+
+/* Only used in the EVM HMAC code. */
+struct evm_xattr {
+ struct evm_ima_xattr_data data;
u8 digest[SHA1_DIGEST_SIZE];
} __packed;
@@ -141,10 +148,13 @@ int integrity_kernel_read(struct file *file, loff_t offset,
extern struct dentry *integrity_dir;
+struct modsig;
+
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
const char *digest, int digestlen);
+int integrity_modsig_verify(unsigned int id, const struct modsig *modsig);
int __init integrity_init_keyring(const unsigned int id);
int __init integrity_load_x509(const unsigned int id, const char *path);
@@ -159,6 +169,12 @@ static inline int integrity_digsig_verify(const unsigned int id,
return -EOPNOTSUPP;
}
+static inline int integrity_modsig_verify(unsigned int id,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int integrity_init_keyring(const unsigned int id)
{
return 0;
@@ -184,6 +200,16 @@ static inline int asymmetric_verify(struct key *keyring, const char *sig,
}
#endif
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig);
+#else
+static inline int ima_modsig_verify(struct key *keyring,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_IMA_LOAD_X509
void __init ima_load_x509(void);
#else
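
The integrity.h hunk turns evm_ima_xattr_data into a one-byte header followed by a flexible data[] array and confines the fixed SHA1-sized digest to the new evm_xattr wrapper used only by the EVM HMAC code. The userspace mirror below is not the kernel headers; it relies on the same GNU C extension of embedding a flexible-array struct that the kernel uses, and simply prints the resulting sizes.

/*
 * Userspace mirror of the two structures above, not the kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define SHA1_DIGEST_SIZE 20

struct evm_ima_xattr_data {
	uint8_t type;
	uint8_t data[];
} __attribute__((packed));

struct evm_xattr {
	struct evm_ima_xattr_data data;		/* one 'type' byte */
	uint8_t digest[SHA1_DIGEST_SIZE];	/* fixed SHA1-sized HMAC */
} __attribute__((packed));

int main(void)
{
	printf("generic header: %zu byte(s), EVM HMAC xattr: %zu bytes\n",
	       sizeof(struct evm_ima_xattr_data), sizeof(struct evm_xattr));
	return 0;
}
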
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index ee502e4d390b..dd313438fecf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -25,6 +25,24 @@ config KEYS_COMPAT
def_bool y
depends on COMPAT && KEYS
+config KEYS_REQUEST_CACHE
+ bool "Enable temporary caching of the last request_key() result"
+ depends on KEYS
+ help
+	  This option causes the result of the last successful request_key()
+	  call that was resolved without an upcall to userspace to be cached
+	  temporarily in the task_struct.  The cache is cleared by exit and
+	  just prior to the resumption of userspace.
+
+	  This saves on searching in multi-step processes where each step
+	  wants to request a key that is likely to be the same as the one
+	  requested by the previous step.
+
+ An example of such a process is a pathwalk through a network
+ filesystem in which each method needs to request an authentication
+ key. Pathwalk will call multiple methods for each dentry traversed
+ (permission, d_revalidate, lookup, getxattr, getacl, ...).
+
config PERSISTENT_KEYRINGS
bool "Enable register of persistent per-UID keyrings"
depends on KEYS
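
For reference, the access pattern the KEYS_REQUEST_CACHE help text describes looks roughly like the userspace sketch below (not part of this patch): one key is requested over and over in quick succession, which is where caching the last request_key() result in the task_struct avoids repeating the keyring search. The key type, description and payload are made-up examples, and raw syscalls are used to avoid a libkeyutils dependency.

/*
 * Userspace sketch, not part of this patch: a pathwalk-like pattern of
 * repeated requests for the same key.
 */
#include <linux/keyctl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long key, found = -1;
	int i;

	key = syscall(SYS_add_key, "user", "demo:token", "secret",
		      strlen("secret"), KEY_SPEC_PROCESS_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* Many back-to-back requests for one key, as pathwalk would make. */
	for (i = 0; i < 5; i++) {
		found = syscall(SYS_request_key, "user", "demo:token",
				NULL, 0);
		if (found < 0) {
			perror("request_key");
			return 1;
		}
	}
	printf("key %ld found on every lookup\n", found);
	return 0;
}
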
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 35ce47ce2285..9bcc404131aa 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -155,6 +155,12 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
compat_ptr(arg4), compat_ptr(arg5));
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities(compat_ptr(arg2), arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 44e58a3e5663..671dd730ecfc 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -150,7 +150,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
-
+ key_put_tag(key->domain_tag);
kfree(key->description);
memzero_explicit(key, sizeof(*key));
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d59bc25a9249..c039373488bd 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -85,10 +85,14 @@ extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
extern wait_queue_head_t request_key_conswq;
-
+extern void key_set_index_key(struct keyring_index_key *index_key);
extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
+extern int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key);
extern int __key_link_begin(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit **_edit);
@@ -119,6 +123,7 @@ struct keyring_search_context {
#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
+#define KEYRING_SEARCH_RECURSE 0x0040 /* Search child keyrings also */
int (*iterator)(const void *object, void *iterator_data);
@@ -131,21 +136,23 @@ struct keyring_search_context {
extern bool key_default_cmp(const struct key *key,
const struct key_match_data *match_data);
-extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+extern key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
struct keyring_search_context *ctx);
-extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
-extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
-extern int install_user_keyrings(void);
+extern int look_up_user_keyrings(struct key **, struct key **);
+extern struct key *get_user_session_keyring_rcu(const struct cred *);
extern int install_thread_keyring_to_cred(struct cred *);
extern int install_process_keyring_to_cred(struct cred *);
extern int install_session_keyring_to_cred(struct cred *, struct key *);
extern struct key *request_key_and_link(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux,
@@ -199,7 +206,8 @@ static inline bool key_is_dead(const struct key *key, time64_t limit)
return
key->flags & ((1 << KEY_FLAG_DEAD) |
(1 << KEY_FLAG_INVALIDATED)) ||
- (key->expiry > 0 && key->expiry <= limit);
+ (key->expiry > 0 && key->expiry <= limit) ||
+ key->domain_tag->removed;
}
/*
@@ -211,6 +219,7 @@ extern long keyctl_update_key(key_serial_t, const void __user *, size_t);
extern long keyctl_revoke_key(key_serial_t);
extern long keyctl_keyring_clear(key_serial_t);
extern long keyctl_keyring_link(key_serial_t, key_serial_t);
+extern long keyctl_keyring_move(key_serial_t, key_serial_t, key_serial_t, unsigned int);
extern long keyctl_keyring_unlink(key_serial_t, key_serial_t);
extern long keyctl_describe_key(key_serial_t, char __user *, size_t);
extern long keyctl_keyring_search(key_serial_t, const char __user *,
@@ -320,6 +329,8 @@ static inline long keyctl_pkey_e_d_s(int op,
}
#endif
+extern long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen);
+
/*
* Debugging key validation
*/
diff --git a/security/keys/key.c b/security/keys/key.c
index 9a6108aefae9..764f4c57913e 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -281,11 +281,12 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->index_key.description)
goto no_memory_3;
+ key->index_key.type = type;
+ key_set_index_key(&key->index_key);
refcount_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
- key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
@@ -312,6 +313,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
goto security_error;
/* publish the key by giving it a serial number */
+ refcount_inc(&key->domain_tag->usage);
atomic_inc(&user->nkeys);
key_alloc_serial(key);
@@ -455,7 +457,7 @@ static int __key_instantiate_and_link(struct key *key,
/* disable the authorisation key */
if (authkey)
- key_revoke(authkey);
+ key_invalidate(authkey);
if (prep->expiry != TIME64_MAX) {
key->expiry = prep->expiry;
@@ -496,7 +498,7 @@ int key_instantiate_and_link(struct key *key,
struct key *authkey)
{
struct key_preparsed_payload prep;
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
memset(&prep, 0, sizeof(prep));
@@ -511,10 +513,14 @@ int key_instantiate_and_link(struct key *key,
}
if (keyring) {
- ret = __key_link_begin(keyring, &key->index_key, &edit);
+ ret = __key_link_lock(keyring, &key->index_key);
if (ret < 0)
goto error;
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_link_end;
+
if (keyring->restrict_link && keyring->restrict_link->check) {
struct key_restriction *keyres = keyring->restrict_link;
@@ -566,7 +572,7 @@ int key_reject_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret, awaken, link_ret = 0;
key_check(key);
@@ -579,7 +585,12 @@ int key_reject_and_link(struct key *key,
if (keyring->restrict_link)
return -EPERM;
- link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ link_ret = __key_link_lock(keyring, &key->index_key);
+ if (link_ret == 0) {
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (link_ret < 0)
+ __key_link_end(keyring, &key->index_key, edit);
+ }
}
mutex_lock(&key_construction_mutex);
@@ -603,7 +614,7 @@ int key_reject_and_link(struct key *key,
/* disable the authorisation key */
if (authkey)
- key_revoke(authkey);
+ key_invalidate(authkey);
}
mutex_unlock(&key_construction_mutex);
@@ -806,7 +817,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
.description = description,
};
struct key_preparsed_payload prep;
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
@@ -855,13 +866,20 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
+ key_set_index_key(&index_key);
- ret = __key_link_begin(keyring, &index_key, &edit);
+ ret = __key_link_lock(keyring, &index_key);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
if (restrict_link && restrict_link->check) {
ret = restrict_link->check(keyring, index_key.type,
&prep.payload, restrict_link->key);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 5aa605ef8d9d..9b898c969558 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,20 @@
#define KEY_MAX_DESC_SIZE 4096
+static const unsigned char keyrings_capabilities[2] = {
+ [0] = (KEYCTL_CAPS0_CAPABILITIES |
+ (IS_ENABLED(CONFIG_PERSISTENT_KEYRINGS) ? KEYCTL_CAPS0_PERSISTENT_KEYRINGS : 0) |
+ (IS_ENABLED(CONFIG_KEY_DH_OPERATIONS) ? KEYCTL_CAPS0_DIFFIE_HELLMAN : 0) |
+ (IS_ENABLED(CONFIG_ASYMMETRIC_KEY_TYPE) ? KEYCTL_CAPS0_PUBLIC_KEY : 0) |
+ (IS_ENABLED(CONFIG_BIG_KEYS) ? KEYCTL_CAPS0_BIG_KEY : 0) |
+ KEYCTL_CAPS0_INVALIDATE |
+ KEYCTL_CAPS0_RESTRICT_KEYRING |
+ KEYCTL_CAPS0_MOVE
+ ),
+ [1] = (KEYCTL_CAPS1_NS_KEYRING_NAME |
+ KEYCTL_CAPS1_NS_KEY_TAG),
+};
+
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
@@ -206,7 +220,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
}
/* do the search */
- key = request_key_and_link(ktype, description, callout_info,
+ key = request_key_and_link(ktype, description, NULL, callout_info,
callout_len, NULL, key_ref_to_ptr(dest_ref),
KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
@@ -569,6 +583,52 @@ error:
}
/*
+ * Move a link to a key from one keyring to another, displacing any matching
+ * key from the destination keyring.
+ *
+ * The key must grant the caller Link permission and both keyrings must grant
+ * the caller Write permission. There must also be a link in the from keyring
+ * to the key. If both keyrings are the same, nothing is done.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_move(key_serial_t id, key_serial_t from_ringid,
+ key_serial_t to_ringid, unsigned int flags)
+{
+ key_ref_t key_ref, from_ref, to_ref;
+ long ret;
+
+ if (flags & ~KEYCTL_MOVE_EXCL)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ from_ref = lookup_user_key(from_ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(from_ref)) {
+ ret = PTR_ERR(from_ref);
+ goto error2;
+ }
+
+ to_ref = lookup_user_key(to_ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(to_ref)) {
+ ret = PTR_ERR(to_ref);
+ goto error3;
+ }
+
+ ret = key_move(key_ref_to_ptr(key_ref), key_ref_to_ptr(from_ref),
+ key_ref_to_ptr(to_ref), flags);
+
+ key_ref_put(to_ref);
+error3:
+ key_ref_put(from_ref);
+error2:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
* Return a description of a key to userspace.
*
* The key must grant the caller View permission for this to work.
@@ -700,7 +760,7 @@ long keyctl_keyring_search(key_serial_t ringid,
}
/* do the search */
- key_ref = keyring_search(keyring_ref, ktype, description);
+ key_ref = keyring_search(keyring_ref, ktype, description, true);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
@@ -1520,7 +1580,8 @@ long keyctl_session_to_parent(void)
ret = -EPERM;
oldwork = NULL;
- parent = me->real_parent;
+ parent = rcu_dereference_protected(me->real_parent,
+ lockdep_is_held(&tasklist_lock));
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
@@ -1628,6 +1689,26 @@ error:
}
/*
+ * Get keyrings subsystem capabilities.
+ */
+long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen)
+{
+ size_t size = buflen;
+
+ if (size > 0) {
+ if (size > sizeof(keyrings_capabilities))
+ size = sizeof(keyrings_capabilities);
+ if (copy_to_user(_buffer, keyrings_capabilities, size) != 0)
+ return -EFAULT;
+ if (size < buflen &&
+ clear_user(_buffer + size, buflen - size) != 0)
+ return -EFAULT;
+ }
+
+ return sizeof(keyrings_capabilities);
+}
+
+/*
* The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -1767,6 +1848,15 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
(const void __user *)arg4,
(const void __user *)arg5);
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move((key_serial_t)arg2,
+ (key_serial_t)arg3,
+ (key_serial_t)arg4,
+ (unsigned int)arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities((unsigned char __user *)arg2, (size_t)arg3);
+
default:
return -EOPNOTSUPP;
}
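
A userspace sketch, not from the patch, tying the two new operations together: it probes KEYCTL_CAPABILITIES, checks the KEYCTL_CAPS0_MOVE bit and then moves a key between two freshly created keyrings with KEYCTL_MOVE_EXCL. It assumes kernel headers new enough to define these constants and a kernel that implements them (older kernels return EOPNOTSUPP); the keyring and key names are arbitrary.

/*
 * Userspace sketch, not from the patch: probe the capabilities call, then
 * move a key between two keyrings.
 */
#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned char caps[2] = { 0, 0 };
	long ring_a, ring_b, key, ret;

	ret = syscall(SYS_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps));
	if (ret < 0 || !(caps[0] & KEYCTL_CAPS0_MOVE)) {
		fprintf(stderr, "kernel does not advertise KEYCTL_MOVE\n");
		return 1;
	}

	ring_a = syscall(SYS_add_key, "keyring", "demo.a", NULL, 0,
			 KEY_SPEC_SESSION_KEYRING);
	ring_b = syscall(SYS_add_key, "keyring", "demo.b", NULL, 0,
			 KEY_SPEC_SESSION_KEYRING);
	key = syscall(SYS_add_key, "user", "demo.key", "payload", 7, ring_a);
	if (ring_a < 0 || ring_b < 0 || key < 0) {
		perror("add_key");
		return 1;
	}

	/* Move the key, refusing to displace a matching key in ring_b. */
	ret = syscall(SYS_keyctl, KEYCTL_MOVE, key, ring_a, ring_b,
		      (unsigned int)KEYCTL_MOVE_EXCL);
	if (ret < 0) {
		perror("keyctl(KEYCTL_MOVE)");
		return 1;
	}
	printf("key %ld moved from keyring %ld to keyring %ld\n",
	       key, ring_a, ring_b);
	return 0;
}
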
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e311cc5df358..febf36c6ddc5 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -12,10 +12,13 @@
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
#include <keys/keyring-type.h>
#include <keys/user-type.h>
#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
+#include <net/net_namespace.h>
#include "internal.h"
/*
@@ -25,11 +28,6 @@
#define KEYRING_SEARCH_MAX_DEPTH 6
/*
- * We keep all named keyrings in a hash to speed looking them up.
- */
-#define KEYRING_NAME_HASH_SIZE (1 << 5)
-
-/*
* We mark pointers we pass to the associative array with bit 1 set if
* they're keyrings and clear otherwise.
*/
@@ -51,17 +49,21 @@ static inline void *keyring_key_to_ptr(struct key *key)
return key;
}
-static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
-static inline unsigned keyring_hash(const char *desc)
+/*
+ * Clean up the bits of user_namespace that belong to us.
+ */
+void key_free_user_ns(struct user_namespace *ns)
{
- unsigned bucket = 0;
-
- for (; *desc; desc++)
- bucket += (unsigned char)*desc;
-
- return bucket & (KEYRING_NAME_HASH_SIZE - 1);
+ write_lock(&keyring_name_lock);
+ list_del_init(&ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+
+ key_put(ns->user_keyring_register);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
}
/*
@@ -96,27 +98,21 @@ EXPORT_SYMBOL(key_type_keyring);
* Semaphore to serialise link/link calls to prevent two link calls in parallel
* introducing a cycle.
*/
-static DECLARE_RWSEM(keyring_serialise_link_sem);
+static DEFINE_MUTEX(keyring_serialise_link_lock);
/*
* Publish the name of a keyring so that it can be found by name (if it has
- * one).
+ * one and it doesn't begin with a dot).
*/
static void keyring_publish_name(struct key *keyring)
{
- int bucket;
-
- if (keyring->description) {
- bucket = keyring_hash(keyring->description);
+ struct user_namespace *ns = current_user_ns();
+ if (keyring->description &&
+ keyring->description[0] &&
+ keyring->description[0] != '.') {
write_lock(&keyring_name_lock);
-
- if (!keyring_name_hash[bucket].next)
- INIT_LIST_HEAD(&keyring_name_hash[bucket]);
-
- list_add_tail(&keyring->name_link,
- &keyring_name_hash[bucket]);
-
+ list_add_tail(&keyring->name_link, &ns->keyring_name_list);
write_unlock(&keyring_name_lock);
}
}
@@ -164,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y)
/*
* Hash a key type and description.
*/
-static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+static void hash_key_type_and_desc(struct keyring_index_key *index_key)
{
const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
@@ -175,9 +171,12 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
int n, desc_len = index_key->desc_len;
type = (unsigned long)index_key->type;
-
acc = mult_64x32_and_fold(type, desc_len + 13);
acc = mult_64x32_and_fold(acc, 9207);
+ piece = (unsigned long)index_key->domain_tag;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+
for (;;) {
n = desc_len;
if (n <= 0)
@@ -202,24 +201,67 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
* zero for keyrings and non-zero otherwise.
*/
if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
- return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
- if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
- return (hash + (hash << level_shift)) & ~fan_mask;
- return hash;
+ hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ hash = (hash + (hash << level_shift)) & ~fan_mask;
+ index_key->hash = hash;
}
/*
- * Build the next index key chunk.
- *
- * On 32-bit systems the index key is laid out as:
- *
- * 0 4 5 9...
- * hash desclen typeptr desc[]
+ * Finalise an index key to include a part of the description actually in the
+ * index key, to set the domain tag and to calculate the hash.
+ */
+void key_set_index_key(struct keyring_index_key *index_key)
+{
+ static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
+ size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));
+
+ memcpy(index_key->desc, index_key->description, n);
+
+ if (!index_key->domain_tag) {
+ if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
+ index_key->domain_tag = current->nsproxy->net_ns->key_domain;
+ else
+ index_key->domain_tag = &default_domain_tag;
+ }
+
+ hash_key_type_and_desc(index_key);
+}
+
+/**
+ * key_put_tag - Release a ref on a tag.
+ * @tag: The tag to release.
*
- * On 64-bit systems:
+ * This releases a reference on the given tag and returns true if that ref was the
+ * last one.
+ */
+bool key_put_tag(struct key_tag *tag)
+{
+ if (refcount_dec_and_test(&tag->usage)) {
+ kfree_rcu(tag, rcu);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * key_remove_domain - Kill off a key domain and gc its keys
+ * @domain_tag: The domain tag to release.
*
- * 0 8 9 17...
- * hash desclen typeptr desc[]
+ * This marks a domain tag as being dead and releases a ref on it. If that
+ * wasn't the last reference, the garbage collector is poked to try and delete
+ * all keys that were in the domain.
+ */
+void key_remove_domain(struct key_tag *domain_tag)
+{
+ domain_tag->removed = true;
+ if (!key_put_tag(domain_tag))
+ key_schedule_gc_links();
+}
+
+/*
+ * Build the next index key chunk.
*
* We return it one word-sized chunk at a time.
*/
@@ -227,41 +269,33 @@ static unsigned long keyring_get_key_chunk(const void *data, int level)
{
const struct keyring_index_key *index_key = data;
unsigned long chunk = 0;
- long offset = 0;
+ const u8 *d;
int desc_len = index_key->desc_len, n = sizeof(chunk);
level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
switch (level) {
case 0:
- return hash_key_type_and_desc(index_key);
+ return index_key->hash;
case 1:
- return ((unsigned long)index_key->type << 8) | desc_len;
+ return index_key->x;
case 2:
- if (desc_len == 0)
- return (u8)((unsigned long)index_key->type >>
- (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
- n--;
- offset = 1;
- /* fall through */
+ return (unsigned long)index_key->type;
+ case 3:
+ return (unsigned long)index_key->domain_tag;
default:
- offset += sizeof(chunk) - 1;
- offset += (level - 3) * sizeof(chunk);
- if (offset >= desc_len)
+ level -= 4;
+ if (desc_len <= sizeof(index_key->desc))
return 0;
- desc_len -= offset;
+
+ d = index_key->description + sizeof(index_key->desc);
+ d += level * sizeof(long);
+ desc_len -= sizeof(index_key->desc);
if (desc_len > n)
desc_len = n;
- offset += desc_len;
do {
chunk <<= 8;
- chunk |= ((u8*)index_key->description)[--offset];
+ chunk |= *d++;
} while (--desc_len > 0);
-
- if (level == 2) {
- chunk <<= 8;
- chunk |= (u8)((unsigned long)index_key->type >>
- (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
- }
return chunk;
}
}
@@ -278,6 +312,7 @@ static bool keyring_compare_object(const void *object, const void *data)
const struct key *key = keyring_ptr_to_key(object);
return key->index_key.type == index_key->type &&
+ key->index_key.domain_tag == index_key->domain_tag &&
key->index_key.desc_len == index_key->desc_len &&
memcmp(key->index_key.description, index_key->description,
index_key->desc_len) == 0;
@@ -296,43 +331,38 @@ static int keyring_diff_objects(const void *object, const void *data)
int level, i;
level = 0;
- seg_a = hash_key_type_and_desc(a);
- seg_b = hash_key_type_and_desc(b);
+ seg_a = a->hash;
+ seg_b = b->hash;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
/* The number of bits contributed by the hash is controlled by a
* constant in the assoc_array headers. Everything else thereafter we
* can deal with as being machine word-size dependent.
*/
- level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
- seg_a = a->desc_len;
- seg_b = b->desc_len;
+ seg_a = a->x;
+ seg_b = b->x;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += sizeof(unsigned long);
/* The next bit may not work on big endian */
- level++;
seg_a = (unsigned long)a->type;
seg_b = (unsigned long)b->type;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += sizeof(unsigned long);
+ seg_a = (unsigned long)a->domain_tag;
+ seg_b = (unsigned long)b->domain_tag;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
level += sizeof(unsigned long);
- if (a->desc_len == 0)
- goto same;
- i = 0;
- if (((unsigned long)a->description | (unsigned long)b->description) &
- (sizeof(unsigned long) - 1)) {
- do {
- seg_a = *(unsigned long *)(a->description + i);
- seg_b = *(unsigned long *)(b->description + i);
- if ((seg_a ^ seg_b) != 0)
- goto differ_plus_i;
- i += sizeof(unsigned long);
- } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
- }
+ i = sizeof(a->desc);
+ if (a->desc_len <= i)
+ goto same;
for (; i < a->desc_len; i++) {
seg_a = *(unsigned char *)(a->description + i);
@@ -516,7 +546,7 @@ EXPORT_SYMBOL(keyring_alloc);
* @keyring: The keyring being added to.
* @type: The type of key being added.
* @payload: The payload of the key intended to be added.
- * @data: Additional data for evaluating restriction.
+ * @restriction_key: Keys providing additional data for evaluating restriction.
*
* Reject the addition of any links to a keyring. It can be overridden by
* passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
@@ -658,6 +688,9 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+ if (ctx->index_key.description)
+ key_set_index_key(&ctx->index_key);
+
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -697,6 +730,9 @@ descend_to_keyring:
* Non-keyrings avoid the leftmost branch of the root entirely (root
* slots 1-15).
*/
+ if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
+ goto not_this_keyring;
+
ptr = READ_ONCE(keyring->keys.root);
if (!ptr)
goto not_this_keyring;
@@ -831,7 +867,7 @@ found:
}
/**
- * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * keyring_search_rcu - Search a keyring tree for a matching key under RCU
* @keyring_ref: A pointer to the keyring with possession indicator.
* @ctx: The keyring search context.
*
@@ -843,7 +879,9 @@ found:
* addition, the LSM gets to forbid keyring searches and key matches.
*
* The search is performed as a breadth-then-depth search up to the prescribed
- * limit (KEYRING_SEARCH_MAX_DEPTH).
+ * limit (KEYRING_SEARCH_MAX_DEPTH). The caller must hold the RCU read lock to
+ * prevent keyrings from being destroyed or rearranged whilst they are being
+ * searched.
*
* Keys are matched to the type provided and are then filtered by the match
* function, which is given the description to use in any way it sees fit. The
@@ -862,7 +900,7 @@ found:
* In the case of a successful return, the possession attribute from
* @keyring_ref is propagated to the returned key reference.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
struct keyring_search_context *ctx)
{
struct key *keyring;
@@ -884,11 +922,9 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
return ERR_PTR(err);
}
- rcu_read_lock();
ctx->now = ktime_get_real_seconds();
if (search_nested_keyrings(keyring, ctx))
__key_get(key_ref_to_ptr(ctx->result));
- rcu_read_unlock();
return ctx->result;
}
@@ -897,13 +933,15 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
* @keyring: The root of the keyring tree to be searched.
* @type: The type of keyring we want to find.
* @description: The name of the keyring we want to find.
+ * @recurse: True to search the children of @keyring also
*
- * As keyring_search_aux() above, but using the current task's credentials and
+ * As keyring_search_rcu() above, but using the current task's credentials and
* type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
- const char *description)
+ const char *description,
+ bool recurse)
{
struct keyring_search_context ctx = {
.index_key.type = type,
@@ -918,13 +956,17 @@ key_ref_t keyring_search(key_ref_t keyring,
key_ref_t key;
int ret;
+ if (recurse)
+ ctx.flags |= KEYRING_SEARCH_RECURSE;
if (type->match_preparse) {
ret = type->match_preparse(&ctx.match_data);
if (ret < 0)
return ERR_PTR(ret);
}
- key = keyring_search_aux(keyring, &ctx);
+ rcu_read_lock();
+ key = keyring_search_rcu(keyring, &ctx);
+ rcu_read_unlock();
if (type->match_free)
type->match_free(&ctx.match_data);
@@ -972,9 +1014,13 @@ static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
/**
* keyring_restrict - Look up and apply a restriction to a keyring
- *
- * @keyring: The keyring to be restricted
+ * @keyring_ref: The keyring to be restricted
+ * @type: The key type that will provide the restriction checker.
* @restriction: The restriction options to apply to the keyring
+ *
+ * Look up a keyring and apply a restriction to it. The restriction is managed
+ * by the specific key type, but can be configured by the options specified in
+ * the restriction string.
*/
int keyring_restrict(key_ref_t keyring_ref, const char *type,
const char *restriction)
@@ -1096,50 +1142,44 @@ found:
*/
struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
+ struct user_namespace *ns = current_user_ns();
struct key *keyring;
- int bucket;
if (!name)
return ERR_PTR(-EINVAL);
- bucket = keyring_hash(name);
-
read_lock(&keyring_name_lock);
- if (keyring_name_hash[bucket].next) {
- /* search this hash bucket for a keyring with a matching name
- * that's readable and that hasn't been revoked */
- list_for_each_entry(keyring,
- &keyring_name_hash[bucket],
- name_link
- ) {
- if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
- continue;
-
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- continue;
+	/* Search the namespace's keyring list for a keyring with a matching
+	 * name that grants Search permission and that hasn't been revoked
+	 */
+ list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
+ if (!kuid_has_mapping(ns, keyring->user->uid))
+ continue;
- if (strcmp(keyring->description, name) != 0)
- continue;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ continue;
- if (uid_keyring) {
- if (!test_bit(KEY_FLAG_UID_KEYRING,
- &keyring->flags))
- continue;
- } else {
- if (key_permission(make_key_ref(keyring, 0),
- KEY_NEED_SEARCH) < 0)
- continue;
- }
+ if (strcmp(keyring->description, name) != 0)
+ continue;
- /* we've got a match but we might end up racing with
- * key_cleanup() if the keyring is currently 'dead'
- * (ie. it has a zero usage count) */
- if (!refcount_inc_not_zero(&keyring->usage))
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
continue;
- keyring->last_used_at = ktime_get_real_seconds();
- goto out;
}
+
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!refcount_inc_not_zero(&keyring->usage))
+ continue;
+ keyring->last_used_at = ktime_get_real_seconds();
+ goto out;
}
keyring = ERR_PTR(-ENOKEY);
@@ -1182,7 +1222,8 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
.flags = (KEYRING_SEARCH_NO_STATE_CHECK |
KEYRING_SEARCH_NO_UPDATE_TIME |
KEYRING_SEARCH_NO_CHECK_PERM |
- KEYRING_SEARCH_DETECT_TOO_DEEP),
+ KEYRING_SEARCH_DETECT_TOO_DEEP |
+ KEYRING_SEARCH_RECURSE),
};
rcu_read_lock();
@@ -1192,13 +1233,67 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
}
/*
+ * Lock keyring for link.
+ */
+int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+	 * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Lock keyrings for move (link/unlink combination).
+ */
+int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&l_keyring->sem)
+ __acquires(&u_keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (l_keyring->type != &key_type_keyring ||
+ u_keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ /* We have to be very careful here to take the keyring locks in the
+ * right order, lest we open ourselves to deadlocking against another
+ * move operation.
+ */
+ if (l_keyring < u_keyring) {
+ down_write(&l_keyring->sem);
+ down_write_nested(&u_keyring->sem, 1);
+ } else {
+ down_write(&u_keyring->sem);
+ down_write_nested(&l_keyring->sem, 1);
+ }
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+	 * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Preallocate memory so that a key can be linked into a keyring.
*/
int __key_link_begin(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit **_edit)
- __acquires(&keyring->sem)
- __acquires(&keyring_serialise_link_sem)
{
struct assoc_array_edit *edit;
int ret;
@@ -1207,20 +1302,13 @@ int __key_link_begin(struct key *keyring,
keyring->serial, index_key->type->name, index_key->description);
BUG_ON(index_key->desc_len == 0);
+ BUG_ON(*_edit != NULL);
- if (keyring->type != &key_type_keyring)
- return -ENOTDIR;
-
- down_write(&keyring->sem);
+ *_edit = NULL;
ret = -EKEYREVOKED;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto error_krsem;
-
- /* serialise link/link calls to prevent parallel calls causing a cycle
- * when linking two keyring in opposite orders */
- if (index_key->type == &key_type_keyring)
- down_write(&keyring_serialise_link_sem);
+ goto error;
/* Create an edit script that will insert/replace the key in the
* keyring tree.
@@ -1231,7 +1319,7 @@ int __key_link_begin(struct key *keyring,
NULL);
if (IS_ERR(edit)) {
ret = PTR_ERR(edit);
- goto error_sem;
+ goto error;
}
/* If we're not replacing a link in-place then we're going to need some
@@ -1250,11 +1338,7 @@ int __key_link_begin(struct key *keyring,
error_cancel:
assoc_array_cancel_edit(edit);
-error_sem:
- if (index_key->type == &key_type_keyring)
- up_write(&keyring_serialise_link_sem);
-error_krsem:
- up_write(&keyring->sem);
+error:
kleave(" = %d", ret);
return ret;
}
@@ -1299,14 +1383,11 @@ void __key_link_end(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit *edit)
__releases(&keyring->sem)
- __releases(&keyring_serialise_link_sem)
+ __releases(&keyring_serialise_link_lock)
{
BUG_ON(index_key->type == NULL);
kenter("%d,%s,", keyring->serial, index_key->type->name);
- if (index_key->type == &key_type_keyring)
- up_write(&keyring_serialise_link_sem);
-
if (edit) {
if (!edit->dead_leaf) {
key_payload_reserve(keyring,
@@ -1315,6 +1396,9 @@ void __key_link_end(struct key *keyring,
assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
+
+ if (index_key->type == &key_type_keyring)
+ mutex_unlock(&keyring_serialise_link_lock);
}
/*
@@ -1350,7 +1434,7 @@ static int __key_link_check_restriction(struct key *keyring, struct key *key)
*/
int key_link(struct key *keyring, struct key *key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
@@ -1358,22 +1442,88 @@ int key_link(struct key *keyring, struct key *key)
key_check(keyring);
key_check(key);
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
ret = __key_link_begin(keyring, &key->index_key, &edit);
- if (ret == 0) {
- kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
- ret = __key_link_check_restriction(keyring, key);
- if (ret == 0)
- ret = __key_link_check_live_key(keyring, key);
- if (ret == 0)
- __key_link(key, &edit);
- __key_link_end(keyring, &key->index_key, edit);
- }
+ if (ret < 0)
+ goto error_end;
+
+ kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+ ret = __key_link_check_restriction(keyring, key);
+ if (ret == 0)
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(key, &edit);
+error_end:
+ __key_link_end(keyring, &key->index_key, edit);
+error:
kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
+/*
+ * Lock a keyring for unlink.
+ */
+static int __key_unlink_lock(struct key *keyring)
+ __acquires(&keyring->sem)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+ return 0;
+}
+
+/*
+ * Begin the process of unlinking a key from a keyring.
+ */
+static int __key_unlink_begin(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+
+ BUG_ON(*_edit != NULL);
+
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit))
+ return PTR_ERR(edit);
+
+ if (!edit)
+ return -ENOENT;
+
+ *_edit = edit;
+ return 0;
+}
+
+/*
+ * Apply an unlink change.
+ */
+static void __key_unlink(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
+}
+
+/*
+ * Finish unlinking a key from a keyring.
+ */
+static void __key_unlink_end(struct key *keyring,
+ struct key *key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+{
+ if (edit)
+ assoc_array_cancel_edit(edit);
+ up_write(&keyring->sem);
+}
+
/**
* key_unlink - Unlink the first link to a key from a keyring.
* @keyring: The keyring to remove the link from.
@@ -1393,36 +1543,97 @@ EXPORT_SYMBOL(key_link);
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
key_check(keyring);
key_check(key);
- if (keyring->type != &key_type_keyring)
- return -ENOTDIR;
+ ret = __key_unlink_lock(keyring);
+ if (ret < 0)
+ return ret;
- down_write(&keyring->sem);
+ ret = __key_unlink_begin(keyring, key, &edit);
+ if (ret == 0)
+ __key_unlink(keyring, key, &edit);
+ __key_unlink_end(keyring, key, edit);
+ return ret;
+}
+EXPORT_SYMBOL(key_unlink);
- edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
- &key->index_key);
- if (IS_ERR(edit)) {
- ret = PTR_ERR(edit);
+/**
+ * key_move - Move a key from one keyring to another
+ * @key: The key to move
+ * @from_keyring: The keyring to remove the link from.
+ * @to_keyring: The keyring to make the link in.
+ * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
+ *
+ * Make a link in @to_keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring
+ * whilst simultaneously removing a link to the key from @from_keyring.
+ *
+ * This function will write-lock both keyring's semaphores and will consume
+ * some of the user's key data quota to hold the link on @to_keyring.
+ *
+ * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
+ * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
+ * keyring is full, -EDQUOT if there is insufficient key data quota remaining
+ * to add another link or -ENOMEM if there's insufficient memory. If
+ * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
+ * matching key in @to_keyring.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags)
+{
+ struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
+ int ret;
+
+ kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);
+
+ if (from_keyring == to_keyring)
+ return 0;
+
+ key_check(key);
+ key_check(from_keyring);
+ key_check(to_keyring);
+
+ ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
+ if (ret < 0)
+ goto out;
+ ret = __key_unlink_begin(from_keyring, key, &from_edit);
+ if (ret < 0)
goto error;
- }
- ret = -ENOENT;
- if (edit == NULL)
+ ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
+ if (ret < 0)
goto error;
- assoc_array_apply_edit(edit);
- key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
- ret = 0;
+ ret = -EEXIST;
+ if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
+ goto error;
+ ret = __key_link_check_restriction(to_keyring, key);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_check_live_key(to_keyring, key);
+ if (ret < 0)
+ goto error;
+
+ __key_unlink(from_keyring, key, &from_edit);
+ __key_link(key, &to_edit);
error:
- up_write(&keyring->sem);
+ __key_link_end(to_keyring, &key->index_key, to_edit);
+ __key_unlink_end(from_keyring, key, from_edit);
+out:
+ kleave(" = %d", ret);
return ret;
}
-EXPORT_SYMBOL(key_unlink);
+EXPORT_SYMBOL(key_move);
/**
* keyring_clear - Clear a keyring
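
__key_move_lock() above avoids deadlocking two concurrent moves by always taking the two keyring semaphores in a fixed (address) order before any edits are prepared. The short pthread program below is a generic userspace illustration of that ordering rule, not kernel code; the struct and function names are invented. Build with -lpthread.

/*
 * Generic illustration of the lock-ordering rule; names are invented.
 */
#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_rwlock_t sem;
	const char *name;
};

/* Always lock the lower-addressed ring first, as __key_move_lock() does. */
static void lock_pair_for_write(struct ring *a, struct ring *b)
{
	if (a < b) {
		pthread_rwlock_wrlock(&a->sem);
		pthread_rwlock_wrlock(&b->sem);
	} else {
		pthread_rwlock_wrlock(&b->sem);
		pthread_rwlock_wrlock(&a->sem);
	}
}

static void unlock_pair(struct ring *a, struct ring *b)
{
	pthread_rwlock_unlock(&a->sem);
	pthread_rwlock_unlock(&b->sem);
}

int main(void)
{
	struct ring from = { PTHREAD_RWLOCK_INITIALIZER, "from" };
	struct ring to = { PTHREAD_RWLOCK_INITIALIZER, "to" };

	lock_pair_for_write(&from, &to);
	printf("both locked in a stable order; the move would happen here\n");
	unlock_pair(&from, &to);
	return 0;
}
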
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
index da9a0f42b795..97af230aa4b2 100644
--- a/security/keys/persistent.c
+++ b/security/keys/persistent.c
@@ -80,15 +80,17 @@ static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
long ret;
/* Look in the register if it exists */
+ memset(&index_key, 0, sizeof(index_key));
index_key.type = &key_type_keyring;
index_key.description = buf;
index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+ key_set_index_key(&index_key);
if (ns->persistent_keyring_register) {
reg_ref = make_key_ref(ns->persistent_keyring_register, true);
- down_read(&ns->persistent_keyring_register_sem);
+ down_read(&ns->keyring_sem);
persistent_ref = find_key_to_update(reg_ref, &index_key);
- up_read(&ns->persistent_keyring_register_sem);
+ up_read(&ns->keyring_sem);
if (persistent_ref)
goto found;
@@ -97,9 +99,9 @@ static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
/* It wasn't in the register, so we'll need to create it. We might
* also need to create the register.
*/
- down_write(&ns->persistent_keyring_register_sem);
+ down_write(&ns->keyring_sem);
persistent_ref = key_create_persistent(ns, uid, &index_key);
- up_write(&ns->persistent_keyring_register_sem);
+ up_write(&ns->keyring_sem);
if (!IS_ERR(persistent_ref))
goto found;
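
The persistent.c hunks above switch the register lookups to the shared ns->keyring_sem and pre-compute the index key with key_set_index_key(); the userspace-visible operation they serve is unchanged. As a hedged sketch (not from the patch), that path can be exercised as below, assuming CONFIG_PERSISTENT_KEYRINGS and headers that define KEYCTL_GET_PERSISTENT.

/*
 * Hedged sketch, not from the patch: fetch the caller's persistent keyring.
 */
#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	long ret;

	/* A uid of -1 means "the caller's own uid". */
	ret = syscall(SYS_keyctl, KEYCTL_GET_PERSISTENT, (uid_t)-1,
		      KEY_SPEC_PROCESS_KEYRING);
	if (ret < 0) {
		perror("keyctl(KEYCTL_GET_PERSISTENT)");
		return 1;
	}
	printf("persistent keyring id: %ld\n", ret);
	return 0;
}
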
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 4e3266a2529e..415f3f1c2da0 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -166,7 +166,8 @@ static int proc_keys_show(struct seq_file *m, void *v)
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_NO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
key_ref = make_key_ref(key, 0);
@@ -175,7 +176,9 @@ static int proc_keys_show(struct seq_file *m, void *v)
* skip if the key does not indicate the possessor can view it
*/
if (key->perm & KEY_POS_VIEW) {
- skey_ref = search_my_process_keyrings(&ctx);
+ rcu_read_lock();
+ skey_ref = search_cred_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 0b9406bf60e5..09541de31f2f 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -15,15 +15,13 @@
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
+#include <linux/init_task.h>
#include <keys/request_key_auth-type.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
-/* User keyring creation semaphore */
-static DEFINE_MUTEX(key_user_keyring_mutex);
-
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = REFCOUNT_INIT(3),
@@ -35,99 +33,186 @@ struct key_user root_key_user = {
};
/*
- * Install the user and user session keyrings for the current process's UID.
+ * Get or create a user register keyring.
+ */
+static struct key *get_user_register(struct user_namespace *user_ns)
+{
+ struct key *reg_keyring = READ_ONCE(user_ns->user_keyring_register);
+
+ if (reg_keyring)
+ return reg_keyring;
+
+ down_write(&user_ns->keyring_sem);
+
+ /* Make sure there's a register keyring. It gets owned by the
+ * user_namespace's owner.
+ */
+ reg_keyring = user_ns->user_keyring_register;
+ if (!reg_keyring) {
+ reg_keyring = keyring_alloc(".user_reg",
+ user_ns->owner, INVALID_GID,
+ &init_cred,
+ KEY_POS_WRITE | KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ,
+ 0,
+ NULL, NULL);
+ if (!IS_ERR(reg_keyring))
+ smp_store_release(&user_ns->user_keyring_register,
+ reg_keyring);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ /* We don't return a ref since the keyring is pinned by the user_ns */
+ return reg_keyring;
+}
+
+/*
+ * Look up the user and user session keyrings for the current process's UID,
+ * creating them if they don't exist.
*/
-int install_user_keyrings(void)
+int look_up_user_keyrings(struct key **_user_keyring,
+ struct key **_user_session_keyring)
{
- struct user_struct *user;
- const struct cred *cred;
- struct key *uid_keyring, *session_keyring;
+ const struct cred *cred = current_cred();
+ struct user_namespace *user_ns = current_user_ns();
+ struct key *reg_keyring, *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
+ key_ref_t uid_keyring_r, session_keyring_r;
+ uid_t uid = from_kuid(user_ns, cred->user->uid);
char buf[20];
int ret;
- uid_t uid;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
- cred = current_cred();
- user = cred->user;
- uid = from_kuid(cred->user_ns, user->uid);
- kenter("%p{%u}", user, uid);
+ kenter("%u", uid);
- if (READ_ONCE(user->uid_keyring) && READ_ONCE(user->session_keyring)) {
- kleave(" = 0 [exist]");
- return 0;
- }
+ reg_keyring = get_user_register(user_ns);
+ if (IS_ERR(reg_keyring))
+ return PTR_ERR(reg_keyring);
- mutex_lock(&key_user_keyring_mutex);
+ down_write(&user_ns->keyring_sem);
ret = 0;
- if (!user->uid_keyring) {
- /* get the UID-specific keyring
- * - there may be one in existence already as it may have been
- * pinned by a session, but the user_struct pointing to it
- * may have been destroyed by setuid */
- sprintf(buf, "_uid.%u", uid);
-
- uid_keyring = find_keyring_by_name(buf, true);
+ /* Get the user keyring. Note that there may be one in existence
+ * already as it may have been pinned by a session, but the user_struct
+ * pointing to it may have been destroyed by setuid.
+ */
+ snprintf(buf, sizeof(buf), "_uid.%u", uid);
+ uid_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid %p", uid_keyring_r);
+ if (uid_keyring_r == ERR_PTR(-EAGAIN)) {
+ uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, reg_keyring);
if (IS_ERR(uid_keyring)) {
- uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
- cred, user_keyring_perm,
- KEY_ALLOC_UID_KEYRING |
- KEY_ALLOC_IN_QUOTA,
- NULL, NULL);
- if (IS_ERR(uid_keyring)) {
- ret = PTR_ERR(uid_keyring);
- goto error;
- }
+ ret = PTR_ERR(uid_keyring);
+ goto error;
}
+ } else if (IS_ERR(uid_keyring_r)) {
+ ret = PTR_ERR(uid_keyring_r);
+ goto error;
+ } else {
+ uid_keyring = key_ref_to_ptr(uid_keyring_r);
+ }
- /* get a default session keyring (which might also exist
- * already) */
- sprintf(buf, "_uid_ses.%u", uid);
-
- session_keyring = find_keyring_by_name(buf, true);
+ /* Get a default session keyring (which might also exist already) */
+ snprintf(buf, sizeof(buf), "_uid_ses.%u", uid);
+ session_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid_ses %p", session_keyring_r);
+ if (session_keyring_r == ERR_PTR(-EAGAIN)) {
+ session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, NULL);
if (IS_ERR(session_keyring)) {
- session_keyring =
- keyring_alloc(buf, user->uid, INVALID_GID,
- cred, user_keyring_perm,
- KEY_ALLOC_UID_KEYRING |
- KEY_ALLOC_IN_QUOTA,
- NULL, NULL);
- if (IS_ERR(session_keyring)) {
- ret = PTR_ERR(session_keyring);
- goto error_release;
- }
-
- /* we install a link from the user session keyring to
- * the user keyring */
- ret = key_link(session_keyring, uid_keyring);
- if (ret < 0)
- goto error_release_both;
+ ret = PTR_ERR(session_keyring);
+ goto error_release;
}
- /* install the keyrings */
- /* paired with READ_ONCE() */
- smp_store_release(&user->uid_keyring, uid_keyring);
- /* paired with READ_ONCE() */
- smp_store_release(&user->session_keyring, session_keyring);
+ /* We install a link from the user session keyring to
+ * the user keyring.
+ */
+ ret = key_link(session_keyring, uid_keyring);
+ if (ret < 0)
+ goto error_release_session;
+
+ /* And only then link the user-session keyring to the
+ * register.
+ */
+ ret = key_link(reg_keyring, session_keyring);
+ if (ret < 0)
+ goto error_release_session;
+ } else if (IS_ERR(session_keyring_r)) {
+ ret = PTR_ERR(session_keyring_r);
+ goto error_release;
+ } else {
+ session_keyring = key_ref_to_ptr(session_keyring_r);
}
- mutex_unlock(&key_user_keyring_mutex);
+ up_write(&user_ns->keyring_sem);
+
+ if (_user_session_keyring)
+ *_user_session_keyring = session_keyring;
+ else
+ key_put(session_keyring);
+ if (_user_keyring)
+ *_user_keyring = uid_keyring;
+ else
+ key_put(uid_keyring);
kleave(" = 0");
return 0;
-error_release_both:
+error_release_session:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
- mutex_unlock(&key_user_keyring_mutex);
+ up_write(&user_ns->keyring_sem);
kleave(" = %d", ret);
return ret;
}
/*
+ * Get the user session keyring if it exists, but don't create it if it
+ * doesn't.
+ */
+struct key *get_user_session_keyring_rcu(const struct cred *cred)
+{
+ struct key *reg_keyring = READ_ONCE(cred->user_ns->user_keyring_register);
+ key_ref_t session_keyring_r;
+ char buf[20];
+
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_keyring,
+ .index_key.description = buf,
+ .cred = cred,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = buf,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+
+ if (!reg_keyring)
+ return NULL;
+
+ ctx.index_key.desc_len = snprintf(buf, sizeof(buf), "_uid_ses.%u",
+ from_kuid(cred->user_ns,
+ cred->user->uid));
+
+ session_keyring_r = keyring_search_rcu(make_key_ref(reg_keyring, true),
+ &ctx);
+ if (IS_ERR(session_keyring_r))
+ return NULL;
+ return key_ref_to_ptr(session_keyring_r);
+}
+
+/*
* Install a thread keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
@@ -289,34 +374,33 @@ static int install_session_keyring(struct key *keyring)
/*
* Handle the fsuid changing.
*/
-void key_fsuid_changed(struct task_struct *tsk)
+void key_fsuid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
- BUG_ON(!tsk->cred);
- if (tsk->cred->thread_keyring) {
- down_write(&tsk->cred->thread_keyring->sem);
- tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
- up_write(&tsk->cred->thread_keyring->sem);
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->uid = new_cred->fsuid;
+ up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Handle the fsgid changing.
*/
-void key_fsgid_changed(struct task_struct *tsk)
+void key_fsgid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
- BUG_ON(!tsk->cred);
- if (tsk->cred->thread_keyring) {
- down_write(&tsk->cred->thread_keyring->sem);
- tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
- up_write(&tsk->cred->thread_keyring->sem);
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->gid = new_cred->fsgid;
+ up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Search the process keyrings attached to the supplied cred for the first
- * matching key.
+ * matching key under RCU conditions (the caller must be holding the RCU read
+ * lock).
*
* The search criteria are the type and the match function. The description is
* given to the match function as a parameter, but doesn't otherwise influence
@@ -335,8 +419,9 @@ void key_fsgid_changed(struct task_struct *tsk)
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
-key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
+key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx)
{
+ struct key *user_session;
key_ref_t key_ref, ret, err;
const struct cred *cred = ctx->cred;
@@ -353,7 +438,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the thread keyring first */
if (cred->thread_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -371,7 +456,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the process keyring second */
if (cred->process_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -392,7 +477,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the session keyring */
if (cred->session_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->session_keyring, 1), ctx);
if (!IS_ERR(key_ref))
@@ -412,10 +497,11 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
}
/* or search the user-session keyring */
- else if (READ_ONCE(cred->user->session_keyring)) {
- key_ref = keyring_search_aux(
- make_key_ref(READ_ONCE(cred->user->session_keyring), 1),
- ctx);
+ else if ((user_session = get_user_session_keyring_rcu(cred))) {
+ key_ref = keyring_search_rcu(make_key_ref(user_session, 1),
+ ctx);
+ key_put(user_session);
+
if (!IS_ERR(key_ref))
goto found;
@@ -446,16 +532,16 @@ found:
* the keys attached to the assumed authorisation key using its credentials if
* one is available.
*
- * Return same as search_my_process_keyrings().
+ * The caller must be holding the RCU read lock.
+ *
+ * Return same as search_cred_keyrings_rcu().
*/
-key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
+key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
- might_sleep();
-
- key_ref = search_my_process_keyrings(ctx);
+ key_ref = search_cred_keyrings_rcu(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
@@ -470,24 +556,17 @@ key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
) {
const struct cred *cred = ctx->cred;
- /* defend against the auth key being revoked */
- down_read(&cred->request_key_auth->sem);
-
- if (key_validate(ctx->cred->request_key_auth) == 0) {
+ if (key_validate(cred->request_key_auth) == 0) {
rka = ctx->cred->request_key_auth->payload.data[0];
+			/* was search_process_keyrings() [i.e. recursive] */
ctx->cred = rka->cred;
- key_ref = search_process_keyrings(ctx);
+ key_ref = search_cred_keyrings_rcu(ctx);
ctx->cred = cred;
- up_read(&cred->request_key_auth->sem);
-
if (!IS_ERR(key_ref))
goto found;
-
ret = key_ref;
- } else {
- up_read(&cred->request_key_auth->sem);
}
}
@@ -502,7 +581,6 @@ key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
found:
return key_ref;
}
-
/*
* See if the key we're looking at is the target key.
*/
@@ -536,10 +614,11 @@ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
struct keyring_search_context ctx = {
.match_data.cmp = lookup_user_key_possessed,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_NO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
struct request_key_auth *rka;
- struct key *key;
+ struct key *key, *user_session;
key_ref_t key_ref, skey_ref;
int ret;
@@ -588,20 +667,20 @@ try_again:
if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
- ret = install_user_keyrings();
+ ret = look_up_user_keyrings(NULL, &user_session);
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
- ret = install_session_keyring(
- ctx.cred->user->session_keyring);
+ ret = install_session_keyring(user_session);
+ key_put(user_session);
if (ret < 0)
goto error;
goto reget_creds;
- } else if (ctx.cred->session_keyring ==
- READ_ONCE(ctx.cred->user->session_keyring) &&
+ } else if (test_bit(KEY_FLAG_UID_KEYRING,
+ &ctx.cred->session_keyring->flags) &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -615,26 +694,16 @@ try_again:
break;
case KEY_SPEC_USER_KEYRING:
- if (!READ_ONCE(ctx.cred->user->uid_keyring)) {
- ret = install_user_keyrings();
- if (ret < 0)
- goto error;
- }
-
- key = ctx.cred->user->uid_keyring;
- __key_get(key);
+ ret = look_up_user_keyrings(&key, NULL);
+ if (ret < 0)
+ goto error;
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!READ_ONCE(ctx.cred->user->session_keyring)) {
- ret = install_user_keyrings();
- if (ret < 0)
- goto error;
- }
-
- key = ctx.cred->user->session_keyring;
- __key_get(key);
+ ret = look_up_user_keyrings(NULL, &key);
+ if (ret < 0)
+ goto error;
key_ref = make_key_ref(key, 1);
break;
@@ -686,12 +755,12 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- ctx.index_key.type = key->type;
- ctx.index_key.description = key->description;
- ctx.index_key.desc_len = strlen(key->description);
+ ctx.index_key = key->index_key;
ctx.match_data.raw_data = key;
kdebug("check possessed");
- skey_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ skey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
@@ -883,7 +952,7 @@ void key_change_session_keyring(struct callback_head *twork)
*/
static int __init init_root_keyring(void)
{
- return install_user_keyrings();
+ return look_up_user_keyrings(NULL, NULL);
}
late_initcall(init_root_keyring);
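
A hypothetical in-kernel caller of the new look_up_user_keyrings() API would look roughly like the sketch below (the helper name is made up; it assumes code within security/keys/ that already includes internal.h). Passing NULL for a slot means that keyring is still looked up or created, but no reference is returned for it:

static struct key *example_get_user_keyring(void)
{
	struct key *uid_keyring;
	int ret;

	/* Creates the user and user-session keyrings on first use, just as
	 * the KEY_SPEC_USER_KEYRING case in lookup_user_key() now does. */
	ret = look_up_user_keyrings(&uid_keyring, NULL);
	if (ret < 0)
		return ERR_PTR(ret);
	return uid_keyring;	/* caller must key_put() this */
}
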
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 8ae3b7b18801..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -13,14 +13,40 @@
#include <linux/err.h>
#include <linux/keyctl.h>
#include <linux/slab.h>
+#include <net/net_namespace.h>
#include "internal.h"
#include <keys/request_key_auth-type.h>
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+static struct key *check_cached_key(struct keyring_search_context *ctx)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct key *key = current->cached_requested_key;
+
+ if (key &&
+ ctx->match_data.cmp(key, &ctx->match_data) &&
+ !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))))
+ return key_get(key);
+#endif
+ return NULL;
+}
+
+static void cache_requested_key(struct key *key)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct task_struct *t = current;
+
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+#endif
+}
+
/**
* complete_request_key - Complete the construction of a key.
- * @auth_key: The authorisation key.
+ * @authkey: The authorisation key.
 * @error: The success or failure of the construction.
*
* Complete the attempt to construct a key. The key will be negated
@@ -92,7 +118,7 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
struct request_key_auth *rka = get_request_key_auth(authkey);
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = rka->target_key, *keyring, *session;
+ struct key *key = rka->target_key, *keyring, *session, *user_session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
@@ -100,9 +126,9 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
- ret = install_user_keyrings();
+ ret = look_up_user_keyrings(NULL, &user_session);
if (ret < 0)
- goto error_alloc;
+ goto error_us;
/* allocate a new session keyring */
sprintf(desc, "_req.%u", key->serial);
@@ -140,7 +166,7 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
session = cred->session_keyring;
if (!session)
- session = cred->user->session_keyring;
+ session = user_session;
sskey = session->serial;
sprintf(keyring_str[2], "%d", sskey);
@@ -182,6 +208,8 @@ error_link:
key_put(keyring);
error_alloc:
+ key_put(user_session);
+error_us:
complete_request_key(authkey, ret);
kleave(" = %d", ret);
return ret;
@@ -218,7 +246,7 @@ static int construct_key(struct key *key, const void *callout_info,
/* check that the actor called complete_request_key() prior to
* returning an error */
WARN_ON(ret < 0 &&
- !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
+ !test_bit(KEY_FLAG_INVALIDATED, &authkey->flags));
key_put(authkey);
kleave(" = %d", ret);
@@ -288,13 +316,15 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
/* fall through */
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
- dest_keyring =
- key_get(READ_ONCE(cred->user->session_keyring));
+ ret = look_up_user_keyrings(NULL, &dest_keyring);
+ if (ret < 0)
+ return ret;
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
- dest_keyring =
- key_get(READ_ONCE(cred->user->uid_keyring));
+ ret = look_up_user_keyrings(&dest_keyring, NULL);
+ if (ret < 0)
+ return ret;
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
@@ -339,7 +369,7 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
struct key_user *user,
struct key **_key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
struct key *key;
key_perm_t perm;
key_ref_t key_ref;
@@ -368,6 +398,9 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
+ ret = __key_link_lock(dest_keyring, &ctx->index_key);
+ if (ret < 0)
+ goto link_lock_failed;
ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
if (ret < 0)
goto link_prealloc_failed;
@@ -378,7 +411,9 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(ctx);
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(ctx);
+ rcu_read_unlock();
if (!IS_ERR(key_ref))
goto key_already_present;
@@ -419,6 +454,8 @@ link_check_failed:
return ret;
link_prealloc_failed:
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
+link_lock_failed:
mutex_unlock(&user->cons_lock);
key_put(key);
kleave(" = %d [prelink]", ret);
@@ -493,16 +530,18 @@ error:
* request_key_and_link - Request a key and cache it in a keyring.
* @type: The type of key we want.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
* @dest_keyring: Where to cache the key.
* @flags: Flags to key_alloc().
*
- * A key matching the specified criteria is searched for in the process's
- * keyrings and returned with its usage count incremented if found. Otherwise,
- * if callout_info is not NULL, a key will be allocated and some service
- * (probably in userspace) will be asked to instantiate it.
+ * A key matching the specified criteria (type, description, domain_tag) is
+ * searched for in the process's keyrings and returned with its usage count
+ * incremented if found. Otherwise, if callout_info is not NULL, a key will be
+ * allocated and some service (probably in userspace) will be asked to
+ * instantiate it.
*
* If successfully found or created, the key will be linked to the destination
* keyring if one is provided.
@@ -518,6 +557,7 @@ error:
*/
struct key *request_key_and_link(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux,
@@ -526,6 +566,7 @@ struct key *request_key_and_link(struct key_type *type,
{
struct keyring_search_context ctx = {
.index_key.type = type,
+ .index_key.domain_tag = domain_tag,
.index_key.description = description,
.index_key.desc_len = strlen(description),
.cred = current_cred(),
@@ -533,7 +574,8 @@ struct key *request_key_and_link(struct key_type *type,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = (KEYRING_SEARCH_DO_STATE_CHECK |
- KEYRING_SEARCH_SKIP_EXPIRED),
+ KEYRING_SEARCH_SKIP_EXPIRED |
+ KEYRING_SEARCH_RECURSE),
};
struct key *key;
key_ref_t key_ref;
@@ -551,10 +593,26 @@ struct key *request_key_and_link(struct key_type *type,
}
}
+ key = check_cached_key(&ctx);
+ if (key)
+ goto error_free;
+
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (!IS_ERR(key_ref)) {
+ if (dest_keyring) {
+ ret = key_task_permission(key_ref, current_cred(),
+ KEY_NEED_LINK);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
key = key_ref_to_ptr(key_ref);
if (dest_keyring) {
ret = key_link(dest_keyring, key);
@@ -564,6 +622,9 @@ struct key *request_key_and_link(struct key_type *type,
goto error_free;
}
}
+
+ /* Only cache the key on immediate success */
+ cache_requested_key(key);
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
} else {
@@ -612,9 +673,10 @@ int wait_for_key_construction(struct key *key, bool intr)
EXPORT_SYMBOL(wait_for_key_construction);
/**
- * request_key - Request a key and wait for construction
+ * request_key_tag - Request a key and wait for construction
* @type: Type of key.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
*
* As for request_key_and_link() except that it does not add the returned key
@@ -625,9 +687,10 @@ EXPORT_SYMBOL(wait_for_key_construction);
* Furthermore, it then works as wait_for_key_construction() to wait for the
* completion of keys undergoing construction with a non-interruptible wait.
*/
-struct key *request_key(struct key_type *type,
- const char *description,
- const char *callout_info)
+struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info)
{
struct key *key;
size_t callout_len = 0;
@@ -635,7 +698,8 @@ struct key *request_key(struct key_type *type,
if (callout_info)
callout_len = strlen(callout_info);
- key = request_key_and_link(type, description, callout_info, callout_len,
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
NULL, NULL, KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key)) {
ret = wait_for_key_construction(key, false);
@@ -646,12 +710,13 @@ struct key *request_key(struct key_type *type,
}
return key;
}
-EXPORT_SYMBOL(request_key);
+EXPORT_SYMBOL(request_key_tag);
/**
* request_key_with_auxdata - Request a key with auxiliary data for the upcaller
* @type: The type of key we want.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
@@ -664,6 +729,7 @@ EXPORT_SYMBOL(request_key);
*/
struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux)
@@ -671,7 +737,8 @@ struct key *request_key_with_auxdata(struct key_type *type,
struct key *key;
int ret;
- key = request_key_and_link(type, description, callout_info, callout_len,
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
aux, NULL, KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key)) {
ret = wait_for_key_construction(key, false);
@@ -684,52 +751,55 @@ struct key *request_key_with_auxdata(struct key_type *type,
}
EXPORT_SYMBOL(request_key_with_auxdata);
-/*
- * request_key_async - Request a key (allow async construction)
- * @type: Type of key.
- * @description: The searchable description of the key.
- * @callout_info: The data to pass to the instantiation upcall (or NULL).
- * @callout_len: The length of callout_info.
+/**
+ * request_key_rcu - Request key from RCU-read-locked context
+ * @type: The type of key we want.
+ * @description: The name of the key we want.
+ * @domain_tag: The domain in which the key operates.
*
- * As for request_key_and_link() except that it does not add the returned key
- * to a keyring if found, new keys are always allocated in the user's quota and
- * no auxiliary data can be passed.
+ * Request a key from a context that we may not sleep in (such as RCU-mode
+ * pathwalk). Keys under construction are ignored.
*
- * The caller should call wait_for_key_construction() to wait for the
- * completion of the returned key if it is still undergoing construction.
+ * Return a pointer to the found key if successful, -ENOKEY if we couldn't find
+ * a key or some other error if the key found was unsuitable or inaccessible.
*/
-struct key *request_key_async(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len)
+struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag)
{
- return request_key_and_link(type, description, callout_info,
- callout_len, NULL, NULL,
- KEY_ALLOC_IN_QUOTA);
-}
-EXPORT_SYMBOL(request_key_async);
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
+ };
+ struct key *key;
+ key_ref_t key_ref;
-/*
- * request a key with auxiliary data for the upcaller (allow async construction)
- * @type: Type of key.
- * @description: The searchable description of the key.
- * @callout_info: The data to pass to the instantiation upcall (or NULL).
- * @callout_len: The length of callout_info.
- * @aux: Auxiliary data for the upcall.
- *
- * As for request_key_and_link() except that it does not add the returned key
- * to a keyring if found and new keys are always allocated in the user's quota.
- *
- * The caller should call wait_for_key_construction() to wait for the
- * completion of the returned key if it is still undergoing construction.
- */
-struct key *request_key_async_with_auxdata(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len,
- void *aux)
-{
- return request_key_and_link(type, description, callout_info,
- callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
+ kenter("%s,%s", type->name, description);
+
+ key = check_cached_key(&ctx);
+ if (key)
+ return key;
+
+ /* search all the process keyrings for a key */
+ key_ref = search_process_keyrings_rcu(&ctx);
+ if (IS_ERR(key_ref)) {
+ key = ERR_CAST(key_ref);
+ if (PTR_ERR(key_ref) == -EAGAIN)
+ key = ERR_PTR(-ENOKEY);
+ } else {
+ key = key_ref_to_ptr(key_ref);
+ cache_requested_key(key);
+ }
+
+ kleave(" = %p", key);
+ return key;
}
-EXPORT_SYMBOL(request_key_async_with_auxdata);
+EXPORT_SYMBOL(request_key_rcu);
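
With the extra domain_tag argument, the old request_key() calling convention survives as a thin inline wrapper that passes a NULL tag; the real wrapper lives in include/linux/key.h, outside this diff, so treat the following as a sketch:

static inline struct key *request_key(struct key_type *type,
				      const char *description,
				      const char *callout_info)
{
	return request_key_tag(type, description, NULL, callout_info);
}

A caller in RCU-mode pathwalk that cannot sleep would instead use request_key_rcu(type, description, domain_tag) and drop back to the blocking path when it gets -ENOKEY.
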
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e45b5cf3b97f..ecba39c93fd9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -54,7 +54,7 @@ static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
static int request_key_auth_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
- key->payload.data[0] = (struct request_key_auth *)prep->data;
+ rcu_assign_keypointer(key, (struct request_key_auth *)prep->data);
return 0;
}
@@ -64,7 +64,10 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = dereference_key_rcu(key);
+
+ if (!rka)
+ return;
seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -79,10 +82,13 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = dereference_key_locked(key);
size_t datalen;
long ret;
+ if (!rka)
+ return -EKEYREVOKED;
+
datalen = rka->callout_len;
ret = datalen;
@@ -98,23 +104,6 @@ static long request_key_auth_read(const struct key *key,
return ret;
}
-/*
- * Handle revocation of an authorisation token key.
- *
- * Called with the key sem write-locked.
- */
-static void request_key_auth_revoke(struct key *key)
-{
- struct request_key_auth *rka = get_request_key_auth(key);
-
- kenter("{%d}", key->serial);
-
- if (rka->cred) {
- put_cred(rka->cred);
- rka->cred = NULL;
- }
-}
-
static void free_request_key_auth(struct request_key_auth *rka)
{
if (!rka)
@@ -128,15 +117,42 @@ static void free_request_key_auth(struct request_key_auth *rka)
}
/*
+ * Dispose of the request_key_auth record under RCU conditions
+ */
+static void request_key_auth_rcu_disposal(struct rcu_head *rcu)
+{
+ struct request_key_auth *rka =
+ container_of(rcu, struct request_key_auth, rcu);
+
+ free_request_key_auth(rka);
+}
+
+/*
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
+ */
+static void request_key_auth_revoke(struct key *key)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+
+ kenter("{%d}", key->serial);
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+}
+
+/*
* Destroy an instantiation authorisation token key.
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = rcu_access_pointer(key->payload.rcu_data0);
kenter("{%d}", key->serial);
-
- free_request_key_auth(rka);
+ if (rka) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+ }
}
/*
@@ -148,7 +164,7 @@ struct key *request_key_auth_new(struct key *target, const char *op,
struct key *dest_keyring)
{
struct request_key_auth *rka, *irka;
- const struct cred *cred = current->cred;
+ const struct cred *cred = current_cred();
struct key *authkey = NULL;
char desc[20];
int ret = -ENOMEM;
@@ -200,7 +216,7 @@ struct key *request_key_auth_new(struct key *target, const char *op,
authkey = key_alloc(&key_type_request_key_auth, desc,
cred->fsuid, cred->fsgid, cred,
- KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_POS_LINK |
KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(authkey)) {
ret = PTR_ERR(authkey);
@@ -238,14 +254,17 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
struct key *authkey;
key_ref_t authkey_ref;
ctx.index_key.desc_len = sprintf(description, "%x", target_id);
- authkey_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ authkey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
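
The revoke/destroy changes above follow the generic RCU-managed key payload pattern. A condensed sketch for a hypothetical key type (names are illustrative, not part of the patch):

#include <linux/key.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_payload {
	struct rcu_head rcu;
	/* type-specific data */
};

static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo_payload, rcu));
}

static void foo_revoke(struct key *key)
{
	/* The key semaphore is held by the caller, so _locked is safe. */
	struct foo_payload *p = dereference_key_locked(key);

	rcu_assign_keypointer(key, NULL);
	if (p)
		call_rcu(&p->rcu, foo_free_rcu);
}
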
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index dd1e21fab827..b46b651b3c4c 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -9,8 +9,6 @@
#include <linux/sysctl.h>
#include "internal.h"
-static const int zero, one = 1, max = INT_MAX;
-
struct ctl_table key_sysctls[] = {
{
.procname = "maxkeys",
@@ -18,8 +16,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &one,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
{
.procname = "maxbytes",
@@ -27,8 +25,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &one,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
{
.procname = "root_maxkeys",
@@ -36,8 +34,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &one,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
{
.procname = "root_maxbytes",
@@ -45,8 +43,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &one,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
{
.procname = "gc_delay",
@@ -54,8 +52,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &zero,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
#ifdef CONFIG_PERSISTENT_KEYRINGS
{
@@ -64,8 +62,8 @@ struct ctl_table key_sysctls[] = {
.maxlen = sizeof(unsigned),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *) &zero,
- .extra2 = (void *) &max,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
},
#endif
{ }
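
Any keys sysctl added later would follow the same shared-constant pattern instead of declaring per-file static ints; a sketch (the "example_knob" entry is made up):

static unsigned int example_knob = 1;

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= (void *) SYSCTL_ZERO,
		.extra2		= (void *) SYSCTL_INT_MAX,
	},
	{ }
};
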
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 9a94672e7adc..1fbd77816610 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,23 +1228,15 @@ hashalg_fail:
static int __init init_digests(void)
{
- u8 digest[TPM_MAX_DIGEST_SIZE];
- int ret;
int i;
- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
- if (ret < 0)
- return ret;
- if (ret < TPM_MAX_DIGEST_SIZE)
- return -EFAULT;
-
digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
GFP_KERNEL);
if (!digests)
return -ENOMEM;
for (i = 0; i < chip->nr_allocated_banks; i++)
- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
+ digests[i].alg_id = chip->allocated_banks[i].alg_id;
return 0;
}
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 79131efa9634..ee5cb944f4ad 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -37,12 +37,12 @@ static void report_load(const char *origin, struct file *file, char *operation)
}
static int enforce = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE);
+static char *exclude_read_files[READING_MAX_ID];
+static int ignore_read_file_id[READING_MAX_ID] __ro_after_init;
static struct super_block *pinned_root;
static DEFINE_SPINLOCK(pinned_root_spinlock);
#ifdef CONFIG_SYSCTL
-static int zero;
-static int one = 1;
static struct ctl_path loadpin_sysctl_path[] = {
{ .procname = "kernel", },
@@ -57,8 +57,8 @@ static struct ctl_table loadpin_sysctl_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
@@ -121,6 +121,13 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
struct super_block *load_root;
const char *origin = kernel_read_file_id_str(id);
+ /* If the file id is excluded, ignore the pinning. */
+ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) &&
+ ignore_read_file_id[id]) {
+ report_load(origin, file, "pinning-excluded");
+ return 0;
+ }
+
/* This handles the older init_module API that has a NULL file. */
if (!file) {
if (!enforce) {
@@ -179,10 +186,47 @@ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(kernel_load_data, loadpin_load_data),
};
+static void __init parse_exclude(void)
+{
+ int i, j;
+ char *cur;
+
+ /*
+ * Make sure all the arrays stay within expected sizes. This
+ * is slightly weird because kernel_read_file_str[] includes
+ * READING_MAX_ID, which isn't actually meaningful here.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(exclude_read_files) !=
+ ARRAY_SIZE(ignore_read_file_id));
+ BUILD_BUG_ON(ARRAY_SIZE(kernel_read_file_str) <
+ ARRAY_SIZE(ignore_read_file_id));
+
+ for (i = 0; i < ARRAY_SIZE(exclude_read_files); i++) {
+ cur = exclude_read_files[i];
+ if (!cur)
+ break;
+ if (*cur == '\0')
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(ignore_read_file_id); j++) {
+ if (strcmp(cur, kernel_read_file_str[j]) == 0) {
+ pr_info("excluding: %s\n",
+ kernel_read_file_str[j]);
+ ignore_read_file_id[j] = 1;
+ /*
+				 * Cannot break, because one read_file_str
+				 * may map to more than one read_file_id.
+ */
+ }
+ }
+ }
+}
+
static int __init loadpin_init(void)
{
pr_info("ready to pin (currently %senforcing)\n",
enforce ? "" : "not ");
+ parse_exclude();
security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
return 0;
}
@@ -195,3 +239,5 @@ DEFINE_LSM(loadpin) = {
/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
module_param(enforce, int, 0);
MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
+module_param_array_named(exclude, exclude_read_files, charp, NULL, 0);
+MODULE_PARM_DESC(exclude, "Exclude pinning specific read file types");
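
With the new parameter, a machine that wants LoadPin for firmware but not for modules could presumably boot with:

	loadpin.exclude=kernel-module

where each value must match an entry in kernel_read_file_str[] and several types can be given as a comma-separated list.
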
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
index cecd38e2ac80..7760019ad35d 100644
--- a/security/safesetid/lsm.c
+++ b/security/safesetid/lsm.c
@@ -14,67 +14,50 @@
#define pr_fmt(fmt) "SafeSetID: " fmt
-#include <linux/hashtable.h>
#include <linux/lsm_hooks.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
+#include "lsm.h"
/* Flag indicating whether initialization completed */
int safesetid_initialized;
-#define NUM_BITS 8 /* 128 buckets in hash table */
+struct setuid_ruleset __rcu *safesetid_setuid_rules;
-static DEFINE_HASHTABLE(safesetid_whitelist_hashtable, NUM_BITS);
-
-/*
- * Hash table entry to store safesetid policy signifying that 'parent' user
- * can setid to 'child' user.
- */
-struct entry {
- struct hlist_node next;
- struct hlist_node dlist; /* for deletion cleanup */
- uint64_t parent_kuid;
- uint64_t child_kuid;
-};
-
-static DEFINE_SPINLOCK(safesetid_whitelist_hashtable_spinlock);
-
-static bool check_setuid_policy_hashtable_key(kuid_t parent)
+/* Compute a decision for a transition from @src to @dst under @policy. */
+enum sid_policy_type _setuid_policy_lookup(struct setuid_ruleset *policy,
+ kuid_t src, kuid_t dst)
{
- struct entry *entry;
-
- rcu_read_lock();
- hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
- entry, next, __kuid_val(parent)) {
- if (entry->parent_kuid == __kuid_val(parent)) {
- rcu_read_unlock();
- return true;
- }
+ struct setuid_rule *rule;
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+
+ hash_for_each_possible(policy->rules, rule, next, __kuid_val(src)) {
+ if (!uid_eq(rule->src_uid, src))
+ continue;
+ if (uid_eq(rule->dst_uid, dst))
+ return SIDPOL_ALLOWED;
+ result = SIDPOL_CONSTRAINED;
}
- rcu_read_unlock();
-
- return false;
+ return result;
}
-static bool check_setuid_policy_hashtable_key_value(kuid_t parent,
- kuid_t child)
+/*
+ * Compute a decision for a transition from @src to @dst under the active
+ * policy.
+ */
+static enum sid_policy_type setuid_policy_lookup(kuid_t src, kuid_t dst)
{
- struct entry *entry;
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+ struct setuid_ruleset *pol;
rcu_read_lock();
- hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
- entry, next, __kuid_val(parent)) {
- if (entry->parent_kuid == __kuid_val(parent) &&
- entry->child_kuid == __kuid_val(child)) {
- rcu_read_unlock();
- return true;
- }
- }
+ pol = rcu_dereference(safesetid_setuid_rules);
+ if (pol)
+ result = _setuid_policy_lookup(pol, src, dst);
rcu_read_unlock();
-
- return false;
+ return result;
}
static int safesetid_security_capable(const struct cred *cred,
@@ -82,37 +65,59 @@ static int safesetid_security_capable(const struct cred *cred,
int cap,
unsigned int opts)
{
- if (cap == CAP_SETUID &&
- check_setuid_policy_hashtable_key(cred->uid)) {
- if (!(opts & CAP_OPT_INSETID)) {
- /*
- * Deny if we're not in a set*uid() syscall to avoid
- * giving powers gated by CAP_SETUID that are related
- * to functionality other than calling set*uid() (e.g.
- * allowing user to set up userns uid mappings).
- */
- pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions",
- __kuid_val(cred->uid));
- return -1;
- }
- }
- return 0;
+ /* We're only interested in CAP_SETUID. */
+ if (cap != CAP_SETUID)
+ return 0;
+
+ /*
+ * If CAP_SETUID is currently used for a set*uid() syscall, we want to
+ * let it go through here; the real security check happens later, in the
+ * task_fix_setuid hook.
+ */
+ if ((opts & CAP_OPT_INSETID) != 0)
+ return 0;
+
+ /*
+ * If no policy applies to this task, allow the use of CAP_SETUID for
+ * other purposes.
+ */
+ if (setuid_policy_lookup(cred->uid, INVALID_UID) == SIDPOL_DEFAULT)
+ return 0;
+
+ /*
+ * Reject use of CAP_SETUID for functionality other than calling
+ * set*uid() (e.g. setting up userns uid mappings).
+ */
+ pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n",
+ __kuid_val(cred->uid));
+ return -EPERM;
}
-static int check_uid_transition(kuid_t parent, kuid_t child)
+/*
+ * Check whether a caller with old credentials @old is allowed to switch to
+ * credentials that contain @new_uid.
+ */
+static bool uid_permitted_for_cred(const struct cred *old, kuid_t new_uid)
{
- if (check_setuid_policy_hashtable_key_value(parent, child))
- return 0;
- pr_warn("UID transition (%d -> %d) blocked",
- __kuid_val(parent),
- __kuid_val(child));
+ bool permitted;
+
+ /* If our old creds already had this UID in it, it's fine. */
+ if (uid_eq(new_uid, old->uid) || uid_eq(new_uid, old->euid) ||
+ uid_eq(new_uid, old->suid))
+ return true;
+
/*
- * Kill this process to avoid potential security vulnerabilities
- * that could arise from a missing whitelist entry preventing a
- * privileged process from dropping to a lesser-privileged one.
+ * Transitions to new UIDs require a check against the policy of the old
+ * RUID.
*/
- force_sig(SIGKILL, current);
- return -EACCES;
+ permitted =
+ setuid_policy_lookup(old->uid, new_uid) != SIDPOL_CONSTRAINED;
+ if (!permitted) {
+ pr_warn("UID transition ((%d,%d,%d) -> %d) blocked\n",
+ __kuid_val(old->uid), __kuid_val(old->euid),
+ __kuid_val(old->suid), __kuid_val(new_uid));
+ }
+ return permitted;
}
/*
@@ -125,134 +130,23 @@ static int safesetid_task_fix_setuid(struct cred *new,
int flags)
{
- /* Do nothing if there are no setuid restrictions for this UID. */
- if (!check_setuid_policy_hashtable_key(old->uid))
+ /* Do nothing if there are no setuid restrictions for our old RUID. */
+ if (setuid_policy_lookup(old->uid, INVALID_UID) == SIDPOL_DEFAULT)
return 0;
- switch (flags) {
- case LSM_SETID_RE:
- /*
- * Users for which setuid restrictions exist can only set the
- * real UID to the real UID or the effective UID, unless an
- * explicit whitelist policy allows the transition.
- */
- if (!uid_eq(old->uid, new->uid) &&
- !uid_eq(old->euid, new->uid)) {
- return check_uid_transition(old->uid, new->uid);
- }
- /*
- * Users for which setuid restrictions exist can only set the
- * effective UID to the real UID, the effective UID, or the
- * saved set-UID, unless an explicit whitelist policy allows
- * the transition.
- */
- if (!uid_eq(old->uid, new->euid) &&
- !uid_eq(old->euid, new->euid) &&
- !uid_eq(old->suid, new->euid)) {
- return check_uid_transition(old->euid, new->euid);
- }
- break;
- case LSM_SETID_ID:
- /*
- * Users for which setuid restrictions exist cannot change the
- * real UID or saved set-UID unless an explicit whitelist
- * policy allows the transition.
- */
- if (!uid_eq(old->uid, new->uid))
- return check_uid_transition(old->uid, new->uid);
- if (!uid_eq(old->suid, new->suid))
- return check_uid_transition(old->suid, new->suid);
- break;
- case LSM_SETID_RES:
- /*
- * Users for which setuid restrictions exist cannot change the
- * real UID, effective UID, or saved set-UID to anything but
- * one of: the current real UID, the current effective UID or
- * the current saved set-user-ID unless an explicit whitelist
- * policy allows the transition.
- */
- if (!uid_eq(new->uid, old->uid) &&
- !uid_eq(new->uid, old->euid) &&
- !uid_eq(new->uid, old->suid)) {
- return check_uid_transition(old->uid, new->uid);
- }
- if (!uid_eq(new->euid, old->uid) &&
- !uid_eq(new->euid, old->euid) &&
- !uid_eq(new->euid, old->suid)) {
- return check_uid_transition(old->euid, new->euid);
- }
- if (!uid_eq(new->suid, old->uid) &&
- !uid_eq(new->suid, old->euid) &&
- !uid_eq(new->suid, old->suid)) {
- return check_uid_transition(old->suid, new->suid);
- }
- break;
- case LSM_SETID_FS:
- /*
- * Users for which setuid restrictions exist cannot change the
- * filesystem UID to anything but one of: the current real UID,
- * the current effective UID or the current saved set-UID
- * unless an explicit whitelist policy allows the transition.
- */
- if (!uid_eq(new->fsuid, old->uid) &&
- !uid_eq(new->fsuid, old->euid) &&
- !uid_eq(new->fsuid, old->suid) &&
- !uid_eq(new->fsuid, old->fsuid)) {
- return check_uid_transition(old->fsuid, new->fsuid);
- }
- break;
- default:
- pr_warn("Unknown setid state %d\n", flags);
- force_sig(SIGKILL, current);
- return -EINVAL;
- }
- return 0;
-}
-
-int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child)
-{
- struct entry *new;
-
- /* Return if entry already exists */
- if (check_setuid_policy_hashtable_key_value(parent, child))
+ if (uid_permitted_for_cred(old, new->uid) &&
+ uid_permitted_for_cred(old, new->euid) &&
+ uid_permitted_for_cred(old, new->suid) &&
+ uid_permitted_for_cred(old, new->fsuid))
return 0;
- new = kzalloc(sizeof(struct entry), GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- new->parent_kuid = __kuid_val(parent);
- new->child_kuid = __kuid_val(child);
- spin_lock(&safesetid_whitelist_hashtable_spinlock);
- hash_add_rcu(safesetid_whitelist_hashtable,
- &new->next,
- __kuid_val(parent));
- spin_unlock(&safesetid_whitelist_hashtable_spinlock);
- return 0;
-}
-
-void flush_safesetid_whitelist_entries(void)
-{
- struct entry *entry;
- struct hlist_node *hlist_node;
- unsigned int bkt_loop_cursor;
- HLIST_HEAD(free_list);
-
/*
- * Could probably use hash_for_each_rcu here instead, but this should
- * be fine as well.
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing whitelist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
*/
- spin_lock(&safesetid_whitelist_hashtable_spinlock);
- hash_for_each_safe(safesetid_whitelist_hashtable, bkt_loop_cursor,
- hlist_node, entry, next) {
- hash_del_rcu(&entry->next);
- hlist_add_head(&entry->dlist, &free_list);
- }
- spin_unlock(&safesetid_whitelist_hashtable_spinlock);
- synchronize_rcu();
- hlist_for_each_entry_safe(entry, hlist_node, &free_list, dlist) {
- hlist_del(&entry->dlist);
- kfree(entry);
- }
+ force_sig(SIGKILL);
+ return -EACCES;
}
static struct security_hook_list safesetid_security_hooks[] = {
diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h
index c1ea3c265fcf..db6d16e6bbc3 100644
--- a/security/safesetid/lsm.h
+++ b/security/safesetid/lsm.h
@@ -15,19 +15,39 @@
#define _SAFESETID_H
#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <linux/hashtable.h>
/* Flag indicating whether initialization completed */
extern int safesetid_initialized;
-/* Function type. */
-enum safesetid_whitelist_file_write_type {
- SAFESETID_WHITELIST_ADD, /* Add whitelist policy. */
- SAFESETID_WHITELIST_FLUSH, /* Flush whitelist policies. */
+enum sid_policy_type {
+ SIDPOL_DEFAULT, /* source ID is unaffected by policy */
+ SIDPOL_CONSTRAINED, /* source ID is affected by policy */
+ SIDPOL_ALLOWED /* target ID explicitly allowed */
};
-/* Add entry to safesetid whitelist to allow 'parent' to setid to 'child'. */
-int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child);
+/*
+ * Hash table entry to store safesetid policy signifying that 'src_uid'
+ * can setuid to 'dst_uid'.
+ */
+struct setuid_rule {
+ struct hlist_node next;
+ kuid_t src_uid;
+ kuid_t dst_uid;
+};
+
+#define SETID_HASH_BITS 8 /* 256 buckets in hash table */
+
+struct setuid_ruleset {
+ DECLARE_HASHTABLE(rules, SETID_HASH_BITS);
+ char *policy_str;
+ struct rcu_head rcu;
+};
+
+enum sid_policy_type _setuid_policy_lookup(struct setuid_ruleset *policy,
+ kuid_t src, kuid_t dst);
-void flush_safesetid_whitelist_entries(void);
+extern struct setuid_ruleset __rcu *safesetid_setuid_rules;
#endif /* _SAFESETID_H */
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index 2c6c829be044..74a13d432ed8 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -11,92 +11,185 @@
* published by the Free Software Foundation.
*
*/
+
+#define pr_fmt(fmt) "SafeSetID: " fmt
+
#include <linux/security.h>
#include <linux/cred.h>
#include "lsm.h"
-static struct dentry *safesetid_policy_dir;
-
-struct safesetid_file_entry {
- const char *name;
- enum safesetid_whitelist_file_write_type type;
- struct dentry *dentry;
-};
-
-static struct safesetid_file_entry safesetid_files[] = {
- {.name = "add_whitelist_policy",
- .type = SAFESETID_WHITELIST_ADD},
- {.name = "flush_whitelist_policies",
- .type = SAFESETID_WHITELIST_FLUSH},
-};
+static DEFINE_MUTEX(policy_update_lock);
/*
* In the case the input buffer contains one or more invalid UIDs, the kuid_t
- * variables pointed to by 'parent' and 'child' will get updated but this
+ * fields of @rule may get updated but this
* function will return an error.
+ * Contents of @buf may be modified.
*/
-static int parse_safesetid_whitelist_policy(const char __user *buf,
- size_t len,
- kuid_t *parent,
- kuid_t *child)
+static int parse_policy_line(struct file *file, char *buf,
+ struct setuid_rule *rule)
{
- char *kern_buf;
- char *parent_buf;
- char *child_buf;
- const char separator[] = ":";
+ char *child_str;
int ret;
- size_t first_substring_length;
- long parsed_parent;
- long parsed_child;
+ u32 parsed_parent, parsed_child;
- /* Duplicate string from user memory and NULL-terminate */
- kern_buf = memdup_user_nul(buf, len);
- if (IS_ERR(kern_buf))
- return PTR_ERR(kern_buf);
+ /* Format of |buf| string should be <UID>:<UID>. */
+ child_str = strchr(buf, ':');
+ if (child_str == NULL)
+ return -EINVAL;
+ *child_str = '\0';
+ child_str++;
- /*
- * Format of |buf| string should be <UID>:<UID>.
- * Find location of ":" in kern_buf (copied from |buf|).
- */
- first_substring_length = strcspn(kern_buf, separator);
- if (first_substring_length == 0 || first_substring_length == len) {
- ret = -EINVAL;
- goto free_kern;
- }
+ ret = kstrtou32(buf, 0, &parsed_parent);
+ if (ret)
+ return ret;
+
+ ret = kstrtou32(child_str, 0, &parsed_child);
+ if (ret)
+ return ret;
- parent_buf = kmemdup_nul(kern_buf, first_substring_length, GFP_KERNEL);
- if (!parent_buf) {
- ret = -ENOMEM;
- goto free_kern;
+ rule->src_uid = make_kuid(file->f_cred->user_ns, parsed_parent);
+ rule->dst_uid = make_kuid(file->f_cred->user_ns, parsed_child);
+ if (!uid_valid(rule->src_uid) || !uid_valid(rule->dst_uid))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void __release_ruleset(struct rcu_head *rcu)
+{
+ struct setuid_ruleset *pol =
+ container_of(rcu, struct setuid_ruleset, rcu);
+ int bucket;
+ struct setuid_rule *rule;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(pol->rules, bucket, tmp, rule, next)
+ kfree(rule);
+ kfree(pol->policy_str);
+ kfree(pol);
+}
+
+static void release_ruleset(struct setuid_ruleset *pol)
+{
+ call_rcu(&pol->rcu, __release_ruleset);
+}
+
+static void insert_rule(struct setuid_ruleset *pol, struct setuid_rule *rule)
+{
+ hash_add(pol->rules, &rule->next, __kuid_val(rule->src_uid));
+}
+
+static int verify_ruleset(struct setuid_ruleset *pol)
+{
+ int bucket;
+ struct setuid_rule *rule, *nrule;
+ int res = 0;
+
+ hash_for_each(pol->rules, bucket, rule, next) {
+ if (_setuid_policy_lookup(pol, rule->dst_uid, INVALID_UID) ==
+ SIDPOL_DEFAULT) {
+ pr_warn("insecure policy detected: uid %d is constrained but transitively unconstrained through uid %d\n",
+ __kuid_val(rule->src_uid),
+ __kuid_val(rule->dst_uid));
+ res = -EINVAL;
+
+ /* fix it up */
+ nrule = kmalloc(sizeof(struct setuid_rule), GFP_KERNEL);
+ if (!nrule)
+ return -ENOMEM;
+ nrule->src_uid = rule->dst_uid;
+ nrule->dst_uid = rule->dst_uid;
+ insert_rule(pol, nrule);
+ }
}
+ return res;
+}
- ret = kstrtol(parent_buf, 0, &parsed_parent);
- if (ret)
- goto free_both;
+static ssize_t handle_policy_update(struct file *file,
+ const char __user *ubuf, size_t len)
+{
+ struct setuid_ruleset *pol;
+ char *buf, *p, *end;
+ int err;
- child_buf = kern_buf + first_substring_length + 1;
- ret = kstrtol(child_buf, 0, &parsed_child);
- if (ret)
- goto free_both;
+ pol = kmalloc(sizeof(struct setuid_ruleset), GFP_KERNEL);
+ if (!pol)
+ return -ENOMEM;
+ pol->policy_str = NULL;
+ hash_init(pol->rules);
- *parent = make_kuid(current_user_ns(), parsed_parent);
- if (!uid_valid(*parent)) {
- ret = -EINVAL;
- goto free_both;
+ p = buf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(buf)) {
+ err = PTR_ERR(buf);
+ goto out_free_pol;
}
+ pol->policy_str = kstrdup(buf, GFP_KERNEL);
+ if (pol->policy_str == NULL) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
+
+ /* policy lines, including the last one, end with \n */
+ while (*p != '\0') {
+ struct setuid_rule *rule;
+
+ end = strchr(p, '\n');
+ if (end == NULL) {
+ err = -EINVAL;
+ goto out_free_buf;
+ }
+ *end = '\0';
+
+ rule = kmalloc(sizeof(struct setuid_rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
- *child = make_kuid(current_user_ns(), parsed_child);
- if (!uid_valid(*child)) {
- ret = -EINVAL;
- goto free_both;
+ err = parse_policy_line(file, p, rule);
+ if (err)
+ goto out_free_rule;
+
+ if (_setuid_policy_lookup(pol, rule->src_uid, rule->dst_uid) ==
+ SIDPOL_ALLOWED) {
+ pr_warn("bad policy: duplicate entry\n");
+ err = -EEXIST;
+ goto out_free_rule;
+ }
+
+ insert_rule(pol, rule);
+ p = end + 1;
+ continue;
+
+out_free_rule:
+ kfree(rule);
+ goto out_free_buf;
}
-free_both:
- kfree(parent_buf);
-free_kern:
- kfree(kern_buf);
- return ret;
+ err = verify_ruleset(pol);
+ /* bogus policy falls through after fixing it up */
+ if (err && err != -EINVAL)
+ goto out_free_buf;
+
+ /*
+ * Everything looks good, apply the policy and release the old one.
+ * What we really want here is an xchg() wrapper for RCU, but since that
+	 * doesn't currently exist, just use a mutex for now.
+ */
+ mutex_lock(&policy_update_lock);
+ rcu_swap_protected(safesetid_setuid_rules, pol,
+ lockdep_is_held(&policy_update_lock));
+ mutex_unlock(&policy_update_lock);
+ err = len;
+
+out_free_buf:
+ kfree(buf);
+out_free_pol:
+ if (pol)
+ release_ruleset(pol);
+ return err;
}
static ssize_t safesetid_file_write(struct file *file,
@@ -104,90 +197,65 @@ static ssize_t safesetid_file_write(struct file *file,
size_t len,
loff_t *ppos)
{
- struct safesetid_file_entry *file_entry =
- file->f_inode->i_private;
- kuid_t parent;
- kuid_t child;
- int ret;
-
- if (!ns_capable(current_user_ns(), CAP_MAC_ADMIN))
+ if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- switch (file_entry->type) {
- case SAFESETID_WHITELIST_FLUSH:
- flush_safesetid_whitelist_entries();
- break;
- case SAFESETID_WHITELIST_ADD:
- ret = parse_safesetid_whitelist_policy(buf, len, &parent,
- &child);
- if (ret)
- return ret;
-
- ret = add_safesetid_whitelist_entry(parent, child);
- if (ret)
- return ret;
- break;
- default:
- pr_warn("Unknown securityfs file %d\n", file_entry->type);
- break;
- }
-
- /* Return len on success so caller won't keep trying to write */
- return len;
+ return handle_policy_update(file, buf, len);
}
-static const struct file_operations safesetid_file_fops = {
- .write = safesetid_file_write,
-};
-
-static void safesetid_shutdown_securityfs(void)
+static ssize_t safesetid_file_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
{
- int i;
+ ssize_t res = 0;
+ struct setuid_ruleset *pol;
+ const char *kbuf;
- for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
- struct safesetid_file_entry *entry =
- &safesetid_files[i];
- securityfs_remove(entry->dentry);
- entry->dentry = NULL;
+ mutex_lock(&policy_update_lock);
+ pol = rcu_dereference_protected(safesetid_setuid_rules,
+ lockdep_is_held(&policy_update_lock));
+ if (pol) {
+ kbuf = pol->policy_str;
+ res = simple_read_from_buffer(buf, len, ppos,
+ kbuf, strlen(kbuf));
}
-
- securityfs_remove(safesetid_policy_dir);
- safesetid_policy_dir = NULL;
+ mutex_unlock(&policy_update_lock);
+ return res;
}
+static const struct file_operations safesetid_file_fops = {
+ .read = safesetid_file_read,
+ .write = safesetid_file_write,
+};
+
static int __init safesetid_init_securityfs(void)
{
- int i;
int ret;
+ struct dentry *policy_dir;
+ struct dentry *policy_file;
if (!safesetid_initialized)
return 0;
- safesetid_policy_dir = securityfs_create_dir("safesetid", NULL);
- if (IS_ERR(safesetid_policy_dir)) {
- ret = PTR_ERR(safesetid_policy_dir);
+ policy_dir = securityfs_create_dir("safesetid", NULL);
+ if (IS_ERR(policy_dir)) {
+ ret = PTR_ERR(policy_dir);
goto error;
}
- for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
- struct safesetid_file_entry *entry =
- &safesetid_files[i];
- entry->dentry = securityfs_create_file(
- entry->name, 0200, safesetid_policy_dir,
- entry, &safesetid_file_fops);
- if (IS_ERR(entry->dentry)) {
- ret = PTR_ERR(entry->dentry);
- goto error;
- }
+ policy_file = securityfs_create_file("whitelist_policy", 0600,
+ policy_dir, NULL, &safesetid_file_fops);
+ if (IS_ERR(policy_file)) {
+ ret = PTR_ERR(policy_file);
+ goto error;
}
return 0;
error:
- safesetid_shutdown_securityfs();
+ securityfs_remove(policy_dir);
return ret;
}
fs_initcall(safesetid_init_securityfs);
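The write handler above parses the whole policy into a fresh ruleset and only then publishes it, swapping the RCU-protected pointer while holding policy_update_lock so readers never observe a half-built policy. Below is a minimal sketch of that publish pattern using the generic rcu_dereference_protected()/rcu_assign_pointer() pair rather than rcu_swap_protected(); the names (struct my_policy, my_policy_ptr, my_policy_lock, publish_policy) are invented for illustration and are not part of SafeSetID.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_policy {
	unsigned int nrules;
	struct rcu_head rcu;
};

static struct my_policy __rcu *my_policy_ptr;
static DEFINE_MUTEX(my_policy_lock);

static void publish_policy(struct my_policy *new)
{
	struct my_policy *old;

	mutex_lock(&my_policy_lock);
	old = rcu_dereference_protected(my_policy_ptr,
					lockdep_is_held(&my_policy_lock));
	rcu_assign_pointer(my_policy_ptr, new);
	mutex_unlock(&my_policy_lock);

	/* defer freeing until all pre-existing RCU readers have finished */
	if (old)
		kfree_rcu(old, rcu);
}

Code on hot paths can then read the pointer under rcu_read_lock()/rcu_dereference() without ever taking the mutex, which is the point of keeping updates this rare and this simple.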
diff --git a/security/security.c b/security/security.c
index 7fc373486d7a..1bc000f834e2 100644
--- a/security/security.c
+++ b/security/security.c
@@ -36,7 +36,7 @@
#define EARLY_LSM_COUNT (__end_early_lsm_info - __start_early_lsm_info)
struct security_hook_heads security_hook_heads __lsm_ro_after_init;
-static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
+static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;
@@ -460,23 +460,26 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count,
}
}
-int call_lsm_notifier(enum lsm_event event, void *data)
+int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
- return atomic_notifier_call_chain(&lsm_notifier_chain, event, data);
+ return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
+ event, data);
}
-EXPORT_SYMBOL(call_lsm_notifier);
+EXPORT_SYMBOL(call_blocking_lsm_notifier);
-int register_lsm_notifier(struct notifier_block *nb)
+int register_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(register_lsm_notifier);
+EXPORT_SYMBOL(register_blocking_lsm_notifier);
-int unregister_lsm_notifier(struct notifier_block *nb)
+int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(unregister_lsm_notifier);
+EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
/**
* lsm_cred_alloc - allocate a composite cred blob
@@ -901,6 +904,12 @@ int security_move_mount(const struct path *from_path, const struct path *to_path
return call_int_hook(move_mount, 0, from_path, to_path);
}
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ return call_int_hook(path_notify, 0, path, mask, obj_type);
+}
+
int security_inode_alloc(struct inode *inode)
{
int rc = lsm_inode_alloc(inode);
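The chain rename above reflects a semantic change as well: LSM notifier callbacks now run from a blocking notifier chain and are allowed to sleep. A sketch of a hypothetical out-of-tree consumer of the renamed API (my_lsm_event, my_lsm_nb and the module boilerplate are made up; the register/unregister calls are the ones exported above):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/security.h>

static int my_lsm_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	/* blocking chain: sleeping here (cache flush, I/O) is allowed */
	if (event == LSM_POLICY_CHANGE)
		pr_info("LSM policy changed, revalidating cached labels\n");
	return NOTIFY_OK;
}

static struct notifier_block my_lsm_nb = {
	.notifier_call	= my_lsm_event,
};

static int __init my_notifier_init(void)
{
	return register_blocking_lsm_notifier(&my_lsm_nb);
}

static void __exit my_notifier_exit(void)
{
	unregister_blocking_lsm_notifier(&my_lsm_nb);
}

module_init(my_notifier_init);
module_exit(my_notifier_exit);
MODULE_LICENSE("GPL");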
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 94de51628fdc..9625b99e677f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,6 +89,8 @@
#include <linux/kernfs.h>
#include <linux/stringhash.h> /* for hashlen_string() */
#include <uapi/linux/mount.h>
+#include <linux/fsnotify.h>
+#include <linux/fanotify.h>
#include "avc.h"
#include "objsec.h"
@@ -194,7 +196,7 @@ static int selinux_lsm_notifier_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_ib_pkey_flush();
- call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
}
return 0;
@@ -3275,6 +3277,50 @@ static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
return -EACCES;
}
+static int selinux_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ int ret;
+ u32 perm;
+
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = *path;
+
+ /*
+ * Set permission needed based on the type of mark being set.
+ * Performs an additional check for sb watches.
+ */
+ switch (obj_type) {
+ case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+ perm = FILE__WATCH_MOUNT;
+ break;
+ case FSNOTIFY_OBJ_TYPE_SB:
+ perm = FILE__WATCH_SB;
+ ret = superblock_has_perm(current_cred(), path->dentry->d_sb,
+ FILESYSTEM__WATCH, &ad);
+ if (ret)
+ return ret;
+ break;
+ case FSNOTIFY_OBJ_TYPE_INODE:
+ perm = FILE__WATCH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* blocking watches require the file:watch_with_perm permission */
+ if (mask & (ALL_FSNOTIFY_PERM_EVENTS))
+ perm |= FILE__WATCH_WITH_PERM;
+
+ /* watches on read-like events need the file:watch_reads permission */
+ if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
+ perm |= FILE__WATCH_READS;
+
+ return path_has_perm(current_cred(), path, perm);
+}
+
/*
* Copy the inode security context value to the user.
*
@@ -3403,7 +3449,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
u32 parent_sid, newsid, clen;
int rc;
char *context;
@@ -6351,11 +6397,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
- error = avc_has_perm(&selinux_state,
- mysid, sid, SECCLASS_KEY, KEY__CREATE,
- NULL);
- if (error)
- goto abort_change;
+ if (sid) {
+ error = avc_has_perm(&selinux_state, mysid, sid,
+ SECCLASS_KEY, KEY__CREATE, NULL);
+ if (error)
+ goto abort_change;
+ }
tsec->keycreate_sid = sid;
} else if (!strcmp(name, "sockcreate")) {
tsec->sockcreate_sid = sid;
@@ -6817,6 +6864,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up),
LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr),
+ LSM_HOOK_INIT(path_notify, selinux_path_notify),
LSM_HOOK_INIT(kernfs_init_security, selinux_kernfs_init_security),
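Under the mapping in selinux_path_notify(), a blocking fanotify mark placed on a mount needs file:watch_mount for the vfsmount object plus file:watch_with_perm because FAN_OPEN_PERM is a permission event; read-like events would add file:watch_reads on top. The userspace sketch below is not part of the patch and requires CAP_SYS_ADMIN; it merely sets up the kind of watch that exercises that combination:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* mark the whole mount containing /tmp for blocking open events */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN_PERM, AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	puts("mount mark installed; a real listener would now read events");
	/* pending permission events are auto-allowed when fd is closed */
	return 0;
}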
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 201f7e588a29..32e9b03be3dd 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -7,7 +7,8 @@
#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
"rename", "execute", "quotaon", "mounton", "audit_access", \
- "open", "execmod"
+ "open", "execmod", "watch", "watch_mount", "watch_sb", \
+ "watch_with_perm", "watch_reads"
#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
"listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
@@ -60,7 +61,7 @@ struct security_class_mapping secclass_map[] = {
{ "filesystem",
{ "mount", "remount", "unmount", "getattr",
"relabelfrom", "relabelto", "associate", "quotamod",
- "quotaget", NULL } },
+ "quotaget", "watch", NULL } },
{ "file",
{ COMMON_FILE_PERMS,
"execute_no_trans", "entrypoint", NULL } },
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 91c5395dd20c..586b7abd0aa7 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -37,16 +37,6 @@ struct task_security_struct {
u32 sockcreate_sid; /* sockcreate SID */
};
-/*
- * get the subjective security ID of the current task
- */
-static inline u32 current_sid(void)
-{
- const struct task_security_struct *tsec = current_security();
-
- return tsec->sid;
-}
-
enum label_initialized {
LABEL_INVALID, /* invalid or not initialized */
LABEL_INITIALIZED, /* initialized */
@@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc(
return ipc->security + selinux_blob_sizes.lbs_ipc;
}
+/*
+ * get the subjective security ID of the current task
+ */
+static inline u32 current_sid(void)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+
+ return tsec->sid;
+}
+
#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 9cb83eeee1d9..e40fecd73752 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -132,9 +132,9 @@ static void sel_netif_destroy(struct sel_netif *netif)
*/
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
- int ret;
+ int ret = 0;
struct sel_netif *netif;
- struct sel_netif *new = NULL;
+ struct sel_netif *new;
struct net_device *dev;
/* NOTE: we always use init's network namespace since we don't
@@ -151,32 +151,27 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
netif = sel_netif_find(ns, ifindex);
if (netif != NULL) {
*sid = netif->nsec.sid;
- ret = 0;
goto out;
}
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
- if (ret != 0)
- goto out;
- new->nsec.ns = ns;
- new->nsec.ifindex = ifindex;
- ret = sel_netif_insert(new);
+
+ ret = security_netif_sid(&selinux_state, dev->name, sid);
if (ret != 0)
goto out;
- *sid = new->nsec.sid;
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->nsec.ns = ns;
+ new->nsec.ifindex = ifindex;
+ new->nsec.sid = *sid;
+ if (sel_netif_insert(new))
+ kfree(new);
+ }
out:
spin_unlock_bh(&sel_netif_lock);
dev_put(dev);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
__func__, ifindex);
- kfree(new);
- }
return ret;
}
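The rewritten slow path above (repeated for netnode and netport below) computes the SID first and treats the cache strictly as an optimization: a failed kzalloc() or a lost insertion race no longer turns into a lookup failure. A stand-alone miniature of that shape, with invented names (cache_entry, slow_lookup, lookup_cached) and a trivial list standing in for the real cache:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cache_entry {
	struct list_head list;
	u32 key;
	u32 val;
};

static LIST_HEAD(cache);
static DEFINE_SPINLOCK(cache_lock);

static int slow_lookup(u32 key, u32 *val)
{
	*val = key * 2654435761u;	/* stand-in for the real computation */
	return 0;
}

static int lookup_cached(u32 key, u32 *val)
{
	struct cache_entry *e, *new;
	int ret;

	spin_lock_bh(&cache_lock);
	list_for_each_entry(e, &cache, list) {
		if (e->key == key) {
			*val = e->val;
			spin_unlock_bh(&cache_lock);
			return 0;
		}
	}

	ret = slow_lookup(key, val);		/* authoritative answer */
	if (ret)
		goto out;

	/* caching is best effort; allocation failure is not an error */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new) {
		new->key = key;
		new->val = *val;
		list_add(&new->list, &cache);
	}
out:
	spin_unlock_bh(&cache_lock);
	return ret;
}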
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index cae1fcaffd1a..9ab84efa46c7 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -189,9 +189,9 @@ static void sel_netnode_insert(struct sel_netnode *node)
*/
static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
{
- int ret = -ENOMEM;
+ int ret;
struct sel_netnode *node;
- struct sel_netnode *new = NULL;
+ struct sel_netnode *new;
spin_lock_bh(&sel_netnode_lock);
node = sel_netnode_find(addr, family);
@@ -200,38 +200,36 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
spin_unlock_bh(&sel_netnode_lock);
return 0;
}
+
new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL)
- goto out;
switch (family) {
case PF_INET:
ret = security_node_sid(&selinux_state, PF_INET,
addr, sizeof(struct in_addr), sid);
- new->nsec.addr.ipv4 = *(__be32 *)addr;
+ if (new)
+ new->nsec.addr.ipv4 = *(__be32 *)addr;
break;
case PF_INET6:
ret = security_node_sid(&selinux_state, PF_INET6,
addr, sizeof(struct in6_addr), sid);
- new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
+ if (new)
+ new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
ret = -EINVAL;
}
- if (ret != 0)
- goto out;
-
- new->nsec.family = family;
- new->nsec.sid = *sid;
- sel_netnode_insert(new);
+ if (ret == 0 && new) {
+ new->nsec.family = family;
+ new->nsec.sid = *sid;
+ sel_netnode_insert(new);
+ } else
+ kfree(new);
-out:
spin_unlock_bh(&sel_netnode_lock);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network node label\n",
__func__);
- kfree(new);
- }
return ret;
}
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 364b6d5b8968..3f8b2c0458c8 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -137,9 +137,9 @@ static void sel_netport_insert(struct sel_netport *port)
*/
static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
{
- int ret = -ENOMEM;
+ int ret;
struct sel_netport *port;
- struct sel_netport *new = NULL;
+ struct sel_netport *new;
spin_lock_bh(&sel_netport_lock);
port = sel_netport_find(protocol, pnum);
@@ -148,25 +148,23 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
spin_unlock_bh(&sel_netport_lock);
return 0;
}
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL)
- goto out;
+
ret = security_port_sid(&selinux_state, protocol, pnum, sid);
if (ret != 0)
goto out;
-
- new->psec.port = pnum;
- new->psec.protocol = protocol;
- new->psec.sid = *sid;
- sel_netport_insert(new);
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->psec.port = pnum;
+ new->psec.protocol = protocol;
+ new->psec.sid = *sid;
+ sel_netport_insert(new);
+ }
out:
spin_unlock_bh(&sel_netport_lock);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network port label\n",
__func__);
- kfree(new);
- }
return ret;
}
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 8cd7038389fd..58345ba0528e 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -80,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -163,7 +166,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOP + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1884f34bb983..e6c7643c3fc0 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/init.h>
@@ -178,7 +179,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
selnl_notify_setenforce(new_value);
selinux_status_update_setenforce(state, new_value);
if (!new_value)
- call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
}
length = count;
out:
@@ -1891,7 +1892,7 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
#define NULL_FILE_NAME "null"
-static int sel_fill_super(struct super_block *sb, void *data, int silent)
+static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct selinux_fs_info *fsi;
int ret;
@@ -2007,10 +2008,19 @@ err:
return ret;
}
-static struct dentry *sel_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sel_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, sel_fill_super);
+ return get_tree_single(fc, sel_fill_super);
+}
+
+static const struct fs_context_operations sel_context_ops = {
+ .get_tree = sel_get_tree,
+};
+
+static int sel_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &sel_context_ops;
+ return 0;
}
static void sel_kill_sb(struct super_block *sb)
@@ -2021,7 +2031,7 @@ static void sel_kill_sb(struct super_block *sb)
static struct file_system_type sel_fs_type = {
.name = "selinuxfs",
- .mount = sel_mount,
+ .init_fs_context = sel_init_fs_context,
.kill_sb = sel_kill_sb,
};
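selinuxfs here and smackfs further down get the same mechanical conversion to the new mount API: the fill_super callback grows a struct fs_context argument, get_tree_single() replaces mount_single(), and .init_fs_context merely installs the context operations. A minimal sketch of that shape for a hypothetical single-instance pseudo filesystem (myfs; the empty simple_fill_super() table and the magic number are placeholders):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/init.h>
#include <linux/module.h>

static int myfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr empty[] = { { "" } };

	/* only the root directory; real code would list its files here */
	return simple_fill_super(sb, 0x6d796673 /* placeholder magic */, empty);
}

static int myfs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, myfs_fill_super);
}

static const struct fs_context_operations myfs_context_ops = {
	.get_tree	= myfs_get_tree,
};

static int myfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &myfs_context_ops;
	return 0;
}

static struct file_system_type myfs_type = {
	.name			= "myfs",
	.init_fs_context	= myfs_init_fs_context,
	.kill_sb		= kill_litter_super,
};

static int __init myfs_init(void)
{
	return register_filesystem(&myfs_type);
}
module_init(myfs_init);
MODULE_LICENSE("GPL");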
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 8f624f80055b..09929fc5ab47 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -347,7 +347,9 @@ int ebitmap_read(struct ebitmap *e, void *fp)
{
struct ebitmap_node *n = NULL;
u32 mapunit, count, startbit, index;
+ __le32 ebitmap_start;
u64 map;
+ __le64 mapbits;
__le32 buf[3];
int rc, i;
@@ -381,12 +383,12 @@ int ebitmap_read(struct ebitmap *e, void *fp)
goto bad;
for (i = 0; i < count; i++) {
- rc = next_entry(&startbit, fp, sizeof(u32));
+ rc = next_entry(&ebitmap_start, fp, sizeof(u32));
if (rc < 0) {
pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
- startbit = le32_to_cpu(startbit);
+ startbit = le32_to_cpu(ebitmap_start);
if (startbit & (mapunit - 1)) {
pr_err("SELinux: ebitmap start bit (%d) is "
@@ -423,12 +425,12 @@ int ebitmap_read(struct ebitmap *e, void *fp)
goto bad;
}
- rc = next_entry(&map, fp, sizeof(u64));
+ rc = next_entry(&mapbits, fp, sizeof(u64));
if (rc < 0) {
pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
- map = le64_to_cpu(map);
+ map = le64_to_cpu(mapbits);
index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
while (map) {
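The ebitmap change stops reusing the host-endian startbit and map variables as raw read buffers: the little-endian on-disk words now land in dedicated __le32/__le64 temporaries and are converted exactly once, which keeps the types honest for sparse and avoids mixing both representations in one variable. The general shape of that pattern, as a hypothetical helper:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical helper: pull one little-endian 32-bit word out of a blob */
static inline u32 read_le32(const void *src)
{
	__le32 raw;			/* on-disk/wire representation */

	memcpy(&raw, src, sizeof(raw));
	return le32_to_cpu(raw);	/* convert once into host byte order */
}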
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 624ccc6ac744..1260f5fb766e 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -177,6 +177,195 @@ static struct policydb_compat_info *policydb_lookup_compat(int version)
}
/*
+ * The following *_destroy functions are used to
+ * free any memory allocated for each kind of
+ * symbol data in the policy database.
+ */
+
+static int perm_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int common_destroy(void *key, void *datum, void *p)
+{
+ struct common_datum *comdatum;
+
+ kfree(key);
+ if (datum) {
+ comdatum = datum;
+ hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(comdatum->permissions.table);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
+static int cls_destroy(void *key, void *datum, void *p)
+{
+ struct class_datum *cladatum;
+ struct constraint_node *constraint, *ctemp;
+ struct constraint_expr *e, *etmp;
+
+ kfree(key);
+ if (datum) {
+ cladatum = datum;
+ hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(cladatum->permissions.table);
+ constraint = cladatum->constraints;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+
+ constraint = cladatum->validatetrans;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+ kfree(cladatum->comkey);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int role_destroy(void *key, void *datum, void *p)
+{
+ struct role_datum *role;
+
+ kfree(key);
+ if (datum) {
+ role = datum;
+ ebitmap_destroy(&role->dominates);
+ ebitmap_destroy(&role->types);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int type_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int user_destroy(void *key, void *datum, void *p)
+{
+ struct user_datum *usrdatum;
+
+ kfree(key);
+ if (datum) {
+ usrdatum = datum;
+ ebitmap_destroy(&usrdatum->roles);
+ ebitmap_destroy(&usrdatum->range.level[0].cat);
+ ebitmap_destroy(&usrdatum->range.level[1].cat);
+ ebitmap_destroy(&usrdatum->dfltlevel.cat);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int sens_destroy(void *key, void *datum, void *p)
+{
+ struct level_datum *levdatum;
+
+ kfree(key);
+ if (datum) {
+ levdatum = datum;
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int cat_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
+{
+ common_destroy,
+ cls_destroy,
+ role_destroy,
+ type_destroy,
+ user_destroy,
+ cond_destroy_bool,
+ sens_destroy,
+ cat_destroy,
+};
+
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+ struct filename_trans *ft = key;
+
+ kfree(ft->name);
+ kfree(key);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static void ocontext_destroy(struct ocontext *c, int i)
+{
+ if (!c)
+ return;
+
+ context_destroy(&c->context[0]);
+ context_destroy(&c->context[1]);
+ if (i == OCON_ISID || i == OCON_FS ||
+ i == OCON_NETIF || i == OCON_FSUSE)
+ kfree(c->u.name);
+ kfree(c);
+}
+
+/*
* Initialize the role table.
*/
static int roles_init(struct policydb *p)
@@ -250,6 +439,7 @@ static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
static u32 rangetr_hash(struct hashtab *h, const void *k)
{
const struct range_trans *key = k;
+
return (key->source_type + (key->target_type << 3) +
(key->target_class << 5)) & (h->size - 1);
}
@@ -299,7 +489,8 @@ static int policydb_init(struct policydb *p)
if (rc)
goto out;
- p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
+ p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp,
+ (1 << 10));
if (!p->filename_trans) {
rc = -ENOMEM;
goto out;
@@ -319,8 +510,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
@@ -395,7 +588,7 @@ static int type_index(void *key, void *datum, void *datap)
|| typdatum->bounds > p->p_types.nprim)
return -EINVAL;
p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
- p->type_val_to_struct_array[typdatum->value - 1] = typdatum;
+ p->type_val_to_struct[typdatum->value - 1] = typdatum;
}
return 0;
@@ -473,9 +666,9 @@ static void hash_eval(struct hashtab *h, const char *hash_name)
struct hashtab_info info;
hashtab_stat(h, &info);
- pr_debug("SELinux: %s: %d entries and %d/%d buckets used, "
- "longest chain length %d\n", hash_name, h->nel,
- info.slots_used, h->size, info.max_chain_len);
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, longest chain length %d\n",
+ hash_name, h->nel, info.slots_used, h->size,
+ info.max_chain_len);
}
static void symtab_hash_eval(struct symtab *s)
@@ -537,10 +730,10 @@ static int policydb_index(struct policydb *p)
if (!p->user_val_to_struct)
return -ENOMEM;
- p->type_val_to_struct_array = kvcalloc(p->p_types.nprim,
- sizeof(*p->type_val_to_struct_array),
- GFP_KERNEL);
- if (!p->type_val_to_struct_array)
+ p->type_val_to_struct = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_val_to_struct),
+ GFP_KERNEL);
+ if (!p->type_val_to_struct)
return -ENOMEM;
rc = cond_init_bool_indexes(p);
@@ -564,193 +757,6 @@ out:
}
/*
- * The following *_destroy functions are used to
- * free any memory allocated for each kind of
- * symbol data in the policy database.
- */
-
-static int perm_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int common_destroy(void *key, void *datum, void *p)
-{
- struct common_datum *comdatum;
-
- kfree(key);
- if (datum) {
- comdatum = datum;
- hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(comdatum->permissions.table);
- }
- kfree(datum);
- return 0;
-}
-
-static void constraint_expr_destroy(struct constraint_expr *expr)
-{
- if (expr) {
- ebitmap_destroy(&expr->names);
- if (expr->type_names) {
- ebitmap_destroy(&expr->type_names->types);
- ebitmap_destroy(&expr->type_names->negset);
- kfree(expr->type_names);
- }
- kfree(expr);
- }
-}
-
-static int cls_destroy(void *key, void *datum, void *p)
-{
- struct class_datum *cladatum;
- struct constraint_node *constraint, *ctemp;
- struct constraint_expr *e, *etmp;
-
- kfree(key);
- if (datum) {
- cladatum = datum;
- hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(cladatum->permissions.table);
- constraint = cladatum->constraints;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- etmp = e;
- e = e->next;
- constraint_expr_destroy(etmp);
- }
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
- }
-
- constraint = cladatum->validatetrans;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- etmp = e;
- e = e->next;
- constraint_expr_destroy(etmp);
- }
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
- }
- kfree(cladatum->comkey);
- }
- kfree(datum);
- return 0;
-}
-
-static int role_destroy(void *key, void *datum, void *p)
-{
- struct role_datum *role;
-
- kfree(key);
- if (datum) {
- role = datum;
- ebitmap_destroy(&role->dominates);
- ebitmap_destroy(&role->types);
- }
- kfree(datum);
- return 0;
-}
-
-static int type_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int user_destroy(void *key, void *datum, void *p)
-{
- struct user_datum *usrdatum;
-
- kfree(key);
- if (datum) {
- usrdatum = datum;
- ebitmap_destroy(&usrdatum->roles);
- ebitmap_destroy(&usrdatum->range.level[0].cat);
- ebitmap_destroy(&usrdatum->range.level[1].cat);
- ebitmap_destroy(&usrdatum->dfltlevel.cat);
- }
- kfree(datum);
- return 0;
-}
-
-static int sens_destroy(void *key, void *datum, void *p)
-{
- struct level_datum *levdatum;
-
- kfree(key);
- if (datum) {
- levdatum = datum;
- if (levdatum->level)
- ebitmap_destroy(&levdatum->level->cat);
- kfree(levdatum->level);
- }
- kfree(datum);
- return 0;
-}
-
-static int cat_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
-{
- common_destroy,
- cls_destroy,
- role_destroy,
- type_destroy,
- user_destroy,
- cond_destroy_bool,
- sens_destroy,
- cat_destroy,
-};
-
-static int filenametr_destroy(void *key, void *datum, void *p)
-{
- struct filename_trans *ft = key;
- kfree(ft->name);
- kfree(key);
- kfree(datum);
- cond_resched();
- return 0;
-}
-
-static int range_tr_destroy(void *key, void *datum, void *p)
-{
- struct mls_range *rt = datum;
- kfree(key);
- ebitmap_destroy(&rt->level[0].cat);
- ebitmap_destroy(&rt->level[1].cat);
- kfree(datum);
- cond_resched();
- return 0;
-}
-
-static void ocontext_destroy(struct ocontext *c, int i)
-{
- if (!c)
- return;
-
- context_destroy(&c->context[0]);
- context_destroy(&c->context[1]);
- if (i == OCON_ISID || i == OCON_FS ||
- i == OCON_NETIF || i == OCON_FSUSE)
- kfree(c->u.name);
- kfree(c);
-}
-
-/*
* Free any memory allocated by a policy database structure.
*/
void policydb_destroy(struct policydb *p)
@@ -773,7 +779,7 @@ void policydb_destroy(struct policydb *p)
kfree(p->class_val_to_struct);
kfree(p->role_val_to_struct);
kfree(p->user_val_to_struct);
- kvfree(p->type_val_to_struct_array);
+ kvfree(p->type_val_to_struct);
avtab_destroy(&p->te_avtab);
@@ -1718,7 +1724,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
return -EINVAL;
}
- upper = p->type_val_to_struct_array[upper->bounds - 1];
+ upper = p->type_val_to_struct[upper->bounds - 1];
BUG_ON(!upper);
if (upper->attribute) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index fcc6366b447f..162d0e79b85b 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -253,7 +253,7 @@ struct policydb {
struct class_datum **class_val_to_struct;
struct role_datum **role_val_to_struct;
struct user_datum **user_val_to_struct;
- struct type_datum **type_val_to_struct_array;
+ struct type_datum **type_val_to_struct;
/* type enforcement access vectors and transitions */
struct avtab te_avtab;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d3f5568c1f60..3a29e7c24ba9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -542,13 +542,13 @@ static void type_attribute_bounds_av(struct policydb *policydb,
struct type_datum *target;
u32 masked = 0;
- source = policydb->type_val_to_struct_array[scontext->type - 1];
+ source = policydb->type_val_to_struct[scontext->type - 1];
BUG_ON(!source);
if (!source->bounds)
return;
- target = policydb->type_val_to_struct_array[tcontext->type - 1];
+ target = policydb->type_val_to_struct[tcontext->type - 1];
BUG_ON(!target);
memset(&lo_avd, 0, sizeof(lo_avd));
@@ -649,9 +649,7 @@ static void context_struct_compute_av(struct policydb *policydb,
avkey.target_class = tclass;
avkey.specified = AVTAB_AV | AVTAB_XPERMS;
sattr = &policydb->type_attr_map_array[scontext->type - 1];
- BUG_ON(!sattr);
tattr = &policydb->type_attr_map_array[tcontext->type - 1];
- BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
@@ -893,7 +891,7 @@ int security_bounded_transition(struct selinux_state *state,
index = new_context->type;
while (true) {
- type = policydb->type_val_to_struct_array[index - 1];
+ type = policydb->type_val_to_struct[index - 1];
BUG_ON(!type);
/* not bounded anymore */
@@ -1057,9 +1055,7 @@ void security_compute_xperms_decision(struct selinux_state *state,
avkey.target_class = tclass;
avkey.specified = AVTAB_XPERMS;
sattr = &policydb->type_attr_map_array[scontext->type - 1];
- BUG_ON(!sattr);
tattr = &policydb->type_attr_map_array[tcontext->type - 1];
- BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
@@ -1586,6 +1582,7 @@ static int compute_sid_handle_invalid_context(
struct policydb *policydb = &state->ss->policydb;
char *s = NULL, *t = NULL, *n = NULL;
u32 slen, tlen, nlen;
+ struct audit_buffer *ab;
if (context_struct_to_string(policydb, scontext, &s, &slen))
goto out;
@@ -1593,12 +1590,14 @@ static int compute_sid_handle_invalid_context(
goto out;
if (context_struct_to_string(policydb, newcontext, &n, &nlen))
goto out;
- audit_log(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "op=security_compute_sid invalid_context=%s"
- " scontext=%s"
- " tcontext=%s"
- " tclass=%s",
- n, s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
+ ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab,
+ "op=security_compute_sid invalid_context=");
+ /* no need to record the NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, n, nlen - 1);
+ audit_log_format(ab, " scontext=%s tcontext=%s tclass=%s",
+ s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
+ audit_log_end(ab);
out:
kfree(s);
kfree(t);
@@ -3005,10 +3004,16 @@ int security_sid_mls_copy(struct selinux_state *state,
if (rc) {
if (!context_struct_to_string(policydb, &newcon, &s,
&len)) {
- audit_log(audit_context(),
- GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "op=security_sid_mls_copy "
- "invalid_context=%s", s);
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC,
+ AUDIT_SELINUX_ERR);
+ audit_log_format(ab,
+ "op=security_sid_mls_copy invalid_context=");
+ /* don't record NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, s, len - 1);
+ audit_log_end(ab);
kfree(s);
}
goto out_unlock;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e63a90ff2728..7d49994e8d5f 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
@@ -23,14 +23,14 @@ int sidtab_init(struct sidtab *s)
memset(s->roots, 0, sizeof(s->roots));
+ /* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
- atomic_set(&s->rcache[i], -1);
+ s->rcache[i] = SIDTAB_MAX;
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
- atomic_set(&s->count, 0);
-
+ s->count = 0;
s->convert = NULL;
spin_lock_init(&s->lock);
@@ -130,14 +130,12 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
static struct context *sidtab_lookup(struct sidtab *s, u32 index)
{
- u32 count = (u32)atomic_read(&s->count);
+ /* read entries only after reading count */
+ u32 count = smp_load_acquire(&s->count);
if (index >= count)
return NULL;
- /* read entries after reading count */
- smp_rmb();
-
return sidtab_do_lookup(s, index, 0);
}
@@ -210,10 +208,10 @@ static int sidtab_find_context(union sidtab_entry_inner entry,
static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
{
while (pos > 0) {
- atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
+ WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
--pos;
}
- atomic_set(&s->rcache[0], (int)index);
+ WRITE_ONCE(s->rcache[0], index);
}
static void sidtab_rcache_push(struct sidtab *s, u32 index)
@@ -227,14 +225,14 @@ static int sidtab_rcache_search(struct sidtab *s, struct context *context,
u32 i;
for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
- int v = atomic_read(&s->rcache[i]);
+ u32 v = READ_ONCE(s->rcache[i]);
- if (v < 0)
+ if (v >= SIDTAB_MAX)
continue;
- if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
- sidtab_rcache_update(s, (u32)v, i);
- *index = (u32)v;
+ if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
+ sidtab_rcache_update(s, v, i);
+ *index = v;
return 0;
}
}
@@ -245,8 +243,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
u32 *index)
{
unsigned long flags;
- u32 count = (u32)atomic_read(&s->count);
- u32 count_locked, level, pos;
+ u32 count, count_locked, level, pos;
struct sidtab_convert_params *convert;
struct context *dst, *dst_convert;
int rc;
@@ -255,11 +252,10 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
if (rc == 0)
return 0;
+ /* read entries only after reading count */
+ count = smp_load_acquire(&s->count);
level = sidtab_level_from_count(count);
- /* read entries after reading count */
- smp_rmb();
-
pos = 0;
rc = sidtab_find_context(s->roots[level], &pos, count, level,
context, index);
@@ -272,7 +268,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
spin_lock_irqsave(&s->lock, flags);
convert = s->convert;
- count_locked = (u32)atomic_read(&s->count);
+ count_locked = s->count;
level = sidtab_level_from_count(count_locked);
/* if count has changed before we acquired the lock, then catch up */
@@ -286,6 +282,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
++count;
}
+ /* bail out if we already reached max entries */
+ rc = -EOVERFLOW;
+ if (count >= SIDTAB_MAX)
+ goto out_unlock;
+
/* insert context into new entry */
rc = -ENOMEM;
dst = sidtab_do_lookup(s, count, 1);
@@ -315,7 +316,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
}
/* at this point we know the insert won't fail */
- atomic_set(&convert->target->count, count + 1);
+ convert->target->count = count + 1;
}
if (context->len)
@@ -326,9 +327,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
*index = count;
/* write entries before writing new count */
- smp_wmb();
-
- atomic_set(&s->count, count + 1);
+ smp_store_release(&s->count, count + 1);
rc = 0;
out_unlock:
@@ -418,7 +417,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
return -EBUSY;
}
- count = (u32)atomic_read(&s->count);
+ count = s->count;
level = sidtab_level_from_count(count);
/* allocate last leaf in the new sidtab (to avoid race with
@@ -431,7 +430,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
}
/* set count in case no new entries are added during conversion */
- atomic_set(&params->target->count, count);
+ params->target->count = count;
/* enable live convert of new entries */
s->convert = params;
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index bbd5c0d1f3bd..1f4763141aa1 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -40,8 +40,8 @@ union sidtab_entry_inner {
#define SIDTAB_LEAF_ENTRIES \
(SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
-#define SIDTAB_MAX_BITS 31 /* limited to INT_MAX due to atomic_t range */
-#define SIDTAB_MAX (((u32)1 << SIDTAB_MAX_BITS) - 1)
+#define SIDTAB_MAX_BITS 32
+#define SIDTAB_MAX U32_MAX
/* ensure enough tree levels for SIDTAB_MAX entries */
#define SIDTAB_MAX_LEVEL \
DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
@@ -69,13 +69,22 @@ struct sidtab_convert_params {
#define SIDTAB_RCACHE_SIZE 3
struct sidtab {
+ /*
+ * lock-free read access only for as many items as a prior read of
+ * 'count'
+ */
union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
- atomic_t count;
+ /*
+ * access atomically via {READ|WRITE}_ONCE(); only increment under
+ * spinlock
+ */
+ u32 count;
+ /* access only under spinlock */
struct sidtab_convert_params *convert;
spinlock_t lock;
- /* reverse lookup cache */
- atomic_t rcache[SIDTAB_RCACHE_SIZE];
+ /* reverse lookup cache - access atomically via {READ|WRITE}_ONCE() */
+ u32 rcache[SIDTAB_RCACHE_SIZE];
/* index == SID - 1 (no entry for SECSID_NULL) */
struct sidtab_isid_entry isids[SECINITSID_NUM];
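The sidtab conversion drops atomic_t in favour of a plain u32 count that writers publish with smp_store_release() after filling the new entry, and that lock-free readers fetch with smp_load_acquire(); a reader that observes count == N is therefore guaranteed to also observe the first N entries. A stand-alone sketch of that publication pattern with invented names (slots, nr_slots, slots_lock):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define NR_MAX	128

static int slots[NR_MAX];
static u32 nr_slots;			/* published entry count */
static DEFINE_SPINLOCK(slots_lock);

/* writer: fill the entry first, then publish the larger count */
static int add_slot(int value)
{
	unsigned long flags;
	int ret = -ENOSPC;

	spin_lock_irqsave(&slots_lock, flags);
	if (nr_slots < NR_MAX) {
		slots[nr_slots] = value;
		/* the entry store is ordered before the count update */
		smp_store_release(&nr_slots, nr_slots + 1);
		ret = 0;
	}
	spin_unlock_irqrestore(&slots_lock, flags);
	return ret;
}

/* lock-free reader: only touch entries covered by the count it read */
static bool read_slot(u32 idx, int *value)
{
	u32 count = smp_load_acquire(&nr_slots);

	if (idx >= count)
		return false;
	*value = READ_ONCE(slots[idx]);
	return true;
}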
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f1c93a7be9ec..38ac3da4e791 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
if (i == 0 || i >= SMK_LONGLABEL)
return ERR_PTR(-EINVAL);
- smack = kzalloc(i + 1, GFP_KERNEL);
+ smack = kzalloc(i + 1, GFP_NOFS);
if (smack == NULL)
return ERR_PTR(-ENOMEM);
@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
if ((m & *cp) == 0)
continue;
rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
- cat, GFP_KERNEL);
+ cat, GFP_NOFS);
if (rc < 0) {
netlbl_catmap_free(sap->attr.mls.cat);
return rc;
@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
if (skp != NULL)
goto freeout;
- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+ skp = kzalloc(sizeof(*skp), GFP_NOFS);
if (skp == NULL) {
skp = ERR_PTR(-ENOMEM);
goto freeout;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 4c5e5a438f8b..abeb09c30633 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
if (!(ip->i_opflags & IOP_XATTR))
return ERR_PTR(-EOPNOTSUPP);
- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
@@ -307,7 +307,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
/**
* init_inode_smack - initialize an inode security blob
- * @isp: the blob to initialize
+ * @inode: inode to extract the info from
* @skp: a pointer to the Smack label entry to use in the blob
*
*/
@@ -509,7 +509,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
/**
* smack_syslog - Smack approval on syslog
- * @type: message type
+ * @typefrom_file: unused
*
* Returns 0 on success, error code otherwise.
*/
@@ -765,7 +765,7 @@ static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts)
/**
* smack_set_mnt_opts - set Smack specific mount options
* @sb: the file system superblock
- * @opts: Smack mount options
+ * @mnt_opts: Smack mount options
* @kern_flags: mount option from kernel space or user space
* @set_kern_flags: where to store converted mount opts
*
@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (rc != 0)
return rc;
- } else if (bprm->unsafe)
+ }
+ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
return -EPERM;
bsp->smk_task = isp->smk_task;
@@ -958,7 +959,7 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
* smack_inode_alloc_security - allocate an inode blob
* @inode: the inode in need of a blob
*
- * Returns 0 if it gets a blob, -ENOMEM otherwise
+ * Returns 0
*/
static int smack_inode_alloc_security(struct inode *inode)
{
@@ -1164,7 +1165,7 @@ static int smack_inode_rename(struct inode *old_inode,
*
* This is the important Smack hook.
*
- * Returns 0 if access is permitted, -EACCES otherwise
+ * Returns 0 if access is permitted, an error code otherwise
*/
static int smack_inode_permission(struct inode *inode, int mask)
{
@@ -1222,8 +1223,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
/**
* smack_inode_getattr - Smack check for getting attributes
- * @mnt: vfsmount of the object
- * @dentry: the object
+ * @path: path to extract the info from
*
* Returns 0 if access is permitted, an error code otherwise
*/
@@ -1870,14 +1870,13 @@ static int smack_file_receive(struct file *file)
/**
* smack_file_open - Smack dentry open processing
* @file: the object
- * @cred: task credential
*
* Set the security blob in the file structure.
* Allow the open only if the task has read access. There are
* many read operations (e.g. fstat) that you can do with an
* fd even if you have the file open write-only.
*
- * Returns 0
+ * Returns 0 if current has access, error code otherwise
*/
static int smack_file_open(struct file *file)
{
@@ -1900,7 +1899,7 @@ static int smack_file_open(struct file *file)
/**
* smack_cred_alloc_blank - "allocate" blank task-level security credentials
- * @new: the new credentials
+ * @cred: the new credentials
* @gfp: the atomicity of any memory allocations
*
* Prepare a blank set of credentials for modification. This must allocate all
@@ -1983,7 +1982,7 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
/**
* smack_cred_getsecid - get the secid corresponding to a creds structure
- * @c: the object creds
+ * @cred: the object creds
* @secid: where to put the result
*
* Sets the secid to contain a u32 version of the smack label.
@@ -2140,8 +2139,6 @@ static int smack_task_getioprio(struct task_struct *p)
/**
* smack_task_setscheduler - Smack check on setting scheduler
* @p: the task object
- * @policy: unused
- * @lp: unused
*
* Return 0 if read access is permitted
*/
@@ -2611,8 +2608,9 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
/**
* smk_ipv6_port_check - check Smack port access
- * @sock: socket
+ * @sk: socket
* @address: address
+ * @act: the action being taken
*
* Create or update the port list entry
*/
@@ -2782,7 +2780,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
*
* Cross reference the peer labels for SO_PEERSEC
*
- * Returns 0 on success, and error code otherwise
+ * Returns 0
*/
static int smack_socket_socketpair(struct socket *socka,
struct socket *sockb)
@@ -3014,13 +3012,13 @@ static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd)
*
* Returns 0 if current has the requested access, error code otherwise
*/
-static int smack_shm_shmat(struct kern_ipc_perm *ipc, char __user *shmaddr,
+static int smack_shm_shmat(struct kern_ipc_perm *isp, char __user *shmaddr,
int shmflg)
{
int may;
may = smack_flags_to_may(shmflg);
- return smk_curacc_shm(ipc, may);
+ return smk_curacc_shm(isp, may);
}
/**
@@ -3925,6 +3923,8 @@ access_check:
skp = smack_ipv6host_label(&sadd);
if (skp == NULL)
skp = smack_net_ambient;
+ if (skb == NULL)
+ break;
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
ad.a.u.net->family = family;
@@ -4762,7 +4762,7 @@ static __init void init_smack_known_list(void)
/**
* smack_init - initialize the smack system
*
- * Returns 0
+ * Returns 0 on success, -ENOMEM if there's no memory
*/
static __init int smack_init(void)
{
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index ef0d8712d318..e3e05c04dbd1 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -23,6 +23,7 @@
#include <linux/ctype.h>
#include <linux/audit.h>
#include <linux/magic.h>
+#include <linux/fs_context.h>
#include "smack.h"
#define BEBITS (sizeof(__be32) * 8)
@@ -2816,14 +2817,13 @@ static const struct file_operations smk_ptrace_ops = {
/**
* smk_fill_super - fill the smackfs superblock
* @sb: the empty superblock
- * @data: unused
- * @silent: unused
+ * @fc: unused
*
* Fill in the well known entries for the smack filesystem
*
* Returns 0 on success, an error code on failure
*/
-static int smk_fill_super(struct super_block *sb, void *data, int silent)
+static int smk_fill_super(struct super_block *sb, struct fs_context *fc)
{
int rc;
@@ -2893,25 +2893,35 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
}
/**
- * smk_mount - get the smackfs superblock
- * @fs_type: passed along without comment
- * @flags: passed along without comment
- * @dev_name: passed along without comment
- * @data: passed along without comment
+ * smk_get_tree - get the smackfs superblock
+ * @fc: The mount context, including any options
*
* Just passes everything along.
*
* Returns what the lower level code does.
*/
-static struct dentry *smk_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int smk_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, smk_fill_super);
+ return get_tree_single(fc, smk_fill_super);
+}
+
+static const struct fs_context_operations smk_context_ops = {
+ .get_tree = smk_get_tree,
+};
+
+/**
+ * smk_init_fs_context - Initialise a filesystem context for smackfs
+ * @fc: The blank mount context
+ */
+static int smk_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &smk_context_ops;
+ return 0;
}
static struct file_system_type smk_fs_type = {
.name = "smackfs",
- .mount = smk_mount,
+ .init_fs_context = smk_init_fs_context,
.kill_sb = kill_litter_super,
};
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 01c6239c4493..94dc346370b1 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -445,7 +445,6 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}
-static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;
static struct ctl_path yama_sysctl_path[] = {
@@ -461,7 +460,7 @@ static struct ctl_table yama_sysctl_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = yama_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
.extra2 = &max_scope,
},
{ }
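The Yama cleanup above replaces a file-local 'static int zero' with the shared SYSCTL_ZERO constant that was added for exactly this purpose. A sketch of a hypothetical sysctl entry clamped to [0, 1] using the shared constants:

#include <linux/sysctl.h>

static int my_flag;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_flag",
		.data		= &my_flag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,	/* lower bound */
		.extra2		= SYSCTL_ONE,	/* upper bound */
	},
	{ }
};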